diff --git a/Cargo.lock b/Cargo.lock index c42f6db801a66..406a66f48cc52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,6 +332,34 @@ dependencies = [ "serde_json", ] +[[package]] +name = "async-backtrace" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a5baf8b0bb3055d2468928ed9f5d60fda91015470204e7c54ddb20b0bb22ebc" +dependencies = [ + "async-backtrace-attributes", + "dashmap", + "futures", + "itertools", + "loom", + "once_cell", + "pin-project-lite", + "rustc-hash", + "static_assertions", +] + +[[package]] +name = "async-backtrace-attributes" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3659ebaa5387be8848f768e72a2f967c471fa6225c6b1506a1f3acfdaf25ed17" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-channel" version = "1.8.0" @@ -1451,6 +1479,7 @@ version = "0.1.0" dependencies = [ "anyerror", "anyhow", + "async-backtrace", "async-channel", "async-trait", "bytesize", @@ -1499,6 +1528,7 @@ dependencies = [ name = "common-catalog" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait", "chrono", "common-arrow", @@ -1754,6 +1784,7 @@ name = "common-http" version = "0.1.0" dependencies = [ "anyerror", + "async-backtrace", "common-base", "common-exception", "futures", @@ -1787,6 +1818,7 @@ dependencies = [ name = "common-management" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait", "common-base", "common-exception", @@ -2047,6 +2079,7 @@ dependencies = [ name = "common-pipeline-core" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait", "common-exception", "common-expression", @@ -2062,6 +2095,7 @@ dependencies = [ name = "common-pipeline-sinks" version = "0.1.0" dependencies = [ + "async-backtrace", "async-channel", "async-trait-fn", "common-base", @@ -2076,6 +2110,7 @@ dependencies = [ name = "common-pipeline-sources" version = "0.1.0" dependencies = [ + "async-backtrace", "async-channel", "async-trait-fn", "bstr 1.4.0", @@ -2155,6 +2190,7 @@ dependencies = [ name = "common-settings" version = "0.1.0" dependencies = [ + "async-backtrace", "common-ast", "common-config", "common-exception", @@ -2175,6 +2211,7 @@ name = "common-sharing" version = "0.1.0" dependencies = [ "anyhow", + "async-backtrace", "async-trait", "bytes", "common-auth", @@ -2202,6 +2239,7 @@ version = "0.1.0" dependencies = [ "ahash 0.8.3", "anyhow", + "async-backtrace", "async-recursion", "async-trait-fn", "chrono", @@ -2251,6 +2289,7 @@ name = "common-storage" version = "0.1.0" dependencies = [ "anyhow", + "async-backtrace", "async-trait", "bytes", "chrono", @@ -2288,6 +2327,7 @@ dependencies = [ name = "common-storages-fuse" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "backoff", "chrono", @@ -2333,6 +2373,7 @@ dependencies = [ name = "common-storages-hive" version = "0.1.0" dependencies = [ + "async-backtrace", "async-recursion", "async-trait", "chrono", @@ -2366,6 +2407,7 @@ dependencies = [ name = "common-storages-iceberg" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait", "chrono", "common-catalog", @@ -2398,6 +2440,7 @@ dependencies = [ name = "common-storages-memory" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "common-building", "common-catalog", @@ -2418,6 +2461,7 @@ dependencies = [ name = "common-storages-null" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "common-building", "common-catalog", @@ -2433,6 +2477,7 @@ 
dependencies = [ name = "common-storages-parquet" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "chrono", "common-arrow", @@ -2459,6 +2504,7 @@ dependencies = [ name = "common-storages-random" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "common-building", "common-catalog", @@ -2475,6 +2521,7 @@ dependencies = [ name = "common-storages-result-cache" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "common-arrow", "common-catalog", @@ -2501,6 +2548,7 @@ dependencies = [ name = "common-storages-share" version = "0.1.0" dependencies = [ + "async-backtrace", "common-exception", "common-meta-app", "goldenfile", @@ -2515,6 +2563,7 @@ dependencies = [ name = "common-storages-stage" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "common-base", "common-building", @@ -2535,6 +2584,7 @@ dependencies = [ name = "common-storages-system" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "chrono", "common-base", @@ -2599,6 +2649,7 @@ dependencies = [ name = "common-users" version = "0.1.0" dependencies = [ + "async-backtrace", "base64 0.21.0", "common-base", "common-exception", @@ -3119,6 +3170,7 @@ version = "0.1.0" dependencies = [ "anyerror", "anyhow", + "async-backtrace", "clap 3.2.23", "comfy-table", "common-base", @@ -3232,6 +3284,7 @@ dependencies = [ "arrow-flight", "arrow-ipc", "arrow-schema", + "async-backtrace", "async-channel", "async-stream", "async-trait-fn", @@ -9202,6 +9255,7 @@ dependencies = [ name = "sharing-endpoint" version = "0.1.0" dependencies = [ + "async-backtrace", "base64 0.21.0", "clap 3.2.23", "common-base", @@ -9514,6 +9568,7 @@ dependencies = [ name = "storages-common-cache" version = "0.1.0" dependencies = [ + "async-backtrace", "async-trait-fn", "common-cache", "common-exception", diff --git a/Cargo.toml b/Cargo.toml index bef9447513a14..7db1d7eddc4bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -145,6 +145,9 @@ reqwest = { version = "0.11", default-features = false, features = [ # runtime tokio = { version = "1.26.0", features = ["full"] } +# backtrace +async-backtrace = "0.2.2" + [profile.release] debug = 1 lto = "thin" diff --git a/src/binaries/Cargo.toml b/src/binaries/Cargo.toml index 5ab6f624898e0..2089923947d09 100644 --- a/src/binaries/Cargo.toml +++ b/src/binaries/Cargo.toml @@ -60,6 +60,7 @@ sharing-endpoint = { path = "../query/sharing-endpoint" } # Crates.io dependencies anyerror = { workspace = true } anyhow = { workspace = true } +async-backtrace = { workspace = true } clap = { workspace = true } comfy-table = "6.1.3" limits-rs = "0.2.0" diff --git a/src/binaries/query/main.rs b/src/binaries/query/main.rs index 3a30e9bbf3d49..59948a7944cfa 100644 --- a/src/binaries/query/main.rs +++ b/src/binaries/query/main.rs @@ -54,7 +54,7 @@ fn main() { std::process::exit(cause.code() as i32); } Ok(rt) => { - if let Err(cause) = rt.block_on(main_entrypoint()) { + if let Err(cause) = rt.block_on(async_backtrace::location!().frame(main_entrypoint())) { eprintln!("Databend Query start failure, cause: {:?}", cause); std::process::exit(cause.code() as i32); } diff --git a/src/common/base/Cargo.toml b/src/common/base/Cargo.toml index f78f53f46d0a4..4857ca79c0751 100644 --- a/src/common/base/Cargo.toml +++ b/src/common/base/Cargo.toml @@ -26,6 +26,7 @@ common-exception = { path = "../exception" } # Github dependencies # Crates.io dependencies +async-backtrace = "0.2.2" async-channel = "1.7.1" async-trait = "0.1.57" bytesize = "1.1.0" diff --git 
a/src/common/base/src/runtime/runtime.rs b/src/common/base/src/runtime/runtime.rs index 29075be9a2200..a0ae99ac6f48a 100644 --- a/src/common/base/src/runtime/runtime.rs +++ b/src/common/base/src/runtime/runtime.rs @@ -155,6 +155,8 @@ impl Runtime { runtime_builder.thread_name(thread_name); } } + + runtime_builder.thread_stack_size(5 * 1024 * 1024); } Self::create(None, mem_stat, &mut runtime_builder) @@ -179,6 +181,8 @@ impl Runtime { thread_name = Some(cur_thread_name.to_string()); } } + + runtime_builder.thread_stack_size(5 * 1024 * 1024); } if let Some(thread_name) = &thread_name { @@ -255,10 +259,12 @@ impl Runtime { let permit = semaphore.acquire_owned().await.map_err(|e| { ErrorCode::Internal(format!("semaphore closed, acquire permit failure. {}", e)) })?; - let handler = self.handle.spawn(async move { - // take the ownership of the permit, (implicitly) drop it when task is done - fut(permit).await - }); + let handler = self + .handle + .spawn(async_backtrace::location!().frame(async move { + // take the ownership of the permit, (implicitly) drop it when task is done + fut(permit).await + })); handlers.push(handler) } @@ -273,7 +279,7 @@ impl TrySpawn for Runtime { T: Future + Send + 'static, T::Output: Send + 'static, { - Ok(self.handle.spawn(task)) + Ok(self.handle.spawn(async_backtrace::location!().frame(task))) } } diff --git a/src/common/base/src/runtime/runtime_tracker.rs b/src/common/base/src/runtime/runtime_tracker.rs index 2f80e517f17c7..0c94849135664 100644 --- a/src/common/base/src/runtime/runtime_tracker.rs +++ b/src/common/base/src/runtime/runtime_tracker.rs @@ -670,7 +670,7 @@ mod tests { .unwrap(); rt.block_on(async { - let h = tokio::spawn(f); + let h = tokio::spawn(async_backtrace::location!().frame(f)); let res = h.await; assert!(res.is_err(), "panicked"); }); diff --git a/src/common/base/src/runtime/thread.rs b/src/common/base/src/runtime/thread.rs index 73e965a5b1ffc..e5d928af73128 100644 --- a/src/common/base/src/runtime/thread.rs +++ b/src/common/base/src/runtime/thread.rs @@ -63,6 +63,8 @@ impl Thread { name = Some(thread_name.to_string()); } } + + thread_builder = thread_builder.stack_size(5 * 1024 * 1024); } let mut mem_stat_name = String::from("UnnamedThread"); diff --git a/src/common/base/tests/it/runtime.rs b/src/common/base/tests/it/runtime.rs index d856b8c48b4a3..f28fc94914406 100644 --- a/src/common/base/tests/it/runtime.rs +++ b/src/common/base/tests/it/runtime.rs @@ -85,6 +85,7 @@ async fn test_shutdown_long_run_runtime() -> Result<()> { } static START_TIME: Lazy<Instant> = Lazy::new(Instant::now); + // println output makes it easier to see whether the tasks actually run in parallel async fn mock_get_page(i: usize) -> Vec<usize> { let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); diff --git a/src/common/http/Cargo.toml b/src/common/http/Cargo.toml index f8ecbcc8927d6..8cb4da50c0257 100644 --- a/src/common/http/Cargo.toml +++ b/src/common/http/Cargo.toml @@ -24,6 +24,7 @@ common-exception = { path = "../exception" } # Crates.io dependencies anyerror = { workspace = true } +async-backtrace = { workspace = true } futures = "0.3.24" poem = { version = "1", features = ["rustls"] } serde = { workspace = true } diff --git a/src/common/http/src/debug/mod.rs b/src/common/http/src/debug/mod.rs index bed6a30d097e0..eb00668e02502 100644 --- a/src/common/http/src/debug/mod.rs +++ b/src/common/http/src/debug/mod.rs @@ -17,5 +17,6 @@ pub mod pprof; #[cfg(feature = "memory-profiling")] pub mod jeprof; +pub mod stack; pub use home::PProfRequest; diff --git a/src/common/http/src/debug/stack.rs 
b/src/common/http/src/debug/stack.rs new file mode 100644 index 0000000000000..4dd49dd0b715a --- /dev/null +++ b/src/common/http/src/debug/stack.rs @@ -0,0 +1,89 @@ +// Copyright 2023 Datafuse Labs. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fmt::Write; + +use poem::web::Query; +use poem::IntoResponse; + +#[derive(serde::Serialize, serde::Deserialize, Debug)] +pub struct DumpStackRequest { + wait_for_running_tasks: bool, +} + +#[derive(Debug)] +struct AsyncTaskItem { + stack_frames: Vec<String>, +} + +#[poem::handler] +pub async fn debug_dump_stack(req: Option<Query<DumpStackRequest>>) -> impl IntoResponse { + let tree = + async_backtrace::taskdump_tree(req.map(|x| x.wait_for_running_tasks).unwrap_or(false)); + + let mut tasks = vec![]; + let mut polling_tasks = vec![]; + let mut current_stack_frames = vec![]; + + let mut first = true; + let mut is_polling = false; + for line in tree.lines() { + if line.starts_with(|x: char| !x.is_ascii_whitespace()) { + if !first { + match is_polling { + true => polling_tasks.push(AsyncTaskItem { + stack_frames: std::mem::take(&mut current_stack_frames), + }), + false => tasks.push(AsyncTaskItem { + stack_frames: std::mem::take(&mut current_stack_frames), + }), + }; + + is_polling = false; + } + + first = false; + } + + if line.ends_with("[POLLING]") { + is_polling = true; + } + + current_stack_frames.push(line.to_string()); + } + + match is_polling { + true => polling_tasks.push(AsyncTaskItem { + stack_frames: std::mem::take(&mut current_stack_frames), + }), + false => tasks.push(AsyncTaskItem { + stack_frames: std::mem::take(&mut current_stack_frames), + }), + }; + + let mut output = String::new(); + for mut tasks in [tasks, polling_tasks] { + tasks.sort_by(|l, r| Ord::cmp(&l.stack_frames.len(), &r.stack_frames.len())); + + for item in tasks.into_iter().rev() { + for frame in item.stack_frames { + writeln!(output, "{}", frame).unwrap(); + } + + writeln!(output).unwrap(); + } + } + + output +} diff --git a/src/common/http/src/http_shutdown_handlers.rs b/src/common/http/src/http_shutdown_handlers.rs index 4057c07e64f36..2130d03477ae9 100644 --- a/src/common/http/src/http_shutdown_handlers.rs +++ b/src/common/http/src/http_shutdown_handlers.rs @@ -80,9 +80,11 @@ impl HttpShutdownHandler { let (tx, rx) = oneshot::channel(); let join_handle = common_base::base::tokio::spawn( - poem::Server::new_with_acceptor(acceptor) - .name(self.service_name.clone()) - .run_with_graceful_shutdown(ep, rx.map(|_| ()), graceful_shutdown_timeout), + async_backtrace::location!().frame( + poem::Server::new_with_acceptor(acceptor) + .name(self.service_name.clone()) + .run_with_graceful_shutdown(ep, rx.map(|_| ()), graceful_shutdown_timeout), + ), ); self.join_handle = Some(join_handle); self.abort_handle = Some(tx); diff --git a/src/common/storage/Cargo.toml b/src/common/storage/Cargo.toml index 8a32bf3018f98..a8a2476042c3f 100644 --- a/src/common/storage/Cargo.toml +++ b/src/common/storage/Cargo.toml @@ -18,6 +18,7 @@ common-expression = { path = "../../query/expression" } 
common-meta-app = { path = "../../meta/app" } anyhow = { workspace = true } +async-backtrace = { workspace = true } async-trait = "0.1" bytes = "1" chrono = { workspace = true } diff --git a/src/common/storage/src/metrics.rs b/src/common/storage/src/metrics.rs index cbcfadc0d1117..7e850a90f55e5 100644 --- a/src/common/storage/src/metrics.rs +++ b/src/common/storage/src/metrics.rs @@ -176,6 +176,7 @@ impl LayeredAccessor for StorageMetricsAccessor { &self.inner } + #[async_backtrace::framed] async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { self.inner .read(path, args) .await .map(|(rp, r)| (rp, StorageMetricsWrapper::new(r, self.metrics.clone()))) } + #[async_backtrace::framed] async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { self.inner .write(path, args) .await .map(|(rp, r)| (rp, StorageMetricsWrapper::new(r, self.metrics.clone()))) } + #[async_backtrace::framed] async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Pager)> { self.inner.list(path, args).await } + #[async_backtrace::framed] async fn scan(&self, path: &str, args: OpScan) -> Result<(RpScan, Self::Pager)> { self.inner.scan(path, args).await } @@ -287,6 +291,7 @@ impl oio::BlockingRead for StorageMetricsWrapper { #[async_trait] impl oio::Write for StorageMetricsWrapper { + #[async_backtrace::framed] async fn write(&mut self, bs: Bytes) -> Result<()> { let size = bs.len(); let start = Instant::now(); @@ -300,6 +305,7 @@ result } + #[async_backtrace::framed] async fn append(&mut self, bs: Bytes) -> Result<()> { let size = bs.len(); let start = Instant::now(); @@ -313,6 +319,7 @@ result } + #[async_backtrace::framed] async fn close(&mut self) -> Result<()> { self.inner.close().await } diff --git a/src/common/storage/src/operator.rs b/src/common/storage/src/operator.rs index ce864cbf07257..b94187979cd5b 100644 --- a/src/common/storage/src/operator.rs +++ b/src/common/storage/src/operator.rs @@ -331,12 +331,14 @@ impl DataOperator { self.params.clone() } + #[async_backtrace::framed] pub async fn init(conf: &StorageConfig) -> common_exception::Result<()> { GlobalInstance::set(Self::try_create(&conf.params).await?); Ok(()) } + #[async_backtrace::framed] pub async fn try_create(sp: &StorageParams) -> common_exception::Result<DataOperator> { let operator = init_operator(sp)?; diff --git a/src/common/storage/src/parquet.rs b/src/common/storage/src/parquet.rs index 1c3748be4f0b0..03c51d595709a 100644 --- a/src/common/storage/src/parquet.rs +++ b/src/common/storage/src/parquet.rs @@ -20,6 +20,7 @@ use common_exception::ErrorCode; use common_exception::Result; use opendal::Operator; +#[async_backtrace::framed] pub async fn read_parquet_schema_async(operator: &Operator, path: &str) -> Result<ArrowSchema> { let mut reader = operator.reader(path).await?; let meta = pread::read_metadata_async(&mut reader).await.map_err(|e| { @@ -43,6 +44,7 @@ async fn read_parquet_metas_batch( Ok(metas) } +#[async_backtrace::framed] pub async fn read_parquet_metas_in_parallel( op: Operator, file_infos: Vec<(String, u64)>, diff --git a/src/common/storage/src/runtime_layer.rs b/src/common/storage/src/runtime_layer.rs index a3ff590d77baf..331a607d160fd 100644 --- a/src/common/storage/src/runtime_layer.rs +++ b/src/common/storage/src/runtime_layer.rs @@ -80,6 +80,7 @@ impl LayeredAccessor 
for RuntimeAccessor { &self.inner } + #[async_backtrace::framed] async fn create(&self, path: &str, args: OpCreate) -> Result<RpCreate> { let op = self.inner.clone(); let path = path.to_string(); @@ -88,6 +89,7 @@ self.runtime.spawn(future).await.expect("join must success") } + #[async_backtrace::framed] async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let op = self.inner.clone(); let path = path.to_string(); @@ -96,6 +98,7 @@ self.runtime.spawn(future).await.expect("join must success") } + #[async_backtrace::framed] async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> { let op = self.inner.clone(); let path = path.to_string(); @@ -104,6 +107,7 @@ self.runtime.spawn(future).await.expect("join must success") } + #[async_backtrace::framed] async fn stat(&self, path: &str, args: OpStat) -> Result<RpStat> { let op = self.inner.clone(); let path = path.to_string(); @@ -112,6 +116,7 @@ self.runtime.spawn(future).await.expect("join must success") } + #[async_backtrace::framed] async fn delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> { let op = self.inner.clone(); let path = path.to_string(); @@ -120,6 +125,7 @@ self.runtime.spawn(future).await.expect("join must success") } + #[async_backtrace::framed] async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Pager)> { let op = self.inner.clone(); let path = path.to_string(); @@ -128,6 +134,7 @@ self.runtime.spawn(future).await.expect("join must success") } + #[async_backtrace::framed] async fn scan(&self, path: &str, args: OpScan) -> Result<(RpScan, Self::Pager)> { let op = self.inner.clone(); let path = path.to_string(); diff --git a/src/common/storage/src/stage.rs b/src/common/storage/src/stage.rs index 3a512a16a5ff2..937227b743a6e 100644 --- a/src/common/storage/src/stage.rs +++ b/src/common/storage/src/stage.rs @@ -107,6 +107,7 @@ impl StageFilesInfo { } } + #[async_backtrace::framed] pub async fn list( &self, operator: &Operator, @@ -148,6 +149,7 @@ } } + #[async_backtrace::framed] pub async fn first_file(&self, operator: &Operator) -> Result<StageFileInfo> { let mut files = self.list(operator, true, None).await?; match files.pop() { @@ -195,6 +197,7 @@ } } + #[async_backtrace::framed] pub async fn list_files_with_pattern( operator: &Operator, path: &str, @@ -299,6 +302,7 @@ fn blocking_list_files_with_pattern( /// - `Ok(None)` if given object is not a file. /// - `Err(err)` if there is an error happened. 
#[allow(unused)] +#[async_backtrace::framed] pub async fn stat_file(op: Operator, de: Entry) -> Result<Option<StageFileInfo>> { let meta = op .metadata(&de, { diff --git a/src/query/catalog/Cargo.toml b/src/query/catalog/Cargo.toml index 521d916ad5096..5866cfbb5da59 100644 --- a/src/query/catalog/Cargo.toml +++ b/src/query/catalog/Cargo.toml @@ -20,6 +20,7 @@ common-pipeline-core = { path = "../pipeline/core" } common-settings = { path = "../settings" } common-storage = { path = "../../common/storage" } +async-backtrace = { workspace = true } async-trait = "0.1.57" chrono = { workspace = true } dashmap = "5.4" diff --git a/src/query/catalog/src/catalog/interface.rs b/src/query/catalog/src/catalog/interface.rs index dbaca403377d7..677a11b73df88 100644 --- a/src/query/catalog/src/catalog/interface.rs +++ b/src/query/catalog/src/catalog/interface.rs @@ -75,6 +75,7 @@ pub trait Catalog: DynClone + Send + Sync { async fn undrop_database(&self, req: UndropDatabaseReq) -> Result<UndropDatabaseReply>; + #[async_backtrace::framed] async fn exists_database(&self, tenant: &str, db_name: &str) -> Result<bool> { match self.get_database(tenant, db_name).await { Ok(_) => Ok(true), @@ -119,6 +120,7 @@ async fn rename_table(&self, req: RenameTableReq) -> Result<RenameTableReply>; // Check a db.table is exists or not. + #[async_backtrace::framed] async fn exists_table(&self, tenant: &str, db_name: &str, table_name: &str) -> Result<bool> { match self.get_table(tenant, db_name, table_name).await { Ok(_) => Ok(true), diff --git a/src/query/catalog/src/database.rs b/src/query/catalog/src/database.rs index 0fab15d002e1d..474e32ef5812b 100644 --- a/src/query/catalog/src/database.rs +++ b/src/query/catalog/src/database.rs @@ -68,6 +68,7 @@ pub trait Database: DynClone + Sync + Send { } // Initial a database. + #[async_backtrace::framed] async fn init_database(&self, _tenant: &str) -> Result<()> { Ok(()) } @@ -81,6 +82,7 @@ } // Get one table by db and table name. 
+ #[async_backtrace::framed] async fn get_table(&self, _table_name: &str) -> Result<Arc<dyn Table>> { Err(ErrorCode::Unimplemented(format!( "UnImplement get_table in {} Database", @@ -88,6 +90,7 @@ ))) } + #[async_backtrace::framed] async fn list_tables(&self) -> Result<Vec<Arc<dyn Table>>> { Err(ErrorCode::Unimplemented(format!( "UnImplement list_tables in {} Database", @@ -95,6 +98,7 @@ ))) } + #[async_backtrace::framed] async fn list_tables_history(&self) -> Result<Vec<Arc<dyn Table>>> { Err(ErrorCode::Unimplemented(format!( "UnImplement list_tables_history in {} Database", @@ -102,6 +106,7 @@ ))) } + #[async_backtrace::framed] async fn create_table(&self, _req: CreateTableReq) -> Result<()> { Err(ErrorCode::Unimplemented(format!( "UnImplement create_table in {} Database", @@ -109,6 +114,7 @@ ))) } + #[async_backtrace::framed] async fn drop_table_by_id(&self, _req: DropTableByIdReq) -> Result<DropTableReply> { Err(ErrorCode::Unimplemented(format!( "UnImplement drop_table_by_id in {} Database", @@ -116,6 +122,7 @@ ))) } + #[async_backtrace::framed] async fn undrop_table(&self, _req: UndropTableReq) -> Result<UndropTableReply> { Err(ErrorCode::Unimplemented(format!( "UnImplement undrop_table in {} Database", @@ -123,6 +130,7 @@ ))) } + #[async_backtrace::framed] async fn rename_table(&self, _req: RenameTableReq) -> Result<RenameTableReply> { Err(ErrorCode::Unimplemented(format!( "UnImplement rename_table in {} Database", @@ -130,6 +138,7 @@ ))) } + #[async_backtrace::framed] async fn upsert_table_option( &self, _req: UpsertTableOptionReq, ) -> Result<UpsertTableOptionReply> { Err(ErrorCode::Unimplemented(format!( "UnImplement upsert_table_option in {} Database", @@ -140,6 +149,7 @@ ))) } + #[async_backtrace::framed] async fn update_table_meta(&self, _req: UpdateTableMetaReq) -> Result<UpdateTableMetaReply> { Err(ErrorCode::Unimplemented(format!( "UnImplement update_table_meta in {} Database", @@ -147,6 +157,7 @@ ))) } + #[async_backtrace::framed] async fn get_table_copied_file_info( &self, _req: GetTableCopiedFileReq, ) -> Result<GetTableCopiedFileReply> { Err(ErrorCode::Unimplemented(format!( "UnImplement get_table_copied_file_info in {} Database", @@ -157,6 +168,7 @@ ))) } + #[async_backtrace::framed] async fn upsert_table_copied_file_info( &self, _req: UpsertTableCopiedFileReq, ) -> Result<UpsertTableCopiedFileReply> { Err(ErrorCode::Unimplemented(format!( "UnImplement upsert_table_copied_file_info in {} Database", @@ -167,6 +179,7 @@ ))) } + #[async_backtrace::framed] async fn truncate_table(&self, _req: TruncateTableReq) -> Result<TruncateTableReply> { Err(ErrorCode::Unimplemented(format!( "UnImplement truncate_table in {} Database", diff --git a/src/query/catalog/src/table.rs b/src/query/catalog/src/table.rs index 76bdb6c95dd27..8b761a76f537f 100644 --- a/src/query/catalog/src/table.rs +++ b/src/query/catalog/src/table.rs @@ -110,6 +110,7 @@ pub trait Table: Sync + Send { false } + #[async_backtrace::framed] async fn alter_table_cluster_keys( &self, ctx: Arc<dyn TableContext>, @@ -123,6 +124,7 @@ ))) } + #[async_backtrace::framed] async fn drop_table_cluster_keys(&self, ctx: Arc<dyn TableContext>) -> Result<()> { let _ = ctx; @@ -133,6 +135,7 @@ } /// Gather partitions to be scanned according to the push_downs + #[async_backtrace::framed] async fn read_partitions( &self, ctx: Arc<dyn TableContext>, @@ -183,6 +186,7 @@ ))) } + #[async_backtrace::framed] async fn replace_into( &self, ctx: Arc<dyn TableContext>, @@ -198,6 +202,7 @@ ))) } + #[async_backtrace::framed] async fn 
commit_insertion( &self, ctx: Arc<dyn TableContext>, @@ -210,17 +215,20 @@ Ok(()) } + #[async_backtrace::framed] async fn truncate(&self, ctx: Arc<dyn TableContext>, purge: bool) -> Result<()> { let (_, _) = (ctx, purge); Ok(()) } + #[async_backtrace::framed] async fn purge(&self, ctx: Arc<dyn TableContext>, keep_last_snapshot: bool) -> Result<()> { let (_, _) = (ctx, keep_last_snapshot); Ok(()) } + #[async_backtrace::framed] async fn analyze(&self, ctx: Arc<dyn TableContext>) -> Result<()> { let _ = ctx; @@ -231,10 +239,12 @@ Ok(None) } + #[async_backtrace::framed] async fn column_statistics_provider(&self) -> Result<Box<dyn ColumnStatisticsProvider>> { Ok(Box::new(DummyColumnStatisticsProvider)) } + #[async_backtrace::framed] async fn navigate_to(&self, instant: &NavigationPoint) -> Result<Arc<dyn Table>> { let _ = instant; @@ -245,6 +255,7 @@ ))) } + #[async_backtrace::framed] async fn delete( &self, ctx: Arc<dyn TableContext>, @@ -261,6 +272,7 @@ ))) } + #[async_backtrace::framed] async fn update( &self, ctx: Arc<dyn TableContext>, @@ -291,6 +303,7 @@ } // return false if the table does not need to be compacted. + #[async_backtrace::framed] async fn compact( &self, ctx: Arc<dyn TableContext>, @@ -307,6 +320,7 @@ ))) } + #[async_backtrace::framed] async fn recluster( &self, ctx: Arc<dyn TableContext>, @@ -322,6 +336,7 @@ ))) } + #[async_backtrace::framed] async fn revert_to( &self, ctx: Arc<dyn TableContext>, @@ -338,6 +353,7 @@ #[async_trait::async_trait] pub trait TableExt: Table { + #[async_backtrace::framed] async fn refresh(&self, ctx: &dyn TableContext) -> Result<Arc<dyn Table>> { let table_info = self.get_table_info(); let name = table_info.name.clone(); diff --git a/src/query/catalog/src/table_mutator.rs b/src/query/catalog/src/table_mutator.rs index 47ad847af438f..c9cf15c9ba4fb 100644 --- a/src/query/catalog/src/table_mutator.rs +++ b/src/query/catalog/src/table_mutator.rs @@ -21,5 +21,6 @@ use crate::table::Table; #[async_trait::async_trait] pub trait TableMutator: Send + Sync { async fn target_select(&mut self) -> Result<bool>; + async fn try_commit(self: Box<Self>, table: Arc<dyn Table>) -> Result<()>; } diff --git a/src/query/management/Cargo.toml b/src/query/management/Cargo.toml index 2f7a3943f64b6..cd6023de952b0 100644 --- a/src/query/management/Cargo.toml +++ b/src/query/management/Cargo.toml @@ -24,6 +24,7 @@ common-meta-types = { path = "../../meta/types" } common-proto-conv = { path = "../../meta/proto-conv" } common-protos = { path = "../../meta/protos" } +async-backtrace = { workspace = true } async-trait = "0.1.57" serde_json = { workspace = true } diff --git a/src/query/management/src/cluster/cluster_mgr.rs b/src/query/management/src/cluster/cluster_mgr.rs index b05af47ec12db..d571c69d74247 100644 --- a/src/query/management/src/cluster/cluster_mgr.rs +++ b/src/query/management/src/cluster/cluster_mgr.rs @@ -80,6 +80,7 @@ impl ClusterMgr { #[async_trait::async_trait] impl ClusterApi for ClusterMgr { + #[async_backtrace::framed] async fn add_node(&self, node: NodeInfo) -> Result<u64> { // Only when there are no record, i.e. 
seq=0 let seq = MatchSeq::Exact(0); @@ -100,6 +101,7 @@ Ok(res.seq) } + #[async_backtrace::framed] async fn get_nodes(&self) -> Result<Vec<NodeInfo>> { let values = self.metastore.prefix_list_kv(&self.cluster_prefix).await?; @@ -114,6 +116,7 @@ Ok(nodes_info) } + #[async_backtrace::framed] async fn drop_node(&self, node_id: String, seq: MatchSeq) -> Result<()> { let node_key = format!("{}/{}", self.cluster_prefix, escape_for_key(&node_id)?); let upsert_node = @@ -133,6 +136,7 @@ } } + #[async_backtrace::framed] async fn heartbeat(&self, node: &NodeInfo, seq: MatchSeq) -> Result<u64> { let meta = Some(self.new_lift_time()); let node_key = format!("{}/{}", self.cluster_prefix, escape_for_key(&node.id)?); @@ -151,6 +155,7 @@ } } + #[async_backtrace::framed] async fn get_local_addr(&self) -> Result<Option<String>> { Ok(self.metastore.get_local_addr().await?) } diff --git a/src/query/management/src/file_format/file_format_mgr.rs b/src/query/management/src/file_format/file_format_mgr.rs index c637793dfacdf..d9bf67835625c 100644 --- a/src/query/management/src/file_format/file_format_mgr.rs +++ b/src/query/management/src/file_format/file_format_mgr.rs @@ -58,6 +58,7 @@ impl FileFormatMgr { #[async_trait::async_trait] impl FileFormatApi for FileFormatMgr { + #[async_backtrace::framed] async fn add_file_format(&self, info: UserDefinedFileFormat) -> Result<u64> { let seq = MatchSeq::Exact(0); let val = Operation::Update(serialize_struct( @@ -84,6 +85,7 @@ Ok(res.seq) } + #[async_backtrace::framed] async fn get_file_format( &self, name: &str, @@ -108,6 +110,7 @@ } } + #[async_backtrace::framed] async fn get_file_formats(&self) -> Result<Vec<UserDefinedFileFormat>> { let values = self.kv_api.prefix_list_kv(&self.file_format_prefix).await?; @@ -120,6 +123,7 @@ Ok(file_format_infos) } + #[async_backtrace::framed] async fn drop_file_format(&self, name: &str, seq: MatchSeq) -> Result<()> { let key = format!("{}/{}", self.file_format_prefix, escape_for_key(name)?); let kv_api = self.kv_api.clone(); diff --git a/src/query/management/src/quota/quota_mgr.rs b/src/query/management/src/quota/quota_mgr.rs index b9bf7d66bed74..d84ba430b794b 100644 --- a/src/query/management/src/quota/quota_mgr.rs +++ b/src/query/management/src/quota/quota_mgr.rs @@ -52,6 +52,7 @@ impl QuotaMgr { #[async_trait::async_trait] impl QuotaApi for QuotaMgr { + #[async_backtrace::framed] async fn get_quota(&self, seq: MatchSeq) -> Result<SeqV<TenantQuota>> { let res = self.kv_api.get_kv(&self.key).await?; match res { @@ -63,6 +64,7 @@ } } + #[async_backtrace::framed] async fn set_quota(&self, quota: &TenantQuota, seq: MatchSeq) -> Result<u64> { let value = serde_json::to_vec(quota)?; let res = self diff --git a/src/query/management/src/role/role_mgr.rs b/src/query/management/src/role/role_mgr.rs index daf64c4f3d27b..9e948ef5431f1 100644 --- a/src/query/management/src/role/role_mgr.rs +++ b/src/query/management/src/role/role_mgr.rs @@ -52,6 +52,7 @@ impl RoleMgr { }) } + #[async_backtrace::framed] async fn upsert_role_info( &self, role_info: &RoleInfo, @@ -80,6 +81,7 @@ #[async_trait::async_trait] impl RoleApi for RoleMgr { + #[async_backtrace::framed] async fn add_role(&self, role_info: RoleInfo) -> common_exception::Result<u64> { let match_seq = MatchSeq::Exact(0); let key = self.make_role_key(role_info.identity()); @@ -100,6 +102,7 @@ impl 
RoleApi for RoleMgr { Ok(res.seq) } + #[async_backtrace::framed] async fn get_role(&self, role: &String, seq: MatchSeq) -> Result<SeqV<RoleInfo>, ErrorCode> { let key = self.make_role_key(role); let res = self.kv_api.get_kv(&key).await?; @@ -112,6 +115,7 @@ } } + #[async_backtrace::framed] async fn get_roles(&self) -> Result<Vec<SeqV<RoleInfo>>, ErrorCode> { let role_prefix = self.role_prefix.clone(); let kv_api = self.kv_api.clone(); @@ -133,6 +137,7 @@ /// It fetch the role that matches the specified seq number, update it in place, then write it back with the seq it sees. /// /// Seq number ensures there is no other write happens between get and set. + #[async_backtrace::framed] async fn update_role_with( &self, role: &String, @@ -156,6 +161,7 @@ Ok(Some(seq)) } + #[async_backtrace::framed] async fn drop_role(&self, role: String, seq: MatchSeq) -> Result<(), ErrorCode> { let key = self.make_role_key(&role); let kv_api = self.kv_api.clone(); diff --git a/src/query/management/src/setting/setting_mgr.rs b/src/query/management/src/setting/setting_mgr.rs index 35b16fd26af44..259dc152d196e 100644 --- a/src/query/management/src/setting/setting_mgr.rs +++ b/src/query/management/src/setting/setting_mgr.rs @@ -47,6 +47,7 @@ impl SettingMgr { #[async_trait::async_trait] impl SettingApi for SettingMgr { + #[async_backtrace::framed] async fn set_setting(&self, setting: UserSetting) -> Result<u64> { // Upsert. let seq = MatchSeq::GE(0); @@ -64,6 +65,7 @@ } } + #[async_backtrace::framed] async fn get_settings(&self) -> Result<Vec<UserSetting>> { let values = self.kv_api.prefix_list_kv(&self.setting_prefix).await?; @@ -75,6 +77,7 @@ Ok(settings) } + #[async_backtrace::framed] async fn get_setting(&self, name: &str, seq: MatchSeq) -> Result<SeqV<UserSetting>> { let key = format!("{}/{}", self.setting_prefix, name); let kv_api = self.kv_api.clone(); @@ -92,6 +95,7 @@ } } + #[async_backtrace::framed] async fn drop_setting(&self, name: &str, seq: MatchSeq) -> Result<()> { let key = format!("{}/{}", self.setting_prefix, name); let kv_api = self.kv_api.clone(); diff --git a/src/query/management/src/stage/stage_mgr.rs b/src/query/management/src/stage/stage_mgr.rs index fcefd5dc7f3f1..3f53fe79d5610 100644 --- a/src/query/management/src/stage/stage_mgr.rs +++ b/src/query/management/src/stage/stage_mgr.rs @@ -67,6 +67,7 @@ impl StageMgr { #[async_trait::async_trait] impl StageApi for StageMgr { + #[async_backtrace::framed] async fn add_stage(&self, info: StageInfo) -> Result<u64> { let seq = MatchSeq::Exact(0); let val = Operation::Update(serialize_struct( @@ -90,6 +91,7 @@ Ok(res.seq) } + #[async_backtrace::framed] async fn get_stage(&self, name: &str, seq: MatchSeq) -> Result<SeqV<StageInfo>> { let key = format!("{}/{}", self.stage_prefix, escape_for_key(name)?); let kv_api = self.kv_api.clone(); @@ -107,6 +109,7 @@ } } + #[async_backtrace::framed] async fn get_stages(&self) -> Result<Vec<StageInfo>> { let values = self.kv_api.prefix_list_kv(&self.stage_prefix).await?; @@ -119,6 +122,7 @@ Ok(stage_infos) } + #[async_backtrace::framed] async fn drop_stage(&self, name: &str) -> Result<()> { let stage_key = format!("{}/{}", self.stage_prefix, escape_for_key(name)?); let file_key_prefix = format!("{}/{}/", self.stage_file_prefix, escape_for_key(name)?); @@ -158,6 +162,7 @@ )) } + #[async_backtrace::framed] async fn add_file(&self, 
name: &str, file: StageFile) -> Result<u64> { let stage_key = format!("{}/{}", self.stage_prefix, escape_for_key(name)?); let file_key = format!( @@ -221,6 +226,7 @@ )) } + #[async_backtrace::framed] async fn list_files(&self, name: &str) -> Result<Vec<StageFile>> { let list_prefix = format!("{}/{}/", self.stage_file_prefix, escape_for_key(name)?); let values = self.kv_api.prefix_list_kv(&list_prefix).await?; @@ -232,6 +238,7 @@ Ok(files) } + #[async_backtrace::framed] async fn remove_files(&self, name: &str, paths: Vec<String>) -> Result<()> { let stage_key = format!("{}/{}", self.stage_prefix, escape_for_key(name)?); diff --git a/src/query/management/src/udf/udf_mgr.rs b/src/query/management/src/udf/udf_mgr.rs index e15583a3b354d..766e1f92ba300 100644 --- a/src/query/management/src/udf/udf_mgr.rs +++ b/src/query/management/src/udf/udf_mgr.rs @@ -54,6 +54,7 @@ impl UdfMgr { #[async_trait::async_trait] impl UdfApi for UdfMgr { + #[async_backtrace::framed] async fn add_udf(&self, info: UserDefinedFunction) -> Result<u64> { if is_builtin_function(info.name.as_str()) { return Err(ErrorCode::UdfAlreadyExists(format!( @@ -76,6 +77,7 @@ Ok(res.seq) } + #[async_backtrace::framed] async fn update_udf(&self, info: UserDefinedFunction, seq: MatchSeq) -> Result<u64> { if is_builtin_function(info.name.as_str()) { return Err(ErrorCode::UdfAlreadyExists(format!( @@ -103,6 +105,7 @@ } } + #[async_backtrace::framed] async fn get_udf(&self, udf_name: &str, seq: MatchSeq) -> Result<SeqV<UserDefinedFunction>> { let key = format!("{}/{}", self.udf_prefix, escape_for_key(udf_name)?); let kv_api = self.kv_api.clone(); @@ -120,6 +123,7 @@ } } + #[async_backtrace::framed] async fn get_udfs(&self) -> Result<Vec<UserDefinedFunction>> { let values = self.kv_api.prefix_list_kv(&self.udf_prefix).await?; @@ -131,6 +135,7 @@ Ok(udfs) } + #[async_backtrace::framed] async fn drop_udf(&self, udf_name: &str, seq: MatchSeq) -> Result<()> { let key = format!("{}/{}", self.udf_prefix, escape_for_key(udf_name)?); let kv_api = self.kv_api.clone(); diff --git a/src/query/management/src/user/user_mgr.rs b/src/query/management/src/user/user_mgr.rs index 482c067a4fec7..3aa19555d502c 100644 --- a/src/query/management/src/user/user_mgr.rs +++ b/src/query/management/src/user/user_mgr.rs @@ -52,6 +52,7 @@ impl UserMgr { }) } + #[async_backtrace::framed] async fn upsert_user_info( &self, user_info: &UserInfo, @@ -78,6 +79,7 @@ #[async_trait::async_trait] impl UserApi for UserMgr { + #[async_backtrace::framed] async fn add_user(&self, user_info: UserInfo) -> common_exception::Result<u64> { let user_identity = UserIdentity::new(&user_info.name, &user_info.hostname); @@ -108,6 +110,7 @@ Ok(res.seq) } + #[async_backtrace::framed] async fn get_user(&self, user: UserIdentity, seq: MatchSeq) -> Result<SeqV<UserInfo>> { let user_key = format_user_key(&user.username, &user.hostname); let key = format!("{}/{}", self.user_prefix, escape_for_key(&user_key)?); @@ -124,6 +127,7 @@ } } + #[async_backtrace::framed] async fn get_users(&self) -> Result<Vec<SeqV<UserInfo>>> { let user_prefix = self.user_prefix.clone(); let values = self.kv_api.prefix_list_kv(user_prefix.as_str()).await?; @@ -138,6 +142,7 @@ Ok(r) } + #[async_backtrace::framed] async fn update_user_with( &self, user: UserIdentity, @@ -161,6 +166,7 @@ Ok(Some(seq)) } + #[async_backtrace::framed] async fn drop_user(&self, user: UserIdentity, 
seq: MatchSeq) -> Result<()> { let user_key = format_user_key(&user.username, &user.hostname); let key = format!("{}/{}", self.user_prefix, escape_for_key(&user_key)?); diff --git a/src/query/pipeline/core/Cargo.toml b/src/query/pipeline/core/Cargo.toml index f1cd9d14da069..504535c49a69b 100644 --- a/src/query/pipeline/core/Cargo.toml +++ b/src/query/pipeline/core/Cargo.toml @@ -15,6 +15,7 @@ test = false common-exception = { path = "../../../common/exception" } common-expression = { path = "../../expression" } +async-backtrace = { workspace = true } async-trait = "0.1.57" futures = "0.3.24" petgraph = "0.6.2" diff --git a/src/query/pipeline/core/src/processors/processor.rs b/src/query/pipeline/core/src/processors/processor.rs index 7925d85d87f1a..0eb3f1ff85baf 100644 --- a/src/query/pipeline/core/src/processors/processor.rs +++ b/src/query/pipeline/core/src/processors/processor.rs @@ -51,6 +51,7 @@ pub trait Processor: Send { } // Asynchronous work. + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { Err(ErrorCode::Unimplemented("Unimplemented async_process.")) } diff --git a/src/query/pipeline/sinks/Cargo.toml b/src/query/pipeline/sinks/Cargo.toml index 17f93ddb5fc76..bfbf3a7f222f7 100644 --- a/src/query/pipeline/sinks/Cargo.toml +++ b/src/query/pipeline/sinks/Cargo.toml @@ -18,6 +18,7 @@ common-exception = { path = "../../../common/exception" } common-expression = { path = "../../expression" } common-pipeline-core = { path = "../core" } +async-backtrace = { workspace = true } async-channel = "1.7.1" async-trait = { version = "0.1.57", package = "async-trait-fn" } diff --git a/src/query/pipeline/sinks/src/async_mpsc_sink.rs b/src/query/pipeline/sinks/src/async_mpsc_sink.rs index 1798b57d3d6f0..61e9103917b1f 100644 --- a/src/query/pipeline/sinks/src/async_mpsc_sink.rs +++ b/src/query/pipeline/sinks/src/async_mpsc_sink.rs @@ -28,10 +28,12 @@ use common_pipeline_core::processors::Processor; pub trait AsyncMpscSink: Send { const NAME: &'static str; + #[async_backtrace::framed] async fn on_start(&mut self) -> Result<()> { Ok(()) } + #[async_backtrace::framed] async fn on_finish(&mut self) -> Result<()> { Ok(()) } @@ -149,6 +151,7 @@ impl Processor for AsyncMpscSinker { } } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if !self.called_on_start { self.called_on_start = true; diff --git a/src/query/pipeline/sinks/src/async_sink.rs b/src/query/pipeline/sinks/src/async_sink.rs index 902510ac3d9c3..8d010e5891b44 100644 --- a/src/query/pipeline/sinks/src/async_sink.rs +++ b/src/query/pipeline/sinks/src/async_sink.rs @@ -29,10 +29,12 @@ use common_pipeline_core::processors::Processor; pub trait AsyncSink: Send { const NAME: &'static str; + #[async_backtrace::framed] async fn on_start(&mut self) -> Result<()> { Ok(()) } + #[async_backtrace::framed] async fn on_finish(&mut self) -> Result<()> { Ok(()) } @@ -132,6 +134,7 @@ impl Processor for AsyncSinker { } } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if !self.called_on_start { self.called_on_start = true; diff --git a/src/query/pipeline/sinks/src/union_receive_sink.rs b/src/query/pipeline/sinks/src/union_receive_sink.rs index 9542dc345a36c..1d1f9eae6bdee 100644 --- a/src/query/pipeline/sinks/src/union_receive_sink.rs +++ b/src/query/pipeline/sinks/src/union_receive_sink.rs @@ -40,12 +40,14 @@ impl UnionReceiveSink { impl AsyncSink for UnionReceiveSink { const NAME: &'static str = "UnionReceiveSink"; + #[async_backtrace::framed] async fn on_finish(&mut self) 
-> Result<()> { drop(self.sender.take()); Ok(()) } #[unboxed_simple] + #[async_backtrace::framed] async fn consume(&mut self, data_block: DataBlock) -> Result<bool> { if let Some(sender) = self.sender.as_ref() { if sender.send(data_block).await.is_err() { diff --git a/src/query/pipeline/sources/Cargo.toml b/src/query/pipeline/sources/Cargo.toml index 75eb9efe1ec06..9d188d9954239 100644 --- a/src/query/pipeline/sources/Cargo.toml +++ b/src/query/pipeline/sources/Cargo.toml @@ -12,6 +12,7 @@ test = false ignored = ["xml-rs"] [dependencies] +async-backtrace = { workspace = true } async-channel = "1.7.1" common-arrow = { path = "../../../common/arrow" } common-base = { path = "../../../common/base" } diff --git a/src/query/pipeline/sources/src/async_source.rs b/src/query/pipeline/sources/src/async_source.rs index 14509654c2cfa..b1a29101bb193 100644 --- a/src/query/pipeline/sources/src/async_source.rs +++ b/src/query/pipeline/sources/src/async_source.rs @@ -96,6 +96,7 @@ impl Processor for AsyncSourcer { } } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match self.inner.generate().await? { None => self.is_finish = true, diff --git a/src/query/pipeline/sources/src/input_formats/beyond_end_reader.rs b/src/query/pipeline/sources/src/input_formats/beyond_end_reader.rs index 96cc4dd9ea9ec..6021f40a4ec48 100644 --- a/src/query/pipeline/sources/src/input_formats/beyond_end_reader.rs +++ b/src/query/pipeline/sources/src/input_formats/beyond_end_reader.rs @@ -30,6 +30,7 @@ pub struct BeyondEndReader { } impl BeyondEndReader { + #[async_backtrace::framed] pub async fn read(self) -> Result<Vec<u8>> { let split_info = &self.split_info; if split_info.num_file_splits > 1 && split_info.seq_in_file < split_info.num_file_splits - 1 diff --git a/src/query/pipeline/sources/src/input_formats/impls/input_format_parquet.rs b/src/query/pipeline/sources/src/input_formats/impls/input_format_parquet.rs index b7a50807034c8..a4957cd84424e 100644 --- a/src/query/pipeline/sources/src/input_formats/impls/input_format_parquet.rs +++ b/src/query/pipeline/sources/src/input_formats/impls/input_format_parquet.rs @@ -128,6 +128,7 @@ fn col_offset(meta: &ColumnChunkMetaData) -> i64 { #[async_trait::async_trait] impl InputFormat for InputFormatParquet { + #[async_backtrace::framed] async fn get_splits( &self, file_infos: Vec<StageFileInfo>, @@ -143,6 +144,7 @@ Self::make_splits(file_infos, metas) } + #[async_backtrace::framed] async fn infer_schema(&self, path: &str, op: &Operator) -> Result<TableSchemaRef> { let mut reader = op.reader(path).await?; let file_meta = read_metadata_async(&mut reader).await?; @@ -169,6 +171,7 @@ impl InputFormatPipe for ParquetFormatPipe { type AligningState = AligningState; type BlockBuilder = ParquetBlockBuilder; + #[async_backtrace::framed] async fn read_split( ctx: Arc<InputContext>, split_info: Arc<SplitInfo>, @@ -263,6 +266,7 @@ impl RowGroupInMemory { }) } + #[async_backtrace::framed] async fn read_field_async( op: Operator, path: String, @@ -277,6 +281,7 @@ Ok((index, cols)) } + #[async_backtrace::framed] async fn read_async( split_info: Arc<SplitInfo>, operator: Operator, @@ -517,6 +522,7 @@ fn get_field_columns<'a>( .collect() } +#[async_backtrace::framed] async fn read_single_column_async( reader: &mut R, meta: &ColumnChunkMetaData, diff --git a/src/query/pipeline/sources/src/input_formats/input_context.rs b/src/query/pipeline/sources/src/input_formats/input_context.rs index efb8554ff3485..32a10b0ffccbc 100644 --- 
a/src/query/pipeline/sources/src/input_formats/input_context.rs +++ b/src/query/pipeline/sources/src/input_formats/input_context.rs @@ -191,6 +191,7 @@ impl InputContext { }) } + #[async_backtrace::framed] pub async fn try_create_from_insert_clickhouse( format_name: &str, stream_receiver: Receiver<Vec<u8>>, @@ -233,6 +234,7 @@ }) } + #[async_backtrace::framed] pub async fn try_create_from_insert_file_format( stream_receiver: Receiver<Result<StreamingReadBatch>>, settings: Arc<Settings>, diff --git a/src/query/pipeline/sources/src/input_formats/input_format_text.rs b/src/query/pipeline/sources/src/input_formats/input_format_text.rs index d1862664db02e..b22aca9fa964b 100644 --- a/src/query/pipeline/sources/src/input_formats/input_format_text.rs +++ b/src/query/pipeline/sources/src/input_formats/input_format_text.rs @@ -285,6 +285,7 @@ impl InputFormatPipe for InputFormatTextPipe { #[async_trait::async_trait] impl<T: InputFormatTextBase> InputFormat for InputFormatText<T> { + #[async_backtrace::framed] async fn get_splits( &self, file_infos: Vec<StageFileInfo>, @@ -348,6 +349,7 @@ Ok(infos) } + #[async_backtrace::framed] async fn infer_schema(&self, _path: &str, _op: &Operator) -> Result<TableSchemaRef> { Err(ErrorCode::Unimplemented( "infer_schema is not implemented for this format yet.", diff --git a/src/query/pipeline/sources/src/input_formats/input_pipeline.rs b/src/query/pipeline/sources/src/input_formats/input_pipeline.rs index 97cc59322e17c..95ff53b093e94 100644 --- a/src/query/pipeline/sources/src/input_formats/input_pipeline.rs +++ b/src/query/pipeline/sources/src/input_formats/input_pipeline.rs @@ -162,7 +162,7 @@ pub trait InputFormatPipe: Sized + Send + 'static { let (data_tx, data_rx) = tokio::sync::mpsc::channel(ctx.num_prefetch_per_split()); let split_clone = s.clone(); let ctx_clone2 = ctx_clone.clone(); - tokio::spawn(async move { + tokio::spawn(async_backtrace::location!().frame(async move { if let Err(e) = Self::copy_reader_with_aligner(ctx_clone2, split_clone, data_tx).await { @@ -170,7 +170,7 @@ } else { tracing::debug!("copy split reader stopped"); } - }); + })); if split_tx .send(Ok(Split { info: s.clone(), @@ -293,6 +293,7 @@ Ok(()) } + #[async_backtrace::framed] async fn read_split( _ctx: Arc<InputContext>, _split_info: Arc<SplitInfo>, @@ -301,6 +302,7 @@ } #[tracing::instrument(level = "debug", skip(ctx, batch_tx))] + #[async_backtrace::framed] async fn copy_reader_with_aligner( ctx: Arc<InputContext>, split_info: Arc<SplitInfo>, @@ -342,6 +344,7 @@ } } +#[async_backtrace::framed] pub async fn read_full<R: AsyncRead + Unpin>(reader: &mut R, buf: &mut [u8]) -> Result<usize> { let mut buf = &mut buf[0..]; let mut n = 0; diff --git a/src/query/pipeline/sources/src/input_formats/source_aligner.rs b/src/query/pipeline/sources/src/input_formats/source_aligner.rs index f9da697070b64..77e4710a2c9a0 100644 --- a/src/query/pipeline/sources/src/input_formats/source_aligner.rs +++ b/src/query/pipeline/sources/src/input_formats/source_aligner.rs @@ -150,6 +150,7 @@ impl Processor for Aligner { } } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if !self.no_more_split { match &self.state { diff --git a/src/query/pipeline/sources/src/input_formats/source_deserializer.rs b/src/query/pipeline/sources/src/input_formats/source_deserializer.rs index ceb5b141bfc96..c2193a091c92a 100644 --- a/src/query/pipeline/sources/src/input_formats/source_deserializer.rs +++ 
b/src/query/pipeline/sources/src/input_formats/source_deserializer.rs @@ -116,6 +116,7 @@ impl Processor for DeserializeSource { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { assert!(self.input_buffer.is_none() && !self.input_finished); match self.input_rx.recv().await { diff --git a/src/query/pipeline/sources/src/stream_source.rs b/src/query/pipeline/sources/src/stream_source.rs index 10f9b6be3d880..f94a5dc2ea2a8 100644 --- a/src/query/pipeline/sources/src/stream_source.rs +++ b/src/query/pipeline/sources/src/stream_source.rs @@ -58,6 +58,7 @@ impl AsyncSource for AsyncStreamSource { const SKIP_EMPTY_DATA_BLOCK: bool = T; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result<Option<DataBlock>> { match self .stream diff --git a/src/query/service/Cargo.toml b/src/query/service/Cargo.toml index a87490baba885..d785182f8584f 100644 --- a/src/query/service/Cargo.toml +++ b/src/query/service/Cargo.toml @@ -104,6 +104,7 @@ arrow-array = { version = "35.0.0" } arrow-flight = { version = "35.0.0", features = ["flight-sql-experimental"] } arrow-ipc = { version = "35.0.0" } arrow-schema = { version = "35.0.0" } +async-backtrace = { workspace = true } async-channel = "1.7.1" async-stream = "0.3.3" async-trait = { version = "0.1.57", package = "async-trait-fn" } diff --git a/src/query/service/src/api/http/v1/cluster.rs b/src/query/service/src/api/http/v1/cluster.rs index cc50bc44424eb..98a80ccf16eaf 100644 --- a/src/query/service/src/api/http/v1/cluster.rs +++ b/src/query/service/src/api/http/v1/cluster.rs @@ -31,6 +31,7 @@ use crate::sessions::TableContext; // cluster_state: the shared in memory state which store all nodes known to current node // return: return a list of cluster node information #[poem::handler] +#[async_backtrace::framed] pub async fn cluster_list_handler() -> poem::Result<impl IntoResponse> { let sessions = SessionManager::instance(); let nodes = list_nodes(&sessions).await.map_err(|cause| { diff --git a/src/query/service/src/api/http/v1/config.rs b/src/query/service/src/api/http/v1/config.rs index da255fa93fc50..5100a0103735e 100644 --- a/src/query/service/src/api/http/v1/config.rs +++ b/src/query/service/src/api/http/v1/config.rs @@ -17,6 +17,7 @@ use poem::web::Json; use poem::IntoResponse; #[poem::handler] +#[async_backtrace::framed] pub async fn config_handler() -> poem::Result<impl IntoResponse> { Ok(Json( GlobalConfig::instance().as_ref().clone().into_config(), diff --git a/src/query/service/src/api/http/v1/instance_status.rs b/src/query/service/src/api/http/v1/instance_status.rs index 71ca8ce5870ce..954da1c1b2a52 100644 --- a/src/query/service/src/api/http/v1/instance_status.rs +++ b/src/query/service/src/api/http/v1/instance_status.rs @@ -41,6 +41,7 @@ fn secs_since_epoch(t: SystemTime) -> u64 { // lightweight way to get status // return Status in json #[poem::handler] +#[async_backtrace::framed] pub async fn instance_status_handler() -> poem::Result<impl IntoResponse> { let session_manager = SessionManager::instance(); let status = session_manager.get_current_session_status(); diff --git a/src/query/service/src/api/http/v1/logs.rs b/src/query/service/src/api/http/v1/logs.rs index 6a444549af545..3149b7b325458 100644 --- a/src/query/service/src/api/http/v1/logs.rs +++ b/src/query/service/src/api/http/v1/logs.rs @@ -31,6 +31,7 @@ use crate::stream::ReadDataBlockStream; // read log files from cfg.log.log_dir #[poem::handler] +#[async_backtrace::framed] pub async fn logs_handler() -> poem::Result<impl IntoResponse> { let sessions = SessionManager::instance(); let data = 
select_table(&sessions).await.map_err(|err| { diff --git a/src/query/service/src/api/http/v1/status.rs b/src/query/service/src/api/http/v1/status.rs index ffb2bd86f1dc0..e88807671f676 100644 --- a/src/query/service/src/api/http/v1/status.rs +++ b/src/query/service/src/api/http/v1/status.rs @@ -43,6 +43,7 @@ fn secs_since_epoch(t: SystemTime) -> u64 { // lightweight way to get status // return Status in json #[poem::handler] +#[async_backtrace::framed] pub async fn instance_status_handler( sessions_extension: Data<&Arc<SessionManager>>, ) -> poem::Result<impl IntoResponse> { diff --git a/src/query/service/src/api/http/v1/tenant_tables.rs b/src/query/service/src/api/http/v1/tenant_tables.rs index 3e66c9ed92d23..c2a6483ea4148 100644 --- a/src/query/service/src/api/http/v1/tenant_tables.rs +++ b/src/query/service/src/api/http/v1/tenant_tables.rs @@ -69,6 +69,7 @@ async fn load_tenant_tables(tenant: &str) -> Result<Vec<TenantTableInfo>> { // This handler returns the statistics about the tables of a tenant. It's only enabled in management mode. #[poem::handler] +#[async_backtrace::framed] pub async fn list_tenant_tables_handler( Path(tenant): Path<String>, ) -> poem::Result<impl IntoResponse> { @@ -80,6 +81,7 @@ pub async fn list_tenant_tables_handler( // This handler returns the statistics about the tables of the current tenant. #[poem::handler] +#[async_backtrace::framed] pub async fn list_tables_handler() -> poem::Result<impl IntoResponse> { let tenant = &GlobalConfig::instance().query.tenant_id; if tenant.is_empty() { diff --git a/src/query/service/src/api/http_service.rs b/src/query/service/src/api/http_service.rs index 7997ed7714aae..a620ea8ef9771 100644 --- a/src/query/service/src/api/http_service.rs +++ b/src/query/service/src/api/http_service.rs @@ -22,6 +22,7 @@ use common_http::home::debug_home_handler; #[cfg(feature = "memory-profiling")] use common_http::jeprof::debug_jeprof_dump_handler; use common_http::pprof::debug_pprof_handler; +use common_http::stack::debug_dump_stack; use common_http::HttpError; use common_http::HttpShutdownHandler; use common_meta_types::anyerror::AnyError; @@ -67,7 +68,8 @@ impl HttpService { get(super::http::v1::cluster::cluster_list_handler), ) .at("/debug/home", get(debug_home_handler)) - .at("/debug/pprof/profile", get(debug_pprof_handler)); + .at("/debug/pprof/profile", get(debug_pprof_handler)) + .at("/debug/async_tasks/dump", get(debug_dump_stack)); if self.config.query.management_mode { route = route.at( @@ -107,6 +109,7 @@ Ok(cfg) } + #[async_backtrace::framed] async fn start_with_tls(&mut self, listening: SocketAddr) -> Result<SocketAddr> { info!("Http API TLS enabled"); @@ -120,6 +123,7 @@ Ok(addr) } + #[async_backtrace::framed] async fn start_without_tls(&mut self, listening: SocketAddr) -> Result<SocketAddr> { warn!("Http API TLS not set"); @@ -133,10 +137,12 @@ #[async_trait::async_trait] impl Server for HttpService { + #[async_backtrace::framed] async fn shutdown(&mut self, graceful: bool) { self.shutdown_handler.shutdown(graceful).await; } + #[async_backtrace::framed] async fn start(&mut self, listening: SocketAddr) -> Result<SocketAddr> { let config = &self.config.query; let res = diff --git a/src/query/service/src/api/rpc/exchange/exchange_manager.rs b/src/query/service/src/api/rpc/exchange/exchange_manager.rs index 70b94a47f0899..15ba79c2fc82e 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_manager.rs +++ b/src/query/service/src/api/rpc/exchange/exchange_manager.rs @@ -96,6 +96,7 @@ impl DataExchangeManager { } // Create connections for cluster all nodes. We will push data through this connection. 
+ #[async_backtrace::framed] pub async fn init_nodes_channel(&self, packet: &InitNodesChannelPacket) -> Result<()> { let mut request_exchanges = HashMap::new(); let mut targets_exchanges = HashMap::new(); @@ -144,6 +145,7 @@ impl DataExchangeManager { } } + #[async_backtrace::framed] pub async fn create_client(address: &str) -> Result { let config = GlobalConfig::instance(); let address = address.to_string(); @@ -256,6 +258,7 @@ impl DataExchangeManager { } } + #[async_backtrace::framed] pub async fn commit_actions( &self, ctx: Arc, diff --git a/src/query/service/src/api/rpc/exchange/exchange_sink_writer.rs b/src/query/service/src/api/rpc/exchange/exchange_sink_writer.rs index c97f8f3b21877..eb1b6ec561579 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_sink_writer.rs +++ b/src/query/service/src/api/rpc/exchange/exchange_sink_writer.rs @@ -42,12 +42,14 @@ impl ExchangeWriterSink { impl AsyncSink for ExchangeWriterSink { const NAME: &'static str = "ExchangeWriterSink"; + #[async_backtrace::framed] async fn on_finish(&mut self) -> Result<()> { self.flight_sender.close(); Ok(()) } #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn consume(&mut self, mut data_block: DataBlock) -> Result { let mut serialize_meta = match data_block.take_meta() { None => Err(ErrorCode::Internal( diff --git a/src/query/service/src/api/rpc/exchange/exchange_source_reader.rs b/src/query/service/src/api/rpc/exchange/exchange_source_reader.rs index 6723ee9592be3..fdba7a5aa076b 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_source_reader.rs +++ b/src/query/service/src/api/rpc/exchange/exchange_source_reader.rs @@ -86,6 +86,7 @@ impl Processor for ExchangeSourceReader { Ok(Event::Async) } + #[async_backtrace::framed] async fn async_process(&mut self) -> common_exception::Result<()> { if self.output_data.is_none() { if let Some(output_data) = self.flight_receiver.recv().await? 
{ diff --git a/src/query/service/src/api/rpc/exchange/statistics_receiver.rs b/src/query/service/src/api/rpc/exchange/statistics_receiver.rs index e67735f63558c..1f6c0401cc630 100644 --- a/src/query/service/src/api/rpc/exchange/statistics_receiver.rs +++ b/src/query/service/src/api/rpc/exchange/statistics_receiver.rs @@ -137,6 +137,7 @@ impl StatisticsReceiver { }) } + #[async_backtrace::framed] async fn fetch( ctx: &Arc, tx: &FlightSender, diff --git a/src/query/service/src/api/rpc/exchange/statistics_sender.rs b/src/query/service/src/api/rpc/exchange/statistics_sender.rs index d2cd497a8d654..22c9307794206 100644 --- a/src/query/service/src/api/rpc/exchange/statistics_sender.rs +++ b/src/query/service/src/api/rpc/exchange/statistics_sender.rs @@ -138,6 +138,7 @@ impl StatisticsSender { }); } + #[async_backtrace::framed] async fn on_command( ctx: &Arc, command: DataPacket, @@ -160,6 +161,7 @@ impl StatisticsSender { } } + #[async_backtrace::framed] async fn fetch_progress(ctx: &Arc) -> Result> { let mut progress_info = vec![]; @@ -187,6 +189,7 @@ impl StatisticsSender { Ok(progress_info) } + #[async_backtrace::framed] async fn fetch_precommit(ctx: &Arc) -> Result> { Ok(ctx .consume_precommit_blocks() diff --git a/src/query/service/src/api/rpc/flight_client.rs b/src/query/service/src/api/rpc/flight_client.rs index dedb96b1eeb8c..558c8f4b0079e 100644 --- a/src/query/service/src/api/rpc/flight_client.rs +++ b/src/query/service/src/api/rpc/flight_client.rs @@ -50,6 +50,7 @@ impl FlightClient { FlightClient { inner } } + #[async_backtrace::framed] pub async fn execute_action(&mut self, action: FlightAction, timeout: u64) -> Result<()> { if let Err(cause) = self.do_action(action, timeout).await { return Err(cause.add_message_back("(while in query flight)")); @@ -58,6 +59,7 @@ impl FlightClient { Ok(()) } + #[async_backtrace::framed] pub async fn request_server_exchange( &mut self, query_id: &str, @@ -89,6 +91,7 @@ impl FlightClient { Ok(FlightExchange::create_receiver(rx)) } + #[async_backtrace::framed] pub async fn do_get( &mut self, query_id: &str, @@ -122,6 +125,7 @@ impl FlightClient { Ok(FlightExchange::create_receiver(rx)) } + #[async_backtrace::framed] async fn get_streaming(&mut self, request: Request) -> Result> { match self.inner.do_get(request).await { Ok(res) => Ok(res.into_inner()), @@ -131,6 +135,7 @@ impl FlightClient { // Execute do_action. 
#[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] async fn do_action(&mut self, action: FlightAction, timeout: u64) -> Result> { let action: Action = action.try_into()?; let action_type = action.r#type.clone(); @@ -171,6 +176,7 @@ impl FlightReceiver { } } + #[async_backtrace::framed] pub async fn recv(&self) -> Result> { match self.rx.recv().await { Err(_) => Ok(None), @@ -222,6 +228,7 @@ impl FlightSender { } } + #[async_backtrace::framed] pub async fn send(&self, data: DataPacket) -> Result<()> { if let Err(_cause) = self.tx.send(Ok(FlightData::from(data))).await { return Err(ErrorCode::AbortedQuery( diff --git a/src/query/service/src/api/rpc/flight_service.rs b/src/query/service/src/api/rpc/flight_service.rs index df2c63ee68434..c803b6f205080 100644 --- a/src/query/service/src/api/rpc/flight_service.rs +++ b/src/query/service/src/api/rpc/flight_service.rs @@ -63,6 +63,7 @@ type StreamReq = Request>; impl FlightService for DatabendQueryFlightService { type HandshakeStream = FlightStream; + #[async_backtrace::framed] async fn handshake(&self, _: StreamReq) -> Response { Result::Err(Status::unimplemented( "DatabendQuery does not implement handshake.", @@ -71,18 +72,21 @@ impl FlightService for DatabendQueryFlightService { type ListFlightsStream = FlightStream; + #[async_backtrace::framed] async fn list_flights(&self, _: Request) -> Response { Result::Err(Status::unimplemented( "DatabendQuery does not implement list_flights.", )) } + #[async_backtrace::framed] async fn get_flight_info(&self, _: Request) -> Response { Err(Status::unimplemented( "DatabendQuery does not implement get_flight_info.", )) } + #[async_backtrace::framed] async fn get_schema(&self, _: Request) -> Response { Err(Status::unimplemented( "DatabendQuery does not implement get_schema.", @@ -93,6 +97,7 @@ impl FlightService for DatabendQueryFlightService { type DoPutStream = FlightStream; + #[async_backtrace::framed] async fn do_put(&self, _req: StreamReq) -> Response { Err(Status::unimplemented("unimplement do_put")) } @@ -100,6 +105,7 @@ impl FlightService for DatabendQueryFlightService { type DoExchangeStream = FlightStream; #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] async fn do_get(&self, request: Request) -> Response { match request.get_metadata("x-type")?.as_str() { "request_server_exchange" => { @@ -129,6 +135,7 @@ impl FlightService for DatabendQueryFlightService { } } + #[async_backtrace::framed] async fn do_exchange(&self, _: StreamReq) -> Response { Err(Status::unimplemented("unimplement do_exchange")) } @@ -136,6 +143,7 @@ impl FlightService for DatabendQueryFlightService { type DoActionStream = FlightStream; #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] async fn do_action(&self, request: Request) -> Response { common_tracing::extract_remote_span_as_parent(&request); @@ -195,6 +203,7 @@ impl FlightService for DatabendQueryFlightService { type ListActionsStream = FlightStream; #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] async fn list_actions(&self, request: Request) -> Response { common_tracing::extract_remote_span_as_parent(&request); Result::Ok(RawResponse::new( diff --git a/src/query/service/src/api/rpc/packets/packet.rs b/src/query/service/src/api/rpc/packets/packet.rs index 25d9eb7c1f6b0..91d6270565038 100644 --- a/src/query/service/src/api/rpc/packets/packet.rs +++ b/src/query/service/src/api/rpc/packets/packet.rs @@ -26,6 +26,7 @@ pub trait Packet: Send + Sync { 
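Note: where a function already carries other attribute macros, this patch consistently places #[async_backtrace::framed] last, immediately above the fn, after #[tracing::instrument], #[poem::handler], or #[async_trait::unboxed_simple]. A sketch of the resulting shape on a free function, assuming the tracing crate is available; the name and body are illustrative, and the ordering simply mirrors the convention visible in the hunks above:

    // The patch keeps `framed` innermost by convention: both macros rewrite
    // async fns, and in this order the frame stays attached directly to the
    // function it names.
    #[tracing::instrument(level = "debug", skip_all)]
    #[async_backtrace::framed]
    async fn do_work() -> std::io::Result<()> {
        Ok(())
    }
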
#[async_trait::async_trait] impl Packet for Vec { + #[async_backtrace::framed] async fn commit(&self, config: &InnerConfig, timeout: u64) -> Result<()> { for packet in self.iter() { packet.commit(config, timeout).await?; @@ -35,6 +36,7 @@ impl Packet for Vec { } } +#[async_backtrace::framed] pub async fn create_client(config: &InnerConfig, address: &str) -> Result { match config.tls_query_cli_enabled() { true => Ok(FlightClient::new(FlightServiceClient::new( diff --git a/src/query/service/src/api/rpc/packets/packet_execute.rs b/src/query/service/src/api/rpc/packets/packet_execute.rs index dcfe5f828c533..3258ff32ae92a 100644 --- a/src/query/service/src/api/rpc/packets/packet_execute.rs +++ b/src/query/service/src/api/rpc/packets/packet_execute.rs @@ -49,6 +49,7 @@ impl ExecutePartialQueryPacket { #[async_trait::async_trait] impl Packet for ExecutePartialQueryPacket { + #[async_backtrace::framed] async fn commit(&self, config: &InnerConfig, timeout: u64) -> Result<()> { if !self.executors_info.contains_key(&self.executor) { return Err(ErrorCode::ClusterUnknownNode(format!( diff --git a/src/query/service/src/api/rpc/packets/packet_executor.rs b/src/query/service/src/api/rpc/packets/packet_executor.rs index 72f220bf2eaf3..4bbd90669497f 100644 --- a/src/query/service/src/api/rpc/packets/packet_executor.rs +++ b/src/query/service/src/api/rpc/packets/packet_executor.rs @@ -60,6 +60,7 @@ impl QueryFragmentsPlanPacket { #[async_trait::async_trait] impl Packet for QueryFragmentsPlanPacket { + #[async_backtrace::framed] async fn commit(&self, config: &InnerConfig, timeout: u64) -> Result<()> { if !self.executors_info.contains_key(&self.executor) { return Err(ErrorCode::Internal(format!( diff --git a/src/query/service/src/api/rpc/packets/packet_publisher.rs b/src/query/service/src/api/rpc/packets/packet_publisher.rs index cd26deb1815f1..1248be90d3fc1 100644 --- a/src/query/service/src/api/rpc/packets/packet_publisher.rs +++ b/src/query/service/src/api/rpc/packets/packet_publisher.rs @@ -61,6 +61,7 @@ impl InitNodesChannelPacket { #[async_trait::async_trait] impl Packet for InitNodesChannelPacket { + #[async_backtrace::framed] async fn commit(&self, config: &InnerConfig, timeout: u64) -> Result<()> { let executor_info = &self.executor; let mut conn = create_client(config, &executor_info.flight_address).await?; diff --git a/src/query/service/src/api/rpc_service.rs b/src/query/service/src/api/rpc_service.rs index a53c528fd5878..dfcc966ea9a1c 100644 --- a/src/query/service/src/api/rpc_service.rs +++ b/src/query/service/src/api/rpc_service.rs @@ -45,6 +45,7 @@ impl RpcService { })) } + #[async_backtrace::framed] async fn listener_tcp(listening: SocketAddr) -> Result<(TcpListenerStream, SocketAddr)> { let listener = TcpListener::bind(listening).await.map_err(|e| { ErrorCode::TokioError(format!("{{{}:{}}} {}", listening.ip(), listening.port(), e)) @@ -60,6 +61,7 @@ impl RpcService { } } + #[async_backtrace::framed] async fn server_tls_config(conf: &InnerConfig) -> Result { let cert = tokio::fs::read(conf.query.rpc_tls_server_cert.as_str()).await?; let key = tokio::fs::read(conf.query.rpc_tls_server_key.as_str()).await?; @@ -68,6 +70,7 @@ impl RpcService { Ok(tls_conf) } + #[async_backtrace::framed] pub async fn start_with_incoming(&mut self, listener_stream: TcpListenerStream) -> Result<()> { let flight_api_service = DatabendQueryFlightService::create(); let builder = Server::builder(); @@ -90,15 +93,17 @@ impl RpcService { .add_service(FlightServiceServer::new(flight_api_service)) 
.serve_with_incoming_shutdown(listener_stream, self.shutdown_notify()); - tokio::spawn(server); + tokio::spawn(async_backtrace::location!().frame(server)); Ok(()) } } #[async_trait::async_trait] impl DatabendQueryServer for RpcService { + #[async_backtrace::framed] async fn shutdown(&mut self, _graceful: bool) {} + #[async_backtrace::framed] async fn start(&mut self, listening: SocketAddr) -> Result { let (listener_stream, listener_addr) = Self::listener_tcp(listening).await?; self.start_with_incoming(listener_stream).await?; diff --git a/src/query/service/src/auth.rs b/src/query/service/src/auth.rs index 6efdac20dd49c..611b016769d60 100644 --- a/src/query/service/src/auth.rs +++ b/src/query/service/src/auth.rs @@ -60,6 +60,7 @@ impl AuthMgr { }) } + #[async_backtrace::framed] pub async fn auth(&self, session: Arc, credential: &Credential) -> Result<()> { let user_api = UserApiProvider::instance(); match credential { diff --git a/src/query/service/src/catalogs/catalog_manager.rs b/src/query/service/src/catalogs/catalog_manager.rs index f42246cd57fda..2561d29604955 100644 --- a/src/query/service/src/catalogs/catalog_manager.rs +++ b/src/query/service/src/catalogs/catalog_manager.rs @@ -52,12 +52,14 @@ pub trait CatalogManagerHelper { #[async_trait::async_trait] impl CatalogManagerHelper for CatalogManager { + #[async_backtrace::framed] async fn init(conf: &InnerConfig) -> Result<()> { GlobalInstance::set(Self::try_create(conf).await?); Ok(()) } + #[async_backtrace::framed] async fn try_create(conf: &InnerConfig) -> Result> { let catalog_manager = CatalogManager { catalogs: DashMap::new(), @@ -73,6 +75,7 @@ impl CatalogManagerHelper for CatalogManager { Ok(Arc::new(catalog_manager)) } + #[async_backtrace::framed] async fn register_build_in_catalogs(&self, conf: &InnerConfig) -> Result<()> { let default_catalog: Arc = Arc::new(DatabaseCatalog::try_create_with_config(conf.clone()).await?); @@ -109,6 +112,7 @@ impl CatalogManagerHelper for CatalogManager { Ok(()) } + #[async_backtrace::framed] async fn create_user_defined_catalog(&self, req: CreateCatalogReq) -> Result<()> { let catalog_option = req.meta.catalog_option; diff --git a/src/query/service/src/catalogs/default/database_catalog.rs b/src/query/service/src/catalogs/default/database_catalog.rs index 675c31f9a37c2..c415fc08123af 100644 --- a/src/query/service/src/catalogs/default/database_catalog.rs +++ b/src/query/service/src/catalogs/default/database_catalog.rs @@ -85,6 +85,7 @@ impl DatabaseCatalog { } } + #[async_backtrace::framed] pub async fn try_create_with_config(conf: InnerConfig) -> Result { let immutable_catalog = ImmutableCatalog::try_create_with_config(&conf).await?; let mutable_catalog = MutableCatalog::try_create_with_config(conf).await?; @@ -104,6 +105,7 @@ impl Catalog for DatabaseCatalog { self } + #[async_backtrace::framed] async fn get_database(&self, tenant: &str, db_name: &str) -> Result> { if tenant.is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -124,6 +126,7 @@ impl Catalog for DatabaseCatalog { } } + #[async_backtrace::framed] async fn list_databases(&self, tenant: &str) -> Result>> { if tenant.is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -137,6 +140,7 @@ impl Catalog for DatabaseCatalog { Ok(dbs) } + #[async_backtrace::framed] async fn create_database(&self, req: CreateDatabaseReq) -> Result { if req.name_ident.tenant.is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -159,6 +163,7 @@ impl Catalog for DatabaseCatalog { self.mutable_catalog.create_database(req).await } + #[async_backtrace::framed] 
async fn drop_database(&self, req: DropDatabaseReq) -> Result<()> { if req.name_ident.tenant.is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -178,6 +183,7 @@ impl Catalog for DatabaseCatalog { self.mutable_catalog.drop_database(req).await } + #[async_backtrace::framed] async fn rename_database(&self, req: RenameDatabaseReq) -> Result { if req.name_ident.tenant.is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -215,6 +221,7 @@ impl Catalog for DatabaseCatalog { } } + #[async_backtrace::framed] async fn get_table_meta_by_id(&self, table_id: MetaId) -> Result<(TableIdent, Arc)> { let res = self.immutable_catalog.get_table_meta_by_id(table_id).await; @@ -225,6 +232,7 @@ impl Catalog for DatabaseCatalog { } } + #[async_backtrace::framed] async fn get_table( &self, tenant: &str, @@ -255,6 +263,7 @@ impl Catalog for DatabaseCatalog { } } + #[async_backtrace::framed] async fn list_tables(&self, tenant: &str, db_name: &str) -> Result>> { if tenant.is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -275,6 +284,7 @@ impl Catalog for DatabaseCatalog { } } + #[async_backtrace::framed] async fn list_tables_history( &self, tenant: &str, @@ -304,6 +314,7 @@ impl Catalog for DatabaseCatalog { } } + #[async_backtrace::framed] async fn create_table(&self, req: CreateTableReq) -> Result<()> { if req.tenant().is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -322,11 +333,13 @@ impl Catalog for DatabaseCatalog { self.mutable_catalog.create_table(req).await } + #[async_backtrace::framed] async fn drop_table_by_id(&self, req: DropTableByIdReq) -> Result { let res = self.mutable_catalog.drop_table_by_id(req).await?; Ok(res) } + #[async_backtrace::framed] async fn undrop_table(&self, req: UndropTableReq) -> Result { if req.tenant().is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -345,6 +358,7 @@ impl Catalog for DatabaseCatalog { self.mutable_catalog.undrop_table(req).await } + #[async_backtrace::framed] async fn undrop_database(&self, req: UndropDatabaseReq) -> Result { if req.tenant().is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -363,6 +377,7 @@ impl Catalog for DatabaseCatalog { self.mutable_catalog.undrop_database(req).await } + #[async_backtrace::framed] async fn rename_table(&self, req: RenameTableReq) -> Result { if req.tenant().is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -388,6 +403,7 @@ impl Catalog for DatabaseCatalog { self.mutable_catalog.rename_table(req).await } + #[async_backtrace::framed] async fn count_tables(&self, req: CountTablesReq) -> Result { if req.tenant.is_empty() { return Err(ErrorCode::TenantIsEmpty( @@ -400,6 +416,7 @@ impl Catalog for DatabaseCatalog { Ok(res) } + #[async_backtrace::framed] async fn get_table_copied_file_info( &self, tenant: &str, @@ -411,6 +428,7 @@ impl Catalog for DatabaseCatalog { .await } + #[async_backtrace::framed] async fn truncate_table( &self, table_info: &TableInfo, @@ -419,6 +437,7 @@ impl Catalog for DatabaseCatalog { self.mutable_catalog.truncate_table(table_info, req).await } + #[async_backtrace::framed] async fn upsert_table_option( &self, tenant: &str, @@ -430,6 +449,7 @@ impl Catalog for DatabaseCatalog { .await } + #[async_backtrace::framed] async fn update_table_meta( &self, table_info: &TableInfo, diff --git a/src/query/service/src/catalogs/default/immutable_catalog.rs b/src/query/service/src/catalogs/default/immutable_catalog.rs index edb0b192da28a..74ea91f69ae3d 100644 --- a/src/query/service/src/catalogs/default/immutable_catalog.rs +++ b/src/query/service/src/catalogs/default/immutable_catalog.rs @@ -68,6 
+68,7 @@ pub struct ImmutableCatalog { } impl ImmutableCatalog { + #[async_backtrace::framed] pub async fn try_create_with_config(conf: &InnerConfig) -> Result { // The global db meta. let mut sys_db_meta = InMemoryMetas::create(SYS_DB_ID_BEGIN, SYS_TBL_ID_BEGIN); @@ -91,6 +92,7 @@ impl Catalog for ImmutableCatalog { self } + #[async_backtrace::framed] async fn get_database(&self, _tenant: &str, db_name: &str) -> Result> { match db_name { "system" => Ok(self.sys_db.clone()), @@ -102,18 +104,22 @@ impl Catalog for ImmutableCatalog { } } + #[async_backtrace::framed] async fn list_databases(&self, _tenant: &str) -> Result>> { Ok(vec![self.sys_db.clone(), self.info_schema_db.clone()]) } + #[async_backtrace::framed] async fn create_database(&self, _req: CreateDatabaseReq) -> Result { Err(ErrorCode::Unimplemented("Cannot create system database")) } + #[async_backtrace::framed] async fn drop_database(&self, _req: DropDatabaseReq) -> Result<()> { Err(ErrorCode::Unimplemented("Cannot drop system database")) } + #[async_backtrace::framed] async fn rename_database(&self, _req: RenameDatabaseReq) -> Result { Err(ErrorCode::Unimplemented("Cannot rename system database")) } @@ -128,6 +134,7 @@ impl Catalog for ImmutableCatalog { Ok(table.clone()) } + #[async_backtrace::framed] async fn get_table_meta_by_id(&self, table_id: MetaId) -> Result<(TableIdent, Arc)> { let table = self .sys_db_meta @@ -137,6 +144,7 @@ impl Catalog for ImmutableCatalog { Ok((ti.ident, Arc::new(ti.meta.clone()))) } + #[async_backtrace::framed] async fn get_table( &self, tenant: &str, @@ -148,10 +156,12 @@ impl Catalog for ImmutableCatalog { self.sys_db_meta.get_by_name(db_name, table_name) } + #[async_backtrace::framed] async fn list_tables(&self, _tenant: &str, db_name: &str) -> Result>> { self.sys_db_meta.get_all_tables(db_name) } + #[async_backtrace::framed] async fn list_tables_history( &self, tenant: &str, @@ -160,42 +170,49 @@ impl Catalog for ImmutableCatalog { self.list_tables(tenant, db_name).await } + #[async_backtrace::framed] async fn create_table(&self, _req: CreateTableReq) -> Result<()> { Err(ErrorCode::Unimplemented( "Cannot create table in system database", )) } + #[async_backtrace::framed] async fn drop_table_by_id(&self, _req: DropTableByIdReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot drop table in system database", )) } + #[async_backtrace::framed] async fn undrop_table(&self, _req: UndropTableReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot undrop table in system database", )) } + #[async_backtrace::framed] async fn undrop_database(&self, _req: UndropDatabaseReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot undrop database in system database", )) } + #[async_backtrace::framed] async fn rename_table(&self, _req: RenameTableReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot rename table in system database", )) } + #[async_backtrace::framed] async fn count_tables(&self, _req: CountTablesReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot count tables in system database", )) } + #[async_backtrace::framed] async fn get_table_copied_file_info( &self, _tenant: &str, @@ -208,6 +225,7 @@ impl Catalog for ImmutableCatalog { ))) } + #[async_backtrace::framed] async fn truncate_table( &self, _table_info: &TableInfo, @@ -219,6 +237,7 @@ impl Catalog for ImmutableCatalog { ))) } + #[async_backtrace::framed] async fn upsert_table_option( &self, _tenant: &str, @@ -231,6 +250,7 @@ impl Catalog for ImmutableCatalog { ))) } + #[async_backtrace::framed] async fn update_table_meta( &self, _table_info: 
&TableInfo, diff --git a/src/query/service/src/catalogs/default/mutable_catalog.rs b/src/query/service/src/catalogs/default/mutable_catalog.rs index 829db0512de95..c22085587ef30 100644 --- a/src/query/service/src/catalogs/default/mutable_catalog.rs +++ b/src/query/service/src/catalogs/default/mutable_catalog.rs @@ -90,6 +90,7 @@ impl MutableCatalog { /// /// MetaEmbedded /// ``` + #[async_backtrace::framed] pub async fn try_create_with_config(conf: InnerConfig) -> Result { let meta = { let provider = Arc::new(MetaStoreProvider::new(conf.meta.to_meta_grpc_client_conf())); @@ -146,6 +147,7 @@ impl Catalog for MutableCatalog { self } + #[async_backtrace::framed] async fn get_database(&self, tenant: &str, db_name: &str) -> Result> { let db_info = self .ctx @@ -155,6 +157,7 @@ impl Catalog for MutableCatalog { self.build_db_instance(&db_info) } + #[async_backtrace::framed] async fn list_databases(&self, tenant: &str) -> Result>> { let dbs = self .ctx @@ -171,6 +174,7 @@ impl Catalog for MutableCatalog { }) } + #[async_backtrace::framed] async fn create_database(&self, req: CreateDatabaseReq) -> Result { // Create database. let res = self.ctx.meta.create_database(req.clone()).await?; @@ -193,16 +197,19 @@ impl Catalog for MutableCatalog { Ok(CreateDatabaseReply { db_id: res.db_id }) } + #[async_backtrace::framed] async fn drop_database(&self, req: DropDatabaseReq) -> Result<()> { self.ctx.meta.drop_database(req).await?; Ok(()) } + #[async_backtrace::framed] async fn undrop_database(&self, req: UndropDatabaseReq) -> Result { let res = self.ctx.meta.undrop_database(req).await?; Ok(res) } + #[async_backtrace::framed] async fn rename_database(&self, req: RenameDatabaseReq) -> Result { let res = self.ctx.meta.rename_database(req).await?; Ok(res) @@ -213,6 +220,7 @@ impl Catalog for MutableCatalog { storage.get_table(table_info) } + #[async_backtrace::framed] async fn get_table_meta_by_id( &self, table_id: MetaId, @@ -221,6 +229,7 @@ impl Catalog for MutableCatalog { Ok(res) } + #[async_backtrace::framed] async fn get_table( &self, tenant: &str, @@ -231,11 +240,13 @@ impl Catalog for MutableCatalog { db.get_table(table_name).await } + #[async_backtrace::framed] async fn list_tables(&self, tenant: &str, db_name: &str) -> Result>> { let db = self.get_database(tenant, db_name).await?; db.list_tables().await } + #[async_backtrace::framed] async fn list_tables_history( &self, tenant: &str, @@ -245,6 +256,7 @@ impl Catalog for MutableCatalog { db.list_tables_history().await } + #[async_backtrace::framed] async fn create_table(&self, req: CreateTableReq) -> Result<()> { let db = self .get_database(&req.name_ident.tenant, &req.name_ident.db_name) @@ -252,11 +264,13 @@ impl Catalog for MutableCatalog { db.create_table(req).await } + #[async_backtrace::framed] async fn drop_table_by_id(&self, req: DropTableByIdReq) -> Result { let res = self.ctx.meta.drop_table_by_id(req).await?; Ok(res) } + #[async_backtrace::framed] async fn undrop_table(&self, req: UndropTableReq) -> Result { let db = self .get_database(&req.name_ident.tenant, &req.name_ident.db_name) @@ -264,6 +278,7 @@ impl Catalog for MutableCatalog { db.undrop_table(req).await } + #[async_backtrace::framed] async fn rename_table(&self, req: RenameTableReq) -> Result { let db = self .get_database(&req.name_ident.tenant, &req.name_ident.db_name) @@ -271,6 +286,7 @@ impl Catalog for MutableCatalog { db.rename_table(req).await } + #[async_backtrace::framed] async fn upsert_table_option( &self, tenant: &str, @@ -281,6 +297,7 @@ impl Catalog for MutableCatalog 
{ db.upsert_table_option(req).await } + #[async_backtrace::framed] async fn update_table_meta( &self, table_info: &TableInfo, @@ -304,6 +321,7 @@ impl Catalog for MutableCatalog { } } + #[async_backtrace::framed] async fn get_table_copied_file_info( &self, tenant: &str, @@ -314,6 +332,7 @@ impl Catalog for MutableCatalog { db.get_table_copied_file_info(req).await } + #[async_backtrace::framed] async fn truncate_table( &self, table_info: &TableInfo, @@ -330,6 +349,7 @@ impl Catalog for MutableCatalog { } } + #[async_backtrace::framed] async fn count_tables(&self, req: CountTablesReq) -> Result { let res = self.ctx.meta.count_tables(req).await?; Ok(res) diff --git a/src/query/service/src/clusters/cluster.rs b/src/query/service/src/clusters/cluster.rs index afd9da7e08072..ed240bcd3665d 100644 --- a/src/query/service/src/clusters/cluster.rs +++ b/src/query/service/src/clusters/cluster.rs @@ -102,6 +102,7 @@ impl ClusterHelper for Cluster { self.local_id.clone() } + #[async_backtrace::framed] async fn create_node_conn(&self, name: &str, config: &InnerConfig) -> Result { for node in &self.nodes { if node.id == name { @@ -140,6 +141,7 @@ impl ClusterHelper for Cluster { impl ClusterDiscovery { const METRIC_LABEL_FUNCTION: &'static str = "function"; + #[async_backtrace::framed] pub async fn create_meta_client(cfg: &InnerConfig) -> Result { let meta_api_provider = MetaStoreProvider::new(cfg.meta.to_meta_grpc_client_conf()); match meta_api_provider.create_meta_store().await { @@ -150,6 +152,7 @@ impl ClusterDiscovery { } } + #[async_backtrace::framed] pub async fn init(cfg: InnerConfig) -> Result<()> { let metastore = ClusterDiscovery::create_meta_client(&cfg).await?; GlobalInstance::set(Self::try_create(&cfg, metastore).await?); @@ -157,6 +160,7 @@ impl ClusterDiscovery { Ok(()) } + #[async_backtrace::framed] pub async fn try_create( cfg: &InnerConfig, metastore: MetaStore, @@ -195,6 +199,7 @@ impl ClusterDiscovery { Ok((lift_time, Arc::new(cluster_manager))) } + #[async_backtrace::framed] pub async fn discover(&self, config: &InnerConfig) -> Result> { match self.api_provider.get_nodes().await { Err(cause) => { @@ -271,6 +276,7 @@ impl ClusterDiscovery { } } + #[async_backtrace::framed] async fn drop_invalid_nodes(self: &Arc, node_info: &NodeInfo) -> Result<()> { let current_nodes_info = match self.api_provider.get_nodes().await { Ok(nodes) => nodes, @@ -320,6 +326,7 @@ impl ClusterDiscovery { Ok(()) } + #[async_backtrace::framed] pub async fn unregister_to_metastore(self: &Arc, signal: &mut SignalStream) { let mut heartbeat = self.heartbeat.lock().await; @@ -354,6 +361,7 @@ impl ClusterDiscovery { }; } + #[async_backtrace::framed] pub async fn register_to_metastore(self: &Arc, cfg: &InnerConfig) -> Result<()> { let cpus = cfg.query.num_cpus; let mut address = cfg.query.flight_api_address.clone(); @@ -391,6 +399,7 @@ impl ClusterDiscovery { } } + #[async_backtrace::framed] async fn start_heartbeat(self: &Arc, node_info: NodeInfo) -> Result<()> { let mut heartbeat = self.heartbeat.lock().await; heartbeat.start(node_info); @@ -488,9 +497,12 @@ impl ClusterHeartbeat { } pub fn start(&mut self, node_info: NodeInfo) { - self.shutdown_handler = Some(tokio::spawn(self.heartbeat_loop(node_info))); + self.shutdown_handler = Some(tokio::spawn( + async_backtrace::location!().frame(self.heartbeat_loop(node_info)), + )); } + #[async_backtrace::framed] pub async fn shutdown(&mut self) -> Result<()> { if let Some(shutdown_handler) = self.shutdown_handler.take() { self.shutdown.store(true, Ordering::Relaxed); @@ 
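Note: the attribute only applies to async fn items, so futures handed directly to the runtime, like the `server` future in rpc_service.rs and the `heartbeat_loop` future just above, are wrapped manually with async_backtrace::location!().frame(..) instead. A self-contained sketch of that pattern; the function names are illustrative, while the wrapping itself mirrors the spawn sites in this patch:

    // location!() records the call site; .frame(fut) wraps the future so the
    // spawned task appears as a root in the task dump instead of being
    // invisible to it.
    async fn background_job() {
        tokio::task::yield_now().await;
    }

    fn spawn_with_frame() -> tokio::task::JoinHandle<()> {
        tokio::spawn(async_backtrace::location!().frame(background_job()))
    }
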
-506,6 +518,7 @@ impl ClusterHeartbeat { } } +#[async_backtrace::framed] pub async fn create_client(config: &InnerConfig, address: &str) -> Result { match config.tls_query_cli_enabled() { true => Ok(FlightClient::new(FlightServiceClient::new( diff --git a/src/query/service/src/databases/default/default_database.rs b/src/query/service/src/databases/default/default_database.rs index b7411c5e2d6d0..d67dc0580b609 100644 --- a/src/query/service/src/databases/default/default_database.rs +++ b/src/query/service/src/databases/default/default_database.rs @@ -81,6 +81,7 @@ impl Database for DefaultDatabase { } // Get one table by db and table name. + #[async_backtrace::framed] async fn get_table(&self, table_name: &str) -> Result> { let table_info = self .ctx @@ -94,6 +95,7 @@ impl Database for DefaultDatabase { self.get_table_by_info(table_info.as_ref()) } + #[async_backtrace::framed] async fn list_tables(&self) -> Result>> { let table_infos = self .ctx @@ -104,6 +106,7 @@ impl Database for DefaultDatabase { self.load_tables(table_infos) } + #[async_backtrace::framed] async fn list_tables_history(&self) -> Result>> { // `get_table_history` will not fetch the tables that created before the // "metasrv time travel functions" is added. @@ -128,26 +131,31 @@ impl Database for DefaultDatabase { self.load_tables(table_infos) } + #[async_backtrace::framed] async fn create_table(&self, req: CreateTableReq) -> Result<()> { self.ctx.meta.create_table(req).await?; Ok(()) } + #[async_backtrace::framed] async fn drop_table_by_id(&self, req: DropTableByIdReq) -> Result { let res = self.ctx.meta.drop_table_by_id(req).await?; Ok(res) } + #[async_backtrace::framed] async fn undrop_table(&self, req: UndropTableReq) -> Result { let res = self.ctx.meta.undrop_table(req).await?; Ok(res) } + #[async_backtrace::framed] async fn rename_table(&self, req: RenameTableReq) -> Result { let res = self.ctx.meta.rename_table(req).await?; Ok(res) } + #[async_backtrace::framed] async fn upsert_table_option( &self, req: UpsertTableOptionReq, @@ -156,11 +164,13 @@ impl Database for DefaultDatabase { Ok(res) } + #[async_backtrace::framed] async fn update_table_meta(&self, req: UpdateTableMetaReq) -> Result { let res = self.ctx.meta.update_table_meta(req).await?; Ok(res) } + #[async_backtrace::framed] async fn get_table_copied_file_info( &self, req: GetTableCopiedFileReq, @@ -169,6 +179,7 @@ impl Database for DefaultDatabase { Ok(res) } + #[async_backtrace::framed] async fn upsert_table_copied_file_info( &self, req: UpsertTableCopiedFileReq, @@ -177,6 +188,7 @@ impl Database for DefaultDatabase { Ok(res) } + #[async_backtrace::framed] async fn truncate_table(&self, req: TruncateTableReq) -> Result { let res = self.ctx.meta.truncate_table(req).await?; Ok(res) diff --git a/src/query/service/src/databases/share/share_database.rs b/src/query/service/src/databases/share/share_database.rs index 9846b05e35587..8cf5eb9948af6 100644 --- a/src/query/service/src/databases/share/share_database.rs +++ b/src/query/service/src/databases/share/share_database.rs @@ -65,6 +65,7 @@ impl ShareDatabase { }) } + #[async_backtrace::framed] async fn get_table_info(&self, table_name: &str) -> Result> { let table_info_map = ShareEndpointManager::instance() .get_table_info_map(&self.ctx.tenant, &self.db_info, vec![ @@ -80,6 +81,7 @@ impl ShareDatabase { } } + #[async_backtrace::framed] async fn list_tables(&self) -> Result>> { let table_info_map = ShareEndpointManager::instance() .get_table_info_map(&self.ctx.tenant, &self.db_info, vec![]) @@ -108,47 +110,55 @@ 
impl Database for ShareDatabase { } // Get one table by db and table name. + #[async_backtrace::framed] async fn get_table(&self, table_name: &str) -> Result> { let table_info = self.get_table_info(table_name).await?; self.get_table_by_info(table_info.as_ref()) } + #[async_backtrace::framed] async fn list_tables(&self) -> Result>> { let table_infos = self.list_tables().await?; self.load_tables(table_infos) } + #[async_backtrace::framed] async fn list_tables_history(&self) -> Result>> { Err(ErrorCode::PermissionDenied( "Permission denied, cannot list table history from a shared database".to_string(), )) } + #[async_backtrace::framed] async fn create_table(&self, _req: CreateTableReq) -> Result<()> { Err(ErrorCode::PermissionDenied( "Permission denied, cannot create table from a shared database".to_string(), )) } + #[async_backtrace::framed] async fn drop_table_by_id(&self, _req: DropTableByIdReq) -> Result { Err(ErrorCode::PermissionDenied( "Permission denied, cannot drop table from a shared database".to_string(), )) } + #[async_backtrace::framed] async fn undrop_table(&self, _req: UndropTableReq) -> Result { Err(ErrorCode::PermissionDenied( "Permission denied, cannot undrop table from a shared database".to_string(), )) } + #[async_backtrace::framed] async fn rename_table(&self, _req: RenameTableReq) -> Result { Err(ErrorCode::PermissionDenied( "Permission denied, cannot rename table from a shared database".to_string(), )) } + #[async_backtrace::framed] async fn upsert_table_option( &self, _req: UpsertTableOptionReq, @@ -158,12 +168,14 @@ impl Database for ShareDatabase { )) } + #[async_backtrace::framed] async fn update_table_meta(&self, _req: UpdateTableMetaReq) -> Result { Err(ErrorCode::PermissionDenied( "Permission denied, cannot upsert table meta from a shared database".to_string(), )) } + #[async_backtrace::framed] async fn get_table_copied_file_info( &self, req: GetTableCopiedFileReq, @@ -172,6 +184,7 @@ impl Database for ShareDatabase { Ok(res) } + #[async_backtrace::framed] async fn upsert_table_copied_file_info( &self, req: UpsertTableCopiedFileReq, @@ -180,6 +193,7 @@ impl Database for ShareDatabase { Ok(res) } + #[async_backtrace::framed] async fn truncate_table(&self, _req: TruncateTableReq) -> Result { Err(ErrorCode::PermissionDenied( "Permission denied, cannot truncate table from a shared database".to_string(), diff --git a/src/query/service/src/global_services.rs b/src/query/service/src/global_services.rs index 3069ff01d3912..ef7034d1deb3e 100644 --- a/src/query/service/src/global_services.rs +++ b/src/query/service/src/global_services.rs @@ -37,11 +37,13 @@ use crate::sessions::SessionManager; pub struct GlobalServices; impl GlobalServices { + #[async_backtrace::framed] pub async fn init(config: InnerConfig) -> Result<()> { GlobalInstance::init_production(); GlobalServices::init_with(config).await } + #[async_backtrace::framed] pub async fn init_with(config: InnerConfig) -> Result<()> { // The order of initialization is very important GlobalConfig::init(config.clone())?; diff --git a/src/query/service/src/interpreters/access/accessor.rs b/src/query/service/src/interpreters/access/accessor.rs index 7cdab12c19861..2085e1c59ab1c 100644 --- a/src/query/service/src/interpreters/access/accessor.rs +++ b/src/query/service/src/interpreters/access/accessor.rs @@ -40,6 +40,7 @@ impl Accessor { Accessor { accessors } } + #[async_backtrace::framed] pub async fn check(&self, plan: &Plan) -> Result<()> { for accessor in self.accessors.values() { accessor.check(plan).await?; diff --git 
a/src/query/service/src/interpreters/access/management_mode_access.rs b/src/query/service/src/interpreters/access/management_mode_access.rs index c6a3c70af55fd..7501437a37a42 100644 --- a/src/query/service/src/interpreters/access/management_mode_access.rs +++ b/src/query/service/src/interpreters/access/management_mode_access.rs @@ -30,6 +30,7 @@ impl ManagementModeAccess { #[async_trait::async_trait] impl AccessChecker for ManagementModeAccess { // Check what we can do if in management mode. + #[async_backtrace::framed] async fn check(&self, plan: &Plan) -> Result<()> { // Allows for management-mode. if GlobalConfig::instance().query.management_mode { diff --git a/src/query/service/src/interpreters/access/privilege_access.rs b/src/query/service/src/interpreters/access/privilege_access.rs index 5c58abe110e54..88541e36a4d87 100644 --- a/src/query/service/src/interpreters/access/privilege_access.rs +++ b/src/query/service/src/interpreters/access/privilege_access.rs @@ -37,6 +37,7 @@ impl PrivilegeAccess { #[async_trait::async_trait] impl AccessChecker for PrivilegeAccess { + #[async_backtrace::framed] async fn check(&self, plan: &Plan) -> Result<()> { let session = self.ctx.get_current_session(); diff --git a/src/query/service/src/interpreters/common/grant.rs b/src/query/service/src/interpreters/common/grant.rs index 01c9a890bc5b6..f2d14109bd7e3 100644 --- a/src/query/service/src/interpreters/common/grant.rs +++ b/src/query/service/src/interpreters/common/grant.rs @@ -21,6 +21,7 @@ use common_meta_app::principal::GrantObject; use crate::procedures::ProcedureFactory; use crate::sessions::QueryContext; +#[async_backtrace::framed] pub async fn validate_grant_object_exists( ctx: &Arc, object: &GrantObject, diff --git a/src/query/service/src/interpreters/interpreter.rs b/src/query/service/src/interpreters/interpreter.rs index 4f5859e371cad..94201480738fe 100644 --- a/src/query/service/src/interpreters/interpreter.rs +++ b/src/query/service/src/interpreters/interpreter.rs @@ -48,6 +48,7 @@ pub trait Interpreter: Sync + Send { } /// The core of the databend processor which will execute the logical plan and get the DataBlock + #[async_backtrace::framed] async fn execute(&self, ctx: Arc) -> Result { InterpreterMetrics::record_query_start(&ctx); log_query_start(&ctx); diff --git a/src/query/service/src/interpreters/interpreter_call.rs b/src/query/service/src/interpreters/interpreter_call.rs index 78829c7dedf2c..be1d36e4e19b4 100644 --- a/src/query/service/src/interpreters/interpreter_call.rs +++ b/src/query/service/src/interpreters/interpreter_call.rs @@ -49,6 +49,7 @@ impl Interpreter for CallInterpreter { } #[tracing::instrument(level = "debug", name = "call_interpreter_execute", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let mut build_res = PipelineBuildResult::create(); self.func diff --git a/src/query/service/src/interpreters/interpreter_catalog_create.rs b/src/query/service/src/interpreters/interpreter_catalog_create.rs index d62ae452f9fce..171f70ebe7161 100644 --- a/src/query/service/src/interpreters/interpreter_catalog_create.rs +++ b/src/query/service/src/interpreters/interpreter_catalog_create.rs @@ -46,6 +46,7 @@ impl Interpreter for CreateCatalogInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { if let CatalogOption::Iceberg(opt) = &self.plan.meta.catalog_option { if 
!opt.storage_params.is_secure() && !GlobalConfig::instance().storage.allow_insecure { diff --git a/src/query/service/src/interpreters/interpreter_catalog_drop.rs b/src/query/service/src/interpreters/interpreter_catalog_drop.rs index be874000f3964..b944ccee28da3 100644 --- a/src/query/service/src/interpreters/interpreter_catalog_drop.rs +++ b/src/query/service/src/interpreters/interpreter_catalog_drop.rs @@ -43,6 +43,7 @@ impl Interpreter for DropCatalogInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let mgr = CatalogManager::instance(); mgr.drop_user_defined_catalog(self.plan.clone().into())?; diff --git a/src/query/service/src/interpreters/interpreter_cluster_key_alter.rs b/src/query/service/src/interpreters/interpreter_cluster_key_alter.rs index 0463411d3b9ee..618c784c653c3 100644 --- a/src/query/service/src/interpreters/interpreter_cluster_key_alter.rs +++ b/src/query/service/src/interpreters/interpreter_cluster_key_alter.rs @@ -39,6 +39,7 @@ impl Interpreter for AlterTableClusterKeyInterpreter { "AlterTableClusterKeyInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = &self.plan; let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_cluster_key_drop.rs b/src/query/service/src/interpreters/interpreter_cluster_key_drop.rs index 04ddb8f334e28..2f5545a12c884 100644 --- a/src/query/service/src/interpreters/interpreter_cluster_key_drop.rs +++ b/src/query/service/src/interpreters/interpreter_cluster_key_drop.rs @@ -39,6 +39,7 @@ impl Interpreter for DropTableClusterKeyInterpreter { "DropTableClusterKeyInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = &self.plan; let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_copy.rs b/src/query/service/src/interpreters/interpreter_copy.rs index f00205ca73379..5e98c27d4f1ae 100644 --- a/src/query/service/src/interpreters/interpreter_copy.rs +++ b/src/query/service/src/interpreters/interpreter_copy.rs @@ -64,6 +64,7 @@ impl CopyInterpreter { Ok(CopyInterpreter { ctx, plan }) } + #[async_backtrace::framed] async fn build_query(&self, query: &Plan) -> Result<(PipelineBuildResult, DataSchemaRef)> { let (s_expr, metadata, bind_context, formatted_ast) = match query { Plan::Query { @@ -103,6 +104,7 @@ impl CopyInterpreter { Ok((build_res, data_schema)) } + #[async_backtrace::framed] async fn build_copy_into_stage_pipeline( &self, stage: &StageInfo, @@ -134,6 +136,7 @@ impl CopyInterpreter { Ok(build_res) } + #[async_backtrace::framed] async fn try_purge_files( ctx: Arc, stage_info: &StageInfo, @@ -159,6 +162,7 @@ impl CopyInterpreter { } #[allow(clippy::too_many_arguments)] + #[async_backtrace::framed] async fn build_copy_into_table_with_transform_pipeline( &self, catalog_name: &str, @@ -223,6 +227,7 @@ impl CopyInterpreter { } #[allow(clippy::too_many_arguments)] + #[async_backtrace::framed] async fn build_copy_into_table_pipeline( &self, catalog_name: &str, @@ -535,6 +540,7 @@ impl Interpreter for CopyInterpreter { } #[tracing::instrument(level = "debug", name = "copy_interpreter_execute_v2", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { match &self.plan { CopyPlan::IntoTable { diff --git a/src/query/service/src/interpreters/interpreter_database_create.rs 
b/src/query/service/src/interpreters/interpreter_database_create.rs index 9196238968750..a50a50bba89e0 100644 --- a/src/query/service/src/interpreters/interpreter_database_create.rs +++ b/src/query/service/src/interpreters/interpreter_database_create.rs @@ -44,6 +44,7 @@ impl Interpreter for CreateDatabaseInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.plan.tenant.clone(); let quota_api = UserApiProvider::instance().get_tenant_quota_api_client(&tenant)?; diff --git a/src/query/service/src/interpreters/interpreter_database_drop.rs b/src/query/service/src/interpreters/interpreter_database_drop.rs index f114ef8a097a0..06aa51edf1a25 100644 --- a/src/query/service/src/interpreters/interpreter_database_drop.rs +++ b/src/query/service/src/interpreters/interpreter_database_drop.rs @@ -39,6 +39,7 @@ impl Interpreter for DropDatabaseInterpreter { "DropDatabaseInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog = self.ctx.get_catalog(&self.plan.catalog)?; catalog.drop_database(self.plan.clone().into()).await?; diff --git a/src/query/service/src/interpreters/interpreter_database_rename.rs b/src/query/service/src/interpreters/interpreter_database_rename.rs index ec71de543e866..6a57f42752fbf 100644 --- a/src/query/service/src/interpreters/interpreter_database_rename.rs +++ b/src/query/service/src/interpreters/interpreter_database_rename.rs @@ -41,6 +41,7 @@ impl Interpreter for RenameDatabaseInterpreter { "RenameDatabaseInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { for entity in &self.plan.entities { let catalog = self.ctx.get_catalog(&entity.catalog)?; diff --git a/src/query/service/src/interpreters/interpreter_database_show_create.rs b/src/query/service/src/interpreters/interpreter_database_show_create.rs index d1b21c1cee2ec..e75830443d0e7 100644 --- a/src/query/service/src/interpreters/interpreter_database_show_create.rs +++ b/src/query/service/src/interpreters/interpreter_database_show_create.rs @@ -50,6 +50,7 @@ impl Interpreter for ShowCreateDatabaseInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.ctx.get_tenant(); let catalog = self.ctx.get_catalog(&self.plan.catalog)?; diff --git a/src/query/service/src/interpreters/interpreter_database_undrop.rs b/src/query/service/src/interpreters/interpreter_database_undrop.rs index 006b74342d5cd..dd01565f68c27 100644 --- a/src/query/service/src/interpreters/interpreter_database_undrop.rs +++ b/src/query/service/src/interpreters/interpreter_database_undrop.rs @@ -39,6 +39,7 @@ impl Interpreter for UndropDatabaseInterpreter { "UndropDatabaseInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog_name = self.plan.catalog.as_str(); let catalog = self.ctx.get_catalog(catalog_name)?; diff --git a/src/query/service/src/interpreters/interpreter_delete.rs b/src/query/service/src/interpreters/interpreter_delete.rs index c818310a0e2d3..b1ce03fa6647a 100644 --- a/src/query/service/src/interpreters/interpreter_delete.rs +++ b/src/query/service/src/interpreters/interpreter_delete.rs @@ -55,6 +55,7 @@ impl Interpreter for DeleteInterpreter { } #[tracing::instrument(level = "debug", name = "delete_interpreter_execute", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let 
catalog_name = self.plan.catalog_name.as_str(); let db_name = self.plan.database_name.as_str(); diff --git a/src/query/service/src/interpreters/interpreter_explain.rs b/src/query/service/src/interpreters/interpreter_explain.rs index 45100f22a8a09..4126e0040e3e5 100644 --- a/src/query/service/src/interpreters/interpreter_explain.rs +++ b/src/query/service/src/interpreters/interpreter_explain.rs @@ -58,6 +58,7 @@ impl Interpreter for ExplainInterpreter { self.schema.clone() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let blocks = match &self.kind { ExplainKind::Raw => self.explain_plan(&self.plan)?, @@ -218,6 +219,7 @@ impl ExplainInterpreter { Ok(vec![DataBlock::new_from_columns(vec![formatted_plan])]) } + #[async_backtrace::framed] pub async fn explain_pipeline( &self, s_expr: SExpr, @@ -248,6 +250,7 @@ impl ExplainInterpreter { Ok(blocks) } + #[async_backtrace::framed] async fn explain_fragments( &self, s_expr: SExpr, @@ -272,6 +275,7 @@ impl ExplainInterpreter { Ok(vec![DataBlock::new_from_columns(vec![formatted_plan])]) } + #[async_backtrace::framed] async fn explain_analyze( &self, s_expr: &SExpr, diff --git a/src/query/service/src/interpreters/interpreter_factory.rs b/src/query/service/src/interpreters/interpreter_factory.rs index 1a02d19a8d87d..b03c0730745c2 100644 --- a/src/query/service/src/interpreters/interpreter_factory.rs +++ b/src/query/service/src/interpreters/interpreter_factory.rs @@ -49,6 +49,7 @@ pub struct InterpreterFactory; /// InterpreterFactory provides `get` method which transforms `Plan` into the corresponding interpreter. /// Such as: Plan::Query -> InterpreterSelectV2 impl InterpreterFactory { + #[async_backtrace::framed] pub async fn get(ctx: Arc, plan: &Plan) -> Result { // Check the access permission. 
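Note: frames nest. Interpreter::execute is framed, the execute2 implementations are framed, and so are the helpers they await, so a dump taken mid-query shows the whole logical async stack rather than one opaque task. A small sketch of the nesting behaviour, with illustrative names:

    // When one framed fn awaits another, the callee renders underneath the
    // caller in the task-dump output, which is why this patch annotates the
    // helpers as well as the entry points.
    #[async_backtrace::framed]
    async fn execute_query() {
        fetch_partitions().await;
    }

    #[async_backtrace::framed]
    async fn fetch_partitions() {
        tokio::task::yield_now().await;
    }
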
let access_checker = Accessor::create(ctx.clone()); diff --git a/src/query/service/src/interpreters/interpreter_file_format_create.rs b/src/query/service/src/interpreters/interpreter_file_format_create.rs index 9d6bf974ac6cd..36ad7d22136da 100644 --- a/src/query/service/src/interpreters/interpreter_file_format_create.rs +++ b/src/query/service/src/interpreters/interpreter_file_format_create.rs @@ -44,6 +44,7 @@ impl Interpreter for CreateFileFormatInterpreter { } #[tracing::instrument(level = "info", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let user_mgr = UserApiProvider::instance(); diff --git a/src/query/service/src/interpreters/interpreter_file_format_drop.rs b/src/query/service/src/interpreters/interpreter_file_format_drop.rs index 3cbf0b403f9b5..9f72e949ae858 100644 --- a/src/query/service/src/interpreters/interpreter_file_format_drop.rs +++ b/src/query/service/src/interpreters/interpreter_file_format_drop.rs @@ -42,6 +42,7 @@ impl Interpreter for DropFileFormatInterpreter { } #[tracing::instrument(level = "info", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_file_format_show.rs b/src/query/service/src/interpreters/interpreter_file_format_show.rs index f26fdccf70fd6..95bc22cc2dc5c 100644 --- a/src/query/service/src/interpreters/interpreter_file_format_show.rs +++ b/src/query/service/src/interpreters/interpreter_file_format_show.rs @@ -50,6 +50,7 @@ impl Interpreter for ShowFileFormatsInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let user_mgr = UserApiProvider::instance(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_insert.rs b/src/query/service/src/interpreters/interpreter_insert.rs index 00cc18fe342b7..0f40825ec3600 100644 --- a/src/query/service/src/interpreters/interpreter_insert.rs +++ b/src/query/service/src/interpreters/interpreter_insert.rs @@ -125,6 +125,7 @@ impl InsertInterpreter { Ok(cast_needed) } + #[async_backtrace::framed] async fn try_purge_files( ctx: Arc, stage_info: &StageInfo, @@ -149,6 +150,7 @@ impl InsertInterpreter { } } + #[async_backtrace::framed] async fn prepared_values(&self, values_str: &str) -> Result<(DataSchemaRef, Vec)> { let settings = self.ctx.get_settings(); let sql_dialect = settings.get_sql_dialect()?; @@ -192,6 +194,7 @@ impl InsertInterpreter { Ok((Arc::new(DataSchema::new(attachment_fields)), const_values)) } + #[async_backtrace::framed] async fn build_insert_from_stage_pipeline( &self, table: Arc, @@ -346,6 +349,7 @@ impl Interpreter for InsertInterpreter { "InsertIntoInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = &self.plan; let table = self @@ -552,6 +556,7 @@ impl AsyncSource for ValueSource { const SKIP_EMPTY_DATA_BLOCK: bool = true; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.is_finished { return Ok(None); @@ -601,6 +606,7 @@ impl ValueSource { } } + #[async_backtrace::framed] pub async fn read>( &self, estimated_rows: usize, @@ -648,6 +654,7 @@ impl ValueSource { } /// Parse single row value, like ('111', 222, 1 + 1) + 
#[async_backtrace::framed] async fn parse_next_row>( &self, field_decoder: &FastFieldDecoderValues, diff --git a/src/query/service/src/interpreters/interpreter_kill.rs b/src/query/service/src/interpreters/interpreter_kill.rs index 1503624d8f089..78aa81f878132 100644 --- a/src/query/service/src/interpreters/interpreter_kill.rs +++ b/src/query/service/src/interpreters/interpreter_kill.rs @@ -32,6 +32,7 @@ impl KillInterpreter { Ok(KillInterpreter { ctx, plan }) } + #[async_backtrace::framed] async fn execute_kill(&self, session_id: &String) -> Result { match self.ctx.get_session_by_id(session_id) { None => Err(ErrorCode::UnknownSession(format!( @@ -58,6 +59,7 @@ impl Interpreter for KillInterpreter { "KillInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let id = &self.plan.id; // If press Ctrl + C, MySQL Client will create a new session and send query diff --git a/src/query/service/src/interpreters/interpreter_presign.rs b/src/query/service/src/interpreters/interpreter_presign.rs index 9ac8a4c37f326..56ee06cefdb05 100644 --- a/src/query/service/src/interpreters/interpreter_presign.rs +++ b/src/query/service/src/interpreters/interpreter_presign.rs @@ -56,6 +56,7 @@ impl Interpreter for PresignInterpreter { } #[tracing::instrument(level = "debug", name = "presign_interpreter_execute", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let op = StageTable::get_op(&self.plan.stage)?; if !op.info().can_presign() { diff --git a/src/query/service/src/interpreters/interpreter_privilege_grant.rs b/src/query/service/src/interpreters/interpreter_privilege_grant.rs index 9b755eecc0aa4..69707e4d5eb7c 100644 --- a/src/query/service/src/interpreters/interpreter_privilege_grant.rs +++ b/src/query/service/src/interpreters/interpreter_privilege_grant.rs @@ -46,6 +46,7 @@ impl Interpreter for GrantPrivilegeInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); diff --git a/src/query/service/src/interpreters/interpreter_privilege_revoke.rs b/src/query/service/src/interpreters/interpreter_privilege_revoke.rs index 1075f16484ea9..d4880b99d394a 100644 --- a/src/query/service/src/interpreters/interpreter_privilege_revoke.rs +++ b/src/query/service/src/interpreters/interpreter_privilege_revoke.rs @@ -44,6 +44,7 @@ impl Interpreter for RevokePrivilegeInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); diff --git a/src/query/service/src/interpreters/interpreter_replace.rs b/src/query/service/src/interpreters/interpreter_replace.rs index e3861cb5ac83a..48bc6b625b698 100644 --- a/src/query/service/src/interpreters/interpreter_replace.rs +++ b/src/query/service/src/interpreters/interpreter_replace.rs @@ -52,6 +52,7 @@ impl Interpreter for ReplaceInterpreter { "ReplaceIntoInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { self.check_on_conflicts()?; @@ -105,6 +106,7 @@ impl ReplaceInterpreter { Ok(()) } } + #[async_backtrace::framed] async fn connect_input_source<'a>( &'a self, ctx: Arc, @@ -150,6 +152,7 @@ impl ReplaceInterpreter { Ok(build_res) } + #[async_backtrace::framed] async fn connect_query_plan_source<'a>( &'a self, ctx: Arc, diff --git 
a/src/query/service/src/interpreters/interpreter_role_create.rs b/src/query/service/src/interpreters/interpreter_role_create.rs index 896df9711d6d4..c7fcc597abe9d 100644 --- a/src/query/service/src/interpreters/interpreter_role_create.rs +++ b/src/query/service/src/interpreters/interpreter_role_create.rs @@ -44,6 +44,7 @@ impl Interpreter for CreateRoleInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { // TODO: add privilege check about CREATE ROLE let plan = self.plan.clone(); diff --git a/src/query/service/src/interpreters/interpreter_role_drop.rs b/src/query/service/src/interpreters/interpreter_role_drop.rs index 5a97e2f0413bb..3bd7f1fbadc99 100644 --- a/src/query/service/src/interpreters/interpreter_role_drop.rs +++ b/src/query/service/src/interpreters/interpreter_role_drop.rs @@ -43,6 +43,7 @@ impl Interpreter for DropRoleInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { // TODO: add privilege check about DROP role let plan = self.plan.clone(); diff --git a/src/query/service/src/interpreters/interpreter_role_grant.rs b/src/query/service/src/interpreters/interpreter_role_grant.rs index aec0132544447..c047740c0f757 100644 --- a/src/query/service/src/interpreters/interpreter_role_grant.rs +++ b/src/query/service/src/interpreters/interpreter_role_grant.rs @@ -44,6 +44,7 @@ impl Interpreter for GrantRoleInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_role_revoke.rs b/src/query/service/src/interpreters/interpreter_role_revoke.rs index e6aa7b9c038a3..2ab064d658aa8 100644 --- a/src/query/service/src/interpreters/interpreter_role_revoke.rs +++ b/src/query/service/src/interpreters/interpreter_role_revoke.rs @@ -44,6 +44,7 @@ impl Interpreter for RevokeRoleInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_role_set.rs b/src/query/service/src/interpreters/interpreter_role_set.rs index 5e681108c6305..284cddb25710b 100644 --- a/src/query/service/src/interpreters/interpreter_role_set.rs +++ b/src/query/service/src/interpreters/interpreter_role_set.rs @@ -42,6 +42,7 @@ impl Interpreter for SetRoleInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let session = self.ctx.get_current_session(); diff --git a/src/query/service/src/interpreters/interpreter_role_show.rs b/src/query/service/src/interpreters/interpreter_role_show.rs index 7d4585224290c..ef99933fb384c 100644 --- a/src/query/service/src/interpreters/interpreter_role_show.rs +++ b/src/query/service/src/interpreters/interpreter_role_show.rs @@ -51,6 +51,7 @@ impl Interpreter for ShowRolesInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let 
session = self.ctx.get_current_session(); let mut roles = session.get_all_available_roles().await?; diff --git a/src/query/service/src/interpreters/interpreter_select.rs b/src/query/service/src/interpreters/interpreter_select.rs index 09e38292f1c6e..521839e903437 100644 --- a/src/query/service/src/interpreters/interpreter_select.rs +++ b/src/query/service/src/interpreters/interpreter_select.rs @@ -74,11 +74,13 @@ impl SelectInterpreter { } #[inline] + #[async_backtrace::framed] pub async fn build_physical_plan(&self) -> Result { let mut builder = PhysicalPlanBuilder::new(self.metadata.clone(), self.ctx.clone()); builder.build(&self.s_expr).await } + #[async_backtrace::framed] pub async fn build_pipeline(&self, physical_plan: PhysicalPlan) -> Result { build_query_pipeline( &self.ctx, @@ -191,6 +193,7 @@ impl Interpreter for SelectInterpreter { /// This method will create a new pipeline /// The QueryPipelineBuilder will use the optimized plan to generate a Pipeline #[tracing::instrument(level = "debug", name = "select_interpreter_execute", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { // 0. Need to build physical plan first to get the partitions. let physical_plan = self.build_physical_plan().await?; diff --git a/src/query/service/src/interpreters/interpreter_setting.rs b/src/query/service/src/interpreters/interpreter_setting.rs index eb4713df862eb..af65c6ecfa8e7 100644 --- a/src/query/service/src/interpreters/interpreter_setting.rs +++ b/src/query/service/src/interpreters/interpreter_setting.rs @@ -42,6 +42,7 @@ impl Interpreter for SettingInterpreter { "SettingInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.set.clone(); let mut keys: Vec = vec![]; diff --git a/src/query/service/src/interpreters/interpreter_share_alter_tenants.rs b/src/query/service/src/interpreters/interpreter_share_alter_tenants.rs index 77d2d2d4d53c2..f7236a2e4f342 100644 --- a/src/query/service/src/interpreters/interpreter_share_alter_tenants.rs +++ b/src/query/service/src/interpreters/interpreter_share_alter_tenants.rs @@ -46,6 +46,7 @@ impl Interpreter for AlterShareTenantsInterpreter { "AlterShareTenantsInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.ctx.get_tenant(); let meta_api = UserApiProvider::instance().get_meta_store_client(); diff --git a/src/query/service/src/interpreters/interpreter_share_create.rs b/src/query/service/src/interpreters/interpreter_share_create.rs index b857355d1a350..d86b7b6081b11 100644 --- a/src/query/service/src/interpreters/interpreter_share_create.rs +++ b/src/query/service/src/interpreters/interpreter_share_create.rs @@ -42,6 +42,7 @@ impl Interpreter for CreateShareInterpreter { "CreateShareInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let resp = meta_api.create_share(self.plan.clone().into()).await?; diff --git a/src/query/service/src/interpreters/interpreter_share_desc.rs b/src/query/service/src/interpreters/interpreter_share_desc.rs index d7a433b17a5ca..08b97b525d4ec 100644 --- a/src/query/service/src/interpreters/interpreter_share_desc.rs +++ b/src/query/service/src/interpreters/interpreter_share_desc.rs @@ -52,6 +52,7 @@ impl Interpreter for DescShareInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = 
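
Every hunk in this patch follows the same mechanical recipe: each `async fn` on the execution path gains a `#[async_backtrace::framed]` attribute, so the future it returns registers a frame in async-backtrace's task tree. A minimal, self-contained sketch of the mechanism (not part of the patch; assumes async-backtrace 0.2 and tokio):

```rust
// Sketch only: every `framed` future registers a frame, and
// `taskdump_tree` renders the tree of frames currently alive.
#[async_backtrace::framed]
async fn execute() {
    fetch().await;
}

#[async_backtrace::framed]
async fn fetch() {
    // Passing `true` waits for tasks that are mid-poll before rendering.
    println!("{}", async_backtrace::taskdump_tree(true));
}

#[tokio::main]
async fn main() {
    execute().await;
}
```

Within a single task, nested `framed` futures link up automatically; the attribute only needs to be repeated so that every interesting await point contributes a frame.
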
UserApiProvider::instance().get_meta_store_client(); let req = GetShareGrantObjectReq { diff --git a/src/query/service/src/interpreters/interpreter_share_drop.rs b/src/query/service/src/interpreters/interpreter_share_drop.rs index c34c28f292c86..4adc8172bf416 100644 --- a/src/query/service/src/interpreters/interpreter_share_drop.rs +++ b/src/query/service/src/interpreters/interpreter_share_drop.rs @@ -42,6 +42,7 @@ impl Interpreter for DropShareInterpreter { "DropShareInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let resp = meta_api.drop_share(self.plan.clone().into()).await?; diff --git a/src/query/service/src/interpreters/interpreter_share_endpoint_create.rs b/src/query/service/src/interpreters/interpreter_share_endpoint_create.rs index 22eb018eafe47..fad93cde8655c 100644 --- a/src/query/service/src/interpreters/interpreter_share_endpoint_create.rs +++ b/src/query/service/src/interpreters/interpreter_share_endpoint_create.rs @@ -39,6 +39,7 @@ impl Interpreter for CreateShareEndpointInterpreter { "CreateShareEndpointInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let _resp = meta_api diff --git a/src/query/service/src/interpreters/interpreter_share_endpoint_drop.rs b/src/query/service/src/interpreters/interpreter_share_endpoint_drop.rs index c207155b352e2..ec1551cb890e2 100644 --- a/src/query/service/src/interpreters/interpreter_share_endpoint_drop.rs +++ b/src/query/service/src/interpreters/interpreter_share_endpoint_drop.rs @@ -39,6 +39,7 @@ impl Interpreter for DropShareEndpointInterpreter { "DropShareEndpointInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let _resp = meta_api diff --git a/src/query/service/src/interpreters/interpreter_share_endpoint_show.rs b/src/query/service/src/interpreters/interpreter_share_endpoint_show.rs index 3d4445398f3c6..236400b2ec9ec 100644 --- a/src/query/service/src/interpreters/interpreter_share_endpoint_show.rs +++ b/src/query/service/src/interpreters/interpreter_share_endpoint_show.rs @@ -47,6 +47,7 @@ impl Interpreter for ShowShareEndpointInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let resp = meta_api diff --git a/src/query/service/src/interpreters/interpreter_share_grant_object.rs b/src/query/service/src/interpreters/interpreter_share_grant_object.rs index ab7e4360fe6ac..f2cbc3316e958 100644 --- a/src/query/service/src/interpreters/interpreter_share_grant_object.rs +++ b/src/query/service/src/interpreters/interpreter_share_grant_object.rs @@ -45,6 +45,7 @@ impl Interpreter for GrantShareObjectInterpreter { "GrantShareObjectInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.ctx.get_tenant(); let meta_api = UserApiProvider::instance().get_meta_store_client(); diff --git a/src/query/service/src/interpreters/interpreter_share_revoke_object.rs b/src/query/service/src/interpreters/interpreter_share_revoke_object.rs index ee8f81bbb09eb..09a6bf79ac8cf 100644 --- a/src/query/service/src/interpreters/interpreter_share_revoke_object.rs +++ b/src/query/service/src/interpreters/interpreter_share_revoke_object.rs @@ -45,6 +45,7 @@ impl Interpreter for RevokeShareObjectInterpreter { 
"RevokeShareObjectInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.ctx.get_tenant(); let meta_api = UserApiProvider::instance().get_meta_store_client(); diff --git a/src/query/service/src/interpreters/interpreter_share_show.rs b/src/query/service/src/interpreters/interpreter_share_show.rs index 5cd4d56434ce4..5ede8b73f09a9 100644 --- a/src/query/service/src/interpreters/interpreter_share_show.rs +++ b/src/query/service/src/interpreters/interpreter_share_show.rs @@ -50,6 +50,7 @@ impl Interpreter for ShowSharesInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_share_show_grant_tenants.rs b/src/query/service/src/interpreters/interpreter_share_show_grant_tenants.rs index 9738235c0896d..99c5e7e45eab8 100644 --- a/src/query/service/src/interpreters/interpreter_share_show_grant_tenants.rs +++ b/src/query/service/src/interpreters/interpreter_share_show_grant_tenants.rs @@ -51,6 +51,7 @@ impl Interpreter for ShowGrantTenantsOfShareInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_show_grants.rs b/src/query/service/src/interpreters/interpreter_show_grants.rs index e51b827f2e106..ed89603d02e9d 100644 --- a/src/query/service/src/interpreters/interpreter_show_grants.rs +++ b/src/query/service/src/interpreters/interpreter_show_grants.rs @@ -50,6 +50,7 @@ impl Interpreter for ShowGrantsInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_show_object_grant_privileges.rs b/src/query/service/src/interpreters/interpreter_show_object_grant_privileges.rs index 353be3157bdcb..f9dc0dc27ce43 100644 --- a/src/query/service/src/interpreters/interpreter_show_object_grant_privileges.rs +++ b/src/query/service/src/interpreters/interpreter_show_object_grant_privileges.rs @@ -50,6 +50,7 @@ impl Interpreter for ShowObjectGrantPrivilegesInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let req = GetObjectGrantPrivilegesReq { diff --git a/src/query/service/src/interpreters/interpreter_table_add_column.rs b/src/query/service/src/interpreters/interpreter_table_add_column.rs index 9474e3d85e812..dadfb93a78377 100644 --- a/src/query/service/src/interpreters/interpreter_table_add_column.rs +++ b/src/query/service/src/interpreters/interpreter_table_add_column.rs @@ -45,6 +45,7 @@ impl Interpreter for AddTableColumnInterpreter { "AddTableColumnInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog_name = self.plan.catalog.as_str(); let db_name = self.plan.database.as_str(); diff --git a/src/query/service/src/interpreters/interpreter_table_analyze.rs b/src/query/service/src/interpreters/interpreter_table_analyze.rs index dd70e1cd2fc10..21b585e788d98 100644 --- a/src/query/service/src/interpreters/interpreter_table_analyze.rs +++ b/src/query/service/src/interpreters/interpreter_table_analyze.rs @@ -39,6 +39,7 @@ impl Interpreter for AnalyzeTableInterpreter { 
"AnalyzeTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = &self.plan; let table = self diff --git a/src/query/service/src/interpreters/interpreter_table_create.rs b/src/query/service/src/interpreters/interpreter_table_create.rs index 95dbba228751c..270931007e016 100644 --- a/src/query/service/src/interpreters/interpreter_table_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_create.rs @@ -60,6 +60,7 @@ impl Interpreter for CreateTableInterpreter { "CreateTableInterpreterV2" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.plan.tenant.clone(); let quota_api = UserApiProvider::instance().get_tenant_quota_api_client(&tenant)?; @@ -116,6 +117,7 @@ impl Interpreter for CreateTableInterpreter { } impl CreateTableInterpreter { + #[async_backtrace::framed] async fn create_table_as_select(&self, select_plan: Box) -> Result { let tenant = self.ctx.get_tenant(); let catalog = self.ctx.get_catalog(&self.plan.catalog)?; @@ -150,6 +152,7 @@ impl CreateTableInterpreter { .await } + #[async_backtrace::framed] async fn create_table(&self) -> Result { let catalog = self.ctx.get_catalog(self.plan.catalog.as_str())?; let mut stat = None; diff --git a/src/query/service/src/interpreters/interpreter_table_describe.rs b/src/query/service/src/interpreters/interpreter_table_describe.rs index 4e6ab019b06e8..7084362394ebd 100644 --- a/src/query/service/src/interpreters/interpreter_table_describe.rs +++ b/src/query/service/src/interpreters/interpreter_table_describe.rs @@ -53,6 +53,7 @@ impl Interpreter for DescribeTableInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog = self.plan.catalog.as_str(); let database = self.plan.database.as_str(); diff --git a/src/query/service/src/interpreters/interpreter_table_drop.rs b/src/query/service/src/interpreters/interpreter_table_drop.rs index 8efcda2e43803..81c7ca3ce5eaf 100644 --- a/src/query/service/src/interpreters/interpreter_table_drop.rs +++ b/src/query/service/src/interpreters/interpreter_table_drop.rs @@ -43,6 +43,7 @@ impl Interpreter for DropTableInterpreter { "DropTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog_name = self.plan.catalog.as_str(); let db_name = self.plan.database.as_str(); diff --git a/src/query/service/src/interpreters/interpreter_table_drop_column.rs b/src/query/service/src/interpreters/interpreter_table_drop_column.rs index a899f5a6fecfa..749a2a03386e9 100644 --- a/src/query/service/src/interpreters/interpreter_table_drop_column.rs +++ b/src/query/service/src/interpreters/interpreter_table_drop_column.rs @@ -44,6 +44,7 @@ impl Interpreter for DropTableColumnInterpreter { "DropTableColumnInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog_name = self.plan.catalog.as_str(); let db_name = self.plan.database.as_str(); diff --git a/src/query/service/src/interpreters/interpreter_table_exists.rs b/src/query/service/src/interpreters/interpreter_table_exists.rs index 554a61d5ff6dc..a8d8ef2191fb9 100644 --- a/src/query/service/src/interpreters/interpreter_table_exists.rs +++ b/src/query/service/src/interpreters/interpreter_table_exists.rs @@ -46,6 +46,7 @@ impl Interpreter for ExistsTableInterpreter { "ExistsTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog = self.plan.catalog.as_str(); let database = self.plan.database.as_str(); diff --git 
a/src/query/service/src/interpreters/interpreter_table_optimize.rs b/src/query/service/src/interpreters/interpreter_table_optimize.rs index 5726c770e8787..abedde8fb2851 100644 --- a/src/query/service/src/interpreters/interpreter_table_optimize.rs +++ b/src/query/service/src/interpreters/interpreter_table_optimize.rs @@ -44,6 +44,7 @@ impl Interpreter for OptimizeTableInterpreter { "OptimizeTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = &self.plan; let ctx = self.ctx.clone(); diff --git a/src/query/service/src/interpreters/interpreter_table_recluster.rs b/src/query/service/src/interpreters/interpreter_table_recluster.rs index 72bf93e420136..98ee1c0b0ae60 100644 --- a/src/query/service/src/interpreters/interpreter_table_recluster.rs +++ b/src/query/service/src/interpreters/interpreter_table_recluster.rs @@ -45,6 +45,7 @@ impl Interpreter for ReclusterTableInterpreter { "ReclusterTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = &self.plan; let ctx = self.ctx.clone(); diff --git a/src/query/service/src/interpreters/interpreter_table_rename.rs b/src/query/service/src/interpreters/interpreter_table_rename.rs index 478823ed3966c..e76070f89271b 100644 --- a/src/query/service/src/interpreters/interpreter_table_rename.rs +++ b/src/query/service/src/interpreters/interpreter_table_rename.rs @@ -41,6 +41,7 @@ impl Interpreter for RenameTableInterpreter { "RenameTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { // TODO check privileges // You must have ALTER and DROP privileges for the original table, diff --git a/src/query/service/src/interpreters/interpreter_table_revert.rs b/src/query/service/src/interpreters/interpreter_table_revert.rs index 5a0552e436fd7..f926826ad7bbb 100644 --- a/src/query/service/src/interpreters/interpreter_table_revert.rs +++ b/src/query/service/src/interpreters/interpreter_table_revert.rs @@ -40,6 +40,7 @@ impl Interpreter for RevertTableInterpreter { "RevertTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.ctx.get_tenant(); let catalog = self.ctx.get_catalog(self.plan.catalog.as_str())?; diff --git a/src/query/service/src/interpreters/interpreter_table_show_create.rs b/src/query/service/src/interpreters/interpreter_table_show_create.rs index df715051eb6df..38a43ca50a160 100644 --- a/src/query/service/src/interpreters/interpreter_table_show_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_show_create.rs @@ -54,6 +54,7 @@ impl Interpreter for ShowCreateTableInterpreter { self.plan.schema() } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let tenant = self.ctx.get_tenant(); let catalog = self.ctx.get_catalog(self.plan.catalog.as_str())?; diff --git a/src/query/service/src/interpreters/interpreter_table_truncate.rs b/src/query/service/src/interpreters/interpreter_table_truncate.rs index 01e06f4c78e54..c47aa86bcaf19 100644 --- a/src/query/service/src/interpreters/interpreter_table_truncate.rs +++ b/src/query/service/src/interpreters/interpreter_table_truncate.rs @@ -39,6 +39,7 @@ impl Interpreter for TruncateTableInterpreter { "TruncateTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog_name = self.plan.catalog.as_str(); let db_name = self.plan.database.as_str(); diff --git a/src/query/service/src/interpreters/interpreter_table_undrop.rs b/src/query/service/src/interpreters/interpreter_table_undrop.rs index 
24b39b7e18414..ec0fc0f9ae998 100644 --- a/src/query/service/src/interpreters/interpreter_table_undrop.rs +++ b/src/query/service/src/interpreters/interpreter_table_undrop.rs @@ -39,6 +39,7 @@ impl Interpreter for UndropTableInterpreter { "UndropTableInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog_name = self.plan.catalog.as_str(); let catalog = self.ctx.get_catalog(catalog_name)?; diff --git a/src/query/service/src/interpreters/interpreter_unsetting.rs b/src/query/service/src/interpreters/interpreter_unsetting.rs index 4cf2dcb5898ab..77f1e8d3db5ce 100644 --- a/src/query/service/src/interpreters/interpreter_unsetting.rs +++ b/src/query/service/src/interpreters/interpreter_unsetting.rs @@ -42,6 +42,7 @@ impl Interpreter for UnSettingInterpreter { "SettingInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.set.clone(); let mut keys: Vec = vec![]; diff --git a/src/query/service/src/interpreters/interpreter_update.rs b/src/query/service/src/interpreters/interpreter_update.rs index bbfdc695a0a1a..c79daeef686d7 100644 --- a/src/query/service/src/interpreters/interpreter_update.rs +++ b/src/query/service/src/interpreters/interpreter_update.rs @@ -63,6 +63,7 @@ impl Interpreter for UpdateInterpreter { } #[tracing::instrument(level = "debug", name = "update_interpreter_execute", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog_name = self.plan.catalog.as_str(); let db_name = self.plan.database.as_str(); diff --git a/src/query/service/src/interpreters/interpreter_use_database.rs b/src/query/service/src/interpreters/interpreter_use_database.rs index 34cb2a9436fe4..a83d4a2ab706f 100644 --- a/src/query/service/src/interpreters/interpreter_use_database.rs +++ b/src/query/service/src/interpreters/interpreter_use_database.rs @@ -41,6 +41,7 @@ impl Interpreter for UseDatabaseInterpreter { "UseDatabaseInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { if self.plan.database.trim().is_empty() { return Err(ErrorCode::UnknownDatabase("No database selected")); diff --git a/src/query/service/src/interpreters/interpreter_user_alter.rs b/src/query/service/src/interpreters/interpreter_user_alter.rs index b4bb974efedec..e630fd33d52c9 100644 --- a/src/query/service/src/interpreters/interpreter_user_alter.rs +++ b/src/query/service/src/interpreters/interpreter_user_alter.rs @@ -42,6 +42,7 @@ impl Interpreter for AlterUserInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_user_create.rs b/src/query/service/src/interpreters/interpreter_user_create.rs index 7f0352fb77f30..a3923ccf5ad73 100644 --- a/src/query/service/src/interpreters/interpreter_user_create.rs +++ b/src/query/service/src/interpreters/interpreter_user_create.rs @@ -47,6 +47,7 @@ impl Interpreter for CreateUserInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_user_drop.rs b/src/query/service/src/interpreters/interpreter_user_drop.rs index 
9246b1f887545..471cae6240725 100644 --- a/src/query/service/src/interpreters/interpreter_user_drop.rs +++ b/src/query/service/src/interpreters/interpreter_user_drop.rs @@ -42,6 +42,7 @@ impl Interpreter for DropUserInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_user_stage_create.rs b/src/query/service/src/interpreters/interpreter_user_stage_create.rs index 0a6325f34c10f..25e037c8a4286 100644 --- a/src/query/service/src/interpreters/interpreter_user_stage_create.rs +++ b/src/query/service/src/interpreters/interpreter_user_stage_create.rs @@ -45,6 +45,7 @@ impl Interpreter for CreateUserStageInterpreter { } #[tracing::instrument(level = "info", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let user_mgr = UserApiProvider::instance(); diff --git a/src/query/service/src/interpreters/interpreter_user_stage_drop.rs b/src/query/service/src/interpreters/interpreter_user_stage_drop.rs index 9bb7a506fad82..1b48a63740430 100644 --- a/src/query/service/src/interpreters/interpreter_user_stage_drop.rs +++ b/src/query/service/src/interpreters/interpreter_user_stage_drop.rs @@ -46,6 +46,7 @@ impl Interpreter for DropUserStageInterpreter { } #[tracing::instrument(level = "info", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_user_stage_remove.rs b/src/query/service/src/interpreters/interpreter_user_stage_remove.rs index 9331699d4f6fc..3059e89546487 100644 --- a/src/query/service/src/interpreters/interpreter_user_stage_remove.rs +++ b/src/query/service/src/interpreters/interpreter_user_stage_remove.rs @@ -45,6 +45,7 @@ impl Interpreter for RemoveUserStageInterpreter { } #[tracing::instrument(level = "info", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let op = StageTable::get_op(&self.plan.stage)?; diff --git a/src/query/service/src/interpreters/interpreter_user_udf_alter.rs b/src/query/service/src/interpreters/interpreter_user_udf_alter.rs index 82f0a8da4e129..e9c26454e8332 100644 --- a/src/query/service/src/interpreters/interpreter_user_udf_alter.rs +++ b/src/query/service/src/interpreters/interpreter_user_udf_alter.rs @@ -42,6 +42,7 @@ impl Interpreter for AlterUserUDFInterpreter { } #[tracing::instrument(level = "debug", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); diff --git a/src/query/service/src/interpreters/interpreter_user_udf_create.rs b/src/query/service/src/interpreters/interpreter_user_udf_create.rs index 95676bf3c0af4..610303cd46381 100644 --- a/src/query/service/src/interpreters/interpreter_user_udf_create.rs +++ b/src/query/service/src/interpreters/interpreter_user_udf_create.rs @@ -42,6 +42,7 @@ impl Interpreter for CreateUserUDFInterpreter { } #[tracing::instrument(level = "info", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = 
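
Where a method already carries `#[tracing::instrument]`, the patch consistently places `#[async_backtrace::framed]` second. Attribute macros expand top to bottom, so with this ordering the tracing span should end up wrapping the backtrace frame rather than the other way around. A hedged sketch (`Plan` and `run` are hypothetical stand-ins, not Databend items):

```rust
#[derive(Debug)]
struct Plan;

// `instrument` expands first, so the span is entered outside the frame.
#[tracing::instrument(level = "debug", skip(plan))]
#[async_backtrace::framed]
async fn run(plan: Plan) -> std::io::Result<()> {
    let _ = plan; // a real interpreter would build and run a pipeline here
    Ok(())
}
```
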
self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_user_udf_drop.rs b/src/query/service/src/interpreters/interpreter_user_udf_drop.rs index bdd2359671dbb..25032e0ea0f17 100644 --- a/src/query/service/src/interpreters/interpreter_user_udf_drop.rs +++ b/src/query/service/src/interpreters/interpreter_user_udf_drop.rs @@ -42,6 +42,7 @@ impl Interpreter for DropUserUDFInterpreter { } #[tracing::instrument(level = "info", skip(self), fields(ctx.id = self.ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn execute2(&self) -> Result { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_view_alter.rs b/src/query/service/src/interpreters/interpreter_view_alter.rs index 6df23474bb3c3..9d36c91fe0eb1 100644 --- a/src/query/service/src/interpreters/interpreter_view_alter.rs +++ b/src/query/service/src/interpreters/interpreter_view_alter.rs @@ -47,6 +47,7 @@ impl Interpreter for AlterViewInterpreter { "AlterViewInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { // check whether view has exists if !self @@ -71,6 +72,7 @@ impl Interpreter for AlterViewInterpreter { } impl AlterViewInterpreter { + #[async_backtrace::framed] async fn alter_view(&self) -> Result { // drop view let catalog = self.ctx.get_catalog(&self.plan.catalog)?; diff --git a/src/query/service/src/interpreters/interpreter_view_create.rs b/src/query/service/src/interpreters/interpreter_view_create.rs index 9c1c5b875ea17..7aef9f9d153f8 100644 --- a/src/query/service/src/interpreters/interpreter_view_create.rs +++ b/src/query/service/src/interpreters/interpreter_view_create.rs @@ -48,6 +48,7 @@ impl Interpreter for CreateViewInterpreter { "CreateViewInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { // check whether view has exists if self @@ -69,6 +70,7 @@ impl Interpreter for CreateViewInterpreter { } impl CreateViewInterpreter { + #[async_backtrace::framed] async fn create_view(&self) -> Result { let catalog = self.ctx.get_catalog(&self.plan.catalog)?; let tenant = self.ctx.get_tenant(); diff --git a/src/query/service/src/interpreters/interpreter_view_drop.rs b/src/query/service/src/interpreters/interpreter_view_drop.rs index e08ad957efcdb..d5796338295c0 100644 --- a/src/query/service/src/interpreters/interpreter_view_drop.rs +++ b/src/query/service/src/interpreters/interpreter_view_drop.rs @@ -42,6 +42,7 @@ impl Interpreter for DropViewInterpreter { "DropViewInterpreter" } + #[async_backtrace::framed] async fn execute2(&self) -> Result { let catalog_name = self.plan.catalog.clone(); let db_name = self.plan.database.clone(); diff --git a/src/query/service/src/metrics/metric_service.rs b/src/query/service/src/metrics/metric_service.rs index 0d1d5245d6645..fdb7d975f8d3b 100644 --- a/src/query/service/src/metrics/metric_service.rs +++ b/src/query/service/src/metrics/metric_service.rs @@ -30,6 +30,7 @@ pub struct MetricService { } #[poem::handler] +#[async_backtrace::framed] pub async fn metric_handler(prom_extension: Data<&PrometheusHandle>) -> impl IntoResponse { prom_extension.0.render() } @@ -42,6 +43,7 @@ impl MetricService { }) } + #[async_backtrace::framed] async fn start_without_tls(&mut self, listening: SocketAddr) -> Result { let prometheus_handle = common_metrics::try_handle().unwrap(); @@ -58,10 +60,12 @@ impl MetricService { #[async_trait::async_trait] impl Server for MetricService { + #[async_backtrace::framed] async fn 
shutdown(&mut self, graceful: bool) { self.shutdown_handler.shutdown(graceful).await; } + #[async_backtrace::framed] async fn start(&mut self, listening: SocketAddr) -> Result { let res = self.start_without_tls(listening).await; diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_aggregate_spill_writer.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_aggregate_spill_writer.rs index ce2b2050597bd..8fce4e7746290 100644 --- a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_aggregate_spill_writer.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_aggregate_spill_writer.rs @@ -160,6 +160,7 @@ impl Processor for TransformAggregateSpillWriter Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if let Some(spilling_future) = self.spilling_future.take() { return spilling_future.await; diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_group_by_spill_writer.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_group_by_spill_writer.rs index 01d4facc4fcc4..08828462003da 100644 --- a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_group_by_spill_writer.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_group_by_spill_writer.rs @@ -157,6 +157,7 @@ impl Processor for TransformGroupBySpillWriter Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if let Some(spilling_future) = self.spilling_future.take() { return spilling_future.await; diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_scatter_aggregate_spill_writer.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_scatter_aggregate_spill_writer.rs index a20e77dd69634..ec039dd988435 100644 --- a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_scatter_aggregate_spill_writer.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_scatter_aggregate_spill_writer.rs @@ -161,6 +161,7 @@ impl Processor for TransformScatterAggregateSpillWriter Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { let spilling_futures = std::mem::take(&mut self.spilling_futures); futures::future::try_join_all(spilling_futures).await?; diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_scatter_group_by_spill_writer.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_scatter_group_by_spill_writer.rs index fe314066cdeea..9fb01e54daff0 100644 --- a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_scatter_group_by_spill_writer.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_scatter_group_by_spill_writer.rs @@ -156,6 +156,7 @@ impl Processor for TransformScatterGroupBySpillWriter< Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { let spilling_futures = std::mem::take(&mut self.spilling_futures); futures::future::try_join_all(spilling_futures).await?; diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_spill_reader.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_spill_reader.rs index 93a8075ae5935..da1c45480076e ---
a/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_spill_reader.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/serde/transform_spill_reader.rs @@ -167,6 +167,7 @@ impl Processor Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if let Some(block_meta) = self.reading_meta.take() { match &block_meta { @@ -198,25 +199,27 @@ impl Processor if let AggregateMeta::Spilled(payload) = meta { let location = payload.location.clone(); let operator = self.operator.clone(); - read_data.push(common_base::base::tokio::spawn(async move { - let instant = Instant::now(); - let data = operator.read(&location).await?; - - if let Err(cause) = operator.delete(&location).await { - error!( - "Cannot delete spill file {}, cause: {:?}", - location, cause + read_data.push(common_base::base::tokio::spawn( + async_backtrace::frame!(async move { + let instant = Instant::now(); + let data = operator.read(&location).await?; + + if let Err(cause) = operator.delete(&location).await { + error!( + "Cannot delete spill file {}, cause: {:?}", + location, cause + ); + } + + info!( + "Read aggregate spill {} successfully, elapsed: {:?}", + location, + instant.elapsed() ); - } - info!( - "Read aggregate spill {} successfully, elapsed: {:?}", - location, - instant.elapsed() - ); - - Ok(data) - })); + Ok(data) + }), + )); } } diff --git a/src/query/service/src/pipelines/processors/transforms/hash_join/hash_join_state_impl.rs b/src/query/service/src/pipelines/processors/transforms/hash_join/hash_join_state_impl.rs index 22c69af8c8d22..0c4a5538428dd 100644 --- a/src/query/service/src/pipelines/processors/transforms/hash_join/hash_join_state_impl.rs +++ b/src/query/service/src/pipelines/processors/transforms/hash_join/hash_join_state_impl.rs @@ -226,6 +226,7 @@ impl HashJoinState for JoinHashTable { Ok(()) } + #[async_backtrace::framed] async fn wait_finish(&self) -> Result<()> { let notified = { let finished_guard = self.is_finished.lock().unwrap(); diff --git a/src/query/service/src/pipelines/processors/transforms/profile_wrapper.rs b/src/query/service/src/pipelines/processors/transforms/profile_wrapper.rs index 821c20bb16f58..9df76341b575b 100644 --- a/src/query/service/src/pipelines/processors/transforms/profile_wrapper.rs +++ b/src/query/service/src/pipelines/processors/transforms/profile_wrapper.rs @@ -78,6 +78,7 @@ where T: Processor + 'static Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { // TODO: record profile information for async process self.inner.async_process().await diff --git a/src/query/service/src/pipelines/processors/transforms/runtime_filter/runtime_filter_source.rs b/src/query/service/src/pipelines/processors/transforms/runtime_filter/runtime_filter_source.rs index 57ba37bfb079c..0dc8b8d26e861 100644 --- a/src/query/service/src/pipelines/processors/transforms/runtime_filter/runtime_filter_source.rs +++ b/src/query/service/src/pipelines/processors/transforms/runtime_filter/runtime_filter_source.rs @@ -92,6 +92,7 @@ impl RuntimeFilterConnector for RuntimeFilterState { Ok(*self.finished.lock()) } + #[async_backtrace::framed] async fn wait_finish(&self) -> Result<()> { if !self.is_finished()? 
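
The spill-reader change above is the one place in this file group where an attribute is not enough: the read-and-delete future is handed to `tokio::spawn`, and a spawned task starts a fresh frame tree, so the patch wraps the future in `async_backtrace::frame!(...)` at the spawn site. A sketch of the pattern (`read_spill` is a hypothetical stand-in for the spill-read logic):

```rust
#[async_backtrace::framed]
async fn read_spill(location: String) -> std::io::Result<Vec<u8>> {
    // stand-in for: operator.read(&location).await, then operator.delete(...)
    Ok(location.into_bytes())
}

fn spawn_read(location: String) -> tokio::task::JoinHandle<std::io::Result<Vec<u8>>> {
    // `frame!` records this spawn site as the root frame of the new task,
    // so the detached read shows up in task dumps instead of being anonymous.
    tokio::spawn(async_backtrace::frame!(async move {
        read_spill(location).await
    }))
}
```
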
{ self.finished_notify.notified().await; diff --git a/src/query/service/src/pipelines/processors/transforms/transform_create_sets.rs b/src/query/service/src/pipelines/processors/transforms/transform_create_sets.rs index c22a2109fdb74..925b2a2dbb3cf 100644 --- a/src/query/service/src/pipelines/processors/transforms/transform_create_sets.rs +++ b/src/query/service/src/pipelines/processors/transforms/transform_create_sets.rs @@ -146,6 +146,7 @@ impl Processor for TransformCreateSets { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if !self.initialized { self.initialized = true; diff --git a/src/query/service/src/pipelines/processors/transforms/transform_hash_join.rs b/src/query/service/src/pipelines/processors/transforms/transform_hash_join.rs index 0709d76e53237..647ee7d7f37c9 100644 --- a/src/query/service/src/pipelines/processors/transforms/transform_hash_join.rs +++ b/src/query/service/src/pipelines/processors/transforms/transform_hash_join.rs @@ -169,6 +169,7 @@ impl Processor for TransformHashJoinProbe { } } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if let HashJoinStep::Build = &self.step { self.join_state.wait_finish().await?; diff --git a/src/query/service/src/pipelines/processors/transforms/transform_merge_block.rs b/src/query/service/src/pipelines/processors/transforms/transform_merge_block.rs index 3936520980cf7..a6c9e11e30d2e 100644 --- a/src/query/service/src/pipelines/processors/transforms/transform_merge_block.rs +++ b/src/query/service/src/pipelines/processors/transforms/transform_merge_block.rs @@ -194,6 +194,7 @@ impl Processor for TransformMergeBlock { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if !self.finished { if let Ok(result) = self.receiver.recv().await { diff --git a/src/query/service/src/pipelines/processors/transforms/transform_runtime_filter.rs b/src/query/service/src/pipelines/processors/transforms/transform_runtime_filter.rs index 60ae5eb640d9b..2a585b00183b9 100644 --- a/src/query/service/src/pipelines/processors/transforms/transform_runtime_filter.rs +++ b/src/query/service/src/pipelines/processors/transforms/transform_runtime_filter.rs @@ -148,6 +148,7 @@ impl Processor for TransformRuntimeFilter { } } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { if let RuntimeFilterStep::Collect = &self.step { self.connector.wait_finish().await?; diff --git a/src/query/service/src/procedures/admins/tenant_quota.rs b/src/query/service/src/procedures/admins/tenant_quota.rs index 60a205a30c292..2d3c7fec555eb 100644 --- a/src/query/service/src/procedures/admins/tenant_quota.rs +++ b/src/query/service/src/procedures/admins/tenant_quota.rs @@ -63,6 +63,7 @@ impl OneBlockProcedure for TenantQuotaProcedure { /// max_tables_per_database: u32 /// max_stages: u32 /// max_files_per_stage: u32 + #[async_backtrace::framed] async fn all_data(&self, ctx: Arc, args: Vec) -> Result { let mut tenant = ctx.get_tenant(); if !args.is_empty() { diff --git a/src/query/service/src/procedures/procedure.rs b/src/query/service/src/procedures/procedure.rs index ce9b7ea94b281..fe1fd635eb61d 100644 --- a/src/query/service/src/procedures/procedure.rs +++ b/src/query/service/src/procedures/procedure.rs @@ -152,6 +152,7 @@ mod impls { self.0.features() } + #[async_backtrace::framed] async fn eval( &self, ctx: Arc, @@ -195,6 +196,7 @@ mod impls { self.0.features() } + #[async_backtrace::framed] async fn eval( &self, ctx: Arc, diff --git 
a/src/query/service/src/procedures/systems/clustering_information.rs b/src/query/service/src/procedures/systems/clustering_information.rs index d4b8a32c534b7..b2d8965e49d25 100644 --- a/src/query/service/src/procedures/systems/clustering_information.rs +++ b/src/query/service/src/procedures/systems/clustering_information.rs @@ -46,6 +46,7 @@ impl OneBlockProcedure for ClusteringInformationProcedure { ProcedureFeatures::default().num_arguments(2) } + #[async_backtrace::framed] async fn all_data(&self, ctx: Arc, args: Vec) -> Result { let database_name = args[0].clone(); let table_name = args[1].clone(); diff --git a/src/query/service/src/procedures/systems/fuse_block.rs b/src/query/service/src/procedures/systems/fuse_block.rs index 9799f2e57eeac..2baba45c7415a 100644 --- a/src/query/service/src/procedures/systems/fuse_block.rs +++ b/src/query/service/src/procedures/systems/fuse_block.rs @@ -44,6 +44,7 @@ impl OneBlockProcedure for FuseBlockProcedure { ProcedureFeatures::default().variadic_arguments(2, 3) } + #[async_backtrace::framed] async fn all_data(&self, ctx: Arc, args: Vec) -> Result { let database_name = args[0].clone(); let table_name = args[1].clone(); diff --git a/src/query/service/src/procedures/systems/fuse_segment.rs b/src/query/service/src/procedures/systems/fuse_segment.rs index bc882c32ce1fa..c067422c1b239 100644 --- a/src/query/service/src/procedures/systems/fuse_segment.rs +++ b/src/query/service/src/procedures/systems/fuse_segment.rs @@ -44,6 +44,7 @@ impl OneBlockProcedure for FuseSegmentProcedure { ProcedureFeatures::default().num_arguments(3) } + #[async_backtrace::framed] async fn all_data(&self, ctx: Arc, args: Vec) -> Result { let database_name = args[0].clone(); let table_name = args[1].clone(); diff --git a/src/query/service/src/procedures/systems/fuse_snapshot.rs b/src/query/service/src/procedures/systems/fuse_snapshot.rs index 92f1a59e80abc..d90f9a1db8ee7 100644 --- a/src/query/service/src/procedures/systems/fuse_snapshot.rs +++ b/src/query/service/src/procedures/systems/fuse_snapshot.rs @@ -45,6 +45,7 @@ impl OneBlockProcedure for FuseSnapshotProcedure { ProcedureFeatures::default().variadic_arguments(2, 3) } + #[async_backtrace::framed] async fn all_data(&self, ctx: Arc, args: Vec) -> Result { let database_name = args[0].clone(); let table_name = args[1].clone(); diff --git a/src/query/service/src/procedures/systems/search_tables.rs b/src/query/service/src/procedures/systems/search_tables.rs index fde0a626bd26d..222631990e0bb 100644 --- a/src/query/service/src/procedures/systems/search_tables.rs +++ b/src/query/service/src/procedures/systems/search_tables.rs @@ -50,6 +50,7 @@ impl OneBlockProcedure for SearchTablesProcedure { .management_mode_required(true) } + #[async_backtrace::framed] async fn all_data(&self, ctx: Arc, args: Vec) -> Result { let query = format!( "SELECT * FROM system.tables WHERE name like '%{}%' ORDER BY database, name", diff --git a/src/query/service/src/schedulers/scheduler.rs b/src/query/service/src/schedulers/scheduler.rs index 995119f1469db..18296e20f06fe 100644 --- a/src/query/service/src/schedulers/scheduler.rs +++ b/src/query/service/src/schedulers/scheduler.rs @@ -30,6 +30,7 @@ use crate::sql::ColumnBinding; /// Build query pipeline from physical plan. /// If plan is distributed plan it will build_distributed_pipeline /// else build_local_pipeline. 
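
Free functions such as `build_query_pipeline` below get the same attribute. Futures that are built by a framework rather than written as an `async fn` under our control (the tonic server future in `flight_sql_server.rs`, the MySQL `listen_loop` future further down) are instead framed at the call site with `async_backtrace::location!().frame(...)`. A sketch of that second form (`serve` is a hypothetical stand-in):

```rust
async fn serve() {
    // stand-in for a framework-built future, e.g. tonic's
    // serve_with_incoming_shutdown(...) in flight_sql_server.rs
}

#[tokio::main]
async fn main() {
    // `location!()` captures the call site; `.frame(...)` wraps the future
    // so the spawned server task is rooted at this line in task dumps.
    tokio::spawn(async_backtrace::location!().frame(serve()))
        .await
        .unwrap();
}
```
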
+#[async_backtrace::framed] pub async fn build_query_pipeline( ctx: &Arc, result_columns: &[ColumnBinding], @@ -60,6 +61,7 @@ pub async fn build_query_pipeline( } /// Build local pipeline. +#[async_backtrace::framed] pub async fn build_local_pipeline( ctx: &Arc, plan: &PhysicalPlan, @@ -75,6 +77,7 @@ pub async fn build_local_pipeline( } /// Build distributed pipeline via fragment and actions. +#[async_backtrace::framed] pub async fn build_distributed_pipeline( ctx: &Arc, plan: &PhysicalPlan, diff --git a/src/query/service/src/servers/flight_sql/flight_sql_server.rs b/src/query/service/src/servers/flight_sql/flight_sql_server.rs index 0ec57065b23d9..30f76369d9dad 100644 --- a/src/query/service/src/servers/flight_sql/flight_sql_server.rs +++ b/src/query/service/src/servers/flight_sql/flight_sql_server.rs @@ -44,6 +44,7 @@ impl FlightSQLServer { })) } + #[async_backtrace::framed] async fn listener_tcp(listening: SocketAddr) -> Result<(TcpListenerStream, SocketAddr)> { let listener = TcpListener::bind(listening).await.map_err(|e| { ErrorCode::TokioError(format!("{{{}:{}}} {}", listening.ip(), listening.port(), e)) @@ -60,6 +61,7 @@ impl FlightSQLServer { } #[allow(unused)] + #[async_backtrace::framed] async fn server_tls_config(conf: &InnerConfig) -> Result { let cert = tokio::fs::read(conf.query.flight_sql_tls_server_cert.as_str()).await?; let key = tokio::fs::read(conf.query.flight_sql_tls_server_key.as_str()).await?; @@ -68,6 +70,7 @@ impl FlightSQLServer { Ok(tls_conf) } + #[async_backtrace::framed] pub async fn start_with_incoming(&mut self, listener_stream: TcpListenerStream) -> Result<()> { let flight_sql_service = FlightSqlServiceImpl::create(); let builder = Server::builder(); @@ -90,15 +93,17 @@ impl FlightSQLServer { .add_service(FlightServiceServer::new(flight_sql_service)) .serve_with_incoming_shutdown(listener_stream, self.shutdown_notify()); - tokio::spawn(server); + tokio::spawn(async_backtrace::location!().frame(server)); Ok(()) } } #[async_trait::async_trait] impl DatabendQueryServer for FlightSQLServer { + #[async_backtrace::framed] async fn shutdown(&mut self, _graceful: bool) {} + #[async_backtrace::framed] async fn start(&mut self, listening: SocketAddr) -> Result { let (listener_stream, listener_addr) = Self::listener_tcp(listening).await?; self.start_with_incoming(listener_stream).await?; diff --git a/src/query/service/src/servers/flight_sql/flight_sql_service/query.rs b/src/query/service/src/servers/flight_sql/flight_sql_service/query.rs index 7269bc83483d4..05f8d83382973 100644 --- a/src/query/service/src/servers/flight_sql/flight_sql_service/query.rs +++ b/src/query/service/src/servers/flight_sql/flight_sql_service/query.rs @@ -47,6 +47,7 @@ impl FlightSqlServiceImpl { batches_to_flight_data(schema, batches).map_err(|e| ErrorCode::Internal(format!("{e:?}"))) } + #[async_backtrace::framed] pub(super) async fn plan_sql( &self, session: &Arc, @@ -61,6 +62,7 @@ impl FlightSqlServiceImpl { planner.plan_sql(query).await } + #[async_backtrace::framed] pub(super) async fn execute_update( &self, session: Arc, @@ -84,6 +86,7 @@ impl FlightSqlServiceImpl { Ok(affected_rows as i64) } + #[async_backtrace::framed] pub(super) async fn execute_query( &self, session: Arc, diff --git a/src/query/service/src/servers/flight_sql/flight_sql_service/service.rs b/src/query/service/src/servers/flight_sql/flight_sql_service/service.rs index 2159f96e19ff3..75ec0aed5ef4c 100644 --- a/src/query/service/src/servers/flight_sql/flight_sql_service/service.rs +++ 
b/src/query/service/src/servers/flight_sql/flight_sql_service/service.rs @@ -111,6 +111,7 @@ fn simple_flight_info(message: T) -> Response { impl FlightSqlService for FlightSqlServiceImpl { type FlightService = FlightSqlServiceImpl; + #[async_backtrace::framed] async fn do_handshake( &self, request: Request>, @@ -139,6 +140,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(resp) } + #[async_backtrace::framed] async fn do_get_fallback( &self, request: Request, @@ -165,6 +167,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(resp) } + #[async_backtrace::framed] async fn get_flight_info_statement( &self, query: CommandStatementQuery, @@ -175,6 +178,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(simple_flight_info(query)) } + #[async_backtrace::framed] async fn get_flight_info_prepared_statement( &self, cmd: CommandPreparedStatementQuery, @@ -223,6 +227,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(resp) } + #[async_backtrace::framed] async fn get_flight_info_catalogs( &self, query: CommandGetCatalogs, @@ -233,6 +238,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(simple_flight_info(query)) } + #[async_backtrace::framed] async fn get_flight_info_schemas( &self, query: CommandGetDbSchemas, @@ -243,6 +249,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(simple_flight_info(query)) } + #[async_backtrace::framed] async fn get_flight_info_tables( &self, query: CommandGetTables, @@ -253,6 +260,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(simple_flight_info(query)) } + #[async_backtrace::framed] async fn get_flight_info_table_types( &self, query: CommandGetTableTypes, @@ -263,6 +271,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(simple_flight_info(query)) } + #[async_backtrace::framed] async fn get_flight_info_sql_info( &self, query: CommandGetSqlInfo, @@ -273,6 +282,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(simple_flight_info(query)) } + #[async_backtrace::framed] async fn get_flight_info_primary_keys( &self, query: CommandGetPrimaryKeys, @@ -284,6 +294,7 @@ impl FlightSqlService for FlightSqlServiceImpl { )) } + #[async_backtrace::framed] async fn get_flight_info_exported_keys( &self, query: CommandGetExportedKeys, @@ -295,6 +306,7 @@ impl FlightSqlService for FlightSqlServiceImpl { )) } + #[async_backtrace::framed] async fn get_flight_info_imported_keys( &self, query: CommandGetImportedKeys, @@ -306,6 +318,7 @@ impl FlightSqlService for FlightSqlServiceImpl { )) } + #[async_backtrace::framed] async fn get_flight_info_cross_reference( &self, query: CommandGetCrossReference, @@ -318,6 +331,7 @@ impl FlightSqlService for FlightSqlServiceImpl { } // do_get + #[async_backtrace::framed] async fn do_get_statement( &self, ticket: TicketStatementQuery, @@ -327,6 +341,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Err(Status::unimplemented("do_get_statement not implemented")) } + #[async_backtrace::framed] async fn do_get_prepared_statement( &self, query: CommandPreparedStatementQuery, @@ -338,6 +353,7 @@ impl FlightSqlService for FlightSqlServiceImpl { )) } + #[async_backtrace::framed] async fn do_get_catalogs( &self, _query: CommandGetCatalogs, @@ -347,6 +363,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Err(Status::unimplemented("do_get_catalogs not implemented")) } + #[async_backtrace::framed] async fn do_get_schemas( &self, query: CommandGetDbSchemas, @@ -356,6 +373,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Err(Status::unimplemented("do_get_schemas not implemented")) } + 
#[async_backtrace::framed] async fn do_get_tables( &self, query: CommandGetTables, @@ -365,6 +383,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Err(Status::unimplemented("do_get_tables not implemented")) } + #[async_backtrace::framed] async fn do_get_table_types( &self, _query: CommandGetTableTypes, @@ -374,6 +393,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Err(Status::unimplemented("do_get_table_types not implemented")) } + #[async_backtrace::framed] async fn do_get_sql_info( &self, query: CommandGetSqlInfo, @@ -383,6 +403,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(Response::new(super::SqlInfoProvider::all_info()?)) } + #[async_backtrace::framed] async fn do_get_primary_keys( &self, query: CommandGetPrimaryKeys, @@ -392,6 +413,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Err(Status::unimplemented("do_get_primary_keys not implemented")) } + #[async_backtrace::framed] async fn do_get_exported_keys( &self, query: CommandGetExportedKeys, @@ -403,6 +425,7 @@ impl FlightSqlService for FlightSqlServiceImpl { )) } + #[async_backtrace::framed] async fn do_get_imported_keys( &self, query: CommandGetImportedKeys, @@ -414,6 +437,7 @@ impl FlightSqlService for FlightSqlServiceImpl { )) } + #[async_backtrace::framed] async fn do_get_cross_reference( &self, query: CommandGetCrossReference, @@ -426,6 +450,7 @@ impl FlightSqlService for FlightSqlServiceImpl { } // called by rust FlightSqlServiceClient, which is used in unit test. + #[async_backtrace::framed] async fn do_put_statement_update( &self, ticket: CommandStatementUpdate, @@ -446,6 +471,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(res) } + #[async_backtrace::framed] async fn do_put_prepared_statement_query( &self, query: CommandPreparedStatementQuery, @@ -471,6 +497,7 @@ impl FlightSqlService for FlightSqlServiceImpl { } // called by JDBC + #[async_backtrace::framed] async fn do_put_prepared_statement_update( &self, query: CommandPreparedStatementUpdate, @@ -492,6 +519,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(res) } + #[async_backtrace::framed] async fn do_action_create_prepared_statement( &self, query: ActionCreatePreparedStatementRequest, @@ -532,6 +560,7 @@ impl FlightSqlService for FlightSqlServiceImpl { Ok(res) } + #[async_backtrace::framed] async fn do_action_close_prepared_statement( &self, query: ActionClosePreparedStatementRequest, @@ -558,6 +587,7 @@ impl FlightSqlService for FlightSqlServiceImpl { } } + #[async_backtrace::framed] async fn register_sql_info(&self, id: i32, result: &SqlInfo) { tracing::info!("register_sql_info({id}, {result:?})"); } diff --git a/src/query/service/src/servers/flight_sql/flight_sql_service/session.rs b/src/query/service/src/servers/flight_sql/flight_sql_service/session.rs index 8c0f31029f1cf..455abf72bcfd0 100644 --- a/src/query/service/src/servers/flight_sql/flight_sql_service/session.rs +++ b/src/query/service/src/servers/flight_sql/flight_sql_service/session.rs @@ -78,6 +78,7 @@ impl FlightSqlServiceImpl { Ok((user.to_string(), pass.to_string())) } + #[async_backtrace::framed] pub(super) async fn auth_user_password( user: String, password: String, diff --git a/src/query/service/src/servers/http/clickhouse_handler.rs b/src/query/service/src/servers/http/clickhouse_handler.rs index a16ec78046702..2fe44001ded64 100644 --- a/src/query/service/src/servers/http/clickhouse_handler.rs +++ b/src/query/service/src/servers/http/clickhouse_handler.rs @@ -217,6 +217,7 @@ async fn execute( } #[poem::handler] +#[async_backtrace::framed] pub async fn 
clickhouse_handler_get( ctx: &HttpQueryContext, Query(params): Query, @@ -258,6 +259,7 @@ pub async fn clickhouse_handler_get( } #[poem::handler] +#[async_backtrace::framed] pub async fn clickhouse_handler_post( ctx: &HttpQueryContext, body: Body, @@ -417,6 +419,7 @@ pub async fn clickhouse_handler_post( } #[poem::handler] +#[async_backtrace::framed] pub async fn clickhouse_ping_handler() -> String { "OK.\n".to_string() } diff --git a/src/query/service/src/servers/http/http_services.rs b/src/query/service/src/servers/http/http_services.rs index ee0e311472e06..41cf3fc80f421 100644 --- a/src/query/service/src/servers/http/http_services.rs +++ b/src/query/service/src/servers/http/http_services.rs @@ -88,6 +88,7 @@ impl HttpHandler { ep.with(session_middleware).boxed() } + #[async_backtrace::framed] async fn build_router(&self, sock: SocketAddr) -> impl Endpoint { let ep_v1 = Route::new() .nest("/query", query_route()) @@ -138,6 +139,7 @@ impl HttpHandler { Ok(cfg) } + #[async_backtrace::framed] async fn start_with_tls(&mut self, listening: SocketAddr) -> Result { info!("Http Handler TLS enabled"); @@ -152,6 +154,7 @@ impl HttpHandler { .await } + #[async_backtrace::framed] async fn start_without_tls(&mut self, listening: SocketAddr) -> Result { let router = self.build_router(listening).await; self.shutdown_handler @@ -162,10 +165,12 @@ impl HttpHandler { #[async_trait::async_trait] impl Server for HttpHandler { + #[async_backtrace::framed] async fn shutdown(&mut self, graceful: bool) { self.shutdown_handler.shutdown(graceful).await; } + #[async_backtrace::framed] async fn start(&mut self, listening: SocketAddr) -> Result { let config = GlobalConfig::instance(); diff --git a/src/query/service/src/servers/http/middleware.rs b/src/query/service/src/servers/http/middleware.rs index d68cf21fb7857..1c95cb23ffa74 100644 --- a/src/query/service/src/servers/http/middleware.rs +++ b/src/query/service/src/servers/http/middleware.rs @@ -156,6 +156,7 @@ pub struct HTTPSessionEndpoint { pub auth_manager: Arc, } impl HTTPSessionEndpoint { + #[async_backtrace::framed] async fn auth(&self, req: &Request) -> Result { let credential = get_credential(req, self.kind)?; let session_manager = SessionManager::instance(); @@ -177,6 +178,7 @@ impl HTTPSessionEndpoint { impl Endpoint for HTTPSessionEndpoint { type Output = Response; + #[async_backtrace::framed] async fn call(&self, mut req: Request) -> PoemResult { // method, url, version, header info!("receive http handler request: {req:?},"); diff --git a/src/query/service/src/servers/http/v1/http_query_handlers.rs b/src/query/service/src/servers/http/v1/http_query_handlers.rs index af75d44460355..ba9c1f3983252 100644 --- a/src/query/service/src/servers/http/v1/http_query_handlers.rs +++ b/src/query/service/src/servers/http/v1/http_query_handlers.rs @@ -273,6 +273,7 @@ async fn query_page_handler( } #[poem::handler] +#[async_backtrace::framed] pub(crate) async fn query_handler( ctx: &HttpQueryContext, Json(req): Json, diff --git a/src/query/service/src/servers/http/v1/load.rs b/src/query/service/src/servers/http/v1/load.rs index 561bd5b2667f4..d2c643ecfe767 100644 --- a/src/query/service/src/servers/http/v1/load.rs +++ b/src/query/service/src/servers/http/v1/load.rs @@ -83,6 +83,7 @@ fn remove_quote(s: &[u8]) -> &[u8] { } #[poem::handler] +#[async_backtrace::framed] pub async fn streaming_load( ctx: &HttpQueryContext, req: &Request, @@ -276,6 +277,7 @@ async fn read_multi_part( Ok(files) } +#[async_backtrace::framed] pub async fn read_full(reader: &mut R, buf: &mut 
[u8]) -> Result { let mut buf = &mut buf[0..]; let mut n = 0; diff --git a/src/query/service/src/servers/http/v1/query/execute_state.rs b/src/query/service/src/servers/http/v1/query/execute_state.rs index fe66ff4888eaa..d8e89d4c3257b 100644 --- a/src/query/service/src/servers/http/v1/query/execute_state.rs +++ b/src/query/service/src/servers/http/v1/query/execute_state.rs @@ -138,6 +138,7 @@ impl Executor { } } + #[async_backtrace::framed] pub async fn start_to_running(this: &Arc>, state: ExecuteState) { let mut guard = this.write().await; if let Starting(_) = &guard.state { @@ -145,12 +146,14 @@ impl Executor { } } + #[async_backtrace::framed] pub async fn start_to_stop(this: &Arc>, state: ExecuteState) { let mut guard = this.write().await; if let Starting(_) = &guard.state { guard.state = state } } + #[async_backtrace::framed] pub async fn stop(this: &Arc>, reason: Result<()>, kill: bool) { { let guard = this.read().await; @@ -204,12 +207,14 @@ impl Executor { } impl ExecuteState { + #[async_backtrace::framed] pub(crate) async fn get_schema(sql: &str, ctx: Arc) -> Result { let mut planner = Planner::new(ctx.clone()); let (plan, _) = planner.plan_sql(sql).await?; Ok(InterpreterFactory::get_schema(ctx, &plan)) } + #[async_backtrace::framed] pub(crate) async fn try_start_query( executor: Arc>, sql: &str, diff --git a/src/query/service/src/servers/http/v1/query/http_query.rs b/src/query/service/src/servers/http/v1/query/http_query.rs index bfba4cbc7aefa..0e914348c905b 100644 --- a/src/query/service/src/servers/http/v1/query/http_query.rs +++ b/src/query/service/src/servers/http/v1/query/http_query.rs @@ -192,6 +192,7 @@ pub struct HttpQuery { } impl HttpQuery { + #[async_backtrace::framed] pub(crate) async fn try_create( ctx: &HttpQueryContext, request: HttpQueryRequest, @@ -333,6 +334,7 @@ impl HttpQuery { Ok(Arc::new(query)) } + #[async_backtrace::framed] pub async fn get_response_page(&self, page_no: usize) -> Result { let data = Some(self.get_page(page_no).await?); let state = self.get_state().await; @@ -351,6 +353,7 @@ impl HttpQuery { }) } + #[async_backtrace::framed] pub async fn get_response_state_only(&self) -> HttpQueryResponseInternal { HttpQueryResponseInternal { data: None, @@ -360,6 +363,7 @@ impl HttpQuery { } } + #[async_backtrace::framed] async fn get_state(&self) -> ResponseState { let state = self.state.read().await; let (exe_state, err) = state.state.extract(); @@ -372,6 +376,7 @@ impl HttpQuery { } } + #[async_backtrace::framed] async fn get_page(&self, page_no: usize) -> Result { let mut page_manager = self.page_manager.lock().await; let page = page_manager @@ -384,6 +389,7 @@ impl HttpQuery { Ok(response) } + #[async_backtrace::framed] pub async fn kill(&self) { Executor::stop( &self.state, @@ -393,11 +399,13 @@ impl HttpQuery { .await; } + #[async_backtrace::framed] pub async fn detach(&self) { let data = self.page_manager.lock().await; data.detach().await } + #[async_backtrace::framed] pub async fn update_expire_time(&self, before_wait: bool) { let duration = Duration::from_secs(self.config.result_timeout_secs) + if before_wait { @@ -410,12 +418,14 @@ impl HttpQuery { *t = ExpireState::ExpireAt(deadline); } + #[async_backtrace::framed] pub async fn mark_removed(&self) { let mut t = self.expire_state.lock().await; *t = ExpireState::Removed; } // return Duration to sleep + #[async_backtrace::framed] pub async fn check_expire(&self) -> ExpireResult { let expire_state = self.expire_state.lock().await; match *expire_state { diff --git 
a/src/query/service/src/servers/http/v1/query/http_query_context.rs b/src/query/service/src/servers/http/v1/query/http_query_context.rs index 2990ea3bd4ccf..746bda6a715e4 100644 --- a/src/query/service/src/servers/http/v1/query/http_query_context.rs +++ b/src/query/service/src/servers/http/v1/query/http_query_context.rs @@ -39,6 +39,7 @@ impl HttpQueryContext { #[async_trait::async_trait] impl<'a> FromRequest<'a> for &'a HttpQueryContext { + #[async_backtrace::framed] async fn from_request(req: &'a Request, _body: &mut RequestBody) -> PoemResult { Ok(req.extensions().get::().expect( "To use the `HttpQueryContext` extractor, the `HTTPSessionMiddleware` is required", diff --git a/src/query/service/src/servers/http/v1/query/http_query_manager.rs b/src/query/service/src/servers/http/v1/query/http_query_manager.rs index 66a06380ac719..5de6eb0c8aafc 100644 --- a/src/query/service/src/servers/http/v1/query/http_query_manager.rs +++ b/src/query/service/src/servers/http/v1/query/http_query_manager.rs @@ -48,6 +48,7 @@ pub struct HttpQueryManager { } impl HttpQueryManager { + #[async_backtrace::framed] pub async fn init(cfg: &InnerConfig) -> Result<()> { GlobalInstance::set(Arc::new(HttpQueryManager { queries: Arc::new(RwLock::new(HashMap::new())), @@ -64,6 +65,7 @@ impl HttpQueryManager { GlobalInstance::get() } + #[async_backtrace::framed] pub(crate) async fn try_create_query( self: &Arc, ctx: &HttpQueryContext, @@ -74,11 +76,13 @@ impl HttpQueryManager { Ok(query) } + #[async_backtrace::framed] pub(crate) async fn get_query(self: &Arc, query_id: &str) -> Option> { let queries = self.queries.read().await; queries.get(query_id).map(|q| q.to_owned()) } + #[async_backtrace::framed] async fn add_query(self: &Arc, query_id: &str, query: Arc) { let mut queries = self.queries.write().await; queries.insert(query_id.to_string(), query.clone()); @@ -113,6 +117,7 @@ impl HttpQueryManager { } // not remove it until timeout or cancelled by user, even if query execution is aborted + #[async_backtrace::framed] pub(crate) async fn remove_query(self: &Arc, query_id: &str) -> Option> { let mut queries = self.queries.write().await; let q = queries.remove(query_id); @@ -122,11 +127,13 @@ impl HttpQueryManager { q } + #[async_backtrace::framed] pub(crate) async fn get_session(self: &Arc, session_id: &str) -> Option> { let sessions = self.sessions.lock(); sessions.get(session_id) } + #[async_backtrace::framed] pub(crate) async fn add_session(self: &Arc, session: Arc, timeout: Duration) { let mut sessions = self.sessions.lock(); sessions.insert(session.get_id(), session, Some(timeout)); diff --git a/src/query/service/src/servers/http/v1/query/page_manager.rs b/src/query/service/src/servers/http/v1/query/page_manager.rs index b94ffc181c40d..638cc42e3d96f 100644 --- a/src/query/service/src/servers/http/v1/query/page_manager.rs +++ b/src/query/service/src/servers/http/v1/query/page_manager.rs @@ -97,6 +97,7 @@ impl PageManager { } } + #[async_backtrace::framed] pub async fn get_a_page(&mut self, page_no: usize, tp: &Wait) -> Result { let next_no = self.total_pages; if page_no == next_no && !self.end { @@ -143,6 +144,7 @@ impl PageManager { Ok(()) } + #[async_backtrace::framed] async fn collect_new_page(&mut self, tp: &Wait) -> Result<(JsonBlock, bool)> { let mut res: Vec> = Vec::with_capacity(self.max_rows_per_page); while res.len() < self.max_rows_per_page { @@ -207,6 +209,7 @@ impl PageManager { Ok((block, end)) } + #[async_backtrace::framed] pub async fn detach(&self) { self.block_receiver.close(); } diff --git 
a/src/query/service/src/servers/http/v1/query/sized_spsc.rs b/src/query/service/src/servers/http/v1/query/sized_spsc.rs index 7b57f48f6fd77..8b65ae046024f 100644 --- a/src/query/service/src/servers/http/v1/query/sized_spsc.rs +++ b/src/query/service/src/servers/http/v1/query/sized_spsc.rs @@ -114,6 +114,7 @@ impl SizedChannel { guard.try_recv() } + #[async_backtrace::framed] pub async fn send(&self, value: T, size: usize) -> bool { let mut to_send = value; loop { @@ -131,6 +132,7 @@ impl SizedChannel { } } + #[async_backtrace::framed] pub async fn recv(&self) -> Option { loop { match self.try_recv() { @@ -173,6 +175,7 @@ pub struct SizedChannelReceiver { } impl SizedChannelReceiver { + #[async_backtrace::framed] pub async fn recv(&self) -> Option { self.chan.recv().await } @@ -195,6 +198,7 @@ pub struct SizedChannelSender { } impl SizedChannelSender { + #[async_backtrace::framed] pub async fn send(&self, value: T, size: usize) -> bool { self.chan.send(value, size).await } diff --git a/src/query/service/src/servers/http/v1/stage.rs b/src/query/service/src/servers/http/v1/stage.rs index 88ce00892f262..7ba0a24a8c073 100644 --- a/src/query/service/src/servers/http/v1/stage.rs +++ b/src/query/service/src/servers/http/v1/stage.rs @@ -37,6 +37,7 @@ pub struct UploadToStageResponse { } #[poem::handler] +#[async_backtrace::framed] pub async fn upload_to_stage( ctx: &HttpQueryContext, req: &Request, diff --git a/src/query/service/src/servers/mysql/mysql_handler.rs b/src/query/service/src/servers/mysql/mysql_handler.rs index c0091df133347..e9d3728a112e1 100644 --- a/src/query/service/src/servers/mysql/mysql_handler.rs +++ b/src/query/service/src/servers/mysql/mysql_handler.rs @@ -62,6 +62,7 @@ impl MySQLHandler { })) } + #[async_backtrace::framed] async fn listener_tcp(listening: SocketAddr) -> Result<(TcpListenerStream, SocketAddr)> { let listener = tokio::net::TcpListener::bind(listening) .await @@ -117,6 +118,7 @@ impl MySQLHandler { }); } + #[async_backtrace::framed] async fn reject_session(stream: TcpStream, error: ErrorCode) { let (kind, message) = match error.code() { 41 => (ErrorKind::ER_TOO_MANY_USER_CONNECTIONS, error.message()), @@ -134,6 +136,7 @@ impl MySQLHandler { #[async_trait::async_trait] impl Server for MySQLHandler { + #[async_backtrace::framed] async fn shutdown(&mut self, graceful: bool) { if !graceful { return; @@ -151,6 +154,7 @@ impl Server for MySQLHandler { } } + #[async_backtrace::framed] async fn start(&mut self, listening: SocketAddr) -> Result { match self.abort_registration.take() { None => Err(ErrorCode::Internal("MySQLHandler already running.")), @@ -161,7 +165,9 @@ impl Server for MySQLHandler { )?); let (stream, listener) = Self::listener_tcp(listening).await?; let stream = Abortable::new(stream, registration); - self.join_handle = Some(tokio::spawn(self.listen_loop(stream, rejected_rt))); + self.join_handle = Some(tokio::spawn( + async_backtrace::location!().frame(self.listen_loop(stream, rejected_rt)), + )); Ok(listener) } } diff --git a/src/query/service/src/servers/mysql/mysql_interactive_worker.rs b/src/query/service/src/servers/mysql/mysql_interactive_worker.rs index 41971b41bc661..dad1918861d3c 100644 --- a/src/query/service/src/servers/mysql/mysql_interactive_worker.rs +++ b/src/query/service/src/servers/mysql/mysql_interactive_worker.rs @@ -91,6 +91,7 @@ impl AsyncMysqlShim for InteractiveWorke "mysql_native_password" } + #[async_backtrace::framed] async fn auth_plugin_for_username(&self, _user: &[u8]) -> &str { "mysql_native_password" } @@ -99,6 +100,7 @@ 
impl AsyncMysqlShim for InteractiveWorke self.salt } + #[async_backtrace::framed] async fn authenticate( &self, _auth_plugin: &str, @@ -126,6 +128,7 @@ impl AsyncMysqlShim for InteractiveWorke } } + #[async_backtrace::framed] async fn on_prepare<'a>( &'a mut self, query: &'a str, @@ -147,6 +150,7 @@ impl AsyncMysqlShim for InteractiveWorke self.base.do_prepare(query, writer).await } + #[async_backtrace::framed] async fn on_execute<'a>( &'a mut self, id: u32, @@ -170,11 +174,13 @@ impl AsyncMysqlShim for InteractiveWorke } /// https://dev.mysql.com/doc/internals/en/com-stmt-close.html + #[async_backtrace::framed] async fn on_close<'a>(&'a mut self, stmt_id: u32) where W: 'async_trait { self.base.do_close(stmt_id).await; } + #[async_backtrace::framed] async fn on_query<'a>( &'a mut self, query: &'a str, @@ -218,6 +224,7 @@ impl AsyncMysqlShim for InteractiveWorke write_result } + #[async_backtrace::framed] async fn on_init<'a>( &'a mut self, database_name: &'a str, @@ -243,6 +250,7 @@ impl AsyncMysqlShim for InteractiveWorke } impl InteractiveWorkerBase { + #[async_backtrace::framed] async fn authenticate(&self, salt: &[u8], info: CertifiedInfo) -> Result { let user_name = &info.user_name; let client_ip = info.user_client_address.split(':').collect::>()[0]; @@ -259,6 +267,7 @@ impl InteractiveWorkerBase { Ok(authed) } + #[async_backtrace::framed] async fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> { writer .error( @@ -269,6 +278,7 @@ impl InteractiveWorkerBase { Ok(()) } + #[async_backtrace::framed] async fn do_execute( &mut self, _: u32, @@ -284,6 +294,7 @@ impl InteractiveWorkerBase { Ok(()) } + #[async_backtrace::framed] async fn do_close(&mut self, _: u32) {} // Check the query is a federated or driver setup command. @@ -298,6 +309,7 @@ impl InteractiveWorkerBase { } #[tracing::instrument(level = "debug", skip(self))] + #[async_backtrace::framed] async fn do_query(&mut self, query: &str) -> Result { match self.federated_server_command_check(query) { Some((schema, data_block)) => { @@ -348,6 +360,7 @@ impl InteractiveWorkerBase { } #[tracing::instrument(level = "debug", skip(interpreter, context))] + #[async_backtrace::framed] async fn exec_query( interpreter: Arc, context: &Arc, @@ -388,6 +401,7 @@ impl InteractiveWorkerBase { query_result.map(|data| (data, Some(reporter))) } + #[async_backtrace::framed] async fn do_init(&mut self, database_name: &str) -> Result<()> { if database_name.is_empty() { return Ok(()); diff --git a/src/query/service/src/servers/mysql/reject_connection.rs b/src/query/service/src/servers/mysql/reject_connection.rs index 74ba3e9edf63a..87a345387c260 100644 --- a/src/query/service/src/servers/mysql/reject_connection.rs +++ b/src/query/service/src/servers/mysql/reject_connection.rs @@ -21,6 +21,7 @@ use opensrv_mysql::ErrorKind; pub struct RejectConnection; impl RejectConnection { + #[async_backtrace::framed] pub async fn reject_mysql_connection( mut stream: TcpStream, code: ErrorKind, @@ -44,6 +45,7 @@ impl RejectConnection { Ok(()) } + #[async_backtrace::framed] async fn send_handshake(stream: &mut TcpStream) -> Result<()> { // Send handshake, packet from opensrv-mysql. 
Packet[seq = 0] stream @@ -60,6 +62,7 @@ impl RejectConnection { Ok(()) } + #[async_backtrace::framed] async fn receive_handshake_response(stream: &mut TcpStream) -> Result<()> { let mut buffer = vec![0; 4]; stream.read_exact(&mut buffer).await?; diff --git a/src/query/service/src/servers/mysql/writers/init_result_writer.rs b/src/query/service/src/servers/mysql/writers/init_result_writer.rs index 88c7cccadb889..74bf6475d1b21 100644 --- a/src/query/service/src/servers/mysql/writers/init_result_writer.rs +++ b/src/query/service/src/servers/mysql/writers/init_result_writer.rs @@ -27,6 +27,7 @@ impl<'a, W: AsyncWrite + Send + Unpin> DFInitResultWriter<'a, W> { DFInitResultWriter::<'a, W> { inner: Some(inner) } } + #[async_backtrace::framed] pub async fn write(&mut self, query_result: Result<()>) -> Result<()> { if let Some(writer) = self.inner.take() { match query_result { @@ -38,11 +39,13 @@ impl<'a, W: AsyncWrite + Send + Unpin> DFInitResultWriter<'a, W> { Ok(()) } + #[async_backtrace::framed] async fn ok(writer: InitWriter<'a, W>) -> Result<()> { writer.ok().await?; Ok(()) } + #[async_backtrace::framed] async fn err(error: &ErrorCode, writer: InitWriter<'a, W>) -> Result<()> { error!("OnInit Error: {:?}", error); writer diff --git a/src/query/service/src/servers/mysql/writers/query_result_writer.rs b/src/query/service/src/servers/mysql/writers/query_result_writer.rs index b125d7530cc76..11fbb75f512d2 100644 --- a/src/query/service/src/servers/mysql/writers/query_result_writer.rs +++ b/src/query/service/src/servers/mysql/writers/query_result_writer.rs @@ -88,6 +88,7 @@ impl<'a, W: AsyncWrite + Send + Unpin> DFQueryResultWriter<'a, W> { DFQueryResultWriter::<'a, W> { inner: Some(inner) } } + #[async_backtrace::framed] pub async fn write( &mut self, query_result: Result, @@ -102,6 +103,7 @@ impl<'a, W: AsyncWrite + Send + Unpin> DFQueryResultWriter<'a, W> { Ok(()) } + #[async_backtrace::framed] async fn ok( mut query_result: QueryResult, dataset_writer: QueryResultWriter<'a, W>, @@ -290,6 +292,7 @@ impl<'a, W: AsyncWrite + Send + Unpin> DFQueryResultWriter<'a, W> { } } + #[async_backtrace::framed] async fn err(error: &ErrorCode, writer: QueryResultWriter<'a, W>) -> Result<()> { if error.code() != ErrorCode::ABORTED_QUERY && error.code() != ErrorCode::ABORTED_SESSION { error!("OnQuery Error: {:?}", error); diff --git a/src/query/service/src/servers/server.rs b/src/query/service/src/servers/server.rs index 1039071d78e9d..4e86c3f7908fd 100644 --- a/src/query/service/src/servers/server.rs +++ b/src/query/service/src/servers/server.rs @@ -53,6 +53,7 @@ impl ShutdownHandle { shutdown: Arc::new(AtomicBool::new(false)), }) } + #[async_backtrace::framed] async fn shutdown_services(&mut self, graceful: bool) { let mut shutdown_jobs = vec![]; for service in &mut self.services { @@ -61,6 +62,7 @@ impl ShutdownHandle { futures::future::join_all(shutdown_jobs).await; } + #[async_backtrace::framed] pub async fn shutdown(&mut self, mut signal: SignalStream) { self.shutdown_services(true).await; ClusterDiscovery::instance() @@ -70,6 +72,7 @@ impl ShutdownHandle { self.shutdown_services(false).await; } + #[async_backtrace::framed] pub async fn wait_for_termination_request(&mut self) { match signal_stream() { Err(cause) => { diff --git a/src/query/service/src/sessions/query_ctx.rs b/src/query/service/src/sessions/query_ctx.rs index dea8c7b726f98..9dbea6e9d3062 100644 --- a/src/query/service/src/sessions/query_ctx.rs +++ b/src/query/service/src/sessions/query_ctx.rs @@ -135,6 +135,7 @@ impl QueryContext { 
StageTable::try_create(table_info.clone()) } + #[async_backtrace::framed] pub async fn set_current_database(&self, new_database_name: String) -> Result<()> { let tenant_id = self.get_tenant(); let catalog = self.get_catalog(self.get_current_catalog().as_str())?; @@ -461,6 +462,7 @@ impl TableContext for QueryContext { self.shared.consume_precommit_blocks() } + #[async_backtrace::framed] async fn get_file_format(&self, name: &str) -> Result { let opt = match StageFileFormatType::from_str(name) { Ok(typ) => FileFormatOptions::default_by_type(typ), @@ -483,6 +485,7 @@ impl TableContext for QueryContext { /// ```sql /// SELECT * FROM (SELECT * FROM db.table_name) as subquery_1, (SELECT * FROM db.table_name) AS subquery_2 /// ``` + #[async_backtrace::framed] async fn get_table( &self, catalog: &str, @@ -492,6 +495,7 @@ impl TableContext for QueryContext { self.shared.get_table(catalog, database, table).await } + #[async_backtrace::framed] async fn filter_out_copied_files( &self, catalog_name: &str, diff --git a/src/query/service/src/sessions/query_ctx_shared.rs b/src/query/service/src/sessions/query_ctx_shared.rs index 1b89ec1b6cacc..b94b863b3dcf6 100644 --- a/src/query/service/src/sessions/query_ctx_shared.rs +++ b/src/query/service/src/sessions/query_ctx_shared.rs @@ -203,6 +203,7 @@ impl QueryContextShared { self.session.apply_changed_settings(changes) } + #[async_backtrace::framed] pub async fn get_table( &self, catalog: &str, @@ -225,6 +226,7 @@ impl QueryContextShared { } } + #[async_backtrace::framed] async fn get_table_to_cache( &self, catalog: &str, diff --git a/src/query/service/src/sessions/session.rs b/src/query/service/src/sessions/session.rs index 02c19d472f9d9..62eaedd31b559 100644 --- a/src/query/service/src/sessions/session.rs +++ b/src/query/service/src/sessions/session.rs @@ -122,6 +122,7 @@ impl Session { /// Create a query context for query. /// For a query, execution environment(e.g cluster) should be immutable. /// We can bind the environment to the context in create_context method. + #[async_backtrace::framed] pub async fn create_query_context(self: &Arc) -> Result> { let config = GlobalConfig::instance(); let session = self.clone(); @@ -184,6 +185,7 @@ impl Session { // HTTP handler, clickhouse query handler, mysql query handler. auth_role represents the role // granted by external authenticator, it will over write the current user's granted roles, and // becomes the CURRENT ROLE if not set X-DATABEND-ROLE. + #[async_backtrace::framed] pub async fn set_authed_user( self: &Arc, user: UserInfo, @@ -196,6 +198,7 @@ impl Session { } // ensure_current_role() is called after authentication and before any privilege checks + #[async_backtrace::framed] async fn ensure_current_role(self: &Arc) -> Result<()> { let tenant = self.get_current_tenant(); let public_role = RoleCacheManager::instance() @@ -237,6 +240,7 @@ impl Session { Ok(()) } + #[async_backtrace::framed] pub async fn validate_available_role(self: &Arc, role_name: &str) -> Result { let available_roles = self.get_all_available_roles().await?; let role = available_roles.iter().find(|r| r.name == role_name); @@ -258,6 +262,7 @@ impl Session { // Only the available role can be set as current role. The current role can be set by the SET // ROLE statement, or by the X-DATABEND-ROLE header in HTTP protocol (not implemented yet). 
+ #[async_backtrace::framed] pub async fn set_current_role_checked(self: &Arc, role_name: &str) -> Result<()> { let role = self.validate_available_role(role_name).await?; self.session_ctx.set_current_role(Some(role)); @@ -275,6 +280,7 @@ impl Session { // Returns all the roles the current session has. If the user have been granted auth_role, // the other roles will be ignored. // On executing SET ROLE, the role have to be one of the available roles. + #[async_backtrace::framed] pub async fn get_all_available_roles(self: &Arc) -> Result> { let roles = match self.session_ctx.get_auth_role() { Some(auth_role) => vec![auth_role], @@ -291,6 +297,7 @@ impl Session { Ok(related_roles) } + #[async_backtrace::framed] pub async fn validate_privilege( self: &Arc, object: &GrantObject, diff --git a/src/query/service/src/sessions/session_mgr.rs b/src/query/service/src/sessions/session_mgr.rs index 4873528b3cafa..2e797e767f47f 100644 --- a/src/query/service/src/sessions/session_mgr.rs +++ b/src/query/service/src/sessions/session_mgr.rs @@ -78,6 +78,7 @@ impl SessionManager { GlobalInstance::get() } + #[async_backtrace::framed] pub async fn create_session(&self, typ: SessionType) -> Result> { { let sessions = self.active_sessions.read(); diff --git a/src/query/service/src/stream/table_read_block_stream.rs b/src/query/service/src/stream/table_read_block_stream.rs index 74af67fb4c933..4e41e5c4690f8 100644 --- a/src/query/service/src/stream/table_read_block_stream.rs +++ b/src/query/service/src/stream/table_read_block_stream.rs @@ -37,6 +37,7 @@ pub trait ReadDataBlockStream: Send + Sync { #[async_trait::async_trait] impl ReadDataBlockStream for T { + #[async_backtrace::framed] async fn read_data_block_stream( &self, ctx: Arc, diff --git a/src/query/service/src/table_functions/async_crash_me.rs b/src/query/service/src/table_functions/async_crash_me.rs index 0963d241da68d..e073646cd27d9 100644 --- a/src/query/service/src/table_functions/async_crash_me.rs +++ b/src/query/service/src/table_functions/async_crash_me.rs @@ -106,6 +106,7 @@ impl Table for AsyncCrashMeTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _: Arc, @@ -157,6 +158,7 @@ impl AsyncSource for AsyncCrashMeSource { const NAME: &'static str = "async_crash_me"; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { match &self.message { None => panic!("async crash me panic"), diff --git a/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs b/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs index 0e1f5526b0fe1..0189fe936ab40 100644 --- a/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs +++ b/src/query/service/src/table_functions/infer_schema/infer_schema_table.rs @@ -106,6 +106,7 @@ impl Table for InferSchemaTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, @@ -168,6 +169,7 @@ impl AsyncSource for InferSchemaSource { const NAME: &'static str = INFER_SCHEMA; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.is_finished { return Ok(None); diff --git a/src/query/service/src/table_functions/list_stage/list_stage_table.rs b/src/query/service/src/table_functions/list_stage/list_stage_table.rs index d931fdc52512f..c1e849bfa45f4 100644 --- a/src/query/service/src/table_functions/list_stage/list_stage_table.rs +++ b/src/query/service/src/table_functions/list_stage/list_stage_table.rs @@ -110,6 
+110,7 @@ impl Table for ListStageTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, @@ -172,6 +173,7 @@ impl AsyncSource for ListStagesSource { const NAME: &'static str = LIST_STAGE; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.is_finished { return Ok(None); diff --git a/src/query/service/src/table_functions/numbers/numbers_table.rs b/src/query/service/src/table_functions/numbers/numbers_table.rs index a9e0d920891b0..96868348f4ae4 100644 --- a/src/query/service/src/table_functions/numbers/numbers_table.rs +++ b/src/query/service/src/table_functions/numbers/numbers_table.rs @@ -132,6 +132,7 @@ impl Table for NumbersTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, ctx: Arc, diff --git a/src/query/service/src/table_functions/openai/ai_to_sql.rs b/src/query/service/src/table_functions/openai/ai_to_sql.rs index 32455dbc3da29..e0a31b63c9de7 100644 --- a/src/query/service/src/table_functions/openai/ai_to_sql.rs +++ b/src/query/service/src/table_functions/openai/ai_to_sql.rs @@ -118,6 +118,7 @@ impl Table for GPT2SQLTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _: Arc, @@ -172,6 +173,7 @@ impl AsyncSource for GPT2SQLSource { const NAME: &'static str = "gpt_to_sql"; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.finished { return Ok(None); diff --git a/src/query/service/src/table_functions/srf/range.rs b/src/query/service/src/table_functions/srf/range.rs index acbb2336b2d67..c9269101ffc0d 100644 --- a/src/query/service/src/table_functions/srf/range.rs +++ b/src/query/service/src/table_functions/srf/range.rs @@ -139,6 +139,7 @@ impl Table for RangeTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _: Arc, diff --git a/src/query/service/src/table_functions/sync_crash_me.rs b/src/query/service/src/table_functions/sync_crash_me.rs index 705bbad03fcbe..45a2d7dfd57fd 100644 --- a/src/query/service/src/table_functions/sync_crash_me.rs +++ b/src/query/service/src/table_functions/sync_crash_me.rs @@ -106,6 +106,7 @@ impl Table for SyncCrashMeTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _: Arc, diff --git a/src/query/settings/Cargo.toml b/src/query/settings/Cargo.toml index 35f24480eeb8a..626f478bceee5 100644 --- a/src/query/settings/Cargo.toml +++ b/src/query/settings/Cargo.toml @@ -20,6 +20,7 @@ common-meta-types = { path = "../../meta/types" } common-users = { path = "../users" } serde = { workspace = true } +async-backtrace = { workspace = true } dashmap = "5.4" itertools = "0.10.5" num_cpus = "1.13.1" diff --git a/src/query/settings/src/settings_global.rs b/src/query/settings/src/settings_global.rs index efa61a3068ab1..2093730cc65a2 100644 --- a/src/query/settings/src/settings_global.rs +++ b/src/query/settings/src/settings_global.rs @@ -27,6 +27,7 @@ use crate::settings_default::DefaultSettings; use crate::ScopeLevel; impl Settings { + #[async_backtrace::framed] pub async fn load_settings( user_api: Arc, tenant: String, @@ -37,6 +38,7 @@ impl Settings { .await } + #[async_backtrace::framed] pub async fn try_drop_global_setting(&self, key: &str) -> Result<()> { self.changes.remove(key); @@ -53,6 +55,7 @@ impl Settings { .await } + #[async_backtrace::framed] pub async fn set_global_setting(&self, k: String, v: String) -> Result<()> { if let (key, Some(value)) = 
DefaultSettings::convert_value(k.clone(), v)? { self.changes.insert(key.clone(), ChangeValue { @@ -73,6 +76,7 @@ impl Settings { ))) } + #[async_backtrace::framed] pub async fn load_global_changes(&self) -> Result<()> { let default_settings = DefaultSettings::instance()?; diff --git a/src/query/sharing-endpoint/Cargo.toml b/src/query/sharing-endpoint/Cargo.toml index 85d50f5221aca..1ca02626256e4 100644 --- a/src/query/sharing-endpoint/Cargo.toml +++ b/src/query/sharing-endpoint/Cargo.toml @@ -10,6 +10,7 @@ edition = { workspace = true } doctest = false [dependencies] +async-backtrace = { workspace = true } common-base = { path = "../../common/base" } common-config = { path = "../../query/config" } common-exception = { path = "../../common/exception" } diff --git a/src/query/sharing-endpoint/src/accessor.rs b/src/query/sharing-endpoint/src/accessor.rs index d60e0bf9d5855..cbecfb343f8f7 100644 --- a/src/query/sharing-endpoint/src/accessor.rs +++ b/src/query/sharing-endpoint/src/accessor.rs @@ -57,6 +57,7 @@ pub fn truncate_root(root: String, loc: String) -> String { } impl SharingAccessor { + #[async_backtrace::framed] pub async fn init(cfg: &Config) -> Result<()> { GlobalInstance::set(Self::try_create(cfg).await?); @@ -67,6 +68,7 @@ impl SharingAccessor { GlobalInstance::get() } + #[async_backtrace::framed] pub async fn try_create(cfg: &Config) -> Result { let operator = init_operator(&cfg.storage.params)?; diff --git a/src/query/sharing-endpoint/src/accessor/share_table_accessor.rs b/src/query/sharing-endpoint/src/accessor/share_table_accessor.rs index d1922bef1479a..18ad7c5b9881e 100644 --- a/src/query/sharing-endpoint/src/accessor/share_table_accessor.rs +++ b/src/query/sharing-endpoint/src/accessor/share_table_accessor.rs @@ -25,6 +25,7 @@ use crate::models::SharedTableResponse; // Methods for access share table spec. impl SharingAccessor { // read share table spec from S3 and check whether requester has permission on the table + #[async_backtrace::framed] async fn get_shared_table_spec( &self, input: &models::LambdaInput, @@ -39,6 +40,7 @@ impl SharingAccessor { // presign_file would be separated into two steps: // 1. fetch the table location // 2. form the final path and presign it + #[async_backtrace::framed] async fn share_table_spec_presign_file( &self, table: &SharedTableResponse, @@ -66,6 +68,7 @@ impl SharingAccessor { ) } + #[async_backtrace::framed] pub async fn get_share_table_spec_presigned_files( input: &models::LambdaInput, ) -> Result> { diff --git a/src/query/sharing-endpoint/src/accessor/share_table_meta_accessor.rs b/src/query/sharing-endpoint/src/accessor/share_table_meta_accessor.rs index 66a761c7f97d9..56d0edf1949d0 100644 --- a/src/query/sharing-endpoint/src/accessor/share_table_meta_accessor.rs +++ b/src/query/sharing-endpoint/src/accessor/share_table_meta_accessor.rs @@ -23,6 +23,7 @@ use crate::models::TableMetaLambdaInput; // Methods for access share table meta. 
impl SharingAccessor { + #[async_backtrace::framed] pub async fn get_share_table_meta(input: &TableMetaLambdaInput) -> Result { let sharing_accessor = Self::instance(); let share_table_meta_loc = diff --git a/src/query/sharing-endpoint/src/handlers.rs b/src/query/sharing-endpoint/src/handlers.rs index d84466dc1b558..2d6113e88a660 100644 --- a/src/query/sharing-endpoint/src/handlers.rs +++ b/src/query/sharing-endpoint/src/handlers.rs @@ -25,6 +25,7 @@ use crate::models; use crate::models::PresignFileResponse; #[poem::handler] +#[async_backtrace::framed] pub async fn share_table_presign_files( credentials: &Credentials, Path((_tenant_id, share_name, table_name)): Path<(String, String, String)>, @@ -46,6 +47,7 @@ pub async fn share_table_presign_files( } #[poem::handler] +#[async_backtrace::framed] pub async fn share_table_meta( credentials: &Credentials, Path((_tenant_id, share_name)): Path<(String, String)>, diff --git a/src/query/sharing-endpoint/src/middlewares.rs b/src/query/sharing-endpoint/src/middlewares.rs index 0614ab7717292..e733a64ecace9 100644 --- a/src/query/sharing-endpoint/src/middlewares.rs +++ b/src/query/sharing-endpoint/src/middlewares.rs @@ -42,6 +42,7 @@ impl Endpoint for SharingAuthImpl { // TODO(zhihanz) current implementation only used for stateless test // for production usage, we need to implement a middleware with JWT authentication + #[async_backtrace::framed] async fn call(&self, mut req: Request) -> Result { // decode auth header from bearer base64 let auth_header = req diff --git a/src/query/sharing-endpoint/src/models.rs b/src/query/sharing-endpoint/src/models.rs index 6dccdc4b1e4b3..b968ae9bcf93e 100644 --- a/src/query/sharing-endpoint/src/models.rs +++ b/src/query/sharing-endpoint/src/models.rs @@ -174,6 +174,7 @@ pub struct Credentials { #[async_trait] impl<'a> FromRequest<'a> for &'a Credentials { + #[async_backtrace::framed] async fn from_request(req: &'a Request, _body: &mut RequestBody) -> PoemResult { Ok(req .extensions() diff --git a/src/query/sharing-endpoint/src/services.rs b/src/query/sharing-endpoint/src/services.rs index ace24b98d921d..52cc8aada15dd 100644 --- a/src/query/sharing-endpoint/src/services.rs +++ b/src/query/sharing-endpoint/src/services.rs @@ -23,6 +23,7 @@ use crate::configs::Config; pub struct SharingServices {} impl SharingServices { + #[async_backtrace::framed] pub async fn init(config: Config) -> Result<()> { // init global instance singleton GlobalInstance::init_production(); diff --git a/src/query/sharing/Cargo.toml b/src/query/sharing/Cargo.toml index 6aa0869e42119..f0e177f6932a4 100644 --- a/src/query/sharing/Cargo.toml +++ b/src/query/sharing/Cargo.toml @@ -22,6 +22,7 @@ common-meta-app = { path = "../../meta/app" } common-storage = { path = "../../common/storage" } common-users = { path = "../users" } +async-backtrace = { workspace = true } http = "0.2" log = "0.4" moka = "0.10" diff --git a/src/query/sharing/src/layer.rs b/src/query/sharing/src/layer.rs index a3fd44feeabef..21e1991277a33 100644 --- a/src/query/sharing/src/layer.rs +++ b/src/query/sharing/src/layer.rs @@ -135,6 +135,7 @@ impl Accessor for SharedAccessor { meta } + #[async_backtrace::framed] async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> { let req: PresignedRequest = self.signer @@ -167,6 +168,7 @@ impl Accessor for SharedAccessor { } } + #[async_backtrace::framed] async fn stat(&self, path: &str, _args: OpStat) -> Result { // Stat root always returns a DIR. 
if path == "/" { diff --git a/src/query/sharing/src/share_endpoint.rs b/src/query/sharing/src/share_endpoint.rs index 76f60e39b1fc2..f4d85452498df 100644 --- a/src/query/sharing/src/share_endpoint.rs +++ b/src/query/sharing/src/share_endpoint.rs @@ -64,6 +64,7 @@ impl ShareEndpointManager { GlobalInstance::get() } + #[async_backtrace::framed] async fn get_share_endpoint( &self, from_tenant: &str, @@ -100,6 +101,7 @@ impl ShareEndpointManager { ))) } + #[async_backtrace::framed] pub async fn get_table_info_map( &self, from_tenant: &str, diff --git a/src/query/sharing/src/signer.rs b/src/query/sharing/src/signer.rs index 56fa5b3c73eb6..23b3939ed3894 100644 --- a/src/query/sharing/src/signer.rs +++ b/src/query/sharing/src/signer.rs @@ -82,6 +82,7 @@ impl SharedSigner { } /// Fetch a presigned request. If not found, build a new one by sign. + #[async_backtrace::framed] pub async fn fetch(&self, path: &str, op: Operation) -> Result { match self.get(path, op) { Some(v) => Ok(v), @@ -101,11 +102,13 @@ impl SharedSigner { } /// Sign a request. + #[async_backtrace::framed] pub async fn sign(&self, req: PresignRequest) -> Result<()> { self.sign_inner(vec![req]).await } /// Batch sign multiple requests at once. + #[async_backtrace::framed] pub async fn batch_sign(&self, reqs: Vec) -> Result<()> { self.sign_inner(reqs).await } @@ -139,6 +142,7 @@ impl SharedSigner { /// } /// ] /// ``` + #[async_backtrace::framed] async fn sign_inner(&self, reqs: Vec) -> Result<()> { let now = time::Instant::now(); info!("started sharing signing"); diff --git a/src/query/sql/Cargo.toml b/src/query/sql/Cargo.toml index cee1d5f4838c2..db9013eabea27 100644 --- a/src/query/sql/Cargo.toml +++ b/src/query/sql/Cargo.toml @@ -40,6 +40,7 @@ storages-common-table-meta = { path = "../storages/common/table-meta" } # Crates.io dependencies ahash = { version = "0.8.2", features = ["no-rng"] } anyhow = { workspace = true } +async-backtrace = { workspace = true } async-recursion = "1.0.0" async-trait = { version = "0.1.57", package = "async-trait-fn" } chrono = { workspace = true } diff --git a/src/query/sql/src/executor/physical_plan_builder.rs b/src/query/sql/src/executor/physical_plan_builder.rs index c5ba7ecb2f8db..19fbff468d55e 100644 --- a/src/query/sql/src/executor/physical_plan_builder.rs +++ b/src/query/sql/src/executor/physical_plan_builder.rs @@ -161,6 +161,7 @@ impl PhysicalPlanBuilder { } #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn build(&mut self, s_expr: &SExpr) -> Result { // Build stat info let stat_info = self.build_plan_stat_info(s_expr)?; diff --git a/src/query/sql/src/executor/table_read_plan.rs b/src/query/sql/src/executor/table_read_plan.rs index c146c9031734b..1ae96a33dc9ea 100644 --- a/src/query/sql/src/executor/table_read_plan.rs +++ b/src/query/sql/src/executor/table_read_plan.rs @@ -30,6 +30,7 @@ use common_expression::Scalar; #[async_trait::async_trait] pub trait ToReadDataSourcePlan { /// Real read_plan to access partitions/push_downs + #[async_backtrace::framed] async fn read_plan( &self, ctx: Arc, @@ -50,6 +51,7 @@ pub trait ToReadDataSourcePlan { #[async_trait::async_trait] impl ToReadDataSourcePlan for dyn Table { + #[async_backtrace::framed] async fn read_plan_with_catalog( &self, ctx: Arc, diff --git a/src/query/sql/src/planner/binder/aggregate.rs b/src/query/sql/src/planner/binder/aggregate.rs index 0747a28715841..4cbfc67f03467 100644 --- a/src/query/sql/src/planner/binder/aggregate.rs +++ b/src/query/sql/src/planner/binder/aggregate.rs @@ -480,6 +480,7 @@ impl 
Binder { /// `SELECT a as b, COUNT(a) FROM t GROUP BY b`. /// - Scalar expressions that can be evaluated in current scope(doesn't contain aliases), e.g. /// column `a` and expression `a+1` in `SELECT a as b, COUNT(a) FROM t GROUP BY a, a+1`. + #[async_backtrace::framed] pub async fn analyze_group_items<'a>( &mut self, bind_context: &mut BindContext, @@ -544,6 +545,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(super) async fn bind_aggregate( &mut self, bind_context: &mut BindContext, @@ -589,6 +591,7 @@ impl Binder { Ok(new_expr) } + #[async_backtrace::framed] async fn resolve_grouping_sets( &mut self, bind_context: &mut BindContext, @@ -653,6 +656,7 @@ impl Binder { Ok(()) } + #[async_backtrace::framed] async fn resolve_group_items( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/binder.rs b/src/query/sql/src/planner/binder/binder.rs index b197befe4c82d..30e95b725b93b 100644 --- a/src/query/sql/src/planner/binder/binder.rs +++ b/src/query/sql/src/planner/binder/binder.rs @@ -80,12 +80,14 @@ impl<'a> Binder { } } + #[async_backtrace::framed] pub async fn bind(mut self, stmt: &Statement) -> Result { let mut init_bind_context = BindContext::new(); self.bind_statement(&mut init_bind_context, stmt).await } #[async_recursion::async_recursion] + #[async_backtrace::framed] pub(crate) async fn bind_statement( &mut self, bind_context: &mut BindContext, @@ -396,6 +398,7 @@ impl<'a> Binder { Ok(plan) } + #[async_backtrace::framed] pub(crate) async fn bind_rewrite_to_query( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/copy.rs b/src/query/sql/src/planner/binder/copy.rs index a3f482e756b99..1d6b09509d709 100644 --- a/src/query/sql/src/planner/binder/copy.rs +++ b/src/query/sql/src/planner/binder/copy.rs @@ -55,6 +55,7 @@ use crate::plans::ValidationMode; use crate::BindContext; impl<'a> Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_copy( &mut self, bind_context: &mut BindContext, @@ -214,6 +215,7 @@ impl<'a> Binder { /// Bind COPY INFO FROM <stage_location> #[allow(clippy::too_many_arguments)] + #[async_backtrace::framed] async fn bind_copy_from_stage_into_table( &mut self, _: &BindContext, @@ -272,6 +274,7 @@ impl<'a> Binder { /// Bind COPY INFO
FROM <uri_location> #[allow(clippy::too_many_arguments)] + #[async_backtrace::framed] async fn bind_copy_from_uri_into_table( &mut self, _: &BindContext, @@ -336,6 +339,7 @@ impl<'a> Binder { /// Bind COPY INFO FROM <table>
#[allow(clippy::too_many_arguments)] + #[async_backtrace::framed] async fn bind_copy_from_table_into_stage( &mut self, bind_context: &mut BindContext, @@ -381,6 +385,7 @@ impl<'a> Binder { /// Bind COPY INFO FROM <table>
#[allow(clippy::too_many_arguments)] + #[async_backtrace::framed] async fn bind_copy_from_table_into_uri( &mut self, bind_context: &mut BindContext, @@ -430,6 +435,7 @@ impl<'a> Binder { } /// Bind COPY INFO FROM <query> + #[async_backtrace::framed] async fn bind_copy_from_query_into_stage( &mut self, bind_context: &mut BindContext, @@ -460,6 +466,7 @@ impl<'a> Binder { /// Bind COPY INFO FROM <query> #[allow(clippy::too_many_arguments)] + #[async_backtrace::framed] async fn bind_copy_from_query_into_uri( &mut self, bind_context: &mut BindContext, @@ -494,6 +501,7 @@ impl<'a> Binder { } /// Bind COPY INTO <table>
FROM <query> + #[async_backtrace::framed] async fn bind_copy_from_query_into_table( &mut self, bind_context: &BindContext, @@ -631,6 +639,7 @@ impl<'a> Binder { }))) } + #[async_backtrace::framed] async fn apply_stage_options(&mut self, stmt: &CopyStmt, stage: &mut StageInfo) -> Result<()> { if !stmt.file_format.is_empty() { stage.file_format_options = self.try_resolve_file_format(&stmt.file_format).await?; @@ -727,6 +736,7 @@ fn check_transform_query( /// For internal stage, we will also add prefix `/stage/<stage_name>/` /// /// - @internal/abc => (internal, "/stage/internal/abc") +#[async_backtrace::framed] pub async fn parse_stage_location( ctx: &Arc, location: &str, @@ -756,6 +766,7 @@ pub async fn parse_stage_location( /// /// # NOTE: /// `path` MUST starts with '/' +#[async_backtrace::framed] pub async fn parse_stage_location_v2( ctx: &Arc, name: &str, @@ -776,6 +787,7 @@ pub async fn parse_stage_location_v2( Ok((stage, relative_path)) } +#[async_backtrace::framed] pub async fn parse_file_location( ctx: &Arc, location: &FileLocation, diff --git a/src/query/sql/src/planner/binder/ddl/account.rs b/src/query/sql/src/planner/binder/ddl/account.rs index 9f84934de82ba..fdcd9bd524103 100644 --- a/src/query/sql/src/planner/binder/ddl/account.rs +++ b/src/query/sql/src/planner/binder/ddl/account.rs @@ -35,6 +35,7 @@ use crate::plans::RevokeRolePlan; use crate::Binder; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_grant( &mut self, stmt: &GrantStmt, @@ -77,6 +78,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_revoke( &mut self, stmt: &RevokeStmt, @@ -142,6 +144,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_create_user( &mut self, stmt: &CreateUserStmt, @@ -165,6 +168,7 @@ impl Binder { Ok(Plan::CreateUser(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_alter_user( &mut self, stmt: &AlterUserStmt, diff --git a/src/query/sql/src/planner/binder/ddl/catalog.rs b/src/query/sql/src/planner/binder/ddl/catalog.rs index a3032b7c41aa3..5737353568432 100644 --- a/src/query/sql/src/planner/binder/ddl/catalog.rs +++ b/src/query/sql/src/planner/binder/ddl/catalog.rs @@ -44,6 +44,7 @@ use crate::BindContext; use crate::Binder; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_catalogs( &mut self, bind_context: &mut BindContext, @@ -67,6 +68,7 @@ impl Binder { .await } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_create_catalogs( &self, stmt: &ShowCreateCatalogStmt, @@ -83,6 +85,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_create_catalog( &self, stmt: &CreateCatalogStmt, @@ -106,6 +109,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_drop_catalog( &self, stmt: &DropCatalogStmt, diff --git a/src/query/sql/src/planner/binder/ddl/column.rs b/src/query/sql/src/planner/binder/ddl/column.rs index 5153128ff823c..c28b05f32af17 100644 --- a/src/query/sql/src/planner/binder/ddl/column.rs +++ b/src/query/sql/src/planner/binder/ddl/column.rs @@ -25,6 +25,7 @@ use crate::Binder; use crate::SelectBuilder; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_columns( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/ddl/database.rs b/src/query/sql/src/planner/binder/ddl/database.rs index
dee78935daca3..133777259ce04 100644 --- a/src/query/sql/src/planner/binder/ddl/database.rs +++ b/src/query/sql/src/planner/binder/ddl/database.rs @@ -46,6 +46,7 @@ use crate::BindContext; use crate::SelectBuilder; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_databases( &mut self, bind_context: &mut BindContext, @@ -90,6 +91,7 @@ impl Binder { .await } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_create_database( &self, stmt: &ShowCreateDatabaseStmt, @@ -113,6 +115,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_alter_database( &self, stmt: &AlterDatabaseStmt, @@ -149,6 +152,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_drop_database( &self, stmt: &DropDatabaseStmt, @@ -174,6 +178,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_undrop_database( &self, stmt: &UndropDatabaseStmt, @@ -194,6 +199,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_create_database( &self, stmt: &CreateDatabaseStmt, diff --git a/src/query/sql/src/planner/binder/ddl/role.rs b/src/query/sql/src/planner/binder/ddl/role.rs index 44adb56ba8bd4..981a66595db5f 100644 --- a/src/query/sql/src/planner/binder/ddl/role.rs +++ b/src/query/sql/src/planner/binder/ddl/role.rs @@ -20,6 +20,7 @@ use crate::BindContext; use crate::Binder; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_set_role( &mut self, _bind_context: &BindContext, diff --git a/src/query/sql/src/planner/binder/ddl/share.rs b/src/query/sql/src/planner/binder/ddl/share.rs index b9961bd7f6281..148a072379049 100644 --- a/src/query/sql/src/planner/binder/ddl/share.rs +++ b/src/query/sql/src/planner/binder/ddl/share.rs @@ -34,6 +34,7 @@ use crate::plans::ShowShareEndpointPlan; use crate::plans::ShowSharesPlan; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_create_share_endpoint( &mut self, stmt: &CreateShareEndpointStmt, @@ -63,6 +64,7 @@ impl Binder { Ok(Plan::CreateShareEndpoint(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_share_endpoint( &mut self, _stmt: &ShowShareEndpointStmt, @@ -73,6 +75,7 @@ impl Binder { Ok(Plan::ShowShareEndpoint(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_drop_share_endpoint( &mut self, stmt: &DropShareEndpointStmt, @@ -89,6 +92,7 @@ impl Binder { Ok(Plan::DropShareEndpoint(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_create_share( &mut self, stmt: &CreateShareStmt, @@ -110,6 +114,7 @@ impl Binder { Ok(Plan::CreateShare(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_drop_share( &mut self, stmt: &DropShareStmt, @@ -126,6 +131,7 @@ impl Binder { Ok(Plan::DropShare(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_grant_share_object( &mut self, stmt: &GrantShareObjectStmt, @@ -146,6 +152,7 @@ impl Binder { Ok(Plan::GrantShareObject(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_revoke_share_object( &mut self, stmt: &RevokeShareObjectStmt, @@ -166,6 +173,7 @@ impl Binder { Ok(Plan::RevokeShareObject(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn 
bind_alter_share_accounts( &mut self, stmt: &AlterShareTenantsStmt, @@ -188,6 +196,7 @@ impl Binder { Ok(Plan::AlterShareTenants(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_desc_share( &mut self, stmt: &DescShareStmt, @@ -200,6 +209,7 @@ impl Binder { Ok(Plan::DescShare(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_shares( &mut self, _stmt: &ShowSharesStmt, @@ -207,6 +217,7 @@ impl Binder { Ok(Plan::ShowShares(Box::new(ShowSharesPlan {}))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_object_grant_privileges( &mut self, stmt: &ShowObjectGrantPrivilegesStmt, @@ -219,6 +230,7 @@ impl Binder { Ok(Plan::ShowObjectGrantPrivileges(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_grants_of_share( &mut self, stmt: &ShowGrantsOfShareStmt, diff --git a/src/query/sql/src/planner/binder/ddl/stage.rs b/src/query/sql/src/planner/binder/ddl/stage.rs index f8c27683466f7..5bacc99f4d1c2 100644 --- a/src/query/sql/src/planner/binder/ddl/stage.rs +++ b/src/query/sql/src/planner/binder/ddl/stage.rs @@ -31,6 +31,7 @@ use crate::plans::Plan; use crate::plans::RemoveStagePlan; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_remove_stage( &mut self, location: &str, @@ -47,6 +48,7 @@ impl Binder { Ok(Plan::RemoveStage(Box::new(plan_node))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_create_stage( &mut self, stmt: &CreateStageStmt, @@ -113,6 +115,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(crate) async fn try_resolve_file_format( &self, options: &BTreeMap, diff --git a/src/query/sql/src/planner/binder/ddl/table.rs b/src/query/sql/src/planner/binder/ddl/table.rs index cf63de7a185cf..c168ac837a2f4 100644 --- a/src/query/sql/src/planner/binder/ddl/table.rs +++ b/src/query/sql/src/planner/binder/ddl/table.rs @@ -106,6 +106,7 @@ use crate::ScalarExpr; use crate::SelectBuilder; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_tables( &mut self, bind_context: &mut BindContext, @@ -179,6 +180,7 @@ impl Binder { .await } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_create_table( &mut self, stmt: &ShowCreateTableStmt, @@ -204,6 +206,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_describe_table( &mut self, stmt: &DescribeTableStmt, @@ -232,6 +235,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_tables_status( &mut self, bind_context: &mut BindContext, @@ -276,6 +280,7 @@ impl Binder { self.bind_statement(bind_context, &stmt).await } + #[async_backtrace::framed] async fn check_database_exist( &mut self, catalog: &Option, @@ -298,6 +303,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_create_table( &mut self, stmt: &CreateTableStmt, @@ -510,6 +516,7 @@ impl Binder { Ok(Plan::CreateTable(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_drop_table( &mut self, stmt: &DropTableStmt, @@ -536,6 +543,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_undrop_table( &mut self, stmt: &UndropTableStmt, @@ -558,6 +566,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_alter_table( &mut self, 
bind_context: &mut BindContext, @@ -690,6 +699,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_rename_table( &mut self, stmt: &RenameTableStmt, @@ -728,6 +738,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_truncate_table( &mut self, stmt: &TruncateTableStmt, @@ -750,6 +761,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_optimize_table( &mut self, bind_context: &mut BindContext, @@ -801,6 +813,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_analyze_table( &mut self, stmt: &AnalyzeTableStmt, @@ -821,6 +834,7 @@ impl Binder { }))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_exists_table( &mut self, stmt: &ExistsTableStmt, @@ -841,6 +855,7 @@ impl Binder { }))) } + #[async_backtrace::framed] async fn analyze_create_table_schema_by_columns( &self, columns: &[ColumnDefinition], @@ -895,6 +910,7 @@ impl Binder { Ok((schema, fields_default_expr, fields_comments)) } + #[async_backtrace::framed] async fn analyze_create_table_schema( &self, source: &CreateTableSource, @@ -959,6 +975,7 @@ impl Binder { } } + #[async_backtrace::framed] async fn analyze_cluster_keys( &mut self, cluster_by: &[Expr], diff --git a/src/query/sql/src/planner/binder/ddl/view.rs b/src/query/sql/src/planner/binder/ddl/view.rs index 1e393b7b26ccc..5acf3f808055d 100644 --- a/src/query/sql/src/planner/binder/ddl/view.rs +++ b/src/query/sql/src/planner/binder/ddl/view.rs @@ -25,6 +25,7 @@ use crate::plans::DropViewPlan; use crate::plans::Plan; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_create_view( &mut self, stmt: &CreateViewStmt, @@ -59,6 +60,7 @@ impl Binder { Ok(Plan::CreateView(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_alter_view( &mut self, stmt: &AlterViewStmt, @@ -91,6 +93,7 @@ impl Binder { Ok(Plan::AlterView(Box::new(plan))) } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_drop_view( &mut self, stmt: &DropViewStmt, diff --git a/src/query/sql/src/planner/binder/delete.rs b/src/query/sql/src/planner/binder/delete.rs index a7219fc56f6cd..074003804eb99 100644 --- a/src/query/sql/src/planner/binder/delete.rs +++ b/src/query/sql/src/planner/binder/delete.rs @@ -28,6 +28,7 @@ use crate::BindContext; use crate::ScalarExpr; impl<'a> Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_delete( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/having.rs b/src/query/sql/src/planner/binder/having.rs index 5a075b5a8afb0..cd7ab70285360 100644 --- a/src/query/sql/src/planner/binder/having.rs +++ b/src/query/sql/src/planner/binder/having.rs @@ -31,6 +31,7 @@ use crate::Binder; impl Binder { /// Analyze aggregates in having clause, this will rewrite aggregate functions. /// See `AggregateRewriter` for more details. 
+ #[async_backtrace::framed] pub(super) async fn analyze_aggregate_having<'a>( &mut self, bind_context: &mut BindContext, @@ -54,6 +55,7 @@ impl Binder { Ok((rewriter.visit(&scalar)?, having.span())) } + #[async_backtrace::framed] pub(super) async fn bind_having( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/insert.rs b/src/query/sql/src/planner/binder/insert.rs index 12b98b478f6eb..f6ae229d1dfad 100644 --- a/src/query/sql/src/planner/binder/insert.rs +++ b/src/query/sql/src/planner/binder/insert.rs @@ -32,6 +32,7 @@ use crate::plans::Plan; use crate::BindContext; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_insert( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/join.rs b/src/query/sql/src/planner/binder/join.rs index 9f13a5bc5f415..7f27231643dce 100644 --- a/src/query/sql/src/planner/binder/join.rs +++ b/src/query/sql/src/planner/binder/join.rs @@ -54,6 +54,7 @@ pub struct JoinConditions { impl Binder { #[async_recursion] + #[async_backtrace::framed] pub(super) async fn bind_join( &mut self, bind_context: &BindContext, @@ -392,6 +393,7 @@ impl<'a> JoinConditionResolver<'a> { } } + #[async_backtrace::framed] pub async fn resolve( &mut self, left_join_conditions: &mut Vec, @@ -456,6 +458,7 @@ impl<'a> JoinConditionResolver<'a> { Ok(()) } + #[async_backtrace::framed] async fn resolve_on( &mut self, condition: &Expr, @@ -484,6 +487,7 @@ impl<'a> JoinConditionResolver<'a> { Ok(()) } + #[async_backtrace::framed] async fn resolve_predicate( &self, predicate: &Expr, @@ -534,6 +538,7 @@ impl<'a> JoinConditionResolver<'a> { Ok(()) } + #[async_backtrace::framed] async fn resolve_using( &mut self, using_columns: Vec<(Span, String)>, @@ -652,6 +657,7 @@ impl<'a> JoinConditionResolver<'a> { Ok(false) } + #[async_backtrace::framed] async fn add_other_conditions( &self, predicate: &Expr, diff --git a/src/query/sql/src/planner/binder/kill.rs b/src/query/sql/src/planner/binder/kill.rs index df8d2ff38a2dd..8d92be0e58d48 100644 --- a/src/query/sql/src/planner/binder/kill.rs +++ b/src/query/sql/src/planner/binder/kill.rs @@ -21,6 +21,7 @@ use crate::plans::KillPlan; use crate::plans::Plan; impl Binder { + #[async_backtrace::framed] pub(super) async fn bind_kill_stmt( &mut self, _bind_context: &BindContext, diff --git a/src/query/sql/src/planner/binder/limit.rs b/src/query/sql/src/planner/binder/limit.rs index 8f393a44d51d5..2d6506a66072a 100644 --- a/src/query/sql/src/planner/binder/limit.rs +++ b/src/query/sql/src/planner/binder/limit.rs @@ -23,6 +23,7 @@ use crate::plans::Limit; use crate::BindContext; impl Binder { + #[async_backtrace::framed] pub(super) async fn bind_limit( &mut self, _bind_context: &BindContext, diff --git a/src/query/sql/src/planner/binder/presign.rs b/src/query/sql/src/planner/binder/presign.rs index 0f335c9f82fae..5d02ed52f0b0f 100644 --- a/src/query/sql/src/planner/binder/presign.rs +++ b/src/query/sql/src/planner/binder/presign.rs @@ -26,6 +26,7 @@ use crate::plans::PresignPlan; use crate::BindContext; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_presign( &mut self, _: &BindContext, diff --git a/src/query/sql/src/planner/binder/project.rs b/src/query/sql/src/planner/binder/project.rs index 796246c93a96c..a76f101b6e8c2 100644 --- a/src/query/sql/src/planner/binder/project.rs +++ b/src/query/sql/src/planner/binder/project.rs @@ -157,6 +157,7 @@ impl Binder { /// For scalar expressions and aggregate expressions, we 
will register new columns for /// them in `Metadata`. And notice that, the semantic of aggregate expressions won't be checked /// in this function. + #[async_backtrace::framed] pub(super) async fn normalize_select_list<'a>( &mut self, input_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/project_set.rs b/src/query/sql/src/planner/binder/project_set.rs index acf1d541b5616..16cd4d4a7dd8b 100644 --- a/src/query/sql/src/planner/binder/project_set.rs +++ b/src/query/sql/src/planner/binder/project_set.rs @@ -84,6 +84,7 @@ impl SrfCollector { } impl Binder { + #[async_backtrace::framed] pub async fn bind_project_set( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/replace.rs b/src/query/sql/src/planner/binder/replace.rs index 09f2f5ad694dc..c0ded9adddae2 100644 --- a/src/query/sql/src/planner/binder/replace.rs +++ b/src/query/sql/src/planner/binder/replace.rs @@ -32,6 +32,7 @@ use crate::plans::Replace; use crate::BindContext; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_replace( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/scalar.rs b/src/query/sql/src/planner/binder/scalar.rs index 512b3c9452a2d..af69f6acae7fb 100644 --- a/src/query/sql/src/planner/binder/scalar.rs +++ b/src/query/sql/src/planner/binder/scalar.rs @@ -52,6 +52,7 @@ impl<'a> ScalarBinder<'a> { } } + #[async_backtrace::framed] pub async fn bind(&mut self, expr: &Expr) -> Result<(ScalarExpr, DataType)> { let mut type_checker = TypeChecker::new( self.bind_context, diff --git a/src/query/sql/src/planner/binder/select.rs b/src/query/sql/src/planner/binder/select.rs index a4b7996661a7b..21cd6390a23be 100644 --- a/src/query/sql/src/planner/binder/select.rs +++ b/src/query/sql/src/planner/binder/select.rs @@ -73,6 +73,7 @@ pub struct SelectItem<'a> { } impl Binder { + #[async_backtrace::framed] pub(super) async fn bind_select_stmt( &mut self, bind_context: &mut BindContext, @@ -216,6 +217,7 @@ impl Binder { } #[async_recursion] + #[async_backtrace::framed] pub(crate) async fn bind_set_expr( &mut self, bind_context: &mut BindContext, @@ -239,6 +241,7 @@ impl Binder { } #[async_recursion] + #[async_backtrace::framed] pub(crate) async fn bind_query( &mut self, bind_context: &mut BindContext, @@ -300,6 +303,7 @@ impl Binder { Ok((s_expr, bind_context)) } + #[async_backtrace::framed] pub(super) async fn bind_where( &mut self, bind_context: &mut BindContext, @@ -330,6 +334,7 @@ impl Binder { Ok(new_expr) } + #[async_backtrace::framed] pub(super) async fn bind_set_operator( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/setting.rs b/src/query/sql/src/planner/binder/setting.rs index e3fd3760c6ba3..9b07cdf363f34 100644 --- a/src/query/sql/src/planner/binder/setting.rs +++ b/src/query/sql/src/planner/binder/setting.rs @@ -32,6 +32,7 @@ use crate::plans::UnSettingPlan; use crate::plans::VarValue; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_set_variable( &mut self, bind_context: &mut BindContext, @@ -68,6 +69,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_unset_variable( &mut self, _bind_context: &BindContext, diff --git a/src/query/sql/src/planner/binder/show.rs b/src/query/sql/src/planner/binder/show.rs index ee9251155e69d..b026d1387696e 100644 --- a/src/query/sql/src/planner/binder/show.rs +++ b/src/query/sql/src/planner/binder/show.rs @@ -21,6 +21,7 @@ use 
crate::BindContext; use crate::Binder; impl Binder { + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_functions( &mut self, bind_context: &mut BindContext, @@ -49,6 +50,7 @@ impl Binder { .await } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_table_functions( &mut self, bind_context: &mut BindContext, @@ -74,6 +76,7 @@ impl Binder { .await } + #[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_show_settings( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/sort.rs b/src/query/sql/src/planner/binder/sort.rs index 49f75df426e5b..7ce24f6d2cb4b 100644 --- a/src/query/sql/src/planner/binder/sort.rs +++ b/src/query/sql/src/planner/binder/sort.rs @@ -60,6 +60,7 @@ pub struct OrderItem { } impl Binder { + #[async_backtrace::framed] pub(super) async fn analyze_order_items( &mut self, from_context: &BindContext, @@ -229,6 +230,7 @@ impl Binder { Ok(OrderItems { items: order_items }) } + #[async_backtrace::framed] pub(super) async fn bind_order_by( &mut self, from_context: &BindContext, @@ -318,6 +320,7 @@ impl Binder { Ok(new_expr) } + #[async_backtrace::framed] pub(crate) async fn bind_order_by_for_set_operation( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/table.rs b/src/query/sql/src/planner/binder/table.rs index 0cc75c26d468a..45da88fc966f8 100644 --- a/src/query/sql/src/planner/binder/table.rs +++ b/src/query/sql/src/planner/binder/table.rs @@ -81,6 +81,7 @@ use crate::IndexType; use crate::TableInternalColumn; impl Binder { + #[async_backtrace::framed] pub(super) async fn bind_one_table( &mut self, bind_context: &BindContext, @@ -139,6 +140,7 @@ impl Binder { } #[async_recursion] + #[async_backtrace::framed] async fn bind_single_table( &mut self, bind_context: &mut BindContext, @@ -454,6 +456,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(crate) async fn bind_stage_table( &mut self, bind_context: &BindContext, @@ -500,6 +503,7 @@ impl Binder { } } + #[async_backtrace::framed] pub(super) async fn bind_table_reference( &mut self, bind_context: &mut BindContext, @@ -573,6 +577,7 @@ impl Binder { Ok((result_expr, result_ctx)) } + #[async_backtrace::framed] async fn bind_cte( &mut self, span: Span, @@ -629,6 +634,7 @@ impl Binder { Ok((s_expr, new_bind_context)) } + #[async_backtrace::framed] async fn bind_base_table( &mut self, bind_context: &BindContext, @@ -716,6 +722,7 @@ impl Binder { )) } + #[async_backtrace::framed] async fn resolve_data_source( &self, tenant: &str, @@ -734,6 +741,7 @@ impl Binder { Ok(table_meta) } + #[async_backtrace::framed] pub(crate) async fn resolve_data_travel_point( &self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/table_args.rs b/src/query/sql/src/planner/binder/table_args.rs index 4c2956719b4d2..5c43d6b31efa5 100644 --- a/src/query/sql/src/planner/binder/table_args.rs +++ b/src/query/sql/src/planner/binder/table_args.rs @@ -26,6 +26,7 @@ use crate::plans::ConstantExpr; use crate::ScalarBinder; use crate::ScalarExpr; +#[async_backtrace::framed] pub async fn bind_table_args( scalar_binder: &mut ScalarBinder<'_>, params: &Vec, diff --git a/src/query/sql/src/planner/binder/update.rs b/src/query/sql/src/planner/binder/update.rs index 8be523edd1efc..f8b6a250b2634 100644 --- a/src/query/sql/src/planner/binder/update.rs +++ b/src/query/sql/src/planner/binder/update.rs @@ -28,6 +28,7 @@ use crate::plans::UpdatePlan; use crate::BindContext; impl Binder { + 
#[async_backtrace::framed] pub(in crate::planner::binder) async fn bind_update( &mut self, bind_context: &mut BindContext, diff --git a/src/query/sql/src/planner/binder/window.rs b/src/query/sql/src/planner/binder/window.rs index 29cfdbe741b94..2c7e98d37c782 100644 --- a/src/query/sql/src/planner/binder/window.rs +++ b/src/query/sql/src/planner/binder/window.rs @@ -1,87 +1,88 @@ -// Copyright 2023 Datafuse Labs. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::collections::HashMap; - -use common_exception::Result; - -use crate::optimizer::SExpr; -use crate::plans::EvalScalar; -use crate::plans::ScalarItem; -use crate::plans::Window; -use crate::plans::WindowFuncFrame; -use crate::Binder; - -impl Binder { - pub(super) async fn bind_window_function( - &mut self, - window_info: &WindowFunctionInto, - child: SExpr, - ) -> Result { - let mut scalar_items: Vec = Vec::with_capacity( - window_info.aggregate_arguments.len() - + window_info.partition_by_items.len() - + window_info.order_by_items.len(), - ); - for arg in window_info.aggregate_arguments.iter() { - scalar_items.push(arg.clone()); - } - for part in window_info.partition_by_items.iter() { - scalar_items.push(part.clone()); - } - for order in window_info.order_by_items.iter() { - scalar_items.push(order.order_by_item.clone()) - } - - let mut new_expr = child; - if !scalar_items.is_empty() { - let eval_scalar = EvalScalar { - items: scalar_items, - }; - new_expr = SExpr::create_unary(eval_scalar.into(), new_expr); - } - - let window_plan = Window { - aggregate_function: window_info.aggregate_function.clone(), - partition_by: window_info.partition_by_items.clone(), - order_by: window_info.order_by_items.clone(), - frame: window_info.frame.clone(), - }; - new_expr = SExpr::create_unary(window_plan.into(), new_expr); - - Ok(new_expr) - } -} - -#[derive(Default, Clone, PartialEq, Eq, Debug)] -pub struct WindowInfo { - pub window_functions: Vec, - pub window_functions_map: HashMap, -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct WindowFunctionInto { - pub aggregate_function: ScalarItem, - pub aggregate_arguments: Vec, - pub partition_by_items: Vec, - pub order_by_items: Vec, - pub frame: WindowFuncFrame, -} - -#[derive(Clone, PartialEq, Eq, Debug, Hash)] -pub struct WindowOrderByInfo { - pub order_by_item: ScalarItem, - pub asc: Option, - pub nulls_first: Option, -} +// Copyright 2023 Datafuse Labs. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::collections::HashMap; + +use common_exception::Result; + +use crate::optimizer::SExpr; +use crate::plans::EvalScalar; +use crate::plans::ScalarItem; +use crate::plans::Window; +use crate::plans::WindowFuncFrame; +use crate::Binder; + +impl Binder { + #[async_backtrace::framed] + pub(super) async fn bind_window_function( + &mut self, + window_info: &WindowFunctionInto, + child: SExpr, + ) -> Result { + let mut scalar_items: Vec = Vec::with_capacity( + window_info.aggregate_arguments.len() + + window_info.partition_by_items.len() + + window_info.order_by_items.len(), + ); + for arg in window_info.aggregate_arguments.iter() { + scalar_items.push(arg.clone()); + } + for part in window_info.partition_by_items.iter() { + scalar_items.push(part.clone()); + } + for order in window_info.order_by_items.iter() { + scalar_items.push(order.order_by_item.clone()) + } + + let mut new_expr = child; + if !scalar_items.is_empty() { + let eval_scalar = EvalScalar { + items: scalar_items, + }; + new_expr = SExpr::create_unary(eval_scalar.into(), new_expr); + } + + let window_plan = Window { + aggregate_function: window_info.aggregate_function.clone(), + partition_by: window_info.partition_by_items.clone(), + order_by: window_info.order_by_items.clone(), + frame: window_info.frame.clone(), + }; + new_expr = SExpr::create_unary(window_plan.into(), new_expr); + + Ok(new_expr) + } +} + +#[derive(Default, Clone, PartialEq, Eq, Debug)] +pub struct WindowInfo { + pub window_functions: Vec, + pub window_functions_map: HashMap, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct WindowFunctionInto { + pub aggregate_function: ScalarItem, + pub aggregate_arguments: Vec, + pub partition_by_items: Vec, + pub order_by_items: Vec, + pub frame: WindowFuncFrame, +} + +#[derive(Clone, PartialEq, Eq, Debug, Hash)] +pub struct WindowOrderByInfo { + pub order_by_item: ScalarItem, + pub asc: Option, + pub nulls_first: Option, +} diff --git a/src/query/sql/src/planner/planner.rs b/src/query/sql/src/planner/planner.rs index d8368e018af5f..04631956ce3cb 100644 --- a/src/query/sql/src/planner/planner.rs +++ b/src/query/sql/src/planner/planner.rs @@ -58,6 +58,7 @@ impl Planner { Planner { ctx } } + #[async_backtrace::framed] pub async fn plan_sql(&mut self, sql: &str) -> Result<(Plan, PlanExtras)> { let settings = self.ctx.get_settings(); let sql_dialect = settings.get_sql_dialect()?; diff --git a/src/query/sql/src/planner/semantic/type_check.rs b/src/query/sql/src/planner/semantic/type_check.rs index add3536918aff..95baa4896b62c 100644 --- a/src/query/sql/src/planner/semantic/type_check.rs +++ b/src/query/sql/src/planner/semantic/type_check.rs @@ -143,6 +143,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve(&mut self, expr: &Expr) -> Result> { if let Some(scalar) = self.bind_context.srfs.get(&expr.to_string()) { if !matches!(self.bind_context.expr_context, ExprContext::SelectClause) { @@ -950,6 +951,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve_window( &mut self, _span: Span, @@ -1082,6 +1084,7 @@ impl<'a> TypeChecker<'a> { /// Resolve function call. 
#[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve_function( &mut self, span: Span, @@ -1150,6 +1153,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve_scalar_function_call( &mut self, span: Span, @@ -1191,6 +1195,7 @@ impl<'a> TypeChecker<'a> { /// would be transformed into `FunctionCall`, except comparison /// expressions, conjunction(`AND`) and disjunction(`OR`). #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve_binary_op( &mut self, span: Span, @@ -1291,6 +1296,7 @@ impl<'a> TypeChecker<'a> { /// Resolve unary expressions. #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve_unary_op( &mut self, span: Span, @@ -1328,6 +1334,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve_extract_expr( &mut self, span: Span, @@ -1369,6 +1376,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve_date_add( &mut self, span: Span, @@ -1395,6 +1403,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] pub async fn resolve_date_trunc( &mut self, span: Span, @@ -1462,6 +1471,7 @@ impl<'a> TypeChecker<'a> { } } + #[async_backtrace::framed] pub async fn resolve_subquery( &mut self, typ: SubqueryType, @@ -1558,6 +1568,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] async fn try_rewrite_scalar_function( &mut self, span: Span, @@ -1778,6 +1789,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] async fn resolve_trim_function( &mut self, span: Span, @@ -1870,6 +1882,7 @@ impl<'a> TypeChecker<'a> { // TODO(leiysky): use an array builder function instead, since we should allow declaring // an array with variable as element. 
#[async_recursion::async_recursion] + #[async_backtrace::framed] async fn resolve_array( &mut self, span: Span, @@ -1886,6 +1899,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] async fn resolve_array_sort( &mut self, span: Span, @@ -1905,6 +1919,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] async fn resolve_map( &mut self, span: Span, @@ -1931,6 +1946,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] async fn resolve_tuple( &mut self, span: Span, @@ -1947,6 +1963,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] async fn resolve_udf( &mut self, span: Span, @@ -1997,6 +2014,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] async fn resolve_map_access( &mut self, expr: &Expr, @@ -2098,6 +2116,7 @@ impl<'a> TypeChecker<'a> { } #[async_recursion::async_recursion] + #[async_backtrace::framed] async fn resolve_tuple_map_access_pushdown( &mut self, span: Span, diff --git a/src/query/storages/common/cache/Cargo.toml b/src/query/storages/common/cache/Cargo.toml index b4f6d7347c2df..0ed48448f9c90 100644 --- a/src/query/storages/common/cache/Cargo.toml +++ b/src/query/storages/common/cache/Cargo.toml @@ -18,6 +18,7 @@ enable-histogram-metrics = ["metrics/enable-histogram"] common-cache = { path = "../../../../common/cache" } common-exception = { path = "../../../../common/exception" } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } crc32fast = "1.3.2" crossbeam-channel = "0.5.6" diff --git a/src/query/storages/common/cache/src/read/cached_reader.rs b/src/query/storages/common/cache/src/read/cached_reader.rs index 105e71db78f2d..25e72b8064eb8 100644 --- a/src/query/storages/common/cache/src/read/cached_reader.rs +++ b/src/query/storages/common/cache/src/read/cached_reader.rs @@ -46,6 +46,7 @@ where } /// Load the object at `location`, uses/populates the cache if possible/necessary. + #[async_backtrace::framed] pub async fn read(&self, params: &LoadParams) -> Result> { match &self.cache { None => Ok(Arc::new(self.loader.load(params).await?)), diff --git a/src/query/storages/fuse/Cargo.toml b/src/query/storages/fuse/Cargo.toml index 6213133efd591..d03115f968e8c 100644 --- a/src/query/storages/fuse/Cargo.toml +++ b/src/query/storages/fuse/Cargo.toml @@ -39,6 +39,7 @@ storages-common-index = { path = "../common/index" } storages-common-pruner = { path = "../common/pruner" } storages-common-table-meta = { path = "../common/table-meta" } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } backoff = { version = "0.4.0", features = ["futures", "tokio"] } chrono = { workspace = true } diff --git a/src/query/storages/fuse/src/fuse_table.rs b/src/query/storages/fuse/src/fuse_table.rs index 53012a7465a54..78c30df51b863 100644 --- a/src/query/storages/fuse/src/fuse_table.rs +++ b/src/query/storages/fuse/src/fuse_table.rs @@ -218,6 +218,7 @@ impl FuseTable { } #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] pub(crate) async fn read_table_snapshot_statistics( &self, snapshot: Option<&Arc>, @@ -245,6 +246,7 @@ impl FuseTable { } #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] pub async fn read_table_snapshot(&self) -> Result>> { if let Some(loc) = self.snapshot_loc().await? 
{ let reader = MetaReaders::table_snapshot_reader(self.get_operator()); @@ -261,6 +263,7 @@ impl FuseTable { } } + #[async_backtrace::framed] pub async fn snapshot_format_version(&self) -> Result { match self.snapshot_loc().await? { Some(loc) => Ok(TableMetaLocationGenerator::snapshot_version(loc.as_str())), @@ -272,6 +275,7 @@ impl FuseTable { } } + #[async_backtrace::framed] pub async fn snapshot_loc(&self) -> Result> { match self.table_info.db_type { DatabaseType::ShareDB(_) => { @@ -366,6 +370,7 @@ impl Table for FuseTable { vec![] } + #[async_backtrace::framed] async fn alter_table_cluster_keys( &self, ctx: Arc, @@ -415,6 +420,7 @@ impl Table for FuseTable { .await } + #[async_backtrace::framed] async fn drop_table_cluster_keys(&self, ctx: Arc) -> Result<()> { if self.cluster_key_meta.is_none() { return Ok(()); @@ -466,6 +472,7 @@ impl Table for FuseTable { } #[tracing::instrument(level = "debug", name = "fuse_table_read_partitions", skip(self, ctx), fields(ctx.id = ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn read_partitions( &self, ctx: Arc, @@ -494,6 +501,7 @@ impl Table for FuseTable { self.do_append_data(ctx, pipeline, append_mode, need_output) } + #[async_backtrace::framed] async fn replace_into( &self, ctx: Arc, @@ -505,6 +513,7 @@ impl Table for FuseTable { } #[tracing::instrument(level = "debug", name = "fuse_table_commit_insertion", skip(self, ctx, operations), fields(ctx.id = ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn commit_insertion( &self, ctx: Arc, @@ -522,16 +531,19 @@ impl Table for FuseTable { } #[tracing::instrument(level = "debug", name = "fuse_table_truncate", skip(self, ctx), fields(ctx.id = ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn truncate(&self, ctx: Arc, purge: bool) -> Result<()> { self.do_truncate(ctx, purge).await } #[tracing::instrument(level = "debug", name = "fuse_table_optimize", skip(self, ctx), fields(ctx.id = ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn purge(&self, ctx: Arc, keep_last_snapshot: bool) -> Result<()> { self.do_purge(&ctx, keep_last_snapshot).await } #[tracing::instrument(level = "debug", name = "analyze", skip(self, ctx), fields(ctx.id = ctx.get_id().as_str()))] + #[async_backtrace::framed] async fn analyze(&self, ctx: Arc) -> Result<()> { self.do_analyze(&ctx).await } @@ -546,6 +558,7 @@ impl Table for FuseTable { })) } + #[async_backtrace::framed] async fn column_statistics_provider(&self) -> Result> { let provider = if let Some(snapshot) = self.read_table_snapshot().await? 
{ let stats = &snapshot.summary.col_stats; @@ -571,6 +584,7 @@ impl Table for FuseTable { } #[tracing::instrument(level = "debug", name = "fuse_table_navigate_to", skip_all)] + #[async_backtrace::framed] async fn navigate_to(&self, point: &NavigationPoint) -> Result> { match point { NavigationPoint::SnapshotID(snapshot_id) => { @@ -582,6 +596,7 @@ impl Table for FuseTable { } } + #[async_backtrace::framed] async fn delete( &self, ctx: Arc, @@ -592,6 +607,7 @@ impl Table for FuseTable { self.do_delete(ctx, filter, col_indices, pipeline).await } + #[async_backtrace::framed] async fn update( &self, ctx: Arc, @@ -615,6 +631,7 @@ impl Table for FuseTable { BlockThresholds::new(max_rows_per_block, min_rows_per_block, max_bytes_per_block) } + #[async_backtrace::framed] async fn compact( &self, ctx: Arc, @@ -625,6 +642,7 @@ impl Table for FuseTable { self.do_compact(ctx, target, limit, pipeline).await } + #[async_backtrace::framed] async fn recluster( &self, ctx: Arc, @@ -634,6 +652,7 @@ impl Table for FuseTable { self.do_recluster(ctx, pipeline, push_downs).await } + #[async_backtrace::framed] async fn revert_to( &self, ctx: Arc, diff --git a/src/query/storages/fuse/src/io/files.rs b/src/query/storages/fuse/src/io/files.rs index 5e32082d26890..16c0cbaa5cbe4 100644 --- a/src/query/storages/fuse/src/io/files.rs +++ b/src/query/storages/fuse/src/io/files.rs @@ -33,6 +33,7 @@ impl Files { /// Removes a batch of files asynchronously by splitting a list of file locations into smaller groups of size 1000, /// and then deleting each group of files using the delete_files function. #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] pub async fn remove_file_in_batch( &self, file_locations: impl IntoIterator>, @@ -65,6 +66,7 @@ impl Files { Ok(()) } + #[async_backtrace::framed] async fn delete_files(op: Operator, locations: Vec) -> Result<()> { op.remove(locations).await?; Ok(()) diff --git a/src/query/storages/fuse/src/io/read/block/block_reader_deserialize.rs b/src/query/storages/fuse/src/io/read/block/block_reader_deserialize.rs index 19423c836b0d7..dc886c9a10026 100644 --- a/src/query/storages/fuse/src/io/read/block/block_reader_deserialize.rs +++ b/src/query/storages/fuse/src/io/read/block/block_reader_deserialize.rs @@ -59,6 +59,7 @@ impl BlockReader { } #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] pub async fn read_by_meta( &self, settings: &ReadSettings, diff --git a/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_async.rs b/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_async.rs index 4da3fd193a613..a6efdf87c8277 100644 --- a/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_async.rs +++ b/src/query/storages/fuse/src/io/read/block/block_reader_merge_io_async.rs @@ -41,6 +41,7 @@ impl BlockReader { /// /// It will *NOT* merge two requests: /// if the last io request size is larger than storage_io_page_bytes_for_read(Default is 512KB). 
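// [Editor's note: illustrative sketch, not part of the patch.] The doc comment
// above describes the merge-IO heuristic: neighbouring column reads are
// coalesced into one ranged request unless doing so would over-read. A minimal
// sketch of that idea, assuming sorted byte ranges and a hypothetical
// `max_gap` threshold standing in for storage_io_page_bytes_for_read:
fn merge_ranges(mut ranges: Vec<std::ops::Range<u64>>, max_gap: u64) -> Vec<std::ops::Range<u64>> {
    ranges.sort_by_key(|r| r.start);
    let mut merged: Vec<std::ops::Range<u64>> = Vec::with_capacity(ranges.len());
    for r in ranges {
        if let Some(prev) = merged.last_mut() {
            // Coalesce when the hole between two ranges is small enough that
            // one larger request beats a second round trip.
            if r.start.saturating_sub(prev.end) <= max_gap {
                prev.end = prev.end.max(r.end);
                continue;
            }
        }
        merged.push(r);
    }
    merged
}
// e.g. merge_ranges(vec![0..10, 12..20, 600_000..700_000], 512 * 1024)
// yields [0..20, 600_000..700_000]: the 2-byte hole is absorbed, the large one is not.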
+ #[async_backtrace::framed] async fn merge_io_read( read_settings: &ReadSettings, op: Operator, @@ -125,6 +126,7 @@ impl BlockReader { Ok(read_res) } + #[async_backtrace::framed] pub async fn read_columns_data_by_merge_io( &self, settings: &ReadSettings, @@ -179,6 +181,7 @@ impl BlockReader { } #[inline] + #[async_backtrace::framed] pub async fn read_range( op: Operator, path: &str, diff --git a/src/query/storages/fuse/src/io/read/block/block_reader_native.rs b/src/query/storages/fuse/src/io/read/block/block_reader_native.rs index 00df886fc1537..98db5998c1dbb 100644 --- a/src/query/storages/fuse/src/io/read/block/block_reader_native.rs +++ b/src/query/storages/fuse/src/io/read/block/block_reader_native.rs @@ -48,6 +48,7 @@ impl NativeReaderExt for T {} pub type Reader = Box; impl BlockReader { + #[async_backtrace::framed] pub async fn async_read_native_columns_data( &self, part: PartInfoPtr, @@ -105,6 +106,7 @@ impl BlockReader { Ok(results) } + #[async_backtrace::framed] pub async fn read_native_columns_data( op: Operator, path: &str, diff --git a/src/query/storages/fuse/src/io/read/bloom/block_filter_reader.rs b/src/query/storages/fuse/src/io/read/bloom/block_filter_reader.rs index 4da7bbdb6ae31..6b52690725dfa 100644 --- a/src/query/storages/fuse/src/io/read/bloom/block_filter_reader.rs +++ b/src/query/storages/fuse/src/io/read/bloom/block_filter_reader.rs @@ -50,6 +50,7 @@ pub trait BloomBlockFilterReader { #[async_trait::async_trait] impl BloomBlockFilterReader for Location { + #[async_backtrace::framed] async fn read_block_filter( &self, dal: Operator, @@ -184,6 +185,7 @@ where T: Future + Send + 'static, T::Output: Send + 'static, { + #[async_backtrace::framed] async fn execute_in_runtime(self, runtime: &Runtime) -> Result { runtime .try_spawn(self)? 
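// [Editor's note: illustrative sketch, not part of the patch.] Two usage
// patterns recur throughout this diff: `#[async_backtrace::framed]` annotates
// an async fn so its await points show up in task dumps, while futures handed
// to tokio::spawn (or block_on) are wrapped with
// `async_backtrace::location!().frame(..)`, since a spawned task otherwise
// starts a fresh, empty frame tree. A self-contained sketch (assuming
// async-backtrace = "0.2" and tokio with the "full" feature):
#[async_backtrace::framed]
async fn leaf() {
    tokio::time::sleep(std::time::Duration::from_millis(50)).await;
}

#[async_backtrace::framed]
async fn parent() {
    // Keep the spawned task attached to the backtrace tree, as the
    // read-source changes later in this patch do for their spawns.
    let task = tokio::spawn(async_backtrace::location!().frame(leaf()));
    // Renders the current tree of framed tasks; the exact output format is
    // an implementation detail of the crate.
    println!("{}", async_backtrace::taskdump_tree(false));
    task.await.unwrap();
}

#[tokio::main]
async fn main() {
    parent().await;
}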
diff --git a/src/query/storages/fuse/src/io/read/bloom/column_filter_reader.rs b/src/query/storages/fuse/src/io/read/bloom/column_filter_reader.rs index 886366e7e0014..34d8ceb72d0bf 100644 --- a/src/query/storages/fuse/src/io/read/bloom/column_filter_reader.rs +++ b/src/query/storages/fuse/src/io/read/bloom/column_filter_reader.rs @@ -112,6 +112,7 @@ impl BloomColumnFilterReader { } } + #[async_backtrace::framed] pub async fn read(&self) -> Result> { self.cached_reader.read(&self.param).await } @@ -128,6 +129,7 @@ pub struct Xor8FilterLoader { #[async_trait::async_trait] impl Loader for Xor8FilterLoader { + #[async_backtrace::framed] async fn load(&self, params: &LoadParams) -> Result { let bytes = self .operator diff --git a/src/query/storages/fuse/src/io/read/meta/meta_readers.rs b/src/query/storages/fuse/src/io/read/meta/meta_readers.rs index 252d4ae57b4f7..a1956a259cfb7 100644 --- a/src/query/storages/fuse/src/io/read/meta/meta_readers.rs +++ b/src/query/storages/fuse/src/io/read/meta/meta_readers.rs @@ -84,6 +84,7 @@ pub struct LoaderWrapper(T); #[async_trait::async_trait] impl Loader for LoaderWrapper { + #[async_backtrace::framed] async fn load(&self, params: &LoadParams) -> Result { let reader = bytes_reader(&self.0, params.location.as_str(), params.len_hint).await?; let version = SnapshotVersion::try_from(params.ver)?; @@ -93,6 +94,7 @@ impl Loader for LoaderWrapper { #[async_trait::async_trait] impl Loader for LoaderWrapper { + #[async_backtrace::framed] async fn load(&self, params: &LoadParams) -> Result { let version = TableSnapshotStatisticsVersion::try_from(params.ver)?; let reader = bytes_reader(&self.0, params.location.as_str(), params.len_hint).await?; @@ -102,6 +104,7 @@ impl Loader for LoaderWrapper { #[async_trait::async_trait] impl Loader for LoaderWrapper<(Operator, TableSchemaRef)> { + #[async_backtrace::framed] async fn load(&self, params: &LoadParams) -> Result { let version = SegmentInfoVersion::try_from(params.ver)?; let LoaderWrapper((operator, schema)) = &self; @@ -112,6 +115,7 @@ impl Loader for LoaderWrapper<(Operator, TableSchemaRef)> { #[async_trait::async_trait] impl Loader for LoaderWrapper { + #[async_backtrace::framed] async fn load(&self, params: &LoadParams) -> Result { let mut reader = bytes_reader(&self.0, params.location.as_str(), params.len_hint).await?; // read the ThriftFileMetaData, omit unnecessary conversions @@ -154,6 +158,7 @@ mod thrift_file_meta_read { /// The number of bytes read at the end of the parquet file on first read const DEFAULT_FOOTER_READ_SIZE: u64 = 64 * 1024; + #[async_backtrace::framed] async fn stream_len( seek: &mut (impl AsyncSeek + std::marker::Unpin), ) -> std::result::Result { @@ -173,6 +178,7 @@ mod thrift_file_meta_read { i32::from_le_bytes(buffer[len - 8..len - 4].try_into().unwrap()) } + #[async_backtrace::framed] pub async fn read_thrift_file_metadata( reader: &mut R, ) -> common_arrow::parquet::error::Result { diff --git a/src/query/storages/fuse/src/io/read/meta/versioned_reader.rs b/src/query/storages/fuse/src/io/read/meta/versioned_reader.rs index 2fdcfde9f414a..ac1c23e3aa729 100644 --- a/src/query/storages/fuse/src/io/read/meta/versioned_reader.rs +++ b/src/query/storages/fuse/src/io/read/meta/versioned_reader.rs @@ -36,6 +36,7 @@ pub trait VersionedReader { #[async_trait::async_trait] impl VersionedReader for SnapshotVersion { + #[async_backtrace::framed] async fn read(&self, reader: R) -> Result where R: AsyncRead + Unpin + Send { let r = match self { @@ -53,6 +54,7 @@ impl VersionedReader for 
SnapshotVersion { #[async_trait::async_trait] impl VersionedReader for TableSnapshotStatisticsVersion { + #[async_backtrace::framed] async fn read(&self, reader: R) -> Result where R: AsyncRead + Unpin + Send { let r = match self { @@ -64,6 +66,7 @@ impl VersionedReader for TableSnapshotStatisticsVersion #[async_trait::async_trait] impl VersionedReader for (SegmentInfoVersion, TableSchemaRef) { + #[async_backtrace::framed] async fn read(&self, reader: R) -> Result where R: AsyncRead + Unpin + Send { let schema = &self.1; diff --git a/src/query/storages/fuse/src/io/segments.rs b/src/query/storages/fuse/src/io/segments.rs index 0a21337153f80..7966498275f92 100644 --- a/src/query/storages/fuse/src/io/segments.rs +++ b/src/query/storages/fuse/src/io/segments.rs @@ -44,6 +44,7 @@ impl SegmentsIO { // Read one segment file by location. // The index is the index of the segment_location in segment_locations. + #[async_backtrace::framed] async fn read_segment( dal: Operator, segment_location: Location, @@ -66,6 +67,7 @@ impl SegmentsIO { // Read all segments information from s3 in concurrently. #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] pub async fn read_segments( &self, segment_locations: &[Location], @@ -101,6 +103,7 @@ impl SegmentsIO { .await } + #[async_backtrace::framed] pub async fn read_segment_into( dal: Operator, segment_location: Location, @@ -126,6 +129,7 @@ impl SegmentsIO { } #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] pub async fn read_segments_into( &self, segment_locations: &[Location], diff --git a/src/query/storages/fuse/src/io/snapshots.rs b/src/query/storages/fuse/src/io/snapshots.rs index e9a8b0f66dc22..d5d97391d56ed 100644 --- a/src/query/storages/fuse/src/io/snapshots.rs +++ b/src/query/storages/fuse/src/io/snapshots.rs @@ -77,6 +77,7 @@ impl SnapshotsIO { } } + #[async_backtrace::framed] async fn read_snapshot( snapshot_location: String, format_version: u64, @@ -92,6 +93,7 @@ impl SnapshotsIO { reader.read(&load_params).await } + #[async_backtrace::framed] async fn read_snapshot_lite( snapshot_location: String, format_version: u64, @@ -141,6 +143,7 @@ impl SnapshotsIO { } #[tracing::instrument(level = "debug", skip_all)] + #[async_backtrace::framed] async fn read_snapshot_lites( &self, snapshot_files: &[String], @@ -175,6 +178,7 @@ impl SnapshotsIO { // Read all the table statistic files by the root file(exclude the root file). // limit: read how many table statistic files + #[async_backtrace::framed] pub async fn read_table_statistic_files( &self, root_ts_file: &str, @@ -188,6 +192,7 @@ impl SnapshotsIO { } // read all the precedent snapshots of given `root_snapshot` + #[async_backtrace::framed] pub async fn read_chained_snapshot_lites( &self, location_generator: TableMetaLocationGenerator, @@ -209,6 +214,7 @@ impl SnapshotsIO { // Read all the snapshots by the root file. 
// limit: limits the number of snapshot files listed // with_segment_locations: if true will get the segments of the snapshot + #[async_backtrace::framed] pub async fn read_snapshot_lites_ext( &self, root_snapshot_file: String, @@ -316,6 +322,7 @@ impl SnapshotsIO { (chained_snapshot_lites, snapshot_map.into_values().collect()) } + #[async_backtrace::framed] async fn list_files( &self, prefix: &str, diff --git a/src/query/storages/fuse/src/io/write/block_writer.rs b/src/query/storages/fuse/src/io/write/block_writer.rs index 64a41ad8a5011..297bbe844c22f 100644 --- a/src/query/storages/fuse/src/io/write/block_writer.rs +++ b/src/query/storages/fuse/src/io/write/block_writer.rs @@ -80,6 +80,7 @@ pub fn serialize_block( } /// Take ownership here to avoid extra copy. +#[async_backtrace::framed] pub async fn write_data(data: Vec, data_accessor: &Operator, location: &str) -> Result<()> { data_accessor.write(location, data).await?; diff --git a/src/query/storages/fuse/src/io/write/meta_writer.rs b/src/query/storages/fuse/src/io/write/meta_writer.rs index f63a11896acae..887182fb9dd24 100644 --- a/src/query/storages/fuse/src/io/write/meta_writer.rs +++ b/src/query/storages/fuse/src/io/write/meta_writer.rs @@ -30,6 +30,7 @@ pub trait MetaWriter { impl MetaWriter for T where T: Serialize + Sync + Send { + #[async_backtrace::framed] async fn write_meta(&self, data_accessor: &Operator, location: &str) -> Result<()> { write_to_storage(data_accessor, location, &self).await } @@ -48,6 +49,7 @@ where T: Serialize, C: CacheAccessor, { + #[async_backtrace::framed] async fn write_meta_through_cache( self, data_accessor: &Operator, diff --git a/src/query/storages/fuse/src/io/write/segment_writer.rs b/src/query/storages/fuse/src/io/write/segment_writer.rs index 1b1de2dcc1c67..b5ca3f9708d4e 100644 --- a/src/query/storages/fuse/src/io/write/segment_writer.rs +++ b/src/query/storages/fuse/src/io/write/segment_writer.rs @@ -39,6 +39,7 @@ impl<'a> SegmentWriter<'a> { } } + #[async_backtrace::framed] pub async fn write_segment(&self, segment: SegmentInfo) -> Result { let location = self.generate_location(); segment @@ -47,6 +48,7 @@ impl<'a> SegmentWriter<'a> { Ok(location) } + #[async_backtrace::framed] pub async fn write_segment_no_cache(&self, segment: &SegmentInfo) -> Result { let location = self.generate_location(); segment diff --git a/src/query/storages/fuse/src/operations/analyze.rs b/src/query/storages/fuse/src/operations/analyze.rs index 6931f78987313..362a4d8ddd282 100644 --- a/src/query/storages/fuse/src/operations/analyze.rs +++ b/src/query/storages/fuse/src/operations/analyze.rs @@ -27,6 +27,7 @@ use crate::io::SegmentsIO; use crate::FuseTable; impl FuseTable { + #[async_backtrace::framed] pub async fn do_analyze(&self, ctx: &Arc) -> Result<()> { // 1. Read table snapshot. 
let r = self.read_table_snapshot().await; diff --git a/src/query/storages/fuse/src/operations/commit.rs b/src/query/storages/fuse/src/operations/commit.rs index 193c10b9e78c5..81e6b44becfb0 100644 --- a/src/query/storages/fuse/src/operations/commit.rs +++ b/src/query/storages/fuse/src/operations/commit.rs @@ -71,6 +71,7 @@ const OCC_DEFAULT_BACKOFF_MAX_ELAPSED_MS: Duration = Duration::from_millis(120 * const MAX_RETRIES: u64 = 10; impl FuseTable { + #[async_backtrace::framed] pub async fn do_commit( &self, ctx: Arc, @@ -82,6 +83,7 @@ impl FuseTable { .await } + #[async_backtrace::framed] pub async fn commit_with_max_retry_elapsed( &self, ctx: Arc, @@ -198,6 +200,7 @@ impl FuseTable { } #[inline] + #[async_backtrace::framed] pub async fn try_commit<'a>( &'a self, ctx: Arc, @@ -345,6 +348,7 @@ impl FuseTable { Ok(new_snapshot) } + #[async_backtrace::framed] pub async fn commit_to_meta_server( ctx: &dyn TableContext, table_info: &TableInfo, @@ -484,6 +488,7 @@ impl FuseTable { } // Left a hint file which indicates the location of the latest snapshot + #[async_backtrace::framed] pub async fn write_last_snapshot_hint( operator: &Operator, location_generator: &TableMetaLocationGenerator, @@ -514,6 +519,7 @@ impl FuseTable { } // TODO refactor, it is called by segment compaction and re-cluster now + #[async_backtrace::framed] pub async fn commit_mutation( &self, ctx: &Arc, @@ -634,6 +640,7 @@ impl FuseTable { ))) } + #[async_backtrace::framed] async fn merge_with_base( ctx: Arc, operator: Operator, @@ -707,6 +714,7 @@ mod utils { use crate::metrics::metrics_inc_commit_mutation_aborts; #[inline] + #[async_backtrace::framed] pub async fn abort_operations( operator: Operator, operation_log: TableOperationLog, diff --git a/src/query/storages/fuse/src/operations/compact.rs b/src/query/storages/fuse/src/operations/compact.rs index ef2344df678d6..5f68ead3022c4 100644 --- a/src/query/storages/fuse/src/operations/compact.rs +++ b/src/query/storages/fuse/src/operations/compact.rs @@ -44,6 +44,7 @@ pub struct CompactOptions { } impl FuseTable { + #[async_backtrace::framed] pub(crate) async fn do_compact( &self, ctx: Arc, @@ -78,6 +79,7 @@ impl FuseTable { } } + #[async_backtrace::framed] async fn compact_segments( &self, ctx: Arc, @@ -109,6 +111,7 @@ impl FuseTable { /// +--------------+ | +-----------------+ +------------+ /// |CompactSourceN| ------ /// +--------------+ + #[async_backtrace::framed] async fn compact_blocks( &self, ctx: Arc, diff --git a/src/query/storages/fuse/src/operations/delete.rs b/src/query/storages/fuse/src/operations/delete.rs index 1f57dc3d05041..83c36a26679d7 100644 --- a/src/query/storages/fuse/src/operations/delete.rs +++ b/src/query/storages/fuse/src/operations/delete.rs @@ -66,6 +66,7 @@ impl FuseTable { /// +---------------+ +-----------------------+ | +-----------------+ +------------+ /// |MutationSourceN| ---> |SerializeDataTransformN| ------ /// +---------------+ +-----------------------+ + #[async_backtrace::framed] pub async fn do_delete( &self, ctx: Arc, @@ -185,6 +186,7 @@ impl FuseTable { }) } + #[async_backtrace::framed] async fn try_add_deletion_source( &self, ctx: Arc, @@ -254,6 +256,7 @@ impl FuseTable { ) } + #[async_backtrace::framed] pub async fn mutation_block_pruning( &self, ctx: Arc, diff --git a/src/query/storages/fuse/src/operations/fuse_sink.rs b/src/query/storages/fuse/src/operations/fuse_sink.rs index 86ac90e9c9aba..7d0665b5f2c17 100644 --- a/src/query/storages/fuse/src/operations/fuse_sink.rs +++ b/src/query/storages/fuse/src/operations/fuse_sink.rs 
@@ -294,6 +294,7 @@ impl Processor for FuseTableSink { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match std::mem::replace(&mut self.state, State::None) { State::Serialized { diff --git a/src/query/storages/fuse/src/operations/gc.rs b/src/query/storages/fuse/src/operations/gc.rs index 9ba20b1a7623f..21b9861ae1996 100644 --- a/src/query/storages/fuse/src/operations/gc.rs +++ b/src/query/storages/fuse/src/operations/gc.rs @@ -66,6 +66,7 @@ impl From> for LocationTuple { } impl FuseTable { + #[async_backtrace::framed] pub async fn do_purge( &self, ctx: &Arc, @@ -416,6 +417,7 @@ impl FuseTable { } // Purge file by location chunks. + #[async_backtrace::framed] async fn try_purge_location_files( &self, ctx: Arc, @@ -427,6 +429,7 @@ impl FuseTable { } // Purge file by location chunks. + #[async_backtrace::framed] async fn try_purge_location_files_and_cache( &self, ctx: Arc, @@ -444,6 +447,7 @@ impl FuseTable { .await } + #[async_backtrace::framed] async fn get_block_locations( &self, ctx: Arc, diff --git a/src/query/storages/fuse/src/operations/merge_into/mutator/merge_into_mutator.rs b/src/query/storages/fuse/src/operations/merge_into/mutator/merge_into_mutator.rs index 986bf24ffd98d..ec6c12a544b74 100644 --- a/src/query/storages/fuse/src/operations/merge_into/mutator/merge_into_mutator.rs +++ b/src/query/storages/fuse/src/operations/merge_into/mutator/merge_into_mutator.rs @@ -108,6 +108,7 @@ impl MergeIntoOperationAggregator { // aggregate mutations (currently, deletion only) impl MergeIntoOperationAggregator { + #[async_backtrace::framed] pub async fn accumulate(&mut self, merge_action: MergeIntoOperation) -> Result<()> { match &merge_action { MergeIntoOperation::Delete(DeletionByColumn { @@ -147,6 +148,7 @@ impl MergeIntoOperationAggregator { // apply the mutations and generate mutation log impl MergeIntoOperationAggregator { + #[async_backtrace::framed] pub async fn apply(&mut self) -> Result> { let mut mutation_logs = Vec::new(); for (segment_idx, block_deletion) in &self.deletion_accumulator.deletions { @@ -182,6 +184,7 @@ impl MergeIntoOperationAggregator { })) } + #[async_backtrace::framed] async fn apply_deletion_to_data_block( &self, segment_index: SegmentIndex, diff --git a/src/query/storages/fuse/src/operations/merge_into/processors/sink_commit.rs b/src/query/storages/fuse/src/operations/merge_into/processors/sink_commit.rs index 38071b0fcdfdf..fa3233e703c59 100644 --- a/src/query/storages/fuse/src/operations/merge_into/processors/sink_commit.rs +++ b/src/query/storages/fuse/src/operations/merge_into/processors/sink_commit.rs @@ -193,6 +193,7 @@ impl Processor for CommitSink { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match std::mem::replace(&mut self.state, State::None) { State::TryCommit(new_snapshot) => { diff --git a/src/query/storages/fuse/src/operations/merge_into/processors/transform_append.rs b/src/query/storages/fuse/src/operations/merge_into/processors/transform_append.rs index b5903a6507c43..d1ae7fb96343c 100644 --- a/src/query/storages/fuse/src/operations/merge_into/processors/transform_append.rs +++ b/src/query/storages/fuse/src/operations/merge_into/processors/transform_append.rs @@ -95,6 +95,7 @@ impl AppendTransform { self.block_builder.clone() } + #[async_backtrace::framed] pub async fn try_output_mutation(&mut self) -> Result> { if self.accumulator.summary_block_count >= self.write_settings.block_per_seg as u64 { self.output_mutation().await @@ -103,6 +104,7 @@ impl 
AppendTransform { } } + #[async_backtrace::framed] pub async fn output_mutation(&mut self) -> Result> { let acc = std::mem::take(&mut self.accumulator); @@ -158,6 +160,7 @@ impl AppendTransform { impl AsyncAccumulatingTransform for AppendTransform { const NAME: &'static str = "AppendTransform"; + #[async_backtrace::framed] async fn transform(&mut self, data_block: DataBlock) -> Result> { // 1. serialize block and index let block_builder = self.block_builder.clone(); @@ -209,6 +212,7 @@ impl AsyncAccumulatingTransform for AppendTransform { self.output_mutation_block(append_log) } + #[async_backtrace::framed] async fn on_finish(&mut self, _output: bool) -> Result> { // output final operation log if any let append_log = self.output_mutation().await?; diff --git a/src/query/storages/fuse/src/operations/merge_into/processors/transform_merge_into_mutation_aggregator.rs b/src/query/storages/fuse/src/operations/merge_into/processors/transform_merge_into_mutation_aggregator.rs index e358df6f06d9e..d64d0521ba2aa 100644 --- a/src/query/storages/fuse/src/operations/merge_into/processors/transform_merge_into_mutation_aggregator.rs +++ b/src/query/storages/fuse/src/operations/merge_into/processors/transform_merge_into_mutation_aggregator.rs @@ -32,6 +32,7 @@ pub use crate::operations::merge_into::mutator::merge_into_mutator::MergeIntoOpe impl AsyncAccumulatingTransform for MergeIntoOperationAggregator { const NAME: &'static str = "MergeIntoMutationAggregator"; + #[async_backtrace::framed] async fn transform(&mut self, data: DataBlock) -> Result> { // accumulate mutations let merge_into_operation = MergeIntoOperation::try_from(data)?; @@ -40,6 +41,7 @@ impl AsyncAccumulatingTransform for MergeIntoOperationAggregator { Ok(None) } + #[async_backtrace::framed] async fn on_finish(&mut self, _output: bool) -> Result> { // apply mutations let mutation_logs = self.apply().await?; diff --git a/src/query/storages/fuse/src/operations/merge_into/processors/transform_mutation_aggregator.rs b/src/query/storages/fuse/src/operations/merge_into/processors/transform_mutation_aggregator.rs index 1f530258c98a1..bc21e486223ca 100644 --- a/src/query/storages/fuse/src/operations/merge_into/processors/transform_mutation_aggregator.rs +++ b/src/query/storages/fuse/src/operations/merge_into/processors/transform_mutation_aggregator.rs @@ -103,12 +103,14 @@ impl TableMutationAggregator { impl AsyncAccumulatingTransform for TableMutationAggregator { const NAME: &'static str = "MutationAggregator"; + #[async_backtrace::framed] async fn transform(&mut self, data: DataBlock) -> Result> { let mutation = MutationLogs::try_from(data)?; self.accumulate_mutation(mutation); Ok(None) } + #[async_backtrace::framed] async fn on_finish(&mut self, _output: bool) -> Result> { let mutations: CommitMeta = self.apply_mutations().await?; debug!("mutations {:?}", mutations); @@ -118,6 +120,7 @@ impl AsyncAccumulatingTransform for TableMutationAggregator { } impl TableMutationAggregator { + #[async_backtrace::framed] async fn apply_mutations(&mut self) -> Result { let base_segments_paths = self.base_segments.clone(); // NOTE: order matters! 
@@ -134,6 +137,7 @@ impl TableMutationAggregator { Ok::<_, ErrorCode>(commit_meta) } + #[async_backtrace::framed] async fn read_segments(&self) -> Result>> { let segments_io = SegmentsIO::create(self.ctx.clone(), self.dal.clone(), self.schema.clone()); @@ -147,6 +151,7 @@ impl TableMutationAggregator { } // TODO use batch_meta_writer + #[async_backtrace::framed] async fn write_segments(&self, segments: Vec) -> Result<()> { let mut tasks = Vec::with_capacity(segments.len()); for segment in segments { diff --git a/src/query/storages/fuse/src/operations/mutation/abort_operation.rs b/src/query/storages/fuse/src/operations/mutation/abort_operation.rs index e8cf742f81c58..6d500df210640 100644 --- a/src/query/storages/fuse/src/operations/mutation/abort_operation.rs +++ b/src/query/storages/fuse/src/operations/mutation/abort_operation.rs @@ -48,6 +48,7 @@ impl AbortOperation { self.segments.push(segment); } + #[async_backtrace::framed] pub async fn abort(self, ctx: Arc, operator: Operator) -> Result<()> { let fuse_file = Files::create(ctx, operator); // TODO the segments and the bloom filters? diff --git a/src/query/storages/fuse/src/operations/mutation/base_mutator.rs b/src/query/storages/fuse/src/operations/mutation/base_mutator.rs index 5b182a679e0c0..fe9eafb9d8891 100644 --- a/src/query/storages/fuse/src/operations/mutation/base_mutator.rs +++ b/src/query/storages/fuse/src/operations/mutation/base_mutator.rs @@ -87,6 +87,7 @@ impl BaseMutator { }); } + #[async_backtrace::framed] pub async fn generate_segments(&self) -> Result<(Vec, Statistics, AbortOperation)> { let mut abort_operation = AbortOperation::default(); let segments = self.base_snapshot.segments.clone(); diff --git a/src/query/storages/fuse/src/operations/mutation/compact/block_compact_mutator.rs b/src/query/storages/fuse/src/operations/mutation/compact/block_compact_mutator.rs index f8b81569e2a04..7dd7879a2ed1e 100644 --- a/src/query/storages/fuse/src/operations/mutation/compact/block_compact_mutator.rs +++ b/src/query/storages/fuse/src/operations/mutation/compact/block_compact_mutator.rs @@ -78,6 +78,7 @@ impl BlockCompactMutator { } } + #[async_backtrace::framed] pub async fn target_select(&mut self) -> Result<()> { let start = Instant::now(); let snapshot = self.compact_params.base_snapshot.clone(); diff --git a/src/query/storages/fuse/src/operations/mutation/compact/compact_source.rs b/src/query/storages/fuse/src/operations/mutation/compact/compact_source.rs index d28c01ca1d860..c2abb523d3371 100644 --- a/src/query/storages/fuse/src/operations/mutation/compact/compact_source.rs +++ b/src/query/storages/fuse/src/operations/mutation/compact/compact_source.rs @@ -115,6 +115,7 @@ impl Processor for CompactSource { Ok(Event::Async) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match self.ctx.get_partition() { Some(part) => { diff --git a/src/query/storages/fuse/src/operations/mutation/compact/segment_compact_mutator.rs b/src/query/storages/fuse/src/operations/mutation/compact/segment_compact_mutator.rs index f522c719fa767..8fec10fae9a3a 100644 --- a/src/query/storages/fuse/src/operations/mutation/compact/segment_compact_mutator.rs +++ b/src/query/storages/fuse/src/operations/mutation/compact/segment_compact_mutator.rs @@ -77,6 +77,7 @@ impl SegmentCompactMutator { #[async_trait::async_trait] impl TableMutator for SegmentCompactMutator { + #[async_backtrace::framed] async fn target_select(&mut self) -> Result { let select_begin = Instant::now(); @@ -121,6 +122,7 @@ impl TableMutator for 
SegmentCompactMutator { Ok(self.has_compaction()) } + #[async_backtrace::framed] async fn try_commit(self: Box, table: Arc) -> Result<()> { if !self.has_compaction() { // defensive checking @@ -189,6 +191,7 @@ impl<'a> SegmentCompactor<'a> { } } + #[async_backtrace::framed] pub async fn compact( mut self, reverse_locations: Vec, @@ -282,6 +285,7 @@ impl<'a> SegmentCompactor<'a> { } // accumulate one segment + #[async_backtrace::framed] pub async fn add(&mut self, segment_info: Arc, location: Location) -> Result<()> { let num_blocks_current_segment = segment_info.blocks.len() as u64; @@ -313,6 +317,7 @@ impl<'a> SegmentCompactor<'a> { Ok(()) } + #[async_backtrace::framed] async fn compact_fragments(&mut self) -> Result<()> { if self.fragmented_segments.is_empty() { return Ok(()); @@ -362,6 +367,7 @@ impl<'a> SegmentCompactor<'a> { } // finalize the compaction, compacts left fragments (if any) + #[async_backtrace::framed] pub async fn finalize(mut self) -> Result { if !self.fragmented_segments.is_empty() { // some fragments left, compact them diff --git a/src/query/storages/fuse/src/operations/mutation/compact/transform_compact_aggregator.rs b/src/query/storages/fuse/src/operations/mutation/compact/transform_compact_aggregator.rs index 5e07411f1e00e..e6488cd5f9b59 100644 --- a/src/query/storages/fuse/src/operations/mutation/compact/transform_compact_aggregator.rs +++ b/src/query/storages/fuse/src/operations/mutation/compact/transform_compact_aggregator.rs @@ -86,6 +86,7 @@ impl CompactAggregator { } } + #[async_backtrace::framed] async fn write_segment(dal: Operator, segment: SerializedSegment) -> Result<()> { dal.write(&segment.location, serde_json::to_vec(&segment.segment)?) .await?; @@ -95,6 +96,7 @@ impl CompactAggregator { Ok(()) } + #[async_backtrace::framed] async fn write_segments(&self, segments: Vec) -> Result<()> { let mut iter = segments.iter(); let tasks = std::iter::from_fn(move || { @@ -123,6 +125,7 @@ impl CompactAggregator { impl AsyncAccumulatingTransform for CompactAggregator { const NAME: &'static str = "CompactAggregator"; + #[async_backtrace::framed] async fn transform(&mut self, data: DataBlock) -> Result> { // gather the input data. 
if let Some(meta) = data @@ -153,6 +156,7 @@ impl AsyncAccumulatingTransform for CompactAggregator { Ok(None) } + #[async_backtrace::framed] async fn on_finish(&mut self, _output: bool) -> Result> { let mut serialized_segments = Vec::with_capacity(self.merge_blocks.len()); for (segment_idx, block_map) in std::mem::take(&mut self.merge_blocks) { diff --git a/src/query/storages/fuse/src/operations/mutation/mutation_sink.rs b/src/query/storages/fuse/src/operations/mutation/mutation_sink.rs index 6e5a2bf9f41a3..6b2da401b1647 100644 --- a/src/query/storages/fuse/src/operations/mutation/mutation_sink.rs +++ b/src/query/storages/fuse/src/operations/mutation/mutation_sink.rs @@ -201,6 +201,7 @@ impl Processor for MutationSink { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match std::mem::replace(&mut self.state, State::None) { State::TryCommit(new_snapshot) => { diff --git a/src/query/storages/fuse/src/operations/mutation/mutation_source.rs b/src/query/storages/fuse/src/operations/mutation/mutation_source.rs index 2f38995272914..49f4d05e30b29 100644 --- a/src/query/storages/fuse/src/operations/mutation/mutation_source.rs +++ b/src/query/storages/fuse/src/operations/mutation/mutation_source.rs @@ -314,6 +314,7 @@ impl Processor for MutationSource { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match std::mem::replace(&mut self.state, State::Finish) { State::ReadData(Some(part)) => { diff --git a/src/query/storages/fuse/src/operations/mutation/mutation_transform.rs b/src/query/storages/fuse/src/operations/mutation/mutation_transform.rs index 8c1994b49b3ec..40315b2084ce7 100644 --- a/src/query/storages/fuse/src/operations/mutation/mutation_transform.rs +++ b/src/query/storages/fuse/src/operations/mutation/mutation_transform.rs @@ -148,6 +148,7 @@ impl MutationTransform { } } + #[async_backtrace::framed] async fn write_segments(&self, segments: Vec) -> Result<()> { let mut tasks = Vec::with_capacity(segments.len()); for segment in segments { @@ -324,6 +325,7 @@ impl Processor for MutationTransform { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match std::mem::replace(&mut self.state, State::None) { State::ReadSegments => { diff --git a/src/query/storages/fuse/src/operations/mutation/recluster_mutator.rs b/src/query/storages/fuse/src/operations/mutation/recluster_mutator.rs index a221df1d2a5df..218342f106786 100644 --- a/src/query/storages/fuse/src/operations/mutation/recluster_mutator.rs +++ b/src/query/storages/fuse/src/operations/mutation/recluster_mutator.rs @@ -88,6 +88,7 @@ impl ReclusterMutator { #[async_trait::async_trait] impl TableMutator for ReclusterMutator { + #[async_backtrace::framed] async fn target_select(&mut self) -> Result { let blocks_map = self.blocks_map.clone(); for (level, block_metas) in blocks_map.into_iter() { @@ -205,6 +206,7 @@ impl TableMutator for ReclusterMutator { Ok(false) } + #[async_backtrace::framed] async fn try_commit(self: Box, table: Arc) -> Result<()> { let ctx = &self.base_mutator.ctx; let (mut segments, mut summary, mut abort_operation) = diff --git a/src/query/storages/fuse/src/operations/mutation/serialize_data_transform.rs b/src/query/storages/fuse/src/operations/mutation/serialize_data_transform.rs index 960bb9166f996..480ebb4c85853 100644 --- a/src/query/storages/fuse/src/operations/mutation/serialize_data_transform.rs +++ b/src/query/storages/fuse/src/operations/mutation/serialize_data_transform.rs @@ -238,6 +238,7 @@ impl 
Processor for SerializeDataTransform { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match std::mem::replace(&mut self.state, State::Consume) { State::Serialized(serialize_state, block_meta) => { diff --git a/src/query/storages/fuse/src/operations/navigate.rs b/src/query/storages/fuse/src/operations/navigate.rs index 4b840c4a3a78c..4dd076523c075 100644 --- a/src/query/storages/fuse/src/operations/navigate.rs +++ b/src/query/storages/fuse/src/operations/navigate.rs @@ -28,6 +28,7 @@ use crate::io::SnapshotHistoryReader; use crate::FuseTable; impl FuseTable { + #[async_backtrace::framed] pub async fn navigate_to_time_point( &self, time_point: DateTime, @@ -41,6 +42,7 @@ impl FuseTable { }) .await } + #[async_backtrace::framed] pub async fn navigate_to_snapshot(&self, snapshot_id: &str) -> Result> { self.find(|snapshot| { snapshot @@ -53,6 +55,7 @@ impl FuseTable { .await } + #[async_backtrace::framed] pub async fn find
<P>
(&self, mut pred: P) -> Result> where P: FnMut(&TableSnapshot) -> bool { let snapshot_location = if let Some(loc) = self.snapshot_loc().await? { diff --git a/src/query/storages/fuse/src/operations/read/native_data_source_reader.rs b/src/query/storages/fuse/src/operations/read/native_data_source_reader.rs index 6a8994308d18e..1df84c7007240 100644 --- a/src/query/storages/fuse/src/operations/read/native_data_source_reader.rs +++ b/src/query/storages/fuse/src/operations/read/native_data_source_reader.rs @@ -135,6 +135,7 @@ impl Processor for ReadNativeDataSource { Ok(Event::Async) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { let parts = self.partitions.steal(self.id, self.batch_size); @@ -145,9 +146,9 @@ impl Processor for ReadNativeDataSource { let block_reader = self.block_reader.clone(); chunks.push(async move { - let handler = tokio::spawn(async move { + let handler = tokio::spawn(async_backtrace::location!().frame(async move { block_reader.async_read_native_columns_data(part).await - }); + })); handler.await.unwrap() }); } diff --git a/src/query/storages/fuse/src/operations/read/parquet_data_source_reader.rs b/src/query/storages/fuse/src/operations/read/parquet_data_source_reader.rs index 72b3fe8b5acef..b7a33c23a1cbc 100644 --- a/src/query/storages/fuse/src/operations/read/parquet_data_source_reader.rs +++ b/src/query/storages/fuse/src/operations/read/parquet_data_source_reader.rs @@ -132,6 +132,7 @@ impl Processor for ReadParquetDataSource { Ok(Event::Async) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { let parts = self.partitions.steal(self.id, self.batch_size); @@ -143,7 +144,7 @@ impl Processor for ReadParquetDataSource { let settings = ReadSettings::from_ctx(&self.partitions.ctx)?; chunks.push(async move { - tokio::spawn(async move { + tokio::spawn(async_backtrace::location!().frame(async move { let part = FusePartInfo::from_part(&part)?; block_reader @@ -153,7 +154,7 @@ impl Processor for ReadParquetDataSource { &part.columns_meta, ) .await - }) + })) .await .unwrap() }); diff --git a/src/query/storages/fuse/src/operations/read_partitions.rs b/src/query/storages/fuse/src/operations/read_partitions.rs index 78e3e54b34481..db689048cf091 100644 --- a/src/query/storages/fuse/src/operations/read_partitions.rs +++ b/src/query/storages/fuse/src/operations/read_partitions.rs @@ -50,6 +50,7 @@ use crate::FuseTable; impl FuseTable { #[tracing::instrument(level = "debug", name = "do_read_partitions", skip_all, fields(ctx.id = ctx.get_id().as_str()))] + #[async_backtrace::framed] pub async fn do_read_partitions( &self, ctx: Arc, @@ -98,6 +99,7 @@ impl FuseTable { #[allow(clippy::too_many_arguments)] #[tracing::instrument(level = "debug", name = "prune_snapshot_blocks", skip_all, fields(ctx.id = ctx.get_id().as_str()))] + #[async_backtrace::framed] pub async fn prune_snapshot_blocks( &self, ctx: Arc, diff --git a/src/query/storages/fuse/src/operations/recluster.rs b/src/query/storages/fuse/src/operations/recluster.rs index f4ed99ab5fd96..525bdc05841cc 100644 --- a/src/query/storages/fuse/src/operations/recluster.rs +++ b/src/query/storages/fuse/src/operations/recluster.rs @@ -43,6 +43,7 @@ use crate::DEFAULT_AVG_DEPTH_THRESHOLD; use crate::FUSE_OPT_KEY_ROW_AVG_DEPTH_THRESHOLD; impl FuseTable { + #[async_backtrace::framed] pub(crate) async fn do_recluster( &self, ctx: Arc, diff --git a/src/query/storages/fuse/src/operations/replace.rs b/src/query/storages/fuse/src/operations/replace.rs index 7621fccf91bbc..e72f9bbc19d2d 
100644 --- a/src/query/storages/fuse/src/operations/replace.rs +++ b/src/query/storages/fuse/src/operations/replace.rs @@ -97,6 +97,7 @@ impl FuseTable { // └─────►│ResizeProcessor(1) ├──────►│TableMutationAggregator├────────►│ CommitSink │ // └───────────────────┘ └───────────────────────┘ └───────────────────┘ + #[async_backtrace::framed] pub async fn build_replace_pipeline<'a>( &'a self, ctx: Arc, @@ -253,6 +254,7 @@ impl FuseTable { Ok(()) } + #[async_backtrace::framed] async fn merge_into_mutators( &self, ctx: Arc, @@ -313,6 +315,7 @@ impl FuseTable { ) } + #[async_backtrace::framed] async fn chain_mutation_pipes( &self, ctx: &Arc, diff --git a/src/query/storages/fuse/src/operations/revert.rs b/src/query/storages/fuse/src/operations/revert.rs index 7966bf0097781..73ba277619cba 100644 --- a/src/query/storages/fuse/src/operations/revert.rs +++ b/src/query/storages/fuse/src/operations/revert.rs @@ -23,6 +23,7 @@ use common_meta_types::MatchSeq; use crate::FuseTable; impl FuseTable { + #[async_backtrace::framed] pub async fn do_revert_to( &self, ctx: &dyn TableContext, diff --git a/src/query/storages/fuse/src/operations/truncate.rs b/src/query/storages/fuse/src/operations/truncate.rs index ecce206305514..89b14322e067e 100644 --- a/src/query/storages/fuse/src/operations/truncate.rs +++ b/src/query/storages/fuse/src/operations/truncate.rs @@ -29,6 +29,7 @@ use crate::FuseTable; impl FuseTable { #[inline] + #[async_backtrace::framed] pub async fn do_truncate(&self, ctx: Arc, purge: bool) -> Result<()> { if let Some(prev_snapshot) = self.read_table_snapshot().await? { let prev_id = prev_snapshot.snapshot_id; diff --git a/src/query/storages/fuse/src/operations/update.rs b/src/query/storages/fuse/src/operations/update.rs index 1ea037744ade8..661820b1b3e80 100644 --- a/src/query/storages/fuse/src/operations/update.rs +++ b/src/query/storages/fuse/src/operations/update.rs @@ -39,6 +39,7 @@ use crate::FuseTable; impl FuseTable { /// UPDATE column = expression WHERE condition /// The flow of Pipeline is the same as that of deletion. + #[async_backtrace::framed] pub async fn do_update( &self, ctx: Arc, @@ -102,6 +103,7 @@ impl FuseTable { Ok(()) } + #[async_backtrace::framed] async fn try_add_update_source( &self, ctx: Arc, diff --git a/src/query/storages/fuse/src/pruning/block_pruner.rs b/src/query/storages/fuse/src/pruning/block_pruner.rs index 93edf25cc1a1b..bd312131d79fd 100644 --- a/src/query/storages/fuse/src/pruning/block_pruner.rs +++ b/src/query/storages/fuse/src/pruning/block_pruner.rs @@ -40,6 +40,7 @@ impl BlockPruner { Ok(BlockPruner { pruning_ctx }) } + #[async_backtrace::framed] pub async fn pruning( &self, segment_idx: usize, @@ -57,6 +58,7 @@ impl BlockPruner { } // async pruning with bloom index. + #[async_backtrace::framed] async fn block_pruning( &self, bloom_pruner: &Arc, diff --git a/src/query/storages/fuse/src/pruning/bloom_pruner.rs b/src/query/storages/fuse/src/pruning/bloom_pruner.rs index 265ee509dfc1e..a322ebf4fabd4 100644 --- a/src/query/storages/fuse/src/pruning/bloom_pruner.rs +++ b/src/query/storages/fuse/src/pruning/bloom_pruner.rs @@ -99,6 +99,7 @@ impl BloomPrunerCreator { } // Check a location file is hit or not by bloom filter. 
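// [Editor's note: illustrative sketch, not part of the patch.] The comment
// above relies on one-sided filter semantics: a bloom/xor filter may report
// false positives but never false negatives, so a block can be pruned only on
// a definite miss. A sketch of that decision rule, with a plain HashSet as a
// hypothetical stand-in for the Xor8 filter:
fn should_keep(filter: Option<&std::collections::HashSet<u64>>, key_hash: u64) -> bool {
    match filter {
        // No usable filter (missing, unsupported type, read error): keep the
        // block, since pruning here could drop matching rows.
        None => true,
        // "contains" may be a false positive (keep and scan anyway);
        // a miss is definitive, so the block is safely pruned.
        Some(f) => f.contains(&key_hash),
    }
}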
+ #[async_backtrace::framed] pub async fn apply( &self, index_location: &Location, @@ -145,6 +146,7 @@ impl BloomPrunerCreator { #[async_trait::async_trait] impl BloomPruner for BloomPrunerCreator { + #[async_backtrace::framed] async fn should_keep( &self, index_location: &Option, diff --git a/src/query/storages/fuse/src/pruning/fuse_pruner.rs b/src/query/storages/fuse/src/pruning/fuse_pruner.rs index 1b69a7cd8a6c6..15726b8c0aa51 100644 --- a/src/query/storages/fuse/src/pruning/fuse_pruner.rs +++ b/src/query/storages/fuse/src/pruning/fuse_pruner.rs @@ -161,6 +161,7 @@ impl FusePruner { // Pruning chain: // segment pruner -> block pruner -> topn pruner + #[async_backtrace::framed] pub async fn pruning( &self, segment_locs: Vec, diff --git a/src/query/storages/fuse/src/pruning/segment_pruner.rs b/src/query/storages/fuse/src/pruning/segment_pruner.rs index d4168ed2f4689..51dee26b6c0d7 100644 --- a/src/query/storages/fuse/src/pruning/segment_pruner.rs +++ b/src/query/storages/fuse/src/pruning/segment_pruner.rs @@ -46,6 +46,7 @@ impl SegmentPruner { }) } + #[async_backtrace::framed] pub async fn pruning( &self, segment_locs: Vec, @@ -100,6 +101,7 @@ impl SegmentPruner { } // Pruning segment with range pruner, then pruning on Block. + #[async_backtrace::framed] async fn segment_pruning( pruning_ctx: Arc, permit: OwnedSemaphorePermit, diff --git a/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information.rs b/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information.rs index 6af1aa4b37411..8d11c9fb112c4 100644 --- a/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information.rs +++ b/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information.rs @@ -83,6 +83,7 @@ impl<'a> ClusteringInformation<'a> { } } + #[async_backtrace::framed] pub async fn get_clustering_info(&self) -> Result { let snapshot = self.table.read_table_snapshot().await?; diff --git a/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information_table.rs b/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information_table.rs index 9f5e8df481d5c..c35de5c4abc32 100644 --- a/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information_table.rs +++ b/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information_table.rs @@ -93,6 +93,7 @@ impl Table for ClusteringInformationTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, @@ -162,6 +163,7 @@ impl AsyncSource for ClusteringInformationSource { const NAME: &'static str = "clustering_information"; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.finish { return Ok(None); diff --git a/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block.rs b/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block.rs index 349f1d1dd048c..4d9b7d686c0b4 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block.rs @@ -63,6 +63,7 @@ impl<'a> FuseBlock<'a> { } } + #[async_backtrace::framed] pub async fn get_blocks(&self) -> Result { let tbl = self.table; let maybe_snapshot = tbl.read_table_snapshot().await?; @@ -96,6 +97,7 @@ impl<'a> FuseBlock<'a> { ))) } + #[async_backtrace::framed] async fn to_block(&self, snapshot: Arc) -> Result { let len = 
snapshot.summary.block_count as usize; let limit = self.limit.unwrap_or(usize::MAX); diff --git a/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block_table.rs b/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block_table.rs index aed26e1178a30..2fb0b04fd2b67 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block_table.rs @@ -92,6 +92,7 @@ impl Table for FuseBlockTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, @@ -170,6 +171,7 @@ impl AsyncSource for FuseBlockSource { const NAME: &'static str = "fuse_block"; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.finish { return Ok(None); diff --git a/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment.rs b/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment.rs index 02e51ecf570a2..98d4e6223a8db 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment.rs @@ -49,6 +49,7 @@ impl<'a> FuseSegment<'a> { } } + #[async_backtrace::framed] pub async fn get_segments(&self) -> Result { let tbl = self.table; let maybe_snapshot = tbl.read_table_snapshot().await?; @@ -78,6 +79,7 @@ impl<'a> FuseSegment<'a> { ))) } + #[async_backtrace::framed] async fn to_block(&self, segment_locations: &[Location]) -> Result { let len = segment_locations.len(); let mut format_versions: Vec = Vec::with_capacity(len); diff --git a/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment_table.rs b/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment_table.rs index 3563f6e2949fa..a28e4472d7a62 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment_table.rs @@ -92,6 +92,7 @@ impl Table for FuseSegmentTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, @@ -162,6 +163,7 @@ impl AsyncSource for FuseSegmentSource { const NAME: &'static str = "fuse_segment"; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.finish { return Ok(None); diff --git a/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot.rs b/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot.rs index e8eabfb6ebae1..f7019a49af2bb 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot.rs @@ -45,6 +45,7 @@ impl<'a> FuseSnapshot<'a> { Self { ctx, table } } + #[async_backtrace::framed] pub async fn get_snapshots(self, limit: Option) -> Result { let meta_location_generator = self.table.meta_location_generator.clone(); let snapshot_location = self.table.snapshot_loc().await?; diff --git a/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot_table.rs b/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot_table.rs index 31daa3e0c0e00..7befeb9de54cb 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot_table.rs @@ -89,6 +89,7 @@ impl Table for FuseSnapshotTable { &self.table_info } + 
#[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, @@ -169,6 +170,7 @@ impl AsyncSource for FuseSnapshotSource { const NAME: &'static str = "fuse_snapshot"; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.finish { return Ok(None); diff --git a/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic.rs b/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic.rs index 203f871fbbb24..fc2baac752932 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic.rs @@ -38,6 +38,7 @@ impl<'a> FuseStatistic<'a> { Self { ctx, table } } + #[async_backtrace::framed] pub async fn get_statistic(self) -> Result { let snapshot_opt = self.table.read_table_snapshot().await?; if let Some(snapshot) = snapshot_opt { diff --git a/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic_table.rs b/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic_table.rs index fc85085c24e30..f04c488aea9cf 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic_table.rs @@ -89,6 +89,7 @@ impl Table for FuseStatisticTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, @@ -165,6 +166,7 @@ impl AsyncSource for FuseStatisticSource { const NAME: &'static str = "fuse_statistic"; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn generate(&mut self) -> Result> { if self.finish { return Ok(None); diff --git a/src/query/storages/hive/hive/Cargo.toml b/src/query/storages/hive/hive/Cargo.toml index 847934a2aa7ff..e66c8e3b7cccb 100644 --- a/src/query/storages/hive/hive/Cargo.toml +++ b/src/query/storages/hive/hive/Cargo.toml @@ -31,6 +31,7 @@ storages-common-cache-manager = { path = "../../common/cache-manager" } storages-common-index = { path = "../../common/index" } storages-common-table-meta = { path = "../../common/table-meta" } +async-backtrace = { workspace = true } async-recursion = "1.0.0" async-trait = "0.1.57" chrono = { workspace = true } diff --git a/src/query/storages/hive/hive/src/hive_catalog.rs b/src/query/storages/hive/hive/src/hive_catalog.rs index 3adc87ae2edbe..63a2b802d2b3d 100644 --- a/src/query/storages/hive/hive/src/hive_catalog.rs +++ b/src/query/storages/hive/hive/src/hive_catalog.rs @@ -88,6 +88,7 @@ impl HiveCatalog { Ok(ThriftHiveMetastoreSyncClient::new(i_prot, o_prot)) } + #[async_backtrace::framed] pub async fn get_partitions( &self, db: String, @@ -124,6 +125,7 @@ impl HiveCatalog { } #[tracing::instrument(level = "info", skip(self))] + #[async_backtrace::framed] pub async fn get_partition_names( &self, db: String, @@ -220,7 +222,8 @@ impl Catalog for HiveCatalog { self } #[tracing::instrument(level = "info", skip(self))] + #[async_backtrace::framed] async fn get_database(&self, _tenant: &str, db_name: &str) -> Result> { let client = self.get_client()?; let _tenant = _tenant.to_string(); @@ -231,29 +234,34 @@ impl Catalog for HiveCatalog { } // Get all the databases. + #[async_backtrace::framed] async fn list_databases(&self, _tenant: &str) -> Result>> { todo!() } // Operation with database.
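Where a method already carries #[tracing::instrument], the patch places #[async_backtrace::framed] directly below it (see get_partition_names and get_database above). One framed layer per function is all that is needed; stacking the attribute twice would only record the same call site as two nested, identical frames. A rough sketch of the combination, with lookup as an illustrative name and under the assumption that the two attributes compose in this order:

    use tracing::instrument;

    #[instrument(level = "info")]
    #[async_backtrace::framed]
    async fn lookup(db_name: &str) -> usize {
        // `instrument` attaches a tracing span; `framed` adds the
        // async-backtrace frame. Both wrap the same underlying future.
        db_name.len()
    }

    #[tokio::main]
    async fn main() {
        assert_eq!(lookup("default").await, 7);
    }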
+ #[async_backtrace::framed] async fn create_database(&self, _req: CreateDatabaseReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot create database in HIVE catalog", )) } + #[async_backtrace::framed] async fn drop_database(&self, _req: DropDatabaseReq) -> Result<()> { Err(ErrorCode::Unimplemented( "Cannot drop database in HIVE catalog", )) } + #[async_backtrace::framed] async fn undrop_database(&self, _req: UndropDatabaseReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot undrop database in HIVE catalog", )) } + #[async_backtrace::framed] async fn rename_database(&self, _req: RenameDatabaseReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot rename database in HIVE catalog", @@ -265,6 +273,7 @@ impl Catalog for HiveCatalog { Ok(res) } + #[async_backtrace::framed] async fn get_table_meta_by_id( &self, _table_id: MetaId, @@ -276,6 +285,7 @@ impl Catalog for HiveCatalog { } // Get one table by db and table name. #[tracing::instrument(level = "info", skip(self))] + #[async_backtrace::framed] async fn get_table( &self, _tenant: &str, @@ -290,10 +300,12 @@ impl Catalog for HiveCatalog { .unwrap() } + #[async_backtrace::framed] async fn list_tables(&self, _tenant: &str, _db_name: &str) -> Result>> { todo!() } + #[async_backtrace::framed] async fn list_tables_history( &self, _tenant: &str, @@ -304,24 +316,28 @@ impl Catalog for HiveCatalog { )) } + #[async_backtrace::framed] async fn create_table(&self, _req: CreateTableReq) -> Result<()> { Err(ErrorCode::Unimplemented( "Cannot create table in HIVE catalog", )) } + #[async_backtrace::framed] async fn drop_table_by_id(&self, _req: DropTableByIdReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot drop table in HIVE catalog", )) } + #[async_backtrace::framed] async fn undrop_table(&self, _req: UndropTableReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot undrop table in HIVE catalog", )) } + #[async_backtrace::framed] async fn rename_table(&self, _req: RenameTableReq) -> Result { Err(ErrorCode::Unimplemented( "Cannot rename table in HIVE catalog", @@ -329,6 +345,7 @@ impl Catalog for HiveCatalog { } // Check a db.table is exists or not.
+ #[async_backtrace::framed] async fn exists_table(&self, tenant: &str, db_name: &str, table_name: &str) -> Result { // TODO refine this match self.get_table(tenant, db_name, table_name).await { @@ -343,6 +360,7 @@ impl Catalog for HiveCatalog { } } + #[async_backtrace::framed] async fn upsert_table_option( &self, _tenant: &str, @@ -354,6 +372,7 @@ impl Catalog for HiveCatalog { )) } + #[async_backtrace::framed] async fn update_table_meta( &self, _table_info: &TableInfo, @@ -364,6 +383,7 @@ impl Catalog for HiveCatalog { )) } + #[async_backtrace::framed] async fn get_table_copied_file_info( &self, _tenant: &str, @@ -373,6 +393,7 @@ impl Catalog for HiveCatalog { unimplemented!() } + #[async_backtrace::framed] async fn truncate_table( &self, _table_info: &TableInfo, @@ -381,6 +402,7 @@ impl Catalog for HiveCatalog { unimplemented!() } + #[async_backtrace::framed] async fn count_tables(&self, _req: CountTablesReq) -> Result { unimplemented!() } diff --git a/src/query/storages/hive/hive/src/hive_meta_data_reader.rs b/src/query/storages/hive/hive/src/hive_meta_data_reader.rs index abc252a812eeb..0a856f13bc0ab 100644 --- a/src/query/storages/hive/hive/src/hive_meta_data_reader.rs +++ b/src/query/storages/hive/hive/src/hive_meta_data_reader.rs @@ -37,6 +37,7 @@ impl MetaDataReader { #[async_trait::async_trait] impl Loader for LoaderWrapper { + #[async_backtrace::framed] async fn load(&self, params: &LoadParams) -> Result { let mut reader = if let Some(len) = params.len_hint { self.0.range_reader(&params.location, 0..len).await? diff --git a/src/query/storages/hive/hive/src/hive_parquet_block_reader.rs b/src/query/storages/hive/hive/src/hive_parquet_block_reader.rs index 8b8a3af17d4d6..31e3d6fc14c40 100644 --- a/src/query/storages/hive/hive/src/hive_parquet_block_reader.rs +++ b/src/query/storages/hive/hive/src/hive_parquet_block_reader.rs @@ -200,6 +200,7 @@ impl HiveBlockReader { Ok(column_meta[0]) } + #[async_backtrace::framed] async fn read_column( op: Operator, path: String, @@ -207,12 +208,13 @@ impl HiveBlockReader { length: u64, semaphore: Arc, ) -> Result> { - let handler = common_base::base::tokio::spawn(async move { - let chunk = op.range_read(&path, offset..offset + length).await?; + let handler = + common_base::base::tokio::spawn(async_backtrace::location!().frame(async move { + let chunk = op.range_read(&path, offset..offset + length).await?; - let _semaphore_permit = semaphore.acquire().await.unwrap(); - Ok(chunk) - }); + let _semaphore_permit = semaphore.acquire().await.unwrap(); + Ok(chunk) + })); match handler.await { Ok(Ok(data)) => Ok(data), @@ -224,6 +226,7 @@ impl HiveBlockReader { } } + #[async_backtrace::framed] pub async fn read_meta_data( &self, dal: Operator, @@ -242,6 +245,7 @@ impl HiveBlockReader { reader.read(&load_params).await } + #[async_backtrace::framed] pub async fn read_columns_data( &self, row_group: &RowGroupMetaData, diff --git a/src/query/storages/hive/hive/src/hive_table.rs b/src/query/storages/hive/hive/src/hive_table.rs index 88b5568a535d2..9a7b757ec61bc 100644 --- a/src/query/storages/hive/hive/src/hive_table.rs +++ b/src/query/storages/hive/hive/src/hive_table.rs @@ -379,6 +379,7 @@ impl HiveTable { Ok(Arc::new(TableSchema::new(fields))) } + #[async_backtrace::framed] async fn get_query_locations_from_partition_table( &self, ctx: Arc, @@ -438,6 +439,7 @@ impl HiveTable { } // return items: (hdfs_location, option) where part info likes 'c_region=Asia/c_nation=China' + #[async_backtrace::framed] async fn get_query_locations( &self, ctx: Arc, @@ -476,6
+478,7 @@ impl HiveTable { } #[tracing::instrument(level = "info", skip(self))] + #[async_backtrace::framed] async fn list_files_from_dirs( &self, dirs: Vec<(String, Option)>, @@ -487,8 +490,10 @@ impl HiveTable { let sem_t = sem.clone(); let operator_t = self.dal.clone(); let dir_t = dir.to_string(); - let task = - tokio::spawn(async move { list_files_from_dir(operator_t, dir_t, sem_t).await }); + let task = tokio::spawn( + async_backtrace::location!() + .frame(async move { list_files_from_dir(operator_t, dir_t, sem_t).await }), + ); tasks.push((task, partition)); } @@ -505,6 +510,7 @@ impl HiveTable { } #[tracing::instrument(level = "info", skip(self, ctx))] + #[async_backtrace::framed] async fn do_read_partitions( &self, ctx: Arc, @@ -559,6 +565,7 @@ impl Table for HiveTable { false } + #[async_backtrace::framed] async fn read_partitions( &self, ctx: Arc, @@ -580,6 +587,7 @@ impl Table for HiveTable { self.do_read2(ctx, plan, pipeline) } + #[async_backtrace::framed] async fn commit_insertion( &self, _ctx: Arc, @@ -594,6 +602,7 @@ impl Table for HiveTable { ))) } + #[async_backtrace::framed] async fn truncate(&self, _ctx: Arc, _: bool) -> Result<()> { Err(ErrorCode::Unimplemented(format!( "truncate for table {} is not implemented", @@ -601,6 +610,7 @@ impl Table for HiveTable { ))) } + #[async_backtrace::framed] async fn purge(&self, _ctx: Arc, _keep_last_snapshot: bool) -> Result<()> { Ok(()) } @@ -746,7 +756,10 @@ async fn list_files_from_dir( for dir in dirs { let sem_t = sem.clone(); let operator_t = operator.clone(); - let task = tokio::spawn(async move { list_files_from_dir(operator_t, dir, sem_t).await }); + let task = tokio::spawn( + async_backtrace::location!() + .frame(async move { list_files_from_dir(operator_t, dir, sem_t).await }), + ); tasks.push(task); } diff --git a/src/query/storages/hive/hive/src/hive_table_source.rs b/src/query/storages/hive/hive/src/hive_table_source.rs index 76c8712d6d8f9..79ea37189ac82 100644 --- a/src/query/storages/hive/hive/src/hive_table_source.rs +++ b/src/query/storages/hive/hive/src/hive_table_source.rs @@ -349,6 +349,7 @@ impl Processor for HiveTableSource { } } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match std::mem::replace(&mut self.state, State::Finish) { State::ReadMeta(Some(part)) => { diff --git a/src/query/storages/iceberg/Cargo.toml b/src/query/storages/iceberg/Cargo.toml index 7a4d74fc93efa..bb582b1056759 100644 --- a/src/query/storages/iceberg/Cargo.toml +++ b/src/query/storages/iceberg/Cargo.toml @@ -16,6 +16,7 @@ common-meta-app = { path = "../../../meta/app" } common-meta-types = { path = "../../../meta/types" } common-storage = { path = "../../../common/storage" } +async-backtrace = { workspace = true } async-trait = "0.1" chrono = { workspace = true } futures = "0.3" diff --git a/src/query/storages/iceberg/src/catalog.rs b/src/query/storages/iceberg/src/catalog.rs index edc763ba36d66..b093ce37fa114 100644 --- a/src/query/storages/iceberg/src/catalog.rs +++ b/src/query/storages/iceberg/src/catalog.rs @@ -100,6 +100,7 @@ impl IcebergCatalog { /// list read databases #[tracing::instrument(level = "debug", skip(self))] + #[async_backtrace::framed] pub async fn list_database_from_read(&self) -> Result>> { if self.flatten { // is flatten catalog, return `default` catalog @@ -132,6 +133,7 @@ impl IcebergCatalog { #[async_trait] impl Catalog for IcebergCatalog { #[tracing::instrument(level = "debug", skip(self))] + #[async_backtrace::framed] async fn get_database(&self, _tenant: &str, db_name: 
&str) -> Result> { if self.flatten { // is flatten catalog, must return `default` catalog @@ -167,22 +169,27 @@ impl Catalog for IcebergCatalog { ))) } + #[async_backtrace::framed] async fn list_databases(&self, _tenant: &str) -> Result>> { self.list_database_from_read().await } + #[async_backtrace::framed] async fn create_database(&self, _req: CreateDatabaseReq) -> Result { unimplemented!() } + #[async_backtrace::framed] async fn drop_database(&self, _req: DropDatabaseReq) -> Result<()> { unimplemented!() } + #[async_backtrace::framed] async fn undrop_database(&self, _req: UndropDatabaseReq) -> Result { unimplemented!() } + #[async_backtrace::framed] async fn rename_database(&self, _req: RenameDatabaseReq) -> Result { unimplemented!() } @@ -191,6 +198,7 @@ impl Catalog for IcebergCatalog { unimplemented!() } + #[async_backtrace::framed] async fn get_table_meta_by_id( &self, _table_id: MetaId, @@ -199,6 +207,7 @@ impl Catalog for IcebergCatalog { } #[tracing::instrument(level = "info", skip(self))] + #[async_backtrace::framed] async fn get_table( &self, tenant: &str, @@ -209,11 +218,13 @@ impl Catalog for IcebergCatalog { db.get_table(table_name).await } + #[async_backtrace::framed] async fn list_tables(&self, tenant: &str, db_name: &str) -> Result>> { let db = self.get_database(tenant, db_name).await?; db.list_tables().await } + #[async_backtrace::framed] async fn list_tables_history( &self, _tenant: &str, @@ -222,22 +233,27 @@ impl Catalog for IcebergCatalog { unimplemented!() } + #[async_backtrace::framed] async fn create_table(&self, _req: CreateTableReq) -> Result<()> { unimplemented!() } + #[async_backtrace::framed] async fn drop_table_by_id(&self, _req: DropTableByIdReq) -> Result { unimplemented!() } + #[async_backtrace::framed] async fn undrop_table(&self, _req: UndropTableReq) -> Result { unimplemented!() } + #[async_backtrace::framed] async fn rename_table(&self, _req: RenameTableReq) -> Result { unimplemented!() } + #[async_backtrace::framed] async fn exists_table(&self, tenant: &str, db_name: &str, table_name: &str) -> Result { let db = self.get_database(tenant, db_name).await?; match db.get_table(table_name).await { @@ -249,6 +265,7 @@ impl Catalog for IcebergCatalog { } } + #[async_backtrace::framed] async fn upsert_table_option( &self, _tenant: &str, @@ -258,6 +275,7 @@ impl Catalog for IcebergCatalog { unimplemented!() } + #[async_backtrace::framed] async fn update_table_meta( &self, _table_info: &TableInfo, @@ -266,10 +284,12 @@ impl Catalog for IcebergCatalog { unimplemented!() } + #[async_backtrace::framed] async fn count_tables(&self, _req: CountTablesReq) -> Result { unimplemented!() } + #[async_backtrace::framed] async fn get_table_copied_file_info( &self, _tenant: &str, @@ -279,6 +299,7 @@ impl Catalog for IcebergCatalog { unimplemented!() } + #[async_backtrace::framed] async fn truncate_table( &self, _table_info: &TableInfo, diff --git a/src/query/storages/iceberg/src/database.rs b/src/query/storages/iceberg/src/database.rs index 2c129718d03bc..2d657300f8c50 100644 --- a/src/query/storages/iceberg/src/database.rs +++ b/src/query/storages/iceberg/src/database.rs @@ -98,6 +98,7 @@ impl Database for IcebergDatabase { &self.info } + #[async_backtrace::framed] async fn get_table(&self, table_name: &str) -> Result> { let path = format!("{table_name}/"); let op = self.db_root.operator(); @@ -121,6 +122,7 @@ impl Database for IcebergDatabase { return Ok(Arc::new(tbl) as Arc); } + #[async_backtrace::framed] async fn list_tables(&self) -> Result>> { let mut tables = vec![]; 
let op = self.db_root.operator(); diff --git a/src/query/storages/iceberg/src/table.rs b/src/query/storages/iceberg/src/table.rs index 880cfa64d00b6..0fefec57b2f8e 100644 --- a/src/query/storages/iceberg/src/table.rs +++ b/src/query/storages/iceberg/src/table.rs @@ -58,6 +58,7 @@ pub struct IcebergTable { impl IcebergTable { /// create a new table on the table directory + #[async_backtrace::framed] pub async fn try_create_table_from_read( catalog: &str, database: &str, @@ -106,6 +107,7 @@ impl IcebergTable { /// version_detect figures out the manifest list version of the table /// and gives the relative path from table root directory /// to latest metadata json file + #[async_backtrace::framed] async fn version_detect(tbl_root: &Operator) -> Result { // try Dremio's way // Dremio has an `version_hint.txt` file @@ -163,6 +165,7 @@ impl Table for IcebergTable { &self.get_table_info().name } + #[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, diff --git a/src/query/storages/memory/Cargo.toml b/src/query/storages/memory/Cargo.toml index 3e073020afdb8..7993695607edf 100644 --- a/src/query/storages/memory/Cargo.toml +++ b/src/query/storages/memory/Cargo.toml @@ -21,6 +21,7 @@ common-pipeline-sinks = { path = "../../pipeline/sinks" } common-pipeline-sources = { path = "../../pipeline/sources" } common-storage = { path = "../../../common/storage" } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } once_cell = "1.15.0" parking_lot = "0.12.1" diff --git a/src/query/storages/memory/src/memory_table.rs b/src/query/storages/memory/src/memory_table.rs index 0fe283ac1b727..d4c272b835b94 100644 --- a/src/query/storages/memory/src/memory_table.rs +++ b/src/query/storages/memory/src/memory_table.rs @@ -151,6 +151,7 @@ impl Table for MemoryTable { Some(self.data_metrics.clone()) } + #[async_backtrace::framed] async fn read_partitions( &self, ctx: Arc, @@ -241,6 +242,7 @@ impl Table for MemoryTable { }) } + #[async_backtrace::framed] async fn commit_insertion( &self, _: Arc, @@ -263,6 +265,7 @@ impl Table for MemoryTable { Ok(()) } + #[async_backtrace::framed] async fn truncate(&self, _ctx: Arc, _: bool) -> Result<()> { let mut blocks = self.blocks.write(); blocks.clear(); diff --git a/src/query/storages/null/Cargo.toml b/src/query/storages/null/Cargo.toml index 9b81cc01b7b66..3c66b95294706 100644 --- a/src/query/storages/null/Cargo.toml +++ b/src/query/storages/null/Cargo.toml @@ -20,6 +20,7 @@ common-pipeline-core = { path = "../../pipeline/core" } common-pipeline-sinks = { path = "../../pipeline/sinks" } common-pipeline-sources = { path = "../../pipeline/sources" } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } [build-dependencies] diff --git a/src/query/storages/null/src/null_table.rs b/src/query/storages/null/src/null_table.rs index f3c4c589a0239..413c3349587c3 100644 --- a/src/query/storages/null/src/null_table.rs +++ b/src/query/storages/null/src/null_table.rs @@ -62,6 +62,7 @@ impl Table for NullTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _ctx: Arc, diff --git a/src/query/storages/parquet/Cargo.toml b/src/query/storages/parquet/Cargo.toml index 52a637732cf96..d94c31b0812db 100644 --- a/src/query/storages/parquet/Cargo.toml +++ b/src/query/storages/parquet/Cargo.toml @@ -27,6 +27,7 @@ storages-common-index = { path = "../common/index" } storages-common-pruner = { path = "../common/pruner" } storages-common-table-meta = { path 
= "../common/table-meta" } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } chrono = { workspace = true } futures = "0.3.24" diff --git a/src/query/storages/parquet/src/parquet_reader/reader.rs b/src/query/storages/parquet/src/parquet_reader/reader.rs index c9870d2336323..68ed23fe09035 100644 --- a/src/query/storages/parquet/src/parquet_reader/reader.rs +++ b/src/query/storages/parquet/src/parquet_reader/reader.rs @@ -214,6 +214,7 @@ impl ParquetReader { Ok(readers) } + #[async_backtrace::framed] pub async fn readers_from_non_blocking_io(&self, part: PartInfoPtr) -> Result { let part = ParquetRowGroupPart::from_part(&part)?; diff --git a/src/query/storages/parquet/src/parquet_source.rs b/src/query/storages/parquet/src/parquet_source.rs index a57fdd24cfb14..9f4b2ceb5e1e8 100644 --- a/src/query/storages/parquet/src/parquet_source.rs +++ b/src/query/storages/parquet/src/parquet_source.rs @@ -169,6 +169,7 @@ impl Processor for AsyncParquetSource { Ok(Event::Async) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { let parts = self.ctx.get_partitions(self.batch_size); @@ -179,9 +180,9 @@ impl Processor for AsyncParquetSource { let block_reader = self.block_reader.clone(); readers.push(async move { - let handler = tokio::spawn(async move { + let handler = tokio::spawn(async_backtrace::location!().frame(async move { block_reader.readers_from_non_blocking_io(part).await - }); + })); handler.await.unwrap() }); } diff --git a/src/query/storages/parquet/src/parquet_table/non_blocking.rs b/src/query/storages/parquet/src/parquet_table/non_blocking.rs index 1ebea975697f9..f28b447c9beab 100644 --- a/src/query/storages/parquet/src/parquet_table/non_blocking.rs +++ b/src/query/storages/parquet/src/parquet_table/non_blocking.rs @@ -30,6 +30,7 @@ use super::table::create_parquet_table_info; use crate::ParquetTable; impl ParquetTable { + #[async_backtrace::framed] pub async fn create( stage_info: StageInfo, files_info: StageFilesInfo, @@ -66,6 +67,7 @@ impl ParquetTable { })) } + #[async_backtrace::framed] async fn prepare_metas(path: &str, operator: Operator) -> Result { // Infer schema from the first parquet file. // Assume all parquet files have the same schema. diff --git a/src/query/storages/parquet/src/parquet_table/partition.rs b/src/query/storages/parquet/src/parquet_table/partition.rs index 292db6aaa453d..5a07a07a8d0ac 100644 --- a/src/query/storages/parquet/src/parquet_table/partition.rs +++ b/src/query/storages/parquet/src/parquet_table/partition.rs @@ -33,6 +33,7 @@ use crate::ParquetTable; impl ParquetTable { #[inline] + #[async_backtrace::framed] pub(super) async fn do_read_partitions( &self, ctx: Arc, diff --git a/src/query/storages/parquet/src/parquet_table/table.rs b/src/query/storages/parquet/src/parquet_table/table.rs index 071e5ff8cd670..4fce52ffc615b 100644 --- a/src/query/storages/parquet/src/parquet_table/table.rs +++ b/src/query/storages/parquet/src/parquet_table/table.rs @@ -109,6 +109,7 @@ impl Table for ParquetTable { /// The returned partitions only record the locations of files to read. /// So they don't have any real statistics. 
+ #[async_backtrace::framed] async fn read_partitions( &self, ctx: Arc, diff --git a/src/query/storages/parquet/src/pruning.rs b/src/query/storages/parquet/src/pruning.rs index f3360155c9994..c39fc69646d1c 100644 --- a/src/query/storages/parquet/src/pruning.rs +++ b/src/query/storages/parquet/src/pruning.rs @@ -76,6 +76,7 @@ pub struct PartitionPruner { impl PartitionPruner { /// Try to read parquet meta to generate row-group-wise partitions. /// And prune row groups an pages to generate the final row group partitions. + #[async_backtrace::framed] pub async fn read_and_prune_partitions(&self) -> Result<(PartStatistics, Partitions)> { let PartitionPruner { schema, diff --git a/src/query/storages/random/Cargo.toml b/src/query/storages/random/Cargo.toml index dd061ac99c424..1f62778b06925 100644 --- a/src/query/storages/random/Cargo.toml +++ b/src/query/storages/random/Cargo.toml @@ -19,6 +19,7 @@ common-meta-app = { path = "../../../meta/app" } common-pipeline-core = { path = "../../pipeline/core" } common-pipeline-sources = { path = "../../pipeline/sources" } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } serde = { workspace = true } typetag = "0.2.3" diff --git a/src/query/storages/random/src/random_table.rs b/src/query/storages/random/src/random_table.rs index ee1eb4e1c8303..292fc3b8e110a 100644 --- a/src/query/storages/random/src/random_table.rs +++ b/src/query/storages/random/src/random_table.rs @@ -89,6 +89,7 @@ impl Table for RandomTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, ctx: Arc, diff --git a/src/query/storages/result_cache/Cargo.toml b/src/query/storages/result_cache/Cargo.toml index aaa9db5d97718..11c78feb0d659 100644 --- a/src/query/storages/result_cache/Cargo.toml +++ b/src/query/storages/result_cache/Cargo.toml @@ -24,6 +24,7 @@ common-storage = { path = "../../../common/storage" } storages-common-blocks = { path = "../common/blocks" } storages-common-table-meta = { path = "../common/table-meta" } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } opendal = { workspace = true } serde = { workspace = true } diff --git a/src/query/storages/result_cache/src/meta_manager.rs b/src/query/storages/result_cache/src/meta_manager.rs index 2d9b1352aca40..aef0062c3bf5b 100644 --- a/src/query/storages/result_cache/src/meta_manager.rs +++ b/src/query/storages/result_cache/src/meta_manager.rs @@ -35,6 +35,7 @@ impl ResultCacheMetaManager { Self { ttl, inner } } + #[async_backtrace::framed] pub async fn set( &self, key: String, @@ -57,6 +58,7 @@ impl ResultCacheMetaManager { Ok(()) } + #[async_backtrace::framed] pub async fn get(&self, key: String) -> Result> { let raw = self.inner.get_kv(&key).await?; match raw { @@ -68,6 +70,7 @@ impl ResultCacheMetaManager { } } + #[async_backtrace::framed] pub async fn list(&self, prefix: &str) -> Result> { let result = self.inner.prefix_list_kv(prefix).await?; diff --git a/src/query/storages/result_cache/src/read/reader.rs b/src/query/storages/result_cache/src/read/reader.rs index 4d4a4cd39f812..b6224d2201466 100644 --- a/src/query/storages/result_cache/src/read/reader.rs +++ b/src/query/storages/result_cache/src/read/reader.rs @@ -67,11 +67,13 @@ impl ResultCacheReader { self.meta_key.clone() } + #[async_backtrace::framed] pub async fn try_read_cached_result(&self) -> Result>> { self.try_read_cached_result_with_meta_key(self.meta_key.clone()) .await } + #[async_backtrace::framed] pub async fn 
try_read_cached_result_with_meta_key( &self, meta_key: String, @@ -93,6 +95,7 @@ impl ResultCacheReader { } } + #[async_backtrace::framed] async fn read_result_from_cache(&self, location: &str) -> Result> { let data = self.operator.read(location).await?; let mut reader = Cursor::new(data); @@ -113,6 +116,7 @@ impl ResultCacheReader { Ok(blocks) } + #[async_backtrace::framed] pub async fn read_table_schema_and_data( operator: Operator, location: &str, diff --git a/src/query/storages/result_cache/src/table_function/table.rs b/src/query/storages/result_cache/src/table_function/table.rs index 22916e16bc5a9..d6c4e7c5ad99d 100644 --- a/src/query/storages/result_cache/src/table_function/table.rs +++ b/src/query/storages/result_cache/src/table_function/table.rs @@ -1,147 +1,148 @@ -// Copyright 2023 Datafuse Labs. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::any::Any; -use std::io::Cursor; -use std::sync::Arc; - -use common_arrow::arrow::io::parquet::read::infer_schema; -use common_arrow::arrow::io::parquet::read::{self as pread}; -use common_arrow::parquet::read::read_metadata; -use common_catalog::plan::DataSourceInfo; -use common_catalog::plan::DataSourcePlan; -use common_catalog::plan::PartStatistics; -use common_catalog::plan::Partitions; -use common_catalog::plan::PushDownInfo; -use common_catalog::plan::ResultScanTableInfo; -use common_catalog::table::Table; -use common_catalog::table_args::TableArgs; -use common_catalog::table_context::TableContext; -use common_exception::Result; -use common_expression::DataBlock; -use common_expression::DataSchema; -use common_expression::Scalar; -use common_expression::TableSchema; -use common_meta_app::schema::TableIdent; -use common_meta_app::schema::TableInfo; -use common_meta_app::schema::TableMeta; -use common_pipeline_core::Pipeline; -use common_pipeline_sources::EmptySource; -use common_pipeline_sources::OneBlockSource; - -const RESULT_SCAN: &str = "result_scan"; - -pub struct ResultScan { - table_info: TableInfo, - query_id: String, - block_raw_data: Vec, -} - -impl ResultScan { - pub fn try_create( - table_schema: TableSchema, - query_id: String, - block_raw_data: Vec, - ) -> Result> { - let table_info = TableInfo { - ident: TableIdent::new(0, 0), - desc: format!("''.'{RESULT_SCAN}'"), - name: String::from(RESULT_SCAN), - meta: TableMeta { - schema: Arc::new(table_schema), - engine: String::from(RESULT_SCAN), - ..Default::default() - }, - ..Default::default() - }; - - Ok(Arc::new(ResultScan { - table_info, - query_id, - block_raw_data, - })) - } - - pub fn from_info(info: &ResultScanTableInfo) -> Result> { - Ok(Arc::new(ResultScan { - table_info: info.table_info.clone(), - query_id: info.query_id.clone(), - block_raw_data: info.block_raw_data.clone(), - })) - } -} - -#[async_trait::async_trait] -impl Table for ResultScan { - fn is_local(&self) -> bool { - true - } - - fn as_any(&self) -> &dyn Any { - self - } - - fn get_table_info(&self) -> &TableInfo { - &self.table_info - } - - fn get_data_source_info(&self) -> 
DataSourceInfo { - DataSourceInfo::ResultScanSource(ResultScanTableInfo { - table_info: self.table_info.clone(), - query_id: self.query_id.clone(), - block_raw_data: self.block_raw_data.clone(), - }) - } - - async fn read_partitions( - &self, - _: Arc, - _: Option, - ) -> Result<(PartStatistics, Partitions)> { - Ok((PartStatistics::default(), Partitions::default())) - } - - fn table_args(&self) -> Option { - let args = vec![Scalar::String(self.query_id.as_bytes().to_vec())]; - - Some(TableArgs::new_positioned(args)) - } - - fn read_data( - &self, - _ctx: Arc, - _plan: &DataSourcePlan, - pipeline: &mut Pipeline, - ) -> Result<()> { - if self.block_raw_data.is_empty() { - pipeline.add_source(EmptySource::create, 1)?; - } else { - let mut reader = Cursor::new(self.block_raw_data.clone()); - let meta = read_metadata(&mut reader)?; - let arrow_schema = infer_schema(&meta)?; - let table_schema = TableSchema::from(&arrow_schema); - let schema = DataSchema::from(&table_schema); - - // Read the parquet file into one block. - let chunks_iter = - pread::FileReader::new(reader, meta.row_groups, arrow_schema, None, None, None); - - for chunk in chunks_iter { - let block = DataBlock::from_arrow_chunk(&chunk?, &schema)?; - pipeline.add_source(|output| OneBlockSource::create(output, block.clone()), 1)?; - } - } - Ok(()) - } -} +// Copyright 2023 Datafuse Labs. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::any::Any; +use std::io::Cursor; +use std::sync::Arc; + +use common_arrow::arrow::io::parquet::read::infer_schema; +use common_arrow::arrow::io::parquet::read::{self as pread}; +use common_arrow::parquet::read::read_metadata; +use common_catalog::plan::DataSourceInfo; +use common_catalog::plan::DataSourcePlan; +use common_catalog::plan::PartStatistics; +use common_catalog::plan::Partitions; +use common_catalog::plan::PushDownInfo; +use common_catalog::plan::ResultScanTableInfo; +use common_catalog::table::Table; +use common_catalog::table_args::TableArgs; +use common_catalog::table_context::TableContext; +use common_exception::Result; +use common_expression::DataBlock; +use common_expression::DataSchema; +use common_expression::Scalar; +use common_expression::TableSchema; +use common_meta_app::schema::TableIdent; +use common_meta_app::schema::TableInfo; +use common_meta_app::schema::TableMeta; +use common_pipeline_core::Pipeline; +use common_pipeline_sources::EmptySource; +use common_pipeline_sources::OneBlockSource; + +const RESULT_SCAN: &str = "result_scan"; + +pub struct ResultScan { + table_info: TableInfo, + query_id: String, + block_raw_data: Vec, +} + +impl ResultScan { + pub fn try_create( + table_schema: TableSchema, + query_id: String, + block_raw_data: Vec, + ) -> Result> { + let table_info = TableInfo { + ident: TableIdent::new(0, 0), + desc: format!("''.'{RESULT_SCAN}'"), + name: String::from(RESULT_SCAN), + meta: TableMeta { + schema: Arc::new(table_schema), + engine: String::from(RESULT_SCAN), + ..Default::default() + }, + ..Default::default() + }; + + Ok(Arc::new(ResultScan { + table_info, + query_id, + block_raw_data, + })) + } + + pub fn from_info(info: &ResultScanTableInfo) -> Result> { + Ok(Arc::new(ResultScan { + table_info: info.table_info.clone(), + query_id: info.query_id.clone(), + block_raw_data: info.block_raw_data.clone(), + })) + } +} + +#[async_trait::async_trait] +impl Table for ResultScan { + fn is_local(&self) -> bool { + true + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn get_table_info(&self) -> &TableInfo { + &self.table_info + } + + fn get_data_source_info(&self) -> DataSourceInfo { + DataSourceInfo::ResultScanSource(ResultScanTableInfo { + table_info: self.table_info.clone(), + query_id: self.query_id.clone(), + block_raw_data: self.block_raw_data.clone(), + }) + } + + #[async_backtrace::framed] + async fn read_partitions( + &self, + _: Arc, + _: Option, + ) -> Result<(PartStatistics, Partitions)> { + Ok((PartStatistics::default(), Partitions::default())) + } + + fn table_args(&self) -> Option { + let args = vec![Scalar::String(self.query_id.as_bytes().to_vec())]; + + Some(TableArgs::new_positioned(args)) + } + + fn read_data( + &self, + _ctx: Arc, + _plan: &DataSourcePlan, + pipeline: &mut Pipeline, + ) -> Result<()> { + if self.block_raw_data.is_empty() { + pipeline.add_source(EmptySource::create, 1)?; + } else { + let mut reader = Cursor::new(self.block_raw_data.clone()); + let meta = read_metadata(&mut reader)?; + let arrow_schema = infer_schema(&meta)?; + let table_schema = TableSchema::from(&arrow_schema); + let schema = DataSchema::from(&table_schema); + + // Read the parquet file into one block. 
+ let chunks_iter = + pread::FileReader::new(reader, meta.row_groups, arrow_schema, None, None, None); + + for chunk in chunks_iter { + let block = DataBlock::from_arrow_chunk(&chunk?, &schema)?; + pipeline.add_source(|output| OneBlockSource::create(output, block.clone()), 1)?; + } + } + Ok(()) + } +} diff --git a/src/query/storages/result_cache/src/write/sink.rs b/src/query/storages/result_cache/src/write/sink.rs index 7a1aeef32b3e3..2808e7151bcc4 100644 --- a/src/query/storages/result_cache/src/write/sink.rs +++ b/src/query/storages/result_cache/src/write/sink.rs @@ -48,6 +48,7 @@ impl AsyncMpscSink for WriteResultCacheSink { const NAME: &'static str = "WriteResultCacheSink"; #[async_trait::unboxed_simple] + #[async_backtrace::framed] async fn consume(&mut self, block: DataBlock) -> Result { if !self.cache_writer.over_limit() { self.cache_writer.append_block(block); @@ -58,6 +59,7 @@ impl AsyncMpscSink for WriteResultCacheSink { } } + #[async_backtrace::framed] async fn on_finish(&mut self) -> Result<()> { if self.cache_writer.over_limit() { return Ok(()); diff --git a/src/query/storages/result_cache/src/write/writer.rs b/src/query/storages/result_cache/src/write/writer.rs index 283132a024a0f..3832833e69439 100644 --- a/src/query/storages/result_cache/src/write/writer.rs +++ b/src/query/storages/result_cache/src/write/writer.rs @@ -61,6 +61,7 @@ impl ResultCacheWriter { } /// Write the result cache to the storage and return the location. + #[async_backtrace::framed] pub async fn write_to_storage(&self) -> Result { let mut buf = Vec::with_capacity(self.current_bytes); let _ = blocks_to_parquet( diff --git a/src/query/storages/share/Cargo.toml b/src/query/storages/share/Cargo.toml index 1fbdfe64b8d24..4775b55e071b9 100644 --- a/src/query/storages/share/Cargo.toml +++ b/src/query/storages/share/Cargo.toml @@ -17,6 +17,7 @@ common-meta-app = { path = "../../../meta/app" } storages-common-table-meta = { path = "../common/table-meta" } +async-backtrace = { workspace = true } opendal = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/src/query/storages/share/src/share.rs b/src/query/storages/share/src/share.rs index d763fd06c1035..0925265bf1c74 100644 --- a/src/query/storages/share/src/share.rs +++ b/src/query/storages/share/src/share.rs @@ -35,6 +35,7 @@ pub fn share_table_info_location(tenant: &str, share_name: &str) -> String { ) } +#[async_backtrace::framed] pub async fn save_share_spec( tenant: &String, operator: Operator, diff --git a/src/query/storages/stage/Cargo.toml b/src/query/storages/stage/Cargo.toml index 24e9f974054c0..98a8c55a8e99f 100644 --- a/src/query/storages/stage/Cargo.toml +++ b/src/query/storages/stage/Cargo.toml @@ -22,6 +22,7 @@ common-pipeline-core = { path = "../../pipeline/core" } common-pipeline-sources = { path = "../../pipeline/sources" } common-storage = { path = "../../../common/storage" } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } opendal = { workspace = true } parking_lot = "0.12.1" diff --git a/src/query/storages/stage/src/stage_table.rs b/src/query/storages/stage/src/stage_table.rs index 5db7d384b3989..ab657c9d2dc37 100644 --- a/src/query/storages/stage/src/stage_table.rs +++ b/src/query/storages/stage/src/stage_table.rs @@ -72,6 +72,7 @@ impl StageTable { init_stage_operator(stage) } + #[async_backtrace::framed] pub async fn list_files( stage_info: &StageTableInfo, max_files: Option, @@ -110,6 +111,7 @@ impl Table for StageTable { 
DataSourceInfo::StageSource(self.table_info.clone()) } + #[async_backtrace::framed] async fn read_partitions( &self, ctx: Arc, @@ -236,6 +238,7 @@ impl Table for StageTable { } // TODO use tmp file_name & rename to have atomic commit + #[async_backtrace::framed] async fn commit_insertion( &self, _ctx: Arc, @@ -247,6 +250,7 @@ impl Table for StageTable { } // Truncate the stage file. + #[async_backtrace::framed] async fn truncate(&self, _ctx: Arc, _: bool) -> Result<()> { Err(ErrorCode::Unimplemented( "S3 external table truncate() unimplemented yet!", diff --git a/src/query/storages/stage/src/stage_table_sink.rs b/src/query/storages/stage/src/stage_table_sink.rs index aaa18198d14b0..55f27f654b791 100644 --- a/src/query/storages/stage/src/stage_table_sink.rs +++ b/src/query/storages/stage/src/stage_table_sink.rs @@ -255,6 +255,7 @@ impl Processor for StageTableSink { Ok(()) } + #[async_backtrace::framed] async fn async_process(&mut self) -> Result<()> { match std::mem::replace(&mut self.state, State::None) { State::NeedWrite(bytes, remainng_block) => { diff --git a/src/query/storages/system/Cargo.toml b/src/query/storages/system/Cargo.toml index c2977807d75c6..cf0b624b9beb1 100644 --- a/src/query/storages/system/Cargo.toml +++ b/src/query/storages/system/Cargo.toml @@ -31,6 +31,7 @@ common-storages-view = { path = "../view" } common-users = { path = "../../users" } jsonb = { workspace = true } +async-backtrace = { workspace = true } async-trait = { version = "0.1.57", package = "async-trait-fn" } chrono = { workspace = true } itertools = "0.10.5" diff --git a/src/query/storages/system/src/catalogs_table.rs b/src/query/storages/system/src/catalogs_table.rs index 61787bf41e6d1..f770187821173 100644 --- a/src/query/storages/system/src/catalogs_table.rs +++ b/src/query/storages/system/src/catalogs_table.rs @@ -43,6 +43,7 @@ impl AsyncSystemTable for CatalogsTable { &self.table_info } + #[async_backtrace::framed] async fn get_full_data(&self, _ctx: Arc) -> Result { let cm = CatalogManager::instance(); diff --git a/src/query/storages/system/src/columns_table.rs b/src/query/storages/system/src/columns_table.rs index e4a365754d2c3..1f6b59e10d91b 100644 --- a/src/query/storages/system/src/columns_table.rs +++ b/src/query/storages/system/src/columns_table.rs @@ -44,6 +44,7 @@ impl AsyncSystemTable for ColumnsTable { &self.table_info } + #[async_backtrace::framed] async fn get_full_data(&self, ctx: Arc) -> Result { let rows = self.dump_table_columns(ctx).await?; let mut names: Vec> = Vec::with_capacity(rows.len()); @@ -125,6 +126,7 @@ impl ColumnsTable { AsyncOneBlockSystemTable::create(ColumnsTable { table_info }) } + #[async_backtrace::framed] async fn dump_table_columns( &self, ctx: Arc, diff --git a/src/query/storages/system/src/databases_table.rs b/src/query/storages/system/src/databases_table.rs index 3dcccb2b8a5d8..2f34de6547b54 100644 --- a/src/query/storages/system/src/databases_table.rs +++ b/src/query/storages/system/src/databases_table.rs @@ -44,6 +44,7 @@ impl AsyncSystemTable for DatabasesTable { &self.table_info } + #[async_backtrace::framed] async fn get_full_data(&self, ctx: Arc) -> Result { let tenant = ctx.get_tenant(); let catalogs = CatalogManager::instance(); diff --git a/src/query/storages/system/src/engines_table.rs b/src/query/storages/system/src/engines_table.rs index 37bf4ad4cf304..57899977b98bc 100644 --- a/src/query/storages/system/src/engines_table.rs +++ b/src/query/storages/system/src/engines_table.rs @@ -43,6 +43,7 @@ impl AsyncSystemTable for EnginesTable { 
&self.table_info } + #[async_backtrace::framed] async fn get_full_data(&self, ctx: Arc) -> Result { // TODO passing catalog name let table_engine_descriptors = ctx.get_catalog(CATALOG_DEFAULT)?.get_table_engines(); diff --git a/src/query/storages/system/src/functions_table.rs b/src/query/storages/system/src/functions_table.rs index 8cf30925553f1..95667aedcac86 100644 --- a/src/query/storages/system/src/functions_table.rs +++ b/src/query/storages/system/src/functions_table.rs @@ -47,6 +47,7 @@ impl AsyncSystemTable for FunctionsTable { &self.table_info } + #[async_backtrace::framed] async fn get_full_data(&self, ctx: Arc) -> Result { // TODO(andylokandy): add rewritable function names, e.g. database() let func_names = BUILTIN_FUNCTIONS.registered_names(); @@ -152,6 +153,7 @@ impl FunctionsTable { AsyncOneBlockSystemTable::create(FunctionsTable { table_info }) } + #[async_backtrace::framed] async fn get_udfs(ctx: Arc) -> Result> { let tenant = ctx.get_tenant(); UserApiProvider::instance().get_udfs(&tenant).await diff --git a/src/query/storages/system/src/log_queue.rs b/src/query/storages/system/src/log_queue.rs index 0dcd04baf677a..a4686bc013e0a 100644 --- a/src/query/storages/system/src/log_queue.rs +++ b/src/query/storages/system/src/log_queue.rs @@ -155,6 +155,7 @@ impl Table for SystemLogTable { &self.table_info } + #[async_backtrace::framed] async fn read_partitions( &self, _: Arc, @@ -203,6 +204,7 @@ impl Table for SystemLogTable { ) } + #[async_backtrace::framed] async fn truncate(&self, _ctx: Arc, _: bool) -> Result<()> { let log_queue = SystemLogQueue::::instance()?; let mut write_guard = log_queue.data.write(); diff --git a/src/query/storages/system/src/query_cache_table.rs b/src/query/storages/system/src/query_cache_table.rs index ac5b35dbf6621..3e9ecf87ff9d5 100644 --- a/src/query/storages/system/src/query_cache_table.rs +++ b/src/query/storages/system/src/query_cache_table.rs @@ -1,138 +1,139 @@ -// Copyright 2023 Datafuse Labs. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::sync::Arc; - -use common_catalog::table::Table; -use common_catalog::table_context::TableContext; -use common_exception::Result; -use common_expression::types::BooleanType; -use common_expression::types::NumberDataType; -use common_expression::types::StringType; -use common_expression::types::UInt64Type; -use common_expression::DataBlock; -use common_expression::FromData; -use common_expression::TableDataType; -use common_expression::TableField; -use common_expression::TableSchemaRefExt; -use common_meta_app::schema::TableIdent; -use common_meta_app::schema::TableInfo; -use common_meta_app::schema::TableMeta; -use common_storages_result_cache::gen_result_cache_prefix; -use common_storages_result_cache::ResultCacheMetaManager; -use common_users::UserApiProvider; -use itertools::Itertools; - -use crate::table::AsyncOneBlockSystemTable; -use crate::table::AsyncSystemTable; - -pub struct QueryCacheTable { - table_info: TableInfo, -} - -#[async_trait::async_trait] -impl AsyncSystemTable for QueryCacheTable { - const NAME: &'static str = "system.query_cache"; - - fn get_table_info(&self) -> &TableInfo { - &self.table_info - } - - async fn get_full_data(&self, ctx: Arc) -> Result { - let meta_client = UserApiProvider::instance().get_meta_store_client(); - let result_cache_mgr = ResultCacheMetaManager::create(meta_client, 0); - let tenant = ctx.get_tenant(); - let prefix = gen_result_cache_prefix(&tenant); - - let cached_values = result_cache_mgr.list(prefix.as_str()).await?; - - let mut sql_vec: Vec<&str> = Vec::with_capacity(cached_values.len()); - let mut query_id_vec: Vec<&str> = Vec::with_capacity(cached_values.len()); - let mut result_size_vec = Vec::with_capacity(cached_values.len()); - let mut num_rows_vec = Vec::with_capacity(cached_values.len()); - let mut partitions_sha_vec = Vec::with_capacity(cached_values.len()); - let mut location_vec = Vec::with_capacity(cached_values.len()); - let mut active_result_scan: Vec = Vec::with_capacity(cached_values.len()); - - cached_values.iter().for_each(|x| { - sql_vec.push(x.sql.as_str()); - query_id_vec.push(x.query_id.as_str()); - result_size_vec.push(x.result_size as u64); - num_rows_vec.push(x.num_rows as u64); - partitions_sha_vec.push(x.partitions_shas.clone()); - location_vec.push(x.location.as_str()); - }); - - let active_query_ids = ctx.get_query_id_history(); - - for qid in query_id_vec.iter() { - if active_query_ids.contains(*qid) { - active_result_scan.push(true) - } else { - active_result_scan.push(false) - } - } - - let partitions_sha_vec: Vec = partitions_sha_vec - .into_iter() - .map(|part| part.into_iter().join(", ")) - .collect(); - - Ok(DataBlock::new_from_columns(vec![ - StringType::from_data(sql_vec), - StringType::from_data(query_id_vec), - UInt64Type::from_data(result_size_vec), - UInt64Type::from_data(num_rows_vec), - StringType::from_data( - partitions_sha_vec - .iter() - .map(|part_sha| part_sha.as_str()) - .collect::>(), - ), - StringType::from_data(location_vec), - BooleanType::from_data(active_result_scan), - ])) - } -} - -impl QueryCacheTable { - pub fn create(table_id: u64) -> Arc { - let schema = TableSchemaRefExt::create(vec![ - TableField::new("sql", TableDataType::String), - TableField::new("query_id", TableDataType::String), - TableField::new("result_size", TableDataType::Number(NumberDataType::UInt64)), - TableField::new("num_rows", TableDataType::Number(NumberDataType::UInt64)), - TableField::new( - "partitions_sha", - TableDataType::Array(Box::new(TableDataType::String)), - ), - 
TableField::new("location", TableDataType::String), - TableField::new("active_result_scan", TableDataType::Boolean), - ]); - - let table_info = TableInfo { - desc: "'system'.'query_cache'".to_string(), - name: "query_cache".to_string(), - ident: TableIdent::new(table_id, 0), - meta: TableMeta { - schema, - engine: "SystemQueryCache".to_string(), - ..Default::default() - }, - ..Default::default() - }; - - AsyncOneBlockSystemTable::create(QueryCacheTable { table_info }) - } -} +// Copyright 2023 Datafuse Labs. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use common_catalog::table::Table; +use common_catalog::table_context::TableContext; +use common_exception::Result; +use common_expression::types::BooleanType; +use common_expression::types::NumberDataType; +use common_expression::types::StringType; +use common_expression::types::UInt64Type; +use common_expression::DataBlock; +use common_expression::FromData; +use common_expression::TableDataType; +use common_expression::TableField; +use common_expression::TableSchemaRefExt; +use common_meta_app::schema::TableIdent; +use common_meta_app::schema::TableInfo; +use common_meta_app::schema::TableMeta; +use common_storages_result_cache::gen_result_cache_prefix; +use common_storages_result_cache::ResultCacheMetaManager; +use common_users::UserApiProvider; +use itertools::Itertools; + +use crate::table::AsyncOneBlockSystemTable; +use crate::table::AsyncSystemTable; + +pub struct QueryCacheTable { + table_info: TableInfo, +} + +#[async_trait::async_trait] +impl AsyncSystemTable for QueryCacheTable { + const NAME: &'static str = "system.query_cache"; + + fn get_table_info(&self) -> &TableInfo { + &self.table_info + } + + #[async_backtrace::framed] + async fn get_full_data(&self, ctx: Arc) -> Result { + let meta_client = UserApiProvider::instance().get_meta_store_client(); + let result_cache_mgr = ResultCacheMetaManager::create(meta_client, 0); + let tenant = ctx.get_tenant(); + let prefix = gen_result_cache_prefix(&tenant); + + let cached_values = result_cache_mgr.list(prefix.as_str()).await?; + + let mut sql_vec: Vec<&str> = Vec::with_capacity(cached_values.len()); + let mut query_id_vec: Vec<&str> = Vec::with_capacity(cached_values.len()); + let mut result_size_vec = Vec::with_capacity(cached_values.len()); + let mut num_rows_vec = Vec::with_capacity(cached_values.len()); + let mut partitions_sha_vec = Vec::with_capacity(cached_values.len()); + let mut location_vec = Vec::with_capacity(cached_values.len()); + let mut active_result_scan: Vec = Vec::with_capacity(cached_values.len()); + + cached_values.iter().for_each(|x| { + sql_vec.push(x.sql.as_str()); + query_id_vec.push(x.query_id.as_str()); + result_size_vec.push(x.result_size as u64); + num_rows_vec.push(x.num_rows as u64); + partitions_sha_vec.push(x.partitions_shas.clone()); + location_vec.push(x.location.as_str()); + }); + + let active_query_ids = ctx.get_query_id_history(); + + for qid in query_id_vec.iter() { + if active_query_ids.contains(*qid) { + 
+                active_result_scan.push(true)
+            } else {
+                active_result_scan.push(false)
+            }
+        }
+
+        let partitions_sha_vec: Vec<String> = partitions_sha_vec
+            .into_iter()
+            .map(|part| part.into_iter().join(", "))
+            .collect();
+
+        Ok(DataBlock::new_from_columns(vec![
+            StringType::from_data(sql_vec),
+            StringType::from_data(query_id_vec),
+            UInt64Type::from_data(result_size_vec),
+            UInt64Type::from_data(num_rows_vec),
+            StringType::from_data(
+                partitions_sha_vec
+                    .iter()
+                    .map(|part_sha| part_sha.as_str())
+                    .collect::<Vec<_>>(),
+            ),
+            StringType::from_data(location_vec),
+            BooleanType::from_data(active_result_scan),
+        ]))
+    }
+}
+
+impl QueryCacheTable {
+    pub fn create(table_id: u64) -> Arc<dyn Table> {
+        let schema = TableSchemaRefExt::create(vec![
+            TableField::new("sql", TableDataType::String),
+            TableField::new("query_id", TableDataType::String),
+            TableField::new("result_size", TableDataType::Number(NumberDataType::UInt64)),
+            TableField::new("num_rows", TableDataType::Number(NumberDataType::UInt64)),
+            TableField::new(
+                "partitions_sha",
+                TableDataType::Array(Box::new(TableDataType::String)),
+            ),
+            TableField::new("location", TableDataType::String),
+            TableField::new("active_result_scan", TableDataType::Boolean),
+        ]);
+
+        let table_info = TableInfo {
+            desc: "'system'.'query_cache'".to_string(),
+            name: "query_cache".to_string(),
+            ident: TableIdent::new(table_id, 0),
+            meta: TableMeta {
+                schema,
+                engine: "SystemQueryCache".to_string(),
+                ..Default::default()
+            },
+            ..Default::default()
+        };
+
+        AsyncOneBlockSystemTable::create(QueryCacheTable { table_info })
+    }
+}
diff --git a/src/query/storages/system/src/roles_table.rs b/src/query/storages/system/src/roles_table.rs
index d8162b037c3d1..bbb5b2c57e8b9 100644
--- a/src/query/storages/system/src/roles_table.rs
+++ b/src/query/storages/system/src/roles_table.rs
@@ -45,6 +45,7 @@ impl AsyncSystemTable for RolesTable {
         &self.table_info
     }
 
+    #[async_backtrace::framed]
     async fn get_full_data(&self, ctx: Arc<dyn TableContext>) -> Result<DataBlock> {
         let tenant = ctx.get_tenant();
         let roles = UserApiProvider::instance().get_roles(&tenant).await?;
diff --git a/src/query/storages/system/src/stages_table.rs b/src/query/storages/system/src/stages_table.rs
index a273db649fb1f..22cdcbb037d70 100644
--- a/src/query/storages/system/src/stages_table.rs
+++ b/src/query/storages/system/src/stages_table.rs
@@ -48,6 +48,7 @@ impl AsyncSystemTable for StagesTable {
         &self.table_info
     }
 
+    #[async_backtrace::framed]
     async fn get_full_data(&self, ctx: Arc<dyn TableContext>) -> Result<DataBlock> {
         let tenant = ctx.get_tenant();
         let stages = UserApiProvider::instance().get_stages(&tenant).await?;
diff --git a/src/query/storages/system/src/table.rs b/src/query/storages/system/src/table.rs
index 5fc3391dddf85..93406bc00fe59 100644
--- a/src/query/storages/system/src/table.rs
+++ b/src/query/storages/system/src/table.rs
@@ -104,6 +104,7 @@ impl<TTable: 'static + SyncSystemTable> Table for SyncOneBlockSystemTable<TTable
         self.inner_table.get_table_info()
     }
 
+    #[async_backtrace::framed]
     async fn read_partitions(
         &self,
         ctx: Arc<dyn TableContext>,
@@ -133,6 +134,7 @@ impl<TTable: 'static + SyncSystemTable> Table for SyncOneBlockSystemTable<TTable
     }
 
+    #[async_backtrace::framed]
     async fn truncate(&self, ctx: Arc<dyn TableContext>, _purge: bool) -> Result<()> {
         self.inner_table.truncate(ctx)
     }
@@ -180,6 +182,7 @@ pub trait AsyncSystemTable: Send + Sync {
     fn get_table_info(&self) -> &TableInfo;
 
     async fn get_full_data(&self, ctx: Arc<dyn TableContext>) -> Result<DataBlock>;
+    #[async_backtrace::framed]
     async fn get_partitions(
         &self,
         _ctx: Arc<dyn TableContext>,
@@ -218,6 +221,7 @@ impl<TTable: 'static + AsyncSystemTable> Table for AsyncOneBlockSystemTable<TTable
         self.inner_table.get_table_info()
     }
 
+    #[async_backtrace::framed]
     async fn read_partitions(
         &self,
         ctx: Arc<dyn TableContext>,
@@ -269,6 +273,7 @@ impl<TTable: 'static + AsyncSystemTable> AsyncSource for SystemTableAsyncSource<
     const NAME: &'static str = TTable::NAME;
 
     #[async_trait::unboxed_simple]
+    #[async_backtrace::framed]
     async fn generate(&mut self) -> Result<Option<DataBlock>> {
         if self.finished {
             return Ok(None);
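Every hunk above applies the same recipe: an `async fn` that should be visible in task dumps gets a `#[async_backtrace::framed]` attribute, which wraps the function body so it appears as one frame in the async call tree. A minimal, self-contained sketch of that mechanism (the function names are illustrative, not from this PR; it assumes `tokio` and `async-backtrace` as dependencies):

```rust
use std::time::Duration;

// Each `#[framed]` async fn contributes one frame to its task's async
// backtrace; `taskdump_tree` renders the tree of all live framed tasks.
#[async_backtrace::framed]
async fn outer() {
    inner().await;
}

#[async_backtrace::framed]
async fn inner() {
    // Park forever so the task is still alive when the dump is taken.
    std::future::pending::<()>().await;
}

#[tokio::main]
async fn main() {
    let task = tokio::spawn(async_backtrace::location!().frame(outer()));
    tokio::time::sleep(Duration::from_millis(100)).await;
    // Should show `outer` with `inner` nested beneath it; `true` asks the
    // dump to wait until currently-polled tasks reach a yield point.
    println!("{}", async_backtrace::taskdump_tree(true));
    task.abort();
}
```

The bookkeeping is kept per task, which is what makes the wholesale annotation in this PR practical rather than something reserved for a few hot paths.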
diff --git a/src/query/storages/system/src/tables_table.rs b/src/query/storages/system/src/tables_table.rs
index 2ed8aff0501af..dcfb810f3d42d 100644
--- a/src/query/storages/system/src/tables_table.rs
+++ b/src/query/storages/system/src/tables_table.rs
@@ -57,6 +57,7 @@ pub trait HistoryAware {
 #[async_trait::async_trait]
 impl HistoryAware for TablesTable<true> {
     const TABLE_NAME: &'static str = "tables_with_history";
+    #[async_backtrace::framed]
     async fn list_tables(
         catalog: &Arc<dyn Catalog>,
         tenant: &str,
@@ -69,6 +70,7 @@ impl HistoryAware for TablesTable<true> {
 #[async_trait::async_trait]
 impl HistoryAware for TablesTable<false> {
     const TABLE_NAME: &'static str = "tables";
+    #[async_backtrace::framed]
     async fn list_tables(
         catalog: &Arc<dyn Catalog>,
         tenant: &str,
@@ -88,6 +90,7 @@ where TablesTable<T>: HistoryAware
         &self.table_info
     }
 
+    #[async_backtrace::framed]
     async fn get_full_data(&self, ctx: Arc<dyn TableContext>) -> Result<DataBlock> {
         let tenant = ctx.get_tenant();
         let catalog_mgr = CatalogManager::instance();
diff --git a/src/query/storages/system/src/tracing_table.rs b/src/query/storages/system/src/tracing_table.rs
index e90a4d2f67bfa..abc3286f34190 100644
--- a/src/query/storages/system/src/tracing_table.rs
+++ b/src/query/storages/system/src/tracing_table.rs
@@ -103,6 +103,7 @@ impl Table for TracingTable {
         &self.table_info
     }
 
+    #[async_backtrace::framed]
     async fn read_partitions(
         &self,
         _ctx: Arc<dyn TableContext>,
diff --git a/src/query/storages/system/src/users_table.rs b/src/query/storages/system/src/users_table.rs
index b52989ec09734..93933872326d8 100644
--- a/src/query/storages/system/src/users_table.rs
+++ b/src/query/storages/system/src/users_table.rs
@@ -43,6 +43,7 @@ impl AsyncSystemTable for UsersTable {
         &self.table_info
     }
 
+    #[async_backtrace::framed]
     async fn get_full_data(&self, ctx: Arc<dyn TableContext>) -> Result<DataBlock> {
         let tenant = ctx.get_tenant();
         let users = UserApiProvider::instance().get_users(&tenant).await?;
diff --git a/src/query/users/Cargo.toml b/src/query/users/Cargo.toml
index 46c916d351c34..aab21bcbff9f3 100644
--- a/src/query/users/Cargo.toml
+++ b/src/query/users/Cargo.toml
@@ -27,6 +27,7 @@ common-meta-types = { path = "../../meta/types" }
 # Github dependencies
 
 # Crates.io dependencies
+async-backtrace = { workspace = true }
 base64 = "0.21"
 jwt-simple = "0.11"
 p256 = "0.13"
diff --git a/src/query/users/src/file_format.rs b/src/query/users/src/file_format.rs
index 019cc7cdbf3e3..bd44f5086c451 100644
--- a/src/query/users/src/file_format.rs
+++ b/src/query/users/src/file_format.rs
@@ -22,6 +22,7 @@ use crate::UserApiProvider;
 /// user file_format operations.
 impl UserApiProvider {
     // Add a new file_format.
+    #[async_backtrace::framed]
     pub async fn add_file_format(
         &self,
         tenant: &str,
@@ -43,6 +44,7 @@ impl UserApiProvider {
     }
 
     // Get one file_format from by tenant.
+    #[async_backtrace::framed]
     pub async fn get_file_format(
         &self,
         tenant: &str,
@@ -55,6 +57,7 @@ impl UserApiProvider {
     }
 
     // Get the tenant all file_format list.
+    #[async_backtrace::framed]
     pub async fn get_file_formats(&self, tenant: &str) -> Result<Vec<UserDefinedFileFormat>> {
         let file_format_api_provider = self.get_file_format_api_client(tenant)?;
         let get_file_formats = file_format_api_provider.get_file_formats();
@@ -66,6 +69,7 @@ impl UserApiProvider {
     }
 
     // Drop a file_format by name.
+    #[async_backtrace::framed]
     pub async fn drop_file_format(&self, tenant: &str, name: &str, if_exists: bool) -> Result<()> {
         let file_format_api_provider = self.get_file_format_api_client(tenant)?;
         let drop_file_format = file_format_api_provider.drop_file_format(name, MatchSeq::GE(1));
diff --git a/src/query/users/src/jwt/authenticator.rs b/src/query/users/src/jwt/authenticator.rs
index 6aafcada9c7cc..1dbc5885070f6 100644
--- a/src/query/users/src/jwt/authenticator.rs
+++ b/src/query/users/src/jwt/authenticator.rs
@@ -91,6 +91,7 @@ impl JwtAuthenticator {
     }
 
     // parse jwt claims from single source, if custom claim is not matching on desired, claim parsed would be empty
+    #[async_backtrace::framed]
     pub async fn parse_jwt_claims_from_store(
         &self,
         token: &str,
@@ -111,6 +112,7 @@ impl JwtAuthenticator {
             Some(_) => Ok(c),
         }
     }
+    #[async_backtrace::framed]
     pub async fn parse_jwt_claims(&self, token: &str) -> Result<JWTClaims<CustomClaims>> {
         let mut combined_code = ErrorCode::AuthenticateFailure(
             "could not decode token from all available jwt key stores. ",
diff --git a/src/query/users/src/jwt/jwk.rs b/src/query/users/src/jwt/jwk.rs
index 7c6fa0b1c8e31..dea3a710b8ce7 100644
--- a/src/query/users/src/jwt/jwk.rs
+++ b/src/query/users/src/jwt/jwk.rs
@@ -116,6 +116,7 @@ impl JwkKeyStore {
 }
 
 impl JwkKeyStore {
+    #[async_backtrace::framed]
     async fn load_keys(&self) -> Result<()> {
         let response = reqwest::get(&self.url).await.map_err(|e| {
             ErrorCode::AuthenticateFailure(format!("Could not download JWKS: {}", e))
@@ -132,6 +133,7 @@ impl JwkKeyStore {
         Ok(())
     }
 
+    #[async_backtrace::framed]
     async fn maybe_reload_keys(&self) -> Result<()> {
         let need_reload = {
             let last_refreshed_at = *self.last_refreshed_at.read();
@@ -145,6 +147,7 @@ impl JwkKeyStore {
         Ok(())
     }
 
+    #[async_backtrace::framed]
     pub(super) async fn get_key(&self, key_id: Option<String>) -> Result<PubKey> {
         self.maybe_reload_keys().await?;
         let keys = self.keys.read();
diff --git a/src/query/users/src/role_cache_mgr.rs b/src/query/users/src/role_cache_mgr.rs
index d520615998854..7b4cfffd0b128 100644
--- a/src/query/users/src/role_cache_mgr.rs
+++ b/src/query/users/src/role_cache_mgr.rs
@@ -69,29 +69,31 @@ impl RoleCacheManager {
         let cache = self.cache.clone();
         let polling_interval = self.polling_interval;
         let user_manager = self.user_manager.clone();
-        self.polling_join_handle = Some(tokio::spawn(async move {
-            loop {
-                let tenants: Vec<String> = {
-                    let cached = cache.read();
-                    cached.keys().cloned().collect()
-                };
-                for tenant in tenants {
-                    match load_roles_data(&user_manager, &tenant).await {
-                        Err(err) => {
-                            warn!(
-                                "role_cache_mgr load roles data of tenant {} failed: {}",
-                                tenant, err,
-                            )
-                        }
-                        Ok(data) => {
-                            let mut cached = cache.write();
-                            cached.insert(tenant.to_string(), data);
+        self.polling_join_handle = Some(tokio::spawn(async_backtrace::location!().frame(
+            async move {
+                loop {
+                    let tenants: Vec<String> = {
+                        let cached = cache.read();
+                        cached.keys().cloned().collect()
+                    };
+                    for tenant in tenants {
+                        match load_roles_data(&user_manager, &tenant).await {
+                            Err(err) => {
+                                warn!(
+                                    "role_cache_mgr load roles data of tenant {} failed: {}",
+                                    tenant, err,
+                                )
+                            }
+                            Ok(data) => {
+                                let mut cached = cache.write();
+                                cached.insert(tenant.to_string(), data);
+                            }
                         }
                     }
+                    tokio::time::sleep(polling_interval).await
                 }
-                tokio::time::sleep(polling_interval).await
-            }
-        }));
+            },
+        )));
     }
 
     pub fn invalidate_cache(&self, tenant: &str) {
@@ -99,6 +101,7 @@ impl RoleCacheManager {
         cached.remove(tenant);
     }
 
+    #[async_backtrace::framed]
     pub async fn find_role(&self, tenant: &str, role: &str) -> Result<Option<RoleInfo>> {
         let cached = self.cache.read();
         let cached_roles = match cached.get(tenant) {
@@ -109,6 +112,7 @@ impl RoleCacheManager {
     }
 
     // find_related_roles is called on validating an user's privileges.
+    #[async_backtrace::framed]
     pub async fn find_related_roles(
         &self,
         tenant: &str,
@@ -123,6 +127,7 @@ impl RoleCacheManager {
         Ok(find_all_related_roles(&cached_roles.roles, roles))
     }
 
+    #[async_backtrace::framed]
    pub async fn force_reload(&self, tenant: &str) -> Result<()> {
         let data = load_roles_data(&self.user_manager, tenant).await?;
         let mut cached = self.cache.write();
@@ -132,6 +137,7 @@ impl RoleCacheManager {
 
     // Load roles data if not found in cache. Watch this tenant's role data in background if
     // once it loads successfully.
+    #[async_backtrace::framed]
     async fn maybe_reload(&self, tenant: &str) -> Result<()> {
         let need_reload = {
             let cached = self.cache.read();
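The rewrite above exists because `#[async_backtrace::framed]` only attaches to `async fn` items, not to a future expression handed to `tokio::spawn`; a detached task instead gets its root frame by wrapping the spawned future with `async_backtrace::location!().frame(..)`. A condensed sketch of the same wrapping, with the loop body elided and the names assumed:

```rust
use std::time::Duration;

// Frame a detached polling task at its spawn site: `location!()` records
// this file/line, and `frame(..)` makes it the task's root frame.
fn spawn_poller(polling_interval: Duration) -> tokio::task::JoinHandle<()> {
    tokio::spawn(async_backtrace::location!().frame(async move {
        loop {
            // ... refresh cached state here ...
            tokio::time::sleep(polling_interval).await;
        }
    }))
}
```

Framing at the spawn site keeps the task attributable in dumps: the root frame points at the recorded file and line even though the future itself is an anonymous `async move` block.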
diff --git a/src/query/users/src/role_mgr.rs b/src/query/users/src/role_mgr.rs
index c0f0090cab54f..278b23546e153 100644
--- a/src/query/users/src/role_mgr.rs
+++ b/src/query/users/src/role_mgr.rs
@@ -31,6 +31,7 @@ pub const BUILTIN_ROLE_PUBLIC: &str = "public";
 
 impl UserApiProvider {
     // Get one role from by tenant.
+    #[async_backtrace::framed]
     pub async fn get_role(&self, tenant: &str, role: String) -> Result<RoleInfo> {
         let client = self.get_role_api_client(tenant)?;
         let role_data = client.get_role(&role, MatchSeq::GE(0)).await?.data;
@@ -38,6 +39,7 @@ impl UserApiProvider {
     }
 
     // Get the tenant all roles list.
+    #[async_backtrace::framed]
     pub async fn get_roles(&self, tenant: &str) -> Result<Vec<RoleInfo>> {
         let client = self.get_role_api_client(tenant)?;
         let get_roles = client.get_roles();
@@ -56,6 +58,7 @@ impl UserApiProvider {
     }
 
     // Add a new role info.
+    #[async_backtrace::framed]
     pub async fn add_role(
         &self,
         tenant: &str,
@@ -81,6 +84,7 @@ impl UserApiProvider {
     // it also contains all roles. ACCOUNT_ADMIN can access the data objects which owned by any role.
     // 2. PUBLIC, on the other side only includes the public accessible privileges, but every role
     // contains the PUBLIC role. The data objects which owned by PUBLIC can be accessed by any role.
+    #[async_backtrace::framed]
     pub async fn ensure_builtin_roles(&self, tenant: &str) -> Result<u64> {
         let mut account_admin = RoleInfo::new(BUILTIN_ROLE_ACCOUNT_ADMIN);
         account_admin.grants.grant_privileges(
@@ -101,6 +105,7 @@ impl UserApiProvider {
         self.add_role(tenant, public, true).await
     }
 
+    #[async_backtrace::framed]
     pub async fn grant_privileges_to_role(
         &self,
         tenant: &str,
@@ -117,6 +122,7 @@ impl UserApiProvider {
             .map_err(|e| e.add_message_back("(while set role privileges)"))
     }
 
+    #[async_backtrace::framed]
     pub async fn revoke_privileges_from_role(
         &self,
         tenant: &str,
@@ -134,6 +140,7 @@ impl UserApiProvider {
     }
 
     // the grant_role can not have cycle with target_role.
+    #[async_backtrace::framed]
     pub async fn grant_role_to_role(
         &self,
         tenant: &str,
@@ -160,6 +167,7 @@ impl UserApiProvider {
             .map_err(|e| e.add_message_back("(while grant role to role)"))
     }
 
+    #[async_backtrace::framed]
     pub async fn revoke_role_from_role(
         &self,
         tenant: &str,
@@ -176,6 +184,7 @@ impl UserApiProvider {
     }
 
     // Drop a role by name
+    #[async_backtrace::framed]
     pub async fn drop_role(&self, tenant: &str, role: String, if_exists: bool) -> Result<()> {
         let client = self.get_role_api_client(tenant)?;
         let drop_role = client.drop_role(role, MatchSeq::GE(1));
@@ -193,6 +202,7 @@ impl UserApiProvider {
 
     // Find all related roles by role names. Every role have a PUBLIC role, and ACCOUNT_ADMIN
     // default contains every role.
+    #[async_backtrace::framed]
     async fn find_related_roles(
         &self,
         tenant: &str,
diff --git a/src/query/users/src/user_api.rs b/src/query/users/src/user_api.rs
index 70581bab67ff6..dbf52c217a19e 100644
--- a/src/query/users/src/user_api.rs
+++ b/src/query/users/src/user_api.rs
@@ -49,6 +49,7 @@ pub struct UserApiProvider {
 }
 
 impl UserApiProvider {
+    #[async_backtrace::framed]
     pub async fn init(
         conf: RpcClientConf,
         idm_config: IDMConfig,
@@ -67,6 +68,7 @@ impl UserApiProvider {
         Ok(())
     }
 
+    #[async_backtrace::framed]
     pub async fn try_create(
         conf: RpcClientConf,
         idm_config: IDMConfig,
@@ -87,6 +89,7 @@ impl UserApiProvider {
         }))
     }
 
+    #[async_backtrace::framed]
     pub async fn try_create_simple(conf: RpcClientConf) -> Result<Arc<UserApiProvider>> {
         Self::try_create(conf, IDMConfig::default()).await
     }
diff --git a/src/query/users/src/user_mgr.rs b/src/query/users/src/user_mgr.rs
index 0f6afdaa83da0..a34f92dd3727e 100644
--- a/src/query/users/src/user_mgr.rs
+++ b/src/query/users/src/user_mgr.rs
@@ -28,6 +28,7 @@ use crate::UserApiProvider;
 
 impl UserApiProvider {
     // Get one user from by tenant.
+    #[async_backtrace::framed]
     pub async fn get_user(&self, tenant: &str, user: UserIdentity) -> Result<UserInfo> {
         if user.is_root() {
             let mut user_info = UserInfo::new_no_auth(&user.username, &user.hostname);
@@ -66,6 +67,7 @@ impl UserApiProvider {
 
     /// find the matched user with the client ip address, like 'u1'@'127.0.0.1', if the specific
     /// user@host is not found, try 'u1'@'%'.
+    #[async_backtrace::framed]
     pub async fn get_user_with_client_ip(
         &self,
         tenant: &str,
@@ -93,6 +95,7 @@ impl UserApiProvider {
     }
 
     // Get the tenant all users list.
+    #[async_backtrace::framed]
     pub async fn get_users(&self, tenant: &str) -> Result<Vec<UserInfo>> {
         let client = self.get_user_api_client(tenant)?;
         let get_users = client.get_users();
@@ -111,6 +114,7 @@ impl UserApiProvider {
     }
 
     // Add a new user info.
+    #[async_backtrace::framed]
     pub async fn add_user(
         &self,
         tenant: &str,
@@ -136,6 +140,7 @@ impl UserApiProvider {
         }
     }
 
+    #[async_backtrace::framed]
     pub async fn grant_privileges_to_user(
         &self,
         tenant: &str,
@@ -152,6 +157,7 @@ impl UserApiProvider {
             .map_err(|e| e.add_message_back("(while set user privileges)"))
     }
 
+    #[async_backtrace::framed]
     pub async fn revoke_privileges_from_user(
         &self,
         tenant: &str,
@@ -168,6 +174,7 @@ impl UserApiProvider {
             .map_err(|e| e.add_message_back("(while revoke user privileges)"))
     }
 
+    #[async_backtrace::framed]
     pub async fn grant_role_to_user(
         &self,
         tenant: &str,
@@ -183,6 +190,7 @@ impl UserApiProvider {
             .map_err(|e| e.add_message_back("(while grant role to user)"))
     }
 
+    #[async_backtrace::framed]
     pub async fn revoke_role_from_user(
         &self,
         tenant: &str,
@@ -199,6 +207,7 @@ impl UserApiProvider {
     }
 
     // Drop a user by name and hostname.
+    #[async_backtrace::framed]
     pub async fn drop_user(&self, tenant: &str, user: UserIdentity, if_exists: bool) -> Result<()> {
         let client = self.get_user_api_client(tenant)?;
         let drop_user = client.drop_user(user, MatchSeq::GE(1));
@@ -215,6 +224,7 @@ impl UserApiProvider {
     }
 
     // Update an user by name and hostname.
+    #[async_backtrace::framed]
     pub async fn update_user(
         &self,
         tenant: &str,
@@ -236,6 +246,7 @@ impl UserApiProvider {
     }
 
     // Update an user's default role
+    #[async_backtrace::framed]
     pub async fn update_user_default_role(
         &self,
         tenant: &str,
diff --git a/src/query/users/src/user_setting.rs b/src/query/users/src/user_setting.rs
index 585f68b4a3ab8..9aa86b0578e5e 100644
--- a/src/query/users/src/user_setting.rs
+++ b/src/query/users/src/user_setting.rs
@@ -20,18 +20,21 @@ use crate::UserApiProvider;
 
 impl UserApiProvider {
     // Set a setting.
+    #[async_backtrace::framed]
     pub async fn set_setting(&self, tenant: &str, setting: UserSetting) -> Result<u64> {
         let setting_api_provider = self.get_setting_api_client(tenant)?;
         setting_api_provider.set_setting(setting).await
     }
 
     // Get all settings list.
+    #[async_backtrace::framed]
     pub async fn get_settings(&self, tenant: &str) -> Result<Vec<UserSetting>> {
         let setting_api_provider = self.get_setting_api_client(tenant)?;
         setting_api_provider.get_settings().await
     }
 
     // Drop a setting by name.
+    #[async_backtrace::framed]
     pub async fn drop_setting(&self, tenant: &str, name: &str) -> Result<()> {
         let setting_api_provider = self.get_setting_api_client(tenant)?;
         setting_api_provider
diff --git a/src/query/users/src/user_stage.rs b/src/query/users/src/user_stage.rs
index b1ae3ed247a60..5762413687ce2 100644
--- a/src/query/users/src/user_stage.rs
+++ b/src/query/users/src/user_stage.rs
@@ -22,6 +22,7 @@ use crate::UserApiProvider;
 /// user stage operations.
 impl UserApiProvider {
     // Add a new stage.
+    #[async_backtrace::framed]
     pub async fn add_stage(
         &self,
         tenant: &str,
@@ -43,6 +44,7 @@ impl UserApiProvider {
     }
 
     // Get one stage from by tenant.
+    #[async_backtrace::framed]
     pub async fn get_stage(&self, tenant: &str, stage_name: &str) -> Result<StageInfo> {
         let stage_api_provider = self.get_stage_api_client(tenant)?;
         let get_stage = stage_api_provider.get_stage(stage_name, MatchSeq::GE(0));
@@ -50,6 +52,7 @@ impl UserApiProvider {
     }
 
     // Get the tenant all stage list.
+    #[async_backtrace::framed]
     pub async fn get_stages(&self, tenant: &str) -> Result<Vec<StageInfo>> {
         let stage_api_provider = self.get_stage_api_client(tenant)?;
         let get_stages = stage_api_provider.get_stages();
@@ -61,6 +64,7 @@ impl UserApiProvider {
     }
 
     // Drop a stage by name.
+    #[async_backtrace::framed]
     pub async fn drop_stage(&self, tenant: &str, name: &str, if_exists: bool) -> Result<()> {
         let stage_api_provider = self.get_stage_api_client(tenant)?;
         let drop_stage = stage_api_provider.drop_stage(name);
diff --git a/src/query/users/src/user_udf.rs b/src/query/users/src/user_udf.rs
index 79c704eb3b9ea..7e6093406cef2 100644
--- a/src/query/users/src/user_udf.rs
+++ b/src/query/users/src/user_udf.rs
@@ -22,6 +22,7 @@ use crate::UserApiProvider;
 /// UDF operations.
 impl UserApiProvider {
     // Add a new UDF.
+    #[async_backtrace::framed]
     pub async fn add_udf(
         &self,
         tenant: &str,
@@ -43,6 +44,7 @@ impl UserApiProvider {
     }
 
     // Update a UDF.
+    #[async_backtrace::framed]
     pub async fn update_udf(&self, tenant: &str, info: UserDefinedFunction) -> Result<u64> {
         let udf_api_client = self.get_udf_api_client(tenant)?;
         let update_udf = udf_api_client.update_udf(info, MatchSeq::GE(1));
@@ -53,6 +55,7 @@ impl UserApiProvider {
     }
 
     // Get a UDF by name.
+    #[async_backtrace::framed]
     pub async fn get_udf(&self, tenant: &str, udf_name: &str) -> Result<UserDefinedFunction> {
         let udf_api_client = self.get_udf_api_client(tenant)?;
         let get_udf = udf_api_client.get_udf(udf_name, MatchSeq::GE(0));
@@ -60,6 +63,7 @@ impl UserApiProvider {
     }
 
     // Get all UDFs for the tenant.
+    #[async_backtrace::framed]
     pub async fn get_udfs(&self, tenant: &str) -> Result<Vec<UserDefinedFunction>> {
         let udf_api_client = self.get_udf_api_client(tenant)?;
         let get_udfs = udf_api_client.get_udfs();
@@ -71,6 +75,7 @@ impl UserApiProvider {
     }
 
     // Drop a UDF by name.
+    #[async_backtrace::framed]
     pub async fn drop_udf(&self, tenant: &str, udf_name: &str, if_exists: bool) -> Result<()> {
         let udf_api_client = self.get_udf_api_client(tenant)?;
         let drop_udf = udf_api_client.drop_udf(udf_name, MatchSeq::GE(1));
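With the entry points, spawned tasks, and the `UserApiProvider` methods framed, the payoff is that any thread can render a snapshot of what every instrumented task is currently awaiting. A hedged sketch of how such a dump might be exposed (`dump_async_tasks` is an assumed helper, not part of this PR):

```rust
// Render the current tree of framed tasks as text. Passing `true` waits
// for running tasks to reach a yield point, trading a brief pause for a
// consistent snapshot; `false` returns immediately.
fn dump_async_tasks() -> String {
    async_backtrace::taskdump_tree(true)
}
```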