feat: dist sql analyze #1260

Merged: 20 commits, Dec 28, 2023

Changes from all commits

36 changes: 18 additions & 18 deletions Cargo.lock


2 changes: 1 addition & 1 deletion Cargo.toml
@@ -94,7 +94,7 @@ bytes = "1"
bytes_ext = { path = "components/bytes_ext" }
catalog = { path = "catalog" }
catalog_impls = { path = "catalog_impls" }
ceresdbproto = { git = "https://github.com/CeresDB/horaedbproto.git", rev = "2c60e05" }
ceresdbproto = { git = "https://github.com/CeresDB/horaedbproto.git", rev = "d849fa4" }
codec = { path = "components/codec" }
chrono = "0.4"
clap = "3.0"
21 changes: 19 additions & 2 deletions df_engine_extensions/src/dist_sql_query/mod.rs
@@ -12,7 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{fmt, sync::Arc};
use std::{
fmt,
sync::{Arc, Mutex},
};

use async_trait::async_trait;
use common_types::projected_schema::ProjectedSchema;
@@ -35,8 +38,8 @@ pub mod test_util;
pub trait RemotePhysicalPlanExecutor: fmt::Debug + Send + Sync + 'static {
fn execute(
&self,
task_context: RemoteTaskContext,
table: TableIdentifier,
task_context: &TaskContext,
plan: Arc<dyn ExecutionPlan>,
) -> DfResult<BoxFuture<'static, DfResult<SendableRecordBatchStream>>>;
}
@@ -58,6 +61,20 @@ pub trait ExecutableScanBuilder: fmt::Debug + Send + Sync + 'static {

type ExecutableScanBuilderRef = Box<dyn ExecutableScanBuilder>;

pub struct RemoteTaskContext {
pub task_ctx: Arc<TaskContext>,
pub remote_metrics: Arc<Mutex<Option<String>>>,
}

impl RemoteTaskContext {
pub fn new(task_ctx: Arc<TaskContext>, remote_metrics: Arc<Mutex<Option<String>>>) -> Self {
Self {
task_ctx,
remote_metrics,
}
}
}

#[derive(Clone)]
pub struct TableScanContext {
pub batch_size: usize,
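The RemoteTaskContext added above is the channel through which a remote node's formatted metrics travel back to the partitioned scan: both sides hold a handle to the same Arc<Mutex<Option<String>>> slot. Below is a minimal sketch of that sharing, assuming DataFusion's TaskContext import path and using hypothetical helper names (build_remote_task_context, record_remote_metrics) that are not part of this PR.

use std::sync::{Arc, Mutex};

// Assumption: this is one common re-export path for DataFusion's TaskContext;
// the workspace may import it from a different module.
use datafusion::execution::context::TaskContext;

use crate::dist_sql_query::RemoteTaskContext;

// Hypothetical helper: build the context and keep a second handle to the metrics
// slot so the caller can read whatever the remote executor wrote once the query
// has finished.
fn build_remote_task_context(
    task_ctx: Arc<TaskContext>,
) -> (RemoteTaskContext, Arc<Mutex<Option<String>>>) {
    let remote_metrics = Arc::new(Mutex::new(None));
    let remote_task_ctx = RemoteTaskContext::new(task_ctx, remote_metrics.clone());
    (remote_task_ctx, remote_metrics)
}

// Hypothetical helper: an executor implementation could store the formatted
// remote metrics like this after the remote query completes.
fn record_remote_metrics(ctx: &RemoteTaskContext, formatted: String) {
    *ctx.remote_metrics.lock().unwrap() = Some(formatted);
}

Keeping the slot behind Arc<Mutex<...>> lets the executor write the string from the asynchronous remote call while metrics() on the scan plan reads it later on the query thread.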
47 changes: 38 additions & 9 deletions df_engine_extensions/src/dist_sql_query/physical_plan.rs
@@ -18,7 +18,7 @@ use std::{
any::Any,
fmt,
pin::Pin,
sync::Arc,
sync::{Arc, Mutex},
task::{Context, Poll},
time::{Duration, Instant},
};
@@ -45,7 +45,7 @@ use futures::{future::BoxFuture, FutureExt, Stream, StreamExt};
use table_engine::{remote::model::TableIdentifier, table::ReadRequest};
use trace_metric::{collector::FormatCollectorVisitor, MetricsCollector, TraceMetricWhenDrop};

use crate::dist_sql_query::{RemotePhysicalPlanExecutor, TableScanContext};
use crate::dist_sql_query::{RemotePhysicalPlanExecutor, RemoteTaskContext, TableScanContext};

/// Placeholder of partitioned table's scan plan
/// It is inexecutable actually and just for carrying the necessary information
@@ -147,31 +147,35 @@ pub(crate) struct ResolvedPartitionedScan {
pub remote_exec_ctx: Arc<RemoteExecContext>,
pub pushdown_continue: bool,
pub metrics_collector: MetricsCollector,
pub is_analyze: bool,
}

impl ResolvedPartitionedScan {
pub fn new(
remote_executor: Arc<dyn RemotePhysicalPlanExecutor>,
sub_table_plan_ctxs: Vec<SubTablePlanContext>,
metrics_collector: MetricsCollector,
is_analyze: bool,
) -> Self {
let remote_exec_ctx = Arc::new(RemoteExecContext {
executor: remote_executor,
plan_ctxs: sub_table_plan_ctxs,
});

Self::new_with_details(remote_exec_ctx, true, metrics_collector)
Self::new_with_details(remote_exec_ctx, true, metrics_collector, is_analyze)
}

pub fn new_with_details(
remote_exec_ctx: Arc<RemoteExecContext>,
pushdown_continue: bool,
metrics_collector: MetricsCollector,
is_analyze: bool,
) -> Self {
Self {
remote_exec_ctx,
pushdown_continue,
metrics_collector,
is_analyze,
}
}

@@ -180,6 +184,7 @@ impl ResolvedPartitionedScan {
remote_exec_ctx: self.remote_exec_ctx.clone(),
pushdown_continue: false,
metrics_collector: self.metrics_collector.clone(),
is_analyze: self.is_analyze,
})
}

@@ -215,6 +220,7 @@
table: plan_ctx.table.clone(),
plan: extended_plan,
metrics_collector: plan_ctx.metrics_collector.clone(),
remote_metrics: plan_ctx.remote_metrics.clone(),
})
})
.collect::<DfResult<Vec<_>>>()?;
@@ -227,6 +233,7 @@
remote_exec_ctx,
can_push_down_more,
self.metrics_collector.clone(),
self.is_analyze,
);

Ok(Arc::new(plan))
@@ -256,6 +263,7 @@ pub(crate) struct SubTablePlanContext {
table: TableIdentifier,
plan: Arc<dyn ExecutionPlan>,
metrics_collector: MetricsCollector,
remote_metrics: Arc<Mutex<Option<String>>>,
}

impl SubTablePlanContext {
@@ -268,6 +276,7 @@ impl SubTablePlanContext {
table,
plan,
metrics_collector,
remote_metrics: Arc::new(Mutex::new(None)),
}
}
}
@@ -295,6 +304,12 @@ impl ExecutionPlan for ResolvedPartitionedScan {
}

fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
// If this is an analyze plan, we should not collect metrics of the children
// that have been sent to the remote nodes, so we just return empty children.
if self.is_analyze {
return vec![];
}

self.remote_exec_ctx
.plan_ctxs
.iter()
@@ -327,13 +342,17 @@
table: sub_table,
plan,
metrics_collector,
remote_metrics,
} = &self.remote_exec_ctx.plan_ctxs[partition];

let remote_task_ctx = RemoteTaskContext::new(context, remote_metrics.clone());

// Send plan for remote execution.
let stream_future =
self.remote_exec_ctx
.executor
.execute(sub_table.clone(), &context, plan.clone())?;
let stream_future = self.remote_exec_ctx.executor.execute(
remote_task_ctx,
sub_table.clone(),
plan.clone(),
)?;
let record_stream =
PartitionedScanStream::new(stream_future, plan.schema(), metrics_collector.clone());

@@ -349,15 +368,25 @@

let mut format_visitor = FormatCollectorVisitor::default();
self.metrics_collector.visit(&mut format_visitor);
let metrics_desc = format_visitor.into_string();
let mut metrics_desc = format_visitor.into_string();

// Collect the metrics reported back from the remote nodes.
for sub_table_ctx in &self.remote_exec_ctx.plan_ctxs {
if let Some(remote_metrics) = sub_table_ctx.remote_metrics.lock().unwrap().take() {
metrics_desc.push_str(&format!(
"\n{}:\n{}",
sub_table_ctx.table.table, remote_metrics
));
}
}

metric_set.push(Arc::new(Metric::new(
MetricValue::Count {
name: format!("\n{metrics_desc}").into(),
count: Count::new(),
},
None,
)));

Some(metric_set)
}
}
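To see how the pieces fit together when EXPLAIN ANALYZE runs: the plan formats its own MetricsCollector with FormatCollectorVisitor, then, for every sub-table whose executor filled the shared remote_metrics slot, appends a "\n<table>:\n<metrics>" section. The following standalone sketch mirrors that folding step; the helper name and the simplified (table name, metrics) input stand in for the real SubTablePlanContext list and are illustrative, not part of the PR.

// Hypothetical helper mirroring the aggregation in metrics() above: fold each
// sub-table's optional remote metrics string into the local metrics description.
fn merge_remote_metrics(
    mut metrics_desc: String,
    sub_tables: &[(String, Option<String>)],
) -> String {
    for (table_name, remote_metrics) in sub_tables {
        if let Some(remote_metrics) = remote_metrics {
            // Same layout as the real code: a header line with the sub-table name,
            // then the metrics text reported by the remote node.
            metrics_desc.push_str(&format!("\n{table_name}:\n{remote_metrics}"));
        }
    }
    metrics_desc
}

Because the slot is an Option, sub-tables that never reported metrics simply contribute nothing to the combined description.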