feat: dist sql analyze #1260

Merged
20 commits merged on Dec 28, 2023
Changes from 15 commits
37 changes: 19 additions & 18 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -94,7 +94,7 @@ bytes = "1"
 bytes_ext = { path = "components/bytes_ext" }
 catalog = { path = "catalog" }
 catalog_impls = { path = "catalog_impls" }
-ceresdbproto = { git = "https://github.com/CeresDB/horaedbproto.git", rev = "2c60e05" }
+ceresdbproto = { git = "https://github.com/baojinri/ceresdbproto.git", rev = "05a9f1b3d7da250893e27f7777a4559f802b7939" }
 codec = { path = "components/codec" }
 chrono = "0.4"
 clap = "3.0"
28 changes: 25 additions & 3 deletions df_engine_extensions/src/dist_sql_query/mod.rs
@@ -12,7 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::{fmt, sync::Arc};
+use std::{
+    fmt,
+    sync::{Arc, Mutex},
+};

 use async_trait::async_trait;
 use common_types::projected_schema::ProjectedSchema;
@@ -35,8 +38,7 @@ pub mod test_util;
 pub trait RemotePhysicalPlanExecutor: fmt::Debug + Send + Sync + 'static {
     fn execute(
         &self,
-        table: TableIdentifier,
-        task_context: &TaskContext,
+        task_context: RemoteTaskContext,
         plan: Arc<dyn ExecutionPlan>,
     ) -> DfResult<BoxFuture<'static, DfResult<SendableRecordBatchStream>>>;
 }
@@ -58,6 +60,26 @@ pub trait ExecutableScanBuilder: fmt::Debug + Send + Sync + 'static {

 type ExecutableScanBuilderRef = Box<dyn ExecutableScanBuilder>;

+pub struct RemoteTaskContext {
+    pub task_ctx: Arc<TaskContext>,
+    pub table: TableIdentifier,
+    pub remote_metrics: Arc<Mutex<String>>,
+}
+
+impl RemoteTaskContext {
+    pub fn new(
+        task_ctx: Arc<TaskContext>,
+        table: TableIdentifier,
+        remote_metrics: Arc<Mutex<String>>,
+    ) -> Self {
+        Self {
+            task_ctx,
+            table,
+            remote_metrics,
+        }
+    }
+}
+
 #[derive(Clone)]
 pub struct TableScanContext {
     pub batch_size: usize,
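A rough sketch of how an implementor of the revised `RemotePhysicalPlanExecutor` trait might look after this change (not part of the PR; `NoopRemoteExecutor`, the placeholder metrics string, and the import aliases are assumptions based on the code above): the target sub-table and the shared metrics slot now travel together in `RemoteTaskContext`, and the remote path is expected to fill `remote_metrics` so the coordinator can report it later.

```rust
use std::sync::Arc;

use datafusion::{
    error::{DataFusionError, Result as DfResult},
    physical_plan::{ExecutionPlan, SendableRecordBatchStream},
};
use df_engine_extensions::dist_sql_query::{RemotePhysicalPlanExecutor, RemoteTaskContext};
use futures::future::BoxFuture;

/// Hypothetical executor, used only to illustrate the new signature.
#[derive(Debug)]
struct NoopRemoteExecutor;

impl RemotePhysicalPlanExecutor for NoopRemoteExecutor {
    fn execute(
        &self,
        task_context: RemoteTaskContext,
        _plan: Arc<dyn ExecutionPlan>,
    ) -> DfResult<BoxFuture<'static, DfResult<SendableRecordBatchStream>>> {
        // Everything needed for the remote call now arrives in one struct.
        let RemoteTaskContext {
            task_ctx: _,
            table,
            remote_metrics,
        } = task_context;

        let fut = async move {
            // A real implementation would serialize the plan, ship it to the
            // node owning `table`, and stream record batches back; it would
            // also store the remote node's formatted metrics in the shared
            // slot so they show up in EXPLAIN ANALYZE on the coordinator.
            *remote_metrics.lock().unwrap() =
                format!("{}: remote metrics would go here", table.table);

            Err::<SendableRecordBatchStream, _>(DataFusionError::NotImplemented(
                "illustrative stub only".to_string(),
            ))
        };

        Ok(Box::pin(fut))
    }
}
```

The coordinator side (`ResolvedPartitionedScan::execute` in the next file) builds the `RemoteTaskContext` and keeps a clone of the same `Arc<Mutex<String>>`, so whatever the executor writes becomes visible to `metrics()`.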
49 changes: 42 additions & 7 deletions df_engine_extensions/src/dist_sql_query/physical_plan.rs
@@ -18,7 +18,7 @@ use std::{
     any::Any,
     fmt,
     pin::Pin,
-    sync::Arc,
+    sync::{Arc, Mutex},
     task::{Context, Poll},
     time::{Duration, Instant},
 };
@@ -45,7 +45,7 @@ use futures::{future::BoxFuture, FutureExt, Stream, StreamExt};
 use table_engine::{remote::model::TableIdentifier, table::ReadRequest};
 use trace_metric::{collector::FormatCollectorVisitor, MetricsCollector, TraceMetricWhenDrop};

-use crate::dist_sql_query::{RemotePhysicalPlanExecutor, TableScanContext};
+use crate::dist_sql_query::{RemotePhysicalPlanExecutor, RemoteTaskContext, TableScanContext};

 /// Placeholder of partitioned table's scan plan
 /// It is inexecutable actually and just for carrying the necessary information
@@ -147,31 +147,35 @@ pub(crate) struct ResolvedPartitionedScan {
     pub remote_exec_ctx: Arc<RemoteExecContext>,
     pub pushdown_continue: bool,
     pub metrics_collector: MetricsCollector,
+    pub is_analyze: bool,
 }

 impl ResolvedPartitionedScan {
     pub fn new(
         remote_executor: Arc<dyn RemotePhysicalPlanExecutor>,
         sub_table_plan_ctxs: Vec<SubTablePlanContext>,
         metrics_collector: MetricsCollector,
+        is_analyze: bool,
     ) -> Self {
         let remote_exec_ctx = Arc::new(RemoteExecContext {
             executor: remote_executor,
             plan_ctxs: sub_table_plan_ctxs,
         });

-        Self::new_with_details(remote_exec_ctx, true, metrics_collector)
+        Self::new_with_details(remote_exec_ctx, true, metrics_collector, is_analyze)
     }

     pub fn new_with_details(
         remote_exec_ctx: Arc<RemoteExecContext>,
         pushdown_continue: bool,
         metrics_collector: MetricsCollector,
+        is_analyze: bool,
     ) -> Self {
         Self {
             remote_exec_ctx,
             pushdown_continue,
             metrics_collector,
+            is_analyze,
         }
     }

@@ -180,6 +184,7 @@ impl ResolvedPartitionedScan {
             remote_exec_ctx: self.remote_exec_ctx.clone(),
             pushdown_continue: false,
             metrics_collector: self.metrics_collector.clone(),
+            is_analyze: self.is_analyze,
         })
     }

@@ -215,6 +220,7 @@ impl ResolvedPartitionedScan {
                     table: plan_ctx.table.clone(),
                     plan: extended_plan,
                     metrics_collector: plan_ctx.metrics_collector.clone(),
+                    remote_metrics: plan_ctx.remote_metrics.clone(),
                 })
             })
             .collect::<DfResult<Vec<_>>>()?;
@@ -227,6 +233,7 @@ impl ResolvedPartitionedScan {
             remote_exec_ctx,
             can_push_down_more,
             self.metrics_collector.clone(),
+            self.is_analyze,
         );

         Ok(Arc::new(plan))
@@ -256,18 +263,21 @@ pub(crate) struct SubTablePlanContext {
     table: TableIdentifier,
     plan: Arc<dyn ExecutionPlan>,
     metrics_collector: MetricsCollector,
+    remote_metrics: Arc<Mutex<String>>,
 }

 impl SubTablePlanContext {
     pub fn new(
         table: TableIdentifier,
         plan: Arc<dyn ExecutionPlan>,
         metrics_collector: MetricsCollector,
+        remote_metrics: Arc<Mutex<String>>,
     ) -> Self {
         Self {
             table,
             plan,
             metrics_collector,
+            remote_metrics,
         }
     }
 }
@@ -295,6 +305,12 @@ impl ExecutionPlan for ResolvedPartitionedScan {
     }

     fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
+        // If this is an analyze plan, we should not collect metrics from children
+        // that have been sent to remote nodes, so we just return empty children.
+        if self.is_analyze {
+            return vec![];
+        }
+
         self.remote_exec_ctx
             .plan_ctxs
             .iter()
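The `is_analyze` flag matters here because ANALYZE-style rendering walks the plan tree through `children()`. A minimal sketch of such a walk, assuming the DataFusion APIs used in this file (`render_metrics` is a hypothetical helper, not HoraeDB or DataFusion code): with the children hidden, the local walk stops at the partitioned scan, so sub-table metrics are reported once via the `remote_metrics` strings instead of being requested from plans that actually ran on remote nodes.

```rust
use std::sync::Arc;

use datafusion::physical_plan::ExecutionPlan;

/// Hypothetical helper: recursively render each node's metrics, the way an
/// EXPLAIN ANALYZE style visitor would.
fn render_metrics(plan: &Arc<dyn ExecutionPlan>, indent: usize, out: &mut String) {
    if let Some(metrics) = plan.metrics() {
        out.push_str(&format!("{:indent$}{metrics}\n", ""));
    }
    // When `is_analyze` is true, ResolvedPartitionedScan returns no children,
    // so this recursion stops instead of descending into sub-plans that were
    // shipped to remote nodes.
    for child in plan.children() {
        render_metrics(&child, indent + 2, out);
    }
}
```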
@@ -327,13 +343,17 @@ impl ExecutionPlan for ResolvedPartitionedScan {
             table: sub_table,
             plan,
             metrics_collector,
+            remote_metrics,
         } = &self.remote_exec_ctx.plan_ctxs[partition];

+        let remote_task_ctx =
+            RemoteTaskContext::new(context, sub_table.clone(), remote_metrics.clone());
+
         // Send plan for remote execution.
-        let stream_future =
-            self.remote_exec_ctx
-                .executor
-                .execute(sub_table.clone(), &context, plan.clone())?;
+        let stream_future = self
+            .remote_exec_ctx
+            .executor
+            .execute(remote_task_ctx, plan.clone())?;
         let record_stream =
             PartitionedScanStream::new(stream_future, plan.schema(), metrics_collector.clone());

@@ -347,6 +367,21 @@
     fn metrics(&self) -> Option<MetricsSet> {
         let mut metric_set = MetricsSet::new();

+        for sub_table_ctx in &self.remote_exec_ctx.plan_ctxs {
+            let metrics_desc = format!(
+                "{}:\n{}",
+                sub_table_ctx.table.table,
+                sub_table_ctx.remote_metrics.lock().unwrap()
+            );
+            metric_set.push(Arc::new(Metric::new(
+                MetricValue::Count {
+                    name: format!("\n{metrics_desc}").into(),
+                    count: Count::new(),
+                },
+                None,
+            )));
+        }
+
         let mut format_visitor = FormatCollectorVisitor::default();
         self.metrics_collector.visit(&mut format_visitor);
         let metrics_desc = format_visitor.into_string();
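A note on the reporting trick above: `MetricsSet` has no free-form text metric, so the per-sub-table description is carried as the name of a `Count` that stays at zero, and whatever renders the metric set prints the remote text verbatim. The same idea in isolation (a hedged sketch; `report_remote_text` and the rendering assumption are illustrative, not HoraeDB code):

```rust
use std::sync::Arc;

use datafusion::physical_plan::metrics::{Count, Metric, MetricValue, MetricsSet};

/// Hypothetical helper: wrap a free-form description into a `MetricsSet` by
/// carrying it as the name of a zero-valued `Count` metric.
fn report_remote_text(remote_desc: &str) -> MetricsSet {
    let mut set = MetricsSet::new();
    set.push(Arc::new(Metric::new(
        MetricValue::Count {
            // Leading newline keeps the remote text on its own line when the
            // metric set is rendered.
            name: format!("\n{remote_desc}").into(),
            count: Count::new(),
        },
        None, // not tied to a particular partition
    )));
    set
}
```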