27 changes: 19 additions & 8 deletions datafusion/core/src/dataframe/mod.rs
@@ -56,6 +56,7 @@ use datafusion_common::{
ScalarValue, SchemaError, TableReference, UnnestOptions,
};
use datafusion_expr::select_expr::SelectExpr;
use datafusion_expr::Extension;
use datafusion_expr::{
case,
dml::InsertOp,
@@ -2344,14 +2345,24 @@ impl DataFrame {
/// # }
/// ```
pub async fn cache(self) -> Result<DataFrame> {
let context = SessionContext::new_with_state((*self.session_state).clone());
// The schema is consistent with the output
let plan = self.clone().create_physical_plan().await?;
let schema = plan.schema();
let task_ctx = Arc::new(self.task_ctx());
let partitions = collect_partitioned(plan, task_ctx).await?;
let mem_table = MemTable::try_new(schema, partitions)?;
context.read_table(Arc::new(mem_table))
if let Some(cache_producer) = self.session_state.cache_producer() {
let node = cache_producer.create(self.plan)?;
let plan = LogicalPlan::Extension(Extension { node });
Ok(Self {
session_state: self.session_state,
plan,
projection_requires_validation: self.projection_requires_validation,
})
} else {
let context = SessionContext::new_with_state((*self.session_state).clone());
// The schema is consistent with the output
let plan = self.clone().create_physical_plan().await?;
let schema = plan.schema();
let task_ctx = Arc::new(self.task_ctx());
let partitions = collect_partitioned(plan, task_ctx).await?;
let mem_table = MemTable::try_new(schema, partitions)?;
context.read_table(Arc::new(mem_table))
}
}

/// Apply an alias to the DataFrame.
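For orientation, a minimal sketch of the new code path from the caller's side (the test name is illustrative only). It leans on the `test_table_with_cache_producer` helper added to `test_util` later in this diff and assumes the `aggregate_test_100` test data is available:

```rust
use datafusion::error::Result;
use datafusion::test_util::test_table_with_cache_producer;

#[tokio::test]
async fn cache_defers_to_producer() -> Result<()> {
    // The returned DataFrame's session context already has a CacheProducer registered.
    let df = test_table_with_cache_producer().await?;

    // With a producer present, `cache()` no longer executes the plan and
    // materializes a MemTable; it wraps the logical plan in an Extension node.
    let cached = df.cache().await?;

    // The custom node now sits at the root of the logical plan.
    println!("{}", cached.logical_plan().display_indent());
    Ok(())
}
```

Without a registered producer, the `else` branch preserves the existing `MemTable` materialization, so current callers are unaffected.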
15 changes: 15 additions & 0 deletions datafusion/core/src/execution/context/mod.rs
@@ -468,6 +468,12 @@ impl SessionContext {
self
}

/// Register a [`CacheProducer`] to provide a custom caching strategy
pub fn with_cache_producer(self, cache_producer: Arc<dyn CacheProducer>) -> Self {
self.state.write().set_cache_producer(cache_producer);
self
}

/// Adds an optimizer rule to the end of the existing rules.
///
/// See [`SessionState`] for more control of when the rule is applied.
@@ -1884,6 +1890,15 @@ pub enum RegisterFunction {
Table(String, Arc<dyn TableFunctionImpl>),
}

/// Interface for applying a custom caching strategy.
/// Implement this trait and register it via [`SessionState`]
/// to have `DataFrame::cache` produce a custom logical node
/// instead of eagerly materializing results.
pub trait CacheProducer: Debug + Sync + Send {
/// Create a custom logical node for caching,
/// given the logical plan of the DataFrame to cache.
fn create(&self, plan: LogicalPlan) -> Result<Arc<dyn UserDefinedLogicalNode>>;
}

/// Default implementation of [SerializerRegistry] that throws unimplemented error
/// for all requests.
#[derive(Debug)]
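A minimal registration sketch for the new trait; `producer` here stands for any `CacheProducer` implementation (the `TestCacheProducer` added in `test_util/mod.rs` further down is a working reference):

```rust
use std::sync::Arc;

use datafusion::execution::context::CacheProducer;
use datafusion::prelude::SessionContext;

// Build a context whose `DataFrame::cache` delegates to `producer`.
fn context_with_producer(producer: Arc<dyn CacheProducer>) -> SessionContext {
    SessionContext::new().with_cache_producer(producer)
}
```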
37 changes: 35 additions & 2 deletions datafusion/core/src/execution/session_state.rs
@@ -27,7 +27,9 @@ use crate::catalog::{CatalogProviderList, SchemaProvider, TableProviderFactory};
use crate::datasource::file_format::FileFormatFactory;
#[cfg(feature = "sql")]
use crate::datasource::provider_as_source;
use crate::execution::context::{EmptySerializerRegistry, FunctionFactory, QueryPlanner};
use crate::execution::context::{
CacheProducer, EmptySerializerRegistry, FunctionFactory, QueryPlanner,
};
use crate::execution::SessionStateDefaults;
use crate::physical_planner::{DefaultPhysicalPlanner, PhysicalPlanner};
use arrow_schema::{DataType, FieldRef};
@@ -185,6 +187,7 @@ pub struct SessionState {
/// It will be invoked on `CREATE FUNCTION` statements.
/// thus, changing the dialect to PostgreSql is required
function_factory: Option<Arc<dyn FunctionFactory>>,
cache_producer: Option<Arc<dyn CacheProducer>>,
/// Cache logical plans of prepared statements for later execution.
/// Key is the prepared statement name.
prepared_plans: HashMap<String, Arc<PreparedPlan>>,
@@ -206,6 +209,7 @@ impl Debug for SessionState {
.field("table_options", &self.table_options)
.field("table_factories", &self.table_factories)
.field("function_factory", &self.function_factory)
.field("cache_producer", &self.cache_producer)
.field("expr_planners", &self.expr_planners);

#[cfg(feature = "sql")]
@@ -355,6 +359,16 @@ impl SessionState {
self.function_factory.as_ref()
}

/// Register a [`CacheProducer`] for a custom caching strategy
pub fn set_cache_producer(&mut self, cache_producer: Arc<dyn CacheProducer>) {
self.cache_producer = Some(cache_producer);
}

/// Get the registered [`CacheProducer`], if any
pub fn cache_producer(&self) -> Option<&Arc<dyn CacheProducer>> {
self.cache_producer.as_ref()
}

/// Get the table factories
pub fn table_factories(&self) -> &HashMap<String, Arc<dyn TableProviderFactory>> {
&self.table_factories
@@ -941,6 +955,7 @@ pub struct SessionStateBuilder {
table_factories: Option<HashMap<String, Arc<dyn TableProviderFactory>>>,
runtime_env: Option<Arc<RuntimeEnv>>,
function_factory: Option<Arc<dyn FunctionFactory>>,
cache_producer: Option<Arc<dyn CacheProducer>>,
// fields to support convenience functions
analyzer_rules: Option<Vec<Arc<dyn AnalyzerRule + Send + Sync>>>,
optimizer_rules: Option<Vec<Arc<dyn OptimizerRule + Send + Sync>>>,
@@ -978,6 +993,7 @@ impl SessionStateBuilder {
table_factories: None,
runtime_env: None,
function_factory: None,
cache_producer: None,
// fields to support convenience functions
analyzer_rules: None,
optimizer_rules: None,
@@ -1030,7 +1046,7 @@ impl SessionStateBuilder {
table_factories: Some(existing.table_factories),
runtime_env: Some(existing.runtime_env),
function_factory: existing.function_factory,

cache_producer: existing.cache_producer,
// fields to support convenience functions
analyzer_rules: None,
optimizer_rules: None,
@@ -1319,6 +1335,15 @@ impl SessionStateBuilder {
self
}

/// Set a [`CacheProducer`] for a custom caching strategy
pub fn with_cache_producer(
mut self,
cache_producer: Option<Arc<dyn CacheProducer>>,
) -> Self {
self.cache_producer = cache_producer;
self
}

/// Register an `ObjectStore` to the [`RuntimeEnv`]. See [`RuntimeEnv::register_object_store`]
/// for more details.
///
@@ -1382,6 +1407,7 @@ impl SessionStateBuilder {
table_factories,
runtime_env,
function_factory,
cache_producer,
analyzer_rules,
optimizer_rules,
physical_optimizer_rules,
@@ -1418,6 +1444,7 @@ impl SessionStateBuilder {
table_factories: table_factories.unwrap_or_default(),
runtime_env,
function_factory,
cache_producer,
prepared_plans: HashMap::new(),
};

@@ -1621,6 +1648,11 @@ impl SessionStateBuilder {
&mut self.function_factory
}

/// Returns the current cache_producer value
pub fn cache_producer(&mut self) -> &mut Option<Arc<dyn CacheProducer>> {
&mut self.cache_producer
}

/// Returns the current analyzer_rules value
pub fn analyzer_rules(
&mut self,
@@ -1659,6 +1691,7 @@ impl Debug for SessionStateBuilder {
.field("table_options", &self.table_options)
.field("table_factories", &self.table_factories)
.field("function_factory", &self.function_factory)
.field("cache_producer", &self.cache_producer)
.field("expr_planners", &self.expr_planners);
#[cfg(feature = "sql")]
let ret = ret.field("type_planner", &self.type_planner);
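For state built directly (rather than through `SessionContext::with_cache_producer`), the builder hook added above can be used; a short sketch, again taking the producer as a parameter rather than assuming a concrete implementation:

```rust
use std::sync::Arc;

use datafusion::execution::context::CacheProducer;
use datafusion::execution::session_state::SessionStateBuilder;
use datafusion::prelude::SessionContext;

// Build a SessionState with a CacheProducer wired in from the start.
fn context_from_builder(producer: Arc<dyn CacheProducer>) -> SessionContext {
    let state = SessionStateBuilder::new()
        .with_default_features()
        .with_cache_producer(Some(producer))
        .build();
    SessionContext::new_with_state(state)
}
```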
64 changes: 62 additions & 2 deletions datafusion/core/src/test_util/mod.rs
@@ -25,6 +25,7 @@ pub mod csv;
use futures::Stream;
use std::any::Any;
use std::collections::HashMap;
use std::fmt::Formatter;
use std::fs::File;
use std::io::Write;
use std::path::Path;
@@ -36,6 +37,7 @@ use crate::dataframe::DataFrame;
use crate::datasource::stream::{FileStreamProvider, StreamConfig, StreamTable};
use crate::datasource::{empty::EmptyTable, provider_as_source};
use crate::error::Result;
use crate::execution::context::CacheProducer;
use crate::logical_expr::{LogicalPlanBuilder, UNNAMED_TABLE};
use crate::physical_plan::ExecutionPlan;
use crate::prelude::{CsvReadOptions, SessionContext};
@@ -44,8 +46,11 @@ use crate::execution::SendableRecordBatchStream;
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use arrow::record_batch::RecordBatch;
use datafusion_catalog::Session;
use datafusion_common::TableReference;
use datafusion_expr::{CreateExternalTable, Expr, SortExpr, TableType};
use datafusion_common::{DFSchemaRef, TableReference};
use datafusion_expr::{
CreateExternalTable, Expr, LogicalPlan, SortExpr, TableType, UserDefinedLogicalNode,
UserDefinedLogicalNodeCore,
};
use std::pin::Pin;

use async_trait::async_trait;
@@ -282,3 +287,58 @@ impl RecordBatchStream for BoundedStream {
self.record_batch.schema()
}
}

#[derive(Hash, Eq, PartialEq, PartialOrd, Debug)]
struct CacheNode {
input: LogicalPlan,
}

impl UserDefinedLogicalNodeCore for CacheNode {
fn name(&self) -> &str {
"CacheNode"
}

fn inputs(&self) -> Vec<&LogicalPlan> {
vec![&self.input]
}

fn schema(&self) -> &DFSchemaRef {
self.input.schema()
}

fn expressions(&self) -> Vec<Expr> {
vec![]
}

fn fmt_for_explain(&self, f: &mut Formatter) -> std::fmt::Result {
write!(f, "CacheNode")
}

fn with_exprs_and_inputs(
&self,
_exprs: Vec<Expr>,
inputs: Vec<LogicalPlan>,
) -> Result<Self> {
assert_eq!(inputs.len(), 1, "input size inconsistent");
Ok(Self {
input: inputs[0].clone(),
})
}
}

#[derive(Debug)]
struct TestCacheProducer {}

impl CacheProducer for TestCacheProducer {
fn create(&self, plan: LogicalPlan) -> Result<Arc<dyn UserDefinedLogicalNode>> {
Ok(Arc::new(CacheNode { input: plan }))
}
}

/// Create a test table registered in a session context that has an associated cache producer
pub async fn test_table_with_cache_producer() -> Result<DataFrame> {
let ctx = SessionContext::new().with_cache_producer(Arc::new(TestCacheProducer {}));
let name = "aggregate_test_100";
register_aggregate_csv(&ctx, name).await?;
ctx.table(name).await
}
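Note that nothing in this PR plans or executes the extension node; that is deliberately left to the system registering the producer. As a hedged sketch (not part of this diff), a downstream engine would supply an `ExtensionPlanner` that recognizes the node, matched here by `name()` since `CacheNode` above is private. The pass-through body is a placeholder for a real caching strategy:

```rust
use std::sync::Arc;

use async_trait::async_trait;
use datafusion::error::Result;
use datafusion::execution::session_state::SessionState;
use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
use datafusion::physical_plan::ExecutionPlan;
use datafusion::physical_planner::{ExtensionPlanner, PhysicalPlanner};

#[derive(Debug)]
struct CacheExtensionPlanner;

#[async_trait]
impl ExtensionPlanner for CacheExtensionPlanner {
    async fn plan_extension(
        &self,
        _planner: &dyn PhysicalPlanner,
        node: &dyn UserDefinedLogicalNode,
        _logical_inputs: &[&LogicalPlan],
        physical_inputs: &[Arc<dyn ExecutionPlan>],
        _session_state: &SessionState,
    ) -> Result<Option<Arc<dyn ExecutionPlan>>> {
        if node.name() == "CacheNode" {
            // A real implementation would consult or populate a cache here;
            // this placeholder passes the single child plan through unchanged.
            Ok(Some(Arc::clone(&physical_inputs[0])))
        } else {
            // Not our node; let other extension planners try.
            Ok(None)
        }
    }
}
```

Such a planner would be installed through a custom `QueryPlanner` that delegates to `DefaultPhysicalPlanner::with_extension_planners`.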
25 changes: 24 additions & 1 deletion datafusion/core/tests/dataframe/mod.rs
@@ -61,7 +61,7 @@ use datafusion::prelude::{
};
use datafusion::test_util::{
parquet_test_data, populate_csv_partitions, register_aggregate_csv, test_table,
test_table_with_name,
test_table_with_cache_producer, test_table_with_name,
};
use datafusion_catalog::TableProvider;
use datafusion_common::test_util::{batches_to_sort_string, batches_to_string};
@@ -2338,6 +2345,29 @@ async fn cache_test() -> Result<()> {
Ok(())
}

#[tokio::test]
async fn cache_producer_test() -> Result<()> {
let df = test_table_with_cache_producer()
.await?
.select_columns(&["c2", "c3"])?
.limit(0, Some(1))?
.with_column("sum", cast(col("c2") + col("c3"), DataType::Int64))?;

let cached_df = df.clone().cache().await?;

assert_snapshot!(
cached_df.clone().into_optimized_plan().unwrap(),
@r###"
CacheNode
Projection: aggregate_test_100.c2, aggregate_test_100.c3, CAST(CAST(aggregate_test_100.c2 AS Int64) + CAST(aggregate_test_100.c3 AS Int64) AS Int64) AS sum
Projection: aggregate_test_100.c2, aggregate_test_100.c3
Limit: skip=0, fetch=1
TableScan: aggregate_test_100, fetch=1
"###
);
Ok(())
}

#[tokio::test]
async fn partition_aware_union() -> Result<()> {
let left = test_table().await?.select_columns(&["c1", "c2"])?;