prototype cost-based optimizer
andygrove committed Jun 26, 2024
1 parent 2aa20f0 commit dc1b96c
Showing 7 changed files with 260 additions and 5 deletions.
8 changes: 8 additions & 0 deletions common/src/main/scala/org/apache/comet/CometConf.scala
@@ -401,6 +401,14 @@ object CometConf extends ShimCometConf
.booleanConf
.createWithDefault(false)

val COMET_CBO_ENABLED: ConfigEntry[Boolean] =
conf("spark.comet.cbo.enabled")
.doc(
"Cost-based optimizer to avoid performance regressions where Comet plan may " +
"be slower than Spark plan.")
.booleanConf
.createWithDefault(false)

}

object ConfigHelpers {
1 change: 1 addition & 0 deletions docs/source/user-guide/configs.md
@@ -25,6 +25,7 @@ Comet provides the following configuration settings.
|--------|-------------|---------------|
| spark.comet.batchSize | The columnar batch size, i.e., the maximum number of rows that a batch can contain. | 8192 |
| spark.comet.cast.allowIncompatible | Comet is not currently fully compatible with Spark for all cast operations. Set this config to true to allow them anyway. See compatibility guide for more information. | false |
| spark.comet.cbo.enabled | Cost-based optimizer to avoid performance regressions where the Comet plan may be slower than the Spark plan. | false |
| spark.comet.columnar.shuffle.async.enabled | Whether to enable asynchronous shuffle for Arrow-based shuffle. By default, this config is false. | false |
| spark.comet.columnar.shuffle.async.max.thread.num | Maximum number of threads on an executor used for Comet async columnar shuffle. By default, this config is 100. This is the upper bound of total number of shuffle threads per executor. In other words, if the number of cores * the number of shuffle threads per task `spark.comet.columnar.shuffle.async.thread.num` is larger than this config. Comet will use this config as the number of shuffle threads per executor instead. | 100 |
| spark.comet.columnar.shuffle.async.thread.num | Number of threads used for Comet async columnar shuffle per shuffle task. By default, this config is 3. Note that more threads means more memory requirement to buffer shuffle data before flushing to disk. Also, more threads may not always improve performance, and should be set based on the number of cores available. | 3 |
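For context, a minimal usage sketch of the new flag (assuming a SparkSession with the Comet jar and session extensions already configured; the config keys are the ones referenced elsewhere in this commit):

```scala
import org.apache.spark.sql.SparkSession

import org.apache.comet.CometConf

// Enable Comet execution together with the prototype cost-based optimizer.
// COMET_CBO_ENABLED is the entry added in this commit; the other two keys are
// existing Comet configs (both are referenced in the test suite below).
val spark = SparkSession
  .builder()
  .appName("comet-cbo-sketch")
  .config(CometConf.COMET_ENABLED.key, "true")
  .config(CometConf.COMET_EXEC_ENABLED.key, "true")
  .config(CometConf.COMET_CBO_ENABLED.key, "true")
  .getOrCreate()
```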
121 changes: 121 additions & 0 deletions spark/src/main/scala/org/apache/comet/CometCostEvaluator.scala
@@ -0,0 +1,121 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.comet

import org.apache.spark.internal.Logging
import org.apache.spark.sql.comet.{CometExec, CometPlan, CometRowToColumnarExec, CometSinkPlaceHolder}
import org.apache.spark.sql.execution.{ColumnarToRowExec, InputAdapter, RowToColumnarExec, SparkPlan, WholeStageCodegenExec}
import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, Cost, CostEvaluator, QueryStageExec, SimpleCost}

/**
* The goal of this cost model is to avoid introducing performance regressions in query stages
* during AQE.
*
* This evaluator will be called twice: once for the original Spark plan and once for the Comet
* plan. Spark will then choose the cheaper of the two.
*/
class CometCostEvaluator extends CostEvaluator with Logging {

/** Baseline cost for Spark operator is 1.0 */
val DEFAULT_SPARK_OPERATOR_COST = 1.0

/** Relative cost of Comet operator */
val DEFAULT_COMET_OPERATOR_COST = 0.8

/** Relative cost of a transition (C2R, R2C) */
val DEFAULT_TRANSITION_COST = 1.0

override def evaluateCost(plan: SparkPlan): Cost = {

// TODO this is a crude prototype where we just penalize transitions, but
// this can evolve into a true cost model where we have real numbers for the relative
// performance of Comet operators & expressions versus the Spark versions
//
// Some areas to explore
// - can we use statistics from previous query stage(s)?
// - a transition after a filter should be cheaper than a transition before the filter
//   (e.g. when reading from Parquet followed by a filter, Comet will filter first and
//   then transition)
def computePlanCost(plan: SparkPlan): Double = {

// get children even for leaf nodes at query stage edges
def getChildren(plan: SparkPlan) = plan match {
case a: AdaptiveSparkPlanExec => Seq(a.inputPlan)
case qs: QueryStageExec => Seq(qs.plan)
case p => p.children
}

val children = getChildren(plan)
val childPlanCost = children.map(computePlanCost).sum
val operatorCost = plan match {
case _: AdaptiveSparkPlanExec => 0
case _: CometSinkPlaceHolder => 0
case _: InputAdapter => 0
case _: WholeStageCodegenExec => 0
case RowToColumnarExec(_) => DEFAULT_TRANSITION_COST
case ColumnarToRowExec(_) => DEFAULT_TRANSITION_COST
case CometRowToColumnarExec(_) => DEFAULT_TRANSITION_COST
case _: CometExec => DEFAULT_COMET_OPERATOR_COST
case _ => DEFAULT_SPARK_OPERATOR_COST
}

def isSparkNative(plan: SparkPlan): Boolean = plan match {
case p: AdaptiveSparkPlanExec => isSparkNative(p.inputPlan)
case p: QueryStageExec => isSparkNative(p.plan)
case _: CometPlan => false
case _ => true
}

def isCometNative(plan: SparkPlan): Boolean = plan match {
case p: AdaptiveSparkPlanExec => isCometNative(p.inputPlan)
case p: QueryStageExec => isCometNative(p.plan)
case _: CometPlan => true
case _ => false
}

def isTransition(plan1: SparkPlan, plan2: SparkPlan) = {
(isSparkNative(plan1) && isCometNative(plan2)) ||
(isCometNative(plan1) && isSparkNative(plan2))
}

val transitionCost = if (children.exists(ch => isTransition(plan, ch))) {
DEFAULT_TRANSITION_COST
} else {
0
}


val totalCost = operatorCost + transitionCost + childPlanCost

logWarning(s"total cost is $totalCost ($operatorCost + $transitionCost + $childPlanCost) " +
s"for ${plan.nodeName}")

totalCost
}

// TODO can we access statistics from previous query stages?
val estimatedRowCount = 1000
val cost = (computePlanCost(plan) * estimatedRowCount).toLong

logWarning(s"Computed cost of $cost for $plan")

SimpleCost(cost)
}

}
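To make the constants above concrete, here is a small self-contained sketch (hypothetical plan shapes and a hypothetical CometCostSketch object, not part of this commit) of how per-operator costs and the transition penalty combine:

```scala
object CometCostSketch extends App {
  // Constants mirroring CometCostEvaluator above.
  val SparkOp = 1.0       // DEFAULT_SPARK_OPERATOR_COST
  val CometOp = 0.8       // DEFAULT_COMET_OPERATOR_COST
  val Transition = 1.0    // DEFAULT_TRANSITION_COST
  val EstimatedRows = 1000L // fixed row-count estimate used by the prototype

  // Spark-only stage: ProjectExec -> FilterExec -> row-based scan.
  val sparkPlanCost = SparkOp + SparkOp + SparkOp // 3.0

  // Comet stage: ColumnarToRowExec -> CometProject -> CometFilter
  //              -> CometRowToColumnarExec -> row-based scan.
  val cometPlanCost =
    (Transition + Transition) + // C2R operator cost + Spark/Comet boundary below it
      CometOp + CometOp +       // the two Comet operators
      (Transition + Transition) + // R2C operator cost + boundary at the row-based scan
      SparkOp                   // the row-based scan itself
  // = 6.6

  println(s"spark=${(sparkPlanCost * EstimatedRows).toLong}, " +
    s"comet=${(cometPlanCost * EstimatedRows).toLong}")
  // Roughly 3000 vs 6600: the Spark plan wins, i.e. the transition penalties
  // outweigh the 0.2 per-operator discount for this short stage.
}
```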
@@ -32,18 +32,19 @@ import org.apache.spark.sql.comet._
import org.apache.spark.sql.comet.execution.shuffle.{CometColumnarShuffle, CometNativeShuffle, CometShuffleExchangeExec, CometShuffleManager}
import org.apache.spark.sql.comet.util.Utils
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.adaptive.{AQEShuffleReadExec, BroadcastQueryStageExec, ShuffleQueryStageExec}
import org.apache.spark.sql.execution.adaptive.{AQEShuffleReadExec, AdaptiveSparkPlanExec, BroadcastQueryStageExec, QueryStageExec, ShuffleQueryStageExec}
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetScan
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ReusedExchangeExec, ShuffleExchangeExec}
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ReusedExchangeExec, ShuffleExchangeExec, ShuffleExchangeLike}
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, ShuffledHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._

import org.apache.comet.CometConf._
import org.apache.comet.CometExplainInfo.CANNOT_RUN_NATIVE
import org.apache.comet.CometSparkSessionExtensions.{createMessage, getCometShuffleNotEnabledReason, isANSIEnabled, isCometBroadCastForceEnabled, isCometEnabled, isCometExecEnabled, isCometJVMShuffleMode, isCometNativeShuffleMode, isCometOperatorEnabled, isCometScan, isCometScanEnabled, isCometShuffleEnabled, isSchemaSupported, isSpark34Plus, isSpark40Plus, shouldApplyRowToColumnar, withInfo, withInfos}
import org.apache.comet.parquet.{CometParquetScan, SupportsComet}
import org.apache.comet.serde.OperatorOuterClass.Operator
@@ -65,15 +66,15 @@ class CometSparkSessionExtensions
extensions.injectColumnar { session => CometScanColumnar(session) }
extensions.injectColumnar { session => CometExecColumnar(session) }
extensions.injectQueryStagePrepRule { session => CometScanRule(session) }
extensions.injectQueryStagePrepRule { session => CometExecRule(session) }
extensions.injectQueryStagePrepRule { session => CometQueryStagePrepRule(session) }
}

case class CometScanColumnar(session: SparkSession) extends ColumnarRule {
override def preColumnarTransitions: Rule[SparkPlan] = CometScanRule(session)
}

case class CometExecColumnar(session: SparkSession) extends ColumnarRule {
override def preColumnarTransitions: Rule[SparkPlan] = CometExecRule(session)
override def preColumnarTransitions: Rule[SparkPlan] = CometPreColumnarRule(session)

override def postColumnarTransitions: Rule[SparkPlan] =
EliminateRedundantTransitions(session)
@@ -192,6 +193,57 @@ class CometSparkSessionExtensions
}
}

case class CometQueryStagePrepRule(session: SparkSession) extends Rule[SparkPlan] {
def apply(plan: SparkPlan): SparkPlan = {


val newPlan = CometExecRule(session).apply(plan)


if (CometConf.COMET_CBO_ENABLED.get()) {
val costEvaluator = new CometCostEvaluator()
println(plan)
println(newPlan)
val sparkCost = costEvaluator.evaluateCost(plan)
val cometCost = costEvaluator.evaluateCost(newPlan)
println(s"sparkCost = $sparkCost, cometCost = $cometCost")
if (cometCost > sparkCost) {
val msg = s"Comet plan is more expensive than Spark plan ($cometCost > $sparkCost)" +
s"\nSPARK: $plan\n" +
s"\nCOMET: $newPlan\n"
logWarning(msg)
println(msg)
println(s"CometQueryStagePrepRule:\nIN: ${plan.getClass}\nOUT: ${plan.getClass}")

def fallbackRecursively(plan: SparkPlan) : Unit = {
plan.setTagValue(CANNOT_RUN_NATIVE, true)
plan match {
case a: AdaptiveSparkPlanExec => fallbackRecursively(a.inputPlan)
case qs: QueryStageExec => fallbackRecursively(qs.plan)
case p => p.children.foreach(fallbackRecursively)
}
}
fallbackRecursively(plan)

return plan
}
}


println(s"CometQueryStagePrepRule:\nIN: ${plan.getClass}\nOUT: ${newPlan.getClass}")

newPlan
}
}

case class CometPreColumnarRule(session: SparkSession) extends Rule[SparkPlan] {
def apply(plan: SparkPlan): SparkPlan = {
val newPlan = CometExecRule(session).apply(plan)
println(s"CometPreColumnarRule:\nIN: ${plan.getClass}\nOUT: ${newPlan.getClass}")
newPlan
}
}

case class CometExecRule(session: SparkSession) extends Rule[SparkPlan] {
private def applyCometShuffle(plan: SparkPlan): SparkPlan = {
plan.transformUp {
@@ -727,6 +779,11 @@ class CometSparkSessionExtensions
// We shouldn't transform Spark query plan if Comet is disabled.
if (!isCometEnabled(conf)) return plan

if (plan.getTagValue(CANNOT_RUN_NATIVE).getOrElse(false)) {
println("Cannot run native - too slow")
return plan
}

if (!isCometExecEnabled(conf)) {
// Comet exec is disabled, but for Spark shuffle, we still can use Comet columnar shuffle
if (isCometShuffleEnabled(conf)) {
@@ -85,4 +85,5 @@ class ExtendedExplainInfo extends ExtendedExplainGenerator

object CometExplainInfo {
val EXTENSION_INFO = new TreeNodeTag[Set[String]]("CometExtensionInfo")
val CANNOT_RUN_NATIVE = new TreeNodeTag[Boolean]("CometCannotRunNative")
}
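For reference, a minimal sketch of how this tag is written and read elsewhere in this commit (the helper names markCannotRunNative and cannotRunNative are illustrative only): CometQueryStagePrepRule sets the tag recursively on the plan that lost the cost comparison, and CometExecRule checks it before attempting any conversion.

```scala
import org.apache.spark.sql.execution.SparkPlan

import org.apache.comet.CometExplainInfo.CANNOT_RUN_NATIVE

// Writing side (as in CometQueryStagePrepRule): mark a node so that later
// rule invocations keep it on the Spark execution path.
def markCannotRunNative(plan: SparkPlan): Unit =
  plan.setTagValue(CANNOT_RUN_NATIVE, true)

// Reading side (as in CometExecRule): bail out early when the plan was
// already judged slower under Comet.
def cannotRunNative(plan: SparkPlan): Boolean =
  plan.getTagValue(CANNOT_RUN_NATIVE).getOrElse(false)
```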
@@ -2301,7 +2301,7 @@ object QueryPlanSerde extends Logging with ShimQueryPlanSerde with CometExprShim
.addAllSortOrders(sortOrders.map(_.get).asJava)
Some(result.setSort(sortBuilder).build())
} else {
withInfo(op, sortOrder: _*)
withInfo(op, "sort not allowed", sortOrder: _*)
None
}

@@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.comet

import org.apache.spark.sql.CometTestBase
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.internal.SQLConf

class CostBasedOptimizerSuite extends CometTestBase with AdaptiveSparkPlanHelper {

private val dataGen = DataGenerator.DEFAULT

test("tbd") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "false",
CometConf.COMET_ENABLED.key -> "true",
CometConf.COMET_EXEC_ENABLED.key -> "true",
CometConf.COMET_EXEC_SHUFFLE_ENABLED.key -> "true",
CometConf.COMET_CBO_ENABLED.key -> "true",
CometConf.COMET_EXPLAIN_FALLBACK_ENABLED.key -> "true") {
val table = "t1"
withTable(table, "t2") {
sql(s"create table t1(col string, a int, b float) using parquet")
sql(s"create table t2(col string, a int, b float) using parquet")
val tableSchema = spark.table(table).schema
val rows = dataGen.generateRows(
1000,
tableSchema,
Some(() => dataGen.generateString("tbd:", 6)))
val data = spark.createDataFrame(spark.sparkContext.parallelize(rows), tableSchema)
data.write
.mode("append")
.insertInto(table)
data.write
.mode("append")
.insertInto("t2")
val x = checkSparkAnswer/*AndOperator*/("select t1.col as x " +
"from t1 join t2 on cast(t1.col as timestamp) = cast(t2.col as timestamp) " +
"order by x")

// TODO assert that we fell back for whole plan
println(x._1)
println(x._2)

assert(!x._2.toString().contains("CometSortExec"))
}
}
}

}
