From 4f08278cd8fde941a2885ffd07de78c0e0b4aeea Mon Sep 17 00:00:00 2001
From: Josh Rosen
Date: Thu, 6 Aug 2015 12:56:47 -0700
Subject: [PATCH] Fix the test by adding the compatibility check to EnsureRequirements

---
 .../scala/org/apache/spark/sql/execution/Exchange.scala | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
index 6d4d4834dfc75..4a53e90dabcb4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
@@ -212,9 +212,9 @@ private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[
     }
   }
 
-  private def ensureChildNumPartitionsAgreementIfNecessary(operator: SparkPlan): SparkPlan = {
+  private def ensureChildPartitioningsAreCompatible(operator: SparkPlan): SparkPlan = {
     if (operator.requiresChildPartitioningsToBeCompatible) {
-      if (operator.children.map(_.outputPartitioning.numPartitions).distinct.size > 1) {
+      if (!Partitioning.allCompatible(operator.children.map(_.outputPartitioning))) {
         val newChildren = operator.children.zip(operator.requiredChildDistribution).map {
           case (child, requiredDistribution) =>
             val targetPartitioning = canonicalPartitioning(requiredDistribution)
@@ -271,6 +271,6 @@ private[sql] case class EnsureRequirements(sqlContext: SQLContext) extends Rule[
 
   def apply(plan: SparkPlan): SparkPlan = plan.transformUp {
     case operator: SparkPlan =>
-      ensureDistributionAndOrdering(ensureChildNumPartitionsAgreementIfNecessary(operator))
+      ensureDistributionAndOrdering(ensureChildPartitioningsAreCompatible(operator))
   }
 }
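
For context on why the patch swaps the old check (distinct numPartitions across children)
for Partitioning.allCompatible: two children can have the same partition count yet hash
rows on different keys, so rows with equal keys are not guaranteed to land in the same
partition number. The self-contained Scala sketch below illustrates that gap. It is a
simplified stand-in, not Spark's actual catalyst classes: the Partitioning trait,
HashPartitioning/UnknownPartitioning case classes, and the allCompatible helper here only
mimic the pairwise shape such a check might take.

// Minimal sketch (hypothetical simplified types, not Spark's real ones) showing why
// "all children have the same numPartitions" is weaker than full compatibility.
sealed trait Partitioning {
  def numPartitions: Int
  // Compatible means: rows with equal partitioning-key values are guaranteed
  // to be assigned the same partition number by both partitionings.
  def compatibleWith(other: Partitioning): Boolean
}

case class HashPartitioning(expressions: Seq[String], numPartitions: Int) extends Partitioning {
  def compatibleWith(other: Partitioning): Boolean = other match {
    // Same hash keys and same partition count => identical row-to-partition mapping.
    case HashPartitioning(otherExprs, n) => expressions == otherExprs && n == numPartitions
    case _ => false
  }
}

case class UnknownPartitioning(numPartitions: Int) extends Partitioning {
  // An unknown partitioning gives no co-location guarantee, even with itself.
  def compatibleWith(other: Partitioning): Boolean = false
}

object Partitioning {
  // Pairwise check over adjacent partitionings; assumes compatibility is transitive.
  def allCompatible(partitionings: Seq[Partitioning]): Boolean =
    partitionings.sliding(2).forall {
      case Seq(a, b) => a.compatibleWith(b) && b.compatibleWith(a)
      case _         => true // zero or one child is trivially compatible
    }
}

object CompatibilityDemo extends App {
  val left           = HashPartitioning(Seq("a"), numPartitions = 8)
  val rightSameCount = HashPartitioning(Seq("b"), numPartitions = 8)

  // The old check (distinct partition counts) accepts this pair...
  println(Seq(left, rightSameCount).map(_.numPartitions).distinct.size <= 1) // true
  // ...but they hash on different keys, so co-partitioning is not guaranteed:
  println(Partitioning.allCompatible(Seq(left, rightSameCount)))             // false
}

Run as a script, the old-style count check accepts the pair while the compatibility
check rejects it and would force a re-shuffle, which is exactly the gap this patch
closes in EnsureRequirements.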