-
Notifications
You must be signed in to change notification settings - Fork 28.5k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[SPARK-40852][CONNECT][PYTHON] Introduce StatFunction
in proto and implement DataFrame.summary
#38318
[SPARK-40852][CONNECT][PYTHON] Introduce StatFunction
in proto and implement DataFrame.summary
#38318
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -21,7 +21,7 @@ import scala.annotation.elidable.byName | |
import scala.collection.JavaConverters._ | ||
|
||
import org.apache.spark.connect.proto | ||
import org.apache.spark.sql.SparkSession | ||
import org.apache.spark.sql.{Dataset, SparkSession} | ||
import org.apache.spark.sql.catalyst.AliasIdentifier | ||
import org.apache.spark.sql.catalyst.analysis.{UnresolvedAlias, UnresolvedAttribute, UnresolvedFunction, UnresolvedRelation, UnresolvedStar} | ||
import org.apache.spark.sql.catalyst.expressions | ||
|
@@ -32,6 +32,7 @@ import org.apache.spark.sql.catalyst.plans.{logical, FullOuter, Inner, JoinType, | |
import org.apache.spark.sql.catalyst.plans.logical.{Deduplicate, Except, Intersect, LogicalPlan, Sample, SubqueryAlias, Union} | ||
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap | ||
import org.apache.spark.sql.execution.QueryExecution | ||
import org.apache.spark.sql.execution.stat.StatFunctions | ||
import org.apache.spark.sql.types._ | ||
import org.apache.spark.util.Utils | ||
|
||
|
@@ -73,6 +74,8 @@ class SparkConnectPlanner(plan: proto.Relation, session: SparkSession) { | |
case proto.Relation.RelTypeCase.SUBQUERY_ALIAS => | ||
transformSubqueryAlias(rel.getSubqueryAlias) | ||
case proto.Relation.RelTypeCase.REPARTITION => transformRepartition(rel.getRepartition) | ||
case proto.Relation.RelTypeCase.STAT_FUNCTION => | ||
transformStatFunction(rel.getStatFunction) | ||
case proto.Relation.RelTypeCase.RELTYPE_NOT_SET => | ||
throw new IndexOutOfBoundsException("Expected Relation to be set, but is empty.") | ||
case _ => throw InvalidPlanInput(s"${rel.getUnknown} not supported.") | ||
|
@@ -124,6 +127,19 @@ class SparkConnectPlanner(plan: proto.Relation, session: SparkSession) { | |
logical.Range(start, end, step, numPartitions) | ||
} | ||
|
||
/**
 * Translates a Connect `StatFunction` relation into a Catalyst [[LogicalPlan]].
 *
 * Only the `SUMMARY` function case is handled here; any other (or unset) case
 * is rejected with [[InvalidPlanInput]].
 *
 * @param rel the protobuf StatFunction relation to transform
 * @return the logical plan produced by the requested stat function
 */
private def transformStatFunction(rel: proto.StatFunction): LogicalPlan = {
  // Transform the input relation first; it is the child the stat function runs over.
  val child = transformRelation(rel.getInput)

  rel.getFunctionCase match {
    case proto.StatFunction.FunctionCase.SUMMARY =>
      // Reuse the existing server-side implementation by wrapping the child in a
      // Dataset and extracting the resulting logical plan.
      // NOTE(review): Dataset.ofRows truncates the analyzed plan, which blocks
      // further optimization; a dedicated logical plan node would be preferable.
      StatFunctions
        .summary(Dataset.ofRows(session, child), rel.getSummary.getStatisticsList.asScala.toSeq)
        .logicalPlan

    case _ => throw InvalidPlanInput(s"StatFunction ${rel.getUnknown} not supported.")
  }
}
|
||
private def transformDeduplicate(rel: proto.Deduplicate): LogicalPlan = { | ||
if (!rel.hasInput) { | ||
throw InvalidPlanInput("Deduplicate needs a plan input") | ||
|
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
|
@@ -376,6 +376,16 @@ def unionByName(self, other: "DataFrame", allowMissingColumns: bool = False) -> | |||||
def where(self, condition: Expression) -> "DataFrame": | ||||||
return self.filter(condition) | ||||||
|
||||||
def summary(self, *statistics: str) -> "DataFrame":
    """Computes the requested statistics for this DataFrame's columns.

    Parameters
    ----------
    *statistics : str
        Names of statistics to compute (e.g. ``"count"``, ``"mean"``,
        ``"stddev"``). When omitted, the server-side default set is used.

    Returns
    -------
    DataFrame
        A new DataFrame whose plan is a ``StatFunction("summary")`` node
        over this DataFrame's plan; execution happens on the server.

    Raises
    ------
    TypeError
        If any positional argument is not a str.
    """
    _statistics: List[str] = list(statistics)
    # Validate eagerly on the client so a bad argument fails before the
    # request is sent to the server.
    for s in _statistics:
        if not isinstance(s, str):
            raise TypeError(f"'statistics' must be list[str], but got {type(s).__name__}")
    return DataFrame.withPlan(
        plan.StatFunction(child=self._plan, function="summary", statistics=_statistics),
        session=self._session,
    )
|
||||||
def _get_alias(self) -> Optional[str]: | ||||||
p = self._plan | ||||||
while p is not None: | ||||||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
why do we need this?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
here follows https://github.com/apache/spark/blob/master/connector/connect/src/main/protobuf/spark/connect/relations.proto#L51 to catch unexpected input
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think that's for enum but here is an optional field... cc @amaliujia
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Question: will we add new functions under this
oneof
?There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
yes, such as
crosstab
cov
corr
etcThere was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
ok then this makes sense