diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
index f953295afceaf..1bff6d0113e13 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
@@ -257,7 +257,7 @@ abstract class LogicalPlan extends QueryPlan[LogicalPlan] with Logging {
 
       // More than one match.
       case ambiguousReferences =>
-        val qualifiers = ambiguousReferences.flatMap(_._1.qualifiers)
+        val qualifiers = ambiguousReferences.flatMap(_._1.qualifier)
         if (qualifiers.nonEmpty && qualifiers.distinct.length == qualifiers.length) {
           throw new AnalysisException(s"Reference '$name' is ambiguous, please add a qualifier " +
             s"to distinguish it, e.g. '${qualifiers.head}.$name', available qualifiers: " +
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 2c37cb3243e96..fdcb24476e633 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -314,7 +314,7 @@ class Dataset[T] private[sql](
       s"New column names (${colNames.size}): " + colNames.mkString(", "))
 
     val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
-      Column(Alias(oldAttribute, newName)(qualifiers = oldAttribute.qualifiers))
+      Column(Alias(oldAttribute, newName)(qualifier = oldAttribute.qualifier))
     }
     select(newCols : _*)
   }
@@ -769,7 +769,7 @@ class Dataset[T] private[sql](
       Column(ResolvedStar(queryExecution.analyzed.output))
     case _ =>
       val col = resolve(colName) match {
-        case attr: Attribute => UnresolvedAttribute(attr.qualifiers :+ attr.name)
+        case attr: Attribute => UnresolvedAttribute(attr.qualifier.toSeq :+ attr.name)
         case Alias(child, _) => UnresolvedAttribute.quotedString(child.sql)
       }
       Column(col)
@@ -1585,7 +1585,7 @@ class Dataset[T] private[sql](
     if (shouldRename) {
       val columns = output.map { col =>
         if (resolver(col.name, existingName)) {
-          Column(Alias(col, newName)(qualifiers = col.qualifiers))
+          Column(Alias(col, newName)(qualifier = col.qualifier))
         } else {
           Column(col)
         }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
index 94d10805ac92c..db8f89cb83f28 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
@@ -169,7 +169,7 @@ class PlannerSuite extends SharedSQLContext {
     val planned = query.queryExecution.executedPlan
     assert(planned.isInstanceOf[execution.TakeOrderedAndProject])
    assert(planned.output ===
-      testData.select('key, 'value).logicalPlan.output.map(_.withQualifiers(Nil)))
+      testData.select('key, 'value).logicalPlan.output.map(_.withQualifier(None)))
   }
 
   test("terminal limit -> project -> sort should use TakeOrderedAndProject") {
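
Note: the hunks above migrate callers from the old "qualifiers: Seq[String]" attribute API to the new "qualifier: Option[String]" one. The following standalone sketch is not part of the patch; it assumes the post-change Catalyst signatures targeted by this diff, and the object name QualifierSketch is made up for illustration. It shows the three idioms the diff relies on: passing a qualifier through an Alias, expanding it with Option#toSeq to build attribute name parts, and clearing it with withQualifier(None).

    import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
    import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference}
    import org.apache.spark.sql.types.StringType

    object QualifierSketch {
      def main(args: Array[String]): Unit = {
        // An attribute qualified by a single relation alias, i.e. "t.name".
        val attr = AttributeReference("name", StringType)(qualifier = Some("t"))

        // Renaming keeps the qualifier, mirroring the Dataset.toDF and
        // withColumnRenamed hunks above.
        val renamed = Alias(attr, "userName")(qualifier = attr.qualifier)

        // Option#toSeq yields Seq("t") when qualified and Seq() when not,
        // so the name parts become Seq("t", "name") or Seq("name").
        val unresolved = UnresolvedAttribute(attr.qualifier.toSeq :+ attr.name)

        // Dropping the qualifier, mirroring the PlannerSuite assertion above
        // (previously withQualifiers(Nil)).
        val bare = attr.withQualifier(None)

        println(s"${renamed.sql} / ${unresolved.name} / ${bare.qualifiedName}")
      }
    }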