
Commit

style fix
HeartSaVioR committed Oct 22, 2024
1 parent 53c08a6 commit 6994766
Showing 12 changed files with 13 additions and 17 deletions.
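Taken together, the hunks below make three kinds of change: imports are regrouped to match Spark's Scala import convention (java, then scala, then third-party, then org.apache.spark, with one blank line between groups), selectors inside import braces are re-sorted alphabetically by their original names, and two RelationAndCatalogTable pattern matches have their parentheses rebalanced. As a rough sketch of the import layout the diff converges on (the particular imports are illustrative, not taken from this commit, except the filter selectors, which mirror an added line below):

// Illustrative Scala import block, assuming Spark's style rules:
// one group per origin (java, scala, third-party, org.apache.spark),
// groups separated by a blank line, entries sorted alphabetically,
// renamed selectors ordered by their original name.
import java.util.Locale

import scala.collection.mutable

import org.apache.hadoop.fs.Path

import org.apache.spark.SparkConf
import org.apache.spark.sql.connector.expressions.filter.{And => V2And, Not => V2Not, Or => V2Or, Predicate}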
4 changes: 2 additions & 2 deletions sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -18,8 +18,8 @@
 package org.apache.spark.sql
 
 import java.io.{ByteArrayOutputStream, CharArrayWriter, DataOutputStream}
-
 import java.util
+
 import scala.collection.mutable.{ArrayBuffer, HashSet}
 import scala.jdk.CollectionConverters._
 import scala.reflect.ClassTag
@@ -28,8 +28,8 @@ import scala.util.control.NonFatal
 
 import org.apache.commons.lang3.StringUtils
 import org.apache.commons.text.StringEscapeUtils
-import org.apache.spark.TaskContext
 
+import org.apache.spark.TaskContext
 import org.apache.spark.annotation.{DeveloperApi, Stable, Unstable}
 import org.apache.spark.api.java.JavaRDD
 import org.apache.spark.api.java.function._
1 change: 0 additions & 1 deletion
@@ -20,7 +20,6 @@ package org.apache.spark.sql.execution
 import java.util.Locale
 
 import org.apache.spark.SparkException
-
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.catalog.{HiveTableRelation, SessionCatalog}
 import org.apache.spark.sql.catalyst.expressions._
2 changes: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ object SchemaPruning extends Rule[LogicalPlan] {
   override def apply(plan: LogicalPlan): LogicalPlan =
     plan transformDown {
       case op @ ScanOperation(projects, filtersStayUp, filtersPushDown,
-          RelationAndCatalogTable(l, hadoopFsRelation: HadoopFsRelation, _) =>
+          RelationAndCatalogTable(l, hadoopFsRelation: HadoopFsRelation, _)) =>
         val allFilters = filtersPushDown.reduceOption(And).toSeq ++ filtersStayUp
         prunePhysicalColumns(l, projects, allFilters, hadoopFsRelation,
           (prunedDataSchema, prunedMetadataSchema) => {
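The change above is not purely cosmetic: the removed line closed only the inner RelationAndCatalogTable(...) pattern, leaving the enclosing ScanOperation(...) extractor unterminated, and the added line supplies the missing closing parenthesis.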
4 changes: 2 additions & 2 deletions
@@ -20,8 +20,8 @@ package org.apache.spark.sql.execution.datasources.v2
 import scala.collection.mutable
 
 import org.apache.hadoop.fs.Path
-import org.apache.spark.SparkException
 
+import org.apache.spark.SparkException
 import org.apache.spark.internal.{Logging, MDC}
 import org.apache.spark.internal.LogKeys.EXPR
 import org.apache.spark.sql.{SparkSession, Strategy}
@@ -36,7 +36,7 @@ import org.apache.spark.sql.catalyst.util.{toPrettySQL, GeneratedColumn, Identit
 import org.apache.spark.sql.connector.catalog.{Identifier, StagingTableCatalog, SupportsDeleteV2, SupportsNamespaces, SupportsPartitionManagement, SupportsWrite, Table, TableCapability, TableCatalog, TruncatableTable}
 import org.apache.spark.sql.connector.catalog.index.SupportsIndex
 import org.apache.spark.sql.connector.expressions.{FieldReference, LiteralValue}
-import org.apache.spark.sql.connector.expressions.filter.{Predicate, And => V2And, Not => V2Not, Or => V2Or}
+import org.apache.spark.sql.connector.expressions.filter.{And => V2And, Not => V2Not, Or => V2Or, Predicate}
 import org.apache.spark.sql.connector.read.LocalScan
 import org.apache.spark.sql.connector.read.streaming.{ContinuousStream, MicroBatchStream}
 import org.apache.spark.sql.connector.write.V1Write
2 changes: 1 addition & 1 deletion
@@ -27,8 +27,8 @@ import java.util.concurrent.atomic.AtomicBoolean
 import scala.collection.mutable
 
 import org.apache.commons.io.FileUtils
-import org.apache.spark.{AccumulatorSuite, SPARK_DOC_ROOT, SparkArithmeticException, SparkDateTimeException, SparkException, SparkNumberFormatException, SparkRuntimeException}
 
+import org.apache.spark.{AccumulatorSuite, SPARK_DOC_ROOT, SparkArithmeticException, SparkDateTimeException, SparkException, SparkNumberFormatException, SparkRuntimeException}
 import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
 import org.apache.spark.sql.catalyst.ExtendedAnalysisException
 import org.apache.spark.sql.catalyst.expressions.{CodegenObjectFactoryMode, GenericRow, Hex}
2 changes: 1 addition & 1 deletion
@@ -18,8 +18,8 @@
 package org.apache.spark.sql.collation
 
 import org.apache.parquet.schema.MessageType
-import org.apache.spark.SparkConf
 
+import org.apache.spark.SparkConf
 import org.apache.spark.sql.{DataFrame, QueryTest}
 import org.apache.spark.sql.catalyst.planning.PhysicalOperation
 import org.apache.spark.sql.catalyst.util.RebaseDateTime.RebaseSpec
4 changes: 1 addition & 3 deletions
@@ -19,15 +19,13 @@ package org.apache.spark.sql.connector
 
 import java.sql.Timestamp
 import java.time.{Duration, LocalDate, Period}
-
 import java.util
 import java.util.Locale
 
 import scala.concurrent.duration.MICROSECONDS
 import scala.jdk.CollectionConverters._
 
 import org.apache.spark.{SparkException, SparkRuntimeException, SparkUnsupportedOperationException}
-
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.{InternalRow, QualifiedTableName, TableIdentifier}
 import org.apache.spark.sql.catalyst.CurrentUserContext.CURRENT_USER
@@ -3748,7 +3746,7 @@ class DataSourceV2SQLSuiteV1Filter
       sql("INSERT INTO " + tableName + " VALUES('Bob')")
       val df = sql("SELECT * FROM " + tableName)
       assert(df.queryExecution.analyzed.exists {
-        case RelationAndCatalogTable(_, relation: HadoopFsRelation, _)) => true
+        case RelationAndCatalogTable(_, relation: HadoopFsRelation, _) => true
         case _ => false
       })
       checkAnswer(df, Row("Bob"))
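This hunk is the mirror image of the SchemaPruning fix above: the removed case clause carried one closing parenthesis too many after the extractor pattern, and the added line drops it so the parentheses balance.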
1 change: 0 additions & 1 deletion
@@ -20,7 +20,6 @@ package org.apache.spark.sql.execution.benchmark
 import scala.util.Try
 
 import org.apache.spark.SparkConf
-
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.SparkSession
2 changes: 1 addition & 1 deletion
@@ -19,8 +19,8 @@ package org.apache.spark.sql.execution.datasources.orc
 import scala.jdk.CollectionConverters._
 
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl
-import org.apache.spark.SparkConf
 
+import org.apache.spark.SparkConf
 import org.apache.spark.sql.DataFrame
 import org.apache.spark.sql.catalyst.expressions.{And, Attribute, Predicate}
 import org.apache.spark.sql.catalyst.planning.PhysicalOperation
4 changes: 2 additions & 2 deletions
@@ -31,12 +31,12 @@ import scala.reflect.runtime.universe.TypeTag
 import org.apache.hadoop.fs.Path
 import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate, Operators}
 import org.apache.parquet.filter2.predicate.FilterApi._
-import org.apache.parquet.filter2.predicate.Operators.{Eq, Gt, GtEq, Lt, LtEq, NotEq, UserDefinedByInstance, In => FilterIn, Column => _}
+import org.apache.parquet.filter2.predicate.Operators.{Column => _, Eq, Gt, GtEq, In => FilterIn, Lt, LtEq, NotEq, UserDefinedByInstance}
 import org.apache.parquet.hadoop.{ParquetFileReader, ParquetInputFormat, ParquetOutputFormat}
 import org.apache.parquet.hadoop.util.HadoopInputFile
 import org.apache.parquet.schema.MessageType
-import org.apache.spark.{SparkConf, SparkException, SparkRuntimeException}
 
+import org.apache.spark.{SparkConf, SparkException, SparkRuntimeException}
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.dsl.expressions._
 import org.apache.spark.sql.catalyst.expressions._
2 changes: 1 addition & 1 deletion
@@ -28,8 +28,8 @@ import scala.util.Random
 
 import org.mockito.ArgumentMatchers._
 import org.mockito.Mockito._
-import org.apache.spark.{SparkException, SparkSQLException}
 
+import org.apache.spark.{SparkException, SparkSQLException}
 import org.apache.spark.sql.{AnalysisException, DataFrame, Observation, QueryTest, Row}
 import org.apache.spark.sql.catalyst.{analysis, TableIdentifier}
 import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
2 changes: 1 addition & 1 deletion
@@ -25,8 +25,8 @@ import java.util.{Locale, Set}
 
 import com.google.common.io.{Files, FileWriteMode}
 import org.apache.hadoop.fs.{FileSystem, Path}
-import org.apache.spark.{SPARK_DOC_ROOT, SparkException, TestUtils}
 
+import org.apache.spark.{SPARK_DOC_ROOT, SparkException, TestUtils}
 import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent}
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.TableIdentifier
