Commit 0967ce6
Fixed import ordering.
rxin committed Jun 11, 2015
1 parent 45a123d commit 0967ce6
Showing 10 changed files with 16 additions and 21 deletions.
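
Every hunk below makes the same class of fix: import org.apache.spark.unsafe.types.UTF8String (plus, in two files, a couple of other org.apache.spark imports) had been left in the wrong import group, and is moved into the org.apache.spark group in alphabetical position. A minimal sketch of the grouping the diff converges on follows; the package name and stub object are hypothetical, only the ordering rule is taken from the hunks.

// A sketch of the import grouping this commit enforces, inferred from the
// hunks below (assumed convention, not quoted from any one file): java and
// javax imports first, then scala.*, then third-party libraries, then
// org.apache.spark, with groups alphabetized and separated by blank lines.
package org.apache.spark.sql.example // hypothetical package for the sketch

import java.util.{Map => JavaMap} // group 1: java / javax

import scala.collection.mutable.HashMap // group 2: scala.*

import com.fasterxml.jackson.core._ // group 3: third-party libraries

import org.apache.spark.sql.types._ // group 4: org.apache.spark, alphabetized
import org.apache.spark.unsafe.types.UTF8String

object ImportOrderingExample // hypothetical stub so the header stands alone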
File 1 of 10
@@ -23,13 +23,12 @@ import java.sql.{Timestamp, Date}
 import java.util.{Map => JavaMap}
 import javax.annotation.Nullable
 
-import org.apache.spark.unsafe.types.UTF8String
-
 import scala.collection.mutable.HashMap
 
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.util.DateUtils
 import org.apache.spark.sql.types._
+import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * Functions to convert Scala types to Catalyst types and vice versa.
File 2 of 10
@@ -17,8 +17,6 @@
 
 package org.apache.spark.sql.catalyst.expressions.codegen
 
-import org.apache.spark.unsafe.types.UTF8String
-
 import scala.collection.mutable
 import scala.language.existentials
 
@@ -28,6 +26,8 @@ import org.codehaus.janino.ClassBodyEvaluator
 import org.apache.spark.Logging
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.types._
+import org.apache.spark.unsafe.types.UTF8String
+
 
 // These classes are here to avoid issues with serialization and integration with quasiquotes.
 class IntegerHashSet extends org.apache.spark.util.collection.OpenHashSet[Int]
File 3 of 10
@@ -17,13 +17,12 @@
 
 package org.apache.spark.sql.types
 
-import org.apache.spark.unsafe.types.UTF8String
-
 import scala.math.Ordering
 import scala.reflect.runtime.universe.typeTag
 
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.sql.catalyst.ScalaReflectionLock
+import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * :: DeveloperApi ::
File 4 of 10
@@ -17,16 +17,16 @@
 
 package org.apache.spark.sql.catalyst.expressions
 
-import org.apache.spark.unsafe.types.UTF8String
-
 import scala.collection.JavaConverters._
 import scala.util.Random
 
-import org.apache.spark.SparkFunSuite
-import org.apache.spark.unsafe.memory.{ExecutorMemoryManager, TaskMemoryManager, MemoryAllocator}
 import org.scalatest.{BeforeAndAfterEach, Matchers}
 
+import org.apache.spark.SparkFunSuite
 import org.apache.spark.sql.types._
+import org.apache.spark.unsafe.memory.{ExecutorMemoryManager, TaskMemoryManager, MemoryAllocator}
+import org.apache.spark.unsafe.types.UTF8String
+
 
 class UnsafeFixedWidthAggregationMapSuite
 extends SparkFunSuite
File 5 of 10
@@ -19,14 +19,13 @@ package org.apache.spark.sql.columnar
 
 import java.nio.ByteBuffer
 
-import org.apache.spark.unsafe.types.UTF8String
-
 import scala.reflect.runtime.universe.TypeTag
 
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.expressions.MutableRow
 import org.apache.spark.sql.execution.SparkSqlSerializer
 import org.apache.spark.sql.types._
+import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * An abstract class that represents type of a column. Used to append/extract Java objects into/from
File 6 of 10
@@ -21,15 +21,14 @@ import java.io._
 import java.math.{BigDecimal, BigInteger}
 import java.nio.ByteBuffer
 
-import org.apache.spark.unsafe.types.UTF8String
-
 import scala.reflect.ClassTag
 
 import org.apache.spark.Logging
 import org.apache.spark.serializer._
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.expressions.{GenericMutableRow, MutableRow, SpecificMutableRow}
 import org.apache.spark.sql.types._
+import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * The serialization stream for [[SparkSqlSerializer2]]. It assumes that the object passed in
File 7 of 10
@@ -19,13 +19,12 @@ package org.apache.spark.sql.execution
 
 import java.util.{List => JList, Map => JMap}
 
-import org.apache.spark.unsafe.types.UTF8String
-
 import scala.collection.JavaConversions._
 import scala.collection.JavaConverters._
 
 import net.razorvine.pickle.{Pickler, Unpickler}
 
+import org.apache.spark.{Accumulator, Logging => SparkLogging}
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.api.python.{PythonBroadcast, PythonRDD}
 import org.apache.spark.broadcast.Broadcast
@@ -37,7 +36,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.catalyst.util.DateUtils
 import org.apache.spark.sql.types._
-import org.apache.spark.{Accumulator, Logging => SparkLogging}
+import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * A serialized version of a Python lambda function. Suitable for use in a [[PythonRDD]].
File 8 of 10
@@ -21,14 +21,14 @@ import java.sql.{Connection, DriverManager, ResultSet, ResultSetMetaData, SQLExc
 import java.util.Properties
 
 import org.apache.commons.lang3.StringUtils
-import org.apache.spark.unsafe.types.UTF8String
 
 import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.expressions.{Row, SpecificMutableRow}
 import org.apache.spark.sql.catalyst.util.DateUtils
 import org.apache.spark.sql.types._
 import org.apache.spark.sql.sources._
+import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * Data corresponding to one partition of a JDBCRDD.
File 9 of 10
@@ -19,8 +19,6 @@ package org.apache.spark.sql.json
 
 import java.io.ByteArrayOutputStream
 
-import org.apache.spark.unsafe.types.UTF8String
-
 import scala.collection.Map
 
 import com.fasterxml.jackson.core._
@@ -30,6 +28,8 @@ import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.util.DateUtils
 import org.apache.spark.sql.json.JacksonUtils.nextUntil
 import org.apache.spark.sql.types._
+import org.apache.spark.unsafe.types.UTF8String
+
 
 private[sql] object JacksonParser {
 def apply(
File 10 of 10
@@ -17,7 +17,6 @@
 
 package org.apache.spark.sql.sources
 
-import org.apache.spark.unsafe.types.UTF8String
 import org.apache.spark.{Logging, SerializableWritable, TaskContext}
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.rdd.{MapPartitionsRDD, RDD, UnionRDD}
@@ -30,6 +29,7 @@ import org.apache.spark.sql.execution.SparkPlan
 import org.apache.spark.sql.types.{StringType, StructType}
 import org.apache.spark.sql.{SaveMode, Strategy, execution, sources}
 import org.apache.spark.util.Utils
+import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * A Strategy for planning scans over data sources defined using the sources API.
