Skip to content

Commit

Permalink
Handle review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
Udbhav30 committed Aug 21, 2020
1 parent 266d0eb commit 8d94930
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 15 deletions.
11 changes: 5 additions & 6 deletions core/src/main/scala/org/apache/spark/util/Utils.scala
Original file line number Diff line number Diff line change
Expand Up @@ -271,19 +271,18 @@ private[spark] object Utils extends Logging {

/**
 * Moves `partitionPath` to the Hadoop trash when trash is enabled, otherwise
 * deletes it permanently (recursively).
 *
 * Data is moved to trash only when `isTrashEnabled` is true (i.e.
 * spark.sql.truncate.trash.enabled is set) AND the Hadoop configuration has a
 * positive 'fs.trash.interval'; in every other case the path is deleted.
 *
 * @param fs             the FileSystem that owns `partitionPath`
 * @param partitionPath  the path being truncated
 * @param isTrashEnabled value of spark.sql.truncate.trash.enabled
 * @param hadoopConf     Hadoop configuration, consulted for 'fs.trash.interval'
 */
def moveToTrashIfEnabled(
    fs: FileSystem,
    partitionPath: Path,
    isTrashEnabled: Boolean,
    hadoopConf: Configuration): Unit = {
  // NOTE(review): 'fs.trash.interval' <= 0 disables the Hadoop trash feature,
  // so we check it up front rather than relying on moveToAppropriateTrash —
  // confirm against the Hadoop Trash API for the deployed version.
  if (isTrashEnabled && hadoopConf.getInt("fs.trash.interval", 0) > 0) {
    logDebug(s"will move data ${partitionPath.toString} to trash")
    Trash.moveToAppropriateTrash(fs, partitionPath, hadoopConf)
  } else {
    // Trash disabled: fall back to a permanent, recursive delete.
    fs.delete(partitionPath, true)
  }
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2701,12 +2701,14 @@ object SQLConf {
.booleanConf
.createWithDefault(false)

// Controls whether TRUNCATE TABLE moves data to the Hadoop trash (recoverable)
// instead of deleting it outright. Takes effect only when the Hadoop side has a
// positive 'fs.trash.interval'. Defaults to false, preserving the historical
// delete-in-place behavior.
val TRUNCATE_TRASH_ENABLED =
  buildConf("spark.sql.truncate.trash.enabled")
    .doc("This Configuration will decide whether move files to trash on truncate table given, " +
      "'fs.trash.interval' is positive in Hadoop Configuration. " +
      "Note that, in Hadoop conf if server side has this configured then the client side " +
      "one will be ignored. ")
    .booleanConf
    .createWithDefault(false)

/**
* Holds information about keys that have been deprecated.
Expand Down Expand Up @@ -3318,7 +3320,7 @@ class SQLConf extends Serializable with Logging {
def optimizeNullAwareAntiJoin: Boolean =
getConf(SQLConf.OPTIMIZE_NULL_AWARE_ANTI_JOIN)

// Accessor for spark.sql.truncate.trash.enabled: whether TRUNCATE TABLE should
// move data to the Hadoop trash rather than delete it.
def truncateTrashEnabled: Boolean = getConf(SQLConf.TRUNCATE_TRASH_ENABLED)

/** ********************** SQLConf functionality methods ************ */

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -490,7 +490,7 @@ case class TruncateTableCommand(
}
val hadoopConf = spark.sessionState.newHadoopConf()
val ignorePermissionAcl = SQLConf.get.truncateTableIgnorePermissionAcl
val trashInterval = SQLConf.get.truncateTrashInterval
val isTrashEnabled = SQLConf.get.truncateTrashEnabled
locations.foreach { location =>
if (location.isDefined) {
val path = new Path(location.get)
Expand All @@ -515,7 +515,7 @@ case class TruncateTableCommand(
}
}

Utils.moveToTrashIfEnabled(fs, path, trashInterval, hadoopConf)
Utils.moveToTrashIfEnabled(fs, path, isTrashEnabled, hadoopConf)

// We should keep original permission/acl of the path.
// For owner/group, only super-user can set it, for example on HDFS. Because
Expand Down

0 comments on commit 8d94930

Please sign in to comment.