
Comparing changes

Comparing v1.8.4 and v1.8.5 to see what has changed between the two tags.
base repository: AbsaOSS/pramen
base: v1.8.4
head repository: AbsaOSS/pramen
compare: v1.8.5
  • 9 commits
  • 26 files changed
  • 2 contributors

Commits on Apr 16, 2024

  1. Setting version to 1.8.5-SNAPSHOT

    CI/CD bot committed Apr 16, 2024
    0e37c91
  2. Merge pull request #393 from AbsaOSS/release/1.8.4

    Release Pramen v1.8.4
    yruslan authored Apr 16, 2024

    dcffa1a

Commits on Apr 24, 2024

  1. #394 Fix PostgreSQL numeric fields with no precision and scale specified in JdbcNative.
    yruslan committed Apr 24, 2024
    1076b36

Commits on Apr 26, 2024

  1. 8a34bc5
  2. c594b4b
  3. 132ce9f
  4. Make JDBC Native connector close to Spark behavior by default.

    yruslan committed Apr 26, 2024
    65265e2

Commits on Apr 27, 2024

  1. Update version number to 1.8.5

    CI/CD bot committed Apr 27, 2024
    715737b
  2. Setting version to 1.8.5

    CI/CD bot committed Apr 27, 2024
    7e4635a
Showing with 342 additions and 45 deletions.
  1. +10 −2 README.md
  2. +1 −1 pramen-py/pyproject.toml
  3. +6 −0 pramen/core/src/main/resources/reference.conf
  4. +5 −1 pramen/core/src/main/scala/za/co/absa/pramen/core/metastore/Metastore.scala
  5. +9 −2 pramen/core/src/main/scala/za/co/absa/pramen/core/metastore/MetastoreImpl.scala
  6. +7 −2 pramen/core/src/main/scala/za/co/absa/pramen/core/metastore/model/HiveConfig.scala
  7. +9 −2 pramen/core/src/main/scala/za/co/absa/pramen/core/metastore/model/MetaTable.scala
  8. +1 −1 pramen/core/src/main/scala/za/co/absa/pramen/core/pipeline/TransferTable.scala
  9. +3 −0 pramen/core/src/main/scala/za/co/absa/pramen/core/reader/model/JdbcConfig.scala
  10. +2 −2 pramen/core/src/main/scala/za/co/absa/pramen/core/utils/JdbcNativeUtils.scala
  11. +6 −0 pramen/core/src/main/scala/za/co/absa/pramen/core/utils/hive/HiveHelper.scala
  12. +15 −0 pramen/core/src/main/scala/za/co/absa/pramen/core/utils/hive/HiveHelperSparkCatalog.scala
  13. +25 −0 pramen/core/src/main/scala/za/co/absa/pramen/core/utils/hive/HiveHelperSql.scala
  14. +10 −0 pramen/core/src/main/scala/za/co/absa/pramen/core/utils/hive/HiveQueryTemplates.scala
  15. +61 −4 pramen/core/src/main/scala/za/co/absa/pramen/core/utils/impl/ResultSetToRowIterator.scala
  16. +7 −3 pramen/core/src/test/scala/za/co/absa/pramen/core/metastore/model/HiveConfigSuite.scala
  17. +19 −7 pramen/core/src/test/scala/za/co/absa/pramen/core/metastore/model/MetaTableSuite.scala
  18. +2 −0 pramen/core/src/test/scala/za/co/absa/pramen/core/mocks/MetaTableFactory.scala
  19. +5 −1 pramen/core/src/test/scala/za/co/absa/pramen/core/mocks/metastore/MetastoreSpy.scala
  20. +85 −11 pramen/core/src/test/scala/za/co/absa/pramen/core/tests/utils/JdbcNativeUtilsSuite.scala
  21. +23 −0 pramen/core/src/test/scala/za/co/absa/pramen/core/tests/utils/hive/HiveHelperSparkCatalogSuite.scala
  22. +13 −0 pramen/core/src/test/scala/za/co/absa/pramen/core/tests/utils/hive/HiveHelperSqlSuite.scala
  23. +3 −0 pramen/extras/src/main/scala/za/co/absa/pramen/extras/sink/EnceladusConfig.scala
  24. +11 −5 pramen/extras/src/main/scala/za/co/absa/pramen/extras/sink/EnceladusSink.scala
  25. +3 −0 pramen/extras/src/test/scala/za/co/absa/pramen/extras/tests/sink/EnceladusConfigSuite.scala
  26. +1 −1 pramen/version.sbt
12 changes: 10 additions & 2 deletions README.md
@@ -2746,8 +2746,15 @@ pramen {
# The API to use to query Hive. Valid values are: "sql" (default), "spark_catalog"
hive.api = "sql"
# [Optional] Default database
database = "my_db"
# [Optional] When possible prefer ADD PARTITION to MSCK REPAIR when updating metastore tables in Hive.
# It is not always possible. When a table is initially created, MSCK REPAIR is always used to pick up all partitions.
# Also ADD PARTITION is only for Parquet format.
# This option can be overridden per metatable.
hive.prefer.add.partition = true
# Optional, use only if you want to use JDBC rather than Spark metastore to query Hive
hive.jdbc {
driver = "com.cloudera.hive.jdbc41.HS2Driver"
@@ -2784,8 +2791,9 @@ pramen.metastore {
# The API to use to query Hive. Valid values are: "sql", "spark_catalog"
hive.api = "sql"
hive.database = my_hive_db
hive.prefer.add.partition = false
# [Optional] Hive table to create/repair after writes to this metastore table
hive.table = my_hive_table
# [Optional] Hive table location for create/repair if different from the metastore table location
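
A minimal sketch of how the new global default and the per-table override shown above can be resolved with Typesafe Config (the key names come from this diff; the object name and fallback logic are illustrative, not Pramen's actual resolution code):

    import com.typesafe.config.ConfigFactory

    object PreferAddPartitionResolution extends App {
      // Global default plus one metastore table that overrides it, as in the README above.
      val conf = ConfigFactory.parseString(
        """pramen.hive.prefer.add.partition = true
          |table.hive.prefer.add.partition = false
          |""".stripMargin)

      val globalDefault = conf.getBoolean("pramen.hive.prefer.add.partition")
      val tableConf     = conf.getConfig("table")

      // The per-table value wins when present; otherwise the global default applies.
      val effective =
        if (tableConf.hasPath("hive.prefer.add.partition"))
          tableConf.getBoolean("hive.prefer.add.partition")
        else
          globalDefault

      println(s"effective hive.prefer.add.partition = $effective") // prints: false
    }
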
2 changes: 1 addition & 1 deletion pramen-py/pyproject.toml
@@ -86,7 +86,7 @@ testpaths = "tests/"

[tool.poetry]
name = "pramen-py"
-version = "1.8.4"
+version = "1.8.5"
description = "Pramen transformations written in python"
authors = [
"Artem Zhukov <iam@zhukovgreen.pro>",
6 changes: 6 additions & 0 deletions pramen/core/src/main/resources/reference.conf
@@ -56,6 +56,12 @@ pramen {
# The API to use to query Hive. Valid values are: "sql", "spark_catalog"
hive.api = "sql"

# When possible prefer ADD PARTITION to MSCK REPAIR when updating metastore tables in Hive.
# It is not always possible. When a table is initially created, MSCK REPAIR is always used to pick up all partitions.
# Also ADD PARTITION is only for Parquet format.
# This option can be overridden per metatable.
hive.prefer.add.partition = false

# If enabled, the job will wait for the output table to become available before running a job
# If the number of seconds <=0 the waiting will be infinite
wait.for.output.table.enabled = false
6 changes: 5 additions & 1 deletion pramen/core/src/main/scala/za/co/absa/pramen/core/metastore/Metastore.scala
@@ -43,7 +43,11 @@ trait Metastore {

def getHiveHelper(tableName: String): HiveHelper

-  def repairOrCreateHiveTable(tableName: String, infoDate: LocalDate, schema: Option[StructType], hiveHelper: HiveHelper, recreate: Boolean): Unit
+  def repairOrCreateHiveTable(tableName: String,
+                              infoDate: LocalDate,
+                              schema: Option[StructType],
+                              hiveHelper: HiveHelper,
+                              recreate: Boolean): Unit

def getStats(tableName: String, infoDate: LocalDate): MetaTableStats

11 changes: 9 additions & 2 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/metastore/MetastoreImpl.scala
@@ -17,6 +17,7 @@
package za.co.absa.pramen.core.metastore

import com.typesafe.config.Config
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.types.{DateType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.slf4j.LoggerFactory
@@ -149,8 +150,14 @@ class MetastoreImpl(appConfig: Config,
hiveHelper.createOrUpdateHiveTable(effectivePath, format, effectiveSchema, Seq(mt.infoDateColumn), mt.hiveConfig.database, hiveTable)
} else {
if (hiveHelper.doesTableExist(mt.hiveConfig.database, hiveTable)) {
log.info(s"The table '$fullTableName' exists. Repairing it.")
hiveHelper.repairHiveTable(mt.hiveConfig.database, hiveTable, format)
if (mt.hivePreferAddPartition && mt.format.isInstanceOf[DataFormat.Parquet]) {
val location = new Path(effectivePath, s"${mt.infoDateColumn}=${infoDate}")
log.info(s"The table '$fullTableName' exists. Adding partition '$location'...")
hiveHelper.repairHiveTable(mt.hiveConfig.database, hiveTable, format)
} else {
log.info(s"The table '$fullTableName' exists. Repairing it.")
hiveHelper.repairHiveTable(mt.hiveConfig.database, hiveTable, format)
}
} else {
log.info(s"The table '$fullTableName' does not exist. Creating it.")
hiveHelper.createOrUpdateHiveTable(effectivePath, format, effectiveSchema, Seq(mt.infoDateColumn), mt.hiveConfig.database, hiveTable)
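
For reference, a small sketch of the partition location construction used in the branch above (org.apache.hadoop.fs.Path is the import added in this diff; the base path, column name, and date are illustrative — in MetastoreImpl they come from the metastore table definition):

    import java.time.LocalDate
    import org.apache.hadoop.fs.Path

    val effectivePath  = "/data/metastore/my_table"
    val infoDateColumn = "pramen_info_date"
    val infoDate       = LocalDate.of(2024, 4, 26)

    // Child path "<column>=<date>" appended to the table location.
    val location = new Path(effectivePath, s"$infoDateColumn=$infoDate")
    println(location) // /data/metastore/my_table/pramen_info_date=2024-04-26
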
9 changes: 7 additions & 2 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/metastore/model/HiveConfig.scala
@@ -81,6 +81,7 @@ object HiveConfig {
val defaultTemplates = defaults.templates.getOrElse(format.name, HiveQueryTemplates(
DEFAULT_CREATE_TABLE_TEMPLATE,
DEFAULT_REPAIR_TABLE_TEMPLATE,
DEFAULT_ADD_PARTITION_TEMPLATE,
DEFAULT_DROP_TABLE_TEMPLATE
))

@@ -104,13 +105,16 @@ object HiveConfig {
val repairTableTemplate = ConfigUtils.getOptionString(conf, s"$HIVE_TEMPLATE_CONFIG_PREFIX.$REPAIR_TABLE_TEMPLATE_KEY")
.getOrElse(defaultTemplates.repairTableTemplate)

val addPartitionTableTemplate = ConfigUtils.getOptionString(conf, s"$HIVE_TEMPLATE_CONFIG_PREFIX.$ADD_PARTITION_TEMPLATE_KEY")
.getOrElse(defaultTemplates.addPartitionTemplate)

val dropTableTemplate = ConfigUtils.getOptionString(conf, s"$HIVE_TEMPLATE_CONFIG_PREFIX.$DROP_TABLE_TEMPLATE_KEY")
.getOrElse(defaultTemplates.dropTableTemplate)

HiveConfig(
hiveApi = hiveApi,
database = database,
-      templates = HiveQueryTemplates(createTableTemplate, repairTableTemplate, dropTableTemplate),
+      templates = HiveQueryTemplates(createTableTemplate, repairTableTemplate, addPartitionTableTemplate, dropTableTemplate),
jdbcConfig = jdbcConfig,
ignoreFailures
)
@@ -127,6 +131,7 @@ object HiveConfig {
val templates = defaults.templates.getOrElse(format.name, HiveQueryTemplates(
DEFAULT_CREATE_TABLE_TEMPLATE,
DEFAULT_REPAIR_TABLE_TEMPLATE,
DEFAULT_ADD_PARTITION_TEMPLATE,
DEFAULT_DROP_TABLE_TEMPLATE
))

@@ -136,7 +141,7 @@ object HiveConfig {
def getNullConfig: HiveConfig = HiveConfig(
HiveApi.Sql,
None,
-    HiveQueryTemplates(DEFAULT_CREATE_TABLE_TEMPLATE, DEFAULT_REPAIR_TABLE_TEMPLATE, DEFAULT_DROP_TABLE_TEMPLATE),
+    HiveQueryTemplates(DEFAULT_CREATE_TABLE_TEMPLATE, DEFAULT_REPAIR_TABLE_TEMPLATE, DEFAULT_ADD_PARTITION_TEMPLATE, DEFAULT_DROP_TABLE_TEMPLATE),
None,
ignoreFailures = false)
}
11 changes: 9 additions & 2 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/metastore/model/MetaTable.scala
@@ -37,6 +37,7 @@ import scala.util.{Failure, Success, Try}
* @param hiveConfig The effective Hive configuration to use for Hive operations.
* @param hiveTable The name of the Hive table.
* @param hivePath The path of the Hive table (if it differs from the path in the underlying format).
* @param hivePreferAddPartition If true, prefer ADD PARTITION to MSCK REPAIR when possible for Hive updates.
* @param infoDateExpression The expression to use to calculate the information date.
* @param infoDateStart The start date of the information date.
* @param trackDays The number of days to look back for retrospective changes if this table is used as a dependency.
@@ -53,6 +54,7 @@ case class MetaTable(
hiveConfig: HiveConfig,
hiveTable: Option[String],
hivePath: Option[String],
hivePreferAddPartition: Boolean,
infoDateExpression: Option[String],
infoDateStart: LocalDate,
trackDays: Int,
@@ -68,6 +70,7 @@ object MetaTable {
val NAME_DESCRIPTION = "description"
val HIVE_TABLE_KEY = "hive.table"
val HIVE_PATH_KEY = "hive.path"
val HIVE_PREFER_ADD_PARTITION_KEY = "hive.prefer.add.partition"
val TRACK_DAYS_KEY = "track.days"
val READ_OPTION_KEY = "read.option"
val WRITE_OPTION_KEY = "write.option"
@@ -80,6 +83,7 @@ object MetaTable {
val defaultStartDate = infoDateConfig.startDate
val defaultTrackDays = infoDateConfig.defaultTrackDays
val defaultHiveConfig = HiveDefaultConfig.fromConfig(ConfigUtils.getOptionConfig(conf, DEFAULT_HIVE_CONFIG_PREFIX))
val defaultPreferAddPartition = conf.getBoolean(s"pramen.$HIVE_PREFER_ADD_PARTITION_KEY")

val tableConfigs = ConfigUtils.getOptionConfigList(conf, key)

@@ -88,7 +92,7 @@ object MetaTable {
}

val metatables = tableConfigs
-      .map(tableConfig => fromConfigSingleEntity(tableConfig, conf, defaultInfoDateColumnName, defaultInfoDateFormat, defaultStartDate, defaultTrackDays, defaultHiveConfig))
+      .map(tableConfig => fromConfigSingleEntity(tableConfig, conf, defaultInfoDateColumnName, defaultInfoDateFormat, defaultStartDate, defaultTrackDays, defaultHiveConfig, defaultPreferAddPartition))
.toSeq

val duplicates = AlgorithmicUtils.findDuplicates(metatables.map(_.name))
@@ -104,7 +108,8 @@ object MetaTable {
defaultInfoDateFormat: String,
defaultStartDate: LocalDate,
defaultTrackDays: Int,
-                                   defaultHiveConfig: HiveDefaultConfig): MetaTable = {
+                                   defaultHiveConfig: HiveDefaultConfig,
+                                   defaultPreferAddPartition: Boolean): MetaTable = {
val name = ConfigUtils.getOptionString(conf, NAME_KEY).getOrElse(throw new IllegalArgumentException(s"Mandatory option missing: $NAME_KEY"))
val description = ConfigUtils.getOptionString(conf, NAME_DESCRIPTION).getOrElse("")
val infoDateOverride = InfoDateOverride.fromConfig(conf)
@@ -124,6 +129,7 @@ object MetaTable {

val hiveTable = ConfigUtils.getOptionString(conf, HIVE_TABLE_KEY)
val hivePath = ConfigUtils.getOptionString(conf, HIVE_PATH_KEY)
val hivePreferAddPartition = ConfigUtils.getOptionBoolean(conf, HIVE_PREFER_ADD_PARTITION_KEY).getOrElse(defaultPreferAddPartition)

val hiveConfig = if (hiveTable.isEmpty) {
HiveConfig.fromDefaults(defaultHiveConfig, format)
@@ -142,6 +148,7 @@ object MetaTable {
hiveConfig,
hiveTable,
hivePath,
hivePreferAddPartition,
infoDateExpressionOpt,
startDate,
trackDays,
2 changes: 1 addition & 1 deletion pramen/core/src/main/scala/za/co/absa/pramen/core/pipeline/TransferTable.scala
@@ -54,7 +54,7 @@ case class TransferTable(
}

def getMetaTable: MetaTable = {
-    MetaTable(jobMetaTableName, "", DataFormat.Null(), "", "", HiveConfig.getNullConfig, None, None, None, infoDateStart, trackDays, trackDaysExplicitlySet, readOptions, writeOptions)
+    MetaTable(jobMetaTableName, "", DataFormat.Null(), "", "", HiveConfig.getNullConfig, None, None, hivePreferAddPartition = true, None, infoDateStart, trackDays, trackDaysExplicitlySet = trackDaysExplicitlySet, readOptions, writeOptions)
}
}

3 changes: 3 additions & 0 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/reader/model/JdbcConfig.scala
@@ -31,6 +31,7 @@ case class JdbcConfig(
retries: Option[Int] = None,
connectionTimeoutSeconds: Option[Int] = None,
sanitizeDateTime: Boolean = true,
incorrectDecimalsAsString: Boolean = false,
extraOptions: Map[String, String] = Map.empty[String, String]
)

@@ -46,6 +47,7 @@ object JdbcConfig {
val JDBC_RETRIES = "jdbc.retries"
val JDBC_CONNECTION_TIMEOUT = "jdbc.connection.timeout"
val JDBC_SANITIZE_DATETIME = "jdbc.sanitize.datetime"
val JDBC_INCORRECT_PRECISION_AS_STRING = "jdbc.incorrect.precision.as.string"
val JDBC_EXTRA_OPTIONS_PREFIX = "jdbc.option"

def load(conf: Config, parent: String = ""): JdbcConfig = {
@@ -75,6 +77,7 @@ object JdbcConfig {
retries = ConfigUtils.getOptionInt(conf, JDBC_RETRIES),
connectionTimeoutSeconds = ConfigUtils.getOptionInt(conf, JDBC_CONNECTION_TIMEOUT),
sanitizeDateTime = ConfigUtils.getOptionBoolean(conf, JDBC_SANITIZE_DATETIME).getOrElse(true),
incorrectDecimalsAsString = ConfigUtils.getOptionBoolean(conf, JDBC_INCORRECT_PRECISION_AS_STRING).getOrElse(false),
extraOptions = ConfigUtils.getExtraOptions(conf, JDBC_EXTRA_OPTIONS_PREFIX)
)
}
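
A short sketch of how the new flag is read, mirroring the getOrElse(false) fallback above (plain Typesafe Config calls; Pramen's ConfigUtils helper is not reproduced here):

    import com.typesafe.config.{Config, ConfigFactory}

    // Mirrors ConfigUtils.getOptionBoolean(conf, JDBC_INCORRECT_PRECISION_AS_STRING).getOrElse(false).
    def incorrectDecimalsAsString(conf: Config): Boolean =
      if (conf.hasPath("jdbc.incorrect.precision.as.string"))
        conf.getBoolean("jdbc.incorrect.precision.as.string")
      else
        false

    val conf = ConfigFactory.parseString("""jdbc.incorrect.precision.as.string = true""")
    println(incorrectDecimalsAsString(conf))                  // true
    println(incorrectDecimalsAsString(ConfigFactory.empty())) // false (the default)
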
4 changes: 2 additions & 2 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/utils/JdbcNativeUtils.scala
@@ -89,13 +89,13 @@ object JdbcNativeUtils {

// Executing the query
val rs = getResultSet(jdbcConfig, url, query)
-    val driverIterator = new ResultSetToRowIterator(rs, jdbcConfig.sanitizeDateTime)
+    val driverIterator = new ResultSetToRowIterator(rs, jdbcConfig.sanitizeDateTime, jdbcConfig.incorrectDecimalsAsString)
val schema = JdbcSparkUtils.addMetadataFromJdbc(driverIterator.getSchema, rs.getMetaData)

driverIterator.close()

val rdd = spark.sparkContext.parallelize(Seq(query)).flatMap(q => {
-      new ResultSetToRowIterator(getResultSet(jdbcConfig, url, q), jdbcConfig.sanitizeDateTime)
+      new ResultSetToRowIterator(getResultSet(jdbcConfig, url, q), jdbcConfig.sanitizeDateTime, jdbcConfig.incorrectDecimalsAsString)
})

spark.createDataFrame(rdd, schema)
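
The ResultSetToRowIterator changes themselves (+61 −4) are not reproduced in this comparison view. The sketch below only illustrates the idea behind the flag — map a DECIMAL whose driver-reported precision/scale is unusable to a string instead of a guessed decimal type; the helper is hypothetical and the real logic may differ:

    import java.sql.{ResultSetMetaData, Types}
    import org.apache.spark.sql.types.{DataType, DecimalType, StringType}

    // Hypothetical illustration of incorrectDecimalsAsString; not the actual Pramen code.
    def decimalColumnType(meta: ResultSetMetaData, column: Int, incorrectDecimalsAsString: Boolean): DataType = {
      val sqlType   = meta.getColumnType(column)
      val precision = meta.getPrecision(column)
      val scale     = meta.getScale(column)
      require(sqlType == Types.NUMERIC || sqlType == Types.DECIMAL, "only decimal columns are handled here")

      // PostgreSQL reports precision 0 for NUMERIC columns declared without precision and scale.
      if (precision <= 0 || scale > precision || precision > DecimalType.MAX_PRECISION) {
        if (incorrectDecimalsAsString) StringType // keep the exact value by reading it as a string
        else DecimalType(38, 18)                  // otherwise fall back to a guessed decimal type
      } else {
        DecimalType(precision, scale)
      }
    }
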
6 changes: 6 additions & 0 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/utils/hive/HiveHelper.scala
@@ -34,6 +34,12 @@ abstract class HiveHelper {
tableName: String,
format: HiveFormat): Unit

def addPartition(databaseName: Option[String],
tableName: String,
partitionBy: Seq[String],
partitionValues: Seq[String],
location: String): Unit

def doesTableExist(databaseName: Option[String],
tableName: String): Boolean

15 changes: 15 additions & 0 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/utils/hive/HiveHelperSparkCatalog.scala
@@ -59,6 +59,21 @@ class HiveHelperSparkCatalog(spark: SparkSession) extends HiveHelper {
}
}

def addPartition(databaseName: Option[String],
tableName: String,
partitionBy: Seq[String],
partitionValues: Seq[String],
location: String): Unit = {
if (partitionBy.length != partitionValues.length) {
throw new IllegalArgumentException(s"Partition columns and values must have the same length. Columns: $partitionBy, values: $partitionValues")
}
val fullTableName = HiveHelper.getFullTable(databaseName, tableName)
val partitionClause = partitionBy.zip(partitionValues).map { case (col, value) => s"$col='$value'" }.mkString(", ")
val sql = s"ALTER TABLE $fullTableName ADD IF NOT EXISTS PARTITION ($partitionClause) LOCATION '$location'"
log.info(s"Executing: $sql")
spark.sql(sql).collect()
}

private def dropCatalogTable(fullTableName: String): Unit = {
spark.sql(s"DROP TABLE $fullTableName").collect()
}
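
A usage sketch for the new method against the signature added in this diff (the Spark session setup, database, table, and location values are illustrative):

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().enableHiveSupport().getOrCreate()
    val hiveHelper = new HiveHelperSparkCatalog(spark)

    // Executes:
    // ALTER TABLE my_db.my_table ADD IF NOT EXISTS PARTITION (pramen_info_date='2024-04-26')
    //   LOCATION '/data/metastore/my_table/pramen_info_date=2024-04-26'
    hiveHelper.addPartition(
      databaseName    = Some("my_db"),
      tableName       = "my_table",
      partitionBy     = Seq("pramen_info_date"),
      partitionValues = Seq("2024-04-26"),
      location        = "/data/metastore/my_table/pramen_info_date=2024-04-26"
    )
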
25 changes: 25 additions & 0 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/utils/hive/HiveHelperSql.scala
@@ -49,6 +49,21 @@ class HiveHelperSql(val queryExecutor: QueryExecutor,
}
}

def addPartition(databaseName: Option[String],
tableName: String,
partitionBy: Seq[String],
partitionValues: Seq[String],
location: String): Unit = {
if (partitionBy.length != partitionValues.length) {
throw new IllegalArgumentException(s"Partition columns and values must have the same length. Columns: $partitionBy, values: $partitionValues")
}
val fullTableName = HiveHelper.getFullTable(databaseName, tableName)
val partitionClause = partitionBy.zip(partitionValues).map { case (col, value) => s"$col='$value'" }.mkString(", ")
val sql = applyPartitionTemplate(hiveConfig.addPartitionTemplate, fullTableName, location, partitionClause)
queryExecutor.execute(sql)
}


override def doesTableExist(databaseName: Option[String], tableName: String): Boolean = queryExecutor.doesTableExist(databaseName, tableName)

override def dropTable(databaseName: Option[String],
@@ -133,4 +148,14 @@ class HiveHelperSql(val queryExecutor: QueryExecutor,
.replace("@schema", schemaDDL)
.replace("@partitionedBy", partitionDDL)
}

private def applyPartitionTemplate(template: String,
fullTableName: String,
partitionPath: String = "",
partitionClause: String = ""
): String = {
template.replace("@fullTableName", fullTableName)
.replace("@partitionPath", partitionPath)
.replace("@partitionClause", partitionClause)
}
}
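
To make the template mechanics concrete, here is what applyPartitionTemplate produces for the default ADD PARTITION template (the table name, path, and partition values are illustrative):

    // Same substitution as applyPartitionTemplate above, applied to the default template.
    val template =
      "ALTER TABLE @fullTableName ADD IF NOT EXISTS PARTITION (@partitionClause) LOCATION '@partitionPath';"

    val sql = template
      .replace("@fullTableName", "my_db.my_table")
      .replace("@partitionPath", "/data/metastore/my_table/pramen_info_date=2024-04-26")
      .replace("@partitionClause", "pramen_info_date='2024-04-26'")

    println(sql)
    // ALTER TABLE my_db.my_table ADD IF NOT EXISTS PARTITION (pramen_info_date='2024-04-26') LOCATION '/data/metastore/my_table/pramen_info_date=2024-04-26';
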
10 changes: 10 additions & 0 deletions pramen/core/src/main/scala/za/co/absa/pramen/core/utils/hive/HiveQueryTemplates.scala
@@ -22,6 +22,7 @@ import za.co.absa.pramen.core.utils.ConfigUtils
case class HiveQueryTemplates(
createTableTemplate: String,
repairTableTemplate: String,
addPartitionTemplate: String,
dropTableTemplate: String
)

@@ -30,6 +31,7 @@ object HiveQueryTemplates {

val CREATE_TABLE_TEMPLATE_KEY = "create.table.template"
val REPAIR_TABLE_TEMPLATE_KEY = "repair.table.template"
val ADD_PARTITION_TEMPLATE_KEY = "add.partition.template"
val DROP_TABLE_TEMPLATE_KEY = "drop.table.template"

val DEFAULT_CREATE_TABLE_TEMPLATE: String =
@@ -43,6 +45,9 @@ object HiveQueryTemplates {

val DEFAULT_REPAIR_TABLE_TEMPLATE: String = "MSCK REPAIR TABLE @fullTableName"

val DEFAULT_ADD_PARTITION_TEMPLATE: String =
"""ALTER TABLE @fullTableName ADD IF NOT EXISTS PARTITION (@partitionClause) LOCATION '@partitionPath';""".stripMargin

val DEFAULT_DROP_TABLE_TEMPLATE: String = "DROP TABLE IF EXISTS @fullTableName"

def fromConfig(conf: Config): HiveQueryTemplates = {
@@ -52,12 +57,16 @@ object HiveQueryTemplates {
val repairTableTemplate = ConfigUtils.getOptionString(conf, REPAIR_TABLE_TEMPLATE_KEY)
.getOrElse(DEFAULT_REPAIR_TABLE_TEMPLATE)

val addPartitionTemplate = ConfigUtils.getOptionString(conf, ADD_PARTITION_TEMPLATE_KEY)
.getOrElse(DEFAULT_ADD_PARTITION_TEMPLATE)

val dropTableTemplate = ConfigUtils.getOptionString(conf, DROP_TABLE_TEMPLATE_KEY)
.getOrElse(DEFAULT_DROP_TABLE_TEMPLATE)

HiveQueryTemplates(
createTableTemplate = createTableTemplate,
repairTableTemplate = repairTableTemplate,
addPartitionTemplate = addPartitionTemplate,
dropTableTemplate = dropTableTemplate
)
}
@@ -66,6 +75,7 @@ object HiveQueryTemplates {
HiveQueryTemplates(
createTableTemplate = DEFAULT_CREATE_TABLE_TEMPLATE,
repairTableTemplate = DEFAULT_REPAIR_TABLE_TEMPLATE,
addPartitionTemplate = DEFAULT_ADD_PARTITION_TEMPLATE,
dropTableTemplate = DEFAULT_DROP_TABLE_TEMPLATE
)
}