From c7e5d7eea9c97f66e9681b0f75517be552c87c67 Mon Sep 17 00:00:00 2001
From: uncleGen
Date: Sat, 27 Apr 2019 09:28:31 +0800
Subject: [PATCH] [MINOR][TEST][DOC] Execute action miss name message

## What changes were proposed in this pull request?

Some minor updates:

- the `Execute` action misses the `name` in its message
- typo in the Structured Streaming document
- typo in SQLConf

## How was this patch tested?

N/A

Closes #24466 from uncleGen/minor-fix.

Authored-by: uncleGen
Signed-off-by: Wenchen Fan
---
 docs/structured-streaming-programming-guide.md                 | 2 +-
 .../main/scala/org/apache/spark/sql/internal/SQLConf.scala     | 4 ++--
 .../src/main/scala/org/apache/spark/sql/DataFrameReader.scala  | 2 +-
 .../src/main/scala/org/apache/spark/sql/DataFrameWriter.scala  | 2 +-
 .../sql/execution/datasources/DataSourceResolution.scala       | 2 +-
 .../scala/org/apache/spark/sql/streaming/StreamTest.scala      | 2 +-
 6 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/structured-streaming-programming-guide.md b/docs/structured-streaming-programming-guide.md
index e76b53dbb4dc3..77db1c3d7d613 100644
--- a/docs/structured-streaming-programming-guide.md
+++ b/docs/structured-streaming-programming-guide.md
@@ -2980,7 +2980,7 @@ the effect of the change is not well-defined. For all of them:
   - Changes to the user-defined foreach sink (that is, the `ForeachWriter` code) are allowed, but
     the semantics of the change depends on the code.
 
-- *Changes in projection / filter / map-like operations**: Some cases are allowed. For example:
+- *Changes in projection / filter / map-like operations*: Some cases are allowed. For example:
 
   - Addition / deletion of filters is allowed: `sdf.selectExpr("a")` to `sdf.where(...).selectExpr("a").filter(...)`.
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 8ba43a3abbfc2..30afb0021a16b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -2111,9 +2111,9 @@ class SQLConf extends Serializable with Logging {
   def continuousStreamingExecutorPollIntervalMs: Long =
     getConf(CONTINUOUS_STREAMING_EXECUTOR_POLL_INTERVAL_MS)
 
-  def userV1SourceReaderList: String = getConf(USE_V1_SOURCE_READER_LIST)
+  def useV1SourceReaderList: String = getConf(USE_V1_SOURCE_READER_LIST)
 
-  def userV1SourceWriterList: String = getConf(USE_V1_SOURCE_WRITER_LIST)
+  def useV1SourceWriterList: String = getConf(USE_V1_SOURCE_WRITER_LIST)
 
   def disabledV2StreamingWriters: String = getConf(DISABLED_V2_STREAMING_WRITERS)
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 56686fdf50db0..0a49360ba49a5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -195,7 +195,7 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
     }
 
     val useV1Sources =
-      sparkSession.sessionState.conf.userV1SourceReaderList.toLowerCase(Locale.ROOT).split(",")
+      sparkSession.sessionState.conf.useV1SourceReaderList.toLowerCase(Locale.ROOT).split(",")
     val lookupCls = DataSource.lookupDataSource(source, sparkSession.sessionState.conf)
     val cls = lookupCls.newInstance() match {
       case f: FileDataSourceV2 if useV1Sources.contains(f.shortName()) ||
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index df88a675a2b47..e42b8275de737 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -248,7 +248,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
     val session = df.sparkSession
 
     val useV1Sources =
-      session.sessionState.conf.userV1SourceWriterList.toLowerCase(Locale.ROOT).split(",")
+      session.sessionState.conf.useV1SourceWriterList.toLowerCase(Locale.ROOT).split(",")
     val lookupCls = DataSource.lookupDataSource(source, session.sessionState.conf)
     val cls = lookupCls.newInstance() match {
       case f: FileDataSourceV2 if useV1Sources.contains(f.shortName()) ||
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceResolution.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceResolution.scala
index 6d1cbe18c900c..09506f05ccfa4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceResolution.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceResolution.scala
@@ -76,7 +76,7 @@ case class DataSourceResolution(
 
   object V1WriteProvider {
     private val v1WriteOverrideSet =
-      conf.userV1SourceWriterList.toLowerCase(Locale.ROOT).split(",").toSet
+      conf.useV1SourceWriterList.toLowerCase(Locale.ROOT).split(",").toSet
 
     def unapply(provider: String): Option[String] = {
       if (v1WriteOverrideSet.contains(provider.toLowerCase(Locale.ROOT))) {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala
index da496837e7a19..a8efe5b4e889e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala
@@ -294,7 +294,7 @@ trait StreamTest extends QueryTest with SharedSQLContext with TimeLimits with Be
   /** Execute arbitrary code */
   object Execute {
     def apply(name: String)(func: StreamExecution => Any): AssertOnQuery =
-      AssertOnQuery(query => { func(query); true }, "name")
+      AssertOnQuery(query => { func(query); true }, name)
 
     def apply(func: StreamExecution => Any): AssertOnQuery = apply("Execute")(func)
   }
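For context on the `StreamTest` change, here is a minimal sketch (not part of the patch) of how the `Execute` action is typically used inside Spark's own streaming test sources. The suite and test names below are hypothetical; with the fix, the string passed to `Execute` (rather than the literal `"name"`) is what shows up when the resulting `AssertOnQuery` is reported.

```scala
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.streaming.StreamTest

// Hypothetical suite extending Spark's internal StreamTest harness.
class ExecuteActionExampleSuite extends StreamTest {
  import testImplicits._

  test("Execute action carries its name into the action's message") {
    val input = MemoryStream[Int]
    val mapped = input.toDS().map(_ + 1)

    testStream(mapped)(
      AddData(input, 1, 2, 3),
      CheckAnswer(2, 3, 4),
      // With the fix, this action is labeled "assert progress was reported"
      // instead of the literal string "name".
      Execute("assert progress was reported") { query =>
        assert(query.lastProgress != null)
      }
    )
  }
}
```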