[SPARK-44936][CORE] Simplify the log when Spark HybridStore hits the … #32
GitHub Actions / Report test results
failed
Aug 24, 2023 in 0s
40904 tests run, 974 skipped, 1 failed.
Annotations
Check failure on line 1 in KafkaMicroBatchV1SourceWithAdminSuite
github-actions / Report test results
KafkaMicroBatchV1SourceWithAdminSuite.compositeReadLimit
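For context, `compositeReadLimit` exercises the Kafka source's composite read limit: `minOffsetsPerTrigger` and `maxOffsetsPerTrigger` are set together, so each micro-batch reads at least the former and at most the latter once data is available. A minimal sketch of that configuration, assuming a local broker and illustrative option values rather than the suite's real ones:

```scala
// Minimal sketch, not the suite's exact code: setting both options makes the
// Kafka source report a composite read limit (min and max rows per trigger).
// Broker address and numeric values are illustrative assumptions; "topic-46"
// is the topic named in the stream state below.
val kafkaDf = spark.readStream
  .format("kafka")
  .option("kafka.bootstrap.servers", "localhost:9092") // assumed address
  .option("subscribe", "topic-46")
  .option("minOffsetsPerTrigger", "5")   // lower bound per micro-batch
  .option("maxOffsetsPerTrigger", "15")  // upper bound per micro-batch
  .option("maxTriggerDelay", "5s")       // fire anyway after this long without data
  .load()
```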
org.scalatest.exceptions.TestFailedException:
== Results ==
!== Correct Answer - 52 ==   == Spark Answer - 39 ==
 struct<value:int>           struct<value:int>
 [100]                       [100]
 [101]                       [101]
 [102]                       [102]
 [103]                       [103]
 [104]                       [104]
 [105]                       [105]
 [106]                       [106]
 [107]                       [107]
 [108]                       [108]
 [109]                       [109]
 [10]                        [10]
 [110]                       [110]
 [111]                       [111]
 [112]                       [112]
 [113]                       [113]
 [114]                       [114]
 [115]                       [115]
 [116]                       [116]
 [117]                       [117]
 [118]                       [118]
 [119]                       [119]
 [11]                        [11]
 [120]                       [120]
 [121]                       [121]
![122]                       [12]
![123]                       [13]
![124]                       [14]
![125]                       [15]
![126]                       [16]
![127]                       [17]
![128]                       [18]
![12]                        [19]
![13]                        [1]
![14]                        [20]
![15]                        [21]
![16]                        [22]
![17]                        [23]
![18]                        [24]
![19]                        [2]
![1]
![20]
![21]
![22]
![23]
![24]
![25]
![26]
![27]
![28]
![29]
![2]
![30]
== Progress ==
StartStream(ProcessingTimeTrigger(100),org.apache.spark.sql.streaming.util.StreamManualClock@1982b947,Map(),null)
AssertOnQuery(<condition>, )
CheckAnswer: [1],[10],[100],[101],[102],[103],[104],[105],[106],[107],[11],[108],[109],[110],[111],[12],[13],[14],[15]
AdvanceManualClock(100)
org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase$$anonfun$advanceSystemClock$1$1@588efd6f
AssertOnQuery(<condition>, )
CheckNewAnswer:
Assert(<condition>, )
AdvanceManualClock(100)
org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase$$anonfun$advanceSystemClock$1$1@2015b01f
AssertOnQuery(<condition>, )
CheckAnswer: [1],[10],[100],[101],[102],[103],[104],[105],[106],[107],[11],[108],[109],[110],[111],[112],[113],[114],[115],[116],[12],[117],[118],[119],[120],[121],[13],[14],[15],[16],[17],[18],[19],[2],[20],[21],[22],[23],[24]
AdvanceManualClock(100)
org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase$$anonfun$advanceSystemClock$1$1@55a66e9a
AssertOnQuery(<condition>, )
CheckNewAnswer:
AdvanceManualClock(100)
org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase$$anonfun$advanceSystemClock$1$1@1cfcd8e1
AssertOnQuery(<condition>, )
=> CheckAnswer: [1],[10],[100],[101],[102],[103],[104],[105],[106],[107],[11],[108],[109],[110],[111],[112],[113],[114],[115],[116],[117],[118],[119],[12],[120],[121],[122],[123],[124],[125],[126],[127],[128],[13],[14],[15],[16],[17],[18],[19],[2],[20],[21],[22],[23],[24],[25],[26],[27],[28],[29],[30]
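In the Progress script above, each `AdvanceManualClock(100)` releases one 100 ms processing-time trigger, and the failing `=> CheckAnswer` expects the rows of the final micro-batch ([25]–[30] and [122]–[128] in the diff), which never arrived. A hedged sketch of this driver pattern, assuming a suite that mixes in `StreamTest` and a placeholder query `mapped`:

```scala
// Sketch of the StreamTest action pattern shown in == Progress == above.
// `mapped` and the expected values are placeholders, not the suite's data.
val clock = new StreamManualClock
testStream(mapped)(
  StartStream(Trigger.ProcessingTime(100), clock),
  AdvanceManualClock(100),  // unblocks exactly one trigger of the micro-batch loop
  CheckAnswer(1, 2, 3),     // waits for the batch, then compares the whole sink
  AdvanceManualClock(100),
  CheckNewAnswer()          // expects the next trigger to emit no new rows
)
```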
== Stream ==
Output Mode: Append
Stream state: {KafkaSourceV1[Subscribe[topic-46]]: {"topic-46":{"2":2,"1":15,"0":22}}}
Thread state: alive
Thread stack trace: java.lang.Object.wait(Native Method)
org.apache.spark.util.ManualClock.waitTillTime(ManualClock.scala:67)
org.apache.spark.sql.streaming.util.StreamManualClock.waitTillTime(StreamManualClock.scala:34)
org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor.execute(TriggerExecutor.scala:76)
org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:239)
org.apache.spark.sql.execution.streaming.StreamExecution.$anonfun$runStream$1(StreamExecution.scala:311)
org.apache.spark.sql.execution.streaming.StreamExecution$$Lambda$5467/1364495330.apply$mcV$sp(Unknown Source)
scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:289)
org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.$anonfun$run$1(StreamExecution.scala:211)
org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1$$Lambda$5462/1151169484.apply$mcV$sp(Unknown Source)
scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:211)
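The thread dump shows the stream thread parked in `ManualClock.waitTillTime`, i.e. blocked until the test advances the clock before the next trigger. A simplified sketch of those semantics (an assumption-level reduction, not the actual source of `org.apache.spark.util.ManualClock`):

```scala
// Reduced model of a manual clock: waiters block until another thread
// advances time past their target; AdvanceManualClock(100) plays the
// advance() role in the test above.
class SimpleManualClock(private var now: Long = 0L) {
  def getTimeMillis(): Long = synchronized { now }
  def advance(ms: Long): Unit = synchronized { now += ms; notifyAll() }
  def waitTillTime(target: Long): Long = synchronized {
    while (now < target) wait()
    now
  }
}
```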
== Sink ==
0: [10] [11] [12] [13] [14] [15] [100] [101] [102] [103] [104] [105] [106] [107] [108] [109] [110] [111] [1]
1: [16] [17] [18] [19] [20] [21] [22] [23] [24] [112] [113] [114] [115] [116] [117] [118] [119] [120] [121] [2]
== Plan ==
== Parsed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, 86ea20c4-46bc-47f2-9b5c-7e3671741f0c, Append, 1
+- SerializeFromObject [input[0, int, false] AS value#20100]
+- MapElements org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase$$Lambda$6538/1603768763@240db0eb, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#20099: int
+- DeserializeToObject newInstance(class scala.Tuple2), obj#20098: scala.Tuple2
+- Project [cast(key#20074 as string) AS key#20088, cast(value#20075 as string) AS value#20089]
+- Project [key#20146 AS key#20074, value#20147 AS value#20075, topic#20148 AS topic#20076, partition#20149 AS partition#20077, offset#20150L AS offset#20078L, timestamp#20151 AS timestamp#20079, timestampType#20152 AS timestampType#20080]
+- LogicalRDD [key#20146, value#20147, topic#20148, partition#20149, offset#20150L, timestamp#20151, timestampType#20152], true
== Analyzed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, 86ea20c4-46bc-47f2-9b5c-7e3671741f0c, Append, 1
+- SerializeFromObject [input[0, int, false] AS value#20100]
+- MapElements org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase$$Lambda$6538/1603768763@240db0eb, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#20099: int
+- DeserializeToObject newInstance(class scala.Tuple2), obj#20098: scala.Tuple2
+- Project [cast(key#20074 as string) AS key#20088, cast(value#20075 as string) AS value#20089]
+- Project [key#20146 AS key#20074, value#20147 AS value#20075, topic#20148 AS topic#20076, partition#20149 AS partition#20077, offset#20150L AS offset#20078L, timestamp#20151 AS timestamp#20079, timestampType#20152 AS timestampType#20080]
+- LogicalRDD [key#20146, value#20147, topic#20148, partition#20149, offset#20150L, timestamp#20151, timestampType#20152], true
== Optimized Logical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 1, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@51fdaf74]
+- SerializeFromObject [input[0, int, false] AS value#20100]
+- MapElements org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase$$Lambda$6538/1603768763@240db0eb, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#20099: int
+- DeserializeToObject newInstance(class scala.Tuple2), obj#20098: scala.Tuple2
+- Project [cast(key#20146 as string) AS key#20088, cast(value#20147 as string) AS value#20089]
+- LogicalRDD [key#20146, value#20147, topic#20148, partition#20149, offset#20150L, timestamp#20151, timestampType#20152], true
== Physical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 1, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@51fdaf74], org.apache.spark.sql.execution.datasources.v2.DataSourceV2Strategy$$Lambda$5584/980300870@a991a96
+- *(1) SerializeFromObject [input[0, int, false] AS value#20100]
+- *(1) MapElements org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase$$Lambda$6538/1603768763@240db0eb, obj#20099: int
+- *(1) DeserializeToObject newInstance(class scala.Tuple2), obj#20098: scala.Tuple2
+- *(1) Project [cast(key#20146 as string) AS key#20088, cast(value#20147 as string) AS value#20089]
+- *(1) Scan ExistingRDD kafka[key#20146,value#20147,topic#20148,partition#20149,offset#20150L,timestamp#20151,timestampType#20152]
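Read bottom-up, all four plans describe the same pipeline: cast the Kafka `key`/`value` binary columns to strings, deserialize each row to a `Tuple2`, and map it to a single `Int` column. A hedged reconstruction of the Dataset code behind that shape (names inferred from the plan, not taken from the suite):

```scala
// Inferred from Project(cast(...)) -> DeserializeToObject(Tuple2) ->
// MapElements(obj: int) -> SerializeFromObject in the plans above.
import spark.implicits._
val mapped = kafkaDf
  .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
  .as[(String, String)]
  .map(kv => kv._2.toInt)  // the single int `value` column the test checks
```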
Raw output
sbt.ForkMain$ForkError: org.scalatest.exceptions.TestFailedException:
at org.scalatest.Assertions.newAssertionFailedException(Assertions.scala:472)
at org.scalatest.Assertions.newAssertionFailedException$(Assertions.scala:471)
at org.scalatest.funsuite.AnyFunSuite.newAssertionFailedException(AnyFunSuite.scala:1564)
at org.scalatest.Assertions.fail(Assertions.scala:933)
at org.scalatest.Assertions.fail$(Assertions.scala:929)
at org.scalatest.funsuite.AnyFunSuite.fail(AnyFunSuite.scala:1564)
at org.apache.spark.sql.streaming.StreamTest.failTest$1(StreamTest.scala:462)
at org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$53(StreamTest.scala:745)
at scala.Option.foreach(Option.scala:407)
at org.apache.spark.sql.streaming.StreamTest.executeAction$1(StreamTest.scala:745)
at org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$56(StreamTest.scala:790)
at org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$56$adapted(StreamTest.scala:777)
at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:38)
at org.apache.spark.sql.streaming.StreamTest.liftedTree1$1(StreamTest.scala:777)
at org.apache.spark.sql.streaming.StreamTest.testStream(StreamTest.scala:776)
at org.apache.spark.sql.streaming.StreamTest.testStream$(StreamTest.scala:342)
at org.apache.spark.sql.kafka010.KafkaSourceTest.testStream(KafkaMicroBatchSourceSuite.scala:57)
at org.apache.spark.sql.kafka010.KafkaMicroBatchSourceSuiteBase.$anonfun$new$45(KafkaMicroBatchSourceSuite.scala:730)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.scalatest.enablers.Timed$$anon$1.timeoutAfter(Timed.scala:127)
at org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:282)
at org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
at org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
at org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
at org.apache.spark.SparkFunSuite.$anonfun$test$2(SparkFunSuite.scala:155)
at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
at org.scalatest.Transformer.apply(Transformer.scala:22)
at org.scalatest.Transformer.apply(Transformer.scala:20)
at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:226)
at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:227)
at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:224)
at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:236)
at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:236)
at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:218)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:69)
at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:234)
at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:227)
at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:69)
at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:269)
at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
at scala.collection.immutable.List.foreach(List.scala:431)
at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:269)
at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:268)
at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1564)
at org.scalatest.Suite.run(Suite.scala:1114)
at org.scalatest.Suite.run$(Suite.scala:1096)
at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1564)
at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:273)
at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:273)
at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:272)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:69)
at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:69)
at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:321)
at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:517)
at sbt.ForkMain$Run.lambda$runTest$1(ForkMain.java:414)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:750)