Skipping 49,579 KB..
=== Metrics of Analyzer/Optimizer Rules ===
Rule                                                                                               Effective Time / Total Time                     Effective Runs / Total Runs

                         0 / 82122                                       0 / 14
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNewInstance                                 0 / 79868                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveGroupingAnalytics                           0 / 67936                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveEncodersInUDF                               0 / 67571                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.ResolveCatalogs                                             0 / 62441                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveOrdinalInOrderByAndGroupBy                  0 / 61593                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$HandleNullInputsForUDF                             0 / 61050                                       0 / 7                                          
org.apache.spark.sql.catalyst.expressions.codegen.package$ExpressionCanonicalizer$CleanExpressions 2630 / 60584                                    2 / 16                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNamespace                                   0 / 60449                                       0 / 14                                         
org.apache.spark.sql.execution.analysis.DetectAmbiguousSelfJoin                                    0 / 59366                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.TypeCoercion$WidenSetOperationTypes                         0 / 55405                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.ResolveTableValuedFunctions                                 0 / 54716                                       0 / 14                                         
org.apache.spark.sql.execution.datasources.ResolveSQLOnFile                                        0 / 54476                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.ResolveHints$ResolveCoalesceHints                           0 / 52522                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveGenerate                                    0 / 51546                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAggregateFunctions                          0 / 49810                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.ResolveInlineTables                                         0 / 46228                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAggAliasInGroupBy                           0 / 44740                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolvePivot                                       0 / 43164                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveMissingReferences                           0 / 42693                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveInsertInto                                  0 / 42670                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveSubqueryColumnAliases                       0 / 41252                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNaturalAndUsingJoin                         0 / 40125                                       0 / 14                                         
org.apache.spark.sql.execution.datasources.FallBackFileSourceV2                                    0 / 39499                                       0 / 14                                         
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveOutputRelation                              0 / 38602                                       0 / 14                                         
org.apache.spark.sql.execution.OptimizeMetadataOnlyQuery                                           0 / 35920                                       0 / 6                                          
org.apache.spark.sql.catalyst.optimizer.CostBasedJoinReorder                                       0 / 33388                                       0 / 6                                          
org.apache.spark.sql.hive.HiveAnalysis                                                             8558 / 33252                                    1 / 7                                          
org.apache.spark.sql.hive.RelationConversions                                                      0 / 29841                                       0 / 7                                          
org.apache.spark.sql.catalyst.optimizer.CombineConcats                                             0 / 29541                                       0 / 13                                         
org.apache.spark.sql.hive.DetermineTableStats                                                      0 / 28059                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.ResolveHints$DisableHints                                   0 / 27797                                       0 / 7                                          
org.apache.spark.sql.execution.datasources.DataSourceAnalysis                                      0 / 24771                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.UpdateOuterReferences                                       0 / 24396                                       0 / 7                                          
org.apache.spark.sql.execution.datasources.PreprocessTableInsertion                                0 / 22499                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAlterTableChanges                           0 / 21729                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.ResolveHints$RemoveAllHints                                 0 / 20554                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.SubstituteUnresolvedOrdinals                                0 / 19236                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.Analyzer$WindowsSubstitution                                0 / 19189                                       0 / 7                                          
org.apache.spark.sql.catalyst.analysis.EliminateUnions                                             0 / 18961                                       0 / 7                                          
org.apache.spark.sql.catalyst.optimizer.EliminateDistinct                                          0 / 18746                                       0 / 6                                          
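[Editor's note: the table tail above is Catalyst's rule metering dump. A minimal sketch of producing the same dump yourself via the internal-but-public RuleExecutor companion object; the query is illustrative.]

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.rules.RuleExecutor

val spark = SparkSession.builder().master("local[*]").appName("rule-metrics").getOrCreate()

RuleExecutor.resetMetrics()                          // clear the global counters
spark.range(10).selectExpr("id * 2 AS x").collect()  // run anything to exercise the rules
// One row per rule: "Effective Time / Total Time" (nanoseconds) and
// "Effective Runs / Total Runs", in the same layout as the table above.
println(RuleExecutor.dumpTimeSpent())
```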
     
08:09:49.175 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:09:49.175 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:09:49.175 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
08:09:49.251 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:09:49.251 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:09:49.251 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] HiveVariableSubstitutionSuite:
[info] - SET hivevar with prefix (10 milliseconds)
[info] - SET hivevar with dotted name (8 milliseconds)
[info] - hivevar substitution (127 milliseconds)
[info] - variable substitution without a prefix (92 milliseconds)
[info] - variable substitution precedence (116 milliseconds)
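[Editor's note: a minimal sketch of the substitution behavior these tests cover, assuming an active SparkSession `spark`; the variable name and value are illustrative, and spark.sql.variable.substitute is enabled by default.]

```scala
spark.sql("SET hivevar:size = 10")
// ${hivevar:size} (or the prefix-less ${size}) is textually substituted
// before parsing, so this runs as SELECT 10.
spark.sql("SELECT ${hivevar:size}").show()
```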
08:09:49.699 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:09:49.699 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:09:49.700 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
08:09:49.779 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:09:49.779 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:09:49.779 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] HiveOrcSourceSuite:
[info] - create temporary orc table (1 second, 364 milliseconds)
[info] - create temporary orc table as (1 second, 111 milliseconds)
[info] - appending insert (791 milliseconds)
[info] - overwrite insert (1 second, 725 milliseconds)
[info] - write null values (1 second, 288 milliseconds)
[info] - SPARK-18433: Improve DataSource option keys to be more case-insensitive (2 milliseconds)
[info] - SPARK-21839: Add SQL config for ORC compression (5 milliseconds)
[info] - SPARK-23340 Empty float/double array columns raise EOFException !!! IGNORED !!!
[info] - SPARK-24322 Fix incorrect workaround for bug in java.sql.Timestamp (771 milliseconds)
[info] - Write Spark version into ORC file metadata (625 milliseconds)
[info] - SPARK-11412 test orc merge schema option (3 milliseconds)
[info] - SPARK-11412 test enabling/disabling schema merging (3 seconds, 776 milliseconds)
08:10:03.940 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 14292.0 (TID 26092)
org.apache.spark.SparkException: Failed merging schema:
root
 |-- a: long (nullable = true)

	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$4(SchemaMergeUtils.scala:86)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$4$adapted(SchemaMergeUtils.scala:81)
	at scala.collection.immutable.Stream.foreach(Stream.scala:533)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:81)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:863)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:863)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1403)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Failed to merge fields 'a' and 'a'. Failed to merge incompatible data types string and bigint
	at org.apache.spark.sql.types.StructType$.$anonfun$merge$2(StructType.scala:593)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.types.StructType$.$anonfun$merge$1(StructType.scala:585)
	at org.apache.spark.sql.types.StructType$.$anonfun$merge$1$adapted(StructType.scala:582)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at org.apache.spark.sql.types.StructType$.merge(StructType.scala:582)
	at org.apache.spark.sql.types.StructType.merge(StructType.scala:492)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$4(SchemaMergeUtils.scala:83)
	... 16 more
08:10:03.943 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 14292.0 (TID 26092, amp-jenkins-worker-05.amp, executor driver): org.apache.spark.SparkException: Failed merging schema:
root
 |-- a: long (nullable = true)

	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$4(SchemaMergeUtils.scala:86)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$4$adapted(SchemaMergeUtils.scala:81)
	at scala.collection.immutable.Stream.foreach(Stream.scala:533)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:81)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:863)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:863)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1403)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Failed to merge fields 'a' and 'a'. Failed to merge incompatible data types string and bigint
	at org.apache.spark.sql.types.StructType$.$anonfun$merge$2(StructType.scala:593)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.types.StructType$.$anonfun$merge$1(StructType.scala:585)
	at org.apache.spark.sql.types.StructType$.$anonfun$merge$1$adapted(StructType.scala:582)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at org.apache.spark.sql.types.StructType$.merge(StructType.scala:582)
	at org.apache.spark.sql.types.StructType.merge(StructType.scala:492)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$4(SchemaMergeUtils.scala:83)
	... 16 more

08:10:03.944 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 14292.0 failed 1 times; aborting job
[info] - SPARK-11412 test enabling/disabling schema merging with data type conflicts (1 second, 728 milliseconds)
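[Editor's note: the aborted stage above is the expected outcome of this test, which writes the same column with two incompatible types and asserts that schema merging fails. A rough reproduction, assuming an active SparkSession `spark`; the path is hypothetical.]

```scala
val dir = "/tmp/orc_type_conflict"                           // hypothetical path
spark.range(3).selectExpr("id AS a").write.orc(s"$dir/p=1")  // a: bigint
spark.range(3).selectExpr("CAST(id AS STRING) AS a").write.orc(s"$dir/p=2")  // a: string

// With merging enabled, schema inference throws the SparkException seen above:
// "Failed to merge incompatible data types string and bigint".
spark.read.option("mergeSchema", "true").orc(dir)
```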
08:10:06.262 WARN org.apache.spark.sql.hive.orc.OrcFileOperator: Skipped the footer in the corrupted file: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-13a783f5-08ac-4981-9911-9e1a90a45230/foo=3/part-00000-cdaf8615-862e-4bfa-b738-c183416bc85b-c000.json
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-13a783f5-08ac-4981-9911-9e1a90a45230/foo=3/part-00000-cdaf8615-862e-4bfa-b738-c183416bc85b-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:275)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:582)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:370)
	at org.apache.hadoop.hive.ql.io.orc.ReaderImpl.<init>(ReaderImpl.java:63)
	at org.apache.hadoop.hive.ql.io.orc.OrcFile.createReader(OrcFile.java:55)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:76)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.getFileReader(OrcFileOperator.scala:87)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$readOrcSchemasInParallel$1(OrcFileOperator.scala:114)
	at org.apache.spark.util.ThreadUtils$.$anonfun$parmap$2(ThreadUtils.scala:373)
	at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:659)
	at scala.util.Success.$anonfun$map$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
08:10:06.414 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 14297.0 (TID 26097)
org.apache.spark.SparkException: Exception thrown in awaitResult: 
	at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:302)
	at org.apache.spark.util.ThreadUtils$.parmap(ThreadUtils.scala:376)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.readOrcSchemasInParallel(OrcFileOperator.scala:112)
	at org.apache.spark.sql.hive.orc.OrcFileFormat.$anonfun$inferSchema$1(OrcFileFormat.scala:73)
	at org.apache.spark.sql.hive.orc.OrcFileFormat.$anonfun$inferSchema$1$adapted(OrcFileFormat.scala:73)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:75)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:863)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:863)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1403)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not read footer for file: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-13a783f5-08ac-4981-9911-9e1a90a45230/foo=3/part-00000-cdaf8615-862e-4bfa-b738-c183416bc85b-c000.json
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:83)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.getFileReader(OrcFileOperator.scala:87)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$readOrcSchemasInParallel$1(OrcFileOperator.scala:114)
	at org.apache.spark.util.ThreadUtils$.$anonfun$parmap$2(ThreadUtils.scala:373)
	at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:659)
	at scala.util.Success.$anonfun$map$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-13a783f5-08ac-4981-9911-9e1a90a45230/foo=3/part-00000-cdaf8615-862e-4bfa-b738-c183416bc85b-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:275)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:582)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:370)
	at org.apache.hadoop.hive.ql.io.orc.ReaderImpl.<init>(ReaderImpl.java:63)
	at org.apache.hadoop.hive.ql.io.orc.OrcFile.createReader(OrcFile.java:55)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:76)
	... 19 more
08:10:06.420 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 14297.0 (TID 26097, amp-jenkins-worker-05.amp, executor driver): org.apache.spark.SparkException: Exception thrown in awaitResult: 
	at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:302)
	at org.apache.spark.util.ThreadUtils$.parmap(ThreadUtils.scala:376)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.readOrcSchemasInParallel(OrcFileOperator.scala:112)
	at org.apache.spark.sql.hive.orc.OrcFileFormat.$anonfun$inferSchema$1(OrcFileFormat.scala:73)
	at org.apache.spark.sql.hive.orc.OrcFileFormat.$anonfun$inferSchema$1$adapted(OrcFileFormat.scala:73)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:75)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:863)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:863)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1403)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not read footer for file: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-13a783f5-08ac-4981-9911-9e1a90a45230/foo=3/part-00000-cdaf8615-862e-4bfa-b738-c183416bc85b-c000.json
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:83)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.getFileReader(OrcFileOperator.scala:87)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$readOrcSchemasInParallel$1(OrcFileOperator.scala:114)
	at org.apache.spark.util.ThreadUtils$.$anonfun$parmap$2(ThreadUtils.scala:373)
	at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:659)
	at scala.util.Success.$anonfun$map$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-13a783f5-08ac-4981-9911-9e1a90a45230/foo=3/part-00000-cdaf8615-862e-4bfa-b738-c183416bc85b-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:275)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:582)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:370)
	at org.apache.hadoop.hive.ql.io.orc.ReaderImpl.<init>(ReaderImpl.java:63)
	at org.apache.hadoop.hive.ql.io.orc.OrcFile.createReader(OrcFile.java:55)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:76)
	... 19 more

08:10:06.420 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 14297.0 failed 1 times; aborting job
[info] - SPARK-11412 test schema merging with corrupt files (2 seconds, 421 milliseconds)
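[Editor's note: the "Malformed ORC file ... Invalid postscript" errors come from a JSON file the test plants under the ORC path. Whether schema inference skips such files or aborts the job is controlled by a real flag; a sketch assuming an active SparkSession `spark`, with a hypothetical path.]

```scala
// true: the corrupt footer is skipped with the WARN seen above;
// false (the default): inference fails and the job is aborted.
spark.conf.set("spark.sql.files.ignoreCorruptFiles", "true")
val schema = spark.read.option("mergeSchema", "true").orc("/tmp/mixed_files").schema
```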
[info] - SPARK-31238: compatibility with Spark 2.4 in reading dates (534 milliseconds)
[info] - SPARK-31238, SPARK-31423: rebasing dates in write (1 second, 732 milliseconds)
[info] - SPARK-31284: compatibility with Spark 2.4 in reading timestamps (802 milliseconds)
[info] - SPARK-31284, SPARK-31423: rebasing timestamps in write (976 milliseconds)
08:10:11.336 ERROR org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory: Unable to evaluate org.apache.hadoop.hive.ql.udf.generic.GenericUDFArray@7a07b1bf. Return value unrecoginizable.
08:10:11.341 ERROR org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory: Unable to evaluate org.apache.hadoop.hive.ql.udf.generic.GenericUDFArray@7a07b1bf. Return value unrecoginizable.
08:10:11.341 ERROR org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory: Unable to evaluate org.apache.hadoop.hive.ql.udf.generic.GenericUDFArray@7a07b1bf. Return value unrecoginizable.
08:10:11.343 ERROR org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory: Unable to evaluate org.apache.hadoop.hive.ql.udf.generic.GenericUDFArray@3ac22614. Return value unrecoginizable.
08:10:11.374 WARN org.apache.hadoop.hive.ql.Driver: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
08:10:11.623 WARN org.apache.hadoop.metrics2.impl.MetricsSystemImpl: JobTracker metrics system already initialized!
08:10:11.742 WARN org.apache.hadoop.metrics2.impl.MetricsSystemImpl: JobTracker metrics system already initialized!
08:10:11.832 WARN org.apache.hadoop.mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
08:10:13.634 WARN org.apache.hadoop.fs.FileUtil: Command 'ln -s /tmp/hadoop-jenkins/mapred/local/job_local1205905291_0049_ed8f4bce-3d7c-41b4-a7a0-149eceaec494/libjars /home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/sql/hive/libjars/*' failed 1 with: ln: creating symbolic link `/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/sql/hive/libjars/*': No such file or directory

08:10:13.634 WARN org.apache.hadoop.mapred.LocalDistributedCacheManager: Failed to create symlink: /tmp/hadoop-jenkins/mapred/local/job_local1205905291_0049_ed8f4bce-3d7c-41b4-a7a0-149eceaec494/libjars <- /home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/sql/hive/libjars/*
08:10:15.275 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:10:15.275 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:10:15.275 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
08:10:15.569 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:10:15.569 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:10:15.569 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
08:10:15.647 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:10:15.647 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:10:15.648 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] - SPARK-19459/SPARK-18220: read char/varchar column written by Hive (6 seconds, 549 milliseconds)
[info] - SPARK-24204 error handling for unsupported data types (1 second, 742 milliseconds)
[info] - Check BloomFilter creation (3 seconds, 335 milliseconds)
[info] - Enforce direct encoding column-wise selectively (2 seconds, 891 milliseconds)
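[Editor's note: the two tests just above pass native ORC writer properties straight through data source options; a sketch assuming an active SparkSession `spark`, with an illustrative column name and output path.]

```scala
spark.range(100).selectExpr("id AS key")
  .write
  .option("orc.bloom.filter.columns", "key")    // ORC property: build a bloom filter for "key"
  .option("orc.column.encoding.direct", "key")  // ORC property: force direct (non-dictionary) encoding
  .orc("/tmp/orc_tuned")                        // hypothetical output path
```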
08:10:27.240 WARN org.apache.spark.sql.hive.orc.OrcFileOperator: Skipped the footer in the corrupted file: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-f1c3f68b-91ee-4e28-b099-ac4a4cd57c0d/third/part-00000-af6d4a53-f8c1-43bb-ab77-e9b103ca93a0-c000.json
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-f1c3f68b-91ee-4e28-b099-ac4a4cd57c0d/third/part-00000-af6d4a53-f8c1-43bb-ab77-e9b103ca93a0-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:275)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:582)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:370)
	at org.apache.hadoop.hive.ql.io.orc.ReaderImpl.<init>(ReaderImpl.java:63)
	at org.apache.hadoop.hive.ql.io.orc.OrcFile.createReader(OrcFile.java:55)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:76)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.getFileReader(OrcFileOperator.scala:87)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$readOrcSchemasInParallel$1(OrcFileOperator.scala:114)
	at org.apache.spark.util.ThreadUtils$.$anonfun$parmap$2(ThreadUtils.scala:373)
	at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:659)
	at scala.util.Success.$anonfun$map$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
08:10:29.812 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 14333.0 (TID 26133)
org.apache.spark.SparkException: Exception thrown in awaitResult: 
	at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:302)
	at org.apache.spark.util.ThreadUtils$.parmap(ThreadUtils.scala:376)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.readOrcSchemasInParallel(OrcFileOperator.scala:112)
	at org.apache.spark.sql.hive.orc.HiveOrcSourceSuite.$anonfun$new$19(HiveOrcSourceSuite.scala:171)
	at org.apache.spark.sql.hive.orc.HiveOrcSourceSuite.$anonfun$new$19$adapted(HiveOrcSourceSuite.scala:171)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:75)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:863)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:863)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1403)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not read footer for file: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-cefb54a2-661b-4005-9719-400300839d14/third/part-00000-483cdafd-8435-4889-baa2-e5dbd3757a20-c000.json
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:83)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.getFileReader(OrcFileOperator.scala:87)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$readOrcSchemasInParallel$1(OrcFileOperator.scala:114)
	at org.apache.spark.util.ThreadUtils$.$anonfun$parmap$2(ThreadUtils.scala:373)
	at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:659)
	at scala.util.Success.$anonfun$map$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-cefb54a2-661b-4005-9719-400300839d14/third/part-00000-483cdafd-8435-4889-baa2-e5dbd3757a20-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:275)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:582)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:370)
	at org.apache.hadoop.hive.ql.io.orc.ReaderImpl.<init>(ReaderImpl.java:63)
	at org.apache.hadoop.hive.ql.io.orc.OrcFile.createReader(OrcFile.java:55)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:76)
	... 19 more
08:10:29.819 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 14333.0 (TID 26133, amp-jenkins-worker-05.amp, executor driver): org.apache.spark.SparkException: Exception thrown in awaitResult: 
	at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:302)
	at org.apache.spark.util.ThreadUtils$.parmap(ThreadUtils.scala:376)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.readOrcSchemasInParallel(OrcFileOperator.scala:112)
	at org.apache.spark.sql.hive.orc.HiveOrcSourceSuite.$anonfun$new$19(HiveOrcSourceSuite.scala:171)
	at org.apache.spark.sql.hive.orc.HiveOrcSourceSuite.$anonfun$new$19$adapted(HiveOrcSourceSuite.scala:171)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:75)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:863)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:863)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1403)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not read footer for file: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-cefb54a2-661b-4005-9719-400300839d14/third/part-00000-483cdafd-8435-4889-baa2-e5dbd3757a20-c000.json
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:83)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.getFileReader(OrcFileOperator.scala:87)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$readOrcSchemasInParallel$1(OrcFileOperator.scala:114)
	at org.apache.spark.util.ThreadUtils$.$anonfun$parmap$2(ThreadUtils.scala:373)
	at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:659)
	at scala.util.Success.$anonfun$map$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-cefb54a2-661b-4005-9719-400300839d14/third/part-00000-483cdafd-8435-4889-baa2-e5dbd3757a20-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:275)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:582)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:370)
	at org.apache.hadoop.hive.ql.io.orc.ReaderImpl.<init>(ReaderImpl.java:63)
	at org.apache.hadoop.hive.ql.io.orc.OrcFile.createReader(OrcFile.java:55)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:76)
	... 19 more

08:10:29.819 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 14333.0 failed 1 times; aborting job
[info] - SPARK-11412 read and merge orc schemas in parallel (4 seconds, 835 milliseconds)
08:10:48.710 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:49.412 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:49.868 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:50.282 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:50.713 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:50.927 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:51.977 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:52.005 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:54.520 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:54.561 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:55.305 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
08:10:55.332 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
[info] - SPARK-25993 CREATE EXTERNAL TABLE with subdirectories (27 seconds, 18 milliseconds)
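[Editor's note: the AcidUtils errors above are benign noise from Hive's file-ID listing on the local filesystem while the external-table test runs. The shape of the DDL being exercised, as a hedged sketch; the table name and location are hypothetical.]

```scala
spark.sql("""
  CREATE EXTERNAL TABLE ext_orc (id BIGINT)
  STORED AS ORC
  LOCATION '/tmp/ext_orc_dir'
""")
// SPARK-25993 is about data laid out in subdirectories of LOCATION being
// visible to readers of the external table.
```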
[info] - SPARK-31580: Read a file written before ORC-569 (276 milliseconds)
08:10:57.572 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:10:57.572 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:10:57.573 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
08:10:57.668 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:10:57.668 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:10:57.669 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] ObjectHashAggregateSuite:
[info] - typed_count without grouping keys (383 milliseconds)
[info] - typed_count without grouping keys and empty input (163 milliseconds)
[info] - typed_count with grouping keys (467 milliseconds)
[info] - typed_count fallback to sort-based aggregation (459 milliseconds)
[info] - random input data types (17 seconds, 479 milliseconds)
[info] - randomized aggregation test - [typed] - with grouping keys - with empty input (1 second, 670 milliseconds)
[info] - randomized aggregation test - [typed] - with grouping keys - with non-empty input (2 seconds, 729 milliseconds)
[info] - randomized aggregation test - [typed] - without grouping keys - with empty input (367 milliseconds)
[info] - randomized aggregation test - [typed] - without grouping keys - with non-empty input (398 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe] - with grouping keys - with empty input (1 second, 493 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe] - with grouping keys - with non-empty input (3 seconds, 297 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe] - without grouping keys - with empty input (329 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe] - without grouping keys - with non-empty input (391 milliseconds)
[info] - randomized aggregation test - [with partial + safe] - with grouping keys - with empty input (1 second, 264 milliseconds)
[info] - randomized aggregation test - [with partial + safe] - with grouping keys - with non-empty input (3 seconds, 399 milliseconds)
[info] - randomized aggregation test - [with partial + safe] - without grouping keys - with empty input (571 milliseconds)
[info] - randomized aggregation test - [with partial + safe] - without grouping keys - with non-empty input (457 milliseconds)
[info] - randomized aggregation test - [with distinct] - with grouping keys - with empty input (2 seconds, 701 milliseconds)
[info] - randomized aggregation test - [with distinct] - with grouping keys - with non-empty input (3 seconds, 949 milliseconds)
[info] - randomized aggregation test - [with distinct] - without grouping keys - with empty input (1 second, 12 milliseconds)
[info] - randomized aggregation test - [with distinct] - without grouping keys - with non-empty input (1 second, 134 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe] - with grouping keys - with empty input (1 second, 190 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe] - with grouping keys - with non-empty input (2 seconds, 829 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe] - without grouping keys - with empty input (400 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe] - without grouping keys - with non-empty input (398 milliseconds)
[info] - randomized aggregation test - [typed, with partial + safe] - with grouping keys - with empty input (1 second, 586 milliseconds)
[info] - randomized aggregation test - [typed, with partial + safe] - with grouping keys - with non-empty input (3 seconds, 176 milliseconds)
[info] - randomized aggregation test - [typed, with partial + safe] - without grouping keys - with empty input (795 milliseconds)
[info] - randomized aggregation test - [typed, with partial + safe] - without grouping keys - with non-empty input (625 milliseconds)
[info] - randomized aggregation test - [typed, with distinct] - with grouping keys - with empty input (1 second, 607 milliseconds)
[info] - randomized aggregation test - [typed, with distinct] - with grouping keys - with non-empty input (5 seconds, 149 milliseconds)
[info] - randomized aggregation test - [typed, with distinct] - without grouping keys - with empty input (1 second, 160 milliseconds)
[info] - randomized aggregation test - [typed, with distinct] - without grouping keys - with non-empty input (1 second, 666 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with partial + safe] - with grouping keys - with empty input (1 second, 331 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with partial + safe] - with grouping keys - with non-empty input (4 seconds, 167 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with partial + safe] - without grouping keys - with empty input (677 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with partial + safe] - without grouping keys - with non-empty input (519 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with distinct] - with grouping keys - with empty input (3 seconds, 856 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with distinct] - with grouping keys - with non-empty input (6 seconds, 443 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with distinct] - without grouping keys - with empty input (1 second, 449 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with distinct] - without grouping keys - with non-empty input (1 second, 462 milliseconds)
[info] - randomized aggregation test - [with partial + safe, with distinct] - with grouping keys - with empty input (2 seconds, 55 milliseconds)
[info] - randomized aggregation test - [with partial + safe, with distinct] - with grouping keys - with non-empty input (5 seconds, 422 milliseconds)
[info] - randomized aggregation test - [with partial + safe, with distinct] - without grouping keys - with empty input (1 second, 139 milliseconds)
[info] - randomized aggregation test - [with partial + safe, with distinct] - without grouping keys - with non-empty input (1 second, 532 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with partial + safe] - with grouping keys - with empty input (1 second, 446 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with partial + safe] - with grouping keys - with non-empty input (3 seconds, 377 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with partial + safe] - without grouping keys - with empty input (665 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with partial + safe] - without grouping keys - with non-empty input (528 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with distinct] - with grouping keys - with empty input (1 second, 589 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with distinct] - with grouping keys - with non-empty input (5 seconds, 447 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with distinct] - without grouping keys - with empty input (1 second, 776 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with distinct] - without grouping keys - with non-empty input (1 second, 844 milliseconds)
[info] - randomized aggregation test - [typed, with partial + safe, with distinct] - with grouping keys - with empty input (1 second, 344 milliseconds)
[info] - randomized aggregation test - [typed, with partial + safe, with distinct] - with grouping keys - with non-empty input (4 seconds, 755 milliseconds)
[info] - randomized aggregation test - [typed, with partial + safe, with distinct] - without grouping keys - with empty input (1 second, 125 milliseconds)
[info] - randomized aggregation test - [typed, with partial + safe, with distinct] - without grouping keys - with non-empty input (1 second, 588 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with partial + safe, with distinct] - with grouping keys - with empty input (1 second, 338 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with partial + safe, with distinct] - with grouping keys - with non-empty input (3 seconds, 802 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with partial + safe, with distinct] - without grouping keys - with empty input (941 milliseconds)
[info] - randomized aggregation test - [with partial + unsafe, with partial + safe, with distinct] - without grouping keys - with non-empty input (1 second, 295 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with partial + safe, with distinct] - with grouping keys - with empty input (1 second, 362 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with partial + safe, with distinct] - with grouping keys - with non-empty input (5 seconds, 711 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with partial + safe, with distinct] - without grouping keys - with empty input (1 second, 615 milliseconds)
[info] - randomized aggregation test - [typed, with partial + unsafe, with partial + safe, with distinct] - without grouping keys - with non-empty input (1 second, 787 milliseconds)
[info] - SPARK-18403 Fix unsafe data false sharing issue in ObjectHashAggregateExec (2 seconds, 185 milliseconds)
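[Editor's note: ObjectHashAggregateSuite drives typed aggregates through the object-hash path, its sort-based fallback, and the partial/distinct variants. A sketch of steering an object-typed aggregate such as collect_list through ObjectHashAggregateExec, assuming an active SparkSession `spark`; both config keys are real, the threshold and data are illustrative.]

```scala
import org.apache.spark.sql.functions.collect_list

spark.conf.set("spark.sql.execution.useObjectHashAggregateExec", "true")
// Fall back to sort-based aggregation after buffering this many groups.
spark.conf.set("spark.sql.objectHashAggregate.sortBased.fallbackThreshold", "128")

val df = spark.range(1000).selectExpr("id % 10 AS k", "id AS v")
df.groupBy("k").agg(collect_list("v")).explain()  // plan shows ObjectHashAggregate
```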
08:13:18.328 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:13:18.328 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:13:18.328 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
08:13:18.396 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:13:18.397 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:13:18.397 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] PartitionProviderCompatibilitySuite:
[info] - convert partition provider to hive with repair table (5 seconds, 52 milliseconds)
[info] - when partition management is enabled, new tables have partition provider hive (4 seconds, 290 milliseconds)
[info] - when partition management is disabled, new tables have no partition provider (2 seconds, 888 milliseconds)
[info] - when partition management is disabled, we preserve the old behavior even for new tables (3 seconds, 356 milliseconds)
[info] - insert overwrite partition of legacy datasource table (9 seconds, 17 milliseconds)
[info] - insert overwrite partition of new datasource table overwrites just partition (9 seconds, 672 milliseconds)
[info] - SPARK-18544 append with saveAsTable - partition management true (8 seconds, 445 milliseconds)
[info] - SPARK-18635 special chars in partition values - partition management true (29 seconds, 668 milliseconds)
[info] - SPARK-18659 insert overwrite table files - partition management true (18 seconds, 3 milliseconds)
[info] - SPARK-18659 insert overwrite table with lowercase - partition management true (8 seconds, 375 milliseconds)
[info] - SPARK-19887 partition value is null - partition management true (10 seconds, 507 milliseconds)
[info] - SPARK-18544 append with saveAsTable - partition management false (6 seconds, 485 milliseconds)
[info] - SPARK-18635 special chars in partition values - partition management false (29 seconds, 248 milliseconds)
[info] - SPARK-18659 insert overwrite table files - partition management false (11 seconds, 218 milliseconds)
[info] - SPARK-18659 insert overwrite table with lowercase - partition management false (5 seconds, 713 milliseconds)
[info] - SPARK-19887 partition value is null - partition management false (10 seconds, 307 milliseconds)
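[Editor's note: PartitionProviderCompatibilitySuite toggles spark.sql.hive.manageFilesourcePartitions, the real flag deciding whether file-source tables track partitions in the Hive metastore. A rough sketch of the repair-table scenario, assuming an active SparkSession `spark`; the table name is hypothetical.]

```scala
spark.conf.set("spark.sql.hive.manageFilesourcePartitions", "true")
spark.sql("CREATE TABLE part_t (v INT, p INT) USING parquet PARTITIONED BY (p)")
// Partition directories added out-of-band under the table location become
// visible after a metastore repair:
spark.sql("MSCK REPAIR TABLE part_t")
```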
08:16:18.119 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/a-b4ac8c4b-cb93-41b4-9338-0e2884c8a988 was not found. Was it deleted very recently?
08:16:18.597 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/b-b0474f12-856c-4c5f-b536-9ed2c488b027 was not found. Was it deleted very recently?
08:16:19.285 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/c-128b6d83-82e9-4dfa-a313-96c09de86651 was not found. Was it deleted very recently?
[info] - sanity check table setup (9 seconds, 545 milliseconds)
08:16:35.653 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/a-3f8ae0c3-afaf-464d-98e7-490b17445d09 was not found. Was it deleted very recently?
08:16:36.174 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/b-d22e5185-4985-4505-9f98-97d58abb3105 was not found. Was it deleted very recently?
08:16:36.727 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/c-9c0a67a1-ec14-4499-8747-b4568691f67a was not found. Was it deleted very recently?
[info] - insert into partial dynamic partitions (18 seconds, 321 milliseconds)
08:16:48.909 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/a-1d11ea12-3b37-440d-acbc-ae9b9fcbb5c9 was not found. Was it deleted very recently?
08:16:49.476 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/b-22b0dbca-a362-41b5-b0f5-9726a24476c0 was not found. Was it deleted very recently?
08:16:49.931 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/c-0958e0f2-1d6e-48fd-b958-664ecccac110 was not found. Was it deleted very recently?
[info] - insert into fully dynamic partitions (12 seconds, 285 milliseconds)
08:16:56.701 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/a-c7464cce-6084-4eac-bd6d-2e5cf9142291 was not found. Was it deleted very recently?
08:16:57.209 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/b-cba08b68-897b-4d40-a4bb-931323210848 was not found. Was it deleted very recently?
08:16:57.610 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/c-56cde48d-8551-464d-ac53-73e9aaca3652 was not found. Was it deleted very recently?
[info] - insert into static partition (7 seconds, 565 milliseconds)
08:17:43.502 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/a-e8633421-e309-48c5-8902-f8155751bd28 was not found. Was it deleted very recently?
08:17:50.328 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/c-f2cea588-d2c6-4195-87ef-118186cbb5f5 was not found. Was it deleted very recently?
[info] - overwrite partial dynamic partitions (58 seconds, 917 milliseconds)
08:18:08.875 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/a-1d3bf026-0583-4813-aa56-64189842ce32 was not found. Was it deleted very recently?
[info] - overwrite fully dynamic partitions (13 seconds, 804 milliseconds)
08:18:20.621 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/a-7ca08bec-61a1-4933-84bc-9486e23f8b31 was not found. Was it deleted very recently?
08:18:21.035 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/b-e254bdba-fd0c-4cd7-9169-da5d89fb560a was not found. Was it deleted very recently?
08:18:21.548 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/c-7b4abdb4-56ae-4d48-9548-1bd266f5993a was not found. Was it deleted very recently?
[info] - overwrite static partition (11 seconds, 308 milliseconds)
08:18:28.589 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/a-e71e24da-cfd9-418f-bf6b-4b39f10eaece was not found. Was it deleted very recently?
08:18:29.000 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/b-77752cb5-f876-4b4e-b176-1e84e4e503a8 was not found. Was it deleted very recently?
08:18:29.657 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/c-96299719-b3b8-47f4-8fec-07d4346d2206 was not found. Was it deleted very recently?
[info] - append data with DataFrameWriter (8 seconds, 542 milliseconds)
[info] - SPARK-19359: renaming partition should not leave useless directories (6 seconds, 71 milliseconds)
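
The dynamic-partition tests above ("partial dynamic", "fully dynamic", "static") differ only in how many partition keys are fixed in the INSERT statement. A hedged sketch of the three shapes, with hypothetical table and column names:

    spark.sql("CREATE TABLE t (v INT, p1 STRING, p2 STRING) USING parquet PARTITIONED BY (p1, p2)")
    // static: every partition key is a literal
    spark.sql("INSERT OVERWRITE TABLE t PARTITION (p1 = 'a', p2 = 'b') SELECT 1")
    // partial dynamic: leading key fixed, trailing key comes from the query
    spark.sql("INSERT OVERWRITE TABLE t PARTITION (p1 = 'a', p2) SELECT 1, 'b'")
    // fully dynamic: all partition keys come from the query
    spark.sql("INSERT OVERWRITE TABLE t PARTITION (p1, p2) SELECT 1, 'a', 'b'")
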
08:18:37.150 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:18:37.150 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:18:37.150 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] HiveTableScanSuite:
08:18:37.354 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:18:37.354 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:18:37.354 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
08:18:37.438 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/src specified for non-external table:src
08:18:38.229 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/part_scan_test specified for non-external table:part_scan_test
[info] - partition_based_table_scan_with_different_serde (6 seconds, 547 milliseconds)
08:18:44.271 WARN org.apache.hadoop.hive.common.FileUtils: File file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/part_scan_test does not exist; Force to delete it.
08:18:44.271 ERROR org.apache.hadoop.hive.common.FileUtils: Failed to delete file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/part_scan_test
08:18:44.301 WARN org.apache.hadoop.hive.common.FileUtils: File file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/src does not exist; Force to delete it.
08:18:44.301 ERROR org.apache.hadoop.hive.common.FileUtils: Failed to delete file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/src
08:18:44.345 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:18:44.346 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:18:44.346 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
08:18:44.468 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/src specified for non-external table:src
[info] - file_split_for_small_table (1 second, 798 milliseconds)
08:18:45.621 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/tb specified for non-external table:tb
[info] - Spark-4041: lowercase issue (1 second, 947 milliseconds)
08:18:47.577 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/timestamp_query_null specified for non-external table:timestamp_query_null
[info] - Spark-4077: timestamp query for null value (1 second, 775 milliseconds)
08:18:49.428 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/spark_4959 specified for non-external table:spark_4959
[info] - Spark-4959 Attributes are case sensitive when using a select query from a projection (2 seconds, 862 milliseconds)
08:18:52.331 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/table_with_partition specified for non-external table:table_with_partition
[info] - Verify SQLConf HIVE_METASTORE_PARTITION_PRUNING (8 seconds, 979 milliseconds)
08:19:01.333 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/table_with_partition specified for non-external table:table_with_partition
08:19:11.549 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/hive_tbl_part specified for non-external table:hive_tbl_part
[info] - SPARK-16926: number of table and partition columns match for new partitioned table (10 seconds, 330 milliseconds)
[info] - HiveTableScanExec canonicalization for different orders of partition filters (904 milliseconds)
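
The HIVE_METASTORE_PARTITION_PRUNING check above verifies that partition predicates are pushed to the metastore rather than evaluated after fetching every partition. A hedged pointer to the flag behind that test name:

    // true pushes partition filters down into the Hive metastore call;
    // false lists all partitions and prunes them client-side.
    spark.conf.set("spark.sql.hive.metastorePartitionPruning", "true")
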
08:19:12.505 WARN org.apache.hadoop.hive.common.FileUtils: File file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/src does not exist; Force to delete it.
08:19:12.505 ERROR org.apache.hadoop.hive.common.FileUtils: Failed to delete file:/home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/warehouse-5a927cdb-5f2d-4178-b1c7-eeffa277c41d/src
08:19:12.560 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
08:19:12.561 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
08:19:12.561 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] Test run started
[info] Test org.apache.spark.sql.hive.JavaMetastoreDataSourcesSuite.saveTableAndQueryIt started
08:19:14.157 WARN org.apache.spark.sql.hive.test.TestHiveExternalCatalog: Couldn't find corresponding Hive SerDe for data source provider org.apache.spark.sql.json. Persisting data source table `default`.`javasavedtable` into Hive metastore in Spark SQL specific format, which is NOT compatible with Hive.
[info] Test run finished: 0 failed, 0 ignored, 1 total, 1.439s
[info] Test run started
[info] Test org.apache.spark.sql.hive.JavaDataFrameSuite.testUDAF started
[info] Test org.apache.spark.sql.hive.JavaDataFrameSuite.saveTableAndQueryIt started
[info] Test run finished: 0 failed, 0 ignored, 2 total, 2.954s
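
The testUDAF case above runs a user-defined aggregate from Java. A minimal sketch of a typed aggregate of that general kind, written against the public Aggregator API (LongAvg and its types are illustrative, not the suite's own function):

    import org.apache.spark.sql.expressions.Aggregator
    import org.apache.spark.sql.{Encoder, Encoders}

    // Running average over Longs; the buffer is (sum, count).
    object LongAvg extends Aggregator[Long, (Long, Long), Double] {
      def zero: (Long, Long) = (0L, 0L)
      def reduce(b: (Long, Long), a: Long): (Long, Long) = (b._1 + a, b._2 + 1)
      def merge(x: (Long, Long), y: (Long, Long)): (Long, Long) = (x._1 + y._1, x._2 + y._2)
      def finish(r: (Long, Long)): Double = if (r._2 == 0) 0.0 else r._1.toDouble / r._2
      def bufferEncoder: Encoder[(Long, Long)] = Encoders.tuple(Encoders.scalaLong, Encoders.scalaLong)
      def outputEncoder: Encoder[Double] = Encoders.scalaDouble
    }
    // SQL registration (Spark 3.x): spark.udf.register("long_avg", functions.udaf(LongAvg))
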
08:19:47.703 WARN org.apache.spark.network.util.JavaUtils: Attempt to delete using native Unix OS command failed for path = /home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-f943ecee-cd6b-45cc-b07d-44ab60e04c2b. Falling back to Java IO way
java.io.IOException: Failed to delete: /home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/target/tmp/spark-f943ecee-cd6b-45cc-b07d-44ab60e04c2b
	at org.apache.spark.network.util.JavaUtils.deleteRecursivelyUsingUnixNative(JavaUtils.java:163)
	at org.apache.spark.network.util.JavaUtils.deleteRecursively(JavaUtils.java:110)
	at org.apache.spark.network.util.JavaUtils.deleteRecursively(JavaUtils.java:91)
	at org.apache.spark.util.Utils$.deleteRecursively(Utils.scala:1105)
	at org.apache.spark.util.ShutdownHookManager$.$anonfun$new$4(ShutdownHookManager.scala:65)
	at org.apache.spark.util.ShutdownHookManager$.$anonfun$new$4$adapted(ShutdownHookManager.scala:62)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at org.apache.spark.util.ShutdownHookManager$.$anonfun$new$2(ShutdownHookManager.scala:62)
	at org.apache.spark.util.SparkShutdownHook.run(ShutdownHookManager.scala:214)
	at org.apache.spark.util.SparkShutdownHookManager.$anonfun$runAll$2(ShutdownHookManager.scala:188)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1958)
	at org.apache.spark.util.SparkShutdownHookManager.$anonfun$runAll$1(ShutdownHookManager.scala:188)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at scala.util.Try$.apply(Try.scala:213)
	at org.apache.spark.util.SparkShutdownHookManager.runAll(ShutdownHookManager.scala:188)
	at org.apache.spark.util.SparkShutdownHookManager$$anon$2.run(ShutdownHookManager.scala:178)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.InterruptedException
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:502)
	at java.lang.UNIXProcess.waitFor(UNIXProcess.java:395)
	at org.apache.spark.network.util.JavaUtils.deleteRecursivelyUsingUnixNative(JavaUtils.java:161)
	... 23 more
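
What the stack trace above records, in outline: JavaUtils first tries a native rm -rf and, when that wait is interrupted (here, by the shutdown-hook cutoff shown next), falls back to deleting from JVM code, "the Java IO way". A simplified sketch of that fallback pattern, not the actual org.apache.spark.network.util.JavaUtils implementation:

    import java.io.File
    import scala.sys.process._

    def deleteRecursively(dir: File): Unit = {
      // First attempt: hand the whole tree to the OS in one call.
      val nativeOk =
        try Seq("rm", "-rf", dir.getAbsolutePath).! == 0
        catch { case _: Exception => false } // e.g. interrupted while waiting on the process
      if (!nativeOk) {
        // Fallback: walk and delete from the JVM.
        Option(dir.listFiles()).toSeq.flatten.foreach(deleteRecursively)
        dir.delete()
      }
    }
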
08:19:47.703 WARN org.apache.hadoop.util.ShutdownHookManager: ShutdownHook '$anon$2' timeout, java.util.concurrent.TimeoutException
java.util.concurrent.TimeoutException
	at java.util.concurrent.FutureTask.get(FutureTask.java:205)
	at org.apache.hadoop.util.ShutdownHookManager.executeShutdown(ShutdownHookManager.java:124)
	at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:95)
08:20:17.704 WARN org.apache.hadoop.util.ShutdownHookManager: ShutdownHook '' timeout, java.util.concurrent.TimeoutException
java.util.concurrent.TimeoutException
	at java.util.concurrent.FutureTask.get(FutureTask.java:205)
	at org.apache.hadoop.util.ShutdownHookManager.executeShutdown(ShutdownHookManager.java:124)
	at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:95)
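
Two details worth noting in the timeouts above: the two warnings land almost exactly 30 seconds apart, consistent with Hadoop's default per-hook shutdown timeout, and the InterruptedException in the delete trace at 08:19:47 is the visible effect of the first cutoff. If the temp-directory cleanup legitimately needs longer, the limit is configurable (assumption: Hadoop 3.x) via hadoop.service.shutdown.timeout in core-site.xml.
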
[info] ScalaTest
[info] Run completed in 3 hours, 29 minutes, 31 seconds.
[info] Total number of tests run: 3645
[info] Suites: completed 132, aborted 0
[info] Tests: succeeded 3645, failed 0, canceled 0, ignored 598, pending 0
[info] All tests passed.
[info] Passed: Total 3648, Failed 0, Errors 0, Passed 3648, Ignored 598
[error] (core/test:test) sbt.TestsFailedException: Tests unsuccessful
[error] Total time: 12637 s, completed Jul 14, 2020 8:20:32 AM
[error] running /home/jenkins/workspace/spark-master-test-sbt-hadoop-3.2-hive-2.3/build/sbt -Phadoop-3.2 -Phive-2.3 -Phadoop-cloud -Phive-thriftserver -Pkubernetes -Pspark-ganglia-lgpl -Phive -Pyarn -Pmesos -Pkinesis-asl test ; received return code 1
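
A note on the failure source, since the tail is easy to misread: the ScalaTest summary above covers only this module's run (3645 hive tests, all green), while the build-failing error is attributed to core/test, which presumably failed earlier in the portion of the log not shown here. For iterating on a single suite locally, something like build/sbt -Phive "hive/testOnly *PartitionProviderCompatibilitySuite" (project and suite names assumed) is usually faster than re-running the full profile matrix in the command above.
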
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Finished: FAILURE