From b1d49610a623df1b564cdcd4095991a4f88bd2ef Mon Sep 17 00:00:00 2001
From: PHILO-HE
Date: Fri, 23 Aug 2024 11:10:51 +0800
Subject: [PATCH] Workaround fix

---
 .../spark/sql/sources/GlutenInsertSuite.scala | 13 ++++++++-----
 .../spark/sql/sources/GlutenInsertSuite.scala | 13 ++++++++-----
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index db90df14e3fd..290a8fa18a51 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -114,16 +114,19 @@ class GlutenInsertSuite
     }
   }
 
-  testGluten("Cleanup staging files if job is failed") {
-    withTable("t1") {
-      spark.sql("CREATE TABLE t1 (c1 int, c2 string) USING PARQUET")
-      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
+  testGluten("Cleanup staging files if job failed") {
+    // Using a unique table name in this test. Sometimes, the table is not removed for some unknown
+    // reason, which can cause test failure (location already exists) if other following tests have
+    // the same table name.
+    withTable("tbl") {
+      spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
+      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
       assert(new File(table.location).list().length == 0)
 
       intercept[Exception] {
         spark.sql(
           """
-            |INSERT INTO TABLE t1
+            |INSERT INTO TABLE tbl
             |SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
             |""".stripMargin
         )
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index dd0567f54603..dc97be44cb70 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -116,16 +116,19 @@ class GlutenInsertSuite
     }
   }
 
-  testGluten("Cleanup staging files if job is failed") {
-    withTable("t1") {
-      spark.sql("CREATE TABLE t1 (c1 int, c2 string) USING PARQUET")
-      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
+  testGluten("Cleanup staging files if job failed") {
+    // Using a unique table name in this test. Sometimes, the table is not removed for some unknown
+    // reason, which can cause test failure (location already exists) if other following tests have
+    // the same table name.
+    withTable("tbl") {
+      spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
+      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
       assert(new File(table.location).list().length == 0)
 
       intercept[Exception] {
         spark.sql(
           """
-            |INSERT INTO TABLE t1
+            |INSERT INTO TABLE tbl
             |SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
             |""".stripMargin
         )
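
For illustration only (not part of the patch): the comment above motivates a unique table name, while the change itself switches to the fixed name "tbl". If a genuinely per-run name were wanted instead, a minimal sketch along these lines could work, assuming a UUID-based suffix is acceptable inside the test; `withTable` and `spark` come from the enclosing suite.

    // Hypothetical alternative to the fixed name "tbl": derive a per-run
    // table name so a leftover location from an earlier failed run cannot
    // collide with this test or any later one.
    import java.util.UUID

    val uniqueTable = s"tbl_${UUID.randomUUID().toString.replace("-", "")}"
    withTable(uniqueTable) {
      spark.sql(s"CREATE TABLE $uniqueTable (c1 int, c2 string) USING PARQUET")
      // ...rest of the test body unchanged, referencing uniqueTable instead of tbl...
    }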