diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index db90df14e3fd..290a8fa18a51 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -114,16 +114,19 @@ class GlutenInsertSuite
     }
   }
 
-  testGluten("Cleanup staging files if job is failed") {
-    withTable("t1") {
-      spark.sql("CREATE TABLE t1 (c1 int, c2 string) USING PARQUET")
-      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
+  testGluten("Cleanup staging files if job failed") {
+    // Use a unique table name in this test. Sometimes the table is not removed, for reasons that
+    // are still unclear, which can cause a test failure (location already exists) when a later
+    // test uses the same table name.
+    withTable("tbl") {
+      spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
+      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
       assert(new File(table.location).list().length == 0)
 
       intercept[Exception] {
         spark.sql(
           """
-            |INSERT INTO TABLE t1
+            |INSERT INTO TABLE tbl
             |SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
             |""".stripMargin
         )
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index dd0567f54603..dc97be44cb70 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -116,16 +116,19 @@ class GlutenInsertSuite
     }
   }
 
-  testGluten("Cleanup staging files if job is failed") {
-    withTable("t1") {
-      spark.sql("CREATE TABLE t1 (c1 int, c2 string) USING PARQUET")
-      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
+  testGluten("Cleanup staging files if job failed") {
+    // Use a unique table name in this test. Sometimes the table is not removed, for reasons that
+    // are still unclear, which can cause a test failure (location already exists) when a later
+    // test uses the same table name.
+    withTable("tbl") {
+      spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
+      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
       assert(new File(table.location).list().length == 0)
 
       intercept[Exception] {
         spark.sql(
           """
-            |INSERT INTO TABLE t1
+            |INSERT INTO TABLE tbl
             |SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
             |""".stripMargin
         )
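Note (not part of the patch): the following is a minimal, self-contained Scala sketch of the behaviour the patched test exercises: create a table with a unique name, force the INSERT job to fail via assert_true(SPARK_PARTITION_ID() = 1), and expect the table location to hold no leftover staging files afterwards. The object name, the table name demo_tbl, and the use of plain try/catch instead of ScalaTest's intercept[Exception] are hypothetical choices for the sketch; it assumes a local SparkSession rather than the suite's test harness.

import java.io.File

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.TableIdentifier

object StagingCleanupSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("staging-cleanup-sketch")
      .master("local[2]")
      .getOrCreate()
    try {
      // Hypothetical, unique table name so a leftover location from another run cannot collide.
      spark.sql("CREATE TABLE demo_tbl (c1 INT, c2 STRING) USING PARQUET")
      val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("demo_tbl"))
      // Freshly created table: its location should be empty.
      assert(new File(table.location).list().length == 0)

      // assert_true fails for every partition whose id is not 1, so the write job aborts.
      // Plain try/catch stands in for ScalaTest's intercept[Exception] to keep this framework-free.
      try {
        spark.sql(
          """
            |INSERT INTO TABLE demo_tbl
            |SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
            |""".stripMargin)
      } catch {
        case _: Exception => // expected: the job fails before committing any output
      }

      // Intent of the suite's check: the aborted job leaves no staging files under the location.
      assert(new File(table.location).list().length == 0)
    } finally {
      spark.sql("DROP TABLE IF EXISTS demo_tbl")
      spark.stop()
    }
  }
}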