From 73d04db4443f3c03347bbe1c4d2dc987b8a90b2d Mon Sep 17 00:00:00 2001
From: taiyang-li <654010905@qq.com>
Date: Tue, 5 Dec 2023 15:34:35 +0800
Subject: [PATCH] Move hive udf UT into GlutenClickHouseHiveTableSuite

Move the previously ignored "test 'hive udf'" case from
GlutenClickHouseTPCHParquetSuite into GlutenClickHouseHiveTableSuite,
registering my_add via spark.gluten.supported.hive.udfs in that suite,
and fix the SQL string interpolation so the USING JAR clause receives
the resolved jar URL rather than the literal text "$jarUrl".
---
 .../execution/GlutenClickHouseHiveTableSuite.scala   | 11 +++++++++++
 .../execution/GlutenClickHouseTPCHParquetSuite.scala | 11 -----------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseHiveTableSuite.scala b/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseHiveTableSuite.scala
index d15f07aff6db..40ba53d105fe 100644
--- a/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseHiveTableSuite.scala
+++ b/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseHiveTableSuite.scala
@@ -97,6 +97,7 @@ class GlutenClickHouseHiveTableSuite()
         "spark.sql.warehouse.dir",
         getClass.getResource("/").getPath + "unit-tests-working-home/spark-warehouse")
       .set("spark.hive.exec.dynamic.partition.mode", "nonstrict")
+      .set("spark.gluten.supported.hive.udfs", "my_add")
       .setMaster("local[*]")
   }
 
@@ -1060,4 +1061,14 @@ class GlutenClickHouseHiveTableSuite()
     compareResultsAgainstVanillaSpark(select_sql, compareResult = true, _ => {})
     spark.sql("DROP TABLE test_tbl_3548")
   }
+
+  test("test 'hive udf'") {
+    val jarPath = "backends-clickhouse/src/test/resources/udfs/hive-test-udfs.jar"
+    val jarUrl = s"file://${System.getProperty("user.dir")}/$jarPath"
+    spark.sql(
+      s"CREATE FUNCTION my_add as " +
+        s"'org.apache.hadoop.hive.contrib.udf.example.UDFExampleAdd2' USING JAR '$jarUrl'")
+    runQueryAndCompare("select MY_ADD(id, id+1) from range(10)")(
+      checkOperatorMatch[ProjectExecTransformer])
+  }
 }

diff --git a/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseTPCHParquetSuite.scala b/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseTPCHParquetSuite.scala
index 8baf5711c23d..2098ea6248af 100644
--- a/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseTPCHParquetSuite.scala
+++ b/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseTPCHParquetSuite.scala
@@ -48,7 +48,6 @@ class GlutenClickHouseTPCHParquetSuite extends GlutenClickHouseTPCHAbstractSuite
       .set("spark.sql.autoBroadcastJoinThreshold", "10MB")
       .set("spark.gluten.sql.columnar.backend.ch.use.v2", "false")
       .set("spark.gluten.supported.scala.udfs", "my_add")
-      .set("spark.gluten.supported.hive.udfs", "my_add")
   }
 
   override protected val createNullableTables = true
@@ -1319,16 +1318,6 @@ class GlutenClickHouseTPCHParquetSuite extends GlutenClickHouseTPCHAbstractSuite
       checkOperatorMatch[ProjectExecTransformer])
   }
 
-  ignore("test 'hive udf'") {
-    val jarPath = "backends-clickhouse/src/test/resources/udfs/hive-test-udfs.jar"
-    val jarUrl = s"file://${System.getProperty("user.dir")}/$jarPath"
-    spark.sql(
-      s"CREATE FUNCTION my_add as " +
-        "'org.apache.hadoop.hive.contrib.udf.example.UDFExampleAdd2' USING JAR '$jarUrl'")
-    runQueryAndCompare("select my_add(id, id+1) from range(10)")(
-      checkOperatorMatch[ProjectExecTransformer])
-  }
-
   override protected def runTPCHQuery(
       queryNum: Int,
       tpchQueries: String = tpchQueries,