Skip to content

Commit

Permalink
[SPARK-44446][PYTHON] Add checks for expected list type special cases
Browse files Browse the repository at this point in the history
### What changes were proposed in this pull request?
This PR adds handling for special cases when `expected` is of type list.

### Why are the changes needed?
The change is needed to handle all cases when `expected` is of type list.

### Does this PR introduce _any_ user-facing change?
Yes, the PR makes modifications to the user-facing function `assertDataFrameEqual`

### How was this patch tested?
Added tests to `python/pyspark/sql/tests/test_utils.py` and `python/pyspark/sql/tests/connect/test_utils.py`

Closes #42023 from asl3/fix-list-support.

Authored-by: Amanda Liu <[email protected]>
Signed-off-by: Xinrong Meng <[email protected]>
  • Loading branch information
asl3 authored and xinrong-meng committed Jul 17, 2023
1 parent 85d8d62 commit e578d46
Show file tree
Hide file tree
Showing 2 changed files with 37 additions and 2 deletions.
24 changes: 24 additions & 0 deletions python/pyspark/sql/tests/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -1119,6 +1119,30 @@ def test_empty_no_column(self):
assertDataFrameEqual(df1, df2, checkRowOrder=False)
assertDataFrameEqual(df1, df2, checkRowOrder=True)

def test_empty_expected_list(self):
    """A DataFrame whose only column was dropped compares equal to an empty list.

    ``assertDataFrameEqual`` must pass regardless of the ``checkRowOrder`` flag.
    """
    # NOTE(review): drop("id") produces 0 columns but 10 rows, while the sibling
    # test_no_column_expected_list uses limit(0) (0 rows) — the names and
    # constructions look swapped; confirm against the test-suite conventions.
    actual = self.spark.range(0, 10).drop("id")
    expected = []

    # Exercise both row-order modes with the same pair.
    for ordered in (False, True):
        assertDataFrameEqual(actual, expected, checkRowOrder=ordered)

def test_no_column_expected_list(self):
    """A DataFrame truncated to zero rows compares equal to an empty list.

    ``assertDataFrameEqual`` must pass regardless of the ``checkRowOrder`` flag.
    """
    # limit(0) keeps the "id" column but yields no rows.
    actual = self.spark.range(0, 10).limit(0)
    expected = []

    # Exercise both row-order modes with the same pair.
    for ordered in (False, True):
        assertDataFrameEqual(actual, expected, checkRowOrder=ordered)

def test_empty_no_column_expected_list(self):
    """A DataFrame with neither rows nor columns compares equal to an empty list.

    ``assertDataFrameEqual`` must pass regardless of the ``checkRowOrder`` flag.
    """
    # drop("id") removes the only column; limit(0) removes every row.
    actual = self.spark.range(0, 10).drop("id").limit(0)
    expected = []

    # Exercise both row-order modes with the same pair.
    for ordered in (False, True):
        assertDataFrameEqual(actual, expected, checkRowOrder=ordered)

def test_special_vals(self):
df1 = self.spark.createDataFrame(
data=[
Expand Down
15 changes: 13 additions & 2 deletions python/pyspark/testing/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -349,6 +349,8 @@ def assertDataFrameEqual(
For checkRowOrder, note that PySpark DataFrame ordering is non-deterministic, unless
explicitly sorted.
Note that schema equality is checked only when `expected` is a DataFrame (not a list of Rows).
For DataFrames with float values, assertDataFrame asserts approximate equality.
Two float values a and b are approximately equal if the following equation is True:
Expand All @@ -362,6 +364,9 @@ def assertDataFrameEqual(
>>> df1 = spark.createDataFrame(data=[("1", 0.1), ("2", 3.23)], schema=["id", "amount"])
>>> df2 = spark.createDataFrame(data=[("1", 0.109), ("2", 3.23)], schema=["id", "amount"])
>>> assertDataFrameEqual(df1, df2, rtol=1e-1) # pass, DataFrames are approx equal by rtol
>>> df1 = spark.createDataFrame(data=[(1, 1000), (2, 3000)], schema=["id", "amount"])
>>> list_of_rows = [Row(1, 1000), Row(2, 3000)]
>>> assertDataFrameEqual(df1, list_of_rows) # pass, actual and expected are equal
>>> df1 = spark.createDataFrame(
... data=[("1", 1000.00), ("2", 3000.00), ("3", 2000.00)], schema=["id", "amount"])
>>> df2 = spark.createDataFrame(
Expand Down Expand Up @@ -415,8 +420,14 @@ def assertDataFrameEqual(
)

# special cases: empty datasets, datasets with 0 columns
if (actual.first() is None and expected.first() is None) or (
len(actual.columns) == 0 and len(expected.columns) == 0
if (
isinstance(expected, DataFrame)
and (
(actual.first() is None and expected.first() is None)
or (len(actual.columns) == 0 and len(expected.columns) == 0)
)
or isinstance(expected, list)
and ((actual.first() is None or len(actual.columns) == 0) and len(expected) == 0)
):
return True

Expand Down

0 comments on commit e578d46

Please sign in to comment.