From c40b2df9b03f498547bca9e388297d619249e1c7 Mon Sep 17 00:00:00 2001 From: Dennis Yatunin Date: Wed, 12 Jul 2023 17:24:43 -0700 Subject: [PATCH] Address comments --- .buildkite/pipeline.yml | 2 +- src/MatrixFields/MatrixFields.jl | 6 +- src/MatrixFields/matrix_field_utils.jl | 13 +- src/MatrixFields/matrix_multiplication.jl | 13 +- src/MatrixFields/rmul_with_projection.jl | 153 +++++++++++++++++++--- src/Operators/common.jl | 26 ++-- test/MatrixFields/rmul_with_projection.jl | 9 +- 7 files changed, 171 insertions(+), 51 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 42a855062b..ff02381cdb 100755 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -518,7 +518,7 @@ steps: command: "julia --color=yes --project=test test/MatrixFields/matrix_field_broadcasting.jl" agents: slurm_gpus: 1 - slurm_mem: 80GB + slurm_mem: 20GB - group: "Unit: Hypsography" steps: diff --git a/src/MatrixFields/MatrixFields.jl b/src/MatrixFields/MatrixFields.jl index 0a0f241acd..17214e97f6 100644 --- a/src/MatrixFields/MatrixFields.jl +++ b/src/MatrixFields/MatrixFields.jl @@ -1,12 +1,12 @@ module MatrixFields import CUDA: @allowscalar -import LinearAlgebra: UniformScaling, Adjoint -import StaticArrays: SArray, SMatrix, SVector +import LinearAlgebra: UniformScaling, Adjoint, AdjointAbsVec +import StaticArrays: SMatrix, SVector import BandedMatrices: BandedMatrix, band, _BandedMatrix import ..Utilities: PlusHalf, half import ..RecursiveApply: - rmap, rpromote_type, rzero, rconvert, radd, rsub, rmul, rdiv + rmap, rmaptype, rpromote_type, rzero, rconvert, radd, rsub, rmul, rdiv import ..Geometry import ..Spaces import ..Fields diff --git a/src/MatrixFields/matrix_field_utils.jl b/src/MatrixFields/matrix_field_utils.jl index 0d8470d6a3..d73e1f218e 100644 --- a/src/MatrixFields/matrix_field_utils.jl +++ b/src/MatrixFields/matrix_field_utils.jl @@ -84,9 +84,16 @@ function column_field2array_view(field::Fields.FiniteDifferenceField) end end -all_columns(::Fields.FiniteDifferenceField) = (((1, 1), 1),) -all_columns(field::Fields.ExtrudedFiniteDifferenceField) = - Spaces.all_nodes(Spaces.horizontal_space(axes(field))) +all_columns(::Fields.ColumnField) = (((1, 1), 1),) +all_columns(field) = all_columns(axes(field)) +all_columns(space::Spaces.ExtrudedFiniteDifferenceSpace) = + Spaces.all_nodes(Spaces.horizontal_space(space)) + +# TODO: Unify FiniteDifferenceField and ColumnField so that we can use this +# version instead. 
+# all_columns(::Fields.FiniteDifferenceField) = (((1, 1), 1),) +# all_columns(field::Fields.ExtrudedFiniteDifferenceField) = +# Spaces.all_nodes(Spaces.horizontal_space(axes(field))) column_map(f::F, field) where {F} = map((((i, j), h),) -> f(Spaces.column(field, i, j, h)), all_columns(field)) diff --git a/src/MatrixFields/matrix_multiplication.jl b/src/MatrixFields/matrix_multiplication.jl index 45ddaba9bc..08d0bed830 100644 --- a/src/MatrixFields/matrix_multiplication.jl +++ b/src/MatrixFields/matrix_multiplication.jl @@ -199,14 +199,6 @@ Operators.right_interior_idx( arg, ) = matrix_right_interior_index(space, outer_diagonals(eltype(matrix1))[2]) -function rmul_type(::Type{T1}, ::Type{T2}, ::Type{LG}) where {T1, T2, LG} - type = Base._return_type(rmul_with_projection, Tuple{T1, T2, LG}) - type == Union{} && - error("Unable to infer result type: Calling rmul_with_projection with \ - arguments of types $T1 and $T2 will cause it to throw an error") - return type -end - function Operators.return_eltype( ::MultiplyColumnwiseBandMatrixField, matrix1, @@ -216,18 +208,17 @@ function Operators.return_eltype( "The first argument of ⋅ must have elements of type BandMatrixRow, but \ the given argument has elements of type $(eltype(matrix1))", ) - lg_type = Operators.local_geometry_type(axes(arg)) if eltype(arg) <: BandMatrixRow # matrix-matrix multiplication matrix2 = arg ld1, ud1 = outer_diagonals(eltype(matrix1)) ld2, ud2 = outer_diagonals(eltype(matrix2)) prod_ld, prod_ud = ld1 + ld2, ud1 + ud2 prod_value_type = - rmul_type(eltype(eltype(matrix1)), eltype(eltype(matrix2)), lg_type) + rmul_return_type(eltype(eltype(matrix1)), eltype(eltype(matrix2))) return band_matrix_row_type(prod_ld, prod_ud, prod_value_type) else # matrix-vector multiplication vector = arg - return rmul_type(eltype(eltype(matrix1)), eltype(vector), lg_type) + return rmul_return_type(eltype(eltype(matrix1)), eltype(vector)) end end diff --git a/src/MatrixFields/rmul_with_projection.jl b/src/MatrixFields/rmul_with_projection.jl index 76f9846b68..9cc449583f 100644 --- a/src/MatrixFields/rmul_with_projection.jl +++ b/src/MatrixFields/rmul_with_projection.jl @@ -1,26 +1,27 @@ const SingleValue = Union{Number, Geometry.AxisTensor, Geometry.AdjointAxisTensor} +""" + mul_with_projection(x, y, lg) + +Similar to `x * y`, except that this version automatically projects `y` to avoid +`DimensionMismatch` errors for `AxisTensor`s. For example, if `x` is a covector +along the `Covariant3Axis` (e.g., `Covariant3Vector(1)'`), then `y` will be +projected onto the `Contravariant3Axis`. In general, the first axis of `y` will +be projected onto the dual of the last axis of `x`. +""" mul_with_projection(x, y, _) = x * y -mul_with_projection(x::Geometry.AdjointAxisVector, y::Geometry.AxisTensor, lg) = - x * Geometry.project(Geometry.dual(axes(x, 2)), y, lg) -mul_with_projection(::Geometry.AdjointAxisTensor, ::Geometry.AxisTensor, _) = - error("mul_with_projection is currently only implemented for covectors, \ - and higher-order cotensors are not supported") -# We should add methods for other cotensors (e.g., AdjointAxis2Tensor) when they -# are needed (e.g., when we need to support matrices that represent the -# divergence of higher-order tensors). 
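+# A rough usage sketch of the projection behavior described in the docstring
+# above (here `lg` is assumed to be a `Geometry.LocalGeometry` for the relevant
+# space, and the particular values are only for illustration):
+#
+#     x = Geometry.Covariant3Vector(1.0)'  # covector along the Covariant3Axis
+#     y = Geometry.Covariant3Vector(2.0)   # x * y alone throws a DimensionMismatch
+#     mul_with_projection(x, y, lg)        # projects y onto the Contravariant3Axis,
+#                                          # then multiplies, yielding a scalar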
+mul_with_projection( + x::Union{Geometry.AdjointAxisVector, Geometry.Axis2TensorOrAdj}, + y::Geometry.AxisTensor, + lg, +) = x * Geometry.project(Geometry.dual(axes(x)[2]), y, lg) """ rmul_with_projection(x, y, lg) -Similar to `rmul(x, y)`, but with automatic projection of `y` when `x` contains -a covector (i.e, an `AdjointAxisVector`). For example, if `x` is a covector -along the `Covariant3Axis` (e.g., `Covariant3Vector(1)'`), then `y` (or each -element of `y`) will be projected onto the `Contravariant3Axis`. In general, -each covector in `x` will cause `y` (or each corresponding element of `y`) to -be projected onto the dual axis of the covector. In the future, we may extend -this behavior to higher-order cotensors. +Similar to `rmul(x, y)`, except that this version calls `mul_with_projection` +instead of `*`. """ rmul_with_projection(x, y, lg) = rmap((x′, y′) -> mul_with_projection(x′, y′, lg), x, y) @@ -30,3 +31,125 @@ rmul_with_projection(x, y::SingleValue, lg) = rmap(x′ -> mul_with_projection(x′, y, lg), x) rmul_with_projection(x::SingleValue, y::SingleValue, lg) = mul_with_projection(x, y, lg) + +axis_tensor_type(::Type{T}, ::Type{Tuple{A1}}) where {T, A1} = + Geometry.AxisVector{T, A1, SVector{length(A1.instance), T}} +function axis_tensor_type(::Type{T}, ::Type{Tuple{A1, A2}}) where {T, A1, A2} + N1 = length(A1.instance) + N2 = length(A2.instance) + S = SMatrix{N1, N2, T, N1 * N2} + return Geometry.Axis2Tensor{T, Tuple{A1, A2}, S} +end + +adjoint_type(::Type{A}) where {A} = Adjoint{eltype(A), A} +adjoint_type(::Type{A}) where {T, S, A <: Adjoint{T, S}} = S + +""" + mul_return_type(X, Y) + +Computes the return type of `mul_with_projection(x, y, lg)`, where `x isa X` +and `y isa Y`. This can also be used to obtain the return type of `x * y`, +although `x * y` will throw an error when projection is necessary. + +Note: this should be equivalent to calling the internal function `_return_type`: +`Base._return_type(mul_with_projection, Tuple{X, Y, LG})`, where `lg isa LG`. +""" +mul_return_type(::Type{X}, ::Type{Y}) where {X, Y} = error( + "Unable to infer return type: Multiplying an object of type $X with an \ + object of type $Y will result in a method error", +) +# Note: If the behavior of * changes for any relevant types, the corresponding +# methods below should be updated. + +# Methods from Base: +mul_return_type(::Type{X}, ::Type{Y}) where {X <: Number, Y <: Number} = + promote_type(X, Y) +mul_return_type( + ::Type{X}, + ::Type{Y}, +) where {X <: AdjointAbsVec, Y <: AbstractMatrix} = + adjoint_type(mul_return_type(adjoint_type(Y), adjoint_type(X))) + +# Methods from ClimaCore: +mul_return_type( + ::Type{X}, + ::Type{Y}, +) where {T, N, A, X <: Number, Y <: Geometry.AxisTensor{T, N, A}} = + axis_tensor_type(promote_type(X, T), A) +mul_return_type( + ::Type{X}, + ::Type{Y}, +) where {T, N, A, X <: Geometry.AxisTensor{T, N, A}, Y <: Number} = + axis_tensor_type(promote_type(T, Y), A) +mul_return_type( + ::Type{X}, + ::Type{Y}, +) where {T, N, A, X <: Number, Y <: Geometry.AdjointAxisTensor{T, N, A}} = + adjoint_type(axis_tensor_type(promote_type(X, T), A)) +mul_return_type( + ::Type{X}, + ::Type{Y}, +) where {T, N, A, X <: Geometry.AdjointAxisTensor{T, N, A}, Y <: Number} = + adjoint_type(axis_tensor_type(promote_type(T, Y), A)) +mul_return_type( + ::Type{X}, + ::Type{Y}, +) where { + T1, + T2, + X <: Geometry.AdjointAxisVector{T1}, + Y <: Geometry.AxisVector{T2}, +} = promote_type(T1, T2) # This comes from the definition of dot. 
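+# For example, under the method above, the product of a Float64 covector and a
+# Float32 vector is inferred to be a Float64 scalar (the particular axes below
+# are only for illustration):
+#
+#     mul_return_type(
+#         typeof(Geometry.Covariant3Vector(1.0)'),
+#         typeof(Geometry.Contravariant3Vector(1.0f0)),
+#     ) == Float64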
+mul_return_type( + ::Type{X}, + ::Type{Y}, +) where { + T1, + T2, + A1, + A2, + X <: Geometry.AxisVector{T1, A1}, + Y <: Geometry.AdjointAxisVector{T2, A2}, +} = axis_tensor_type(promote_type(T1, T2), Tuple{A1, A2}) +mul_return_type( + ::Type{X}, + ::Type{Y}, +) where { + T1, + T2, + A1, + X <: Geometry.Axis2TensorOrAdj{T1, <:Tuple{A1, Any}}, + Y <: Geometry.AxisVector{T2}, +} = axis_tensor_type(promote_type(T1, T2), Tuple{A1}) +mul_return_type( + ::Type{X}, + ::Type{Y}, +) where { + T1, + T2, + A1, + A2, + X <: Geometry.Axis2TensorOrAdj{T1, <:Tuple{A1, Any}}, + Y <: Geometry.Axis2TensorOrAdj{T2, <:Tuple{Any, A2}}, +} = axis_tensor_type(promote_type(T1, T2), Tuple{A1, A2}) + +""" + rmul_return_type(X, Y) + +Computes the return type of `rmul_with_projection(x, y, lg)`, where `x isa X` +and `y isa Y`. This can also be used to obtain the return type of `rmul(x, y)`, +although `rmul(x, y)` will throw an error when projection is necessary. + +Note: this should be equivalent to calling the internal function `_return_type`: +`Base._return_type(rmul_with_projection, Tuple{X, Y, LG})`, where `lg isa LG`. +""" +rmul_return_type(::Type{X}, ::Type{Y}) where {X, Y} = + rmaptype((X′, Y′) -> mul_return_type(X′, Y′), X, Y) +rmul_return_type(::Type{X}, ::Type{Y}) where {X <: SingleValue, Y} = + rmaptype(Y′ -> mul_return_type(X, Y′), Y) +rmul_return_type(::Type{X}, ::Type{Y}) where {X, Y <: SingleValue} = + rmaptype(X′ -> mul_return_type(X′, Y), X) +rmul_return_type( + ::Type{X}, + ::Type{Y}, +) where {X <: SingleValue, Y <: SingleValue} = mul_return_type(X, Y) diff --git a/src/Operators/common.jl b/src/Operators/common.jl index 07315befdc..44a5ddef17 100644 --- a/src/Operators/common.jl +++ b/src/Operators/common.jl @@ -65,34 +65,33 @@ end # when sending Broadcasted objects to the GPU, we strip out the space # information at each level of the broadcast tree and Fields, replacing with # PlaceholderSpace. this reduces the amount runtime parameter data we send to -# the GPU, which is quite limited (~4kB). However, we need to keep the eltype of -# the local geometry data, since it may needed in order to infer the return -# eltype of a Field operation. +# the GPU, which is quite limited (~4kB). 
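+# An illustrative sketch of the resulting round trip (`space` stands for any
+# concrete space on the CPU side):
+#
+#     stripped = placeholder_space(space, space)      # PlaceholderSpace()
+#     reconstruct_placeholder_space(stripped, space)  # returns space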
# Functions for CUDASpectralStyle -struct PlaceholderSpace{LG} <: Spaces.AbstractSpace end -struct CenterPlaceholderSpace{LG} <: Spaces.AbstractSpace end -struct FacePlaceholderSpace{LG} <: Spaces.AbstractSpace end +struct PlaceholderSpace <: Spaces.AbstractSpace end +struct CenterPlaceholderSpace <: Spaces.AbstractSpace end +struct FacePlaceholderSpace <: Spaces.AbstractSpace end + placeholder_space(current_space::T, parent_space::T) where {T} = - PlaceholderSpace{local_geometry_type(current_space)}() + PlaceholderSpace() placeholder_space(current_space, parent_space) = current_space placeholder_space( current_space::Spaces.CenterFiniteDifferenceSpace, parent_space::Spaces.FaceFiniteDifferenceSpace, -) = CenterPlaceholderSpace{local_geometry_type(current_space)}() +) = CenterPlaceholderSpace() placeholder_space( current_space::Spaces.CenterExtrudedFiniteDifferenceSpace, parent_space::Spaces.FaceExtrudedFiniteDifferenceSpace, -) = CenterPlaceholderSpace{local_geometry_type(current_space)}() +) = CenterPlaceholderSpace() placeholder_space( current_space::Spaces.FaceFiniteDifferenceSpace, parent_space::Spaces.CenterFiniteDifferenceSpace, -) = FacePlaceholderSpace{local_geometry_type(current_space)}() +) = FacePlaceholderSpace() placeholder_space( current_space::Spaces.FaceExtrudedFiniteDifferenceSpace, parent_space::Spaces.CenterExtrudedFiniteDifferenceSpace, -) = FacePlaceholderSpace{local_geometry_type(current_space)}() +) = FacePlaceholderSpace() @inline reconstruct_placeholder_space(::PlaceholderSpace, parent_space) = parent_space @@ -115,11 +114,6 @@ placeholder_space( @inline reconstruct_placeholder_space(current_space, parent_space) = current_space -local_geometry_type(space::Spaces.AbstractSpace) = - eltype(Spaces.local_geometry_data(space)) -local_geometry_type(::PlaceholderSpace{LG}) where {LG} = LG -local_geometry_type(::CenterPlaceholderSpace{LG}) where {LG} = LG -local_geometry_type(::FacePlaceholderSpace{LG}) where {LG} = LG strip_space(obj, parent_space) = obj diff --git a/test/MatrixFields/rmul_with_projection.jl b/test/MatrixFields/rmul_with_projection.jl index d7f4855865..44b610f9cc 100644 --- a/test/MatrixFields/rmul_with_projection.jl +++ b/test/MatrixFields/rmul_with_projection.jl @@ -4,10 +4,11 @@ using Random: seed! using StaticArrays: @SMatrix import ClimaCore: Geometry -import ClimaCore.MatrixFields: rmul_with_projection +import ClimaCore.MatrixFields: rmul_with_projection, rmul_return_type -function test_rmul_with_projection(x, y, lg, expected_result) +function test_rmul_with_projection(x::X, y::Y, lg, expected_result) where {X, Y} result = rmul_with_projection(x, y, lg) + result_type = rmul_return_type(X, Y) # Compute the maximum error as an integer multiple of machine epsilon. FT = Geometry.undertype(typeof(lg)) @@ -22,6 +23,10 @@ function test_rmul_with_projection(x, y, lg, expected_result) @test max_error <= 1 # correctness @test (@allocated rmul_with_projection(x, y, lg)) == 0 # allocations @test_opt rmul_with_projection(x, y, lg) # type instabilities + + @test result_type == typeof(result) # correctness + @test (@allocated rmul_return_type(X, Y)) == 0 # allocations + @test_opt rmul_return_type(X, Y) # type instabilities end @testset "rmul_with_projection Unit Tests" begin