diff --git a/bld/namelist_files/namelist_defaults_cam.xml b/bld/namelist_files/namelist_defaults_cam.xml
index 54ada37312..ada7e7d632 100644
--- a/bld/namelist_files/namelist_defaults_cam.xml
+++ b/bld/namelist_files/namelist_defaults_cam.xml
@@ -274,6 +274,8 @@
atm/waccm/ic/FW2000_CONUS_30x8_L70_01-01-0001_c200602.nc
+atm/waccm/ic/mpasa120km.waccm_fulltopo_c220818.nc
+
atm/cam/inic/mpas/cami_01-01-2000_00Z_mpasa120_L32_CFSR_c210426.nc
atm/cam/inic/mpas/cami_01-01-2000_00Z_mpasa480_L32_CFSR_c211013.nc
@@ -3135,12 +3137,14 @@
2
1800.0D0
900.0D0
+ 600.D0
450.0D0
225.0D0
.true.
2
3
+ 4
0.0D0
0.0D0
0.0D0
@@ -3170,12 +3174,16 @@
0.125D0
.true.
0.1D0
+ 0.5D0
0.1D0
0.5D0
+ 0.0D0
.true.
22000.0D0
+ 80000.0D0
0.2D0
0.0D0
+ 0.2D0
0
.true.
5.0
diff --git a/cime_config/config_pes.xml b/cime_config/config_pes.xml
index 1b52b2be50..7f0f268fe8 100644
--- a/cime_config/config_pes.xml
+++ b/cime_config/config_pes.xml
@@ -257,6 +257,48 @@
+
+
+
+
+ 360
+ 360
+ 360
+ 360
+ 360
+ 360
+
+
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+
+
+
+
+
+
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+
+
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+
+
+
+
diff --git a/cime_config/testdefs/testlist_cam.xml b/cime_config/testdefs/testlist_cam.xml
index 854ad1ac5a..b3673cc7c4 100644
--- a/cime_config/testdefs/testlist_cam.xml
+++ b/cime_config/testdefs/testlist_cam.xml
@@ -726,15 +726,16 @@
-
+
+
-
+
@@ -2133,6 +2134,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq1d_physgrid_tem_mpasa120_wcmsc/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/outfrq1d_physgrid_tem_mpasa120_wcmsc/user_nl_cam
index 1769cf51c8..a5fa13c3a1 100644
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq1d_physgrid_tem_mpasa120_wcmsc/user_nl_cam
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq1d_physgrid_tem_mpasa120_wcmsc/user_nl_cam
@@ -1,15 +1,3 @@
-ncdata = '$DIN_LOC_ROOT/atm/waccm/ic/mpasa120km.waccm_fulltopo_c220818.nc'
-
-mpas_cam_coef = 0.2D0
-mpas_rayleigh_damp_u_timescale_days = 5.D0
-mpas_zd = 80000.0D0
-mpas_apvm_upwinding = 0.0D0
-mpas_dt = 600.D0
-mpas_dynamics_split_steps = 4
-mpas_epssm = 0.5D0
-
-use_gw_front = .false.
-
phys_grid_ctem_nfreq = -12
phys_grid_ctem_zm_nbas = 120
phys_grid_ctem_za_nlat = 90
diff --git a/doc/ChangeLog b/doc/ChangeLog
index bc5b36c08f..5428fcaed0 100644
--- a/doc/ChangeLog
+++ b/doc/ChangeLog
@@ -1,4 +1,92 @@
+===============================================================
+
+Tag name: cam6_3_138
+Originator(s): fvitt, skamaroc
+Date: 1 Dec 2023
+One-line Summary: Frontogenesis gravity waves with MPAS dynamical core
+Github PR URL: https://github.com/ESCOMP/CAM/pull/688
+
+Purpose of changes (include the issue number and title text for each relevant GitHub issue):
+
+ Add the capability to generate frontal gravity wave forcings when the MPAS dynamical core
+ is used. See github issue #400.
+
+Describe any changes made to build system: N/A
+
+Describe any changes made to the namelist: N/A
+
+List any changes to the defaults for the boundary datasets: N/A
+
+Describe any substantial timing or memory changes: N/A
+
+Code reviewed by: cacraigucar brian-eaton jtruesdal nusbaume
+
+List all files eliminated: N/A
+
+List all files added and what they do: N/A
+List all existing files that have been modified, and describe the changes:
+M bld/namelist_files/namelist_defaults_cam.xml
+ - default namelist settings for waccm on mpasa120 grid
+
+M cime_config/config_pes.xml
+ - working cheyenne and derecho PE layouts for waccm on mpasa120 grid
+
+M cime_config/testdefs/testlist_cam.xml
+ - tests for waccm-sc on mpasa120 grid
+
+M cime_config/testdefs/testmods_dirs/cam/outfrq1d_physgrid_tem_mpasa120_wcmsc/user_nl_cam
+ - mpas namelist setting moved to namelist_defaults_cam.xml
+
+M src/dynamics/mpas/dp_coupling.F90
+ - implement function for front generated gravity wave forcings
+ - code cleanup
+
+M src/dynamics/mpas/driver/cam_mpas_subdriver.F90
+ - add MPAS stream fields for cell gradient coefficients
+ - code cleanup
+
+M src/dynamics/mpas/dyn_comp.F90
+ - set dyn_out pointers for frontogenesis calculations
+ - code cleanup
+
+M src/dynamics/mpas/dyn_grid.F90
+ - minor code cleanup
+
+If there were any failures reported from running test_driver.sh on any test
+platform, and checkin with these failures has been OK'd by the gatekeeper,
+then copy the lines from the td.*.status files for the failed tests to the
+appropriate machine below. All failed tests must be justified.
+
+cheyenne/intel/aux_cam:
+ ERP_Ln9_Vnuopc.C96_C96_mg17.F2000climo.cheyenne_intel.cam-outfrq9s_mg3 (Overall: FAIL) details:
+ FAIL ERP_Ln9_Vnuopc.C96_C96_mg17.F2000climo.cheyenne_intel.cam-outfrq9s_mg3 MODEL_BUILD time=2
+ ERP_Ln9_Vnuopc.f09_f09_mg17.FCSD_HCO.cheyenne_intel.cam-outfrq9s (Overall: FAIL) details:
+ FAIL ERP_Ln9_Vnuopc.f09_f09_mg17.FCSD_HCO.cheyenne_intel.cam-outfrq9s COMPARE_base_rest
+ FAIL ERP_Ln9_Vnuopc.f09_f09_mg17.FCSD_HCO.cheyenne_intel.cam-outfrq9s BASELINE /glade/p/cesm/amwg/cesm_baselines/cam6_3_137: DIFF
+ - pre-existing failures
+
+derecho/intel/aux_cam:
+ ERP_Ln9_Vnuopc.C96_C96_mg17.F2000climo.derecho_intel.cam-outfrq9s_mg3 (Overall: PEND) details:
+ PEND ERP_Ln9_Vnuopc.C96_C96_mg17.F2000climo.derecho_intel.cam-outfrq9s_mg3 SHAREDLIB_BUILD RERUN
+ ERP_Ln9_Vnuopc.f09_f09_mg17.FCSD_HCO.derecho_intel.cam-outfrq9s (Overall: FAIL) details:
+ FAIL ERP_Ln9_Vnuopc.f09_f09_mg17.FCSD_HCO.derecho_intel.cam-outfrq9s COMPARE_base_rest
+ ERP_Ln9_Vnuopc.ne30pg3_ne30pg3_mg17.FW2000climo.derecho_intel.cam-outfrq9s_wcm_ne30 (Overall: PEND) details:
+ PEND ERP_Ln9_Vnuopc.ne30pg3_ne30pg3_mg17.FW2000climo.derecho_intel.cam-outfrq9s_wcm_ne30 RUN
+ PEND ERP_Ln9_Vnuopc.ne30pg3_ne30pg3_mg17.FW2000climo.derecho_intel.cam-outfrq9s_wcm_ne30 COMPARE_base_rest
+ - pre-existing failures
+
+izumi/nag/aux_cam:
+ DAE_Vnuopc.f45_f45_mg37.FHS94.izumi_nag.cam-dae (Overall: FAIL) details:
+ FAIL DAE_Vnuopc.f45_f45_mg37.FHS94.izumi_nag.cam-dae RUN time=10
+ PEND DAE_Vnuopc.f45_f45_mg37.FHS94.izumi_nag.cam-dae COMPARE_base_da
+ - pre-existing failure
+
+izumi/gnu/aux_cam: All PASS
+
+Summarize any changes to answers: bit-for-bit unchanged
+
+===============================================================
===============================================================
Tag name: cam6_3_137
diff --git a/src/dynamics/mpas/dp_coupling.F90 b/src/dynamics/mpas/dp_coupling.F90
index 9366c6fac3..10d75b4b8c 100644
--- a/src/dynamics/mpas/dp_coupling.F90
+++ b/src/dynamics/mpas/dp_coupling.F90
@@ -8,24 +8,21 @@ module dp_coupling
use pmgrid, only: plev
use ppgrid, only: begchunk, endchunk, pcols, pver, pverp
use constituents, only: pcnst, cnst_type
-use physconst, only: gravit, cappa, rh2o, zvir
-use air_composition,only: cpairv, rairv
+use physconst, only: gravit, cappa, zvir
+use air_composition,only: cpairv
use air_composition,only: dry_air_species_num
-use spmd_dyn, only: local_dp_map, block_buf_nrecs, chunk_buf_nrecs
-use spmd_utils, only: mpicom, iam, masterproc
-
use dyn_comp, only: dyn_export_t, dyn_import_t
-
use physics_types, only: physics_state, physics_tend, physics_cnst_limit
-use phys_grid, only: get_dyn_col_p, get_chunk_info_p, get_ncols_p, get_gcol_all_p
+use phys_grid, only: get_dyn_col_p, get_chunk_info_p, get_ncols_p
use phys_grid, only: columns_on_task
use physics_buffer, only: physics_buffer_desc, pbuf_get_chunk, pbuf_get_field
-use cam_logfile, only: iulog
-use perf_mod, only: t_startf, t_stopf, t_barrierf
+use perf_mod, only: t_startf, t_stopf
use cam_abortutils, only: endrun
-use air_composition,only: thermodynamic_active_species_num,thermodynamic_active_species_idx,thermodynamic_active_species_idx_dycore
+use air_composition,only: thermodynamic_active_species_num,thermodynamic_active_species_idx, &
+ thermodynamic_active_species_idx_dycore
+
implicit none
private
save
@@ -41,12 +38,15 @@ module dp_coupling
!=========================================================================================
subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out)
+ use cam_mpas_subdriver, only: cam_mpas_update_halo
! Convert the dynamics output state into the physics input state.
! Note that all pressures and tracer mixing ratios coming from the dycore are based on
! dry air mass.
- use cam_history, only : hist_fld_active
- use mpas_constants, only : Rv_over_Rd => rvord
+ use cam_history, only: hist_fld_active
+ use dyn_comp, only: frontgf_idx, frontga_idx
+ use mpas_constants, only: Rv_over_Rd => rvord
+ use phys_control, only: use_gw_front, use_gw_front_igw
use cam_budget, only : thermo_budget_history
! arguments
@@ -71,16 +71,43 @@ subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out)
real(r8), pointer :: w(:,:)
real(r8), pointer :: theta_m(:,:)
real(r8), pointer :: tracers(:,:,:)
+
+ !
+ ! mesh information and coefficients needed for
+ ! frontogenesis function calculation
+ !
+ real(r8), pointer :: defc_a(:,:)
+ real(r8), pointer :: defc_b(:,:)
+ real(r8), pointer :: cell_gradient_coef_x(:,:)
+ real(r8), pointer :: cell_gradient_coef_y(:,:)
+ real(r8), pointer :: edgesOnCell_sign(:,:)
+ real(r8), pointer :: dvEdge(:)
+ real(r8), pointer :: areaCell(:)
+
+ integer, pointer :: cellsOnEdge(:,:)
+ integer, pointer :: edgesOnCell(:,:)
+ integer, pointer :: nEdgesOnCell(:)
+
+ real(r8), pointer :: uperp(:,:)
+ real(r8), pointer :: utangential(:,:)
+
+ !
+ ! local storage for frontogenesis function and angle
+ !
+ real(r8), pointer :: frontogenesisFunction(:,:)
+ real(r8), pointer :: frontogenesisAngle(:,:)
+ real(r8), pointer :: pbuf_frontgf(:,:)
+ real(r8), pointer :: pbuf_frontga(:,:)
+ real(r8), allocatable :: frontgf_phys(:,:,:)
+ real(r8), allocatable :: frontga_phys(:,:,:)
+
+ type(physics_buffer_desc), pointer :: pbuf_chnk(:)
+
integer :: lchnk, icol, icol_p, k, kk ! indices over chunks, columns, physics columns and layers
- integer :: i, m, ncols, blockid
+ integer :: i, m, ncols
integer :: block_index
integer, dimension(:), pointer :: block_offset
- integer :: pgcols(pcols)
- integer :: tsize ! amount of data per grid point passed to physics
- integer, allocatable :: bpter(:,:) ! offsets into block buffer for packing data
- integer, allocatable :: cpter(:,:) ! offsets into chunk buffer for unpacking data
-
real(r8), allocatable:: pmid(:,:) !mid-level hydrostatic pressure consistent with MPAS discrete state
real(r8), allocatable:: pintdry(:,:) !interface hydrostatic pressure consistent with MPAS discrete state
real(r8), allocatable:: pmiddry(:,:) !mid-level hydrostatic dry pressure consistent with MPAS discrete state
@@ -126,6 +153,48 @@ subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out)
nCellsSolve, plev, size(tracers, 1), index_qv, zz, zint, rho_zz, theta_m, exner, tracers,&
pmiddry, pintdry, pmid)
+ if (use_gw_front .or. use_gw_front_igw) then
+ call cam_mpas_update_halo('scalars', endrun) ! scalars is the name of tracers in the MPAS state pool
+ nullify(pbuf_chnk)
+ nullify(pbuf_frontgf)
+ nullify(pbuf_frontga)
+ !
+ ! compute frontogenesis function and angle for gravity wave scheme
+ !
+ defc_a => dyn_out % defc_a
+ defc_b => dyn_out % defc_b
+ cell_gradient_coef_x => dyn_out % cell_gradient_coef_x
+ cell_gradient_coef_y => dyn_out % cell_gradient_coef_y
+ edgesOnCell_sign => dyn_out % edgesOnCell_sign
+ dvEdge => dyn_out % dvEdge
+ areaCell => dyn_out % areaCell
+ cellsOnEdge => dyn_out % cellsOnEdge
+ edgesOnCell => dyn_out % edgesOnCell
+ nEdgesOnCell => dyn_out % nEdgesOnCell
+ uperp => dyn_out % uperp
+ utangential => dyn_out % utangential
+
+ allocate(frontogenesisFunction(plev, nCellsSolve), stat=ierr)
+ if( ierr /= 0 ) call endrun(subname//':failed to allocate frontogenesisFunction array')
+ allocate(frontogenesisAngle(plev, nCellsSolve), stat=ierr)
+ if( ierr /= 0 ) call endrun(subname//':failed to allocate frontogenesisAngle array')
+
+ allocate(frontgf_phys(pcols, pver, begchunk:endchunk), stat=ierr)
+ if( ierr /= 0 ) call endrun(subname//':failed to allocate frontgf_phys array')
+ allocate(frontga_phys(pcols, pver, begchunk:endchunk), stat=ierr)
+ if( ierr /= 0 ) call endrun(subname//':failed to allocate frontga_phys array')
+
+
+ call calc_frontogenesis( frontogenesisFunction, frontogenesisAngle, &
+ theta_m, tracers(index_qv,:,:), &
+ uperp, utangential, defc_a, defc_b, &
+ cell_gradient_coef_x, cell_gradient_coef_y, &
+ areaCell, dvEdge, cellsOnEdge, edgesOnCell, &
+ nEdgesOnCell, edgesOnCell_sign, &
+ plev, nCellsSolve )
+
+ end if
+
call t_startf('dpcopy')
ncols = columns_on_task
@@ -151,6 +220,11 @@ subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out)
phys_state(lchnk)%omega(icol_p,k) = -rho_zz(kk,i)*zz(kk,i)*gravit*0.5_r8*(w(kk,i)+w(kk+1,i)) ! omega
phys_state(lchnk)%pmiddry(icol_p,k) = pmiddry(kk,i)
phys_state(lchnk)%pmid(icol_p,k) = pmid(kk,i)
+
+ if (use_gw_front .or. use_gw_front_igw) then
+ frontgf_phys(icol_p, k, lchnk) = frontogenesisFunction(kk, i)
+ frontga_phys(icol_p, k, lchnk) = frontogenesisAngle(kk, i)
+ end if
end do
do k = 1, pverp
@@ -166,6 +240,27 @@ subroutine d_p_coupling(phys_state, phys_tend, pbuf2d, dyn_out)
end do
end do
+ if (use_gw_front .or. use_gw_front_igw) then
+
+ !$omp parallel do private (lchnk, ncols, icol, k, pbuf_chnk, pbuf_frontgf, pbuf_frontga)
+ do lchnk = begchunk, endchunk
+ ncols = get_ncols_p(lchnk)
+ pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
+ call pbuf_get_field(pbuf_chnk, frontgf_idx, pbuf_frontgf)
+ call pbuf_get_field(pbuf_chnk, frontga_idx, pbuf_frontga)
+ do k = 1, pver
+ do icol = 1, ncols
+ pbuf_frontgf(icol, k) = frontgf_phys(icol, k, lchnk)
+ pbuf_frontga(icol, k) = frontga_phys(icol, k, lchnk)
+ end do
+ end do
+ end do
+ deallocate(frontgf_phys)
+ deallocate(frontga_phys)
+ deallocate(frontogenesisFunction)
+ deallocate(frontogenesisAngle)
+ end if
+
call t_stopf('dpcopy')
call t_startf('derived_phys')
@@ -198,7 +293,7 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in)
! Local variables
integer :: lchnk, icol, icol_p, k, kk ! indices over chunks, columns, layers
- integer :: i, m, ncols, blockid
+ integer :: i, m, ncols
integer :: block_index
integer, dimension(:), pointer :: block_offset
@@ -208,7 +303,6 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in)
! Variables from dynamics import container
integer :: nCellsSolve
integer :: nCells
- integer :: nEdgesSolve
integer :: index_qv
integer, dimension(:), pointer :: mpas_from_cam_cnst
@@ -222,11 +316,6 @@ subroutine p_d_coupling(phys_state, phys_tend, dyn_in)
integer :: idx_phys, idx_dycore
- integer :: pgcols(pcols)
- integer :: tsize ! amount of data per grid point passed to dynamics
- integer, allocatable :: bpter(:,:) ! offsets into block buffer for unpacking data
- integer, allocatable :: cpter(:,:) ! offsets into chunk buffer for packing data
-
type (mpas_pool_type), pointer :: tend_physics
type (field2DReal), pointer :: tend_uzonal, tend_umerid
@@ -340,7 +429,7 @@ subroutine derived_phys(phys_state, phys_tend, pbuf2d)
! Local variables
- integer :: i, k, lchnk, m, ncol, m_cnst
+ integer :: k, lchnk, m, ncol, m_cnst
real(r8) :: factor(pcols,pver)
real(r8) :: zvirv(pcols,pver)
@@ -515,13 +604,11 @@ subroutine derived_tend(nCellsSolve, nCells, t_tend, u_tend, v_tend, q_tend, dyn
real(r8), pointer :: north(:,:)
integer, pointer :: cellsOnEdge(:,:)
- real(r8), pointer :: theta(:,:)
- real(r8), pointer :: exner(:,:)
real(r8), pointer :: rho_zz(:,:)
real(r8), pointer :: tracers(:,:,:)
integer :: index_qv,m,idx_dycore
- real(r8) :: rhok,thetavk,thetak,pk,exnerk,tempk,tempk_new,exnerk_new,thetak_new,thetak_m_new,rhodk,tknew,thetaknew
+ real(r8) :: thetak,exnerk,rhodk,tknew,thetaknew
!
! variables for energy diagnostics
!
@@ -536,7 +623,7 @@ subroutine derived_tend(nCellsSolve, nCells, t_tend, u_tend, v_tend, q_tend, dyn
real(r8) :: Rnew(nCellsSolve,pver)
real(r8) :: qk (thermodynamic_active_species_num,pver,nCellsSolve) !water species before physics (diagnostics)
real(r8) :: qktmp (nCellsSolve,pver,thermodynamic_active_species_num)
- integer :: idx_thermo (thermodynamic_active_species_num)
+ integer :: idx_thermo (thermodynamic_active_species_num)
real(r8) :: qwv(pver,nCellsSolve) !water vapor before physics
real(r8) :: facnew, facold
@@ -555,8 +642,6 @@ subroutine derived_tend(nCellsSolve, nCells, t_tend, u_tend, v_tend, q_tend, dyn
normal => dyn_in % normal
cellsOnEdge => dyn_in % cellsOnEdge
- theta => dyn_in % theta
- exner => dyn_in % exner
rho_zz => dyn_in % rho_zz
tracers => dyn_in % tracers
index_qv = dyn_in % index_qv
@@ -727,8 +812,7 @@ subroutine hydrostatic_pressure(nCells, nVertLevels, qsize, index_qv, zz, zgrid,
! The vertical dimension for 3-d arrays is innermost, and k=1 represents
! the lowest layer or level in the fields.
!
- use mpas_constants, only : cp, rgas, cv, gravity, p0, Rv_over_Rd => rvord
- use physconst, only: rair, cpair
+ use mpas_constants, only: cp, rgas, cv, gravity, p0, Rv_over_Rd => rvord
! Arguments
integer, intent(in) :: nCells
@@ -750,8 +834,8 @@ subroutine hydrostatic_pressure(nCells, nVertLevels, qsize, index_qv, zz, zgrid,
real(r8), dimension(nVertLevels) :: dz ! Geometric layer thickness in column
real(r8), dimension(nVertLevels) :: dp,dpdry ! Pressure thickness
real(r8), dimension(nVertLevels+1,nCells) :: pint ! hydrostatic pressure at interface
- real(r8) :: pi, t, sum_water
- real(r8) :: pk,rhok,rhodryk,theta,thetavk,kap1,kap2,tvk,tk
+ real(r8) :: sum_water
+ real(r8) :: pk,rhok,rhodryk,thetavk,kap1,kap2,tvk,tk
!
! For each column, integrate downward from model top to compute dry hydrostatic pressure at layer
! midpoints and interfaces. The pressure averaged to layer midpoints should be consistent with
@@ -765,7 +849,7 @@ subroutine hydrostatic_pressure(nCells, nVertLevels, qsize, index_qv, zz, zgrid,
do idx=dry_air_species_num+1,thermodynamic_active_species_num
rhok = rhok+q(thermodynamic_active_species_idx_dycore(idx),k,iCell)
end do
- rhok = rhok*rhodryk
+ rhok = rhok*rhodryk
dp(k) = gravit*dz(k)*rhok
dpdry(k) = gravit*dz(k)*rhodryk
end do
@@ -782,7 +866,7 @@ subroutine hydrostatic_pressure(nCells, nVertLevels, qsize, index_qv, zz, zgrid,
!
! model top pressure consistently diagnosed using the assumption that the mid level
! is at height z(nVertLevels-1)+0.5*dz
- !
+ !
pintdry(nVertLevels+1,iCell) = pk-0.5_r8*dz(nVertLevels)*rhok*gravity !hydrostatic
pint (nVertLevels+1,iCell) = pintdry(nVertLevels+1,iCell)
do k = nVertLevels, 1, -1
@@ -805,7 +889,7 @@ subroutine hydrostatic_pressure(nCells, nVertLevels, qsize, index_qv, zz, zgrid,
end subroutine hydrostatic_pressure
subroutine tot_energy_dyn(nCells, nVertLevels, qsize, index_qv, zz, zgrid, rho_zz, theta_m, q, ux,uy,outfld_name_suffix)
- use physconst, only: rair, cpair, gravit,cappa!=R/cp (dry air)
+ use physconst, only: rair, gravit
use mpas_constants, only: p0,cv,rv,rgas,cp
use cam_history, only: outfld, hist_fld_active
use mpas_constants, only: Rv_over_Rd => rvord
@@ -816,6 +900,7 @@ subroutine tot_energy_dyn(nCells, nVertLevels, qsize, index_qv, zz, zgrid, rho_z
use cam_thermo, only: get_hydrostatic_energy,thermo_budget_vars
use dyn_tests_utils, only: vcoord=>vc_height
use cam_history_support, only: max_fieldname_len
+
! Arguments
integer, intent(in) :: nCells
integer, intent(in) :: nVertLevels
@@ -849,13 +934,13 @@ subroutine tot_energy_dyn(nCells, nVertLevels, qsize, index_qv, zz, zgrid, rho_z
do i=1,thermo_budget_num_vars
name_out(i)=trim(thermo_budget_vars(i))//'_'//trim(outfld_name_suffix)
end do
-
+
kinetic_energy = 0.0_r8
potential_energy = 0.0_r8
internal_energy = 0.0_r8
water_vapor = 0.0_r8
tracers = 0.0_r8
-
+
do iCell = 1, nCells
do k = 1, nVertLevels
dz = zgrid(k+1,iCell) - zgrid(k,iCell)
@@ -863,11 +948,11 @@ subroutine tot_energy_dyn(nCells, nVertLevels, qsize, index_qv, zz, zgrid, rho_z
rhod = zz(k,iCell) * rho_zz(k,iCell)
theta = theta_m(k,iCell)/(1.0_r8 + Rv_over_Rd *q(index_qv,k,iCell))!convert theta_m to theta
exner = (rgas*rhod*theta_m(k,iCell)/p0)**(rgas/cv)
-
+
temperature(iCell,k) = exner*theta
pdeldry(iCell,k) = gravit*rhod*dz
!
- ! internal energy coefficient for MPAS
+ ! internal energy coefficient for MPAS
! (equation 92 in Eldred et al. 2023; https://rmets.onlinelibrary.wiley.com/doi/epdf/10.1002/qj.4353)
!
cp_or_cv(iCell,k) = rair
@@ -882,7 +967,7 @@ subroutine tot_energy_dyn(nCells, nVertLevels, qsize, index_qv, zz, zgrid, rho_z
v(iCell,k) = uy(k,iCell)
phis(iCell) = zgrid(1,iCell)*gravit
do idx=dry_air_species_num+1,thermodynamic_active_species_num
- idx_tmp = thermodynamic_active_species_idx_dycore(idx)
+ idx_tmp = thermodynamic_active_species_idx_dycore(idx)
tracers(iCell,k,idx_tmp) = q(idx_tmp,k,iCell)
end do
end do
@@ -891,7 +976,7 @@ subroutine tot_energy_dyn(nCells, nVertLevels, qsize, index_qv, zz, zgrid, rho_z
vcoord=vcoord, phis = phis, z_mid=zcell, dycore_idx=.true., &
se=internal_energy, po=potential_energy, ke=kinetic_energy, &
wv=water_vapor , liq=liq , ice=ice)
-
+
call outfld(name_out(seidx),internal_energy ,ncells,1)
call outfld(name_out(poidx),potential_energy,ncells,1)
call outfld(name_out(keidx),kinetic_energy ,ncells,1)
@@ -899,7 +984,111 @@ subroutine tot_energy_dyn(nCells, nVertLevels, qsize, index_qv, zz, zgrid, rho_z
call outfld(name_out(wlidx),liq ,ncells,1)
call outfld(name_out(wiidx),ice ,ncells,1)
call outfld(name_out(teidx),potential_energy+internal_energy+kinetic_energy,ncells,1)
-
+
end subroutine tot_energy_dyn
+ subroutine calc_frontogenesis( frontogenesisFunction, frontogenesisAngle, &
+ theta_m, qv, u,v, defc_a, defc_b, cell_gradient_coef_x, cell_gradient_coef_y, &
+ areaCell, dvEdge, cellsOnEdge, edgesOnCell, nEdgesOnCell, edgesOnCell_sign, &
+ nVertLevels, nCellsSolve )
+
+ use mpas_constants, only: rvord
+
+ ! inputs
+
+ integer, intent(in) :: nVertLevels, nCellsSolve
+ real(r8), dimension(:,:), intent(in) :: theta_m, qv
+ real(r8), dimension(:,:), intent(in) :: u, v
+ real(r8), dimension(:,:), intent(in) :: defc_a
+ real(r8), dimension(:,:), intent(in) :: defc_b
+ real(r8), dimension(:,:), intent(in) :: cell_gradient_coef_x
+ real(r8), dimension(:,:), intent(in) :: cell_gradient_coef_y
+ real(r8), dimension(:,:), intent(in) :: edgesOnCell_sign
+ real(r8), dimension(:), intent(in) :: dvEdge
+ real(r8), dimension(:), intent(in) :: areaCell
+ integer, dimension(:,:), intent(in) :: cellsOnEdge
+ integer, dimension(:,:), intent(in) :: edgesOnCell
+ integer, dimension(:), intent(in) :: nEdgesOnCell
+
+ ! outputs
+
+ real(r8), dimension(:,:), intent(out) :: frontogenesisFunction
+ real(r8), dimension(:,:), intent(out) :: frontogenesisAngle
+
+ ! local storage
+
+ integer :: iCell, iEdge, k, cell1, cell2
+ real(r8), dimension(nVertLevels) :: d_diag, d_off_diag, divh, theta_x, theta_y
+ real(r8) :: edge_sign, thetaEdge
+
+ !
+ ! for each column, compute frontogenesis function and del(theta) angle
+ !
+
+ do iCell = 1,nCellsSolve
+
+ d_diag(1:nVertLevels) = 0.0_r8
+ d_off_diag(1:nVertLevels) = 0.0_r8
+ divh(1:nVertLevels) = 0.0_r8
+ theta_x(1:nVertLevels) = 0.0_r8
+ theta_y(1:nVertLevels) = 0.0_r8
+
+ !
+ ! Integrate over edges to compute cell-averaged divergence, deformation,
+ ! d(theta)/dx, and d(theta)/dy. (x,y) are aligned with (lon,lat) at the
+ ! cell center in the 2D tangent-plane approximation used here. This alignment
+ ! is set in the initialization routine for the coefficients
+ ! defc_a, defc_b, cell_gradient_coef_x and cell_gradient_coef_y that is
+ ! part of the MPAS mesh initialization. The horizontal divergence is calculated
+ ! as it is in the MPAS solver, i.e. on the sphere as opposed to on the tangent plane.
+ !
+ do iEdge=1,nEdgesOnCell(iCell)
+
+ edge_sign = edgesOnCell_sign(iEdge,iCell) * dvEdge(edgesOnCell(iEdge,iCell)) / areaCell(iCell)
+ cell1 = cellsOnEdge(1,edgesOnCell(iEdge,iCell))
+ cell2 = cellsOnEdge(2,edgesOnCell(iEdge,iCell))
+
+ do k=1,nVertLevels
+
+ d_diag(k) = d_diag(k) + defc_a(iEdge,iCell)*u(k,edgesOnCell(iEdge,iCell)) &
+ - defc_b(iEdge,iCell)*v(k,edgesOnCell(iEdge,iCell))
+ d_off_diag(k) = d_off_diag(k) + defc_b(iEdge,iCell)*u(k,edgesOnCell(iEdge,iCell)) &
+ + defc_a(iEdge,iCell)*v(k,edgesOnCell(iEdge,iCell))
+ divh(k) = divh(k) + edge_sign * u(k,edgesOnCell(iEdge,iCell))
+ thetaEdge = 0.5_r8*( theta_m(k,cell1)/(1.0_r8 + rvord*qv(k,cell1)) &
+ +theta_m(k,cell2)/(1.0_r8 + rvord*qv(k,cell2)) )
+ theta_x(k) = theta_x(k) + cell_gradient_coef_x(iEdge,iCell)*thetaEdge
+ theta_y(k) = theta_y(k) + cell_gradient_coef_y(iEdge,iCell)*thetaEdge
+
+ end do
+
+ end do
+
+ !
+ ! compute the frontogenesis function:
+ ! 1/2 d/dt( |del(theta)|^2 ) = 1/2 (
+ ! - Div * |del(theta)|^2
+ ! - E (d(theta)/dx)^2
+ ! - 2F (d(theta)/dx)*(d(theta)/dy)
+ ! + E (d(theta)/dy)^2 )
+ ! where
+ ! Div = u_x + v_y (horizontal velocity divergence)
+ ! E = u_x - v_y (stretching deformation)
+ ! F = v_x + u_y (shearing deformation)
+ !
+ do k=1, nVertLevels
+
+ frontogenesisFunction(k,iCell) = 0.5_r8*( &
+ -divh(k)*(theta_x(k)**2 + theta_y(k)**2) &
+ -d_diag(k)*theta_x(k)**2 &
+ -2.0_r8*d_off_diag(k)*theta_x(k)*theta_y(k) &
+ +d_diag(k)*theta_y(k)**2 )
+ frontogenesisAngle(k,iCell) = atan2(theta_y(k),theta_x(k))
+
+ end do
+
+ end do
+
+ end subroutine calc_frontogenesis
+
+
end module dp_coupling
diff --git a/src/dynamics/mpas/driver/cam_mpas_subdriver.F90 b/src/dynamics/mpas/driver/cam_mpas_subdriver.F90
index 29572ca5ef..676bacd4af 100644
--- a/src/dynamics/mpas/driver/cam_mpas_subdriver.F90
+++ b/src/dynamics/mpas/driver/cam_mpas_subdriver.F90
@@ -11,7 +11,8 @@ module cam_mpas_subdriver
!-------------------------------------------------------------------------------
use cam_abortutils, only: endrun
- use mpas_derived_types, only : core_type, dm_info, domain_type, MPAS_Clock_type
+ use mpas_derived_types, only : core_type, domain_type, MPAS_Clock_type
+ use phys_control, only: use_gw_front, use_gw_front_igw
implicit none
@@ -75,7 +76,6 @@ subroutine cam_mpas_init_phase1(mpicom, endrun, logUnits, realkind)
use mpas_domain_routines, only : mpas_allocate_domain
use mpas_framework, only : mpas_framework_init_phase1
use atm_core_interface, only : atm_setup_core, atm_setup_domain
- use mpas_pool_routines, only : mpas_pool_add_config
use mpas_kind_types, only : RKIND
! Dummy argument
@@ -147,7 +147,6 @@ end subroutine cam_mpas_init_phase1
!-----------------------------------------------------------------------
subroutine cam_mpas_init_phase2(pio_subsystem, endrun, cam_calendar)
- use mpas_log, only : mpas_log_write
use mpas_kind_types, only : ShortStrKIND
use pio_types, only : iosystem_desc_t
@@ -159,7 +158,6 @@ subroutine cam_mpas_init_phase2(pio_subsystem, endrun, cam_calendar)
character(len=*), intent(in) :: cam_calendar
integer :: ierr
- logical :: streamsExists
character(len=ShortStrKIND) :: mpas_calendar
@@ -221,52 +219,19 @@ end subroutine cam_mpas_init_phase2
!> the number of constituents.
!
!-----------------------------------------------------------------------
- subroutine cam_mpas_init_phase3(fh_ini, num_scalars, endrun)
+ subroutine cam_mpas_init_phase3(fh_ini, num_scalars)
- use mpas_log, only : mpas_log_write
use pio, only : file_desc_t
- use iso_c_binding, only : c_int, c_char, c_ptr, c_loc
-
- use mpas_derived_types, only : MPAS_Time_type, MPAS_TimeInterval_type
- use mpas_derived_types, only : MPAS_IO_PNETCDF, MPAS_IO_PNETCDF5, MPAS_IO_NETCDF, MPAS_IO_NETCDF4
- use mpas_derived_types, only : MPAS_START_TIME
- use mpas_derived_types, only : MPAS_STREAM_MGR_NOERR
- use mpas_timekeeping, only : mpas_get_clock_time, mpas_get_time, mpas_expand_string, mpas_set_time, &
- mpas_set_timeInterval
- use mpas_stream_manager, only : MPAS_stream_mgr_init, mpas_build_stream_filename, MPAS_stream_mgr_validate_streams
- use mpas_kind_types, only : StrKIND
- use mpas_c_interfacing, only : mpas_c_to_f_string, mpas_f_to_c_string
+ use mpas_derived_types, only : MPAS_IO_NETCDF
+ use mpas_kind_types, only : StrKIND
use mpas_bootstrapping, only : mpas_bootstrap_framework_phase1, mpas_bootstrap_framework_phase2
use mpas_pool_routines, only : mpas_pool_add_config
type (file_desc_t), intent(inout) :: fh_ini
integer, intent(in) :: num_scalars
- procedure(halt_model) :: endrun
- integer :: ierr
- character(kind=c_char), dimension(StrKIND+1) :: c_filename ! StrKIND+1 for C null-termination character
- integer(kind=c_int) :: c_comm
- integer(kind=c_int) :: c_ierr
- type (c_ptr) :: mgr_p
- character(len=StrKIND) :: mesh_stream
character(len=StrKIND) :: mesh_filename
- character(len=StrKIND) :: mesh_filename_temp
- character(len=StrKIND) :: ref_time_temp
- character(len=StrKIND) :: filename_interval_temp
- character(kind=c_char), dimension(StrKIND+1) :: c_mesh_stream
- character(kind=c_char), dimension(StrKIND+1) :: c_mesh_filename_temp
- character(kind=c_char), dimension(StrKIND+1) :: c_ref_time_temp
- character(kind=c_char), dimension(StrKIND+1) :: c_filename_interval_temp
- character(kind=c_char), dimension(StrKIND+1) :: c_iotype
- type (MPAS_Time_type) :: start_time
- type (MPAS_Time_type) :: ref_time
- type (MPAS_TimeInterval_type) :: filename_interval
- character(len=StrKIND) :: start_timestamp
- character(len=StrKIND) :: iotype
- logical :: streamsExists
integer :: mesh_iotype
- integer :: blockID
- character(len=StrKIND) :: timeStamp
character(len=*), parameter :: subname = 'cam_mpas_subdriver::cam_mpas_init_phase3'
@@ -320,8 +285,6 @@ subroutine cam_mpas_init_phase4(endrun)
real (kind=RKIND), pointer :: dt
- character(len=StrKIND) :: timeStamp
- integer :: i
logical, pointer :: config_do_restart
type (mpas_pool_type), pointer :: state
@@ -390,7 +353,7 @@ subroutine cam_mpas_init_phase4(endrun)
if ( ierr /= 0 ) then
call endrun(subname//': failed to get MPAS_START_TIME')
end if
- call mpas_get_time(startTime, dateTimeString=startTimeStamp)
+ call mpas_get_time(startTime, dateTimeString=startTimeStamp)
call exchange_halo_group(domain_ptr, 'initialization:u')
@@ -436,7 +399,7 @@ end subroutine cam_mpas_init_phase4
!> to reorder the constituents; to allow for mapping of indices between CAM
!> physics and the MPAS-A dycore, this routine returns index mapping arrays
!> mpas_from_cam_cnst and cam_from_mpas_cnst.
- !>
+ !>
!
!-----------------------------------------------------------------------
subroutine cam_mpas_define_scalars(block, mpas_from_cam_cnst, cam_from_mpas_cnst, ierr)
@@ -733,7 +696,7 @@ subroutine cam_mpas_get_global_coords(latCellGlobal, lonCellGlobal, areaCellGlob
use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_dimension, mpas_pool_get_array
use mpas_derived_types, only : mpas_pool_type
use mpas_kind_types, only : RKIND
- use mpas_dmpar, only : mpas_dmpar_sum_int, mpas_dmpar_max_int, mpas_dmpar_max_real_array
+ use mpas_dmpar, only : mpas_dmpar_sum_int, mpas_dmpar_max_real_array
real (kind=RKIND), dimension(:), intent(out) :: latCellGlobal
real (kind=RKIND), dimension(:), intent(out) :: lonCellGlobal
@@ -768,7 +731,7 @@ subroutine cam_mpas_get_global_coords(latCellGlobal, lonCellGlobal, areaCellGlob
allocate(temp(nCellsGlobal), stat=ierr)
if( ierr /= 0 ) call endrun(subname//':failed to allocate temp array')
-
+
!
! latCellGlobal
!
@@ -962,9 +925,12 @@ subroutine cam_mpas_read_static(fh_ini, endrun)
type (field3DReal), pointer :: zb, zb3, deriv_two, cellTangentPlane, coeffs_reconstruct
type (field2DReal), pointer :: edgeNormalVectors, localVerticalUnitVectors, defc_a, defc_b
+ type (field2DReal), pointer :: cell_gradient_coef_x, cell_gradient_coef_y
type (MPAS_Stream_type) :: mesh_stream
+ nullify(cell_gradient_coef_x)
+ nullify(cell_gradient_coef_y)
call MPAS_createStream(mesh_stream, domain_ptr % ioContext, 'not_used', MPAS_IO_NETCDF, MPAS_IO_READ, &
pio_file_desc=fh_ini, ierr=ierr)
@@ -1043,9 +1009,15 @@ subroutine cam_mpas_read_static(fh_ini, endrun)
call mpas_pool_get_field(meshPool, 'edgeNormalVectors', edgeNormalVectors)
call mpas_pool_get_field(meshPool, 'localVerticalUnitVectors', localVerticalUnitVectors)
+
call mpas_pool_get_field(meshPool, 'defc_a', defc_a)
call mpas_pool_get_field(meshPool, 'defc_b', defc_b)
+ if (use_gw_front .or. use_gw_front_igw) then
+ call mpas_pool_get_field(meshPool, 'cell_gradient_coef_x', cell_gradient_coef_x)
+ call mpas_pool_get_field(meshPool, 'cell_gradient_coef_y', cell_gradient_coef_y)
+ endif
+
ierr_total = 0
call MPAS_streamAddField(mesh_stream, latCell, ierr=ierr)
@@ -1177,6 +1149,12 @@ subroutine cam_mpas_read_static(fh_ini, endrun)
if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
call MPAS_streamAddField(mesh_stream, defc_b, ierr=ierr)
if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
+ if (use_gw_front .or. use_gw_front_igw) then
+ call MPAS_streamAddField(mesh_stream, cell_gradient_coef_x, ierr=ierr)
+ if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
+ call MPAS_streamAddField(mesh_stream, cell_gradient_coef_y, ierr=ierr)
+ if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
+ endif
if (ierr_total > 0) then
write(errString, '(a,i0,a)') subname//': FATAL: Failed to add ', ierr_total, ' fields to static input stream.'
@@ -1258,7 +1236,10 @@ subroutine cam_mpas_read_static(fh_ini, endrun)
call MPAS_dmpar_exch_halo_field(localVerticalUnitVectors)
call MPAS_dmpar_exch_halo_field(defc_a)
call MPAS_dmpar_exch_halo_field(defc_b)
-
+ if (use_gw_front .or. use_gw_front_igw) then
+ call MPAS_dmpar_exch_halo_field(cell_gradient_coef_x)
+ call MPAS_dmpar_exch_halo_field(cell_gradient_coef_y)
+ endif
!
! Re-index from global index space to local index space
!
@@ -1347,6 +1328,7 @@ subroutine cam_mpas_setup_restart(fh_rst, restart_stream, direction, endrun)
type (field3DReal), pointer :: zb, zb3, deriv_two, cellTangentPlane, coeffs_reconstruct
type (field2DReal), pointer :: edgeNormalVectors, localVerticalUnitVectors, defc_a, defc_b
+ type (field2DReal), pointer :: cell_gradient_coef_x, cell_gradient_coef_y
type (field0DChar), pointer :: initial_time
type (field0DChar), pointer :: xtime
@@ -1386,6 +1368,8 @@ subroutine cam_mpas_setup_restart(fh_rst, restart_stream, direction, endrun)
type (field1DReal), pointer :: u_init
type (field1DReal), pointer :: qv_init
+ nullify(cell_gradient_coef_x)
+ nullify(cell_gradient_coef_y)
call MPAS_createStream(restart_stream, domain_ptr % ioContext, 'not_used', MPAS_IO_NETCDF, &
direction, pio_file_desc=fh_rst, ierr=ierr)
@@ -1466,7 +1450,10 @@ subroutine cam_mpas_setup_restart(fh_rst, restart_stream, direction, endrun)
call mpas_pool_get_field(allFields, 'localVerticalUnitVectors', localVerticalUnitVectors)
call mpas_pool_get_field(allFields, 'defc_a', defc_a)
call mpas_pool_get_field(allFields, 'defc_b', defc_b)
-
+ if (use_gw_front .or. use_gw_front_igw) then
+ call mpas_pool_get_field(allFields, 'cell_gradient_coef_x', cell_gradient_coef_x)
+ call mpas_pool_get_field(allFields, 'cell_gradient_coef_y', cell_gradient_coef_y)
+ endif
call mpas_pool_get_field(allFields, 'initial_time', initial_time, timeLevel=1)
call mpas_pool_get_field(allFields, 'xtime', xtime, timeLevel=1)
call mpas_pool_get_field(allFields, 'u', u, timeLevel=1)
@@ -1636,7 +1623,12 @@ subroutine cam_mpas_setup_restart(fh_rst, restart_stream, direction, endrun)
if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
call MPAS_streamAddField(restart_stream, defc_b, ierr=ierr)
if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
-
+ if (use_gw_front .or. use_gw_front_igw) then
+ call MPAS_streamAddField(restart_stream, cell_gradient_coef_x, ierr=ierr)
+ if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
+ call MPAS_streamAddField(restart_stream, cell_gradient_coef_y, ierr=ierr)
+ if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
+ endif
call MPAS_streamAddField(restart_stream, initial_time, ierr=ierr)
if (ierr /= MPAS_STREAM_NOERR) ierr_total = ierr_total + 1
call MPAS_streamAddField(restart_stream, xtime, ierr=ierr)
@@ -1754,8 +1746,6 @@ end subroutine cam_mpas_setup_restart
!-----------------------------------------------------------------------
subroutine cam_mpas_read_restart(restart_stream, endrun)
- use pio, only : file_desc_t
-
use mpas_io_streams, only : MPAS_readStream, MPAS_closeStream
use mpas_derived_types, only : MPAS_Stream_type, MPAS_pool_type, MPAS_STREAM_NOERR
use mpas_pool_routines, only : MPAS_pool_create_pool, MPAS_pool_destroy_pool, MPAS_pool_add_config
@@ -1847,7 +1837,10 @@ subroutine cam_mpas_read_restart(restart_stream, endrun)
call cam_mpas_update_halo('localVerticalUnitVectors', endrun)
call cam_mpas_update_halo('defc_a', endrun)
call cam_mpas_update_halo('defc_b', endrun)
-
+ if (use_gw_front .or. use_gw_front_igw) then
+ call cam_mpas_update_halo('cell_gradient_coef_x', endrun)
+ call cam_mpas_update_halo('cell_gradient_coef_y', endrun)
+ endif
call cam_mpas_update_halo('u', endrun)
call cam_mpas_update_halo('w', endrun)
call cam_mpas_update_halo('rho_zz', endrun)
@@ -1923,8 +1916,6 @@ end subroutine cam_mpas_read_restart
!-----------------------------------------------------------------------
subroutine cam_mpas_write_restart(restart_stream, endrun)
- use pio, only : file_desc_t
-
use mpas_io_streams, only : MPAS_writeStream, MPAS_closeStream
use mpas_derived_types, only : MPAS_Stream_type, MPAS_pool_type, MPAS_STREAM_NOERR
use mpas_pool_routines, only : MPAS_pool_create_pool, MPAS_pool_destroy_pool, MPAS_pool_add_config
@@ -2273,17 +2264,17 @@ subroutine cam_mpas_run(integrationLength)
runUntilTime = currTime + integrationLength
do while (currTime < runUntilTime)
- call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
+ call mpas_get_time(curr_time=currTime, dateTimeString=timeStamp, ierr=ierr)
call mpas_log_write('Dynamics timestep beginning at '//trim(timeStamp))
call mpas_timer_start('time integration')
call atm_do_timestep(domain_ptr, dt, itimestep)
- call mpas_timer_stop('time integration')
+ call mpas_timer_stop('time integration')
! Move time level 2 fields back into time level 1 for next time step
call mpas_pool_get_subpool(domain_ptr % blocklist % structs, 'state', state)
call mpas_pool_shift_time_levels(state)
-
+
! Advance clock before writing output
itimestep = itimestep + 1
call mpas_advance_clock(clock)
@@ -2382,11 +2373,10 @@ subroutine cam_mpas_debug_stream(domain, filename, timeLevel)
use mpas_derived_types, only : MPAS_IO_WRITE, MPAS_IO_NETCDF, MPAS_STREAM_NOERR, MPAS_Stream_type, MPAS_pool_type, &
field0DReal, field1DReal, field2DReal, field3DReal, field4DReal, field5DReal, &
field1DInteger, field2DInteger, field3DInteger
- use mpas_pool_routines, only : MPAS_pool_get_subpool, MPAS_pool_get_field, MPAS_pool_create_pool, MPAS_pool_destroy_pool, &
- MPAS_pool_add_config
+ use mpas_pool_routines, only : MPAS_pool_get_field
use mpas_derived_types, only : MPAS_Pool_iterator_type, MPAS_POOL_FIELD, MPAS_POOL_REAL, MPAS_POOL_INTEGER
- use mpas_pool_routines, only : mpas_pool_begin_iteration, mpas_pool_get_next_member, mpas_pool_get_config
+ use mpas_pool_routines, only : mpas_pool_begin_iteration, mpas_pool_get_next_member
type (domain_type), intent(inout) :: domain
character(len=*), intent(in) :: filename
diff --git a/src/dynamics/mpas/dyn_comp.F90 b/src/dynamics/mpas/dyn_comp.F90
index 6f4b822971..a1b02c1f86 100644
--- a/src/dynamics/mpas/dyn_comp.F90
+++ b/src/dynamics/mpas/dyn_comp.F90
@@ -3,7 +3,7 @@ module dyn_comp
! CAM component interfaces to the MPAS Dynamical Core
use shr_kind_mod, only: r8=>shr_kind_r8
-use spmd_utils, only: iam, masterproc, mpicom, npes
+use spmd_utils, only: masterproc, mpicom, npes
use physconst, only: pi, gravit, rair, cpair
use pmgrid, only: plev, plevp
@@ -13,23 +13,18 @@ module dyn_comp
use cam_control_mod, only: initial_run
use cam_initfiles, only: initial_file_get_id, topo_file_get_id
-use cam_grid_support, only: cam_grid_id, cam_grid_get_gcid, &
- cam_grid_dimensions, cam_grid_get_dim_names, &
- cam_grid_get_latvals, cam_grid_get_lonvals, &
- max_hcoordname_len
+use cam_grid_support, only: cam_grid_id, &
+ cam_grid_get_latvals, cam_grid_get_lonvals
use cam_map_utils, only: iMap
use inic_analytic, only: analytic_ic_active, dyn_set_inic_col
use dyn_tests_utils, only: vcoord=>vc_height
-use cam_history, only: add_default, horiz_only, register_vector_field, &
- outfld, hist_fld_active
-use cam_history_support, only: max_fieldname_len
+use cam_history, only: addfld, horiz_only
use string_utils, only: date2yyyymmdd, sec2hms, int2str
use ncdio_atm, only: infld
-use pio, only: file_desc_t, pio_seterrorhandling, PIO_BCAST_ERROR, &
- pio_inq_dimid, pio_inq_dimlen, PIO_NOERR
+use pio, only: file_desc_t
use cam_pio_utils, only: clean_iodesc_list
use time_manager, only: get_start_date, get_stop_date, get_run_duration, &
@@ -43,6 +38,8 @@ module dyn_comp
use cam_budget, only: cam_budget_em_snapshot, cam_budget_em_register
+use phys_control, only: use_gw_front, use_gw_front_igw
+
implicit none
private
save
@@ -197,6 +194,24 @@ module dyn_comp
real(r8), dimension(:), pointer :: fzm ! Interp weight from k layer midpoint to k layer
! interface [dimensionless] (nver)
real(r8), dimension(:), pointer :: fzp ! Interp weight from k-1 layer midpoint to k
+ !
+ ! Invariant -- needed for computing the frontogenesis function
+ !
+ real(r8), dimension(:,:), pointer :: defc_a
+ real(r8), dimension(:,:), pointer :: defc_b
+ real(r8), dimension(:,:), pointer :: cell_gradient_coef_x
+ real(r8), dimension(:,:), pointer :: cell_gradient_coef_y
+ real(r8), dimension(:,:), pointer :: edgesOnCell_sign
+ real(r8), dimension(:), pointer :: dvEdge
+ real(r8), dimension(:), pointer :: areaCell ! cell area (m^2)
+
+ integer, dimension(:,:), pointer :: edgesOnCell
+ integer, dimension(:,:), pointer :: cellsOnEdge
+ integer, dimension(:), pointer :: nEdgesOnCell
+
+ real(r8), dimension(:,:), pointer :: utangential ! velocity tangent to cell edge,
+ ! diagnosed by mpas
+
!
! State that may be directly derived from dycore prognostic state
!
@@ -215,6 +230,10 @@ module dyn_comp
! (nver,ncol)
end type dyn_export_t
+! Frontogenesis indices
+integer, public :: frontgf_idx = -1
+integer, public :: frontga_idx = -1
+
real(r8), parameter :: rad2deg = 180.0_r8 / pi
real(r8), parameter :: deg2rad = pi / 180.0_r8
@@ -246,11 +265,8 @@ subroutine dyn_readnl(NLFileName)
character(len=*), intent(in) :: NLFileName
! Local variables
- integer :: ierr
integer, dimension(2) :: logUnits ! stdout and stderr for MPAS logging
integer :: yr, mon, day, tod, ndate, nday, nsec
- character(len=10) :: date_str
- character(len=8) :: tod_str
character(len=*), parameter :: subname = 'dyn_comp:dyn_readnl'
!----------------------------------------------------------------------------
@@ -294,8 +310,16 @@ subroutine dyn_register()
use physics_buffer, only: pbuf_add_field, dtype_r8
use ppgrid, only: pcols, pver
+ use phys_control, only: use_gw_front, use_gw_front_igw
!----------------------------------------------------------------------------
+ ! These fields are computed by the dycore and passed to the physics via the
+ ! physics buffer.
+
+ if (use_gw_front .or. use_gw_front_igw) then
+ call pbuf_add_field("FRONTGF", "global", dtype_r8, (/pcols,pver/), frontgf_idx)
+ call pbuf_add_field("FRONTGA", "global", dtype_r8, (/pcols,pver/), frontga_idx)
+ end if
end subroutine dyn_register
@@ -315,9 +339,7 @@ subroutine dyn_init(dyn_in, dyn_out)
use mpas_derived_types, only : mpas_pool_type
use mpas_constants, only : mpas_constants_compute_derived
use dyn_tests_utils, only : vc_dycore, vc_height, string_vc, vc_str_lgth
- use constituents, only : cnst_get_ind
- use phys_control, only: phys_getopts
- use cam_budget, only: thermo_budget_history
+ use cam_budget, only : thermo_budget_history
! arguments:
type(dyn_import_t), intent(inout) :: dyn_in
@@ -475,6 +497,24 @@ subroutine dyn_init(dyn_in, dyn_out)
dyn_out % ux => dyn_in % ux
dyn_out % uy => dyn_in % uy
+ ! for frontogenesis calc
+
+ if (use_gw_front .or. use_gw_front_igw) then
+ dyn_out % areaCell => dyn_in % areaCell
+ dyn_out % cellsOnEdge => dyn_in % cellsOnEdge
+ call mpas_pool_get_array(mesh_pool, 'defc_a', dyn_out % defc_a)
+ call mpas_pool_get_array(mesh_pool, 'defc_b', dyn_out % defc_b)
+ call mpas_pool_get_array(mesh_pool, 'cell_gradient_coef_x', dyn_out % cell_gradient_coef_x)
+ call mpas_pool_get_array(mesh_pool, 'cell_gradient_coef_y', dyn_out % cell_gradient_coef_y)
+ call mpas_pool_get_array(mesh_pool, 'edgesOnCell_sign', dyn_out % edgesOnCell_sign)
+ call mpas_pool_get_array(mesh_pool, 'dvEdge', dyn_out % dvEdge)
+ call mpas_pool_get_array(mesh_pool, 'edgesOnCell', dyn_out % edgesOnCell)
+ call mpas_pool_get_array(mesh_pool, 'nEdgesOnCell', dyn_out % nEdgesOnCell)
+ call mpas_pool_get_array(diag_pool, 'v', dyn_out % utangential)
+ endif
+
+ ! cam-required hydrostatic pressures
+
allocate(dyn_out % pmiddry(nVertLevels, nCells), stat=ierr)
if( ierr /= 0 ) call endrun(subname//': failed to allocate dyn_out%pmiddry array')
@@ -551,7 +591,7 @@ subroutine dyn_init(dyn_in, dyn_out)
call cam_budget_em_register('dEdt_phys_total_in_dyn','dAM','dBF',pkgtype='dyn',optype='dif', &
longname="dE/dt physics total in dycore (phys) (dAM-dBF)")
end if
-
+
!
! initialize CAM thermodynamic infrastructure
!
@@ -576,9 +616,9 @@ subroutine dyn_init(dyn_in, dyn_out)
m,thermodynamic_active_species_ice_idx_dycore(m)
end if
end do
-
+
end subroutine dyn_init
-
+
!=========================================================================================
subroutine dyn_run(dyn_in, dyn_out)
@@ -713,7 +753,7 @@ subroutine read_inidat(dyn_in)
use cam_mpas_subdriver, only : domain_ptr, cam_mpas_update_halo, cam_mpas_cell_to_edge_winds
use cam_initfiles, only : scale_dry_air_mass
- use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_array, mpas_pool_get_config
+ use mpas_pool_routines, only : mpas_pool_get_subpool, mpas_pool_get_array
use mpas_derived_types, only : mpas_pool_type
use mpas_vector_reconstruction, only : mpas_reconstruct
use mpas_constants, only : Rv_over_Rd => rvord
@@ -771,7 +811,6 @@ subroutine read_inidat(dyn_in)
real(r8), allocatable :: qv(:), tm(:)
- real(r8) :: dz, h
logical :: readvar
character(len=shr_kind_cx) :: str
@@ -1265,7 +1304,7 @@ subroutine cam_mpas_namelist_read(namelistFilename, configPool)
! if no errors were encountered, all MPI ranks have valid namelists in their configPool.
use spmd_utils, only: mpicom, masterproc, masterprocid, &
- mpi_integer, mpi_real8, mpi_logical, mpi_character, mpi_success
+ mpi_integer, mpi_real8, mpi_logical, mpi_character
use namelist_utils, only: find_group_name
use mpas_derived_types, only: mpas_pool_type
@@ -1719,7 +1758,6 @@ subroutine set_dry_mass(dyn_in, target_avg_dry_surface_pressure)
real(r8) :: preliminary_avg_dry_surface_pressure, scaled_avg_dry_surface_pressure
real(r8) :: scaling_ratio
real(r8) :: sphere_surface_area
- real(r8) :: surface_integral, test_value
integer :: ixqv,ierr
diff --git a/src/dynamics/mpas/dyn_grid.F90 b/src/dynamics/mpas/dyn_grid.F90
index 104524a3c9..d0b53c5fa0 100644
--- a/src/dynamics/mpas/dyn_grid.F90
+++ b/src/dynamics/mpas/dyn_grid.F90
@@ -130,7 +130,7 @@ subroutine dyn_grid_init()
! MPAS-A always requires at least one scalar (qv). CAM has the same requirement
! and it is enforced by the configure script which sets the cpp macrop PCNST.
- call cam_mpas_init_phase3(fh_ini, pcnst, endrun)
+ call cam_mpas_init_phase3(fh_ini, pcnst)
! Read or compute all time-invariant fields for the MPAS-A dycore
! Time-invariant fields are stored in the MPAS mesh pool. This call