From 1071b6709f2c3263b848e3e7afc7e46bcff886bd Mon Sep 17 00:00:00 2001 From: pbd562 Date: Mon, 22 Aug 2016 04:02:21 +0000 Subject: [PATCH 01/52] Import from /short/p66/dhb599/ACCESS-CM2/submodels/cice_GC3_GA7_hxyo/ git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_GC3_GA7@357 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- bld/Macros.Darwin.CPOM.mi2 | 52 + bld/Macros.Darwin.LANL.bigsuze | 61 + bld/Macros.Linux.LANL.conejo | 68 + bld/Macros.Linux.NCAR.yellowstone | 78 + bld/Macros.Linux.ORNL.jaguar | 40 + bld/Macros.Linux.ORNL.lens | 66 + bld/Macros.Linux.ORNL.titan | 62 + bld/Macros.Linux.SIO.uhu | 65 + bld/Macros.Linux.Ubuntu | 41 + bld/Macros.Linux.raijin-182 | 97 + bld/Macros.Linux.raijin-183 | 1 + bld/Macros.Linux.raijin-185 | 97 + bld/Macros.Linux.raijin.nci.org.au | 79 + bld/Macros.Linux.raijin.nci.org.au-mct | 97 + bld/Macros.Linux.raijin.nci.org.au-mct_bk | 101 + bld/Makefile | 169 + bld/Makefile.Darwin | 169 + bld/Makefile.UNICOS | 167 + bld/Makefile.std | 169 + bld/makdep.c | 446 ++ compile/comp_access-cm1440-185_ac330 | 238 + compile/comp_access-cm1440-185_r47 | 238 + compile/comp_access-cm360-185 | 221 + compile/environs.raijin-185 | 13 + csm_share/README | 10 + csm_share/shr_orb_mod.F90 | 671 ++ drivers/access/CICE.F90 | 104 + drivers/access/CICE_FinalMod.F90 | 103 + drivers/access/CICE_InitMod.F90 | 467 ++ drivers/access/CICE_RunMod.F90 | 776 ++ drivers/access/CICE_RunMod.F90-new_incomplete | 737 ++ drivers/access/CICE_RunMod.F90_debug | 552 ++ drivers/access/README.txt | 10 + drivers/access/cpl_arrays_setup.F90 | 176 + drivers/access/cpl_forcing_handler.F90 | 1685 ++++ drivers/access/cpl_interface.F90 | 1 + drivers/access/cpl_interface.F90_ggather | 2063 +++++ drivers/access/cpl_interface.F90_maice_bad | 2081 +++++ drivers/access/cpl_interface.F90_uphalo | 1819 +++++ drivers/access/cpl_netcdf_setup.F90 | 367 + drivers/access/cpl_parameters.F90 | 215 + drivers/access/ice_constants.F90 | 243 + drivers/access/ice_coupling.F90 | 482 ++ drivers/access/xxxx.f90 | 1446 ++++ io_netcdf/ice_history_write.F90 | 1308 ++++ io_netcdf/ice_restart.F90 | 563 ++ mpi/ice_boundary.F90 | 6791 +++++++++++++++++ mpi/ice_broadcast.F90 | 745 ++ mpi/ice_communicate.F90 | 216 + mpi/ice_exit.F90 | 75 + mpi/ice_gather_scatter.F90 | 2739 +++++++ mpi/ice_global_reductions.F90 | 2374 ++++++ mpi/ice_timers.F90 | 725 ++ source/ice_aerosol.F90 | 736 ++ source/ice_age.F90 | 134 + source/ice_algae.F90 | 1264 +++ source/ice_atmo.F90 | 1005 +++ source/ice_blocks.F90 | 872 +++ source/ice_brine.F90 | 828 ++ source/ice_calendar.F90 | 686 ++ source/ice_diagnostics.F90 | 1601 ++++ source/ice_distribution.F90 | 1964 +++++ source/ice_domain.F90 | 506 ++ source/ice_domain_size.F90 | 76 + source/ice_dyn_eap.F90 | 2008 +++++ source/ice_dyn_evp.F90 | 848 ++ source/ice_dyn_shared.F90 | 914 +++ source/ice_fileunits.F90 | 299 + source/ice_firstyear.F90 | 173 + source/ice_flux.F90 | 1004 +++ source/ice_forcing.F90 | 3992 ++++++++++ source/ice_grid.F90 | 2220 ++++++ source/ice_history.F90 | 2088 +++++ source/ice_history.F90_spo | 2092 +++++ source/ice_history_bgc.F90 | 682 ++ source/ice_history_drag.F90 | 286 + source/ice_history_mechred.F90 | 377 + source/ice_history_pond.F90 | 356 + source/ice_history_shared.F90 | 838 ++ source/ice_history_shared.F90_spo | 899 +++ source/ice_init.F90 | 1754 +++++ source/ice_itd.F90 | 2632 +++++++ source/ice_kinds_mod.F90 | 30 + source/ice_lvl.F90 | 110 + source/ice_mechred.F90 | 2275 ++++++ source/ice_meltpond_cesm.F90 | 261 + source/ice_meltpond_lvl.F90 | 484 ++ 
source/ice_meltpond_topo.F90 | 967 +++ source/ice_ocean.F90 | 233 + source/ice_orbital.F90 | 145 + source/ice_read_write.F90 | 2092 +++++ source/ice_restart_driver.F90 | 881 +++ source/ice_restart_shared.F90 | 63 + source/ice_restoring.F90 | 705 ++ source/ice_shortwave.F90 | 3935 ++++++++++ source/ice_spacecurve.F90 | 1757 +++++ source/ice_state.F90 | 244 + source/ice_step_mod.F90 | 1533 ++++ source/ice_therm_0layer.F90 | 470 ++ source/ice_therm_bl99.F90 | 2042 +++++ source/ice_therm_itd.F90 | 1847 +++++ source/ice_therm_mushy.F90 | 3925 ++++++++++ source/ice_therm_shared.F90 | 282 + source/ice_therm_vertical.F90 | 2696 +++++++ source/ice_transport_driver.F90 | 1694 ++++ source/ice_transport_remap.F90 | 3735 +++++++++ source/ice_zbgc.F90 | 1078 +++ source/ice_zbgc_shared.F90 | 335 + 108 files changed, 99332 insertions(+) create mode 100755 bld/Macros.Darwin.CPOM.mi2 create mode 100755 bld/Macros.Darwin.LANL.bigsuze create mode 100755 bld/Macros.Linux.LANL.conejo create mode 100755 bld/Macros.Linux.NCAR.yellowstone create mode 100755 bld/Macros.Linux.ORNL.jaguar create mode 100644 bld/Macros.Linux.ORNL.lens create mode 100644 bld/Macros.Linux.ORNL.titan create mode 100644 bld/Macros.Linux.SIO.uhu create mode 100644 bld/Macros.Linux.Ubuntu create mode 100644 bld/Macros.Linux.raijin-182 create mode 120000 bld/Macros.Linux.raijin-183 create mode 100644 bld/Macros.Linux.raijin-185 create mode 100644 bld/Macros.Linux.raijin.nci.org.au create mode 100644 bld/Macros.Linux.raijin.nci.org.au-mct create mode 100644 bld/Macros.Linux.raijin.nci.org.au-mct_bk create mode 100644 bld/Makefile create mode 100644 bld/Makefile.Darwin create mode 100755 bld/Makefile.UNICOS create mode 100644 bld/Makefile.std create mode 100755 bld/makdep.c create mode 100755 compile/comp_access-cm1440-185_ac330 create mode 100755 compile/comp_access-cm1440-185_r47 create mode 100755 compile/comp_access-cm360-185 create mode 100755 compile/environs.raijin-185 create mode 100644 csm_share/README create mode 100644 csm_share/shr_orb_mod.F90 create mode 100644 drivers/access/CICE.F90 create mode 100644 drivers/access/CICE_FinalMod.F90 create mode 100644 drivers/access/CICE_InitMod.F90 create mode 100644 drivers/access/CICE_RunMod.F90 create mode 100644 drivers/access/CICE_RunMod.F90-new_incomplete create mode 100644 drivers/access/CICE_RunMod.F90_debug create mode 100644 drivers/access/README.txt create mode 100644 drivers/access/cpl_arrays_setup.F90 create mode 100644 drivers/access/cpl_forcing_handler.F90 create mode 120000 drivers/access/cpl_interface.F90 create mode 100644 drivers/access/cpl_interface.F90_ggather create mode 100644 drivers/access/cpl_interface.F90_maice_bad create mode 100644 drivers/access/cpl_interface.F90_uphalo create mode 100644 drivers/access/cpl_netcdf_setup.F90 create mode 100644 drivers/access/cpl_parameters.F90 create mode 100644 drivers/access/ice_constants.F90 create mode 100644 drivers/access/ice_coupling.F90 create mode 100644 drivers/access/xxxx.f90 create mode 100644 io_netcdf/ice_history_write.F90 create mode 100644 io_netcdf/ice_restart.F90 create mode 100644 mpi/ice_boundary.F90 create mode 100644 mpi/ice_broadcast.F90 create mode 100644 mpi/ice_communicate.F90 create mode 100644 mpi/ice_exit.F90 create mode 100644 mpi/ice_gather_scatter.F90 create mode 100644 mpi/ice_global_reductions.F90 create mode 100644 mpi/ice_timers.F90 create mode 100755 source/ice_aerosol.F90 create mode 100755 source/ice_age.F90 create mode 100755 source/ice_algae.F90 create mode 100755 source/ice_atmo.F90 create 
mode 100755 source/ice_blocks.F90 create mode 100755 source/ice_brine.F90 create mode 100755 source/ice_calendar.F90 create mode 100755 source/ice_diagnostics.F90 create mode 100755 source/ice_distribution.F90 create mode 100755 source/ice_domain.F90 create mode 100755 source/ice_domain_size.F90 create mode 100755 source/ice_dyn_eap.F90 create mode 100755 source/ice_dyn_evp.F90 create mode 100755 source/ice_dyn_shared.F90 create mode 100755 source/ice_fileunits.F90 create mode 100755 source/ice_firstyear.F90 create mode 100755 source/ice_flux.F90 create mode 100755 source/ice_forcing.F90 create mode 100755 source/ice_grid.F90 create mode 100755 source/ice_history.F90 create mode 100755 source/ice_history.F90_spo create mode 100755 source/ice_history_bgc.F90 create mode 100755 source/ice_history_drag.F90 create mode 100755 source/ice_history_mechred.F90 create mode 100755 source/ice_history_pond.F90 create mode 100755 source/ice_history_shared.F90 create mode 100755 source/ice_history_shared.F90_spo create mode 100755 source/ice_init.F90 create mode 100755 source/ice_itd.F90 create mode 100755 source/ice_kinds_mod.F90 create mode 100755 source/ice_lvl.F90 create mode 100755 source/ice_mechred.F90 create mode 100755 source/ice_meltpond_cesm.F90 create mode 100755 source/ice_meltpond_lvl.F90 create mode 100755 source/ice_meltpond_topo.F90 create mode 100755 source/ice_ocean.F90 create mode 100755 source/ice_orbital.F90 create mode 100755 source/ice_read_write.F90 create mode 100755 source/ice_restart_driver.F90 create mode 100755 source/ice_restart_shared.F90 create mode 100755 source/ice_restoring.F90 create mode 100755 source/ice_shortwave.F90 create mode 100755 source/ice_spacecurve.F90 create mode 100755 source/ice_state.F90 create mode 100755 source/ice_step_mod.F90 create mode 100755 source/ice_therm_0layer.F90 create mode 100755 source/ice_therm_bl99.F90 create mode 100755 source/ice_therm_itd.F90 create mode 100755 source/ice_therm_mushy.F90 create mode 100755 source/ice_therm_shared.F90 create mode 100755 source/ice_therm_vertical.F90 create mode 100755 source/ice_transport_driver.F90 create mode 100755 source/ice_transport_remap.F90 create mode 100755 source/ice_zbgc.F90 create mode 100755 source/ice_zbgc_shared.F90 diff --git a/bld/Macros.Darwin.CPOM.mi2 b/bld/Macros.Darwin.CPOM.mi2 new file mode 100755 index 00000000..2dd4cf00 --- /dev/null +++ b/bld/Macros.Darwin.CPOM.mi2 @@ -0,0 +1,52 @@ +#============================================================================== +# Makefile macros for mi2 - UNIX Cluster at CPOM +# David Schroeder (7 JAN 2011) +#============================================================================== + +INCLDIR := -I. 
-I/usr/include -I/usr/local/bin +SLIBS := -L/usr/local/lib -L/usr/lib +ULIBS := +CPP := /usr/bin/cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +CFLAGS := -c -O2 +ifeq ($(COMMDIR), mpi) + FC := /usr/local/bin/mpif90 +else + FC := gfortran +endif +FIXEDFLAGS := -132 +FREEFLAGS := +FFLAGS := -r8 -i4 -O2 -align all -w -ftz -assume byterecl -fpe0 -traceback -g +#FFLAGS := -O3 -w +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -g +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -traceback +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) \ + -DNICELYR=$(NICELYR) -DNSNWLYR=$(NSNWLYR) -DNICECAT=$(NICECAT) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) -DTRPND=$(TRPND) \ + -DTRBRI=$(TRBRI) -DNTRAERO=$(NTRAERO) -DNBGCLYR=$(NBGCLYR) \ + -DTRBGCS=$(TRBGCS) -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) + +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := -I/opt/netcdf/include/ $(INCLDIR) + SLIBS := /opt/netcdf/lib/libnetcdf.a -lcurl $(SLIBS) +# INCLDIR := $(INCLDIR) -I/usr/projects/climate/bzhao/netcdf-3.6.1/include +# SLIBS := $(SLIBS) -L/usr/projects/climate/bzhao/netcdf-3.6.1/lib -lnetcdf +endif + +### note this file does not include PIO capability +### see Macros.Linux.LANL.conejo + diff --git a/bld/Macros.Darwin.LANL.bigsuze b/bld/Macros.Darwin.LANL.bigsuze new file mode 100755 index 00000000..7beb3c2f --- /dev/null +++ b/bld/Macros.Darwin.LANL.bigsuze @@ -0,0 +1,61 @@ +#============================================================================== +# Makefile macros for "conejo," Linux cluster at LANL +#============================================================================== + +INCLDIR := -I. 
-I/usr/include +SLIBS := -L/usr/lib +ULIBS := +CPP := /usr/bin/cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +CFLAGS := -c -O2 +ifeq ($(COMMDIR), mpi) + FC := mpif90 +else + FC := gfortran +endif +FIXEDFLAGS := -132 +FREEFLAGS := +#FFLAGS := -O0 -w -g -Wall -finit-real=nan -ffree-line-length-none -fconvert=big-endian -pedantic -std=gnu +#FFLAGS := -O0 -w -g -finit-real=nan -ffree-line-length-none -fconvert=big-endian -pedantic -Wunused +FFLAGS := -O2 -ffree-line-length-none -fconvert=big-endian -finit-real=nan +#FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -g +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -traceback +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -g +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) \ + -DNICELYR=$(NICELYR) -DNSNWLYR=$(NSNWLYR) -DNICECAT=$(NICECAT) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) -DTRPND=$(TRPND) \ + -DTRBRI=$(TRBRI) -DNTRAERO=$(NTRAERO) -DNBGCLYR=$(NBGCLYR) \ + -DTRBGCS=$(TRBGCS) -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := $(INCLDIR) -I/Users/akt/Work/libraries/netcdf/netcdf/include + SLIBS := $(SLIBS) -L/Users/akt/Work/libraries/netcdf/netcdf/lib -lnetcdff +endif + +### if using parallel I/O, load all 3 libraries. PIO must be first! +ifeq ($(IO_TYPE), pio) + INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/pio-1.4.0 + SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/pio-1.4.0 -lpio + + INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/parallel-netcdf-1.2.0/include + SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/parallel-netcdf-1.2.0/lib -lpnetcdf + + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/netcdf-3.6.3/include + SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/netcdf-3.6.3/lib -lnetcdf + +endif + diff --git a/bld/Macros.Linux.LANL.conejo b/bld/Macros.Linux.LANL.conejo new file mode 100755 index 00000000..db07a345 --- /dev/null +++ b/bld/Macros.Linux.LANL.conejo @@ -0,0 +1,68 @@ +#============================================================================== +# Makefile macros for "conejo," Linux cluster at LANL +#============================================================================== + +INCLDIR := -I. 
-I/usr/include +SLIBS := -L/usr/lib +ULIBS := +CPP := /usr/bin/cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +CFLAGS := -c -O2 +ifeq ($(COMMDIR), mpi) + FC := mpif90 +else + FC := ifort +endif +FIXEDFLAGS := -132 +FREEFLAGS := +FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -fp-model precise +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fp-model precise -g +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -traceback +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -check uninit + +ifeq ($(THRD), yes) + FFLAGS := $(FFLAGS) -openmp +#cesm CPPDEFS := $(CPPDEFS) -DTHREADED_OMP +endif + +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) \ + -DNICELYR=$(NICELYR) -DNSNWLYR=$(NSNWLYR) -DNICECAT=$(NICECAT) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) -DTRPND=$(TRPND) \ + -DTRBRI=$(TRBRI) -DNTRAERO=$(NTRAERO) -DNBGCLYR=$(NBGCLYR) \ + -DTRBGCS=$(TRBGCS) -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif +ifeq ($(BARRIERS), yes) + CPPDEFS := $(CPPDEFS) -Dgather_scatter_barrier +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf +# INCLDIR := $(INCLDIR) $(NETCDF)/include +# SLIBS := $(SLIBS) $(NETCDF)/lib -lnetcdf + INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/netcdf/3.6.3/intel-13.0.1/include + SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/netcdf/3.6.3/intel-13.0.1/lib -lnetcdf +endif + +### if using parallel I/O, load all 3 libraries. PIO must be first! 
+ifeq ($(IO_TYPE), pio) + INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/pio/1.7.2/intel-13.0.1/openmpi-1.6.3/netcdf-3.6.3-parallel-netcdf-1.3.1/include + SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/pio/1.7.2/intel-13.0.1/openmpi-1.6.3/netcdf-3.6.3-parallel-netcdf-1.3.1/lib -lpio + + INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/parallel-netcdf/1.3.1/intel-13.0.1/openmpi-1.6.3/include + SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/parallel-netcdf/1.3.1/intel-13.0.1/openmpi-1.6.3/lib -lpnetcdf + + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/netcdf/3.6.3/intel-13.0.1/include + SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/netcdf/3.6.3/intel-13.0.1/lib -lnetcdf + +endif diff --git a/bld/Macros.Linux.NCAR.yellowstone b/bld/Macros.Linux.NCAR.yellowstone new file mode 100755 index 00000000..c37b1aac --- /dev/null +++ b/bld/Macros.Linux.NCAR.yellowstone @@ -0,0 +1,78 @@ +#============================================================================== +# Makefile macros for "yellowstone" an NCAR Linux Cluster +#============================================================================== +# For use with intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 -DHAVE_F2008_CONTIGUOUS -DLINUX -DCPRINTEL +CFLAGS := -c -O2 -fp-model precise -xHost + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -O2 -fp-model precise -convert big_endian -assume byterecl -ftz -traceback -xHost +FFLAGS_NOOPT:= -O0 + +ifeq ($(COMMDIR), mpi) + FC := mpif90 +else + FC := ifort +endif + +MPICC:= mpicc + +MPIFC:= mpif90 +LD:= $(MPIFC) + +NETCDF_PATH := $(NETCDF) + +PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs + +#PNETCDF_PATH := $(PNETCDF) +PNETCDF_PATH := /glade/apps/opt/pnetcdf/1.3.0/intel/default + +LAPACK_LIBDIR := /glade/apps/opt/lapack/3.4.2/intel/12.1.5/lib + +INCLDIR := $(INCLDIR) + +LIB_NETCDF := $(NETCDF_PATH)/lib +LIB_PNETCDF := $(PNETCDF_PATH)/lib +LIB_MPI := $(IMPILIBDIR) + +SLIBS := -L$(LIB_NETCDF) -lnetcdf -L$(LIB_PNETCDF) -lpnetcdf -L$(LAPACK_LIBDIR) -llapack -lblas + +SCC:= icc + +SFC:= ifort + +# CPPDEFS := $(CPPDEFS) -Dfcd_coupled -Dcoupled + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) \ + -DNICELYR=$(NICELYR) -DNSNWLYR=$(NSNWLYR) -DNICECAT=$(NICECAT) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) -DTRPND=$(TRPND) \ + -DTRBRI=$(TRBRI) -DNTRAERO=$(NTRAERO) -DNBGCLYR=$(NBGCLYR) \ + -DTRBGCS=$(TRBGCS) -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) + +ifeq ($(compile_threaded), true) + LDFLAGS += -openmp + CFLAGS += -openmp + FFLAGS += -openmp +endif + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +### if using parallel I/O, load all 3 libraries. PIO must be first! 
+ifeq ($(IO_TYPE), pio) + PIO_PATH:=/glade/u/home/jedwards/pio1_6_5/pio + INCLDIR += -I$(PIO_PATH) + SLIBS := $(SLIBS) -L$(PIO_PATH) -lpio + + CPPDEFS := $(CPPDEFS) -Dncdf +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf +endif diff --git a/bld/Macros.Linux.ORNL.jaguar b/bld/Macros.Linux.ORNL.jaguar new file mode 100755 index 00000000..e605b4ef --- /dev/null +++ b/bld/Macros.Linux.ORNL.jaguar @@ -0,0 +1,40 @@ +#============================================================================== +# Makefile macros for "jaguar," an ORNL Cray XT3 running Linux +#============================================================================== + +INCLDIR := -I. +SLIBS := +ULIBS := +CPP := /usr/bin/cpp +CPPFLAGS := -P -traditional-cpp -DPOSIX +CPPDEFS := -DLINUX +CFLAGS := -c -fastsse -tp k8-64 +FIXEDFLAGS := -Mfixed +FREEFLAGS := -Mfree +FC := ftn +#FFLAGS := $(INCLDIR) -O3 -byteswapio +FFLAGS := $(INCLDIR) -g -O3 -byteswapio -fastsse -tp k8-64 -Knoieee -Mextend -Mdaz +#FFLAGS := $(INCLDIR) -O3 -byteswapio -Ktrap=inv,divz,ovf,unf +#FFLAGS := $(INCLDIR) -O3 -byteswapio -Mflushz +MOD_SUFFIX := mod +LD := ftn +LDFLAGS := + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(COMMDIR), mpi) +# SLIBS := $(SLIBS) -lmpich +endif + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := $(INCLDIR) -I/apps/netcdf/prod/jaguar/include + SLIBS := $(SLIBS) -L/apps/netcdf/prod/jaguar/lib -lnetcdf +endif + diff --git a/bld/Macros.Linux.ORNL.lens b/bld/Macros.Linux.ORNL.lens new file mode 100644 index 00000000..2246bc2d --- /dev/null +++ b/bld/Macros.Linux.ORNL.lens @@ -0,0 +1,66 @@ +#============================================================================== +# Makefile macros for "conejo," Linux cluster at LANL +#============================================================================== + +INCLDIR := -I. 
+SLIBS := +ULIBS := +CPP := /usr/bin/cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +CFLAGS := -c -O2 +ifeq ($(COMMDIR), mpi) + FC := /sw/analysis-x64/ompi/1.4.2/centos5.5_pgi10.9/bin/mpif90 +else + FC := /ccs/compilers/pgi/centos5.5-x86_64/10.9/linux86-64/10.9/bin/pgf90 +endif +FIXEDFLAGS := -Mextend +FREEFLAGS := -Mfree +FFLAGS := -r8 -i4 -O2 -byteswapio -Mnodclchk -Mextend +#FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -fp-model precise +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -g +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -traceback +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -g + +ifeq ($(THRD), yes) + FFLAGS := $(FFLAGS) -openmp +#cesm CPPDEFS := $(CPPDEFS) -DTHREADED_OMP +endif + +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) \ + -DNICELYR=$(NICELYR) -DNSNWLYR=$(NSNWLYR) -DNICECAT=$(NICECAT) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) -DTRPND=$(TRPND) \ + -DTRBRI=$(TRBRI) -DNTRAERO=$(NTRAERO) -DNBGCLYR=$(NBGCLYR) \ + -DTRBGCS=$(TRBGCS) -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf +# INCLDIR := $(INCLDIR) $(NETCDF)/include +# SLIBS := $(SLIBS) $(NETCDF)/lib -lnetcdf + INCLDIR := $(INCLDIR) -I/sw/analysis-x64/netcdf/3.6.2/centos5.5_pgi10.9/include + SLIBS := $(SLIBS) -L/sw/analysis-x64/netcdf/3.6.2/centos5.5_pgi10.9/lib -lnetcdf +endif + +### if using parallel I/O, load all 3 libraries. PIO must be first! +#ifeq ($(IO_TYPE), pio) +# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/pio-1.4.0 +# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/pio-1.4.0 -lpio +# +# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/parallel-netcdf-1.2.0/include +# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/parallel-netcdf-1.2.0/lib -lpnetcdf +# +# CPPDEFS := $(CPPDEFS) -Dncdf +# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/netcdf-3.6.3/include +# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/netcdf-3.6.3/lib -lnetcdf + +#endif diff --git a/bld/Macros.Linux.ORNL.titan b/bld/Macros.Linux.ORNL.titan new file mode 100644 index 00000000..9682052c --- /dev/null +++ b/bld/Macros.Linux.ORNL.titan @@ -0,0 +1,62 @@ +#============================================================================== +# Makefile macros for "conejo," Linux cluster at LANL +#============================================================================== + +INCLDIR := -I. 
+SLIBS :=
+ULIBS :=
+CPP := /usr/bin/cpp
+CPPFLAGS := -P -traditional
+CPPDEFS := -DLINUX
+CFLAGS := -c -O2
+ifeq ($(COMMDIR), mpi)
+ FC := ftn
+else
+ FC := ftn
+endif
+FIXEDFLAGS := -Mfixed
+FREEFLAGS := -Mfree
+FFLAGS := -r8 -i4 -O2 -byteswapio -Mnodclchk -Mextend
+
+ifeq ($(THRD), yes)
+ FFLAGS := $(FFLAGS) -openmp
+#cesm CPPDEFS := $(CPPDEFS) -DTHREADED_OMP
+endif
+
+MOD_SUFFIX := mod
+LD := $(FC)
+LDFLAGS := $(FFLAGS) -v
+
+ CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \
+ -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) \
+ -DNICELYR=$(NICELYR) -DNSNWLYR=$(NSNWLYR) -DNICECAT=$(NICECAT) \
+ -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) -DTRPND=$(TRPND) \
+ -DTRBRI=$(TRBRI) -DNTRAERO=$(NTRAERO) -DNBGCLYR=$(NBGCLYR) \
+ -DTRBGCS=$(TRBGCS) -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX)
+# CPPDEFS := $(CPPDEFS) -DAOMIP
+
+ifeq ($(DITTO), yes)
+ CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE
+endif
+
+ifeq ($(IO_TYPE), netcdf)
+ CPPDEFS := $(CPPDEFS) -Dncdf
+# INCLDIR := $(INCLDIR) $(NETCDF)/include
+# SLIBS := $(SLIBS) $(NETCDF)/lib -lnetcdf
+ INCLDIR := $(INCLDIR) -I$(NETCDF_DIR)/include
+ SLIBS := $(SLIBS) -L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf
+endif
+
+### if using parallel I/O, load all 3 libraries. PIO must be first!
+#ifeq ($(IO_TYPE), pio)
+# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/pio-1.4.0
+# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/pio-1.4.0 -lpio
+#
+# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/parallel-netcdf-1.2.0/include
+# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/parallel-netcdf-1.2.0/lib -lpnetcdf
+#
+# CPPDEFS := $(CPPDEFS) -Dncdf
+# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/netcdf-3.6.3/include
+# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/netcdf-3.6.3/lib -lnetcdf
+
+#endif
diff --git a/bld/Macros.Linux.SIO.uhu b/bld/Macros.Linux.SIO.uhu
new file mode 100644
index 00000000..71021086
--- /dev/null
+++ b/bld/Macros.Linux.SIO.uhu
@@ -0,0 +1,65 @@
+#==============================================================================
+# Makefile macros for "uhu," a Linux cluster at SIO
+#==============================================================================
+
+INCLDIR := -I.
-I/usr/include +SLIBS := -L/usr/lib +ULIBS := +CPP := /usr/bin/cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +CFLAGS := -c -O2 +ifeq ($(COMMDIR), mpi) + FC := /net/shared_data/Libraries/openmpi-1.6.5_gfortran_4.4.7/bin/mpif90 +else + FC := gfortran +endif +FIXEDFLAGS := -ffixed-line-length-132 +FREEFLAGS := +FFLAGS := -O2 -fconvert=big-endian +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -g +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -traceback +#FFLAGS := -r8 -i4 -align all -w -ftz -convert big_endian -assume byterecl -fpe0 -CB -g + +#ifeq ($(THRD), yes) +# FFLAGS := $(FFLAGS) -openmp +##cesm CPPDEFS := $(CPPDEFS) -DTHREADED_OMP +#endif + +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) \ + -DNICELYR=$(NICELYR) -DNSNWLYR=$(NSNWLYR) -DNICECAT=$(NICECAT) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) -DTRPND=$(TRPND) \ + -DTRBRI=$(TRBRI) -DNTRAERO=$(NTRAERO) -DNBGCLYR=$(NBGCLYR) \ + -DTRBGCS=$(TRBGCS) -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf +# INCLDIR := $(INCLDIR) $(NETCDF)/include +# SLIBS := $(SLIBS) $(NETCDF)/lib -lnetcdf + INCLDIR := $(INCLDIR) -I/net/shared_data/Libraries/netcdf-3.6.2_gfortran_4.4.6/include + SLIBS := $(SLIBS) -L//net/shared_data/Libraries/netcdf-3.6.2_gfortran_4.4.6/lib -lnetcdf +endif + +### if using parallel I/O, load all 3 libraries. PIO must be first! +#ifeq ($(IO_TYPE), pio) +# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/pio-1.4.0 +# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/pio-1.4.0 -lpio +# +# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/parallel-netcdf-1.2.0/include +# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/parallel-netcdf-1.2.0/lib -lpnetcdf +# +# CPPDEFS := $(CPPDEFS) -Dncdf +# INCLDIR := $(INCLDIR) -I/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/netcdf-3.6.3/include +# SLIBS := $(SLIBS) -L/usr/projects/climate/SHARED_CLIMATE/software/conejo/intel_openmpi/netcdf-3.6.3/lib -lnetcdf +# +#endif diff --git a/bld/Macros.Linux.Ubuntu b/bld/Macros.Linux.Ubuntu new file mode 100644 index 00000000..90ad2926 --- /dev/null +++ b/bld/Macros.Linux.Ubuntu @@ -0,0 +1,41 @@ +#============================================================================== +# Makefile macros for Ubuntu 10.04 Linux w/ repository OpenMPI and libnetcdf-dev +# Geophysical Institute, University of Alaska Fairbanks +#============================================================================== + +INCLDIR := +SLIBS := +ULIBS := +CPP := cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +CFLAGS := -c -O2 +ifeq ($(COMMDIR), mpi) + FC := mpif90 +else + FC := gfortran +endif +FIXEDFLAGS := -132 +FREEFLAGS := +# work-around for gcc to ensure that denormailzed numbers are +# flushed to zero: -march=pentium4 -msse2 -mfpmath=sse +FFLAGS := -O2 -w -march=pentium4 -msse2 -mfpmath=sse -fconvert=big-endian +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) 
-DREPRODUCIBLE
+endif
+
+ifeq ($(IO_TYPE), netcdf)
+ CPPDEFS := $(CPPDEFS) -Dncdf
+ INCLDIR := $(INCLDIR) -I/usr/include
+ SLIBS := $(SLIBS) -lnetcdf -lnetcdff
+endif
+
+### note this file does not include PIO capability
+### see Macros.Linux.LANL.conejo
\ No newline at end of file
diff --git a/bld/Macros.Linux.raijin-182 b/bld/Macros.Linux.raijin-182
new file mode 100644
index 00000000..2997011c
--- /dev/null
+++ b/bld/Macros.Linux.raijin-182
@@ -0,0 +1,97 @@
+#==============================================================================
+# Makefile macros for raijin.nci.org.au, an NCI Linux cluster
+# Note: Use the -mp flag if precision is critical. It slows down the
+# code by 25% (or more).
+#==============================================================================
+
+INCLDIR := -I.
+SLIBS :=
+ULIBS :=
+CPP := /usr/bin/cpp
+CPPFLAGS := -P -traditional
+CPPDEFS := -DLINUX -DPAROPT
+ifeq ($(DEBUG), yes)
+ CFLAGS := -c -g -O0
+else
+ CFLAGS := -c -O2
+endif
+FIXEDFLAGS := -132
+FREEFLAGS :=
+FC := mpifort
+# work-around for gcc to ensure that denormalized numbers are
+# flushed to zero: -march=pentium4 -msse2 -mfpmath=sse
+#FFLAGS := -O2 -w -fdefault-real-8 -msse2 -mfpmath=sse -fconvert=big-endian
+#FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium
+#FFLAGS := -r8 -i4 -g -O0 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium
+ifeq ($(DEBUG), yes)
+ FFLAGS := -r8 -i4 -O0 -g -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium -xHost -fp-model precise
+else
+ FFLAGS := -r8 -i4 -O2 -check all -CB -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium -xHost -fp-model precise
+endif
+MOD_SUFFIX := mod
+LD := $(FC)
+LDFLAGS := $(FFLAGS) -v #-static-intel
+
+ CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) -DN_ILYR=$(N_ILYR) \
+ -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) \
+ -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) \
+ -DTRPND=$(TRPND) -DNTRAERO=$(NTRAERO) -DTRBRI=$(TRBRI) \
+ -DNBGCLYR=$(NBGCLYR) -DTRBGCS=$(TRBGCS) \
+ -DNICECAT=$(NICECAT) -DNICELYR=$(NICELYR) \
+ -DNSNWLYR=$(NSNWLYR) \
+ -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS)
+# CPPDEFS := $(CPPDEFS) -DAOMIP
+# CPPDEFS := $(CPPDEFS) -DAOMIP
+
+ifeq ($(COMMDIR), mpi)
+ SLIBS := $(SLIBS) $(OPENMPI_F90LIBS)
+endif
+
+ifeq ($(DITTO), yes)
+ CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE
+endif
+
+ifeq ($(IO_TYPE), netcdf)
+ CPPDEFS := $(CPPDEFS) -Dncdf
+ INCLDIR := $(INCLDIR) -I$(NETCDF_ROOT)/include
+
+ SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib -lnetcdf -lnetcdff
+endif
+
+ifeq ($(USE_ESMF), yes)
+ CPPDEFS := $(CPPDEFS) -Duse_esmf
+ INCLDIR := $(INCLDIR) -I ???
+ SLIBS := $(SLIBS) -L ???
-lesmf -lcprts -lrt -ldl +endif + +ifeq ($(AusCOM), yes) + CPPDEFS := $(CPPDEFS) -DAusCOM -Dcoupled + INCLDIR := $(INCLDIR) $(CPL_INCS) + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lpsmile.${CHAN} + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmct + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpeu + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lscrip +#B: for coupled AusCOM, mpi lib is ALWAYS required: +# SLIBS := $(SLIBS) -L$(CPLLIBDIR) $(OMPI_F90LIBS) +# SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpi +#B: and, somehow the netcdf lib must be put behind the psmile lib: +# SLIBS := $(SLIBS) -lnetcdf + #SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib -lnetcdf + SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib/Intel -lnetcdf + +endif + +ifeq ($(ACCESS), yes) + CPPDEFS := $(CPPDEFS) -DACCESS +endif +# standalone CICE with AusCOM mods +ifeq ($(ACCICE), yes) + CPPDEFS := $(CPPDEFS) -DACCICE +endif +# no MOM just CICE+UM +ifeq ($(NOMOM), yes) + CPPDEFS := $(CPPDEFS) -DNOMOM +endif +ifeq ($(OASIS3_MCT), yes) + CPPDEFS := $(CPPDEFS) -DOASIS3_MCT +endif diff --git a/bld/Macros.Linux.raijin-183 b/bld/Macros.Linux.raijin-183 new file mode 120000 index 00000000..80e6d910 --- /dev/null +++ b/bld/Macros.Linux.raijin-183 @@ -0,0 +1 @@ +Macros.Linux.raijin-182 \ No newline at end of file diff --git a/bld/Macros.Linux.raijin-185 b/bld/Macros.Linux.raijin-185 new file mode 100644 index 00000000..8e52c6e0 --- /dev/null +++ b/bld/Macros.Linux.raijin-185 @@ -0,0 +1,97 @@ +#============================================================================== +# Makefile macros for xe.nci.org.au, an SGI ALTIX system running Linux +# Note: Use the -mp flag if precision is critical. It slows down the +# code by 25% (or more). +#============================================================================== + +INCLDIR := -I. +SLIBS := +ULIBS := +CPP := /usr/bin/cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX -DPAROPT +ifeq ($(DEBUG), yes) + CFLAGS := -c -g -O0 +else + CFLAGS := -c -O2 +endif +FIXEDFLAGS := -132 +FREEFLAGS := +FC := mpifort +# work-around for gc to ensure that denormailzed numbers are +# flushed to zero: -march=pentium4 -msse2 -mfpmath=sse +#FFLAGS := -O2 -w -fdefault-real-8 -msse2 -mfpmath=sse -fconvert=big-endian +#FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +#FFLAGS := -r8 -i4 -g -O0 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +ifeq ($(DEBUG), yes) + FFLAGS := -r8 -i4 -O0 -g -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium -xHost -fp-model precise +else + FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium -xHost -fp-model precise -assume buffered_io -check noarg_temp_created +endif +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v #-static-intel + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) -DN_ILYR=$(N_ILYR) \ + -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) \ + -DTRPND=$(TRPND) -DNTRAERO=$(NTRAERO) -DTRBRI=$(TRBRI) \ + -DNBGCLYR=$(NBGCLYR) -DTRBGCS=$(TRBGCS) \ + -DNICECAT=$(NICECAT) -DNICELYR=$(NICELYR) \ + -DNSNWLYR=$(NSNWLYR) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) +# CPPDEFS := $(CPPDEFS) -DAOMIP +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(COMMDIR), mpi) + SLIBS := $(SLIBS) $(OPENMPI_F90LIBS) +endif + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := $(INCLDIR) -I$(NETCDF_ROOT)/include + + SLIBS := $(SLIBS) 
-L$(NETCDF_ROOT)/lib -lnetcdf -lnetcdff +endif + +ifeq ($(USE_ESMF), yes) + CPPDEFS := $(CPPDEFS) -Duse_esmf + INCLDIR := $(INCLDIR) -I ??? + SLIBS := $(SLIBS) -L ??? -lesmf -lcprts -lrt -ldl +endif + +ifeq ($(AusCOM), yes) + CPPDEFS := $(CPPDEFS) -DAusCOM -Dcoupled + INCLDIR := $(INCLDIR) $(CPL_INCS) + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lpsmile.${CHAN} + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmct + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpeu + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lscrip +#B: for coupled AusCOM, mpi lib is ALWAYS required: +# SLIBS := $(SLIBS) -L$(CPLLIBDIR) $(OMPI_F90LIBS) +# SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpi +#B: and, somehow the netcdf lib must be put behind the psmile lib: +# SLIBS := $(SLIBS) -lnetcdf + #SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib -lnetcdf + SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib/Intel -lnetcdf + +endif + +ifeq ($(ACCESS), yes) + CPPDEFS := $(CPPDEFS) -DACCESS +endif +# standalone CICE with AusCOM mods +ifeq ($(ACCICE), yes) + CPPDEFS := $(CPPDEFS) -DACCICE +endif +# no MOM just CICE+UM +ifeq ($(NOMOM), yes) + CPPDEFS := $(CPPDEFS) -DNOMOM +endif +ifeq ($(OASIS3_MCT), yes) + CPPDEFS := $(CPPDEFS) -DOASIS3_MCT +endif diff --git a/bld/Macros.Linux.raijin.nci.org.au b/bld/Macros.Linux.raijin.nci.org.au new file mode 100644 index 00000000..135d2738 --- /dev/null +++ b/bld/Macros.Linux.raijin.nci.org.au @@ -0,0 +1,79 @@ +#============================================================================== +# Makefile macros for Ubuntu 10.04 Linux w/ repository OpenMPI and libnetcdf-dev +# Geophysical Institute, University of Alaska Fairbanks +#============================================================================== + +INCLDIR := -I. +SLIBS := +ULIBS := +CPP := cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +CFLAGS := -c -O2 +ifeq ($(COMMDIR), mpi) + FC := mpif90 +else + FC := ifort +endif +ifeq ($(DEBUG), yes) + CFLAGS := -c -g -O0 +else + CFLAGS := -c -O2 +endif +FIXEDFLAGS := -132 +FREEFLAGS := +# work-around for gcc to ensure that denormailzed numbers are +# flushed to zero: -march=pentium4 -msse2 -mfpmath=sse +#FFLAGS := -O2 -w -fdefault-real-8 -msse2 -mfpmath=sse -fconvert=big-endian +#FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +#FFLAGS := -r8 -i4 -g -O0 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +ifeq ($(DEBUG), yes) + FFLAGS := -g -r8 -i4 -O0 -g -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +else + FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +endif +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v -static-intel + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) -DN_ILYR=$(N_ILYR) \ + -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) \ + -DTRPND=$(TRPND) -DNTRAERO=$(NTRAERO) -DTRBRI=$(TRBRI) \ + -DNBGCLYR=$(NBGCLYR) -DTRBGCS=$(TRBGCS) \ + -DNICECAT=$(NICECAT) -DNICELYR=$(NICELYR) \ + -DNSNWLYR=$(NSNWLYR) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) + +ifeq ($(COMMDIR), mpi) + SLIBS := $(SLIBS) $(OPENMPI_F90LIBS) +endif + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := $(INCLDIR) -I$(NETCDF_ROOT)/include + SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib/Intel -lnetcdf -lnetcdff +endif + +ifeq ($(AusCOM), yes) + CPPDEFS := $(CPPDEFS) -DAusCOM -Dcoupled + INCLDIR := $(INCLDIR) $(CPL_INCS) + SLIBS := $(SLIBS) -L$(CPLLIBDIR) 
-lpsmile.${CHAN} + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpp_io + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpi + SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib/Intel -lnetcdf +endif + +ifeq ($(ACCESS), yes) + CPPDEFS := $(CPPDEFS) -DACCESS +endif +# standalone CICE with AusCOM mods +ifeq ($(ACCICE), yes) + CPPDEFS := $(CPPDEFS) -DACCICE +endif +### note this file does not include PIO capability +### see Macros.Linux.LANL.conejo diff --git a/bld/Macros.Linux.raijin.nci.org.au-mct b/bld/Macros.Linux.raijin.nci.org.au-mct new file mode 100644 index 00000000..b6ea101e --- /dev/null +++ b/bld/Macros.Linux.raijin.nci.org.au-mct @@ -0,0 +1,97 @@ +#============================================================================== +# Makefile macros for xe.nci.org.au, an SGI ALTIX system running Linux +# Note: Use the -mp flag if precision is critical. It slows down the +# code by 25% (or more). +#============================================================================== + +INCLDIR := -I. +SLIBS := +ULIBS := +CPP := /usr/bin/cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +ifeq ($(DEBUG), yes) + CFLAGS := -c -g -O0 +else + CFLAGS := -c -O2 +endif +FIXEDFLAGS := -132 +FREEFLAGS := +FC := mpif90 +# work-around for gc to ensure that denormailzed numbers are +# flushed to zero: -march=pentium4 -msse2 -mfpmath=sse +#FFLAGS := -O2 -w -fdefault-real-8 -msse2 -mfpmath=sse -fconvert=big-endian +#FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +#FFLAGS := -r8 -i4 -g -O0 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +ifeq ($(DEBUG), yes) + FFLAGS := -g -r8 -i4 -O0 -g -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +else + FFLAGS := -g -traceback -fpe0 -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +endif +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v -static-intel + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) -DN_ILYR=$(N_ILYR) \ + -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) \ + -DTRPND=$(TRPND) -DNTRAERO=$(NTRAERO) -DTRBRI=$(TRBRI) \ + -DNBGCLYR=$(NBGCLYR) -DTRBGCS=$(TRBGCS) \ + -DNICECAT=$(NICECAT) -DNICELYR=$(NICELYR) \ + -DNSNWLYR=$(NSNWLYR) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) +# CPPDEFS := $(CPPDEFS) -DAOMIP +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(COMMDIR), mpi) + SLIBS := $(SLIBS) $(OPENMPI_F90LIBS) +endif + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := $(INCLDIR) -I$(NETCDF_ROOT)/include + + SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib -lnetcdf -lnetcdff +endif + +ifeq ($(USE_ESMF), yes) + CPPDEFS := $(CPPDEFS) -Duse_esmf + INCLDIR := $(INCLDIR) -I ??? + SLIBS := $(SLIBS) -L ??? 
-lesmf -lcprts -lrt -ldl +endif + +ifeq ($(AusCOM), yes) + CPPDEFS := $(CPPDEFS) -DAusCOM -Dcoupled + INCLDIR := $(INCLDIR) $(CPL_INCS) + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lpsmile.${CHAN} + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmct + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpeu + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lscrip +#B: for coupled AusCOM, mpi lib is ALWAYS required: +# SLIBS := $(SLIBS) -L$(CPLLIBDIR) $(OMPI_F90LIBS) +# SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpi +#B: and, somehow the netcdf lib must be put behind the psmile lib: +# SLIBS := $(SLIBS) -lnetcdf + #SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib -lnetcdf + SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib/Intel -lnetcdf + +endif + +ifeq ($(ACCESS), yes) + CPPDEFS := $(CPPDEFS) -DACCESS +endif +# standalone CICE with AusCOM mods +ifeq ($(ACCICE), yes) + CPPDEFS := $(CPPDEFS) -DACCICE +endif +# no MOM just CICE+UM +ifeq ($(NOMOM), yes) + CPPDEFS := $(CPPDEFS) -DNOMOM +endif +ifeq ($(OASIS3_MCT), yes) + CPPDEFS := $(CPPDEFS) -DOASIS3_MCT +endif diff --git a/bld/Macros.Linux.raijin.nci.org.au-mct_bk b/bld/Macros.Linux.raijin.nci.org.au-mct_bk new file mode 100644 index 00000000..f10225ed --- /dev/null +++ b/bld/Macros.Linux.raijin.nci.org.au-mct_bk @@ -0,0 +1,101 @@ +#============================================================================== +# Makefile macros for xe.nci.org.au, an SGI ALTIX system running Linux +# Note: Use the -mp flag if precision is critical. It slows down the +# code by 25% (or more). +#============================================================================== + +INCLDIR := -I. +SLIBS := +ULIBS := +CPP := cpp +CPPFLAGS := -P -traditional +CPPDEFS := -DLINUX +CFLAGS := -c -O2 +ifeq ($(COMMDIR), mpi) + FC := mpif90 +else + FC := ifort +endif +ifeq ($(DEBUG), yes) + CFLAGS := -c -g -O0 +else + CFLAGS := -c -O2 +endif +FIXEDFLAGS := -132 +FREEFLAGS := +# work-around for gcc to ensure that denormailzed numbers are +# flushed to zero: -march=pentium4 -msse2 -mfpmath=sse +#FFLAGS := -O2 -w -fdefault-real-8 -msse2 -mfpmath=sse -fconvert=big-endian +#FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +#FFLAGS := -r8 -i4 -g -O0 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +ifeq ($(DEBUG), yes) + FFLAGS := -g -r8 -i4 -O0 -g -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +else + FFLAGS := -r8 -i4 -O2 -align all -w -ftz -convert big_endian -assume byterecl -no-vec -mcmodel=medium +endif +MOD_SUFFIX := mod +LD := $(FC) +LDFLAGS := $(FFLAGS) -v -static-intel + + CPPDEFS := $(CPPDEFS) -DNXGLOB=$(NXGLOB) -DNYGLOB=$(NYGLOB) -DN_ILYR=$(N_ILYR) \ + -DNUMIN=$(NUMIN) -DNUMAX=$(NUMAX) \ + -DTRAGE=$(TRAGE) -DTRFY=$(TRFY) -DTRLVL=$(TRLVL) \ + -DTRPND=$(TRPND) -DNTRAERO=$(NTRAERO) -DTRBRI=$(TRBRI) \ + -DNBGCLYR=$(NBGCLYR) -DTRBGCS=$(TRBGCS) \ + -DNICECAT=$(NICECAT) -DNICELYR=$(NICELYR) \ + -DNSNWLYR=$(NSNWLYR) \ + -DBLCKX=$(BLCKX) -DBLCKY=$(BLCKY) -DMXBLCKS=$(MXBLCKS) +# CPPDEFS := $(CPPDEFS) -DAOMIP +# CPPDEFS := $(CPPDEFS) -DAOMIP + +ifeq ($(COMMDIR), mpi) + SLIBS := $(SLIBS) $(OPENMPI_F90LIBS) +endif + +ifeq ($(DITTO), yes) + CPPDEFS := $(CPPDEFS) -DREPRODUCIBLE +endif + +ifeq ($(IO_TYPE), netcdf) + CPPDEFS := $(CPPDEFS) -Dncdf + INCLDIR := $(INCLDIR) -I$(NETCDF_ROOT)/include + SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib/Intel -lnetcdf -lnetcdff +endif + +ifeq ($(USE_ESMF), yes) + CPPDEFS := $(CPPDEFS) -Duse_esmf + INCLDIR := $(INCLDIR) -I ??? + SLIBS := $(SLIBS) -L ??? 
-lesmf -lcprts -lrt -ldl +endif + +ifeq ($(AusCOM), yes) + CPPDEFS := $(CPPDEFS) -DAusCOM -Dcoupled + INCLDIR := $(INCLDIR) $(CPL_INCS) + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lpsmile.${CHAN} + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmct + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpeu + SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lscrip +#B: for coupled AusCOM, mpi lib is ALWAYS required: +# SLIBS := $(SLIBS) -L$(CPLLIBDIR) $(OMPI_F90LIBS) +# SLIBS := $(SLIBS) -L$(CPLLIBDIR) -lmpi +#B: and, somehow the netcdf lib must be put behind the psmile lib: +# SLIBS := $(SLIBS) -lnetcdf + #SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib -lnetcdf + SLIBS := $(SLIBS) -L$(NETCDF_ROOT)/lib/Intel -lnetcdf + +endif + +ifeq ($(ACCESS), yes) + CPPDEFS := $(CPPDEFS) -DACCESS +endif +# standalone CICE with AusCOM mods +ifeq ($(ACCICE), yes) + CPPDEFS := $(CPPDEFS) -DACCICE +endif +# no MOM just CICE+UM +ifeq ($(NOMOM), yes) + CPPDEFS := $(CPPDEFS) -DNOMOM +endif +ifeq ($(OASIS3_MCT), yes) + CPPDEFS := $(CPPDEFS) -DOASIS3_MCT +endif diff --git a/bld/Makefile b/bld/Makefile new file mode 100644 index 00000000..c2d232ff --- /dev/null +++ b/bld/Makefile @@ -0,0 +1,169 @@ +#------------------------------------------------------------------------------- +# CVS $Id: Makefile.std,v 1.1 2004/02/09 18:13:52 lipscomb Exp $ +# CVS $Source: /home/climate/CVS-COSIM/cice/bld/Makefile.std,v $ +# CVS $Name: $ +#------------------------------------------------------------------------------- +# Common Makefile: a framework for building all CCSM components and more +# +# Command-line variables +# MACFILE= ~ the macros definition file to use/include +# EXEC= ~ name given to executable, default is a.out +# VPATH= ~ VPATH , default is . (cwd only) +# SRCS= ~ list of src files, default is all .c .F .F90 files in VPATH +# VPFILE= ~ file with list of dirs, used to create VPATH +# SRCFILE= ~ file with list of src files, used to create SRCS +# DEPGEN= ~ dependency generator utility, default is makdep +# +# ~ any macro definitions found in this file or the included +# MACFILE will be over-riden by cmd-line macro definitions +# MODEL= ~ a standard macro definition, often found in the included +# MACFILE, used to trigger special compilation flags +# +# Usage examples: +# % gmake MACFILE=Macros.AIX VPFILE=Filepath MODEL=ccm3 EXEC=atm +# % gmake MACFILE=Macros.AIX VPFILE=Filepath SRCFILE=Srclist EXEC=pop +# % gmake MACFILE=Macros.C90 VPATH="dir1 dir2" SRCS="file1.c file2.F90" +# % gmake MACFILE=Macros.SUN SRCS="test.F" +#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# parse cmd-line and establish values for EXEC, VPATH, SRCS, OBJS, etc +#------------------------------------------------------------------------------- + +EXEC := a.out +MACFILE := NONE +MODEL := NONE +VPFILE := NONE +VPATH := . +SRCFILE := NONE +SRCS := NONE +DEPGEN := ./makdep # an externally provided dependency generator + +ifneq ($(VPATH),.) + # this variable was specified on cmd line or in an env var +else + ifneq ($(VPFILE),NONE) + # explicit list of VPATH dirs is provided + VPATH := $(wildcard . 
$(shell cat $(VPFILE) ) ) + endif +endif + +ifneq ($(SRCS),NONE) + # this variable was specified on cmd line or in an env var +else + ifneq ($(SRCFILE),NONE) + # explicit list of src files is provided + SRCS := $(shell cat $(SRCFILE) ) + else + # list of src files is all .F90 .F .c files in VPATH + SRCS := $(wildcard $(addsuffix /*.F90 , $(VPATH)) \ + $(addsuffix /*.[cF], $(VPATH)) ) + endif +endif + +OBJS := $(addsuffix .o, $(sort $(basename $(notdir $(SRCS))))) +DEPS := $(addsuffix .d, $(sort $(basename $(notdir $(SRCS))))) +INCS := $(patsubst %,-I%, $(VPATH) ) +RM := rm + +.SUFFIXES: +.SUFFIXES: .F90 .F .c .o + +all: $(EXEC) + +#------------------------------------------------------------------------------- +# include the file that provides macro definitions required by build rules +# note: the MACFILE may not be needed for certain goals +#------------------------------------------------------------------------------- + +ifneq ($(MAKECMDGOALS), db_files) + -include $(MACFILE) +endif + +#------------------------------------------------------------------------------- +# echo file names, paths, compile flags, etc. used during build +#------------------------------------------------------------------------------- + +db_files: + @echo " " + @echo "* EXEC := $(EXEC)" + @echo "* MACFILE := $(MACFILE)" + @echo "* VPFILE := $(VPFILE)" + @echo "* VPATH := $(VPATH)" + @echo "* SRCFILE := $(SRCFILE)" + @echo "* INCS := $(INCS)" + @echo "* SRCS := $(SRCS)" + @echo "* OBJS := $(OBJS)" + @echo "* DEPS := $(DEPS)" +db_flags: + @echo " " + @echo "* cpp := $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR)" + @echo "* cc := cc -c $(CFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F.o := $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F90.o := $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR)" + +#------------------------------------------------------------------------------- +# build rules: MACFILE, cmd-line, or env vars must provide the needed macros +#------------------------------------------------------------------------------- + +$(EXEC): $(OBJS) + $(LD) -o $(EXEC) $(LDFLAGS) $(OBJS) $(ULIBS) $(SLIBS) + +.c.o: + cc $(CFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< + +.F.o: + $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f + $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR) $*.f + +.F90.o: + $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f90 + $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR) $*.f90 + +mostlyclean: + $(RM) -f *.f *.f90 + +clean: + $(RM) -f *.f *.f90 *.d *.mod *.o +# $(RM) -f *.f *.f90 *.d *.$(MOD_SUFFIX) $(OBJS) + +realclean: + $(RM) -f *.f *.f90 *.d *.$(MOD_SUFFIX) $(OBJS) $(EXEC) + +#------------------------------------------------------------------------------- +# Build & include dependency files +#------------------------------------------------------------------------------- +# ASSUMPTIONS: +# o an externally provided dependency generator, $(DEPGEN), is available, +# its cmd line syntax is compatible with the build rules below. Eg, for +# each .o file, there is a corresponding .d (dependency) file, and both +# will be dependent on the same src file, eg. foo.o foo.d : foo.F90 +# Also, the dependancy genorator's capabilities, limitations, and assumptions +# are understood & accepted. 
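# For illustration, a makdep-generated .d file for one of the CICE sources
# above would carry a rule of the "foo.o foo.d : foo.F90" form described here,
# e.g. (the module list is indicative only):
#
#   ice_calendar.o ice_calendar.d : ice_calendar.F90 ice_kinds_mod.o \
#                                   ice_fileunits.o ice_domain_size.o
#
# Each such .d file is then pulled in by the "-include $(DEPS)" at the end of
# this Makefile, so touching ice_kinds_mod.F90 recompiles its users.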
+#------------------------------------------------------------------------------- + +%.d : %.c + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< | head -3 > $@ +%.d : %.F + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ +%.d : %.F90 + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ +%.d : %.H + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ + +# the if-tests prevent DEPS files from being created when they're not needed +ifneq ($(MAKECMDGOALS), db_files) +ifneq ($(MAKECMDGOALS), db_flags) +ifneq ($(MAKECMDGOALS), mostlyclean) +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(MAKECMDGOALS), realclean) + -include $(DEPS) +endif +endif +endif +endif +endif diff --git a/bld/Makefile.Darwin b/bld/Makefile.Darwin new file mode 100644 index 00000000..3649dde9 --- /dev/null +++ b/bld/Makefile.Darwin @@ -0,0 +1,169 @@ +#------------------------------------------------------------------------------- +# CVS $Id: Makefile.std,v 1.1 2004/02/09 18:13:52 lipscomb Exp $ +# CVS $Source: /home/climate/CVS-COSIM/cice/bld/Makefile.std,v $ +# CVS $Name: $ +#------------------------------------------------------------------------------- +# Common Makefile: a framework for building all CCSM components and more +# +# Command-line variables +# MACFILE= ~ the macros definition file to use/include +# EXEC= ~ name given to executable, default is a.out +# VPATH= ~ VPATH , default is . (cwd only) +# SRCS= ~ list of src files, default is all .c .F .F90 files in VPATH +# VPFILE= ~ file with list of dirs, used to create VPATH +# SRCFILE= ~ file with list of src files, used to create SRCS +# DEPGEN= ~ dependency generator utility, default is makdep +# +# ~ any macro definitions found in this file or the included +# MACFILE will be over-riden by cmd-line macro definitions +# MODEL= ~ a standard macro definition, often found in the included +# MACFILE, used to trigger special compilation flags +# +# Usage examples: +# % gmake MACFILE=Macros.AIX VPFILE=Filepath MODEL=ccm3 EXEC=atm +# % gmake MACFILE=Macros.AIX VPFILE=Filepath SRCFILE=Srclist EXEC=pop +# % gmake MACFILE=Macros.C90 VPATH="dir1 dir2" SRCS="file1.c file2.F90" +# % gmake MACFILE=Macros.SUN SRCS="test.F" +#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# parse cmd-line and establish values for EXEC, VPATH, SRCS, OBJS, etc +#------------------------------------------------------------------------------- + +EXEC := a.out +MACFILE := NONE +MODEL := NONE +VPFILE := NONE +VPATH := . +SRCFILE := NONE +SRCS := NONE +DEPGEN := ./makdep # an externally provided dependency generator + +ifneq ($(VPATH),.) + # this variable was specified on cmd line or in an env var +else + ifneq ($(VPFILE),NONE) + # explicit list of VPATH dirs is provided + VPATH := $(wildcard . 
$(shell cat $(VPFILE) ) ) + endif +endif + +ifneq ($(SRCS),NONE) + # this variable was specified on cmd line or in an env var +else + ifneq ($(SRCFILE),NONE) + # explicit list of src files is provided + SRCS := $(shell cat $(SRCFILE) ) + else + # list of src files is all .F90 .F .c files in VPATH + SRCS := $(wildcard $(addsuffix /*.F90 , $(VPATH)) \ + $(addsuffix /*.[cF], $(VPATH)) ) + endif +endif + +OBJS := $(addsuffix .o, $(sort $(basename $(notdir $(SRCS))))) +DEPS := $(addsuffix .d, $(sort $(basename $(notdir $(SRCS))))) +INCS := $(patsubst %,-I%, $(VPATH) ) +RM := rm + +.SUFFIXES: +.SUFFIXES: .F90 .F .c .o + +all: $(EXEC) + +#------------------------------------------------------------------------------- +# include the file that provides macro definitions required by build rules +# note: the MACFILE may not be needed for certain goals +#------------------------------------------------------------------------------- + +ifneq ($(MAKECMDGOALS), db_files) + -include $(MACFILE) +endif + +#------------------------------------------------------------------------------- +# echo file names, paths, compile flags, etc. used during build +#------------------------------------------------------------------------------- + +db_files: + @echo " " + @echo "* EXEC := $(EXEC)" + @echo "* MACFILE := $(MACFILE)" + @echo "* VPFILE := $(VPFILE)" + @echo "* VPATH := $(VPATH)" + @echo "* SRCFILE := $(SRCFILE)" + @echo "* INCS := $(INCS)" + @echo "* SRCS := $(SRCS)" + @echo "* OBJS := $(OBJS)" + @echo "* DEPS := $(DEPS)" +db_flags: + @echo " " + @echo "* cpp := $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR)" + @echo "* cc := cc -c $(CFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F.o := $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F90.o := $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR)" + +#------------------------------------------------------------------------------- +# build rules: MACFILE, cmd-line, or env vars must provide the needed macros +#------------------------------------------------------------------------------- + +$(EXEC): $(OBJS) + $(LD) -o $(EXEC) $(LDFLAGS) $(OBJS) $(ULIBS) $(SLIBS) + +.c.o: + cc $(CFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< + +.F.o: + $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f + $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR) $*.f + +.F90.o: + $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f95 + $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR) $*.f95 + +mostlyclean: + $(RM) -f *.f *.f95 + +clean: + $(RM) -f *.f *.f95 *.d *.mod *.o +# $(RM) -f *.f *.f95 *.d *.$(MOD_SUFFIX) $(OBJS) + +realclean: + $(RM) -f *.f *.f95 *.d *.$(MOD_SUFFIX) $(OBJS) $(EXEC) + +#------------------------------------------------------------------------------- +# Build & include dependency files +#------------------------------------------------------------------------------- +# ASSUMPTIONS: +# o an externally provided dependency generator, $(DEPGEN), is available, +# its cmd line syntax is compatible with the build rules below. Eg, for +# each .o file, there is a corresponding .d (dependency) file, and both +# will be dependent on the same src file, eg. foo.o foo.d : foo.F90 +# Also, the dependancy genorator's capabilities, limitations, and assumptions +# are understood & accepted. 
+#------------------------------------------------------------------------------- + +%.d : %.c + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< | head -3 > $@ +%.d : %.F + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ +%.d : %.F90 + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ +%.d : %.H + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ + +# the if-tests prevent DEPS files from being created when they're not needed +ifneq ($(MAKECMDGOALS), db_files) +ifneq ($(MAKECMDGOALS), db_flags) +ifneq ($(MAKECMDGOALS), mostlyclean) +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(MAKECMDGOALS), realclean) + -include $(DEPS) +endif +endif +endif +endif +endif diff --git a/bld/Makefile.UNICOS b/bld/Makefile.UNICOS new file mode 100755 index 00000000..0f6f4c8a --- /dev/null +++ b/bld/Makefile.UNICOS @@ -0,0 +1,167 @@ +#------------------------------------------------------------------------------- +# CVS $Id: Makefile.UNICOS,v 1.1 2004/02/09 18:13:52 lipscomb Exp $ +# CVS $Source: /net/bob1/CVS-COSIM/cice/bld/Makefile.UNICOS,v $ +# CVS $Name: $ +#------------------------------------------------------------------------------- +# Common Makefile: a framework for building all CCSM components and more +# +# Command-line variables +# MACFILE= ~ the macros definition file to use/include +# EXEC= ~ name given to executable, default is a.out +# VPATH= ~ VPATH , default is . (cwd only) +# SRCS= ~ list of src files, default is all .c .F .F90 files in VPATH +# VPFILE= ~ file with list of dirs, used to create VPATH +# SRCFILE= ~ file with list of src files, used to create SRCS +# DEPGEN= ~ dependency generator utility, default is makdep +# +# ~ any macro definitions found in this file or the included +# MACFILE will be over-riden by cmd-line macro definitions +# MODEL= ~ a standard macro definition, often found in the included +# MACFILE, used to trigger special compilation flags +# +# Usage examples: +# % gmake MACFILE=Macros.AIX VPFILE=Filepath MODEL=ccm3 EXEC=atm +# % gmake MACFILE=Macros.AIX VPFILE=Filepath SRCFILE=Srclist EXEC=pop +# % gmake MACFILE=Macros.C90 VPATH="dir1 dir2" SRCS="file1.c file2.F90" +# % gmake MACFILE=Macros.SUN SRCS="test.F" +#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# parse cmd-line and establish values for EXEC, VPATH, SRCS, OBJS, etc +#------------------------------------------------------------------------------- + +EXEC := a.out +MACFILE := NONE +MODEL := NONE +VPFILE := NONE +VPATH := . +SRCFILE := NONE +SRCS := NONE +DEPGEN := ./makdep # an externally provided dependency generator + +ifneq ($(VPATH),.) + # this variable was specified on cmd line or in an env var +else + ifneq ($(VPFILE),NONE) + # explicit list of VPATH dirs is provided + VPATH := $(wildcard . 
$(shell cat $(VPFILE) ) ) + endif +endif + +ifneq ($(SRCS),NONE) + # this variable was specified on cmd line or in an env var +else + ifneq ($(SRCFILE),NONE) + # explicit list of src files is provided + SRCS := $(shell cat $(SRCFILE) ) + else + # list of src files is all .F90 .F .c files in VPATH + SRCS := $(wildcard $(addsuffix /*.F90 , $(VPATH)) \ + $(addsuffix /*.[cF], $(VPATH)) ) + endif +endif + +OBJS := $(addsuffix .o, $(sort $(basename $(notdir $(SRCS))))) +DEPS := $(addsuffix .d, $(sort $(basename $(notdir $(SRCS))))) +INCS := $(patsubst %,-I%, $(VPATH) ) +RM := rm + +.SUFFIXES: +.SUFFIXES: .F90 .F .c .o + +all: $(EXEC) + +#------------------------------------------------------------------------------- +# include the file that provides macro definitions required by build rules +# note: the MACFILE may not be needed for certain goals +#------------------------------------------------------------------------------- + +ifneq ($(MAKECMDGOALS), db_files) + -include $(MACFILE) +endif + +#------------------------------------------------------------------------------- +# echo file names, paths, compile flags, etc. used during build +#------------------------------------------------------------------------------- + +db_files: + @echo " " + @echo "* EXEC := $(EXEC)" + @echo "* MACFILE := $(MACFILE)" + @echo "* VPFILE := $(VPFILE)" + @echo "* VPATH := $(VPATH)" + @echo "* SRCFILE := $(SRCFILE)" + @echo "* INCS := $(INCS)" + @echo "* SRCS := $(SRCS)" + @echo "* OBJS := $(OBJS)" + @echo "* DEPS := $(DEPS)" +db_flags: + @echo " " + @echo "* cpp := $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR)" + @echo "* cc := cc -c $(CFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F.o := $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F90.o := $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR)" + +#------------------------------------------------------------------------------- +# build rules: MACFILE, cmd-line, or env vars must provide the needed macros +#------------------------------------------------------------------------------- + +$(EXEC): $(OBJS) + $(LD) -o $(EXEC) $(LDFLAGS) $(OBJS) $(ULIBS) $(SLIBS) + +.c.o: + cc $(CFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< + +.F.o: + $(FC) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) -c $(FFLAGS) $(FIXEDFLAGS) $< + +.F90.o: + $(FC) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) -c $(FFLAGS) $(FREEFLAGS) $< + +mostlyclean: + $(RM) -f *.f *.f90 + +clean: + $(RM) -f *.f *.f90 *.d *.mod *.o +## $(RM) -f *.f *.f90 *.d *.$(MOD_SUFFIX) $(OBJS) + +realclean: + $(RM) -f *.f *.f90 *.d *.$(MOD_SUFFIX) $(OBJS) $(EXEC) + +#------------------------------------------------------------------------------- +# Build & include dependency files +#------------------------------------------------------------------------------- +# ASSUMPTIONS: +# o an externally provided dependency generator, $(DEPGEN), is available, +# its cmd line syntax is compatible with the build rules below. Eg, for +# each .o file, there is a corresponding .d (dependency) file, and both +# will be dependent on the same src file, eg. foo.o foo.d : foo.F90 +# Also, the dependancy genorator's capabilities, limitations, and assumptions +# are understood & accepted. 
+#------------------------------------------------------------------------------- + +%.d : %.c + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< | head -3 > $@ +%.d : %.F + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ +%.d : %.F90 + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ +%.d : %.H + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ + +# the if-tests prevent DEPS files from being created when they're not needed +ifneq ($(MAKECMDGOALS), db_files) +ifneq ($(MAKECMDGOALS), db_flags) +ifneq ($(MAKECMDGOALS), mostlyclean) +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(MAKECMDGOALS), realclean) + -include $(DEPS) +endif +endif +endif +endif +endif diff --git a/bld/Makefile.std b/bld/Makefile.std new file mode 100644 index 00000000..c2d232ff --- /dev/null +++ b/bld/Makefile.std @@ -0,0 +1,169 @@ +#------------------------------------------------------------------------------- +# CVS $Id: Makefile.std,v 1.1 2004/02/09 18:13:52 lipscomb Exp $ +# CVS $Source: /home/climate/CVS-COSIM/cice/bld/Makefile.std,v $ +# CVS $Name: $ +#------------------------------------------------------------------------------- +# Common Makefile: a framework for building all CCSM components and more +# +# Command-line variables +# MACFILE= ~ the macros definition file to use/include +# EXEC= ~ name given to executable, default is a.out +# VPATH= ~ VPATH , default is . (cwd only) +# SRCS= ~ list of src files, default is all .c .F .F90 files in VPATH +# VPFILE= ~ file with list of dirs, used to create VPATH +# SRCFILE= ~ file with list of src files, used to create SRCS +# DEPGEN= ~ dependency generator utility, default is makdep +# +# ~ any macro definitions found in this file or the included +# MACFILE will be over-riden by cmd-line macro definitions +# MODEL= ~ a standard macro definition, often found in the included +# MACFILE, used to trigger special compilation flags +# +# Usage examples: +# % gmake MACFILE=Macros.AIX VPFILE=Filepath MODEL=ccm3 EXEC=atm +# % gmake MACFILE=Macros.AIX VPFILE=Filepath SRCFILE=Srclist EXEC=pop +# % gmake MACFILE=Macros.C90 VPATH="dir1 dir2" SRCS="file1.c file2.F90" +# % gmake MACFILE=Macros.SUN SRCS="test.F" +#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# parse cmd-line and establish values for EXEC, VPATH, SRCS, OBJS, etc +#------------------------------------------------------------------------------- + +EXEC := a.out +MACFILE := NONE +MODEL := NONE +VPFILE := NONE +VPATH := . +SRCFILE := NONE +SRCS := NONE +DEPGEN := ./makdep # an externally provided dependency generator + +ifneq ($(VPATH),.) + # this variable was specified on cmd line or in an env var +else + ifneq ($(VPFILE),NONE) + # explicit list of VPATH dirs is provided + VPATH := $(wildcard . 
$(shell cat $(VPFILE) ) ) + endif +endif + +ifneq ($(SRCS),NONE) + # this variable was specified on cmd line or in an env var +else + ifneq ($(SRCFILE),NONE) + # explicit list of src files is provided + SRCS := $(shell cat $(SRCFILE) ) + else + # list of src files is all .F90 .F .c files in VPATH + SRCS := $(wildcard $(addsuffix /*.F90 , $(VPATH)) \ + $(addsuffix /*.[cF], $(VPATH)) ) + endif +endif + +OBJS := $(addsuffix .o, $(sort $(basename $(notdir $(SRCS))))) +DEPS := $(addsuffix .d, $(sort $(basename $(notdir $(SRCS))))) +INCS := $(patsubst %,-I%, $(VPATH) ) +RM := rm + +.SUFFIXES: +.SUFFIXES: .F90 .F .c .o + +all: $(EXEC) + +#------------------------------------------------------------------------------- +# include the file that provides macro definitions required by build rules +# note: the MACFILE may not be needed for certain goals +#------------------------------------------------------------------------------- + +ifneq ($(MAKECMDGOALS), db_files) + -include $(MACFILE) +endif + +#------------------------------------------------------------------------------- +# echo file names, paths, compile flags, etc. used during build +#------------------------------------------------------------------------------- + +db_files: + @echo " " + @echo "* EXEC := $(EXEC)" + @echo "* MACFILE := $(MACFILE)" + @echo "* VPFILE := $(VPFILE)" + @echo "* VPATH := $(VPATH)" + @echo "* SRCFILE := $(SRCFILE)" + @echo "* INCS := $(INCS)" + @echo "* SRCS := $(SRCS)" + @echo "* OBJS := $(OBJS)" + @echo "* DEPS := $(DEPS)" +db_flags: + @echo " " + @echo "* cpp := $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR)" + @echo "* cc := cc -c $(CFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F.o := $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F90.o := $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR)" + +#------------------------------------------------------------------------------- +# build rules: MACFILE, cmd-line, or env vars must provide the needed macros +#------------------------------------------------------------------------------- + +$(EXEC): $(OBJS) + $(LD) -o $(EXEC) $(LDFLAGS) $(OBJS) $(ULIBS) $(SLIBS) + +.c.o: + cc $(CFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< + +.F.o: + $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f + $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR) $*.f + +.F90.o: + $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f90 + $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR) $*.f90 + +mostlyclean: + $(RM) -f *.f *.f90 + +clean: + $(RM) -f *.f *.f90 *.d *.mod *.o +# $(RM) -f *.f *.f90 *.d *.$(MOD_SUFFIX) $(OBJS) + +realclean: + $(RM) -f *.f *.f90 *.d *.$(MOD_SUFFIX) $(OBJS) $(EXEC) + +#------------------------------------------------------------------------------- +# Build & include dependency files +#------------------------------------------------------------------------------- +# ASSUMPTIONS: +# o an externally provided dependency generator, $(DEPGEN), is available, +# its cmd line syntax is compatible with the build rules below. Eg, for +# each .o file, there is a corresponding .d (dependency) file, and both +# will be dependent on the same src file, eg. foo.o foo.d : foo.F90 +# Also, the dependancy genorator's capabilities, limitations, and assumptions +# are understood & accepted. 
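For reference, the %.d pattern rules that follow rely on $(DEPGEN) writing ordinary make rules to stdout; those rules are captured into the .d files and pulled in by the guarded "-include $(DEPS)" at the end of the Makefile. The short C program below is only a sketch of that output contract, using made-up names ("foo", "bar_mod") that echo the foo.o example in the ASSUMPTIONS note above -- the real generator is bld/makdep.c, which appears in full just below.

/* Sketch only: emits the kind of make rules the %.d pattern rules expect.
** "foo" and "bar_mod" are hypothetical names; the real makdep.c finds them
** by scanning the source file for #include and Fortran "use" statements. */
#include <stdio.h>

int main(void)
{
    const char *stem   = "foo";      /* hypothetical source file stem (foo.F90)        */
    const char *usemod = "bar_mod";  /* hypothetical module named on a "use" statement */

    /* the object file and its dependency file both depend on the source file */
    printf("%s.o %s.d : %s.F90\n", stem, stem, stem);

    /* one extra rule per discovered dependency: the object depends on the used module's object */
    printf("%s.o : %s.o\n", stem, usemod);

    return 0;
}

Run on its own, this prints "foo.o foo.d : foo.F90" followed by "foo.o : bar_mod.o", which is exactly the shape of rule the dependency include machinery below consumes.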
+#------------------------------------------------------------------------------- + +%.d : %.c + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< | head -3 > $@ +%.d : %.F + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ +%.d : %.F90 + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ +%.d : %.H + @ echo "Building dependency for $@" + @ $(DEPGEN) -f $(INCS) $< > $@ + +# the if-tests prevent DEPS files from being created when they're not needed +ifneq ($(MAKECMDGOALS), db_files) +ifneq ($(MAKECMDGOALS), db_flags) +ifneq ($(MAKECMDGOALS), mostlyclean) +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(MAKECMDGOALS), realclean) + -include $(DEPS) +endif +endif +endif +endif +endif diff --git a/bld/makdep.c b/bld/makdep.c new file mode 100755 index 00000000..ca9a9e78 --- /dev/null +++ b/bld/makdep.c @@ -0,0 +1,446 @@ +/* +** Print to stdout a dependency list for input file specified on the command +** line. A dependency is anything that is referenced by a "#include"' or +** f90 "use" statement. In addition to these dependencies, write a dependency +** rule of "file.d" for each "file.F" or "file.c". This is to accomodate the +** default "make" procedure for CCM. +** +** The name of the module being "use"d is assumed to be case sensitive even +** though the Fortran language is not. In addition, Fortran source files are +** assumed to end in .F. For example, the statement "use Xxx" will translate +** into a dependency of Xxx.o, and the file searched for will be Xxx.F. +** +** Only files which exist in at least one directory named in the current +** directory or one or more "-I" command line arguments will be considered. +** +** An ANSI C compiler is required to build this code. +*/ + +#include <stdio.h> /* printf, puts */ +#include <stdlib.h> /* malloc, getopt */ +#include <string.h> /* strcpy */ +#include <unistd.h> /* access */ +#include <ctype.h> /* isspace, isalnum, tolower */ + +#define MAXLEN 256 +#define TRUE 1 +#define FALSE 0 + +/* +** Linked list struct used for directories to search, and filenames already +** found. +*/ + +struct node { + char *name; + struct node *next; +}; + +/* +** lists of dependencies already found: prevents duplicates. +*/ + +static struct node *list = NULL; /* For #include */ +static struct node *uselist = NULL; /* For use */ +static struct node *suffix_list; /* List of Fortran suffixes to look for */ + +/* +** Function prototypes +*/ + +static void check (char *, struct node *, char *, int); +static int already_found (char *, struct node *); + +main (int argc, char **argv) +{ + int lastdot; /* points to the last . in fname */ + int c; /* return from getopt */ + int recursive = FALSE; /* flag asks for recursive check: + ** i.e.
check the thing being #included for #includes */ + FILE *fpFname; + + char line[MAXLEN]; /* line read from input file */ + char doto[MAXLEN]; /* name of .o file (from input file) */ + char dotd[MAXLEN]; /* name of .o file (from input file) */ + char fullpath[MAXLEN]; /* full pathname to potential dependency for .F files */ + char depnam[MAXLEN]; /* dependency name (from #include or use) */ + char srcfile[MAXLEN]; /* source file .F name from "use" */ + char *lptr; /* points into line */ + char *fname; /* input file name from command line */ + char *fptr; /* pointer to copy into depnam */ + char *relpath; /* input file name or path to it */ + + struct node *dirlist; /* list of directories to search */ + struct node *dirptr; /* loop through dirlist */ + struct node *newnode; /* malloc'd node */ + struct node *last; /* last entry in #include list of found dependencies */ + struct node *uselast; /* last entry in "use" list of found dependencies */ + struct node *sptr; /* pointer into suffix_list */ + + /* + ** Always put "." first in Filepath since gnumake will put "." first + ** regardless of whether it is specified in VPATH + */ + + dirlist = dirptr = (struct node *) malloc (sizeof (struct node)); + dirptr->name = (char *) malloc (2); + strcpy (dirptr->name, "."); + dirptr->next = NULL; + + /* + ** Always look for .F and .F90 files. List can be augmented via "-s" cmd line arg(s). + */ + + suffix_list = (struct node *) malloc (sizeof (struct node)); + suffix_list-> name = (char *) malloc (3); + strcpy (suffix_list->name, ".F"); + + suffix_list->next = (struct node *) malloc (sizeof (struct node)); + sptr = suffix_list->next; + sptr->name = (char *) malloc (5); + strcpy (sptr->name, ".F90"); + sptr->next = NULL; + + while ((c = getopt (argc, argv, "I:rs:f")) != -1) { + + switch(c) { + + case 'f': /* this arg is for backward compatibility */ + break; + case 'I': + dirptr->next = (struct node *) malloc (sizeof (struct node)); + dirptr = dirptr->next; + dirptr->name = (char *) malloc (strlen (optarg) + 1); + strcpy (dirptr->name, optarg); + dirptr->next = NULL; + break; + case 's': + sptr->next = (struct node *) malloc (sizeof (struct node)); + sptr = sptr->next; + sptr->name = (char *) malloc (strlen (optarg) + 2); + strcpy (sptr->name, "."); + strcat (sptr->name, optarg); + sptr->next = NULL; + break; + case 'r': + recursive = TRUE; + break; + case '?': /* Unknown option */ + fprintf (stderr, "%s: Unknown option encountered\n", argv[0]); + } + } + + if (argc == optind+1) { + relpath = argv[optind]; + + } else { + + fprintf (stderr, "Usage: %s [-Idir] [-r] [-s suffix] file\n", argv[0]); + exit (-1); + } + + /* + ** Retain only the filename of the input file for which dependencies are + ** being generated. + */ + + fname = relpath + strlen (relpath) - 1; + while (*fname != '/' && fname > relpath) fname--; + if (*fname == '/') fname++; + + /* + ** Define the .o file by changing tail to ".o" + */ + + strcpy (doto, fname); + for (lastdot = strlen (fname) - 1; doto[lastdot] != '.' 
&& lastdot > 0; + lastdot--); + + if (lastdot == 0) { + fprintf (stderr, "Input file %s needs a head\n", fname); + exit (1); + } + + doto[lastdot] = '\0'; + strcpy (dotd, doto); + strcat (doto, ".o "); + strcat (dotd, ".d "); + + /* + ** write the blah.o blah.d: blah.F (or .c or whatever) dependency to stdout + */ + + fputs (doto , stdout); + fputs (dotd , stdout); + fputs (": " , stdout); + fputs (fname , stdout); + fputs ("\n" , stdout); + + if ((fpFname = fopen (relpath, "r")) == NULL) { + fprintf (stderr, "Can't open file %s\n", relpath); + exit (1); + } + + while (fgets (line, MAXLEN, fpFname) != NULL) { + + /* + ** Check for dependencies of the cpp "include" variety. Allow for lines + ** of the form "# include" + */ + + if (line[0] == '#') { + for (lptr = line+1; isspace (*lptr); lptr++); + if (strncmp (lptr, "include ", 8) == 0) { + for (lptr += 8; *lptr != '<' && *lptr != '"' && *lptr != '\0'; lptr++); + + if (*lptr == '\0') + break; /* Bad input line: ignore */ + + /* + ** Fill in depnam with the dependency (i.e. the thing being + ** #included. Syntax check is not perfect. + */ + + for (fptr = depnam; *++lptr != '>' && *lptr != '"' && *lptr != '\0'; + fptr++) + *fptr = *lptr; + + if (*lptr == '\0') + break; /* Bad input line: ignore */ + + *fptr = '\0'; + + if ( ! already_found (depnam, list)) { /* Skip any duplicates */ + + /* + ** Include only dependencies which are specified by -Ixxx on the + ** command line. These directories are defined by the linked list + ** pointed to by dirlist. + */ + + for (dirptr = dirlist; dirptr != NULL; dirptr = dirptr->next) { + strcpy (fullpath, dirptr->name); + strcat (fullpath, "/"); + strcat (fullpath, depnam); + + /* + ** If the file exists and is readable, add an entry to the "found" + ** list, then write a dependency rule to stdout. + */ + + if (access (fullpath, R_OK) == 0) { + newnode = malloc (sizeof (struct node)); + newnode->name = malloc (strlen (depnam) + 1); + strcpy (newnode->name, depnam); + newnode->next = NULL; + + if (list == NULL) + list = newnode; + else + last->next = newnode; + + last = newnode; + fputs (doto , stdout); + fputs (": " , stdout); + fputs (depnam, stdout); + fputs ("\n", stdout); + + /* + ** Check for nested #include's if flag was set + */ + + if (recursive) check (fullpath, dirlist, doto, 0); + + break; /* Dependency found: process next line */ + } + } + } + } + + } else { + + /* + ** Check for dependencies of the f90 "use" variety. To strictly adhere + ** to fortran std, should allow for spaces between chars of "use". + */ + + for (lptr = line; isspace (*lptr); lptr++); + if (tolower ((int) lptr[0]) == 'u' && + tolower ((int) lptr[1]) == 's' && + tolower ((int) lptr[2]) == 'e') { + + for (lptr += 3; isspace (*lptr); lptr++); + + /* + ** Fill in depnam with the dependency (i.e. the thing being "use"d. + ** Strictly speaking, should disallow numeric starting character. + */ + + for (fptr = depnam; isalnum (*lptr) || *lptr == '_'; (fptr++, lptr++)) + *fptr = *lptr; + *fptr = '\0'; + + /* + ** srcfile is the source file name from which the dependency is + ** generated. Note case sensitivity of depnam. + */ + + if ( ! already_found (depnam, uselist)) { /* Skip any duplicates */ + + /* + ** Loop through suffix list + */ + + for (sptr = suffix_list; sptr != NULL; sptr = sptr->next) { + + strcpy (srcfile, depnam); + strcat (srcfile, sptr->name); + + /* + ** Include only dependencies which are specified by -Ixxx on the + ** command line. These directories are defined by the linked list + ** pointed to by dirlist. 
+ */ + + for (dirptr = dirlist; dirptr != NULL; dirptr = dirptr->next) { + strcpy (fullpath, dirptr->name); + strcat (fullpath, "/"); + strcat (fullpath, srcfile); + + /* + ** If the file exists and is readable, add an entry to the "found" + ** list, then write a dependency rule to stdout. + */ + + if (access (fullpath, R_OK) == 0) { + newnode = malloc (sizeof (struct node)); + newnode->name = malloc (strlen (srcfile) + 1); + strcpy (newnode->name, depnam); + newnode->next = NULL; + + if (uselist == NULL) + uselist = newnode; + else + uselast->next = newnode; + + uselast = newnode; + + fputs (doto , stdout); + fputs (": " , stdout); + fputs (depnam, stdout); + fputs (".o" , stdout); + fputs ("\n" , stdout); + + goto read_next_line; /* Dependency found: process next line */ + + } /* if (access (fullpath... */ + + } /* loop through linked list of directories from Filepath */ + } /* loop through linked list of suffixes */ + } /* if ( ! already_found (srcfile... */ + } /* if (lptr points to "use " */ + } /* else branch of if (line[0] == '#') */ + read_next_line: + continue; + } /* Looping over lines in the file */ + + fclose (fpFname); + return (0); +} + +void check (char *file, struct node *dirlist, char *doto, int recurse_level) +{ + FILE *fpFile; + + char line[MAXLEN], fullpath[MAXLEN]; + char depnam[MAXLEN]; + char *lptr, *fptr; + + struct node *dirptr; + + /* + ** Don't bother checking beyond 3 levels of recursion + */ + + if (recurse_level > 3) { + fprintf (stderr, "More than 3 levels of recursion detected: bailing out\n"); + return; + } + + if ((fpFile = fopen (file, "r")) == NULL) { + fprintf (stderr, "Can't open file %s\n", file); + exit (1); + } + + while (fgets (line, MAXLEN, fpFile) != NULL) { + + /* + ** Check for dependencies of the cpp "include" variety. Allow for lines + ** of the form "# include" + */ + + if (line[0] == '#') { + for (lptr = line+1; isspace (*lptr); lptr++); + if (strncmp (lptr, "include ", 8) == 0) { + for (lptr += 8; *lptr != '<' && *lptr != '"' && *lptr != '\0'; lptr++); + + if (*lptr == '\0') + break; /* Bad input line: ignore */ + + /* + ** Fill in depnam with the dependency (i.e. the thing being + ** #included. Syntax check is not perfect. + */ + + for (fptr = depnam; *++lptr != '>' && *lptr != '"' && *lptr != '\0'; fptr++) + *fptr = *lptr; + + if (*lptr == '\0') + break; /* Bad input line: ignore */ + + *fptr = '\0'; + + /* + ** Don't include dependencies which are not in the Filepath + */ + + for (dirptr = dirlist; dirptr != NULL; dirptr = dirptr->next) { + strcpy (fullpath, dirptr->name); + strcat (fullpath, "/"); + strcat (fullpath, depnam); + + /* + ** If the file exists and is readable, add an entry to the "found" + ** list, then write a dependency rule to stdout. + */ + + if (access (fullpath, R_OK) == 0) { + fputs (doto , stdout); + fputs (": " , stdout); + fputs (depnam, stdout); + fputs ("\n", stdout); + + /* + ** Check for nested #include's + */ + + check (fullpath, dirlist, doto, recurse_level+1); + break; + } + } + } + } + } + fclose (fpFile); + return; +} + +int already_found (char *name, struct node *list) +{ + struct node *ptr; + + for (ptr = list; ptr != NULL; ptr = ptr->next) { + if (strcmp (ptr->name, name) == 0) return (1); + } + return (0); +} diff --git a/compile/comp_access-cm1440-185_ac330 b/compile/comp_access-cm1440-185_ac330 new file mode 100755 index 00000000..a32f5c50 --- /dev/null +++ b/compile/comp_access-cm1440-185_ac330 @@ -0,0 +1,238 @@ +#! 
/bin/csh -f + +set echo on +#setenv DEBUG yes # set to yes for debug + +if ( $1 == '') then + echo '*** Please issue the command like ***' + echo ' > ./comp_auscom_cice.RJ.nP #nproc ' + echo 'here #proc is the number of cpu to be used for CICE5 (e.g. 1, 2, 4, 6...)' + set nproc = 192 + echo *** $nproc processors will be used to run CICE5... *** + sleep 3 + #exit +else + set nproc = $1 + echo *** $nproc processors will be used to run CICE5... *** +endif + +### Change these to your own site and user directory! +### You will need to create a Makefile Macro in bld +### Platform and its architecture ($HOST = xe) +setenv ARCH raijin-185 + +# Set AusCOM home: +setenv AusCOMHOME $cwd:h:h:h + +#---------------------------------------------------------------------- + +### Specialty code +setenv CAM_ICE no # set to yes for CAM runs (single column) +setenv SHRDIR csm_share # location of CCSM shared code +setenv IO_TYPE netcdf # set to none if netcdf library is unavailable + # set to pio for parallel netcdf +setenv DITTO no # reproducible diagnostics +setenv THRD no # set to yes for OpenMP threading +if ( $THRD == 'yes') setenv OMP_NUM_THREADS 2 # positive integer + +setenv ACCESS yes # set to yes for ACCESS +setenv AusCOM yes # set to yes for AusCOM +setenv OASIS3_MCT yes # oasis3-mct version +setenv CHAN MPI1 # MPI1 or MPI2 (always MPI1!) +setenv NICELYR 4 # number of vertical layers in the ice +setenv NSNWLYR 1 # number of vertical layers in the snow +setenv NICECAT 5 # number of ice thickness categories + +### Location of ACCESS system +setenv SYSTEMDIR $AusCOMHOME +echo SYSTEMDIR: $SYSTEMDIR + +### Location of this model (source) +setenv SRCDIR $cwd:h #$SYSTEMDIR/submodels/cice5.0.4 +echo SRCDIR: $SRCDIR + +source ${SRCDIR}/compile/environs.$ARCH # environment variables and loadable modules + +### Location and names of coupling libraries and inclusions +### Location and names of coupling libraries +#setenv CPLLIBDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/lib +setenv CPLLIBDIR /projects/access/apps/oasis3-mct/ompi185/lib +setenv CPLLIBS '-L$(CPLLIBDIR) -lpsmile.${CHAN} -lmct -lmpeu -lscrip' +#echo CPLLIBS: ${CPLLIBS} + +### Location of coupling inclusions +#setenv CPLINCDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/build/lib +setenv CPLINCDIR /projects/access/apps/oasis3-mct/ompi185/include +setenv CPL_INCS '-I$(CPLINCDIR)/psmile.$(CHAN) -I$(CPLINCDIR)/pio -I$(CPLINCDIR)/mct' +#echo CPL_INCS: $CPL_INCS + +### For multi-Layer ice (standard) configuration +setenv N_ILYR 1 # 4 for standard multi-layer ice. 
for ktherm=0, zero-layer thermodynamics + +### Location and name of the generated exectuable +setenv DATESTR `date +%Y%m%d` +setenv BINDIR $SYSTEMDIR/bin +setenv EXE cice_GC3GA7-cm1440-185.${DATESTR}_${nproc}p_${NICELYR}lyr + +### Where this model is compiled +setenv OBJDIR $SRCDIR/compile/build_${CHAN}_{$nproc}p-mct-185 +if !(-d $OBJDIR) mkdir -p $OBJDIR +#/bin/rm $OBJDIR/* +# + +### Grid resolution +#setenv GRID gx3 ; setenv RES 100x116 +#setenv GRID gx1 ; setenv RES 320x384 +#setenv GRID tx1 ; setenv RES 360x240 +#setenv GRID tp1 ; setenv RES 360x300 +setenv GRID tp1 ; setenv RES 1440x1080 + +set NXGLOB = `echo $RES | sed s/x.\*//` +set NYGLOB = `echo $RES | sed s/.\*x//` +echo NXGLOB: $NXGLOB +echo NYGLOB: $NYGLOB + +# Recommendations: +# NTASK equals nprocs in ice_in +# use processor_shape = slenderX1 or slenderX2 in ice_in +# one per processor with distribution_type='cartesian' or +# squarish blocks with distribution_type='rake' +# If BLCKX (BLCKY) does not divide NXGLOB (NYGLOB) evenly, padding +# will be used on the right (top) of the grid. +setenv NTASK $nproc +#setenv BLCKX 45 # x-dimension of blocks ( not including ) +#setenv BLCKY 38 # y-dimension of blocks ( ghost cells ) +setenv BLCKX `expr $NXGLOB / $nproc` +setenv BLCKY `expr $NYGLOB` +echo BLCKX: $BLCKX +echo BLCKY: $BLCKY + +echo +#24 : 12x2 +setenv BLCKX 120 +setenv BLCKY 540 +#144 : 16x9 +setenv BLCKX 90 +setenv BLCKY 120 +#16 : 8x2 +setenv BLCKX 180 +setenv BLCKY 540 +#16 : 4x4 +setenv BLCKX 360 +setenv BLCKY 270 +##216 : 24x9 +#setenv BLCKX 60 +#setenv BLCKY 120 +#192 : 16x12 -->square-ice +setenv BLCKX 90 +setenv BLCKY 90 +#192 : 96x2 +setenv BLCKX 15 +setenv BLCKY 540 + +# may need to increase MXBLCKS with rake distribution or padding +@ a = $NXGLOB * $NYGLOB ; @ b = $BLCKX * $BLCKY * $NTASK +@ m = $a / $b ; setenv MXBLCKS $m ; if ($MXBLCKS == 0) setenv MXBLCKS 1 +echo Autimatically generated: MXBLCKS = $MXBLCKS +##setenv MXBLCKS 8 # if necessary (code will print proper value) +#20110830: increase it to 12 as required by code: +# (but no clue why it never happened before!) 
+#setenv MXBLCKS 12 # if necessary (code will print proper value) + +########################################### +# ars599: 24032014 +# copy from /short/p66/ars599/CICE.v5.0/accice.v504_csiro +# solo_ice_comp +########################################### +### Tracers # match ice_in tracer_nml to conserve memory +setenv TRAGE 1 # set to 1 for ice age tracer +setenv TRFY 0 # set to 1 for first-year ice area tracer +setenv TRLVL 0 # set to 1 for level and deformed ice tracers +setenv TRPND 1 # set to 1 for melt pond tracers +setenv NTRAERO 0 # number of aerosol tracers + # (up to max_aero in ice_domain_size.F90) + # CESM uses 3 aerosol tracers +setenv TRBRI 0 # set to 1 for brine height tracer +setenv NBGCLYR 0 # number of zbgc layers +setenv TRBGCS 0 # number of skeletal layer bgc tracers + # TRBGCS=0 or 2<=TRBGCS<=9) + +### File unit numbers +setenv NUMIN 11 # minimum file unit number +setenv NUMAX 99 # maximum file unit number + +if ($IO_TYPE == 'netcdf') then + setenv IODIR io_netcdf +else if ($IO_TYPE == 'pio') then + setenv IODIR io_pio +else + setenv IODIR io_binary +endif + +########################################### + +setenv CBLD $SRCDIR/bld + +if ( $ARCH == 'UNICOS/mp') setenv ARCH UNICOS +if ( $ARCH == 'UNICOS') then + cp -f $CBLD/Makefile.$ARCH $CBLD/Makefile +else + cp -f $CBLD/Makefile.std $CBLD/Makefile +endif + +if ($NTASK == 1) then + setenv COMMDIR serial +else + setenv COMMDIR mpi +endif +echo COMMDIR: $COMMDIR + +if ($ACCESS == 'yes') then + setenv DRVDIR access +else + setenv DRVDIR cice +endif +echo DRVDIR: $DRVDIR + +cd $OBJDIR + +### List of source code directories (in order of importance). +cat >! Filepath << EOF +$SRCDIR/drivers/$DRVDIR +$SRCDIR/source +$SRCDIR/$COMMDIR +$SRCDIR/$IODIR +$SRCDIR/$SHRDIR +EOF + +if ( $ARCH == 'UNICOS.ORNL.phoenix' ) then + ### use -h command for phoenix + cc -o makdep -h command $CBLD/makdep.c || exit 2 +else if ( $ARCH == 'Linux.ORNL.jaguar' ) then + gcc -g -o makdep $CBLD/makdep.c || exit 2 +else + cc -o makdep $CBLD/makdep.c || exit 2 +endif + +setenv MACFILE $CBLD/Macros.Linux.${ARCH} + +gmake VPFILE=Filepath EXEC=$BINDIR/$EXE \ + NXGLOB=$NXGLOB NYGLOB=$NYGLOB \ + BLCKX=$BLCKX BLCKY=$BLCKY MXBLCKS=$MXBLCKS \ + -f $CBLD/Makefile MACFILE=$MACFILE || exit 2 + +cd .. +pwd +echo NTASK = $NTASK +echo "global N, block_size" +echo "x $NXGLOB, $BLCKX" +echo "y $NYGLOB, $BLCKY" +echo max_blocks = $MXBLCKS +echo $TRAGE = TRAGE, iage tracer +echo $TRFY = TRFY, first-year ice tracer +echo $TRLVL = TRLVL, level-ice tracers +echo $TRPND = TRPND, melt pond tracers +echo $NTRAERO = NTRAERO, number of aerosol tracers +echo $TRBRI = TRBRI, brine height tracer +echo $NBGCLYR = NBGCLYR, number of bio grid layers +echo $TRBGCS = TRBGCS, number of BGC tracers diff --git a/compile/comp_access-cm1440-185_r47 b/compile/comp_access-cm1440-185_r47 new file mode 100755 index 00000000..f3e09f0d --- /dev/null +++ b/compile/comp_access-cm1440-185_r47 @@ -0,0 +1,238 @@ +#! /bin/csh -f + +set echo on +#setenv DEBUG yes # set to yes for debug + +if ( $1 == '') then + echo '*** Please issue the command like ***' + echo ' > ./comp_auscom_cice.RJ.nP #nproc ' + echo 'here #proc is the number of cpu to be used for CICE5 (e.g. 1, 2, 4, 6...)' + set nproc = 192 + echo *** $nproc processors will be used to run CICE5... *** + sleep 3 + #exit +else + set nproc = $1 + echo *** $nproc processors will be used to run CICE5... *** +endif + +### Change these to your own site and user directory! 
+### You will need to create a Makefile Macro in bld +### Platform and its architecture ($HOST = xe) +setenv ARCH raijin-185 + +# Set AusCOM home: +setenv AusCOMHOME $cwd:h:h:h + +#---------------------------------------------------------------------- + +### Specialty code +setenv CAM_ICE no # set to yes for CAM runs (single column) +setenv SHRDIR csm_share # location of CCSM shared code +setenv IO_TYPE netcdf # set to none if netcdf library is unavailable + # set to pio for parallel netcdf +setenv DITTO no # reproducible diagnostics +setenv THRD no # set to yes for OpenMP threading +if ( $THRD == 'yes') setenv OMP_NUM_THREADS 2 # positive integer + +setenv ACCESS yes # set to yes for ACCESS +setenv AusCOM yes # set to yes for AusCOM +setenv OASIS3_MCT yes # oasis3-mct version +setenv CHAN MPI1 # MPI1 or MPI2 (always MPI1!) +setenv NICELYR 4 # number of vertical layers in the ice +setenv NSNWLYR 1 # number of vertical layers in the snow +setenv NICECAT 5 # number of ice thickness categories + +### Location of ACCESS system +setenv SYSTEMDIR $AusCOMHOME +echo SYSTEMDIR: $SYSTEMDIR + +### Location of this model (source) +setenv SRCDIR $cwd:h #$SYSTEMDIR/submodels/cice5.0.4 +echo SRCDIR: $SRCDIR + +source ${SRCDIR}/compile/environs.$ARCH # environment variables and loadable modules + +### Location and names of coupling libraries and inclusions +### Location and names of coupling libraries +#setenv CPLLIBDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/lib +setenv CPLLIBDIR /short/p66/hxy599/ACCESS/submodels/oasis3-mct_local/Linux-185_r47/lib +setenv CPLLIBS '-L$(CPLLIBDIR) -lpsmile.${CHAN} -lmct -lmpeu -lscrip' +#echo CPLLIBS: ${CPLLIBS} + +### Location of coupling inclusions +#setenv CPLINCDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/build/lib +setenv CPLINCDIR /short/p66/hxy599/ACCESS/submodels/oasis3-mct_local/Linux-185_r47/build/lib +setenv CPL_INCS '-I$(CPLINCDIR)/psmile.$(CHAN) -I$(CPLINCDIR)/pio -I$(CPLINCDIR)/mct' +#echo CPL_INCS: $CPL_INCS + +### For multi-Layer ice (standard) configuration +setenv N_ILYR 1 # 4 for standard multi-layer ice. for ktherm=0, zero-layer thermodynamics + +### Location and name of the generated exectuable +setenv DATESTR `date +%Y%m%d` +setenv BINDIR $SYSTEMDIR/bin +setenv EXE cice_GC3GA7-cm1440-185.${DATESTR}_${nproc}p_${NICELYR}lyr + +### Where this model is compiled +setenv OBJDIR $SRCDIR/compile/build_${CHAN}_{$nproc}p-mct-185 +if !(-d $OBJDIR) mkdir -p $OBJDIR +#/bin/rm $OBJDIR/* +# + +### Grid resolution +#setenv GRID gx3 ; setenv RES 100x116 +#setenv GRID gx1 ; setenv RES 320x384 +#setenv GRID tx1 ; setenv RES 360x240 +#setenv GRID tp1 ; setenv RES 360x300 +setenv GRID tp1 ; setenv RES 1440x1080 + +set NXGLOB = `echo $RES | sed s/x.\*//` +set NYGLOB = `echo $RES | sed s/.\*x//` +echo NXGLOB: $NXGLOB +echo NYGLOB: $NYGLOB + +# Recommendations: +# NTASK equals nprocs in ice_in +# use processor_shape = slenderX1 or slenderX2 in ice_in +# one per processor with distribution_type='cartesian' or +# squarish blocks with distribution_type='rake' +# If BLCKX (BLCKY) does not divide NXGLOB (NYGLOB) evenly, padding +# will be used on the right (top) of the grid. 
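To make the block bookkeeping above concrete: a few lines below, the csh "@" arithmetic estimates MXBLCKS as the integer quotient NXGLOB*NYGLOB / (BLCKX*BLCKY*NTASK), clamped to at least 1, and padding is applied whenever BLCKX or BLCKY does not divide the corresponding global dimension. The standalone C sketch below simply reproduces that arithmetic for the values this script ends up using (RES 1440x1080, 192 PEs, and the final BLCKX=15, BLCKY=540 pair, since each later setenv overrides the earlier ones); it is not part of the build.

/* Sketch only: mirrors the MXBLCKS estimate made by the csh arithmetic below. */
#include <stdio.h>

int main(void)
{
    int nxglob = 1440, nyglob = 1080;  /* global grid size (RES 1440x1080)          */
    int ntask  = 192;                  /* number of MPI tasks (nproc)               */
    int blckx  = 15,   blcky = 540;    /* block size finally selected below (96x2)  */

    /* blocks per direction, rounded up so padded partial blocks are counted */
    int nbx = (nxglob + blckx - 1) / blckx;
    int nby = (nyglob + blcky - 1) / blcky;

    /* the script's estimate: total cells / (cells per block * tasks), minimum 1 */
    int mxblcks = (nxglob * nyglob) / (blckx * blcky * ntask);
    if (mxblcks == 0) mxblcks = 1;

    printf("blocks: %d x %d = %d total on %d tasks, MXBLCKS estimate = %d\n",
           nbx, nby, nbx * nby, ntask, mxblcks);
    return 0;
}

With these numbers the blocks tile the grid exactly (96 x 2 = 192 blocks, one per task) and the estimate is 1; the commented-out MXBLCKS override further down exists for configurations where this estimate turns out too small and the code asks for a larger value.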
+setenv NTASK $nproc +#setenv BLCKX 45 # x-dimension of blocks ( not including ) +#setenv BLCKY 38 # y-dimension of blocks ( ghost cells ) +setenv BLCKX `expr $NXGLOB / $nproc` +setenv BLCKY `expr $NYGLOB` +echo BLCKX: $BLCKX +echo BLCKY: $BLCKY + +echo +#24 : 12x2 +setenv BLCKX 120 +setenv BLCKY 540 +#144 : 16x9 +setenv BLCKX 90 +setenv BLCKY 120 +#16 : 8x2 +setenv BLCKX 180 +setenv BLCKY 540 +#16 : 4x4 +setenv BLCKX 360 +setenv BLCKY 270 +##216 : 24x9 +#setenv BLCKX 60 +#setenv BLCKY 120 +#192 : 16x12 -->square-ice +setenv BLCKX 90 +setenv BLCKY 90 +#192 : 96x2 +setenv BLCKX 15 +setenv BLCKY 540 + +# may need to increase MXBLCKS with rake distribution or padding +@ a = $NXGLOB * $NYGLOB ; @ b = $BLCKX * $BLCKY * $NTASK +@ m = $a / $b ; setenv MXBLCKS $m ; if ($MXBLCKS == 0) setenv MXBLCKS 1 +echo Autimatically generated: MXBLCKS = $MXBLCKS +##setenv MXBLCKS 8 # if necessary (code will print proper value) +#20110830: increase it to 12 as required by code: +# (but no clue why it never happened before!) +#setenv MXBLCKS 12 # if necessary (code will print proper value) + +########################################### +# ars599: 24032014 +# copy from /short/p66/ars599/CICE.v5.0/accice.v504_csiro +# solo_ice_comp +########################################### +### Tracers # match ice_in tracer_nml to conserve memory +setenv TRAGE 1 # set to 1 for ice age tracer +setenv TRFY 0 # set to 1 for first-year ice area tracer +setenv TRLVL 0 # set to 1 for level and deformed ice tracers +setenv TRPND 1 # set to 1 for melt pond tracers +setenv NTRAERO 0 # number of aerosol tracers + # (up to max_aero in ice_domain_size.F90) + # CESM uses 3 aerosol tracers +setenv TRBRI 0 # set to 1 for brine height tracer +setenv NBGCLYR 0 # number of zbgc layers +setenv TRBGCS 0 # number of skeletal layer bgc tracers + # TRBGCS=0 or 2<=TRBGCS<=9) + +### File unit numbers +setenv NUMIN 11 # minimum file unit number +setenv NUMAX 99 # maximum file unit number + +if ($IO_TYPE == 'netcdf') then + setenv IODIR io_netcdf +else if ($IO_TYPE == 'pio') then + setenv IODIR io_pio +else + setenv IODIR io_binary +endif + +########################################### + +setenv CBLD $SRCDIR/bld + +if ( $ARCH == 'UNICOS/mp') setenv ARCH UNICOS +if ( $ARCH == 'UNICOS') then + cp -f $CBLD/Makefile.$ARCH $CBLD/Makefile +else + cp -f $CBLD/Makefile.std $CBLD/Makefile +endif + +if ($NTASK == 1) then + setenv COMMDIR serial +else + setenv COMMDIR mpi +endif +echo COMMDIR: $COMMDIR + +if ($ACCESS == 'yes') then + setenv DRVDIR access +else + setenv DRVDIR cice +endif +echo DRVDIR: $DRVDIR + +cd $OBJDIR + +### List of source code directories (in order of importance). +cat >! Filepath << EOF +$SRCDIR/drivers/$DRVDIR +$SRCDIR/source +$SRCDIR/$COMMDIR +$SRCDIR/$IODIR +$SRCDIR/$SHRDIR +EOF + +if ( $ARCH == 'UNICOS.ORNL.phoenix' ) then + ### use -h command for phoenix + cc -o makdep -h command $CBLD/makdep.c || exit 2 +else if ( $ARCH == 'Linux.ORNL.jaguar' ) then + gcc -g -o makdep $CBLD/makdep.c || exit 2 +else + cc -o makdep $CBLD/makdep.c || exit 2 +endif + +setenv MACFILE $CBLD/Macros.Linux.${ARCH} + +gmake VPFILE=Filepath EXEC=$BINDIR/$EXE \ + NXGLOB=$NXGLOB NYGLOB=$NYGLOB \ + BLCKX=$BLCKX BLCKY=$BLCKY MXBLCKS=$MXBLCKS \ + -f $CBLD/Makefile MACFILE=$MACFILE || exit 2 + +cd .. 
+pwd +echo NTASK = $NTASK +echo "global N, block_size" +echo "x $NXGLOB, $BLCKX" +echo "y $NYGLOB, $BLCKY" +echo max_blocks = $MXBLCKS +echo $TRAGE = TRAGE, iage tracer +echo $TRFY = TRFY, first-year ice tracer +echo $TRLVL = TRLVL, level-ice tracers +echo $TRPND = TRPND, melt pond tracers +echo $NTRAERO = NTRAERO, number of aerosol tracers +echo $TRBRI = TRBRI, brine height tracer +echo $NBGCLYR = NBGCLYR, number of bio grid layers +echo $TRBGCS = TRBGCS, number of BGC tracers diff --git a/compile/comp_access-cm360-185 b/compile/comp_access-cm360-185 new file mode 100755 index 00000000..b611c7ce --- /dev/null +++ b/compile/comp_access-cm360-185 @@ -0,0 +1,221 @@ +#! /bin/csh -f + +set echo on +#setenv DEBUG yes # set to yes for debug + +if ( $1 == '') then + echo '*** Please issue the command like ***' + echo ' > ./comp_auscom_cice.RJ.nP #nproc ' + echo 'here #proc is the number of cpu to be used for CICE5 (e.g. 1, 2, 4, 6...)' + set nproc = 16 + echo *** $nproc processors will be used to run CICE5... *** + sleep 3 + #exit +else + set nproc = $1 + echo *** $nproc processors will be used to run CICE5... *** +endif + +### Change these to your own site and user directory! +### You will need to create a Makefile Macro in bld +### Platform and its architecture ($HOST = xe) +setenv ARCH raijin-185 + +# Set AusCOM home: +setenv AusCOMHOME $cwd:h:h:h + +#---------------------------------------------------------------------- + +### Specialty code +setenv CAM_ICE no # set to yes for CAM runs (single column) +setenv SHRDIR csm_share # location of CCSM shared code +setenv IO_TYPE netcdf # set to none if netcdf library is unavailable + # set to pio for parallel netcdf +setenv DITTO no # reproducible diagnostics +setenv THRD no # set to yes for OpenMP threading +if ( $THRD == 'yes') setenv OMP_NUM_THREADS 2 # positive integer + +setenv ACCESS yes # set to yes for ACCESS +setenv AusCOM yes # set to yes for AusCOM +setenv OASIS3_MCT yes # oasis3-mct version +setenv CHAN MPI1 # MPI1 or MPI2 (always MPI1!) +setenv NICELYR 1 # number of vertical layers in the ice +setenv NSNWLYR 1 # number of vertical layers in the snow +setenv NICECAT 5 # number of ice thickness categories + +### Location of ACCESS system +setenv SYSTEMDIR $AusCOMHOME +echo SYSTEMDIR: $SYSTEMDIR + +### Location of this model (source) +setenv SRCDIR $cwd:h #$SYSTEMDIR/submodels/cice5.0.4 +echo SRCDIR: $SRCDIR + +source ${SRCDIR}/compile/environs.$ARCH # environment variables and loadable modules + +### Location and names of coupling libraries and inclusions +### Location and names of coupling libraries +setenv CPLLIBDIR /short/p66/hxy599/ACCESS/submodels/oasis3-mct_local/Linux-185/lib +setenv CPLLIBS '-L$(CPLLIBDIR) -lpsmile.${CHAN} -lmct -lmpeu -lscrip' +#echo CPLLIBS: ${CPLLIBS} + +### Location of coupling inclusions +#setenv CPLINCDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/build/lib +setenv CPLINCDIR /short/p66/hxy599/ACCESS/submodels/oasis3-mct_local/Linux-185/build/lib +setenv CPL_INCS '-I$(CPLINCDIR)/psmile.$(CHAN) -I$(CPLINCDIR)/pio -I$(CPLINCDIR)/mct' +#echo CPL_INCS: $CPL_INCS + +### For multi-Layer ice (standard) configuration +setenv N_ILYR 1 # 4 for standard multi-layer ice. 
for ktherm=0, zero-layer thermodynamics + +### Location and name of the generated exectuable +setenv DATESTR `date +%Y%m%d` +setenv BINDIR $SYSTEMDIR/bin +setenv EXE cice_GC3-cm360-185.${DATESTR}_${nproc}p_${NICELYR}lyr + +### Where this model is compiled +setenv OBJDIR $SRCDIR/compile/build_${CHAN}_{$nproc}p-mct-185 +if !(-d $OBJDIR) mkdir -p $OBJDIR +#/bin/rm $OBJDIR/* +# + +### Grid resolution +#setenv GRID gx3 ; setenv RES 100x116 +#setenv GRID gx1 ; setenv RES 320x384 +#setenv GRID tx1 ; setenv RES 360x240 +setenv GRID tp1 ; setenv RES 360x300 + +set NXGLOB = `echo $RES | sed s/x.\*//` +set NYGLOB = `echo $RES | sed s/.\*x//` +echo NXGLOB: $NXGLOB +echo NYGLOB: $NYGLOB + +# Recommendations: +# NTASK equals nprocs in ice_in +# use processor_shape = slenderX1 or slenderX2 in ice_in +# one per processor with distribution_type='cartesian' or +# squarish blocks with distribution_type='rake' +# If BLCKX (BLCKY) does not divide NXGLOB (NYGLOB) evenly, padding +# will be used on the right (top) of the grid. +setenv NTASK $nproc +#setenv BLCKX 45 # x-dimension of blocks ( not including ) +#setenv BLCKY 38 # y-dimension of blocks ( ghost cells ) +setenv BLCKX `expr $NXGLOB / $nproc` +setenv BLCKY `expr $NYGLOB` +echo BLCKX: $BLCKX +echo BLCKY: $BLCKY + +echo +#24 : 12x2 +setenv BLCKX 30 +setenv BLCKY 150 +#16 : 8x2 +setenv BLCKX 45 +setenv BLCKY 150 + +# may need to increase MXBLCKS with rake distribution or padding +@ a = $NXGLOB * $NYGLOB ; @ b = $BLCKX * $BLCKY * $NTASK +@ m = $a / $b ; setenv MXBLCKS $m ; if ($MXBLCKS == 0) setenv MXBLCKS 1 +echo Autimatically generated: MXBLCKS = $MXBLCKS +##setenv MXBLCKS 8 # if necessary (code will print proper value) +#20110830: increase it to 12 as required by code: +# (but no clue why it never happened before!) +#setenv MXBLCKS 12 # if necessary (code will print proper value) + +########################################### +# ars599: 24032014 +# copy from /short/p66/ars599/CICE.v5.0/accice.v504_csiro +# solo_ice_comp +########################################### +### Tracers # match ice_in tracer_nml to conserve memory +setenv TRAGE 1 # set to 1 for ice age tracer +setenv TRFY 1 # set to 1 for first-year ice area tracer +setenv TRLVL 1 # set to 1 for level and deformed ice tracers +setenv TRPND 1 # set to 1 for melt pond tracers +setenv NTRAERO 0 # number of aerosol tracers + # (up to max_aero in ice_domain_size.F90) + # CESM uses 3 aerosol tracers +setenv TRBRI 0 # set to 1 for brine height tracer +setenv NBGCLYR 7 # number of zbgc layers +setenv TRBGCS 0 # number of skeletal layer bgc tracers + # TRBGCS=0 or 2<=TRBGCS<=9) + +### File unit numbers +setenv NUMIN 11 # minimum file unit number +setenv NUMAX 99 # maximum file unit number + +if ($IO_TYPE == 'netcdf') then + setenv IODIR io_netcdf +else if ($IO_TYPE == 'pio') then + setenv IODIR io_pio +else + setenv IODIR io_binary +endif + +########################################### + +setenv CBLD $SRCDIR/bld + +if ( $ARCH == 'UNICOS/mp') setenv ARCH UNICOS +if ( $ARCH == 'UNICOS') then + cp -f $CBLD/Makefile.$ARCH $CBLD/Makefile +else + cp -f $CBLD/Makefile.std $CBLD/Makefile +endif + +if ($NTASK == 1) then + setenv COMMDIR serial +else + setenv COMMDIR mpi +endif +echo COMMDIR: $COMMDIR + +if ($ACCESS == 'yes') then + setenv DRVDIR access +else + setenv DRVDIR cice +endif +echo DRVDIR: $DRVDIR + +cd $OBJDIR + +### List of source code directories (in order of importance). +cat >! 
Filepath << EOF +$SRCDIR/drivers/$DRVDIR +$SRCDIR/source +$SRCDIR/$COMMDIR +$SRCDIR/$IODIR +$SRCDIR/$SHRDIR +EOF + +if ( $ARCH == 'UNICOS.ORNL.phoenix' ) then + ### use -h command for phoenix + cc -o makdep -h command $CBLD/makdep.c || exit 2 +else if ( $ARCH == 'Linux.ORNL.jaguar' ) then + gcc -g -o makdep $CBLD/makdep.c || exit 2 +else + cc -o makdep $CBLD/makdep.c || exit 2 +endif + +setenv MACFILE $CBLD/Macros.Linux.${ARCH} + +gmake VPFILE=Filepath EXEC=$BINDIR/$EXE \ + NXGLOB=$NXGLOB NYGLOB=$NYGLOB \ + BLCKX=$BLCKX BLCKY=$BLCKY MXBLCKS=$MXBLCKS \ + -f $CBLD/Makefile MACFILE=$MACFILE || exit 2 + +cd .. +pwd +echo NTASK = $NTASK +echo "global N, block_size" +echo "x $NXGLOB, $BLCKX" +echo "y $NYGLOB, $BLCKY" +echo max_blocks = $MXBLCKS +echo $TRAGE = TRAGE, iage tracer +echo $TRFY = TRFY, first-year ice tracer +echo $TRLVL = TRLVL, level-ice tracers +echo $TRPND = TRPND, melt pond tracers +echo $NTRAERO = NTRAERO, number of aerosol tracers +echo $TRBRI = TRBRI, brine height tracer +echo $NBGCLYR = NBGCLYR, number of bio grid layers +echo $TRBGCS = TRBGCS, number of BGC tracers diff --git a/compile/environs.raijin-185 b/compile/environs.raijin-185 new file mode 100755 index 00000000..b45a04b1 --- /dev/null +++ b/compile/environs.raijin-185 @@ -0,0 +1,13 @@ + echo $MODULE_VERSION + #source /opt/Modules/3.2.6/init/csh + source /etc/profile.d/nf_csh_modules + module purge +#module load intel-fc/13.5.192 +#module load intel-cc/13.5.192 +#module load intel-mkl/13.5.192 +module load intel-fc/15.0.1.133 +module load intel-cc/15.0.1.133 +module load intel-mkl/15.0.1.133 +module load openmpi/1.8.5 +module load netcdf/4.3.2 +module list diff --git a/csm_share/README b/csm_share/README new file mode 100644 index 00000000..35a6d4d7 --- /dev/null +++ b/csm_share/README @@ -0,0 +1,10 @@ +This directory contains shared code from CCSM. + +When CICE is coupled to CCSM, the modules in this directory are + not compiled, because identical modules from the csm_share + directory are compiled. + +When CICE is not coupled to CCSM, the modules in this directory + are compiled because they are used by some of the CICE modules. + For example, the orbital module is used by the delta-Eddington + shortwave scheme. diff --git a/csm_share/shr_orb_mod.F90 b/csm_share/shr_orb_mod.F90 new file mode 100644 index 00000000..74d7d43e --- /dev/null +++ b/csm_share/shr_orb_mod.F90 @@ -0,0 +1,671 @@ +!=============================================================================== +! SVN $Id: shr_orb_mod.F90 25434 2010-11-04 22:46:24Z tcraig $ +! SVN $URL: https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_121022/shr/shr_orb_mod.F90 $ +!=============================================================================== + +MODULE shr_orb_mod + +!echmod This module was borrowed from the CCSM shared code library and +!echmod modified to work within CICE without other CCSM shared code. All +!echmod such changes are marked with "!echmod" -- E.C. 
Hunke, 2007 +!echmod +!echmod use shr_kind_mod +!echmod use shr_sys_mod +!echmod use shr_const_mod +!echmod use shr_log_mod, only: s_loglev => shr_log_Level +!echmod use shr_log_mod, only: s_logunit => shr_log_Unit + use ice_kinds_mod, only: dbl_kind, int_kind !echmod + use ice_exit, only: abort_ice !echmod + use ice_constants, only: pi !echmod + use ice_fileunits, only: ice_stdout !echmod + + IMPLICIT none + + integer (kind=int_kind), parameter :: SHR_KIND_R8 = dbl_kind !echmod + integer (kind=int_kind), parameter :: SHR_KIND_IN = int_kind !echmod + integer (kind=int_kind), parameter :: s_logunit = ice_stdout !echmod + integer (kind=int_kind), parameter :: s_loglev = 0 !echmod + + !---------------------------------------------------------------------------- + ! PUBLIC: Interfaces and global data + !---------------------------------------------------------------------------- +! public :: shr_orb_cosz + public :: shr_orb_params + public :: shr_orb_decl + public :: shr_orb_print + + real (SHR_KIND_R8),public,parameter :: SHR_ORB_UNDEF_REAL = 1.e36_SHR_KIND_R8 ! undefined real + integer(SHR_KIND_IN),public,parameter :: SHR_ORB_UNDEF_INT = 2000000000 ! undefined int + + !---------------------------------------------------------------------------- + ! PRIVATE: by default everything else is private to this module + !---------------------------------------------------------------------------- + private + +!echmod real (SHR_KIND_R8),parameter :: pi = SHR_CONST_PI + real (SHR_KIND_R8),parameter :: SHR_ORB_ECCEN_MIN = 0.0_SHR_KIND_R8 ! min value for eccen + real (SHR_KIND_R8),parameter :: SHR_ORB_ECCEN_MAX = 0.1_SHR_KIND_R8 ! max value for eccen + real (SHR_KIND_R8),parameter :: SHR_ORB_OBLIQ_MIN = -90.0_SHR_KIND_R8 ! min value for obliq + real (SHR_KIND_R8),parameter :: SHR_ORB_OBLIQ_MAX = +90.0_SHR_KIND_R8 ! max value for obliq + real (SHR_KIND_R8),parameter :: SHR_ORB_MVELP_MIN = 0.0_SHR_KIND_R8 ! min value for mvelp + real (SHR_KIND_R8),parameter :: SHR_ORB_MVELP_MAX = 360.0_SHR_KIND_R8 ! max value for mvelp + +!=============================================================================== +CONTAINS +!=============================================================================== + +!echmod - this is computed in ice_orbital.F90 +!real(SHR_KIND_R8) FUNCTION shr_orb_cosz(jday,lat,lon,declin) + + !---------------------------------------------------------------------------- + ! + ! FUNCTION to return the cosine of the solar zenith angle. + ! Assumes 365.0 days/year. + ! + !--------------- Code History ----------------------------------------------- + ! + ! Original Author: Brian Kauffman + ! Date: Jan/98 + ! History: adapted from statement FUNCTION in share/orb_cosz.h + ! + !---------------------------------------------------------------------------- + +! real (SHR_KIND_R8),intent(in) :: jday ! Julian cal day (1.xx to 365.xx) +! real (SHR_KIND_R8),intent(in) :: lat ! Centered latitude (radians) +! real (SHR_KIND_R8),intent(in) :: lon ! Centered longitude (radians) +! real (SHR_KIND_R8),intent(in) :: declin ! 
Solar declination (radians) + + !---------------------------------------------------------------------------- + +!echmod shr_orb_cosz = sin(lat)*sin(declin) - & +!echmod & cos(lat)*cos(declin)*cos(jday*2.0_SHR_KIND_R8*pi + lon) + +!END FUNCTION shr_orb_cosz + +!=============================================================================== + +SUBROUTINE shr_orb_params( iyear_AD , eccen , obliq , mvelp , & + & obliqr , lambm0 , mvelpp, log_print ) + +!------------------------------------------------------------------------------- +! +! Calculate earths orbital parameters using Dave Threshers formula which +! came from Berger, Andre. 1978 "A Simple Algorithm to Compute Long-Term +! Variations of Daily Insolation". Contribution 18, Institute of Astronomy +! and Geophysics, Universite Catholique de Louvain, Louvain-la-Neuve, Belgium +! +!------------------------------Code history------------------------------------- +! +! Original Author: Erik Kluzek +! Date: Oct/97 +! +!------------------------------------------------------------------------------- + + !----------------------------- Arguments ------------------------------------ + integer(SHR_KIND_IN),intent(in) :: iyear_AD ! Year to calculate orbit for + real (SHR_KIND_R8),intent(inout) :: eccen ! orbital eccentricity + real (SHR_KIND_R8),intent(inout) :: obliq ! obliquity in degrees + real (SHR_KIND_R8),intent(inout) :: mvelp ! moving vernal equinox long + real (SHR_KIND_R8),intent(out) :: obliqr ! Earths obliquity in rad + real (SHR_KIND_R8),intent(out) :: lambm0 ! Mean long of perihelion at + ! vernal equinox (radians) + real (SHR_KIND_R8),intent(out) :: mvelpp ! moving vernal equinox long + ! of perihelion plus pi (rad) + logical ,intent(in) :: log_print ! Flags print of status/error + + !------------------------------ Parameters ---------------------------------- + integer(SHR_KIND_IN),parameter :: poblen =47 ! # of elements in series wrt obliquity + integer(SHR_KIND_IN),parameter :: pecclen=19 ! # of elements in series wrt eccentricity + integer(SHR_KIND_IN),parameter :: pmvelen=78 ! # of elements in series wrt vernal equinox + real (SHR_KIND_R8),parameter :: psecdeg = 1.0_SHR_KIND_R8/3600.0_SHR_KIND_R8 ! arc sec to deg conversion + + real (SHR_KIND_R8) :: degrad = pi/180._SHR_KIND_R8 ! degree to radian conversion factor + real (SHR_KIND_R8) :: yb4_1950AD ! number of years before 1950 AD + + character(len=*),parameter :: subname = '(shr_orb_params)' + + ! Cosine series data for computation of obliquity: amplitude (arc seconds), + ! rate (arc seconds/year), phase (degrees). + + real (SHR_KIND_R8), parameter :: obamp(poblen) = & ! 
amplitudes for obliquity cos series + & (/ -2462.2214466_SHR_KIND_R8, -857.3232075_SHR_KIND_R8, -629.3231835_SHR_KIND_R8, & + & -414.2804924_SHR_KIND_R8, -311.7632587_SHR_KIND_R8, 308.9408604_SHR_KIND_R8, & + & -162.5533601_SHR_KIND_R8, -116.1077911_SHR_KIND_R8, 101.1189923_SHR_KIND_R8, & + & -67.6856209_SHR_KIND_R8, 24.9079067_SHR_KIND_R8, 22.5811241_SHR_KIND_R8, & + & -21.1648355_SHR_KIND_R8, -15.6549876_SHR_KIND_R8, 15.3936813_SHR_KIND_R8, & + & 14.6660938_SHR_KIND_R8, -11.7273029_SHR_KIND_R8, 10.2742696_SHR_KIND_R8, & + & 6.4914588_SHR_KIND_R8, 5.8539148_SHR_KIND_R8, -5.4872205_SHR_KIND_R8, & + & -5.4290191_SHR_KIND_R8, 5.1609570_SHR_KIND_R8, 5.0786314_SHR_KIND_R8, & + & -4.0735782_SHR_KIND_R8, 3.7227167_SHR_KIND_R8, 3.3971932_SHR_KIND_R8, & + & -2.8347004_SHR_KIND_R8, -2.6550721_SHR_KIND_R8, -2.5717867_SHR_KIND_R8, & + & -2.4712188_SHR_KIND_R8, 2.4625410_SHR_KIND_R8, 2.2464112_SHR_KIND_R8, & + & -2.0755511_SHR_KIND_R8, -1.9713669_SHR_KIND_R8, -1.8813061_SHR_KIND_R8, & + & -1.8468785_SHR_KIND_R8, 1.8186742_SHR_KIND_R8, 1.7601888_SHR_KIND_R8, & + & -1.5428851_SHR_KIND_R8, 1.4738838_SHR_KIND_R8, -1.4593669_SHR_KIND_R8, & + & 1.4192259_SHR_KIND_R8, -1.1818980_SHR_KIND_R8, 1.1756474_SHR_KIND_R8, & + & -1.1316126_SHR_KIND_R8, 1.0896928_SHR_KIND_R8/) + + real (SHR_KIND_R8), parameter :: obrate(poblen) = & ! rates for obliquity cosine series + & (/ 31.609974_SHR_KIND_R8, 32.620504_SHR_KIND_R8, 24.172203_SHR_KIND_R8, & + & 31.983787_SHR_KIND_R8, 44.828336_SHR_KIND_R8, 30.973257_SHR_KIND_R8, & + & 43.668246_SHR_KIND_R8, 32.246691_SHR_KIND_R8, 30.599444_SHR_KIND_R8, & + & 42.681324_SHR_KIND_R8, 43.836462_SHR_KIND_R8, 47.439436_SHR_KIND_R8, & + & 63.219948_SHR_KIND_R8, 64.230478_SHR_KIND_R8, 1.010530_SHR_KIND_R8, & + & 7.437771_SHR_KIND_R8, 55.782177_SHR_KIND_R8, 0.373813_SHR_KIND_R8, & + & 13.218362_SHR_KIND_R8, 62.583231_SHR_KIND_R8, 63.593761_SHR_KIND_R8, & + & 76.438310_SHR_KIND_R8, 45.815258_SHR_KIND_R8, 8.448301_SHR_KIND_R8, & + & 56.792707_SHR_KIND_R8, 49.747842_SHR_KIND_R8, 12.058272_SHR_KIND_R8, & + & 75.278220_SHR_KIND_R8, 65.241008_SHR_KIND_R8, 64.604291_SHR_KIND_R8, & + & 1.647247_SHR_KIND_R8, 7.811584_SHR_KIND_R8, 12.207832_SHR_KIND_R8, & + & 63.856665_SHR_KIND_R8, 56.155990_SHR_KIND_R8, 77.448840_SHR_KIND_R8, & + & 6.801054_SHR_KIND_R8, 62.209418_SHR_KIND_R8, 20.656133_SHR_KIND_R8, & + & 48.344406_SHR_KIND_R8, 55.145460_SHR_KIND_R8, 69.000539_SHR_KIND_R8, & + & 11.071350_SHR_KIND_R8, 74.291298_SHR_KIND_R8, 11.047742_SHR_KIND_R8, & + & 0.636717_SHR_KIND_R8, 12.844549_SHR_KIND_R8/) + + real (SHR_KIND_R8), parameter :: obphas(poblen) = & ! 
phases for obliquity cosine series + & (/ 251.9025_SHR_KIND_R8, 280.8325_SHR_KIND_R8, 128.3057_SHR_KIND_R8, & + & 292.7252_SHR_KIND_R8, 15.3747_SHR_KIND_R8, 263.7951_SHR_KIND_R8, & + & 308.4258_SHR_KIND_R8, 240.0099_SHR_KIND_R8, 222.9725_SHR_KIND_R8, & + & 268.7809_SHR_KIND_R8, 316.7998_SHR_KIND_R8, 319.6024_SHR_KIND_R8, & + & 143.8050_SHR_KIND_R8, 172.7351_SHR_KIND_R8, 28.9300_SHR_KIND_R8, & + & 123.5968_SHR_KIND_R8, 20.2082_SHR_KIND_R8, 40.8226_SHR_KIND_R8, & + & 123.4722_SHR_KIND_R8, 155.6977_SHR_KIND_R8, 184.6277_SHR_KIND_R8, & + & 267.2772_SHR_KIND_R8, 55.0196_SHR_KIND_R8, 152.5268_SHR_KIND_R8, & + & 49.1382_SHR_KIND_R8, 204.6609_SHR_KIND_R8, 56.5233_SHR_KIND_R8, & + & 200.3284_SHR_KIND_R8, 201.6651_SHR_KIND_R8, 213.5577_SHR_KIND_R8, & + & 17.0374_SHR_KIND_R8, 164.4194_SHR_KIND_R8, 94.5422_SHR_KIND_R8, & + & 131.9124_SHR_KIND_R8, 61.0309_SHR_KIND_R8, 296.2073_SHR_KIND_R8, & + & 135.4894_SHR_KIND_R8, 114.8750_SHR_KIND_R8, 247.0691_SHR_KIND_R8, & + & 256.6114_SHR_KIND_R8, 32.1008_SHR_KIND_R8, 143.6804_SHR_KIND_R8, & + & 16.8784_SHR_KIND_R8, 160.6835_SHR_KIND_R8, 27.5932_SHR_KIND_R8, & + & 348.1074_SHR_KIND_R8, 82.6496_SHR_KIND_R8/) + + ! Cosine/sine series data for computation of eccentricity and fixed vernal + ! equinox longitude of perihelion (fvelp): amplitude, + ! rate (arc seconds/year), phase (degrees). + + real (SHR_KIND_R8), parameter :: ecamp (pecclen) = & ! ampl for eccen/fvelp cos/sin series + & (/ 0.01860798_SHR_KIND_R8, 0.01627522_SHR_KIND_R8, -0.01300660_SHR_KIND_R8, & + & 0.00988829_SHR_KIND_R8, -0.00336700_SHR_KIND_R8, 0.00333077_SHR_KIND_R8, & + & -0.00235400_SHR_KIND_R8, 0.00140015_SHR_KIND_R8, 0.00100700_SHR_KIND_R8, & + & 0.00085700_SHR_KIND_R8, 0.00064990_SHR_KIND_R8, 0.00059900_SHR_KIND_R8, & + & 0.00037800_SHR_KIND_R8, -0.00033700_SHR_KIND_R8, 0.00027600_SHR_KIND_R8, & + & 0.00018200_SHR_KIND_R8, -0.00017400_SHR_KIND_R8, -0.00012400_SHR_KIND_R8, & + & 0.00001250_SHR_KIND_R8/) + + real (SHR_KIND_R8), parameter :: ecrate(pecclen) = & ! rates for eccen/fvelp cos/sin series + & (/ 4.2072050_SHR_KIND_R8, 7.3460910_SHR_KIND_R8, 17.8572630_SHR_KIND_R8, & + & 17.2205460_SHR_KIND_R8, 16.8467330_SHR_KIND_R8, 5.1990790_SHR_KIND_R8, & + & 18.2310760_SHR_KIND_R8, 26.2167580_SHR_KIND_R8, 6.3591690_SHR_KIND_R8, & + & 16.2100160_SHR_KIND_R8, 3.0651810_SHR_KIND_R8, 16.5838290_SHR_KIND_R8, & + & 18.4939800_SHR_KIND_R8, 6.1909530_SHR_KIND_R8, 18.8677930_SHR_KIND_R8, & + & 17.4255670_SHR_KIND_R8, 6.1860010_SHR_KIND_R8, 18.4174410_SHR_KIND_R8, & + & 0.6678630_SHR_KIND_R8/) + + real (SHR_KIND_R8), parameter :: ecphas(pecclen) = & ! phases for eccen/fvelp cos/sin series + & (/ 28.620089_SHR_KIND_R8, 193.788772_SHR_KIND_R8, 308.307024_SHR_KIND_R8, & + & 320.199637_SHR_KIND_R8, 279.376984_SHR_KIND_R8, 87.195000_SHR_KIND_R8, & + & 349.129677_SHR_KIND_R8, 128.443387_SHR_KIND_R8, 154.143880_SHR_KIND_R8, & + & 291.269597_SHR_KIND_R8, 114.860583_SHR_KIND_R8, 332.092251_SHR_KIND_R8, & + & 296.414411_SHR_KIND_R8, 145.769910_SHR_KIND_R8, 337.237063_SHR_KIND_R8, & + & 152.092288_SHR_KIND_R8, 126.839891_SHR_KIND_R8, 210.667199_SHR_KIND_R8, & + & 72.108838_SHR_KIND_R8/) + + ! Sine series data for computation of moving vernal equinox longitude of + ! perihelion: amplitude (arc seconds), rate (arc sec/year), phase (degrees). + + real (SHR_KIND_R8), parameter :: mvamp (pmvelen) = & ! 
amplitudes for mvelp sine series + & (/ 7391.0225890_SHR_KIND_R8, 2555.1526947_SHR_KIND_R8, 2022.7629188_SHR_KIND_R8, & + & -1973.6517951_SHR_KIND_R8, 1240.2321818_SHR_KIND_R8, 953.8679112_SHR_KIND_R8, & + & -931.7537108_SHR_KIND_R8, 872.3795383_SHR_KIND_R8, 606.3544732_SHR_KIND_R8, & + & -496.0274038_SHR_KIND_R8, 456.9608039_SHR_KIND_R8, 346.9462320_SHR_KIND_R8, & + & -305.8412902_SHR_KIND_R8, 249.6173246_SHR_KIND_R8, -199.1027200_SHR_KIND_R8, & + & 191.0560889_SHR_KIND_R8, -175.2936572_SHR_KIND_R8, 165.9068833_SHR_KIND_R8, & + & 161.1285917_SHR_KIND_R8, 139.7878093_SHR_KIND_R8, -133.5228399_SHR_KIND_R8, & + & 117.0673811_SHR_KIND_R8, 104.6907281_SHR_KIND_R8, 95.3227476_SHR_KIND_R8, & + & 86.7824524_SHR_KIND_R8, 86.0857729_SHR_KIND_R8, 70.5893698_SHR_KIND_R8, & + & -69.9719343_SHR_KIND_R8, -62.5817473_SHR_KIND_R8, 61.5450059_SHR_KIND_R8, & + & -57.9364011_SHR_KIND_R8, 57.1899832_SHR_KIND_R8, -57.0236109_SHR_KIND_R8, & + & -54.2119253_SHR_KIND_R8, 53.2834147_SHR_KIND_R8, 52.1223575_SHR_KIND_R8, & + & -49.0059908_SHR_KIND_R8, -48.3118757_SHR_KIND_R8, -45.4191685_SHR_KIND_R8, & + & -42.2357920_SHR_KIND_R8, -34.7971099_SHR_KIND_R8, 34.4623613_SHR_KIND_R8, & + & -33.8356643_SHR_KIND_R8, 33.6689362_SHR_KIND_R8, -31.2521586_SHR_KIND_R8, & + & -30.8798701_SHR_KIND_R8, 28.4640769_SHR_KIND_R8, -27.1960802_SHR_KIND_R8, & + & 27.0860736_SHR_KIND_R8, -26.3437456_SHR_KIND_R8, 24.7253740_SHR_KIND_R8, & + & 24.6732126_SHR_KIND_R8, 24.4272733_SHR_KIND_R8, 24.0127327_SHR_KIND_R8, & + & 21.7150294_SHR_KIND_R8, -21.5375347_SHR_KIND_R8, 18.1148363_SHR_KIND_R8, & + & -16.9603104_SHR_KIND_R8, -16.1765215_SHR_KIND_R8, 15.5567653_SHR_KIND_R8, & + & 15.4846529_SHR_KIND_R8, 15.2150632_SHR_KIND_R8, 14.5047426_SHR_KIND_R8, & + & -14.3873316_SHR_KIND_R8, 13.1351419_SHR_KIND_R8, 12.8776311_SHR_KIND_R8, & + & 11.9867234_SHR_KIND_R8, 11.9385578_SHR_KIND_R8, 11.7030822_SHR_KIND_R8, & + & 11.6018181_SHR_KIND_R8, -11.2617293_SHR_KIND_R8, -10.4664199_SHR_KIND_R8, & + & 10.4333970_SHR_KIND_R8, -10.2377466_SHR_KIND_R8, 10.1934446_SHR_KIND_R8, & + & -10.1280191_SHR_KIND_R8, 10.0289441_SHR_KIND_R8, -10.0034259_SHR_KIND_R8/) + + real (SHR_KIND_R8), parameter :: mvrate(pmvelen) = & ! 
rates for mvelp sine series + & (/ 31.609974_SHR_KIND_R8, 32.620504_SHR_KIND_R8, 24.172203_SHR_KIND_R8, & + & 0.636717_SHR_KIND_R8, 31.983787_SHR_KIND_R8, 3.138886_SHR_KIND_R8, & + & 30.973257_SHR_KIND_R8, 44.828336_SHR_KIND_R8, 0.991874_SHR_KIND_R8, & + & 0.373813_SHR_KIND_R8, 43.668246_SHR_KIND_R8, 32.246691_SHR_KIND_R8, & + & 30.599444_SHR_KIND_R8, 2.147012_SHR_KIND_R8, 10.511172_SHR_KIND_R8, & + & 42.681324_SHR_KIND_R8, 13.650058_SHR_KIND_R8, 0.986922_SHR_KIND_R8, & + & 9.874455_SHR_KIND_R8, 13.013341_SHR_KIND_R8, 0.262904_SHR_KIND_R8, & + & 0.004952_SHR_KIND_R8, 1.142024_SHR_KIND_R8, 63.219948_SHR_KIND_R8, & + & 0.205021_SHR_KIND_R8, 2.151964_SHR_KIND_R8, 64.230478_SHR_KIND_R8, & + & 43.836462_SHR_KIND_R8, 47.439436_SHR_KIND_R8, 1.384343_SHR_KIND_R8, & + & 7.437771_SHR_KIND_R8, 18.829299_SHR_KIND_R8, 9.500642_SHR_KIND_R8, & + & 0.431696_SHR_KIND_R8, 1.160090_SHR_KIND_R8, 55.782177_SHR_KIND_R8, & + & 12.639528_SHR_KIND_R8, 1.155138_SHR_KIND_R8, 0.168216_SHR_KIND_R8, & + & 1.647247_SHR_KIND_R8, 10.884985_SHR_KIND_R8, 5.610937_SHR_KIND_R8, & + & 12.658184_SHR_KIND_R8, 1.010530_SHR_KIND_R8, 1.983748_SHR_KIND_R8, & + & 14.023871_SHR_KIND_R8, 0.560178_SHR_KIND_R8, 1.273434_SHR_KIND_R8, & + & 12.021467_SHR_KIND_R8, 62.583231_SHR_KIND_R8, 63.593761_SHR_KIND_R8, & + & 76.438310_SHR_KIND_R8, 4.280910_SHR_KIND_R8, 13.218362_SHR_KIND_R8, & + & 17.818769_SHR_KIND_R8, 8.359495_SHR_KIND_R8, 56.792707_SHR_KIND_R8, & + & 8.448301_SHR_KIND_R8, 1.978796_SHR_KIND_R8, 8.863925_SHR_KIND_R8, & + & 0.186365_SHR_KIND_R8, 8.996212_SHR_KIND_R8, 6.771027_SHR_KIND_R8, & + & 45.815258_SHR_KIND_R8, 12.002811_SHR_KIND_R8, 75.278220_SHR_KIND_R8, & + & 65.241008_SHR_KIND_R8, 18.870667_SHR_KIND_R8, 22.009553_SHR_KIND_R8, & + & 64.604291_SHR_KIND_R8, 11.498094_SHR_KIND_R8, 0.578834_SHR_KIND_R8, & + & 9.237738_SHR_KIND_R8, 49.747842_SHR_KIND_R8, 2.147012_SHR_KIND_R8, & + & 1.196895_SHR_KIND_R8, 2.133898_SHR_KIND_R8, 0.173168_SHR_KIND_R8/) + + real (SHR_KIND_R8), parameter :: mvphas(pmvelen) = & ! 
phases for mvelp sine series + & (/ 251.9025_SHR_KIND_R8, 280.8325_SHR_KIND_R8, 128.3057_SHR_KIND_R8, & + & 348.1074_SHR_KIND_R8, 292.7252_SHR_KIND_R8, 165.1686_SHR_KIND_R8, & + & 263.7951_SHR_KIND_R8, 15.3747_SHR_KIND_R8, 58.5749_SHR_KIND_R8, & + & 40.8226_SHR_KIND_R8, 308.4258_SHR_KIND_R8, 240.0099_SHR_KIND_R8, & + & 222.9725_SHR_KIND_R8, 106.5937_SHR_KIND_R8, 114.5182_SHR_KIND_R8, & + & 268.7809_SHR_KIND_R8, 279.6869_SHR_KIND_R8, 39.6448_SHR_KIND_R8, & + & 126.4108_SHR_KIND_R8, 291.5795_SHR_KIND_R8, 307.2848_SHR_KIND_R8, & + & 18.9300_SHR_KIND_R8, 273.7596_SHR_KIND_R8, 143.8050_SHR_KIND_R8, & + & 191.8927_SHR_KIND_R8, 125.5237_SHR_KIND_R8, 172.7351_SHR_KIND_R8, & + & 316.7998_SHR_KIND_R8, 319.6024_SHR_KIND_R8, 69.7526_SHR_KIND_R8, & + & 123.5968_SHR_KIND_R8, 217.6432_SHR_KIND_R8, 85.5882_SHR_KIND_R8, & + & 156.2147_SHR_KIND_R8, 66.9489_SHR_KIND_R8, 20.2082_SHR_KIND_R8, & + & 250.7568_SHR_KIND_R8, 48.0188_SHR_KIND_R8, 8.3739_SHR_KIND_R8, & + & 17.0374_SHR_KIND_R8, 155.3409_SHR_KIND_R8, 94.1709_SHR_KIND_R8, & + & 221.1120_SHR_KIND_R8, 28.9300_SHR_KIND_R8, 117.1498_SHR_KIND_R8, & + & 320.5095_SHR_KIND_R8, 262.3602_SHR_KIND_R8, 336.2148_SHR_KIND_R8, & + & 233.0046_SHR_KIND_R8, 155.6977_SHR_KIND_R8, 184.6277_SHR_KIND_R8, & + & 267.2772_SHR_KIND_R8, 78.9281_SHR_KIND_R8, 123.4722_SHR_KIND_R8, & + & 188.7132_SHR_KIND_R8, 180.1364_SHR_KIND_R8, 49.1382_SHR_KIND_R8, & + & 152.5268_SHR_KIND_R8, 98.2198_SHR_KIND_R8, 97.4808_SHR_KIND_R8, & + & 221.5376_SHR_KIND_R8, 168.2438_SHR_KIND_R8, 161.1199_SHR_KIND_R8, & + & 55.0196_SHR_KIND_R8, 262.6495_SHR_KIND_R8, 200.3284_SHR_KIND_R8, & + & 201.6651_SHR_KIND_R8, 294.6547_SHR_KIND_R8, 99.8233_SHR_KIND_R8, & + & 213.5577_SHR_KIND_R8, 154.1631_SHR_KIND_R8, 232.7153_SHR_KIND_R8, & + & 138.3034_SHR_KIND_R8, 204.6609_SHR_KIND_R8, 106.5938_SHR_KIND_R8, & + & 250.4676_SHR_KIND_R8, 332.3345_SHR_KIND_R8, 27.3039_SHR_KIND_R8/) + + !---------------------------Local variables---------------------------------- + integer(SHR_KIND_IN) :: i ! Index for series summations + real (SHR_KIND_R8) :: obsum ! Obliquity series summation + real (SHR_KIND_R8) :: cossum ! Cos series summation for eccentricity/fvelp + real (SHR_KIND_R8) :: sinsum ! Sin series summation for eccentricity/fvelp + real (SHR_KIND_R8) :: fvelp ! Fixed vernal equinox long of perihelion + real (SHR_KIND_R8) :: mvsum ! mvelp series summation + real (SHR_KIND_R8) :: beta ! Intermediate argument for lambm0 + real (SHR_KIND_R8) :: years ! Years to time of interest ( pos <=> future) + real (SHR_KIND_R8) :: eccen2 ! eccentricity squared + real (SHR_KIND_R8) :: eccen3 ! eccentricity cubed + + !-------------------------- Formats ----------------------------------------- + character(*),parameter :: svnID = "SVN " // & + "$Id: shr_orb_mod.F90 25434 2010-11-04 22:46:24Z tcraig $" + character(*),parameter :: svnURL = "SVN " +! character(*),parameter :: svnURL = "SVN " // & +! "$URL: https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_121022/shr/shr_orb_mod.F90 $" + character(len=*),parameter :: F00 = "('(shr_orb_params) ',4a)" + character(len=*),parameter :: F01 = "('(shr_orb_params) ',a,i9)" + character(len=*),parameter :: F02 = "('(shr_orb_params) ',a,f6.3)" + character(len=*),parameter :: F03 = "('(shr_orb_params) ',a,es14.6)" + + !---------------------------------------------------------------------------- + ! radinp and algorithms below will need a degree to radian conversion factor + + if ( log_print .and. 
s_loglev > 0 ) then + write(s_logunit,F00) 'Calculate characteristics of the orbit:' + write(s_logunit,F00) svnID +! write(s_logunit,F00) svnURL + end if + + ! Check for flag to use input orbit parameters + + IF ( iyear_AD == SHR_ORB_UNDEF_INT ) THEN + + ! Check input obliq, eccen, and mvelp to ensure reasonable + + if( obliq == SHR_ORB_UNDEF_REAL )then + write(s_logunit,F00) trim(subname)//' Have to specify orbital parameters:' + write(s_logunit,F00) 'Either set: iyear_AD, OR [obliq, eccen, and mvelp]:' + write(s_logunit,F00) 'iyear_AD is the year to simulate orbit for (ie. 1950): ' + write(s_logunit,F00) 'obliq, eccen, mvelp specify the orbit directly:' + write(s_logunit,F00) 'The AMIP II settings (for a 1995 orbit) are: ' + write(s_logunit,F00) ' obliq = 23.4441' + write(s_logunit,F00) ' eccen = 0.016715' + write(s_logunit,F00) ' mvelp = 102.7' +!echmod call shr_sys_abort(subname//' ERROR: unreasonable obliq') + call abort_ice('unreasonable oblip') !echmod + else if ( log_print ) then + write(s_logunit,F00) 'Use input orbital parameters: ' + end if + if( (obliq < SHR_ORB_OBLIQ_MIN).or.(obliq > SHR_ORB_OBLIQ_MAX) ) then + write(s_logunit,F03) 'Input obliquity unreasonable: ', obliq +!echmod call shr_sys_abort(subname//' ERROR: unreasonable obliq') + call abort_ice('unreasonable obliq') !echmod + end if + if( (eccen < SHR_ORB_ECCEN_MIN).or.(eccen > SHR_ORB_ECCEN_MAX) ) then + write(s_logunit,F03) 'Input eccentricity unreasonable: ', eccen +!echmod call shr_sys_abort(subname//' ERROR: unreasonable eccen') + call abort_ice('unreasonable eccen') !echmod + end if + if( (mvelp < SHR_ORB_MVELP_MIN).or.(mvelp > SHR_ORB_MVELP_MAX) ) then + write(s_logunit,F03) 'Input mvelp unreasonable: ' , mvelp +!echmod call shr_sys_abort(subname//' ERROR: unreasonable mvelp') + call abort_ice('unreasonable mvelp') !echmod + end if + eccen2 = eccen*eccen + eccen3 = eccen2*eccen + + ELSE ! Otherwise calculate based on years before present + + if ( log_print .and. s_loglev > 0) then + write(s_logunit,F01) 'Calculate orbit for year: ' , iyear_AD + end if + yb4_1950AD = 1950.0_SHR_KIND_R8 - real(iyear_AD,SHR_KIND_R8) + if ( abs(yb4_1950AD) .gt. 1000000.0_SHR_KIND_R8 )then + write(s_logunit,F00) 'orbit only valid for years+-1000000' + write(s_logunit,F00) 'Relative to 1950 AD' + write(s_logunit,F03) '# of years before 1950: ',yb4_1950AD + write(s_logunit,F01) 'Year to simulate was : ',iyear_AD +!echmod call shr_sys_abort(subname//' ERROR: unreasonable year') + call abort_ice('unreasonable year') !echmod + end if + + ! The following calculates the earths obliquity, orbital eccentricity + ! (and various powers of it) and vernal equinox mean longitude of + ! perihelion for years in the past (future = negative of years past), + ! using constants (see parameter section) given in the program of: + ! + ! Berger, Andre. 1978 A Simple Algorithm to Compute Long-Term Variations + ! of Daily Insolation. Contribution 18, Institute of Astronomy and + ! Geophysics, Universite Catholique de Louvain, Louvain-la-Neuve, Belgium. + ! + ! and formulas given in the paper (where less precise constants are also + ! given): + ! + ! Berger, Andre. 1978. Long-Term Variations of Daily Insolation and + ! Quaternary Climatic Changes. J. of the Atmo. Sci. 35:2362-2367 + ! + ! The algorithm is valid only to 1,000,000 years past or hence. + ! For a solution valid to 5-10 million years past see the above author. + ! Algorithm below is better for years closer to present than is the + ! 5-10 million year solution. + ! + ! 
Years to time of interest must be negative of years before present + ! (1950) in formulas that follow. + + years = - yb4_1950AD + + ! In the summations below, cosine or sine arguments, which end up in + ! degrees, must be converted to radians via multiplication by degrad. + ! + ! Summation of cosine series for obliquity (epsilon in Berger 1978) in + ! degrees. Convert the amplitudes and rates, which are in arc secs, into + ! degrees via multiplication by psecdeg (arc seconds to degrees conversion + ! factor). For obliq, first term is Berger 1978 epsilon star; second + ! term is series summation in degrees. + + obsum = 0.0_SHR_KIND_R8 + do i = 1, poblen + obsum = obsum + obamp(i)*psecdeg*cos((obrate(i)*psecdeg*years + & + & obphas(i))*degrad) + end do + obliq = 23.320556_SHR_KIND_R8 + obsum + + ! Summation of cosine and sine series for computation of eccentricity + ! (eccen; e in Berger 1978) and fixed vernal equinox longitude of + ! perihelion (fvelp; pi in Berger 1978), which is used for computation + ! of moving vernal equinox longitude of perihelion. Convert the rates, + ! which are in arc seconds, into degrees via multiplication by psecdeg. + + cossum = 0.0_SHR_KIND_R8 + do i = 1, pecclen + cossum = cossum+ecamp(i)*cos((ecrate(i)*psecdeg*years+ecphas(i))*degrad) + end do + + sinsum = 0.0_SHR_KIND_R8 + do i = 1, pecclen + sinsum = sinsum+ecamp(i)*sin((ecrate(i)*psecdeg*years+ecphas(i))*degrad) + end do + + ! Use summations to calculate eccentricity + + eccen2 = cossum*cossum + sinsum*sinsum + eccen = sqrt(eccen2) + eccen3 = eccen2*eccen + + ! A series of cases for fvelp, which is in radians. + + if (abs(cossum) .le. 1.0E-8_SHR_KIND_R8) then + if (sinsum .eq. 0.0_SHR_KIND_R8) then + fvelp = 0.0_SHR_KIND_R8 + else if (sinsum .lt. 0.0_SHR_KIND_R8) then + fvelp = 1.5_SHR_KIND_R8*pi + else if (sinsum .gt. 0.0_SHR_KIND_R8) then + fvelp = .5_SHR_KIND_R8*pi + endif + else if (cossum .lt. 0.0_SHR_KIND_R8) then + fvelp = atan(sinsum/cossum) + pi + else if (cossum .gt. 0.0_SHR_KIND_R8) then + if (sinsum .lt. 0.0_SHR_KIND_R8) then + fvelp = atan(sinsum/cossum) + 2.0_SHR_KIND_R8*pi + else + fvelp = atan(sinsum/cossum) + endif + endif + + ! Summation of sin series for computation of moving vernal equinox long + ! of perihelion (mvelp; omega bar in Berger 1978) in degrees. For mvelp, + ! first term is fvelp in degrees; second term is Berger 1978 psi bar + ! times years and in degrees; third term is Berger 1978 zeta; fourth + ! term is series summation in degrees. Convert the amplitudes and rates, + ! which are in arc seconds, into degrees via multiplication by psecdeg. + ! Series summation plus second and third terms constitute Berger 1978 + ! psi, which is the general precession. + + mvsum = 0.0_SHR_KIND_R8 + do i = 1, pmvelen + mvsum = mvsum + mvamp(i)*psecdeg*sin((mvrate(i)*psecdeg*years + & + & mvphas(i))*degrad) + end do + mvelp = fvelp/degrad + 50.439273_SHR_KIND_R8*psecdeg*years + 3.392506_SHR_KIND_R8 + mvsum + + ! Cases to make sure mvelp is between 0 and 360. + + do while (mvelp .lt. 0.0_SHR_KIND_R8) + mvelp = mvelp + 360.0_SHR_KIND_R8 + end do + do while (mvelp .ge. 360.0_SHR_KIND_R8) + mvelp = mvelp - 360.0_SHR_KIND_R8 + end do + + END IF ! end of test on whether to calculate or use input orbital params + + ! Orbit needs the obliquity in radians + + obliqr = obliq*degrad + + ! 180 degrees must be added to mvelp since observations are made from the + ! earth and the sun is considered (wrongly for the algorithm) to go around + ! the earth. For a more graphic explanation see Appendix B in: + ! 
+ ! A. Berger, M. Loutre and C. Tricot. 1993. Insolation and Earth Orbital + ! Periods. J. of Geophysical Research 98:10,341-10,362. + ! + ! Additionally, orbit will need this value in radians. So mvelp becomes + ! mvelpp (mvelp plus pi) + + mvelpp = (mvelp + 180._SHR_KIND_R8)*degrad + + ! Set up an argument used several times in lambm0 calculation ahead. + + beta = sqrt(1._SHR_KIND_R8 - eccen2) + + ! The mean longitude at the vernal equinox (lambda m nought in Berger + ! 1978; in radians) is calculated from the following formula given in + ! Berger 1978. At the vernal equinox the true longitude (lambda in Berger + ! 1978) is 0. + + lambm0 = 2._SHR_KIND_R8*((.5_SHR_KIND_R8*eccen + .125_SHR_KIND_R8*eccen3)*(1._SHR_KIND_R8 + beta)*sin(mvelpp) & + & - .250_SHR_KIND_R8*eccen2*(.5_SHR_KIND_R8 + beta)*sin(2._SHR_KIND_R8*mvelpp) & + & + .125_SHR_KIND_R8*eccen3*(1._SHR_KIND_R8/3._SHR_KIND_R8 + beta)*sin(3._SHR_KIND_R8*mvelpp)) + + if ( log_print ) then + write(s_logunit,F03) '------ Computed Orbital Parameters ------' + write(s_logunit,F03) 'Eccentricity = ',eccen + write(s_logunit,F03) 'Obliquity (deg) = ',obliq + write(s_logunit,F03) 'Obliquity (rad) = ',obliqr + write(s_logunit,F03) 'Long of perh(deg) = ',mvelp + write(s_logunit,F03) 'Long of perh(rad) = ',mvelpp + write(s_logunit,F03) 'Long at v.e.(rad) = ',lambm0 + write(s_logunit,F03) '-----------------------------------------' + end if + +END SUBROUTINE shr_orb_params + +!=============================================================================== + +SUBROUTINE shr_orb_decl(calday ,eccen ,mvelpp ,lambm0 ,obliqr ,delta ,eccf) + +!------------------------------------------------------------------------------- +! +! Compute earth/orbit parameters using formula suggested by +! Duane Thresher. +! +!---------------------------Code history---------------------------------------- +! +! Original version: Erik Kluzek +! Date: Oct/1997 +! +!------------------------------------------------------------------------------- + + !------------------------------Arguments-------------------------------- + real (SHR_KIND_R8),intent(in) :: calday ! Calendar day, including fraction + real (SHR_KIND_R8),intent(in) :: eccen ! Eccentricity + real (SHR_KIND_R8),intent(in) :: obliqr ! Earths obliquity in radians + real (SHR_KIND_R8),intent(in) :: lambm0 ! Mean long of perihelion at the + ! vernal equinox (radians) + real (SHR_KIND_R8),intent(in) :: mvelpp ! moving vernal equinox longitude + ! of perihelion plus pi (radians) + real (SHR_KIND_R8),intent(out) :: delta ! Solar declination angle in rad + real (SHR_KIND_R8),intent(out) :: eccf ! Earth-sun distance factor (ie. (1/r)**2) + + !---------------------------Local variables----------------------------- + real (SHR_KIND_R8),parameter :: dayspy = 365.0_SHR_KIND_R8 ! days per year + real (SHR_KIND_R8),parameter :: ve = 80.5_SHR_KIND_R8 ! Calday of vernal equinox + ! assumes Jan 1 = calday 1 + + real (SHR_KIND_R8) :: lambm ! Lambda m, mean long of perihelion (rad) + real (SHR_KIND_R8) :: lmm ! Intermediate argument involving lambm + real (SHR_KIND_R8) :: lamb ! Lambda, the earths long of perihelion + real (SHR_KIND_R8) :: invrho ! Inverse normalized sun/earth distance + real (SHR_KIND_R8) :: sinl ! Sine of lmm + + ! Compute eccentricity factor and solar declination using + ! day value where a round day (such as 213.0) refers to 0z at + ! Greenwich longitude. + ! + ! Use formulas from Berger, Andre 1978: Long-Term Variations of Daily + ! Insolation and Quaternary Climatic Changes. J. of the Atmo. Sci. + ! 
35:2362-2367. + ! + ! To get the earths true longitude (position in orbit; lambda in Berger + ! 1978) which is necessary to find the eccentricity factor and declination, + ! must first calculate the mean longitude (lambda m in Berger 1978) at + ! the present day. This is done by adding to lambm0 (the mean longitude + ! at the vernal equinox, set as March 21 at noon, when lambda=0; in radians) + ! an increment (delta lambda m in Berger 1978) that is the number of + ! days past or before (a negative increment) the vernal equinox divided by + ! the days in a model year times the 2*pi radians in a complete orbit. + + lambm = lambm0 + (calday - ve)*2._SHR_KIND_R8*pi/dayspy + lmm = lambm - mvelpp + + ! The earths true longitude, in radians, is then found from + ! the formula in Berger 1978: + + sinl = sin(lmm) + lamb = lambm + eccen*(2._SHR_KIND_R8*sinl + eccen*(1.25_SHR_KIND_R8*sin(2._SHR_KIND_R8*lmm) & + & + eccen*((13.0_SHR_KIND_R8/12.0_SHR_KIND_R8)*sin(3._SHR_KIND_R8*lmm) - 0.25_SHR_KIND_R8*sinl))) + + ! Using the obliquity, eccentricity, moving vernal equinox longitude of + ! perihelion (plus), and earths true longitude, the declination (delta) + ! and the normalized earth/sun distance (rho in Berger 1978; actually inverse + ! rho will be used), and thus the eccentricity factor (eccf), can be + ! calculated from formulas given in Berger 1978. + + invrho = (1._SHR_KIND_R8 + eccen*cos(lamb - mvelpp)) / (1._SHR_KIND_R8 - eccen*eccen) + + ! Set solar declination and eccentricity factor + + delta = asin(sin(obliqr)*sin(lamb)) + eccf = invrho*invrho + + return + +END SUBROUTINE shr_orb_decl + +!=============================================================================== + +SUBROUTINE shr_orb_print( iyear_AD, eccen, obliq, mvelp ) + +!------------------------------------------------------------------------------- +! +! Print out the information on the Earths input orbital characteristics +! +!---------------------------Code history---------------------------------------- +! +! Original version: Erik Kluzek +! Date: Oct/1997 +! +!------------------------------------------------------------------------------- + + !---------------------------Arguments---------------------------------------- + integer(SHR_KIND_IN),intent(in) :: iyear_AD ! requested Year (AD) + real (SHR_KIND_R8),intent(in) :: eccen ! eccentricity (unitless) + ! (typically 0 to 0.1) + real (SHR_KIND_R8),intent(in) :: obliq ! obliquity (-90 to +90 degrees) + ! typically 22-26 + real (SHR_KIND_R8),intent(in) :: mvelp ! moving vernal equinox at perhel + ! (0 to 360 degrees) + !-------------------------- Formats ----------------------------------------- + character(len=*),parameter :: F00 = "('(shr_orb_print) ',4a)" + character(len=*),parameter :: F01 = "('(shr_orb_print) ',a,i9.4)" + character(len=*),parameter :: F02 = "('(shr_orb_print) ',a,f6.3)" + character(len=*),parameter :: F03 = "('(shr_orb_print) ',a,es14.6)" + !---------------------------------------------------------------------------- + + if (s_loglev > 0) then + if ( iyear_AD .ne. SHR_ORB_UNDEF_INT ) then + if ( iyear_AD > 0 ) then + write(s_logunit,F01) 'Orbital parameters calculated for year: AD ',iyear_AD + else + write(s_logunit,F01) 'Orbital parameters calculated for year: BC ',iyear_AD + end if + else if ( obliq /= SHR_ORB_UNDEF_REAL ) then + write(s_logunit,F03) 'Orbital parameters: ' + write(s_logunit,F03) 'Obliquity (degree): ', obliq + write(s_logunit,F03) 'Eccentricity (unitless): ', eccen + write(s_logunit,F03) 'Long. 
of moving Perhelion (deg): ', mvelp + else + write(s_logunit,F03) 'Orbit parameters not set!' + end if + endif + +END SUBROUTINE shr_orb_print +!=============================================================================== + +END MODULE shr_orb_mod diff --git a/drivers/access/CICE.F90 b/drivers/access/CICE.F90 new file mode 100644 index 00000000..9581ce58 --- /dev/null +++ b/drivers/access/CICE.F90 @@ -0,0 +1,104 @@ +! SVN:$Id: CICE.F90 700 2013-08-15 19:17:39Z eclare $ +!======================================================================= +! Copyright 2013, LANSLLC. All rights reserved. +! Unless otherwise indicated, this information has been authored by an +! employee or employees of the Los Alamos National Security, LLC (LANS), +! operator of the Los Alamos National Laboratory under Contract No. +! DE-AC52-06NA25396 with the U.S. Department of Energy. The U.S. Government +! has rights to use, reproduce, and distribute this information. The public +! may copy and use this information without charge, provided that this +! Notice and any statement of authorship are reproduced on all copies. +! Neither the Government nor LANS makes any warranty, express or implied, +! or assumes any liability or responsibility for the use of this +! information. +! +! CICE is developed and maintained by Elizabeth C. Hunke (eclare@lanl.gov) +! Group T-3 (Fluid Dynamics and Solid Mechanics), Los Alamos National +! Laboratory, with support from the Earth System Modeling and Regional and +! Global Climate Modeling programs of the Office of Biological and +! Environmental Research within the U.S. Department of Energy's Office of +! Science. Los Alamos National Laboratory is operated by the DOE National +! Nuclear Security Administration under Contract DE-AC52-06NA25396. +! +! Numerous researchers have contributed to this effort, especially members +! of the CESM Polar Climate Working Group and the sea ice modeling team +! at UK Met Office Hadley Centre -- thanks to all! +! +!======================================================================= +#ifndef popcice +! +! Main driver routine for CICE. Initializes and steps through the model. +! This program should be compiled if CICE is run as a separate executable, +! but not if CICE subroutines are called from another program (e.g., CAM). +! +! authors Elizabeth C. Hunke and William H. Lipscomb, LANL +! +! 2006: Converted to free form source (F90) by Elizabeth Hunke +! 2008: E. Hunke moved ESMF code to its own driver +! + program icemodel + + use CICE_InitMod + use CICE_RunMod + use CICE_FinalMod + + implicit none + + !----------------------------------------------------------------- + ! Initialize CICE + !----------------------------------------------------------------- + + call CICE_Initialize + + !----------------------------------------------------------------- + ! Run CICE + !----------------------------------------------------------------- + + call CICE_Run + + !----------------------------------------------------------------- + ! Finalize CICE + !----------------------------------------------------------------- + + call CICE_Finalize + + end program icemodel + +#endif +!======================================================================= +! +! Wrapper for the print_state debugging routine. +! Useful for debugging in the main driver (see ice.F_debug) +! ip, jp, mtask are set in ice_diagnostics.F +! +! author Elizabeth C. Hunke, LANL +! 
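+! Usage sketch (illustrative only; the label text below is an example and not
+! part of the imported code): once check_step, iblkp, ip, jp and mtask have
+! been set in ice_diagnostics, a driver can do, e.g.,
+!
+!    character (char_len) :: plabeld
+!    plabeld = 'after vertical thermodynamics'
+!    call debug_ice(iblk, plabeld)
+!
+! inside its block loop to print the model state at the watched grid point.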
+ subroutine debug_ice(iblk, plabeld) + + use ice_kinds_mod + use ice_calendar, only: istep1 + use ice_communicate, only: my_task + use ice_diagnostics, only: check_step, iblkp, ip, jp, mtask, print_state + use ice_domain, only: nblocks + use ice_blocks, only: nx_block, ny_block + + character (char_len), intent(in) :: plabeld + integer (kind=int_kind), intent(in) :: iblk + + ! local + integer (kind=int_kind) :: i, j + + if (istep1 >= check_step .and. & + iblk==iblkp .and. my_task==mtask) then + + do j = 1, ny_block + do i = 1, nx_block + if (i==ip .and. j==jp) call print_state(plabeld,i,j,iblk) + enddo + enddo + + endif + + end subroutine debug_ice + +!======================================================================= diff --git a/drivers/access/CICE_FinalMod.F90 b/drivers/access/CICE_FinalMod.F90 new file mode 100644 index 00000000..2ab6fb36 --- /dev/null +++ b/drivers/access/CICE_FinalMod.F90 @@ -0,0 +1,103 @@ +! SVN:$Id: CICE_FinalMod.F90 744 2013-09-27 22:53:24Z eclare $ +!======================================================================= +! +! This module contains routines for the final exit of the CICE model, +! including final output and clean exit from any message passing +! environments and frameworks. +! +! authors: Philip W. Jones, LANL +! 2006: Converted to free source form (F90) by Elizabeth Hunke +! 2008: E. Hunke moved ESMF code to its own driver + + module CICE_FinalMod + + use ice_kinds_mod + +#ifdef AusCOM + use cpl_interface, only : coupler_termination +#endif + + implicit none + private + public :: CICE_Finalize + save + +!======================================================================= + + contains + +!======================================================================= +! +! This routine shuts down CICE by exiting all relevent environments. + + subroutine CICE_Finalize + + use ice_exit, only: end_run + use ice_fileunits, only: nu_diag, release_all_fileunits + use ice_restart_shared, only: runid + use ice_timers, only: ice_timer_stop, ice_timer_print_all, timer_total + + !------------------------------------------------------------------- + ! stop timers and print timer info + !------------------------------------------------------------------- + + call ice_timer_stop(timer_total) ! stop timing entire run +#ifdef AusCOM + call ice_timer_print_all(stats=.true.) ! print timing information +#else + call ice_timer_print_all(stats=.false.) ! print timing information +#endif + +!echmod if (nu_diag /= 6) close (nu_diag) ! diagnostic output + call release_all_fileunits + + !------------------------------------------------------------------- + ! write 'finished' file if needed + !------------------------------------------------------------------- + + if (runid == 'bering') call writeout_finished_file() + + !------------------------------------------------------------------- + ! quit MPI + !------------------------------------------------------------------- + +#ifdef AusCOM + call coupler_termination !quit MPI and release memory +#else +#ifndef coupled + call end_run ! quit MPI +#endif +#endif + + end subroutine CICE_Finalize + +!======================================================================= +! +! Write a file indicating that this run finished cleanly. This is +! needed only for runs on machine 'bering' (set using runid = 'bering'). +! +! 
author: Adrian Turner, LANL + + subroutine writeout_finished_file() + + use ice_restart_shared, only: restart_dir + use ice_communicate, only: my_task, master_task + + character(len=char_len_long) :: filename + + if (my_task == master_task) then + + filename = trim(restart_dir)//"finished" + open(11,file=filename) + write(11,*) "finished" + close(11) + + endif + + end subroutine writeout_finished_file + +!======================================================================= + + end module CICE_FinalMod + +!======================================================================= diff --git a/drivers/access/CICE_InitMod.F90 b/drivers/access/CICE_InitMod.F90 new file mode 100644 index 00000000..f481f796 --- /dev/null +++ b/drivers/access/CICE_InitMod.F90 @@ -0,0 +1,467 @@ +! SVN:$Id: CICE_InitMod.F90 746 2013-09-28 22:47:56Z eclare $ +!======================================================================= +! +! This module contains the CICE initialization routine that sets model +! parameters and initializes the grid and CICE state variables. +! +! authors Elizabeth C. Hunke, LANL +! William H. Lipscomb, LANL +! Philip W. Jones, LANL +! +! 2006: Converted to free form source (F90) by Elizabeth Hunke +! 2008: E. Hunke moved ESMF code to its own driver + + module CICE_InitMod + + use ice_kinds_mod + +!ars599: 11042014: remove details of forcing handler, interface, arrays setup, and gather scatter +#ifdef AusCOM + use cpl_parameters + use cpl_forcing_handler + use cpl_interface +!ars599: 27032014: defind my_task + use ice_communicate, only: my_task +#endif + + implicit none + private + public :: CICE_Initialize, cice_init + save + +#ifdef AusCOM + integer :: nrec +#endif + +!======================================================================= + + contains + +!======================================================================= + +! Initialize the basic state, grid and all necessary parameters for +! running the CICE model. Return the initial state in routine +! export state. +! Note: This initialization driver is designed for standalone and +! CCSM-coupled applications. For other +! applications (e.g., standalone CAM), this driver would be +! replaced by a different driver that calls subroutine cice_init, +! where most of the work is done. + + subroutine CICE_Initialize + + !-------------------------------------------------------------------- + ! model initialization + !-------------------------------------------------------------------- + + call cice_init + + end subroutine CICE_Initialize + +!======================================================================= +! +! Initialize CICE model. 
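+! Minimal sketch of an alternative driver (illustrative only; the program
+! name host_model is an assumption): applications that replace the
+! CICE_Initialize driver can call the public cice_init directly, e.g.
+!
+!    program host_model
+!       use CICE_InitMod, only: cice_init
+!       call cice_init        ! does the bulk of the CICE setup
+!    end program host_model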
+ + subroutine cice_init + + use ice_aerosol, only: faero_default + use ice_algae, only: get_forcing_bgc + use ice_calendar, only: dt, dt_dyn, time, istep, istep1, write_ic, & + init_calendar, calendar, idate, month +!ars599: 27032014 + use ice_communicate, only: MPI_COMM_ICE + use ice_communicate, only: init_communicate + use ice_coupling, only: top_layer_Tandk_init + use ice_diagnostics, only: init_diags + use ice_domain, only: init_domain_blocks + use ice_dyn_eap, only: init_eap + use ice_dyn_shared, only: kdyn, init_evp + use ice_fileunits, only: init_fileunits + use ice_flux, only: init_coupler_flux, init_history_therm, & + init_history_dyn, init_flux_atm, init_flux_ocn + use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & + get_forcing_atmo, get_forcing_ocn + use ice_grid, only: init_grid1, init_grid2 + use ice_history, only: init_hist, accum_hist + use ice_restart_shared, only: restart, runid, runtype + use ice_init, only: input_data, init_state + use ice_itd, only: init_itd + use ice_kinds_mod + use ice_restoring, only: ice_HaloRestore_init + use ice_shortwave, only: init_shortwave + use ice_state, only: tr_aero + use ice_therm_shared, only: calc_Tsfc, heat_capacity + use ice_therm_vertical, only: init_thermo_vertical + use ice_timers, only: timer_total, init_ice_timers, ice_timer_start + use ice_transport_driver, only: init_transport + use ice_zbgc, only: init_zbgc + use ice_zbgc_shared, only: skl_bgc +#ifdef popcice + use drv_forcing, only: sst_sss +#endif + +#ifdef AusCOM + integer(kind=int_kind) :: idate_save +#endif + + call init_communicate ! initial setup for message passing +#ifdef AusCOM + call prism_init ! called in init_communicate + MPI_COMM_ICE = il_commlocal +! call init_cpl ! initialize message passing + call get_cpl_timecontrol + write(il_out,*)' CICE (cice_init) 1 jobnum = ',jobnum + write(il_out,*)' CICE (cice_init) 1 inidate = ',inidate + write(il_out,*)' CICE (cice_init) 1 init_date = ',init_date + write(il_out,*)' CICE (cice_init) 1 runtime0 = ',runtime0 + write(il_out,*)' CICE (cice_init) 1 runtime = ',runtime + write(il_out,*)' CICE (cice_init) 1 idate = ',my_task, idate + !write(il_out,*)' CICE (cice_init) 1 runtype = ',runtype +#endif + call init_fileunits ! unit numbers + + call input_data ! namelist variables + if (trim(runid) == 'bering') call check_finished_file + call init_zbgc ! vertical biogeochemistry namelist + + call init_domain_blocks ! set up block decomposition + call init_grid1 ! domain distribution +#ifdef AusCOM + call init_cpl ! initialize message passing +#endif + call init_ice_timers ! initialize all timers + call ice_timer_start(timer_total) ! start timing entire run + call init_grid2 ! grid variables + + call init_calendar ! initialize some calendar stuff + call init_hist (dt) ! initialize output history file + + if (kdyn == 2) then + call init_eap (dt_dyn) ! define eap dynamics parameters, variables + else ! for both kdyn = 0 or 1 + call init_evp (dt_dyn) ! define evp dynamics parameters, variables + endif + + call init_coupler_flux ! initialize fluxes exchanged with coupler +#ifdef popcice + call sst_sss ! POP data for CICE initialization +#endif + call init_thermo_vertical ! initialize vertical thermodynamics + call init_itd(calc_Tsfc, heat_capacity)! initialize ice thickness distribution + call calendar(time) ! determine the initial date + +!ars599: 11042014: remove most of the lines based on cice4.1_fm +! 
however try to keep idate_save +#ifdef AusCOM + idate_save = idate !save for late re-set in case 'restart' is used for jobnum=1 + !and mess up the calendar idate for this exp...! + if (jobnum==1) then + nrec = month - 1 !month is from calendar + if (nrec == 0) nrec = 12 + call get_time0_sstsss(trim(inputdir)//'/monthly_sstsss.nc', nrec) + endif + !the read in sst/sss determines the initial ice state (in init_state) +#else + call init_forcing_ocn(dt) ! initialize sss and sst from data +#endif + call init_state ! initialize the ice state + call init_transport ! initialize horizontal transport +!ars599: 08052014 remove markout HaloRestore_init + call ice_HaloRestore_init ! restored boundary conditions +!ars599: 11042014: note: +! the "if (runtype == 'continue') then ! start from core restart file" +! from dhb599 might add in the new code "init_restart together with +! tracer part +! however transport and Halorestore might keep in OM but not in CM?? + + call init_restart ! initialize restart variables + +#ifdef AusCOM + write(il_out,*) 'CICE (cice_init) 2 time = ', my_task, time + write(il_out,*) 'CICE (cice_init) 2 runtime0 = ', my_task, runtime0 + !write(il_out,*) 'CICE (cice_init) 2 idate = ', my_task, idate + + if (jobnum == 1 ) then + time = 0.0 !NOTE, the first job must be set back to 0 and + idate = idate_save !idate back to the 'initial' value, in any case + else !BX: 20160720 + time = runtime0 !............ + endif +#endif + + call init_diags ! initialize diagnostic output points + call init_history_therm ! initialize thermo history variables + call init_history_dyn ! initialize dynamic history variables + + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing + ! in prep_radiation. + if (trim(runtype) == 'continue' .or. restart) & + call init_shortwave ! initialize radiative transfer + + istep = istep + 1 ! update time step counters + istep1 = istep1 + 1 + time = time + dt ! determine the time and date +#ifndef AusCOM + call calendar(time) ! at the end of the first timestep +#else +!ars599: 26032014 original code +! call calendar(time) ! at the end of the first timestep + call calendar(time-runtime0) + write(il_out,*) 'CICE (cice_init) 3 time = ', my_task, time + write(il_out,*) 'CICE (cice_init) 3 runtime0 = ', my_task, runtime0 + write(il_out,*) 'CICE (cice_init) 3 idate = ', my_task, idate +#endif + + !-------------------------------------------------------------------- + ! coupler communication or forcing data initialization + !-------------------------------------------------------------------- + + call init_forcing_atmo ! initialize atmospheric forcing (standalone) + +#ifndef coupled + call get_forcing_atmo ! atmospheric forcing from data + call get_forcing_ocn(dt) ! ocean forcing from data +! if (tr_aero) call faero_data ! aerosols + if (tr_aero) call faero_default ! aerosols + if (skl_bgc) call get_forcing_bgc +#endif + + if (runtype == 'initial' .and. .not. restart) & + call init_shortwave ! initialize radiative transfer using current swdn + +!ars599: 09042014: add in +! based on cice4.1_fm +!20091020 +!#ifndef AusCOM + call init_flux_atm ! initialize atmosphere fluxes sent to coupler + call init_flux_ocn ! initialize ocean fluxes sent to coupler +!#endif + + if (.not. calc_Tsfc .and. heat_capacity) & + call top_layer_Tandk_init ! initialise top layer temperature and + ! effective conductivity + +!ars599: 11042014: ice_write_hist is no longer there now change to accum_hist +! 
so wrapup this line n use the new code +!dhb599 20111128: this call is moved here from 'downstair', because it *re-initilaise* +! the o2i fileds read in from o2i.nc (e.g., sst=>-1.84 sss=>34.) !!! +! call ice_write_hist(dt) ! write initial conditions if write_ic = T + if (write_ic) call accum_hist(dt) ! write initial conditions + +#ifdef AusCOM + write(il_out,*)' calling init_mocn_fields_4_i2a at time_sec = ',0 + !call initialize_mice_fields_4_i2a + call initialize_mocn_fields_4_i2a + + ! for continue runs, need restart o2i forcing fields and time-averaged ice + ! variables ('mice')saved at the end of last run from ice models; + ! for initial run, pre-processed o2i (and maybe mice) fields are required. +!hxy599 call get_restart_o2i(trim(restartdir)//'/o2i.nc') + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !if no lag for ice to atm coupling, then cice has to read restart file i2a.nc and + !put the data to atm. the call is not needed if there is lag for ice2atm coupling + !must call after get_restart_o2i(), by which the ocn_sst ect are read in and re-used by put_restart_i2a() +! call put_restart_i2a('i2a.nc', 0) +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +! if ( file_exist('CICE_restart/mice.nc') ) then + if ( file_exist(trim(restartdir)//'/mice.nc') ) then + !for continue runs, mice data MUST be available. +! call get_restart_mice('CICE_restart/mice.nc') + call get_restart_mice(trim(restartdir)//'/mice.nc') + else +write(6,*)'*** CICE WARNING: No initial mice.nc data available here! **' +write(6,*)'*** CICE WARNING: ALL mice variables will be set to ZERO! **' +write(6,*)'*** CICE WARNING: This is allowed for the init run ONLY ! **' + endif + if (use_core_runoff) then +! call get_core_runoff('CICE_input/core_runoff_regrid.nc',& + call get_core_runoff(trim(inputdir)//'/core_runoff_regrid.nc',& + 'runoff',1) + endif + +!hxy599 write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',0 !time_sec +!hxy599 call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling + + !get a2i fields and then set up initial SBC for ice + !call from_atm(0) + !call get_sbc_ice + !now "most of" the IC of this run are 'proper' for "call ice_write_hist" +#endif + +!ars599: 11042014: ice_write_hist is no longer there now change to accum_hist +! so wrapup this line n markout +!dhb599: 20111128: the following call is moved 'upstair' +! if (write_ic) call accum_hist(dt) ! 
write initial conditions + + end subroutine cice_init + +!======================================================================= + + subroutine init_restart + + use ice_aerosol, only: init_aerosol + use ice_age, only: init_age, restart_age, read_restart_age + use ice_blocks, only: nx_block, ny_block + use ice_brine, only: init_hbrine + use ice_calendar, only: time, calendar + use ice_domain, only: nblocks + use ice_domain_size, only: ncat + use ice_dyn_eap, only: read_restart_eap + use ice_dyn_shared, only: kdyn + use ice_firstyear, only: init_fy, restart_FY, read_restart_FY + use ice_flux, only: sss + use ice_init, only: ice_ic + use ice_lvl, only: init_lvl, restart_lvl, read_restart_lvl + use ice_meltpond_cesm, only: init_meltponds_cesm, & + restart_pond_cesm, read_restart_pond_cesm + use ice_meltpond_lvl, only: init_meltponds_lvl, & + restart_pond_lvl, read_restart_pond_lvl, dhsn + use ice_meltpond_topo, only: init_meltponds_topo, & + restart_pond_topo, read_restart_pond_topo + use ice_restart_shared, only: runtype, restart + use ice_restart_driver, only: restartfile, restartfile_v4 + use ice_state, only: tr_iage, tr_FY, tr_lvl, tr_pond_cesm, & + tr_pond_lvl, tr_pond_topo, tr_aero, trcrn, & + nt_iage, nt_FY, nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, tr_brine + use ice_zbgc, only: init_bgc + use ice_zbgc_shared, only: skl_bgc + + integer(kind=int_kind) :: iblk + + if (trim(runtype) == 'continue') then + ! start from core restart file + call restartfile() ! given by pointer in ice_in +!ars599: 11042014: markout call calendar +! according to dhb599 initmod at cice4.1_fm + call calendar(time) ! update time parameters + if (kdyn == 2) call read_restart_eap ! EAP + else if (restart) then ! ice_ic = core restart file + call restartfile (ice_ic) ! or 'default' or 'none' + !!! uncomment to create netcdf + ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file + !!! uncomment if EAP restart data exists + ! if (kdyn == 2) call read_restart_eap + endif + + ! tracers + ! ice age tracer + if (tr_iage) then + if (trim(runtype) == 'continue') & + restart_age = .true. + if (restart_age) then + call read_restart_age + else + do iblk = 1, nblocks + call init_age(nx_block, ny_block, ncat, trcrn(:,:,nt_iage,:,iblk)) + enddo ! iblk + endif + endif + ! first-year area tracer + if (tr_FY) then + if (trim(runtype) == 'continue') restart_FY = .true. + if (restart_FY) then + call read_restart_FY + else + do iblk = 1, nblocks + call init_FY(nx_block, ny_block, ncat, trcrn(:,:,nt_FY,:,iblk)) + enddo ! iblk + endif + endif + ! level ice tracer + if (tr_lvl) then + if (trim(runtype) == 'continue') restart_lvl = .true. + if (restart_lvl) then + call read_restart_lvl + else + do iblk = 1, nblocks +!ars599: 11042014: markout call init_lvl +! according to dhb599 initmod at cice4.1_fm +! call init_lvl(nx_block, ny_block, ncat, & +! trcrn(:,:,nt_alvl,:,iblk), trcrn(:,:,nt_vlvl,:,iblk)) + enddo ! iblk + endif + endif + ! CESM melt ponds + if (tr_pond_cesm) then + if (trim(runtype) == 'continue') & + restart_pond_cesm = .true. + if (restart_pond_cesm) then + call read_restart_pond_cesm + else + do iblk = 1, nblocks + call init_meltponds_cesm(nx_block, ny_block, ncat, & + trcrn(:,:,nt_apnd,:,iblk), trcrn(:,:,nt_hpnd,:,iblk)) + enddo ! iblk + endif + endif + ! level-ice melt ponds + if (tr_pond_lvl) then + if (trim(runtype) == 'continue') & + restart_pond_lvl = .true. 
+ if (restart_pond_lvl) then + call read_restart_pond_lvl + else + do iblk = 1, nblocks + call init_meltponds_lvl(nx_block, ny_block, ncat, & + trcrn(:,:,nt_apnd,:,iblk), trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk), dhsn(:,:,:,iblk)) + enddo ! iblk + endif + endif + ! topographic melt ponds + if (tr_pond_topo) then + if (trim(runtype) == 'continue') & + restart_pond_topo = .true. + if (restart_pond_topo) then + call read_restart_pond_topo + else + do iblk = 1, nblocks + call init_meltponds_topo(nx_block, ny_block, ncat, & + trcrn(:,:,nt_apnd,:,iblk), trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk)) + enddo ! iblk + endif ! .not restart_pond + endif + if (tr_aero) call init_aerosol ! ice aerosol + if (tr_brine) call init_hbrine ! brine height tracer + if (skl_bgc) call init_bgc ! biogeochemistry + + end subroutine init_restart + +!======================================================================= +! +! Check whether a file indicating that the previous run finished cleanly +! If so, then do not continue the current restart. This is needed only +! for runs on machine 'bering' (set using runid = 'bering'). +! +! author: Adrian Turner, LANL + + subroutine check_finished_file() + + use ice_communicate, only: my_task, master_task + use ice_exit, only: abort_ice + use ice_restart_shared, only: restart_dir + + character(len=char_len_long) :: filename + logical :: lexist = .false. + + if (my_task == master_task) then + + filename = trim(restart_dir)//"finished" + inquire(file=filename, exist=lexist) + if (lexist) then + call abort_ice("Found already finished file - quitting") + end if + + endif + + end subroutine check_finished_file + +!======================================================================= + + end module CICE_InitMod + +!======================================================================= diff --git a/drivers/access/CICE_RunMod.F90 b/drivers/access/CICE_RunMod.F90 new file mode 100644 index 00000000..c286bd81 --- /dev/null +++ b/drivers/access/CICE_RunMod.F90 @@ -0,0 +1,776 @@ +! SVN:$Id: CICE_RunMod.F90 746 2013-09-28 22:47:56Z eclare $ +!======================================================================= +! +! Main driver for time stepping of CICE. +! +! authors Elizabeth C. Hunke, LANL +! Philip W. Jones, LANL +! William H. Lipscomb, LANL +! +! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep +! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Converted to free source form (F90) +! 2007 BPB: Modified Delta-Eddington shortwave interface +! 2008 ECH: moved ESMF code to its own driver + + module CICE_RunMod + + use ice_kinds_mod + +#ifdef AusCOM + !For stuff in this AusCOM's own driver the "#ifdef AusCOM" is NOT needed! + !but for consistency with the code in other places, we keep it anyway ... + !...to "indentify" the modification to the original code, easier + !...to locate future code update Aug. 2008 + + use cpl_parameters + use cpl_arrays_setup + use cpl_interface + use cpl_forcing_handler +#endif + + implicit none + private + public :: CICE_Run, ice_step + save + +!======================================================================= + + contains + +!======================================================================= +! +! This is the main driver routine for advancing CICE forward in time. +! +! author Elizabeth C. Hunke, LANL +! Philip W. Jones, LANL +! William H. 
Lipscomb, LANL + + subroutine CICE_Run + + use ice_aerosol, only: faero_default + use ice_algae, only: get_forcing_bgc + use ice_calendar, only: istep, istep1, time, dt, stop_now, calendar + use ice_forcing, only: get_forcing_atmo, get_forcing_ocn +#ifdef AusCOM +!ars599: 27032014 add in + use ice_calendar, only: month, mday, istep, istep1, & + time, dt, stop_now, calendar + use ice_forcing, only: get_forcing_atmo, get_forcing_ocn +#endif + use ice_flux, only: init_flux_atm, init_flux_ocn + use ice_state, only: tr_aero + use ice_timers, only: ice_timer_start, ice_timer_stop, & + timer_couple, timer_step + use ice_zbgc_shared, only: skl_bgc + +#ifdef AusCOM +!ars599: 27032014 add in + use ice_timers, only: ice_timer_start, & + ice_timer_stop, timer_couple, timer_step + use ice_grid, only: t2ugrid_vector, u2tgrid_vector + + + integer (kind=int_kind) :: time_sec, itap, icpl_ai, icpl_io, tmp_time + integer (kind=int_kind) :: rtimestamp_ai, stimestamp_ai + integer (kind=int_kind) :: rtimestamp_io, stimestamp_io + !receive and send timestamps (seconds) + integer (kind=int_kind) :: imon +!ars: 08052014 according to dhb599 fm changed, and mark out the one from OM +! logical :: first_step = .true. !1st time step of experiment or not + logical :: need_i2o = .true. +#endif + + !-------------------------------------------------------------------- + ! initialize error code and step timer + !-------------------------------------------------------------------- + + call ice_timer_start(timer_step) ! start timing entire run + + !-------------------------------------------------------------------- + ! timestep loop + !-------------------------------------------------------------------- + +#ifdef AusCOM + write(il_out,*)'A <==> I coupling num_cpl_ai= ',num_cpl_ai + write(il_out,*)' to/from ocean num_cpl_io= ',num_cpl_io + write(il_out,*)' ice steps num_ice_io = ', num_ice_io + write(il_out,*)'runtime, runtime0=', runtime, runtime0 + + time_sec = 0 + ! get from atm once at time 0 +! rtimestamp_ai = time_sec +! call ice_timer_start(timer_from_atm) ! atm/ocn coupling +! call from_atm(rtimestamp_ai) +! call ice_timer_stop(timer_from_atm) ! atm/ocn coupling +! +! !set time averaged ice and ocn variables back to 0 +! call initialize_mice_fields_4_i2a + call initialize_mocn_fields_4_i2a + + DO icpl_ai = 1, num_cpl_ai !begin A <==> I coupling iterations + + !!!B: from_atm should be called here, like + !!! if (icpl_ai /= numcpl_ai) then !avoid the last step(?) + !!! rtimestamp_ai = time_sec !(?) + !!! call from_atm(rtimestamp_ai) + !!! endif + !!! call atm_icefluxes_back2GBM + + Do icpl_io = 1, num_cpl_io !begin I <==> O coupling iterations + + rtimestamp_io = time_sec + if (rtimestamp_io < runtime) then !get coupling from ocean except the last time step + write(il_out,*)' calling from_ocn at time_sec = ',time_sec + !call ice_timer_start(timer_from_ocn) ! atm/ocn coupling + call from_ocn(rtimestamp_io) !get o2i fields for next IO cpl int + !call ice_timer_stop(timer_from_ocn) ! atm/ocn coupling + + write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',time_sec + call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling + end if + + + ! atm ice coupling time except last step: + if(icpl_ai <= num_cpl_ai .and. mod(time_sec, dt_cpl_ai ) == 0) then + rtimestamp_ai = time_sec + !call ice_timer_start(timer_from_atm) ! atm/ice coupling + call from_atm(rtimestamp_ai) + !call ice_timer_stop(timer_from_atm) ! 
atm/ice coupling + + !"TTI" approach ice fluxes converted to GBM units + !(topmelt, bototmmelt and surface sublimation) + call atm_icefluxes_back2GBM + +!! !set time averaged ice and ocn variables back to 0 + write(il_out,*)' calling init_mice_fields_4_i2a at time_sec = ',time_sec + call initialize_mice_fields_4_i2a +! call initialize_mocn_fields_4_i2a + end if + + + stimestamp_io = time_sec + + !at the beginning of the run, cice (CICE_init) reads in the required ice fields + !(time averaged over the last coupling interval of previous run), which include + !strocnx/yT, aice, fresh_gbm, fsalt_gbm, fhocn_gbm, fswthru_gbm, sicemass etc. + !(named as mstrocnx/yT, maice, mfresh, mfsalt, mfhocn, mfswthru, msicemass ...) + + !together with the a2i fields (sent from um at the end of previous run) received + !above, the time0 i2o fields can be obtained here + + !if (runtime0 == 0 .and. need_i2o) then + ! write(6,*)'*** CICE: initial run calls get_restart_i2o *** ' + ! write(6,*)'*** CICE: time_sec = ', time_sec + ! write(il_out,*)' calling get_restart_i2o at time_sec = ',time_sec + ! call get_restart_i2o('i2o.nc') + ! need_i2o = .false. + !else + ! write(6,*)'*** CICE: calling get_i2o_fields... ' + ! write(6,*)'*** CICE: time_sec = ', time_sec + ! write(6,*)'*** CICE: calling get_i2o_fields... ' + ! write(il_out,*)' calling get_i2o_fields at time_sec = ',time_sec + call get_i2o_fields + !endif + + !shift stresses from T onto U grid before sending into ocn + write(il_out,*)' calling t2ugrid_vector - u/v at time_sec = ', time_sec + call t2ugrid_vector(io_strsu) + call t2ugrid_vector(io_strsv) + + write(il_out,*)' calling into_ocn at time_sec = ', time_sec + !call ice_timer_start(timer_into_ocn) ! atm/ocn coupling + call into_ocn(stimestamp_io) + !call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling + + !at the beginning of the run, cice (CICE_init) reads in the required o2i fields + !(saved from the last timestep of ocean). + + !together with the a2i fields (sent from um at the end of previous run) received + !above, the time0 boundary condition for ice 'internal time loop' is set here + + !-------------------------------------------------------------------------------- + !* This needs be investigated: calling set_sbc_ice outside the itap loop causes + ! cice to crash ('departure error') due probably to "aice" "mismatch?" for each + ! time step in the set_sbc_ice calculation.... (?) + ! We therefore still call "get_sbc_ice" inside the ice time loop (below) + ! + !write(il_out,*)' calling set_sbc_ice at time_sec = ',time_sec + !call set_sbc_ice + !-------------------------------------------------------------------------------- + + !set time averaged ice variables back to 0 + write(il_out,*)' calling init_mice_fields_4_i2o at time_sec = ',time_sec + call initialize_mice_fields_4_i2o + + do itap = 1, num_ice_io ! cice time loop within each i<=>o cpl interval + + !------------------------------------------------------------------------------ + !* see comments above + call get_sbc_ice + !set boundary condition (forcing) for ice time step + !------------------------------------------------------------------------------ + + call ice_step + write(il_out,*)' calling ave_ice_fields_4_i2a at time_sec = ',time_sec + +!======================================= + tmp_time = time_sec + dt + if ( mod(tmp_time, dt_cpl_ai) == 0 ) then !put to atm i step before coupling + write(il_out,*)' calling get_i2a_fields at time_sec = ',time_sec + !call ice_timer_start(timer_into_atm) ! atm/ocn coupling + call get_i2a_fields ! 
i2a fields ready to be sent for next IA cpl int in atm. + +! if(tmp_time < runtime ) then + ! * because of using lag=+dt_ice, we must take one step off the time_sec + ! * to make the sending happen at right time: + stimestamp_ai = time_sec ! - dt + write(il_out,*)' calling into_atm at time_sec = ',time_sec + call into_atm(stimestamp_ai) + +! !set time averaged ice and ocn variables back to 0 + write(il_out,*)' calling init_mocn_fields_4_i2a at time_sec = ',time_sec + !call initialize_mice_fields_4_i2a + call initialize_mocn_fields_4_i2a +! end if + !call ice_timer_stop(timer_into_atm) ! atm/ocn coupling + end if +!====================================== + + ! note ice_step makes call to time_average_fields_4_i2o + ! and time_average_fields_4_i2a + ! to get time-averaged ice variables required for setting up i2o and i2a cpl fields + + istep = istep + 1 ! update time step counters + istep1 = istep1 + 1 + time = time + dt ! determine the time and date + + time_sec = time_sec + dt + + call calendar(time-runtime0) + + !initialize fluxes sent to coupler (WHY should still need do this? CH: NOT needed!) + call init_flux_atm + call init_flux_ocn + + !CH: should be doing things here + !get_i2o_fields + !get_i2a_fields + + end do !itap + + End Do !icpl_io + + END DO !icpl_ai + + ! final update of the stimestamp_io, ie., put back the last dt_cice: + stimestamp_io = stimestamp_io + dt + + ! *** need save o2i fields here instead of in mom4 *** + !call save_restart_o2i('o2i.nc', stimestamp_io) !it is done in mom4 + + ! *** need save the last IO cpl int (time-averaged) ice variables used to get i2o fields *** + ! *** at the beginning of next run *** + call save_restart_mice('mice.nc',stimestamp_io) + +#else + + timeLoop: do + + call ice_step + + istep = istep + 1 ! update time step counters + istep1 = istep1 + 1 + time = time + dt ! determine the time and date + + call calendar(time) ! at the end of the timestep + + if (stop_now >= 1) exit timeLoop + +#ifndef coupled + call ice_timer_start(timer_couple) ! atm/ocn coupling + call get_forcing_atmo ! atmospheric forcing from data + call get_forcing_ocn(dt) ! ocean forcing from data + ! if (tr_aero) call faero_data ! aerosols + if (tr_aero) call faero_default ! aerosols + if (skl_bgc) call get_forcing_bgc ! biogeochemistry + call ice_timer_stop(timer_couple) ! atm/ocn coupling +#endif + + call init_flux_atm ! initialize atmosphere fluxes sent to coupler + call init_flux_ocn ! initialize ocean fluxes sent to coupler + + enddo timeLoop + +#endif + + !-------------------------------------------------------------------- + ! end of timestep loop + !-------------------------------------------------------------------- + + call ice_timer_stop(timer_step) ! end timestepping loop timer + + end subroutine CICE_Run + +!======================================================================= +! +! Calls drivers for physics components, some initialization, and output +! +! author Elizabeth C. Hunke, LANL +! William H. 
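! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! The initialize_m*_fields_4_* / time_average_*fields_4_* pairs used throughout
! CICE_Run implement an accumulate-then-normalise mean over each coupling
! interval.  The sketch below only shows that pattern for a single scalar; the
! variable names echo the comments above (maice etc.) but the code and numbers
! are hypothetical, and where the real routines normalise is handled by the
! cpl_* modules:
      program interval_mean_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        real(dbl_kind) :: maice, aice
        integer :: itap, icount, num_ice_io
        num_ice_io = 3
        maice  = 0.0_dbl_kind              ! "initialize": zero the accumulator
        icount = 0
        do itap = 1, num_ice_io            ! stands in for the inner ice time loop
           aice   = 0.5_dbl_kind + 0.1_dbl_kind*real(itap, dbl_kind)  ! fake ice fraction
           maice  = maice + aice           ! "time_average": accumulate each step
           icount = icount + 1
        enddo
        maice = maice / real(icount, dbl_kind)   ! normalise at coupling time
        print *, 'interval-mean aice =', maice   ! 0.7
      end program interval_mean_sketch
! --- end editor's aside ---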
Lipscomb, LANL + + subroutine ice_step + + use ice_age, only: write_restart_age + use ice_aerosol, only: write_restart_aero + use ice_boundary, only: ice_HaloUpdate + use ice_brine, only: hbrine_diags, write_restart_hbrine + use ice_calendar, only: dt, dt_dyn, ndtd, diagfreq, write_restart, istep + use ice_constants, only: field_loc_center, field_type_scalar + use ice_diagnostics, only: init_mass_diags, runtime_diags + use ice_domain, only: halo_info, nblocks + use ice_domain_size, only: nslyr + use ice_dyn_eap, only: write_restart_eap + use ice_dyn_shared, only: kdyn + use ice_firstyear, only: write_restart_FY + use ice_flux, only: scale_factor, init_history_therm + use ice_history, only: accum_hist + use ice_lvl, only: write_restart_lvl + use ice_restart, only: final_restart + use ice_restart_driver, only: dumpfile + use ice_meltpond_cesm, only: write_restart_pond_cesm + use ice_meltpond_lvl, only: write_restart_pond_lvl + use ice_meltpond_topo, only: write_restart_pond_topo + use ice_restoring, only: restore_ice, ice_HaloRestore + use ice_state, only: nt_qsno, trcrn, tr_iage, tr_FY, tr_lvl, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero + use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & + post_thermo, step_dynamics, step_radiation + use ice_therm_shared, only: calc_Tsfc + use ice_timers, only: ice_timer_start, ice_timer_stop, & + timer_diags, timer_column, timer_thermo, timer_bound, & + timer_hist, timer_readwrite + use ice_algae, only: bgc_diags, write_restart_bgc + use ice_zbgc, only: init_history_bgc, biogeochemistry + use ice_zbgc_shared, only: skl_bgc + + integer (kind=int_kind) :: & + iblk , & ! block index + k ! dynamics supercycling index + + !----------------------------------------------------------------- + ! restoring on grid boundaries + !----------------------------------------------------------------- + + if (restore_ice) call ice_HaloRestore + + !----------------------------------------------------------------- + ! initialize diagnostics + !----------------------------------------------------------------- + + call ice_timer_start(timer_diags) ! diagnostics/history + call init_mass_diags ! diagnostics per timestep + call init_history_therm + call init_history_bgc + call ice_timer_stop(timer_diags) ! diagnostics/history + + call ice_timer_start(timer_column) ! column physics + call ice_timer_start(timer_thermo) ! thermodynamics + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! Scale radiation fields + !----------------------------------------------------------------- + + if (calc_Tsfc) call prep_radiation (dt, iblk) + + !----------------------------------------------------------------- + ! thermodynamics + !----------------------------------------------------------------- + + call step_therm1 (dt, iblk) ! vertical thermodynamics + call biogeochemistry (dt, iblk) ! biogeochemistry + call step_therm2 (dt, iblk) ! ice thickness distribution thermo + + enddo ! iblk + !$OMP END PARALLEL DO + + call post_thermo (dt) ! finalize thermo update + + call ice_timer_stop(timer_thermo) ! thermodynamics + call ice_timer_stop(timer_column) ! column physics + + !----------------------------------------------------------------- + ! dynamics, transport, ridging + !----------------------------------------------------------------- + + do k = 1, ndtd + call step_dynamics (dt_dyn, ndtd) + enddo + + !----------------------------------------------------------------- + ! 
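! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! The loop "do k = 1, ndtd" near the end of ice_step sub-cycles the dynamics,
! transport and ridging ndtd times within one thermodynamic step, each sub-step
! using the shorter interval dt_dyn (dt/ndtd in the standard CICE calendar
! setup).  A trivial stand-alone illustration:
      program subcycle_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        integer :: ndtd, k
        real(dbl_kind) :: dt, dt_dyn
        dt     = 3600.0_dbl_kind
        ndtd   = 2
        dt_dyn = dt / real(ndtd, dbl_kind)
        do k = 1, ndtd
           print *, 'dynamics sub-step', k, 'of', ndtd, ' dt_dyn =', dt_dyn
        enddo
      end program subcycle_sketch
! --- end editor's aside ---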
albedo, shortwave radiation + !----------------------------------------------------------------- + + call ice_timer_start(timer_column) ! column physics + call ice_timer_start(timer_thermo) ! thermodynamics + +!ars599: 04042014: remove iblk do loop + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call step_radiation (dt, iblk) + + !----------------------------------------------------------------- + ! get ready for coupling and the next time step + !----------------------------------------------------------------- + + call coupling_prep (iblk) + + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (scale_factor, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + call ice_timer_stop(timer_thermo) ! thermodynamics + call ice_timer_stop(timer_column) ! column physics + +!ars599: 04092014: add in +! not sure should add inside the loop or not? +!ars599: 09052014: move from after line 458 "enddo ! iblk" to here +#ifdef AusCOM + !need some time-mean ice fields + !(so as to get i2o and i2a fields for next coupling interval) + call time_average_fields_4_i2o + call time_average_fields_4_i2a +#endif + + !----------------------------------------------------------------- + ! write data + !----------------------------------------------------------------- + + call ice_timer_start(timer_diags) ! diagnostics + if (mod(istep,diagfreq) == 0) then + call runtime_diags(dt) ! log file + if (skl_bgc) call bgc_diags (dt) + if (tr_brine) call hbrine_diags (dt) + endif + call ice_timer_stop(timer_diags) ! diagnostics + + call ice_timer_start(timer_hist) ! history + call accum_hist (dt) ! history file + call ice_timer_stop(timer_hist) ! history + + call ice_timer_start(timer_readwrite) ! reading/writing + if (write_restart == 1) then + call dumpfile ! core variables for restarting + if (tr_iage) call write_restart_age + if (tr_FY) call write_restart_FY + if (tr_lvl) call write_restart_lvl + if (tr_pond_cesm) call write_restart_pond_cesm + if (tr_pond_lvl) call write_restart_pond_lvl + if (tr_pond_topo) call write_restart_pond_topo + if (tr_aero) call write_restart_aero + if (skl_bgc) call write_restart_bgc + if (tr_brine) call write_restart_hbrine + if (kdyn == 2) call write_restart_eap + call final_restart + endif + + call ice_timer_stop(timer_readwrite) ! reading/writing + + end subroutine ice_step + +!======================================================================= +! +! Prepare for coupling +! +! authors: Elizabeth C. 
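! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! The block loops in ice_step follow the usual CICE pattern: one trip of the
! do-loop per sub-domain block, parallelised over OpenMP threads with the block
! index private.  A minimal self-contained analogue (the per-block "work" is a
! placeholder):
      program block_omp_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        integer, parameter :: nblocks = 8
        real(dbl_kind) :: blocksum(nblocks)
        integer :: iblk
        !$OMP PARALLEL DO PRIVATE(iblk)
        do iblk = 1, nblocks
           blocksum(iblk) = real(iblk, dbl_kind)**2   ! stands in for per-block physics
        enddo
        !$OMP END PARALLEL DO
        print *, 'total over blocks =', sum(blocksum)
      end program block_omp_sketch
! --- end editor's aside ---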
Hunke, LANL + + subroutine coupling_prep (iblk) + + use ice_blocks, only: block, nx_block, ny_block + use ice_calendar, only: dt, nstreams + use ice_constants, only: c0, c1, puny, rhofresh + use ice_coupling, only: top_layer_Tandk_run, sfcflux_to_ocn + use ice_domain_size, only: ncat + use ice_flux, only: alvdf, alidf, alvdr, alidr, albice, albsno, & + albpnd, albcnt, apeff_ai, coszen, fpond, fresh, & + alvdf_ai, alidf_ai, alvdr_ai, alidr_ai, fhocn_ai, & + fresh_ai, fsalt_ai, fsalt, & + fswthru_ai, fhocn, fswthru, scale_factor, & + swvdr, swidr, swvdf, swidf, Tf, Tair, Qa, strairxT, strairyt, & + fsens, flat, fswabs, flwout, evap, Tref, Qref, faero_ocn, & + fsurfn_f, flatn_f, scale_fluxes, frzmlt_init, frzmlt + use ice_grid, only: tmask + use ice_ocean, only: oceanmixed_ice, ocean_mixed_layer + use ice_shortwave, only: alvdfn, alidfn, alvdrn, alidrn, & + albicen, albsnon, albpndn, apeffn + use ice_state, only: aicen, aice, aice_init, nbtrcr + use ice_therm_shared, only: calc_Tsfc, heat_capacity + use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop + use ice_zbgc_shared, only: flux_bio, flux_bio_ai + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + n , & ! thickness category index + i,j , & ! horizontal indices + k ! tracer index + + real (kind=dbl_kind) :: cszn ! counter for history averaging + + !----------------------------------------------------------------- + ! Save current value of frzmlt for diagnostics. + ! Update mixed layer with heat and radiation from ice. + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + frzmlt_init (i,j,iblk) = frzmlt(i,j,iblk) + enddo + enddo + + call ice_timer_start(timer_couple) ! atm/ocn coupling + + if (oceanmixed_ice) & + call ocean_mixed_layer (dt,iblk) ! ocean surface fluxes and sst + +!ars599: removed in ACCESS?? +!#ifdef AusCOM +! if (chk_frzmlt_sst) call check_frzmlt_sst('frzmlt_sst1.nc') +!#endif + + !----------------------------------------------------------------- + ! Aggregate albedos + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + alvdf(i,j,iblk) = c0 + alidf(i,j,iblk) = c0 + alvdr(i,j,iblk) = c0 + alidr(i,j,iblk) = c0 + + albice(i,j,iblk) = c0 + albsno(i,j,iblk) = c0 + albpnd(i,j,iblk) = c0 + apeff_ai(i,j,iblk) = c0 + + ! for history averaging + cszn = c0 + if (coszen(i,j,iblk) > puny) cszn = c1 + do n = 1, nstreams + albcnt(i,j,iblk,n) = albcnt(i,j,iblk,n) + cszn + enddo + enddo + enddo + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + alvdf(i,j,iblk) = alvdf(i,j,iblk) & + + alvdfn(i,j,n,iblk)*aicen(i,j,n,iblk) + alidf(i,j,iblk) = alidf(i,j,iblk) & + + alidfn(i,j,n,iblk)*aicen(i,j,n,iblk) + alvdr(i,j,iblk) = alvdr(i,j,iblk) & + + alvdrn(i,j,n,iblk)*aicen(i,j,n,iblk) + alidr(i,j,iblk) = alidr(i,j,iblk) & + + alidrn(i,j,n,iblk)*aicen(i,j,n,iblk) + + if (coszen(i,j,iblk) > puny) then ! sun above horizon + albice(i,j,iblk) = albice(i,j,iblk) & + + albicen(i,j,n,iblk)*aicen(i,j,n,iblk) + albsno(i,j,iblk) = albsno(i,j,iblk) & + + albsnon(i,j,n,iblk)*aicen(i,j,n,iblk) + albpnd(i,j,iblk) = albpnd(i,j,iblk) & + + albpndn(i,j,n,iblk)*aicen(i,j,n,iblk) + endif + + apeff_ai(i,j,iblk) = apeff_ai(i,j,iblk) & ! for history + + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) + enddo + enddo + enddo + + do j = 1, ny_block + do i = 1, nx_block + + !----------------------------------------------------------------- + ! 
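! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! The aggregation loops in coupling_prep weight each thickness category's value
! by its area fraction, e.g. alvdf = sum over n of alvdfn*aicen, giving the
! grid-box contribution of the ice (scale_fluxes later divides by aice where
! per-ice-area values are wanted).  A worked single-column example with
! hypothetical numbers:
      program aggregate_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        integer, parameter :: ncat = 5
        real(dbl_kind) :: aicen(ncat), alvdfn(ncat), alvdf
        integer :: n
        aicen  = (/ 0.10_dbl_kind, 0.20_dbl_kind, 0.30_dbl_kind, &
                    0.15_dbl_kind, 0.05_dbl_kind /)
        alvdfn = (/ 0.60_dbl_kind, 0.65_dbl_kind, 0.70_dbl_kind, &
                    0.72_dbl_kind, 0.75_dbl_kind /)
        alvdf  = 0.0_dbl_kind
        do n = 1, ncat
           alvdf = alvdf + alvdfn(n)*aicen(n)   ! category albedo weighted by area
        enddo
        print *, 'aggregated visible diffuse albedo contribution:', alvdf
      end program aggregate_sketch
! --- end editor's aside ---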
reduce fresh by fpond for coupling + !----------------------------------------------------------------- + + fpond(i,j,iblk) = fpond(i,j,iblk) * rhofresh/dt + fresh(i,j,iblk) = fresh(i,j,iblk) - fpond(i,j,iblk) + + !---------------------------------------------------------------- + ! Store grid box mean albedos and fluxes before scaling by aice + !---------------------------------------------------------------- + + alvdf_ai (i,j,iblk) = alvdf (i,j,iblk) + alidf_ai (i,j,iblk) = alidf (i,j,iblk) + alvdr_ai (i,j,iblk) = alvdr (i,j,iblk) + alidr_ai (i,j,iblk) = alidr (i,j,iblk) + fresh_ai (i,j,iblk) = fresh (i,j,iblk) + fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) + fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) + fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) + + if (nbtrcr > 0) then + do k = 1, nbtrcr + flux_bio_ai (i,j,k,iblk) = flux_bio (i,j,k,iblk) + enddo + endif + + !----------------------------------------------------------------- + ! Save net shortwave for scaling factor in scale_factor + !----------------------------------------------------------------- + scale_factor(i,j,iblk) = & + swvdr(i,j,iblk)*(c1 - alvdr_ai(i,j,iblk)) & + + swvdf(i,j,iblk)*(c1 - alvdf_ai(i,j,iblk)) & + + swidr(i,j,iblk)*(c1 - alidr_ai(i,j,iblk)) & + + swidf(i,j,iblk)*(c1 - alidf_ai(i,j,iblk)) + + enddo + enddo + + !----------------------------------------------------------------- + ! Divide fluxes by ice area + ! - the CCSM coupler assumes fluxes are per unit ice area + ! - also needed for global budget in diagnostics + !----------------------------------------------------------------- + +#ifdef AusCOM + !20091020 -- MUST re-visit this part immediately! + if (do_scale_fluxes) then !(ch...) this should be done. +#endif + call scale_fluxes (nx_block, ny_block, & + tmask (:,:,iblk), nbtrcr, & + aice (:,:,iblk), Tf (:,:,iblk), & + Tair (:,:,iblk), Qa (:,:,iblk), & + strairxT (:,:,iblk), strairyT(:,:,iblk), & + fsens (:,:,iblk), flat (:,:,iblk), & + fswabs (:,:,iblk), flwout (:,:,iblk), & + evap (:,:,iblk), & + Tref (:,:,iblk), Qref (:,:,iblk), & + fresh (:,:,iblk), fsalt (:,:,iblk), & + fhocn (:,:,iblk), fswthru (:,:,iblk), & + faero_ocn(:,:,:,iblk), & + alvdr (:,:,iblk), alidr (:,:,iblk), & + alvdf (:,:,iblk), alidf (:,:,iblk), & + flux_bio(:,:,1:nbtrcr,iblk)) + +#ifdef AusCOM + endif +#endif +!b.--------------------------------------------------------------------- + if (.not. calc_Tsfc) then + + !--------------------------------------------------------------- + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. + !--------------------------------------------------------------- + + call sfcflux_to_ocn & + (nx_block, ny_block, & + tmask (:,:,iblk), aice_init(:,:,iblk), & + fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & + fresh (:,:,iblk), fhocn (:,:,iblk)) + endif +!echmod + + call ice_timer_stop(timer_couple) ! atm/ocn coupling + +! AEW: Calculate new top layer temp and effective cond after each +! timestep + if (.not. calc_Tsfc .and. heat_capacity) then + !---------------------------------------- + ! Get top layer temperature and effective conductivity + ! for passing to atmos + + call top_layer_Tandk_run (iblk) + endif + + end subroutine coupling_prep + +!======================================================================= +! +! If surface heat fluxes are provided to CICE instead of CICE calculating +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! be provided at points which do not have ice. (This is could be due to +! 
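! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! Conceptually, scale_fluxes (imported from ice_flux above) converts grid-box
! mean fluxes into per-unit-ice-area fluxes, which is what the CCSM-style
! coupler expects; the essential operation is a division by aice with a guard
! against (near) ice-free cells.  A minimal stand-alone analogue with
! hypothetical numbers (the real routine handles many fields plus masking):
      program scale_flux_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        real(dbl_kind), parameter :: puny = 1.0e-11_dbl_kind
        real(dbl_kind) :: fhocn_gbm, aice, fhocn_ai
        fhocn_gbm = -12.0_dbl_kind        ! W/m2, grid-box mean ice-ocean heat flux
        aice      = 0.4_dbl_kind          ! ice area fraction
        if (aice > puny) then
           fhocn_ai = fhocn_gbm / aice    ! per unit ice area: -30 W/m2
        else
           fhocn_ai = 0.0_dbl_kind
        endif
        print *, 'per-ice-area flux (W/m2):', fhocn_ai
      end program scale_flux_sketch
! --- end editor's aside ---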
the heat fluxes being calculated on a lower resolution grid or the +! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! conserve energy and water by passing these fluxes to the ocean. +! +! author: A. McLaren, Met Office + + subroutine sfcflux_to_ocn(nx_block, ny_block, & + tmask, aice, & + fsurfn_f, flatn_f, & + fresh, fhocn) + + use ice_domain_size, only: ncat + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in):: & + aice ! initial ice concentration + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(in) :: & + fsurfn_f, & ! net surface heat flux (provided as forcing) + flatn_f ! latent heat flux (provided as forcing) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout):: & + fresh , & ! fresh water flux to ocean (kg/m2/s) + fhocn ! actual ocn/ice heat flx (W/m**2) + +!ars599: 08052014 not sure but add auscom to try, copy from dhb599 fm +!#ifdef CICE_IN_NEMO +#ifdef AusCOM + + ! local variables + integer (kind=int_kind) :: & + i, j, n ! horizontal indices + + real (kind=dbl_kind) :: & + rLsub ! 1/Lsub + + rLsub = c1 / Lsub + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j) .and. aice(i,j) <= puny) then + fhocn(i,j) = fhocn(i,j) & + + fsurfn_f(i,j,n) + flatn_f(i,j,n) + fresh(i,j) = fresh(i,j) & + + flatn_f(i,j,n) * rLsub + endif + enddo ! i + enddo ! j + enddo ! n + +#endif + + end subroutine sfcflux_to_ocn + +!======================================================================= + + end module CICE_RunMod + +!======================================================================= diff --git a/drivers/access/CICE_RunMod.F90-new_incomplete b/drivers/access/CICE_RunMod.F90-new_incomplete new file mode 100644 index 00000000..d050bd0b --- /dev/null +++ b/drivers/access/CICE_RunMod.F90-new_incomplete @@ -0,0 +1,737 @@ +! SVN:$Id: CICE_RunMod.F90 746 2013-09-28 22:47:56Z eclare $ +!======================================================================= +! +! Main driver for time stepping of CICE. +! +! authors Elizabeth C. Hunke, LANL +! Philip W. Jones, LANL +! William H. Lipscomb, LANL +! +! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep +! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Converted to free source form (F90) +! 2007 BPB: Modified Delta-Eddington shortwave interface +! 2008 ECH: moved ESMF code to its own driver + + module CICE_RunMod + + use ice_kinds_mod + +#ifdef AusCOM + !For stuff in this AusCOM's own driver the "#ifdef AusCOM" is NOT needed! + !but for consistency with the code in other places, we keep it anyway ... + !...to "indentify" the modification to the original code, easier + !...to locate future code update Aug. 2008 + + use cpl_parameters + use cpl_arrays_setup + use cpl_interface + use cpl_forcing_handler +#endif + + implicit none + private + public :: CICE_Run, ice_step + save + +!======================================================================= + + contains + +!======================================================================= +! +! This is the main driver routine for advancing CICE forward in time. +! +! author Elizabeth C. Hunke, LANL +! Philip W. Jones, LANL +! William H. 
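! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! In sfcflux_to_ocn above, the latent heat flux handed to the ocean at ice-free
! points is also converted to a freshwater flux by dividing by Lsub (CICE's
! latent heat of sublimation; the 2.835e6 J/kg value used below is an assumed
! typical value, not taken from this patch).  A worked example:
      program latent_to_fresh_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        real(dbl_kind), parameter :: Lsub = 2.835e6_dbl_kind   ! J/kg (assumed)
        real(dbl_kind) :: flatn_f, fresh
        flatn_f = -30.0_dbl_kind          ! W/m2, upward latent heat loss
        fresh   = flatn_f / Lsub          ! kg/m2/s, about -1.06e-5
        print *, 'implied freshwater flux (kg/m2/s):', fresh
      end program latent_to_fresh_sketch
! --- end editor's aside ---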
Lipscomb, LANL + + subroutine CICE_Run + + use ice_aerosol, only: faero_default + use ice_algae, only: get_forcing_bgc + use ice_calendar, only: istep, istep1, time, dt, stop_now, calendar + use ice_forcing, only: get_forcing_atmo, get_forcing_ocn +#ifdef AusCOM +!ars599: 27032014 add in + use ice_calendar, only: month, mday, istep, istep1, & + time, dt, stop_now, calendar + use ice_forcing, only: get_forcing_atmo, get_forcing_ocn +#endif + use ice_flux, only: init_flux_atm, init_flux_ocn + use ice_state, only: tr_aero + use ice_timers, only: ice_timer_start, ice_timer_stop, & + timer_couple, timer_step + use ice_zbgc_shared, only: skl_bgc + +#ifdef AusCOM +!ars599: 27032014 add in + use ice_timers, only: ice_timer_start, & + ice_timer_stop, timer_couple, timer_step + use ice_grid, only: t2ugrid_vector, u2tgrid_vector + + + integer (kind=int_kind) :: time_sec, itap, icpl_ai, icpl_io, tmp_time + integer (kind=int_kind) :: rtimestamp_ai, stimestamp_ai + integer (kind=int_kind) :: rtimestamp_io, stimestamp_io + !receive and send timestamps (seconds) + integer (kind=int_kind) :: imon +!ars: 08052014 according to dhb599 fm changed, and mark out the one from OM +! logical :: first_step = .true. !1st time step of experiment or not + logical :: need_i2o = .true. +#endif + + !-------------------------------------------------------------------- + ! initialize error code and step timer + !-------------------------------------------------------------------- + + call ice_timer_start(timer_step) ! start timing entire run + + !-------------------------------------------------------------------- + ! timestep loop + !-------------------------------------------------------------------- + +#ifdef AusCOM + write(il_out,*)'A <==> I coupling num_cpl_ai= ',num_cpl_ai + write(il_out,*)' to/from ocean num_cpl_io= ',num_cpl_io + write(il_out,*)' ice steps num_ice_io = ', num_ice_io + write(il_out,*)'runtime, runtime0=', runtime, runtime0 + + DO icpl_ai = 1, num_cpl_ai !begin A <==> I coupling iterations + + time_sec = 0 + ! get forcing from atm once at time 0 + rtimestamp_ai = time_sec + call from_atm(rtimestamp_ai) + + ! convert fluxes into GBM + ! (UM coupling uses TTI approach which scales up fluxes with /acice) + call atm_fluxes_back2GBM + + !set time averaged ice and ocn variables back to 0 + call initialize_mice_fields_4_i2a + call initialize_mocn_fields_4_i2a + + Do icpl_io = 1, num_cpl_io !begin I <==> O coupling iterations + + stimestamp_io = time_sec + + !at the beginning of the run, cice (CICE_init) reads in the required ice fields + !(time averaged over the last coupling interval of previous run), which include + !strocnx/yT, aice, fresh_gbm, fsalt_gbm, fhocn_gbm, fswthru_gbm, sicemass etc. + !(named as mstrocnx/yT, maice, mfresh, mfsalt, mfhocn, mfswthru, msicemass ...) + + !together with the a2i fields (sent from um at the end of previous run) received + !above, the time0 i2o fields can be obtained here + + !if (runtime0 == 0 .and. need_i2o) then + ! write(6,*)'*** CICE: initial run calls get_restart_i2o *** ' + ! write(6,*)'*** CICE: time_sec = ', time_sec + ! write(il_out,*)' calling get_restart_i2o at time_sec = ',time_sec + ! call get_restart_i2o('i2o.nc') + ! need_i2o = .false. + !else + ! write(6,*)'*** CICE: calling get_i2o_fields... ' + ! write(6,*)'*** CICE: time_sec = ', time_sec + ! write(6,*)'*** CICE: calling get_i2o_fields... ' + ! 
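! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! Reading the comment above: under the TTI convention the UM supplies its ice
! fluxes per unit ice area (scaled up by /aice), so converting back to
! grid-box-mean (GBM) values multiplies by the category area fraction.  That
! reading, and the helper below, are the editor's inference, not code from the
! patch (the actual conversion is done by atm_fluxes_back2GBM in the cpl_*
! modules used above):
      program tti_to_gbm_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        real(dbl_kind) :: topmelt_per_ice, aicen, topmelt_gbm
        topmelt_per_ice = 20.0_dbl_kind   ! W/m2 per unit ice area (hypothetical)
        aicen           = 0.25_dbl_kind   ! category ice area fraction
        topmelt_gbm     = topmelt_per_ice * aicen   ! grid-box mean: 5 W/m2
        print *, 'GBM top-melt flux (W/m2):', topmelt_gbm
      end program tti_to_gbm_sketch
! --- end editor's aside ---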
write(il_out,*)' calling get_i2o_fields at time_sec = ',time_sec + call get_i2o_fields + !endif + + !shift stresses from T onto U grid before sending into ocn + write(il_out,*)' calling t2ugrid_vector - u/v at time_sec = ', time_sec + call t2ugrid_vector(io_strsu) + call t2ugrid_vector(io_strsv) + + write(il_out,*)' calling into_ocn at time_sec = ', time_sec + !call ice_timer_start(timer_into_ocn) ! atm/ocn coupling + call into_ocn(stimestamp_io) + !call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling + + !at the beginning of the run, cice (CICE_init) reads in the required o2i fields + !(saved from the last timestep of ocean). + + !together with the a2i fields (sent from um at the end of previous run) received + !above, the time0 boundary condition for ice 'internal time loop' is set here + + !-------------------------------------------------------------------------------- + !* This needs be investigated: calling set_sbc_ice outside the itap loop causes + ! cice to crash ('departure error') due probably to "aice" "mismatch?" for each + ! time step in the set_sbc_ice calculation.... (?) + ! We therefore still call "get_sbc_ice" inside the ice time loop (below) + ! + !write(il_out,*)' calling set_sbc_ice at time_sec = ',time_sec + !call set_sbc_ice + !-------------------------------------------------------------------------------- + + !set time averaged ice variables back to 0 + write(il_out,*)' calling init_mice_fields_4_i2o at time_sec = ',time_sec + call initialize_mice_fields_4_i2o + + do itap = 1, num_ice_io ! cice time loop within each i<=>o cpl interval + + !------------------------------------------------------------------------------ + !* see comments above + call get_sbc_ice + !set boundary condition (forcing) for ice time step + !------------------------------------------------------------------------------ + + call ice_step + + ! note ice_step makes call to time_average_fields_4_i2o + ! and time_average_fields_4_i2a + ! to get time-averaged ice variables required for setting up i2o and i2a cpl fields + + istep = istep + 1 ! update time step counters + istep1 = istep1 + 1 + time = time + dt ! determine the time and date + + time_sec = time_sec + dt + + call calendar(time-runtime0) + + !initialize fluxes sent to coupler (WHY should still need do this? CH: NOT needed!) + call init_flux_atm + call init_flux_ocn + + !CH: should be doing things here + !get_i2o_fields + !get_i2a_fields + + end do !itap + + !!write(il_out,*)' calling get_i2o_fields at time_sec = ',time_sec + !!call get_i2o_fields !i2o fields ready to be sent for next IO cpl int in ocn. + + rtimestamp_io = time_sec + +!?if block herexxx + if (rtimestamp_io < runtime) then !get coupling from ocean except the last time step + + write(il_out,*)' calling from_ocn at time_sec = ',time_sec + call from_ocn(rtimestamp_io) !get o2i fields for next IO cpl int + + write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',time_sec + call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling + + end if + + !CH: maybe-- + ! call get_i2a_fields + + End Do !icpl_io + + write(il_out,*)' calling get_i2a_fields at time_sec = ',time_sec + call ice_timer_start(timer_into_atm) ! atm/ocn coupling + call get_i2a_fields ! i2a fields ready to be sent for next IA cpl int in atm. + + ! * because of using lag=+dt_ice, we must take one step off the time_sec + ! 
* to make the sending happen at right time: +!?check the "lag" issue + stimestamp_ai = time_sec - dt + write(il_out,*)' calling into_atm at time_sec = ',time_sec + call into_atm(stimestamp_ai) + call ice_timer_stop(timer_into_atm) ! atm/ocn coupling + + END DO !icpl_ai + + ! final update of the stimestamp_io, ie., put back the last dt_cice: + stimestamp_io = stimestamp_io + dt + + ! *** need save o2i fields here instead of in mom4 *** + !call save_restart_o2i('o2i.nc', stimestamp_io) !it is done in mom4 + + ! *** need save the last IO cpl int (time-averaged) ice variables used to get i2o fields *** + ! *** at the beginning of next run *** + call save_restart_mice('mice.nc',stimestamp_io) + +#else + + timeLoop: do + + call ice_step + + istep = istep + 1 ! update time step counters + istep1 = istep1 + 1 + time = time + dt ! determine the time and date + + call calendar(time) ! at the end of the timestep + + if (stop_now >= 1) exit timeLoop + +#ifndef coupled + call ice_timer_start(timer_couple) ! atm/ocn coupling + call get_forcing_atmo ! atmospheric forcing from data + call get_forcing_ocn(dt) ! ocean forcing from data + ! if (tr_aero) call faero_data ! aerosols + if (tr_aero) call faero_default ! aerosols + if (skl_bgc) call get_forcing_bgc ! biogeochemistry + call ice_timer_stop(timer_couple) ! atm/ocn coupling +#endif + + call init_flux_atm ! initialize atmosphere fluxes sent to coupler + call init_flux_ocn ! initialize ocean fluxes sent to coupler + + enddo timeLoop + +#endif + + !-------------------------------------------------------------------- + ! end of timestep loop + !-------------------------------------------------------------------- + + call ice_timer_stop(timer_step) ! end timestepping loop timer + + end subroutine CICE_Run + +!======================================================================= +! +! Calls drivers for physics components, some initialization, and output +! +! author Elizabeth C. Hunke, LANL +! William H. 
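! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! On the "lag" comment above: with an OASIS-style lag of +dt on the ice-to-atm
! fields, the driver stamps the send with time_sec - dt so that stamp + lag
! lands on the coupling time the atmosphere expects.  A toy check of that
! arithmetic (names and numbers hypothetical):
      program lag_sketch
        implicit none
        integer :: time_sec, dt, lag, stimestamp_ai
        dt  = 3600
        lag = dt                        ! lag = +dt_ice
        time_sec = 10800                ! end of an ice <==> atm coupling interval
        stimestamp_ai = time_sec - dt   ! as in the driver above
        print *, 'field is valid to the atmosphere at t =', stimestamp_ai + lag
      end program lag_sketch
! --- end editor's aside ---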
Lipscomb, LANL + + subroutine ice_step + + use ice_age, only: write_restart_age + use ice_aerosol, only: write_restart_aero + use ice_boundary, only: ice_HaloUpdate + use ice_brine, only: hbrine_diags, write_restart_hbrine + use ice_calendar, only: dt, dt_dyn, ndtd, diagfreq, write_restart, istep + use ice_constants, only: field_loc_center, field_type_scalar + use ice_diagnostics, only: init_mass_diags, runtime_diags + use ice_domain, only: halo_info, nblocks + use ice_domain_size, only: nslyr + use ice_dyn_eap, only: write_restart_eap + use ice_dyn_shared, only: kdyn + use ice_firstyear, only: write_restart_FY + use ice_flux, only: scale_factor, init_history_therm + use ice_history, only: accum_hist + use ice_lvl, only: write_restart_lvl + use ice_restart, only: final_restart + use ice_restart_driver, only: dumpfile + use ice_meltpond_cesm, only: write_restart_pond_cesm + use ice_meltpond_lvl, only: write_restart_pond_lvl + use ice_meltpond_topo, only: write_restart_pond_topo + use ice_restoring, only: restore_ice, ice_HaloRestore + use ice_state, only: nt_qsno, trcrn, tr_iage, tr_FY, tr_lvl, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero + use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & + post_thermo, step_dynamics, step_radiation + use ice_therm_shared, only: calc_Tsfc + use ice_timers, only: ice_timer_start, ice_timer_stop, & + timer_diags, timer_column, timer_thermo, timer_bound, & + timer_hist, timer_readwrite + use ice_algae, only: bgc_diags, write_restart_bgc + use ice_zbgc, only: init_history_bgc, biogeochemistry + use ice_zbgc_shared, only: skl_bgc + + integer (kind=int_kind) :: & + iblk , & ! block index + k ! dynamics supercycling index + + !----------------------------------------------------------------- + ! restoring on grid boundaries + !----------------------------------------------------------------- + + if (restore_ice) call ice_HaloRestore + + !----------------------------------------------------------------- + ! initialize diagnostics + !----------------------------------------------------------------- + + call ice_timer_start(timer_diags) ! diagnostics/history + call init_mass_diags ! diagnostics per timestep + call init_history_therm + call init_history_bgc + call ice_timer_stop(timer_diags) ! diagnostics/history + + call ice_timer_start(timer_column) ! column physics + call ice_timer_start(timer_thermo) ! thermodynamics + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! Scale radiation fields + !----------------------------------------------------------------- + + if (calc_Tsfc) call prep_radiation (dt, iblk) + + !----------------------------------------------------------------- + ! thermodynamics + !----------------------------------------------------------------- + + call step_therm1 (dt, iblk) ! vertical thermodynamics + call biogeochemistry (dt, iblk) ! biogeochemistry + call step_therm2 (dt, iblk) ! ice thickness distribution thermo + + enddo ! iblk + !$OMP END PARALLEL DO + + call post_thermo (dt) ! finalize thermo update + + call ice_timer_stop(timer_thermo) ! thermodynamics + call ice_timer_stop(timer_column) ! column physics + + !----------------------------------------------------------------- + ! dynamics, transport, ridging + !----------------------------------------------------------------- + + do k = 1, ndtd + call step_dynamics (dt_dyn, ndtd) + enddo + + !----------------------------------------------------------------- + ! 
albedo, shortwave radiation + !----------------------------------------------------------------- + + call ice_timer_start(timer_column) ! column physics + call ice_timer_start(timer_thermo) ! thermodynamics + +!ars599: 04042014: remove iblk do loop + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call step_radiation (dt, iblk) + + !----------------------------------------------------------------- + ! get ready for coupling and the next time step + !----------------------------------------------------------------- + + call coupling_prep (iblk) + + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (scale_factor, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + call ice_timer_stop(timer_thermo) ! thermodynamics + call ice_timer_stop(timer_column) ! column physics + +!ars599: 04092014: add in +! not sure should add inside the loop or not? +!ars599: 09052014: move from after line 458 "enddo ! iblk" to here +#ifdef AusCOM + !need some time-mean ice fields + !(so as to get i2o and i2a fields for next coupling interval) + call time_average_fields_4_i2o + call time_average_fields_4_i2a +#endif + + !----------------------------------------------------------------- + ! write data + !----------------------------------------------------------------- + + call ice_timer_start(timer_diags) ! diagnostics + if (mod(istep,diagfreq) == 0) then + call runtime_diags(dt) ! log file + if (skl_bgc) call bgc_diags (dt) + if (tr_brine) call hbrine_diags (dt) + endif + call ice_timer_stop(timer_diags) ! diagnostics + + call ice_timer_start(timer_hist) ! history + call accum_hist (dt) ! history file + call ice_timer_stop(timer_hist) ! history + + call ice_timer_start(timer_readwrite) ! reading/writing + if (write_restart == 1) then + call dumpfile ! core variables for restarting + if (tr_iage) call write_restart_age + if (tr_FY) call write_restart_FY + if (tr_lvl) call write_restart_lvl + if (tr_pond_cesm) call write_restart_pond_cesm + if (tr_pond_lvl) call write_restart_pond_lvl + if (tr_pond_topo) call write_restart_pond_topo + if (tr_aero) call write_restart_aero + if (skl_bgc) call write_restart_bgc + if (tr_brine) call write_restart_hbrine + if (kdyn == 2) call write_restart_eap + call final_restart + endif + + call ice_timer_stop(timer_readwrite) ! reading/writing + + end subroutine ice_step + +!======================================================================= +! +! Prepare for coupling +! +! authors: Elizabeth C. 
Hunke, LANL + + subroutine coupling_prep (iblk) + + use ice_blocks, only: block, nx_block, ny_block + use ice_calendar, only: dt, nstreams + use ice_constants, only: c0, c1, puny, rhofresh + use ice_domain_size, only: ncat + use ice_flux, only: alvdf, alidf, alvdr, alidr, albice, albsno, & + albpnd, albcnt, apeff_ai, coszen, fpond, fresh, & + alvdf_ai, alidf_ai, alvdr_ai, alidr_ai, fhocn_ai, & + fresh_ai, fsalt_ai, fsalt, & + fswthru_ai, fhocn, fswthru, scale_factor, & + swvdr, swidr, swvdf, swidf, Tf, Tair, Qa, strairxT, strairyt, & + fsens, flat, fswabs, flwout, evap, Tref, Qref, faero_ocn, & + fsurfn_f, flatn_f, scale_fluxes, frzmlt_init, frzmlt + use ice_grid, only: tmask + use ice_ocean, only: oceanmixed_ice, ocean_mixed_layer + use ice_shortwave, only: alvdfn, alidfn, alvdrn, alidrn, & + albicen, albsnon, albpndn, apeffn + use ice_state, only: aicen, aice, aice_init, nbtrcr + use ice_therm_shared, only: calc_Tsfc + use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop + use ice_zbgc_shared, only: flux_bio, flux_bio_ai + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + n , & ! thickness category index + i,j , & ! horizontal indices + k ! tracer index + + real (kind=dbl_kind) :: cszn ! counter for history averaging + + !----------------------------------------------------------------- + ! Save current value of frzmlt for diagnostics. + ! Update mixed layer with heat and radiation from ice. + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + frzmlt_init (i,j,iblk) = frzmlt(i,j,iblk) + enddo + enddo + + call ice_timer_start(timer_couple) ! atm/ocn coupling + + if (oceanmixed_ice) & + call ocean_mixed_layer (dt,iblk) ! ocean surface fluxes and sst + +!ars599: removed in ACCESS?? +!#ifdef AusCOM +! if (chk_frzmlt_sst) call check_frzmlt_sst('frzmlt_sst1.nc') +!#endif + + !----------------------------------------------------------------- + ! Aggregate albedos + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + alvdf(i,j,iblk) = c0 + alidf(i,j,iblk) = c0 + alvdr(i,j,iblk) = c0 + alidr(i,j,iblk) = c0 + + albice(i,j,iblk) = c0 + albsno(i,j,iblk) = c0 + albpnd(i,j,iblk) = c0 + apeff_ai(i,j,iblk) = c0 + + ! for history averaging + cszn = c0 + if (coszen(i,j,iblk) > puny) cszn = c1 + do n = 1, nstreams + albcnt(i,j,iblk,n) = albcnt(i,j,iblk,n) + cszn + enddo + enddo + enddo + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + alvdf(i,j,iblk) = alvdf(i,j,iblk) & + + alvdfn(i,j,n,iblk)*aicen(i,j,n,iblk) + alidf(i,j,iblk) = alidf(i,j,iblk) & + + alidfn(i,j,n,iblk)*aicen(i,j,n,iblk) + alvdr(i,j,iblk) = alvdr(i,j,iblk) & + + alvdrn(i,j,n,iblk)*aicen(i,j,n,iblk) + alidr(i,j,iblk) = alidr(i,j,iblk) & + + alidrn(i,j,n,iblk)*aicen(i,j,n,iblk) + + if (coszen(i,j,iblk) > puny) then ! sun above horizon + albice(i,j,iblk) = albice(i,j,iblk) & + + albicen(i,j,n,iblk)*aicen(i,j,n,iblk) + albsno(i,j,iblk) = albsno(i,j,iblk) & + + albsnon(i,j,n,iblk)*aicen(i,j,n,iblk) + albpnd(i,j,iblk) = albpnd(i,j,iblk) & + + albpndn(i,j,n,iblk)*aicen(i,j,n,iblk) + endif + + apeff_ai(i,j,iblk) = apeff_ai(i,j,iblk) & ! for history + + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) + enddo + enddo + enddo + + do j = 1, ny_block + do i = 1, nx_block + + !----------------------------------------------------------------- + ! 
reduce fresh by fpond for coupling + !----------------------------------------------------------------- + + fpond(i,j,iblk) = fpond(i,j,iblk) * rhofresh/dt + fresh(i,j,iblk) = fresh(i,j,iblk) - fpond(i,j,iblk) + + !---------------------------------------------------------------- + ! Store grid box mean albedos and fluxes before scaling by aice + !---------------------------------------------------------------- + + alvdf_ai (i,j,iblk) = alvdf (i,j,iblk) + alidf_ai (i,j,iblk) = alidf (i,j,iblk) + alvdr_ai (i,j,iblk) = alvdr (i,j,iblk) + alidr_ai (i,j,iblk) = alidr (i,j,iblk) + fresh_ai (i,j,iblk) = fresh (i,j,iblk) + fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) + fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) + fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) + + if (nbtrcr > 0) then + do k = 1, nbtrcr + flux_bio_ai (i,j,k,iblk) = flux_bio (i,j,k,iblk) + enddo + endif + + !----------------------------------------------------------------- + ! Save net shortwave for scaling factor in scale_factor + !----------------------------------------------------------------- + scale_factor(i,j,iblk) = & + swvdr(i,j,iblk)*(c1 - alvdr_ai(i,j,iblk)) & + + swvdf(i,j,iblk)*(c1 - alvdf_ai(i,j,iblk)) & + + swidr(i,j,iblk)*(c1 - alidr_ai(i,j,iblk)) & + + swidf(i,j,iblk)*(c1 - alidf_ai(i,j,iblk)) + + enddo + enddo + + !----------------------------------------------------------------- + ! Divide fluxes by ice area + ! - the CCSM coupler assumes fluxes are per unit ice area + ! - also needed for global budget in diagnostics + !----------------------------------------------------------------- + +#ifdef AusCOM + !20091020 -- MUST re-visit this part immediately! + if (do_scale_fluxes) then !(ch...) this should be done. +#endif + call scale_fluxes (nx_block, ny_block, & + tmask (:,:,iblk), nbtrcr, & + aice (:,:,iblk), Tf (:,:,iblk), & + Tair (:,:,iblk), Qa (:,:,iblk), & + strairxT (:,:,iblk), strairyT(:,:,iblk), & + fsens (:,:,iblk), flat (:,:,iblk), & + fswabs (:,:,iblk), flwout (:,:,iblk), & + evap (:,:,iblk), & + Tref (:,:,iblk), Qref (:,:,iblk), & + fresh (:,:,iblk), fsalt (:,:,iblk), & + fhocn (:,:,iblk), fswthru (:,:,iblk), & + faero_ocn(:,:,:,iblk), & + alvdr (:,:,iblk), alidr (:,:,iblk), & + alvdf (:,:,iblk), alidf (:,:,iblk), & + flux_bio(:,:,1:nbtrcr,iblk)) + +#ifdef AusCOM + endif +#endif +!b.--------------------------------------------------------------------- + if (.not. calc_Tsfc) then + + !--------------------------------------------------------------- + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. + !--------------------------------------------------------------- + + call sfcflux_to_ocn & + (nx_block, ny_block, & + tmask (:,:,iblk), aice_init(:,:,iblk), & + fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & + fresh (:,:,iblk), fhocn (:,:,iblk)) + endif +!echmod + + call ice_timer_stop(timer_couple) ! atm/ocn coupling + + end subroutine coupling_prep + +!======================================================================= +! +! If surface heat fluxes are provided to CICE instead of CICE calculating +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! be provided at points which do not have ice. (This is could be due to +! the heat fluxes being calculated on a lower resolution grid or the +! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! conserve energy and water by passing these fluxes to the ocean. +! +! author: A. 
McLaren, Met Office + + subroutine sfcflux_to_ocn(nx_block, ny_block, & + tmask, aice, & + fsurfn_f, flatn_f, & + fresh, fhocn) + + use ice_domain_size, only: ncat + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in):: & + aice ! initial ice concentration + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(in) :: & + fsurfn_f, & ! net surface heat flux (provided as forcing) + flatn_f ! latent heat flux (provided as forcing) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout):: & + fresh , & ! fresh water flux to ocean (kg/m2/s) + fhocn ! actual ocn/ice heat flx (W/m**2) + +!ars599: 08052014 not sure but add auscom to try, copy from dhb599 fm +!#ifdef CICE_IN_NEMO +#ifdef AusCOM + + ! local variables + integer (kind=int_kind) :: & + i, j, n ! horizontal indices + + real (kind=dbl_kind) :: & + rLsub ! 1/Lsub + + rLsub = c1 / Lsub + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j) .and. aice(i,j) <= puny) then + fhocn(i,j) = fhocn(i,j) & + + fsurfn_f(i,j,n) + flatn_f(i,j,n) + fresh(i,j) = fresh(i,j) & + + flatn_f(i,j,n) * rLsub + endif + enddo ! i + enddo ! j + enddo ! n + +#endif + + end subroutine sfcflux_to_ocn + +!======================================================================= + + end module CICE_RunMod + +!======================================================================= diff --git a/drivers/access/CICE_RunMod.F90_debug b/drivers/access/CICE_RunMod.F90_debug new file mode 100644 index 00000000..dffc48e9 --- /dev/null +++ b/drivers/access/CICE_RunMod.F90_debug @@ -0,0 +1,552 @@ +! SVN:$Id: CICE_RunMod.F90 746 2013-09-28 22:47:56Z eclare $ +!======================================================================= +! +! Main driver for time stepping of CICE. +! +! authors Elizabeth C. Hunke, LANL +! Philip W. Jones, LANL +! William H. Lipscomb, LANL +! +! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep +! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Converted to free source form (F90) +! 2007 BPB: Modified Delta-Eddington shortwave interface +! 2008 ECH: moved ESMF code to its own driver + + module CICE_RunMod + + use ice_kinds_mod + + implicit none + private + public :: CICE_Run, ice_step + save + +!======================================================================= + + contains + +!======================================================================= +! +! This is the main driver routine for advancing CICE forward in time. +! +! author Elizabeth C. Hunke, LANL +! Philip W. Jones, LANL +! William H. Lipscomb, LANL + + subroutine CICE_Run + + use ice_aerosol, only: faero_default + use ice_algae, only: get_forcing_bgc + use ice_calendar, only: istep, istep1, time, dt, stop_now, calendar + use ice_forcing, only: get_forcing_atmo, get_forcing_ocn + use ice_flux, only: init_flux_atm, init_flux_ocn + use ice_state, only: tr_aero + use ice_timers, only: ice_timer_start, ice_timer_stop, & + timer_couple, timer_step + use ice_zbgc_shared, only: skl_bgc + + !-------------------------------------------------------------------- + ! initialize error code and step timer + !-------------------------------------------------------------------- + + call ice_timer_start(timer_step) ! 
start timing entire run + + !-------------------------------------------------------------------- + ! timestep loop + !-------------------------------------------------------------------- + + timeLoop: do + + call ice_step + + istep = istep + 1 ! update time step counters + istep1 = istep1 + 1 + time = time + dt ! determine the time and date + + call calendar(time) ! at the end of the timestep + + if (stop_now >= 1) exit timeLoop + +#ifndef coupled + call ice_timer_start(timer_couple) ! atm/ocn coupling + call get_forcing_atmo ! atmospheric forcing from data + call get_forcing_ocn(dt) ! ocean forcing from data + ! if (tr_aero) call faero_data ! aerosols + if (tr_aero) call faero_default ! aerosols + if (skl_bgc) call get_forcing_bgc ! biogeochemistry + call ice_timer_stop(timer_couple) ! atm/ocn coupling +#endif + + call init_flux_atm ! initialize atmosphere fluxes sent to coupler + call init_flux_ocn ! initialize ocean fluxes sent to coupler + + enddo timeLoop + + !-------------------------------------------------------------------- + ! end of timestep loop + !-------------------------------------------------------------------- + + call ice_timer_stop(timer_step) ! end timestepping loop timer + + end subroutine CICE_Run + +!======================================================================= +! +! Calls drivers for physics components, some initialization, and output +! +! author Elizabeth C. Hunke, LANL +! William H. Lipscomb, LANL + + subroutine ice_step + + use ice_age, only: write_restart_age + use ice_aerosol, only: write_restart_aero + use ice_boundary, only: ice_HaloUpdate + use ice_brine, only: hbrine_diags, write_restart_hbrine + use ice_calendar, only: dt, dt_dyn, ndtd, diagfreq, write_restart, istep + use ice_constants, only: field_loc_center, field_type_scalar + use ice_communicate, only: my_task + use ice_diagnostics, only: init_mass_diags, runtime_diags, mtask + use ice_domain, only: halo_info, nblocks + use ice_domain_size, only: nslyr + use ice_dyn_eap, only: write_restart_eap + use ice_dyn_shared, only: kdyn + use ice_firstyear, only: write_restart_FY + use ice_flux, only: scale_factor, init_history_therm + use ice_history, only: accum_hist + use ice_lvl, only: write_restart_lvl + use ice_restart, only: final_restart + use ice_restart_driver, only: dumpfile + use ice_meltpond_cesm, only: write_restart_pond_cesm + use ice_meltpond_lvl, only: write_restart_pond_lvl + use ice_meltpond_topo, only: write_restart_pond_topo + use ice_restoring, only: restore_ice, ice_HaloRestore + use ice_state, only: nt_qsno, trcrn, tr_iage, tr_FY, tr_lvl, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero + use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & + post_thermo, step_dynamics, step_radiation + use ice_therm_shared, only: calc_Tsfc + use ice_timers, only: ice_timer_start, ice_timer_stop, & + timer_diags, timer_column, timer_thermo, timer_bound, & + timer_hist, timer_readwrite + use ice_algae, only: bgc_diags, write_restart_bgc + use ice_zbgc, only: init_history_bgc, biogeochemistry + use ice_zbgc_shared, only: skl_bgc + + integer (kind=int_kind) :: & + iblk , & ! block index + k ! dynamics supercycling index + + character (len=char_len) :: plabeld + + plabeld = 'beginning time step' + do iblk = 1, nblocks + call debug_ice (iblk, plabeld) + enddo + + !----------------------------------------------------------------- + ! 
restoring on grid boundaries + !----------------------------------------------------------------- + + if (restore_ice) call ice_HaloRestore + + !----------------------------------------------------------------- + ! initialize diagnostics + !----------------------------------------------------------------- + + call ice_timer_start(timer_diags) ! diagnostics/history + call init_mass_diags ! diagnostics per timestep + call init_history_therm + call init_history_bgc + call ice_timer_stop(timer_diags) ! diagnostics/history + + call ice_timer_start(timer_column) ! column physics + call ice_timer_start(timer_thermo) ! thermodynamics + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! Scale radiation fields + !----------------------------------------------------------------- + + if (calc_Tsfc) call prep_radiation (dt, iblk) + + plabeld = 'post prep_radiation' + call debug_ice (iblk, plabeld) + + !----------------------------------------------------------------- + ! thermodynamics + !----------------------------------------------------------------- + + call step_therm1 (dt, iblk) ! vertical thermodynamics + + plabeld = 'post step_therm1' + call debug_ice (iblk, plabeld) + + call biogeochemistry (dt, iblk) ! biogeochemistry + + plabeld = 'post biogeochemistry' + call debug_ice (iblk, plabeld) + + call step_therm2 (dt, iblk) ! ice thickness distribution thermo + + plabeld = 'post step_therm2' + call debug_ice (iblk, plabeld) + + enddo ! iblk + !$OMP END PARALLEL DO + + call post_thermo (dt) ! finalize thermo update + + call ice_timer_stop(timer_thermo) ! thermodynamics + call ice_timer_stop(timer_column) ! column physics + + !----------------------------------------------------------------- + ! dynamics, transport, ridging + !----------------------------------------------------------------- + + do k = 1, ndtd + call step_dynamics (dt_dyn, ndtd) + enddo + + plabeld = 'post dynamics' + do iblk = 1, nblocks + call debug_ice (iblk, plabeld) + enddo + + !----------------------------------------------------------------- + ! albedo, shortwave radiation + !----------------------------------------------------------------- + + call ice_timer_start(timer_column) ! column physics + call ice_timer_start(timer_thermo) ! thermodynamics + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call step_radiation (dt, iblk) + + plabeld = 'post step_radiation' + call debug_ice (iblk, plabeld) + + !----------------------------------------------------------------- + ! get ready for coupling and the next time step + !----------------------------------------------------------------- + + call coupling_prep (iblk) + + plabeld = 'post coupling_prep' + call debug_ice (iblk, plabeld) + + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (scale_factor, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + call ice_timer_stop(timer_thermo) ! thermodynamics + call ice_timer_stop(timer_column) ! column physics + + !----------------------------------------------------------------- + ! write data + !----------------------------------------------------------------- + + call ice_timer_start(timer_diags) ! diagnostics + if (mod(istep,diagfreq) == 0) then + call runtime_diags(dt) ! log file + if (skl_bgc) call bgc_diags (dt) + if (tr_brine) call hbrine_diags (dt) + endif + call ice_timer_stop(timer_diags) ! diagnostics + + call ice_timer_start(timer_hist) ! 
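! --- Editor's aside (illustrative sketch, not part of the imported patch) ---
! The debug variant of ice_step brackets every physics call with
! "plabeld = ...; call debug_ice(iblk, plabeld)".  The stand-in below only
! illustrates the idea of a label-tagged checkpoint print at a fixed point; it
! is not the real debug_ice:
      program debug_point_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        real(dbl_kind) :: field(3,3)
        field = 0.0_dbl_kind
        field(2,2) = 271.35_dbl_kind
        call debug_point('post step_therm1', field, 2, 2)
      contains
        subroutine debug_point(plabeld, fld, i, j)
          character(len=*), intent(in) :: plabeld
          real(dbl_kind), intent(in) :: fld(:,:)
          integer, intent(in) :: i, j
          write(*,'(a,a,2i4,es14.6)') trim(plabeld), ' (i,j,value):', i, j, fld(i,j)
        end subroutine debug_point
      end program debug_point_sketch
! --- end editor's aside ---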
history + call accum_hist (dt) ! history file + call ice_timer_stop(timer_hist) ! history + + call ice_timer_start(timer_readwrite) ! reading/writing + if (write_restart == 1) then + call dumpfile ! core variables for restarting + if (tr_iage) call write_restart_age + if (tr_FY) call write_restart_FY + if (tr_lvl) call write_restart_lvl + if (tr_pond_cesm) call write_restart_pond_cesm + if (tr_pond_lvl) call write_restart_pond_lvl + if (tr_pond_topo) call write_restart_pond_topo + if (tr_aero) call write_restart_aero + if (skl_bgc) call write_restart_bgc + if (tr_brine) call write_restart_hbrine + if (kdyn == 2) call write_restart_eap + call final_restart + endif + + call ice_timer_stop(timer_readwrite) ! reading/writing + + end subroutine ice_step + +!======================================================================= +! +! Prepare for coupling +! +! authors: Elizabeth C. Hunke, LANL + + subroutine coupling_prep (iblk) + + use ice_blocks, only: block, nx_block, ny_block + use ice_calendar, only: dt, nstreams + use ice_constants, only: c0, c1, puny, rhofresh + use ice_domain_size, only: ncat + use ice_flux, only: alvdf, alidf, alvdr, alidr, albice, albsno, & + albpnd, albcnt, apeff_ai, coszen, fpond, fresh, & + alvdf_ai, alidf_ai, alvdr_ai, alidr_ai, fhocn_ai, & + fresh_ai, fsalt_ai, fsalt, & + fswthru_ai, fhocn, fswthru, scale_factor, & + swvdr, swidr, swvdf, swidf, Tf, Tair, Qa, strairxT, strairyt, & + fsens, flat, fswabs, flwout, evap, Tref, Qref, faero_ocn, & + fsurfn_f, flatn_f, scale_fluxes, frzmlt_init, frzmlt + use ice_grid, only: tmask + use ice_ocean, only: oceanmixed_ice, ocean_mixed_layer + use ice_shortwave, only: alvdfn, alidfn, alvdrn, alidrn, & + albicen, albsnon, albpndn, apeffn + use ice_state, only: aicen, aice, aice_init, nbtrcr + use ice_therm_shared, only: calc_Tsfc + use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop + use ice_zbgc_shared, only: flux_bio, flux_bio_ai + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + n , & ! thickness category index + i,j , & ! horizontal indices + k ! tracer index + + real (kind=dbl_kind) :: cszn ! counter for history averaging + + !----------------------------------------------------------------- + ! Save current value of frzmlt for diagnostics. + ! Update mixed layer with heat and radiation from ice. + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + frzmlt_init (i,j,iblk) = frzmlt(i,j,iblk) + enddo + enddo + + call ice_timer_start(timer_couple) ! atm/ocn coupling + + if (oceanmixed_ice) & + call ocean_mixed_layer (dt,iblk) ! ocean surface fluxes and sst + + !----------------------------------------------------------------- + ! Aggregate albedos + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + alvdf(i,j,iblk) = c0 + alidf(i,j,iblk) = c0 + alvdr(i,j,iblk) = c0 + alidr(i,j,iblk) = c0 + + albice(i,j,iblk) = c0 + albsno(i,j,iblk) = c0 + albpnd(i,j,iblk) = c0 + apeff_ai(i,j,iblk) = c0 + + ! 
for history averaging + cszn = c0 + if (coszen(i,j,iblk) > puny) cszn = c1 + do n = 1, nstreams + albcnt(i,j,iblk,n) = albcnt(i,j,iblk,n) + cszn + enddo + enddo + enddo + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + alvdf(i,j,iblk) = alvdf(i,j,iblk) & + + alvdfn(i,j,n,iblk)*aicen(i,j,n,iblk) + alidf(i,j,iblk) = alidf(i,j,iblk) & + + alidfn(i,j,n,iblk)*aicen(i,j,n,iblk) + alvdr(i,j,iblk) = alvdr(i,j,iblk) & + + alvdrn(i,j,n,iblk)*aicen(i,j,n,iblk) + alidr(i,j,iblk) = alidr(i,j,iblk) & + + alidrn(i,j,n,iblk)*aicen(i,j,n,iblk) + + if (coszen(i,j,iblk) > puny) then ! sun above horizon + albice(i,j,iblk) = albice(i,j,iblk) & + + albicen(i,j,n,iblk)*aicen(i,j,n,iblk) + albsno(i,j,iblk) = albsno(i,j,iblk) & + + albsnon(i,j,n,iblk)*aicen(i,j,n,iblk) + albpnd(i,j,iblk) = albpnd(i,j,iblk) & + + albpndn(i,j,n,iblk)*aicen(i,j,n,iblk) + endif + + apeff_ai(i,j,iblk) = apeff_ai(i,j,iblk) & ! for history + + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) + enddo + enddo + enddo + + do j = 1, ny_block + do i = 1, nx_block + + !----------------------------------------------------------------- + ! reduce fresh by fpond for coupling + !----------------------------------------------------------------- + + fpond(i,j,iblk) = fpond(i,j,iblk) * rhofresh/dt + fresh(i,j,iblk) = fresh(i,j,iblk) - fpond(i,j,iblk) + + !---------------------------------------------------------------- + ! Store grid box mean albedos and fluxes before scaling by aice + !---------------------------------------------------------------- + + alvdf_ai (i,j,iblk) = alvdf (i,j,iblk) + alidf_ai (i,j,iblk) = alidf (i,j,iblk) + alvdr_ai (i,j,iblk) = alvdr (i,j,iblk) + alidr_ai (i,j,iblk) = alidr (i,j,iblk) + fresh_ai (i,j,iblk) = fresh (i,j,iblk) + fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) + fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) + fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) + + if (nbtrcr > 0) then + do k = 1, nbtrcr + flux_bio_ai (i,j,k,iblk) = flux_bio (i,j,k,iblk) + enddo + endif + + !----------------------------------------------------------------- + ! Save net shortwave for scaling factor in scale_factor + !----------------------------------------------------------------- + scale_factor(i,j,iblk) = & + swvdr(i,j,iblk)*(c1 - alvdr_ai(i,j,iblk)) & + + swvdf(i,j,iblk)*(c1 - alvdf_ai(i,j,iblk)) & + + swidr(i,j,iblk)*(c1 - alidr_ai(i,j,iblk)) & + + swidf(i,j,iblk)*(c1 - alidf_ai(i,j,iblk)) + + enddo + enddo + + !----------------------------------------------------------------- + ! Divide fluxes by ice area + ! - the CCSM coupler assumes fluxes are per unit ice area + ! - also needed for global budget in diagnostics + !----------------------------------------------------------------- + + call scale_fluxes (nx_block, ny_block, & + tmask (:,:,iblk), nbtrcr, & + aice (:,:,iblk), Tf (:,:,iblk), & + Tair (:,:,iblk), Qa (:,:,iblk), & + strairxT (:,:,iblk), strairyT(:,:,iblk), & + fsens (:,:,iblk), flat (:,:,iblk), & + fswabs (:,:,iblk), flwout (:,:,iblk), & + evap (:,:,iblk), & + Tref (:,:,iblk), Qref (:,:,iblk), & + fresh (:,:,iblk), fsalt (:,:,iblk), & + fhocn (:,:,iblk), fswthru (:,:,iblk), & + faero_ocn(:,:,:,iblk), & + alvdr (:,:,iblk), alidr (:,:,iblk), & + alvdf (:,:,iblk), alidf (:,:,iblk), & + flux_bio(:,:,1:nbtrcr,iblk)) + +!echmod - comment this out for efficiency, if .not. calc_Tsfc + if (.not. calc_Tsfc) then + + !--------------------------------------------------------------- + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. 
+ !--------------------------------------------------------------- + + call sfcflux_to_ocn & + (nx_block, ny_block, & + tmask (:,:,iblk), aice_init(:,:,iblk), & + fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & + fresh (:,:,iblk), fhocn (:,:,iblk)) + endif +!echmod + + call ice_timer_stop(timer_couple) ! atm/ocn coupling + + end subroutine coupling_prep + +!======================================================================= +! +! If surface heat fluxes are provided to CICE instead of CICE calculating +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! be provided at points which do not have ice. (This is could be due to +! the heat fluxes being calculated on a lower resolution grid or the +! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! conserve energy and water by passing these fluxes to the ocean. +! +! author: A. McLaren, Met Office + + subroutine sfcflux_to_ocn(nx_block, ny_block, & + tmask, aice, & + fsurfn_f, flatn_f, & + fresh, fhocn) + + use ice_domain_size, only: ncat + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in):: & + aice ! initial ice concentration + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(in) :: & + fsurfn_f, & ! net surface heat flux (provided as forcing) + flatn_f ! latent heat flux (provided as forcing) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout):: & + fresh , & ! fresh water flux to ocean (kg/m2/s) + fhocn ! actual ocn/ice heat flx (W/m**2) + +#ifdef CICE_IN_NEMO + + ! local variables + integer (kind=int_kind) :: & + i, j, n ! horizontal indices + + real (kind=dbl_kind) :: & + rLsub ! 1/Lsub + + rLsub = c1 / Lsub + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j) .and. aice(i,j) <= puny) then + fhocn(i,j) = fhocn(i,j) & + + fsurfn_f(i,j,n) + flatn_f(i,j,n) + fresh(i,j) = fresh(i,j) & + + flatn_f(i,j,n) * rLsub + endif + enddo ! i + enddo ! j + enddo ! n + +#endif + + end subroutine sfcflux_to_ocn + +!======================================================================= + + end module CICE_RunMod + +!======================================================================= diff --git a/drivers/access/README.txt b/drivers/access/README.txt new file mode 100644 index 00000000..a0565aa6 --- /dev/null +++ b/drivers/access/README.txt @@ -0,0 +1,10 @@ +# +# Note: +# auscom is copied from +# /short/p66/ars599/CICE.v5.0/accice.v504_csiro/drivers/accice/* . +# later will put into access +# whole things are refered from +# /short/p66/dhb599/ACCESS-OM/submodels/cice4.1/drivers/auscom +# extra files will be copied such as +# cpl_arrays_setup.F90 cpl_forcing_handler.F90 cpl_interface.F90 +# cpl_netcdf_setup.F90 cpl_parameters.F90 ice_constants.F90 diff --git a/drivers/access/cpl_arrays_setup.F90 b/drivers/access/cpl_arrays_setup.F90 new file mode 100644 index 00000000..1597d57f --- /dev/null +++ b/drivers/access/cpl_arrays_setup.F90 @@ -0,0 +1,176 @@ +!============================================================================ +! +module cpl_arrays_setup +! +! UM-AusCOM coupling involves following fields at the air-ice-sea interface: +! +! A> atm (UM) ==> ice (CICE) [* all at T cell center *] +! +! (1) heatflux + solar radiation (total heatflux) um_thflx +! (2) penetrating solar flux um_pswflx +! 
(3) runoff um_runoff +! (4) WME (?, for use in KT scheme in nemo) um_wme +! (5) rainfall um_rain +! (6) snowfall um_snow +! (7) evaporation um_evap +! (8) latent heat flux/evaporation um_lhflx +! ( 9 - 13) top ice melting um_tmlt(,,1:5) +! (14 - 18) bottom ice melting um_bmlt(,,1:5) +! (19) windstress 'zonal' um_taux +! (20) windstress 'meridional' um_tauy +! *** the above 20 fields are for the Hadgem3 coupling purpose *** +! *** some may not be necessary (such as WME and evap or lhflx) *** +! *** for ACCESS we may need a few more fields: (temporary...) *** +! (21) solar radiation (net down) um_swflx +! (22) long wave radiation (net down) um_lwflx +! (23) sensible heat flux um_shflx +! (24) surface pressure um_press +! (25) co2 um_co2 +! (26) wind speed um_wnd +! --------- add new fields for ACCESS2 -------- +! (27) north (greenland) ice amount um_icenth +! (28) south (antarctic) ice amount um_icesth +! (29 - 33) ice surface/skin temperature um_tsfice(,,1:5) +! (34 - 38) ice surface evaporation (sublimation) um_iceevp(,,1:5) +! +! B> ocn (MOM4) ==> ice (CICE) [* at T or U cell center *] +! +! (1) sea surface temperature (K) ocn_sst +! (2) sea surface salinity (psu) ocn_sss +! (3) zonal water speed (m/s) ocn_ssu +! (4) meridional water speed (m/s) ocn_ssv +! (5) sea surface gradient (zonal) (m/m) ocn_sslx +! (6) sea surface gradient (meridional)(m/m) ocn_ssly +! (7) potential ice frm/mlt heatflux (W/m^2) ocn_pfmice +! (8) co2 () ocn_co2 +! (9) co2 flux () ocn_co2fx +! +! C> ice (CICE) ==> atm (UM) [* all from T to T, U or V cell center *] +! +! ( 1) ocean surface temperature (K) ia_sst +! ( 2 - 6 ) ice concentration (fraction) ia_aicen(,,1:5) +! ( 7 - 11) snow thickness (m ?) ia_snown(,,1:5) +! (12 - 16) ice thickness (m ?) ia_thikn(,,1:5) +! (17) ice/ocn velocity 'zonal' ia_uvel +! (18) ice/ocn velocity 'meridional' ia_vvel +! (19) co2 ia_co2 +! (20) co2 flux ia_co2fx +! --------- add new fields for ACCESS2 -------- +! (21) ocean surface freezing temperature ia_sstfz +! (22 - 26 ) first order ice concentration ia_foifr(,,1:5) +! (27 - 31 ) ice top layer temperature ia_itopt(,,1:5) +! (32 - 36 ) ice top layer effective conductivity ia_itopk(,,1:5) +! (37 - 41 ) ice melt pond concentration ia_pndfn(,,1:5) +! (42 - 46 ) ice melt pond thickness ia_pndtn(,,1:5) +! +! D> ice (CICE) ==> ocn (MOM4) [* at T or U cell center *] +! +! (1) air/ice-ocean stress, x-direction (kg/m s^2) io_strsu +! (2) air/ice-ocean stress, y-direction (kg/m s^2) io_strsv +! (3) rainfall to ocean (kg/m^2/s) io_rain +! (4) snowfall to ocean (kg/m^2/s) io_snow +! (5) salt flux to ocean (kg/m^2/s) io_stflx +! (6) 'net' heat flux to ocean (W/m^2) io_htflx +! *(note word 'net' is misleading!) it is actually ice +! *'melt' heatflux into ocean. (ref: ice_coupling.F, +! *it says: +! *'buffs(n,index_i2c_Fioi_melth) = fhnet(i,j) +! * ! hf from melting' +! (7) shortwave penetrating to ocean (W/m^2) io_swflx +! *** Also, we pass the following 'atmospheric fluxes': *** +! (8) latent heat flux/evaporation io_qflux +! (9) sensible heat flux io_shflx +!(10) long wave radiation io_lwflx +!(11) runoff (kg/m^2/s) io_runof +!(12) pressure io_press +!(13) ice concentration (fraction) io_aice +! +! Seperate ice melting/formation associated water fluxes from the rainfall field: +! +!(14) ice melt waterflux io_melt +!(15) ice form waterflux io_form +!(16) co2 io_co2 +!(17) wind speed io_wnd +! +! Therefore, currently we have +! +! *for ACCESS1.x, 31 in, 33 out => thus jpfldout=33, jpfldin=31 in cpl_parameters. +! 
for ACCESS-CM2, 47 in, 63 out => thus jpfldout=63, jpfldin=47 in cpl_parameters. +!---------------------------------------------------------------------------------- +! This module will be largely modified/'simplifed' after ACCESS works ! +!============================================================================ + +!cice stuff +use ice_kinds_mod + +implicit none + +! Fields in: +!=========== +real(kind=dbl_kind), dimension(:,:,:), allocatable :: & !from atm (UM) + um_thflx, um_pswflx, um_runoff, um_wme, um_snow, um_rain, & + um_evap, um_lhflx, um_taux, um_tauy, & + um_swflx, um_lwflx, um_shflx, um_press,um_co2, um_wnd, & + um_icenth, um_icesth + +real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & + um_tmlt, um_bmlt, um_tsfice, um_iceevp + +! CORE runoff remapped onto the AusCOM grid (optional) +real(kind=dbl_kind), dimension(:,:,:), allocatable :: & + core_runoff + +real(kind=dbl_kind), dimension(:,:,:), allocatable :: & !from ocn (MOM4) + ocn_sst, ocn_sss, ocn_ssu, ocn_ssv, ocn_sslx, ocn_ssly, ocn_pfmice, & + ocn_co2, ocn_co2fx + +real(kind=dbl_kind), dimension(:,:), allocatable :: gwork + !global domain work array, 4 coupling data passing and global data output. +real(kind=dbl_kind), dimension(:,:,:), allocatable :: vwork + !local domain work array. + +! Fields out: +!============ +real(kind=dbl_kind),dimension(:,:,:), allocatable :: & !to atm (timeaveraged) + ia_sst, ia_uvel, ia_vvel, ia_co2, ia_co2fx, ia_sstfz +real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & + ia_aicen, ia_snown, ia_thikn, & + ia_foifr, ia_itopt, ia_itopk, ia_pndfn, ia_pndtn + +real(kind=dbl_kind),dimension(:,:,:), allocatable :: & !to ocn (time averaged) + io_strsu, io_strsv, io_rain, io_snow, io_stflx, io_htflx, io_swflx, & + io_qflux, io_shflx, io_lwflx, io_runof, io_press, io_aice, & + io_melt, io_form, io_co2, io_wnd + +! Temporary arrays +!================== + +! 1. ice fields averaged over IA cpl interval: +real(kind=dbl_kind),dimension(:,:,:), allocatable :: & + maiu, muvel, mvvel +real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & + maicen, msnown, mthikn, & + mfoifr, mitopt, mitopk, mpndfn, mpndtn +real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & !BX: just in case...... + maicen_saved + +! 2. ice fields averaged over IO cpl interval: +real(kind=dbl_kind),dimension(:,:,:), allocatable :: & + maice, mstrocnxT, mstrocnyT, mfresh, mfsalt, mfhocn, mfswthru, msicemass +!BX---extra one: (maybe better save the IA interval one......anyway:) +!real(kind=dbl_kind),dimension(:,:,:), allocatable :: & +! maice_saved + +! 3. ocn fields averaged over IA cpl interval: +real(kind=dbl_kind),dimension(:,:,:), allocatable :: & + msst, mssu, mssv, mco2, mco2fx, msstfz + + +! other stuff +!============ +real(kind=dbl_kind),dimension(:,:,:), allocatable :: & + sicemass !ice mass + +!=========================================================================== +end module cpl_arrays_setup + diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 new file mode 100644 index 00000000..64b1fd27 --- /dev/null +++ b/drivers/access/cpl_forcing_handler.F90 @@ -0,0 +1,1685 @@ +MODULE cpl_forcing_handler +! +! It contains subroutines handling coupling fields. They are +! +! nullify_i2o_fluxes: +! tavg_i2o_fluxes: +! ............... +! ............... +! +use ice_blocks +use ice_forcing +use ice_read_write +use ice_domain_size +use ice_domain, only : distrb_info, nblocks +use ice_flux !forcing data definition (Tair, Qa, uocn, etc.) 
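+!-----------------------------------------------------------------------------
+!NOTE (illustrative sketch only): the coupling arrays used throughout this
+! module are declared allocatable in cpl_arrays_setup above; the shapes below
+! are assumptions inferred from how the fields are indexed in this file,
+! e.g. um_tmlt(:,:,cat,:). The actual allocation lives in the coupling setup
+! code (not shown here); a minimal sketch would be:
+!
+!   allocate (um_taux (nx_block,ny_block,max_blocks))       ! 2-D a2i field
+!   allocate (um_tmlt (nx_block,ny_block,ncat,max_blocks))  ! per-category a2i field
+!   allocate (ia_aicen(nx_block,ny_block,ncat,max_blocks))  ! per-category i2a field
+!   allocate (vwork   (nx_block,ny_block,max_blocks))       ! local work array
+!   allocate (gwork   (il_im,il_jm))                        ! global gather buffer
+!
+! where il_im, il_jm are taken here to be the global grid size set in
+! cpl_parameters.
+!-----------------------------------------------------------------------------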
+ !Tn_top, keffn_top ...(for multilayer configuration) +use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd !ice concentration and tracers +use ice_state, only: uvel, vvel, vsnon, vicen +use ice_gather_scatter +!ars599: 11042014: use all ice_constants +!use ice_constants, only : gravit, Lvap, Lsub +use ice_constants +use ice_grid, only : tmask, to_ugrid +use ice_communicate, only : my_task, master_task +!use ice_ocean, only : cprho +use ice_exit, only : abort_ice +use ice_shortwave, only : apeffn + +use cpl_parameters +use cpl_netcdf_setup +use cpl_arrays_setup + +implicit none + +real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + aiiu ! ice fraction on u-grid + +contains + +!=============================================================================== +subroutine get_core_runoff(fname, vname, nrec) +! read in the remapped core runoff data (S.Marsland) which will be used to replace +! the ncep2 runoff sent from matm via coupler + +implicit none + +character*(*), intent(in) :: fname, vname +integer(kind=int_kind), intent(in) :: nrec +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. + +if ( file_exist(fname) ) then + call ice_open_nc(fname, ncid) + call ice_read_global_nc(ncid, nrec, vname, gwork, dbug) + call scatter_global(core_runoff, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task == 0) write(il_out,*) '(get_core_runoff) file doesnt exist: ', fname + stop 'CICE stopped: core runoff (remapped) file not found.' +endif + +return +end subroutine get_core_runoff + +!=============================================================================== +subroutine get_time0_sstsss(fname, nmonth) + +! This routine is to be used only once at the beginning at an exp. + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nmonth +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. +!dbug = .false. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) opening ncfile: ',fname + endif + call ice_open_nc(fname, ncid) + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) reading in initial SST...' + endif + call ice_read_nc(ncid, nmonth, 'TEMP', sst, dbug) + call gather_global(gwork, sst, master_task, distrb_info) + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) reading in initial SSS...' + endif + call ice_read_nc(ncid, nmonth, 'SALT', sss, dbug) + call gather_global(gwork, sss, master_task, distrb_info) + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) file doesnt exist: ', fname + endif + call abort_ice('CICE stopped--initial SST and SSS ncfile not found.') +endif + +return +end subroutine get_time0_sstsss + +!=============================================================================== +! temporary use ... +subroutine read_access_a2i_data(fname,nrec,istep) + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nrec,istep +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_access_a2i_data) opening ncfile: ',fname + endif + call ice_open_nc(fname, ncid) + if (my_task==0) then + write(il_out,*) '(read_access_a2i_data) reading a2i forcing data...' 
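+ !NOTE (illustrative sketch only): the per-category reads below, e.g.
+ ! 'tmlt01_i' ... 'tmlt05_i', could equivalently be generated in a loop;
+ ! assuming a local integer cat and a character(len=16) vname (neither is
+ ! declared in this routine), one category block would read:
+ !
+ ! do cat = 1, ncat
+ !   write(vname,'(a,i2.2,a)') 'tmlt', cat, '_i'
+ !   call ice_read_nc(ncid, nrec, trim(vname), um_tmlt(:,:,cat,:), dbug)
+ ! enddo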
+ endif + call ice_read_nc(ncid, nrec, 'thflx_i', um_thflx, dbug) + call ice_read_nc(ncid, nrec, 'pswflx_i', um_pswflx, dbug) + call ice_read_nc(ncid, nrec, 'runoff_i', um_runoff, dbug) + call ice_read_nc(ncid, nrec, 'wme_i', um_wme, dbug) + call ice_read_nc(ncid, nrec, 'rain_i', um_rain, dbug) + call ice_read_nc(ncid, nrec, 'snow_i', um_snow, dbug) + call ice_read_nc(ncid, nrec, 'evap_i', um_evap, dbug) + call ice_read_nc(ncid, nrec, 'lhflx_i', um_lhflx, dbug) + call ice_read_nc(ncid, nrec, 'tmlt01_i', um_tmlt(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt02_i', um_tmlt(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt03_i', um_tmlt(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt04_i', um_tmlt(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt05_i', um_tmlt(:,:,5,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt01_i', um_bmlt(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt02_i', um_bmlt(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt03_i', um_bmlt(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt04_i', um_bmlt(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt05_i', um_bmlt(:,:,5,:), dbug) + call ice_read_nc(ncid, nrec, 'taux_i', um_taux, dbug) + call ice_read_nc(ncid, nrec, 'tauy_i', um_tauy, dbug) + call ice_read_nc(ncid, nrec, 'swflx_i', um_swflx, dbug) + call ice_read_nc(ncid, nrec, 'lwflx_i', um_lwflx, dbug) + call ice_read_nc(ncid, nrec, 'shflx_i', um_shflx, dbug) + call ice_read_nc(ncid, nrec, 'press_i', um_press, dbug) + call ice_read_nc(ncid, nrec, 'co2_ai', um_co2, dbug) + call ice_read_nc(ncid, nrec, 'wnd_ai', um_wnd, dbug) +! + call ice_read_nc(ncid, nrec, 'icenth_i', um_icenth, dbug) + call ice_read_nc(ncid, nrec, 'icesth_i', um_icesth, dbug) + call ice_read_nc(ncid, nrec, 'tsfice01', um_tsfice(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'tsfice02', um_tsfice(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'tsfice03', um_tsfice(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'tsfice04', um_tsfice(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'tsfice05', um_tsfice(:,:,5,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp01', um_iceevp(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp02', um_iceevp(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp03', um_iceevp(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp04', um_iceevp(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp05', um_iceevp(:,:,5,:), dbug) + + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task==0) then + write(il_out,*) '(ed_access_a2i_data file doesnt exist: ', fname + endif + call abort_ice('CICE stopped--ACCESS fields_a2i ncfile not found.') +endif + +call check_a2i_fields(istep) + +end subroutine read_access_a2i_data + +!============================================================================= +subroutine atm_icefluxes_back2GBM +!convert the a2i fluxes into GBM units for those that are scaled up in UM +!by "/maicen" before being sent to cice [needed for GSI8 TTI approach]. + +implicit none +!integer :: cat,i,j,k +!do j = 1, ny_block +!do i = 1, nx_block +! do k = 1, nblocks +! do cat = 1, ncat +! um_tmlt(i,j,cat,k) = um_tmlt(i,j,cat,k) * maicen_ia(i,j,cat,k) +! um_bmlt(i,j,cat,k) = um_bmlt(i,j,cat,k) * maicen_ia(i,j,cat,k) +! um_iceevp(i,j,cat,k) = um_iceevp(i,j,cat,k) * maicen_ia(i,j,cat,k) +! enddo +! 
enddo +!enddo +!enddo + +!um_tmlt(:,:,:,:) = um_tmlt(:,:,:,:) * maicen(:,:,:,:) +!um_bmlt(:,:,:,:) = um_bmlt(:,:,:,:) * maicen(:,:,:,:) +!um_iceevp(:,:,:,:) = um_iceevp(:,:,:,:) * maicen(:,:,:,:) + +um_tmlt(:,:,:,:) = um_tmlt(:,:,:,:) * maicen_saved(:,:,:,:) +um_bmlt(:,:,:,:) = um_bmlt(:,:,:,:) * maicen_saved(:,:,:,:) +um_iceevp(:,:,:,:) = um_iceevp(:,:,:,:) * maicen_saved(:,:,:,:) + +end subroutine atm_icefluxes_back2GBM + +!============================================================================= +subroutine read_restart_i2a(fname, sec) !'i2a.nc', 0) + +! read ice to atm coupling fields from restart file, and send to atm module + +implicit none +character*(*), intent(in) :: fname +integer :: sec + +integer(kind=int_kind) :: ncid +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_restart_i2a) reading in i2a fields......' + endif + call ice_open_nc(fname, ncid) + call ice_read_nc(ncid, 1, 'icecon01', ia_aicen(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'icecon02', ia_aicen(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'icecon03', ia_aicen(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'icecon04', ia_aicen(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'icecon05', ia_aicen(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk01', ia_snown(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk02', ia_snown(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk03', ia_snown(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk04', ia_snown(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk05', ia_snown(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'icethk01', ia_thikn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'icethk02', ia_thikn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'icethk03', ia_thikn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'icethk04', ia_thikn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'icethk05', ia_thikn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'isst_ia', ia_sst, dbug) + call ice_read_nc(ncid, 1, 'uvel_ia', ia_uvel, dbug) + call ice_read_nc(ncid, 1, 'vvel_ia', ia_vvel, dbug) + call ice_read_nc(ncid, 1, 'co2_i2', ia_co2, dbug) + call ice_read_nc(ncid, 1, 'co2fx_i2', ia_co2fx, dbug) + call ice_read_nc(ncid, 1, 'sstfz_ia', ia_sstfz, dbug) + call ice_read_nc(ncid, 1, 'foifr01', ia_foifr(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'foifr02', ia_foifr(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'foifr03', ia_foifr(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'foifr04', ia_foifr(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'foifr05', ia_foifr(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'itopt01', ia_itopt(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'itopt02', ia_itopt(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'itopt03', ia_itopt(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'itopt04', ia_itopt(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'itopt05', ia_itopt(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'itopk01', ia_itopk(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'itopk02', ia_itopk(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'itopk03', ia_itopk(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'itopk04', ia_itopk(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'itopk05', ia_itopk(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn01', ia_pndfn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn02', ia_pndfn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn03', ia_pndfn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn04', ia_pndfn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn05', ia_pndfn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 
'pndtn01', ia_pndtn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'pndtn02', ia_pndtn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'pndtn03', ia_pndtn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'pndtn04', ia_pndtn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'pndtn05', ia_pndtn(:,:,5,:), dbug) + + if (my_task == master_task) then + call ice_close_nc(ncid) + write(il_out,*) '(read_restart_i2a) has read in 18 i2a fields.' + endif + +else + if (my_task==0) then + write(il_out,*) 'ERROR: (read_restart_i2a) not found file *** ',fname + endif + print *, 'CICE: (read_restart_i2a) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2a data file.') +endif +end subroutine read_restart_i2a + + +!============================================================================= +subroutine read_restart_i2asum(fname, sec) !'i2a.nc', 0) + +! read ice to atm coupling fields from restart file, and send to atm module + +implicit none +character*(*), intent(in) :: fname +integer :: sec + +integer(kind=int_kind) :: ncid +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_restart_i2asum) reading in i2a fields......' + endif + call ice_open_nc(fname, ncid) + call ice_read_nc(ncid, 1, 'maicen1', maicen(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'maicen2', maicen(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'maicen3', maicen(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'maicen4', maicen(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'maicen5', maicen(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'msnown1', msnown(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'msnown2', msnown(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'msnown3', msnown(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'msnown4', msnown(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'msnown5', msnown(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn1', mthikn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn2', mthikn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn3', mthikn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn4', mthikn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn5', mthikn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'msst', msst, dbug) + call ice_read_nc(ncid, 1, 'mssu', mssu, dbug) + call ice_read_nc(ncid, 1, 'mssv', mssv, dbug) + call ice_read_nc(ncid, 1, 'muvel', muvel, dbug) + call ice_read_nc(ncid, 1, 'mvvel', mvvel, dbug) + call ice_read_nc(ncid, 1, 'maiu', maiu, dbug) + ! 
+ !call ice_read_nc(ncid, 1, 'maice_ia', maice_ia, dbug) + call ice_read_nc(ncid, 1, 'mfoifr01', mfoifr(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mfoifr02', mfoifr(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mfoifr03', mfoifr(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mfoifr04', mfoifr(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mfoifr05', mfoifr(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt01', mitopt(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt02', mitopt(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt03', mitopt(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt04', mitopt(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt05', mitopt(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk01', mitopk(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk02', mitopk(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk03', mitopk(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk04', mitopk(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk05', mitopk(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn01', mpndfn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn02', mpndfn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn03', mpndfn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn04', mpndfn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn05', mpndfn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn01', mpndtn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn02', mpndtn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn03', mpndtn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn04', mpndtn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn05', mpndtn(:,:,5,:), dbug) + + if (my_task == master_task) then + call ice_close_nc(ncid) + write(il_out,*) '(read_restart_i2asum) has read in 21 i2a fields.' + endif + +else + if (my_task==0) then + write(il_out,*) 'ERROR: (read_restart_i2asum) not found file *** ',fname + endif + print *, 'CICE: (read_restart_i2asum) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2a data file.') +endif +end subroutine read_restart_i2asum + +!============================================================================== +subroutine put_restart_i2a(fname, sec) +! call this subroutine after called get_restart_oi2 +! it uses ocn_sst etc to calculate average ocn fields which will be used to send +!to atm at the 1st step of continue run, because the ocn_sst cannot be sent to ice at the end of last run. +! average ice fields (done at end of last run) are ready by calling read_restart_i2asum() +! +implicit none + +character*(*), intent(in) :: fname +integer :: sec + + if ( file_exist('i2a.nc') ) then + write(il_out,*)' calling read_restart_i2a at time_sec = ',sec + call read_restart_i2a('i2a.nc', sec) + endif + if ( file_exist('i2asum.nc') ) then + write(il_out,*)' calling read_restart_i2asum at time_sec = ',sec + call read_restart_i2asum('i2asum.nc', sec) + + write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',sec + call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling + write(il_out,*) ' calling get_i2a_fields at time_sec =', sec + call get_i2a_fields + endif + +end subroutine put_restart_i2a + +!=============================================================================== +subroutine get_restart_o2i(fname) + +! To be called at beginning of each run trunk to read in restart o2i fields + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_o2i +logical :: dbug + +dbug = .true. 
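+!NOTE (illustrative sketch only): a typical start-of-run sequence combining
+! the restart readers in this module; the file names 'o2i.nc' and 'mice.nc'
+! are assumptions here (only 'i2a.nc'/'i2asum.nc' appear in this file), and
+! the actual call site is in the coupling driver:
+!
+! call get_restart_o2i ('o2i.nc')     ! last o2i forcing received by cice
+! call get_restart_mice('mice.nc')    ! ice means over the last IO interval
+! call put_restart_i2a ('i2a.nc', 0)  ! rebuild the first i2a fields to send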
+if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_restart_o2i) reading in o2i fields......' + endif + call ice_open_nc(fname, ncid_o2i) + call ice_read_nc(ncid_o2i, 1, 'sst_i', ocn_sst, dbug) + call ice_read_nc(ncid_o2i, 1, 'sss_i', ocn_sss, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssu_i', ocn_ssu, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssv_i', ocn_ssv, dbug) + call ice_read_nc(ncid_o2i, 1, 'sslx_i', ocn_sslx, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssly_i', ocn_ssly, dbug) + call ice_read_nc(ncid_o2i, 1, 'pfmice_i', ocn_pfmice, dbug) + call ice_read_nc(ncid_o2i, 1, 'co2_oi', ocn_co2, dbug) + call ice_read_nc(ncid_o2i, 1, 'co2fx_oi', ocn_co2fx, dbug) + if (my_task == master_task) then + call ice_close_nc(ncid_o2i) + write(il_out,*) '(get_restart_o2i) has read in 7 o2i fields.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_restart_o2i) not found file *** ',fname + endif + print *, 'CICE: (get_restart_o2i) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 o2i data file.') +endif + +return +end subroutine get_restart_o2i + +!=============================================================================== +subroutine get_restart_mice(fname) + +! Called at beginning of the run to get 'last' IO cpl int T-M ice variables +! which are used together with the first received i2a fields to obtain the first +! i2o fields sent to ocn immediately as the 1st io cpl int forcing there. + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_o2i +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_restart_mice) reading in mice variables......' + endif + call ice_open_nc(fname, ncid_o2i) + call ice_read_nc(ncid_o2i, 1, 'maice', maice, dbug) + call ice_read_nc(ncid_o2i, 1, 'mstrocnxT', mstrocnxT, dbug) + call ice_read_nc(ncid_o2i, 1, 'mstrocnyT', mstrocnyT, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfresh', mfresh, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfsalt', mfsalt, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfhocn', mfhocn, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfswthru', mfswthru, dbug) + call ice_read_nc(ncid_o2i, 1, 'msicemass', msicemass, dbug) + + if (my_task == master_task) then + call ice_close_nc(ncid_o2i) + write(il_out,*) '(get_restart_mice) has read in 8 T-M variables.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_restart_mice) not found file *** ',fname + endif + print *, 'CICE: (get_restart_mice) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 mice data file.') +endif + +return +end subroutine get_restart_mice + +!=============================================================================== +subroutine get_restart_i2o(fname) + +! To be called at beginning of each run trunk to read in restart i2o fields + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_i2o, jf, jfs +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_time0_i2o_fields) reading in i2o fields......' 
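+ !NOTE (bookkeeping check only, not code from the coupler): the loop below
+ ! walks only the i2o tail of cl_writ. Counting the header lists in
+ ! cpl_arrays_setup, 46 i2a fields (isst_ia ... pndtn05) plus 17 i2o fields
+ ! (strsu_io ... wnd_i1) gives jpfldout = 46 + 17 = 63, so with nsend_i2a
+ ! presumably set to 46 in cpl_parameters the loop covers entries 47..63, i.e.
+ !
+ ! integer, parameter :: nsend_i2a = 46, jpfldout = nsend_i2a + 17  ! = 63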
+ endif + call ice_open_nc(fname, ncid_i2o) + do jf = nsend_i2a + 1, jpfldout + call ice_read_nc(ncid_i2o, 1, cl_writ(jf) , vwork, dbug) + select case(trim(cl_writ(jf))) + case ('strsu_io'); io_strsu = vwork + case ('strsv_io'); io_strsv = vwork + case ('rain_io'); io_rain = vwork + case ('snow_io'); io_snow = vwork + case ('stflx_io'); io_stflx = vwork + case ('htflx_io'); io_htflx = vwork + case ('swflx_io'); io_swflx = vwork + case ('qflux_io'); io_qflux = vwork + case ('shflx_io'); io_shflx = vwork + case ('lwflx_io'); io_lwflx = vwork + case ('runof_io'); io_runof = vwork + case ('press_io'); io_press = vwork + case ('aice_io'); io_aice = vwork + case ('melt_io'); io_melt = vwork + case ('form_io'); io_form = vwork + case ('co2_i1'); io_co2 = vwork + case ('wnd_i1'); io_wnd = vwork + end select + enddo + if (my_task == master_task) then + call ice_close_nc(ncid_i2o) + write(il_out,*) '(get_time0_i2o_fields) has read in 11 i2o fields.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_time0_i2o_fields) not found file *** ',fname + endif + print *, 'CICE: (get_time0_i2o_fields_old) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2o data file.') +endif + +return +end subroutine get_restart_i2o + +!=============================================================================== +subroutine set_sbc_ice !!NOTE: This routine is NOT used!! +! +! Set coupling fields (in units of GMB, from UM and MOM4) needed for CICE +! +! Adapted from "subroutine cice_sbc_in" of HadGem3 Nemo "MODULE sbcice_cice" +! for the "nsbc = 5" case. +! +! It should be called after calling "from_atm" and "from_ocn". +!------------------------------------------------------------------------------- + +implicit none + +real :: r1_S0 +real, dimension(nx_block,ny_block,nblocks) :: zzs + +integer :: i,j,k,cat + +!*** Fields from UM (all on T cell center): + +!(1) windstress taux: +strax = um_taux * maice !*tmask + +!(2) windstress tauy: +stray = um_tauy * maice !*tmask + +!(3) surface downward latent heat flux (==> multi-category) +do j = 1, ny_block +do i = 1, nx_block + do k = 1, nblocks + if (maice(i,j,k)==0.0) then + do cat = 1, ncat + flatn_f(i,j,cat,k) = 0.0 + enddo + ! This will then be conserved in CICE (done in sfcflux_to_ocn) + flatn_f(i,j,1,k) = um_lhflx(i,j,k) + else + do cat = 1, ncat + !!!B: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * maicen(i,j,cat,k)/maice(i,j,k) + !???: flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub + flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub + enddo + endif + enddo +enddo +enddo + +! 
GBM conductive flux through ice: +!(4-8) top melting; (9-13) bottom belting ==> surface heatflux +do cat = 1, ncat + fcondtopn_f(:,:,cat,:) = um_bmlt(:,:,cat,:) + fsurfn_f (:,:,cat,:) = um_tmlt(:,:,cat,:) + um_bmlt(:,:,cat,:) +enddo + +!(14) snowfall +fsnow = max(maice * um_snow, 0.0) + +!(15) rainfall +frain = max(maice * um_rain, 0.0) + +!BX-20160718: "save" the ice concentration "maice" used here for scaling-up frain etc in +!ice_step for "consistency"-- +!maice_saved = maice + +!*** Fields from MOM4 (SSU/V and sslx/y are on U points): + +!(1) freezing/melting potential +frzmlt = ocn_pfmice +if (limit_icemelt) then + frzmlt(:,:,:) = max(frzmlt(:,:,:), meltlimit) +endif + +!(2) SST +!make sure SST is 'all right' K==>C +sst = ocn_sst +if (maxval(sst).gt.200) then + sst = sst -273.15 +endif + +!(3) SSS +sss = ocn_sss + +!(4) SSU +uocn = ocn_ssu + +!(5) SSV +vocn = ocn_ssv + +!(6) surface slope sslx +ss_tltx = ocn_sslx + +!(7) surface slope ssly +ss_tlty = ocn_ssly + +!(as per S.O.) make sure Tf if properly initialized +!Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) +! +!B: May use different formula for Tf such as TEOS-10 formulation: +! +!r1_S0 = 0.875/35.16504 +!zzs(:,:,:) = sqrt(abs(sss(:,:,:)) * r1_S0) +!Tf(:,:,:) = ((((1.46873e-03 * zzs(:,:,:) - 9.64972e-03) * zzs(:,:,:) + & +! 2.28348e-02) * zzs(:,:,:) - 3.12775e-02) * zzs(:,:,:) + & +! 2.07679e-02) * zzs(:,:,:) - 5.87701e-02 +!Tf(:,:,:) = Tf(:,:,:) * sss(:,:,:) ! - 7.53e-4 * 5.0 !!!5.0 is depth in meters + +!=============================================================== + +end subroutine set_sbc_ice + +!=============================================================================== +subroutine get_sbc_ice +! +! ** Purpose: set GBM coupling fields (from UM and MOM4) needed for CICE +! +! Adapted from "subroutine cice_sbc_in" of HadGem3 Nemo "MODULE sbcice_cice" +! for the "nsbc = 5" case. +! +! It should be called after calling "from_atm" and "from_ocn". +! +! *** This routine is used/called within ice time loop (itap) +! *** in case "set_sbc_ice" call (outside the itap loop) fails +! *** which is unfortunately the case the moment (Jan2010) ! +!B: possible reason: "maice" used in set_sbc_ice may be (quite) different from +! the real time aice (used here). E.g., in the case of aice << maice, taux/y +! calculated in set_sbc_ice (i.e., um_taux/y * maice) should be too large for +! a (possibly very) small aice grid, causing huge ice velocity and thus ice +! "departure point error". (June 2016) +!------------------------------------------------------------------------------- + +implicit none + +real :: r1_S0 +real, dimension(nx_block,ny_block,nblocks) :: zzs + +integer :: i,j,k,cat + +! Fields from UM (all on T cell center): + +!(1) windstress taux: +strax = um_taux * aice !*tmask ? + +!(2) windstress tauy: +stray = um_tauy * aice !*tmask ? + +!(3) surface downward latent heat flux (==> multi_category) +!BX: where is flatn_f "used" in CICE? +do j = 1, ny_block +do i = 1, nx_block + do k = 1, nblocks + if (aice(i,j,k)==0.0) then + do cat = 1, ncat + flatn_f(i,j,cat,k) = 0.0 + enddo + ! This will then be conserved in CICE (done in sfcflux_to_ocn) + flatn_f(i,j,1,k) = um_lhflx(i,j,k) + else + do cat = 1, ncat + !!!BX: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * aicen(i,j,cat,k)/aice(i,j,k) + !!! Double check "Lsub" used here !!! + !?! flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub + flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub + enddo + endif + enddo +enddo +enddo + +! 
GBM conductive flux through ice: +!(4-8) top melting; (9-13) bottom belting ==> surface heatflux +do cat = 1, ncat + fcondtopn_f(:,:,cat,:) = um_bmlt(:,:,cat,:) + fsurfn_f (:,:,cat,:) = um_tmlt(:,:,cat,:) + um_bmlt(:,:,cat,:) +enddo + +!!! 20130419: Martin Dix's investigation suggests that frain and fsnow should NOT be scaled by +!!! aice here. This scaling would caused double-scaling with "fresh" calculation.. +!(14) snowfall +!!!fsnow = max(aice * um_snow,0.0) +!fsnow = max(um_snow,0.0) !no more scaling as per M.D.! +!(15) rainfall +!!!frain = max(aice * um_rain,0.0) +!frain = max(um_rain,0.0) !no more scaling as per M.D.! +!!! 20130420: I dug deeper and checked all the associated steps of "fresh" calculation, found +!!! the original weighting is CORRECT! so back to *aice: +fsnow = max(aice * um_snow,0.0) +frain = max(aice * um_rain,0.0) +!!!------------------------------------------------------------------------------------------ + +! Fields from MOM4 (SSU/V and sslx/y are on U points): + +!(1) freezing/melting potential +frzmlt = ocn_pfmice +!20080312: set maximum melting htflux allowed from ocn, (eg, -200 W/m^2) +! the artificial "meltlimit = -200 " is read in from input_ice.nml +!20090320: set option 'limit_icemelt' in case no limit needed if cice behaves! +if (limit_icemelt) then + frzmlt(:,:,:) = max(frzmlt(:,:,:), meltlimit) +endif + +!(2) SST +sst = ocn_sst -273.15 + +!(3) SSS +sss = ocn_sss + +!(4) SSU +uocn = ocn_ssu + +!(5) SSV +vocn = ocn_ssv +!(6) surface slope sslx + +ss_tltx = ocn_sslx + +!(7) surface slope ssly +ss_tlty = ocn_ssly + +! * (as per S. O'Farrel) make sure Tf if properly initialized +!----- should use eos formula to calculate Tf for "consistency" with GCx ----! +!Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) +! +!B: May use different formula for Tf such as TEOS-10 formulation: +! +!r1_S0 = 0.875/35.16504 +!zzs(:,:,:) = sqrt(abs(sss(:,:,:)) * r1_S0) +!Tf(:,:,:) = ((((1.46873e-03 * zzs(:,:,:) - 9.64972e-03) * zzs(:,:,:) + & +! 2.28348e-02) * zzs(:,:,:) - 3.12775e-02) * zzs(:,:,:) + & +! 2.07679e-02) * zzs(:,:,:) - 5.87701e-02 +!Tf(:,:,:) = Tf(:,:,:) * sss(:,:,:) ! - 7.53e-4 * 5.0 !!!5.0 is depth in meters +! +end subroutine get_sbc_ice + +!=============================================================================== +subroutine save_restart_o2i(fname, nstep) + +! output the last o2i forcing data received in cice by the end of the run, +! 
to be read in at the beginning of next run by cice + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) + call write_nc_1Dtime(real(nstep), 1, 'time', ncid) +endif + +do jf = nrecv_a2i + 1, jpfldin + + select case (trim(cl_read(jf))) + case('sst_i'); vwork = ocn_sst + case('sss_i'); vwork = ocn_sss + case('ssu_i'); vwork = ocn_ssu + case('ssv_i'); vwork = ocn_ssv + case('sslx_i'); vwork = ocn_sslx + case('ssly_i'); vwork = ocn_ssly + case('pfmice_i'); vwork = ocn_pfmice + case('co2_oi'); vwork = ocn_co2 + case('co2fx_oi'); vwork = ocn_co2fx + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + if (my_task == 0) then + call write_nc2D(ncid, cl_read(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +return +end subroutine save_restart_o2i + +!============================================================================== +subroutine save_restart_i2asum(fname, nstep) +! output the last i2a forcing data in cice at the end of the run, +! to be read in at the beginning of next run by cice and sent to atm + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +integer(kind=int_kind), parameter :: sumfldin = 46 !21 +character(len=8), dimension(sumfldin) :: sumfld + +sumfld(1)='msst' +sumfld(2)='mssu' +sumfld(3)='mssv' +sumfld(4)='muvel' +sumfld(5)='mvvel' +sumfld(6)='maiu' +sumfld(7)='maicen1' +sumfld(8)='maicen2' +sumfld(9)='maicen3' +sumfld(10)='maicen4' +sumfld(11)='maicen5' +sumfld(12)='mthikn1' +sumfld(13)='mthikn2' +sumfld(14)='mthikn3' +sumfld(15)='mthikn4' +sumfld(16)='mthikn5' +sumfld(17)='msnown1' +sumfld(18)='msnown2' +sumfld(19)='msnown3' +sumfld(20)='msnown4' +sumfld(21)='msnown5' +! 
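+!NOTE (illustrative sketch only): the remaining per-category entries below
+! could also be generated in a loop; assuming ncat = 5 as elsewhere in this
+! file and a hypothetical local integer cat, e.g. for the mfoifr block
+! (entries 22..26):
+!
+! do cat = 1, ncat
+!   write(sumfld(21+cat),'(a,i1)') 'mfoifr', cat
+! enddo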
+sumfld(22)='mfoifr1' +sumfld(23)='mfoifr2' +sumfld(24)='mfoifr3' +sumfld(25)='mfoifr4' +sumfld(26)='mfoifr5' +sumfld(27)='mitopt1' +sumfld(28)='mitopt2' +sumfld(29)='mitopt3' +sumfld(30)='mitopt4' +sumfld(31)='mitopt5' +sumfld(32)='mitopk1' +sumfld(33)='mitopk2' +sumfld(34)='mitopk3' +sumfld(35)='mitopk4' +sumfld(36)='mitopk5' +sumfld(37)='mpndfn1' +sumfld(38)='mpndfn2' +sumfld(39)='mpndfn3' +sumfld(40)='mpndfn4' +sumfld(41)='mpndfn5' +sumfld(42)='mpndtn1' +sumfld(43)='mpndtn2' +sumfld(44)='mpndtn3' +sumfld(45)='mpndtn4' +sumfld(46)='mpndtn5' + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) +endif + +do jf = 1, sumfldin + select case (trim(sumfld(jf))) + case('msst'); vwork = msst + case('mssu'); vwork = mssu + case('mssv'); vwork = mssv + case('muvel'); vwork = muvel + case('mvvel'); vwork = mvvel + case('maiu'); vwork = maiu + case('maicen1'); vwork = maicen(:,:,1,:) + case('maicen2'); vwork = maicen(:,:,2,:) + case('maicen3'); vwork = maicen(:,:,3,:) + case('maicen4'); vwork = maicen(:,:,4,:) + case('maicen5'); vwork = maicen(:,:,5,:) + case('mthikn1'); vwork = mthikn(:,:,1,:) + case('mthikn2'); vwork = mthikn(:,:,2,:) + case('mthikn3'); vwork = mthikn(:,:,3,:) + case('mthikn4'); vwork = mthikn(:,:,4,:) + case('mthikn5'); vwork = mthikn(:,:,5,:) + case('msnown1'); vwork = msnown(:,:,1,:) + case('msnown2'); vwork = msnown(:,:,2,:) + case('msnown3'); vwork = msnown(:,:,3,:) + case('msnown4'); vwork = msnown(:,:,4,:) + case('msnown5'); vwork = msnown(:,:,5,:) + case('mfoifr1'); vwork = mfoifr(:,:,1,:) + case('mfoifr2'); vwork = mfoifr(:,:,2,:) + case('mfoifr3'); vwork = mfoifr(:,:,3,:) + case('mfoifr4'); vwork = mfoifr(:,:,4,:) + case('mfoifr5'); vwork = mfoifr(:,:,5,:) + case('mitopt1'); vwork = mitopt(:,:,1,:) + case('mitopt2'); vwork = mitopt(:,:,2,:) + case('mitopt3'); vwork = mitopt(:,:,3,:) + case('mitopt4'); vwork = mitopt(:,:,4,:) + case('mitopt5'); vwork = mitopt(:,:,5,:) + case('mitopk1'); vwork = mitopk(:,:,1,:) + case('mitopk2'); vwork = mitopk(:,:,2,:) + case('mitopk3'); vwork = mitopk(:,:,3,:) + case('mitopk4'); vwork = mitopk(:,:,4,:) + case('mitopk5'); vwork = mitopk(:,:,5,:) + case('mpndfn1'); vwork = mpndfn(:,:,1,:) + case('mpndfn2'); vwork = mpndfn(:,:,2,:) + case('mpndfn3'); vwork = mpndfn(:,:,3,:) + case('mpndfn4'); vwork = mpndfn(:,:,4,:) + case('mpndfn5'); vwork = mpndfn(:,:,5,:) + case('mpndtn1'); vwork = mpndtn(:,:,1,:) + case('mpndtn2'); vwork = mpndtn(:,:,2,:) + case('mpndtn3'); vwork = mpndtn(:,:,3,:) + case('mpndtn4'); vwork = mpndtn(:,:,4,:) + case('mpndtn5'); vwork = mpndtn(:,:,5,:) + + end select + call gather_global(gwork, vwork, master_task, distrb_info) + if (my_task == 0) then + call write_nc2D(ncid, sumfld(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +end subroutine save_restart_i2asum + +!=============================================================================== +subroutine save_restart_mice(fname, nstep) + +! output ice variable averaged over the last IO cpl int of this run, +! cice reads in these vars at the beginning of next run, uses them with the first +! 
received a2i fields to obtain the first i2o fields to be sent to ocn + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) + call write_nc_1Dtime(real(nstep), 1, 'time', ncid) +endif + +vwork = maice +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maice', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mstrocnxT +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnxT', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mstrocnyT +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnyT', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfresh +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfresh', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfsalt +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfsalt', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfhocn +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfhocn', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfswthru +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfswthru', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = msicemass +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'msicemass', gwork, 2, il_im, il_jm, 1, ilout=il_out) + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +return +end subroutine save_restart_mice + +!=============================================================================== +subroutine get_i2a_fields + +implicit none + +! all fields (except for vector) obtained here are all on T cell center + +!(1) ocean surface temperature +ia_sst(:,:,:) = msst(:,:,:) + +!(2-3) ice/ocn combined surface velocity +!CH: should use "aiu", not aice! +ia_uvel(:,:,:) = mssu(:,:,:) * (1. - maiu(:,:,:)) + muvel(:,:,:) * maiu(:,:,:) +ia_vvel(:,:,:) = mssv(:,:,:) * (1. - maiu(:,:,:)) + mvvel(:,:,:) * maiu(:,:,:) + +!(4-8) ice concentration +ia_aicen(:,:,:,:) = maicen(:,:,:,:) +!BX: save it for use in atm_icefluxes_back2GBM --- +maicen_saved = maicen + +!(9-13) ice thickness +ia_thikn(:,:,:,:) = mthikn(:,:,:,:) + +!(14-18) snow thickness +ia_snown(:,:,:,:) = msnown(:,:,:,:) + +!(19-20) co2 flux stuff +ia_co2 = mco2 +ia_co2fx = mco2fx + +!(21) ocean surface freezing temperature +ia_sstfz(:,:,:) = msstfz(:,:,:) + 273.15 + +!(22-26) first order ice concentration +ia_foifr(:,:,:,:) = mfoifr(:,:,:,:) + +!(27-31) ice top layer temperature +ia_itopt(:,:,:,:) = mitopt(:,:,:,:) + 273.15 + +!(32-36) ice top layer effective conductivity +ia_itopk(:,:,:,:) = mitopk(:,:,:,:) + +!(37-41) ice melt pond concentration +ia_pndfn(:,:,:,:) = mpndfn(:,:,:,:) + +!(42-46) ice melt pond thickness +ia_pndtn(:,:,:,:) = mpndtn(:,:,:,:) + +return +end subroutine get_i2a_fields + +!=============================================================================== +subroutine get_i2o_fields + +! All fluxes should be in GBM units before passing into coupler. +! e.g., io_htflx(:,:,:) = fhocn_gbm(:,:,:) + +implicit none + +real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: pice + +! 
Fields obtained here are all at T cell center. before being sent to MOM4, vector +! (Taux, Tauy) should be shifted on to U point as required +!------------------------------------------------------------------------------- + +!(1-2) air/ice - sea stress TAUX/TAUY +! Caution: in nemo, "strocnx/y" are NOT weighted by aice here, 'cos strocnx/y +! have already been 'weighted' using aice (when calculated in "evp_finish". +! But, this weight has been removed in strocnx/yT (see "evp_finish"), therfore +! we need put it on again here. +io_strsu = um_taux * (1. - maice) - mstrocnxT * maice +io_strsv = um_tauy * (1. - maice) - mstrocnyT * maice + +!(3) freshwater flux to ocean: rainfall (+ ice melting water flux ?) +io_rain = um_rain * (1. - maice) +!!CH: confirmed: +!!if (ice_fwflux) io_rain = io_rain + mfresh !always .t. +!!NOTE mfresh is now splitted into melt (14) and form (15) and passed into ocn seperately. + +!(4) freshwater flux to ocean: snowfall +io_snow = um_snow * (1. - maice) + +!(5) salt flux to ocean +io_stflx = mfsalt + +!(6) ice/snow melting heatflux into ocean +io_htflx = mfhocn + +!(7) short wave radiation +!(CH: the (1-aice) weight should not be here 'cos all fluxes passed in from +! UM have already been aice-weighted when they are calculated there!!!) +!io_swflx = um_swflx * (1. - maice) + mfswthru +io_swflx = um_swflx + mfswthru +!!!20100616: test for more swflx +!!!io_swflx = 1.064 * um_swflx + mfswthru + +!(8) latent heat flux (positive out of ocean as required by MOM4) +io_qflux = um_evap * Lvap !Note it's already weighted in UM for open sea. +!20101210: NOT sure about the ice weghting in UM, 'cos the ice area does see +! non-zero (positive) evap. +!if (imsk_evap) then +! io_qflux = um_evap * Lvap * (1. - maice) +!endif + +!(9) sensible heat flux (positive out of ocean as required by MOM4) +!io_shflx = um_shflx * (1. - maice) +io_shflx = um_shflx + +!(10) net long wave radiation positive down +!io_lwflx = um_lwflx * (1. - maice) +io_lwflx = um_lwflx + +!(11) runoff (!check the incoming field! pattern? remapping ok? conserved? ...) +io_runof = um_runoff +! CHECK with SM about the annual cycle of core-runoff! (we only have annual mean) + +!(12) pressure +pice = gravit * msicemass +!---------------------------------------------------------------------------- +!sicemass = rho_ice x hi + rho_snow x hs (in m) +! +! Should we set limit to the ovelying ice pressure as suggested in MOM4 code? +!(see ocean_sbc.F90) if yes, we may use following +!pice(i,j) = min(pice(i,j), gravit*rhow*max_ice_thickness) +! (note rhow = 1026 kg/m^3 here, but mom4 instead uses rho0 = 1035 kg/m^3) +! No, let mom4 handle it (see ocean_sbc.F90) +! +!as GFDL SIS, we use patm 'anormaly' and then add in the ice/snow pressure! +!29/11/2007 +!---------------------------------------------------------------------------- +if (ice_pressure_on) then + io_press = pice * maice +endif +if (air_pressure_on) then + !as GFDL SIS, we use patm anormaly, i.e., taking off 1.e5 Pa ! + io_press(:,:,:) = io_press(:,:,:) + um_press(:,:,:) - 1.0e5 +endif +!(13) ice concentration +io_aice = maice +!(14) ice melt fwflux +io_melt = max(0.0,mfresh(:,:,:)) +!(15) ice form fwflux +io_form = min(0.0,mfresh(:,:,:)) + +io_co2 = um_co2 +io_wnd = um_wnd + +return +end subroutine get_i2o_fields + +!=============================================================================== +subroutine initialize_mice_fields_4_i2o + +implicit none + +maice = 0. +mstrocnxT = 0. +mstrocnyT = 0. +mfresh = 0. +mfsalt = 0. +mfhocn = 0. +mfswthru = 0. 
+msicemass = 0. + +return +end subroutine initialize_mice_fields_4_i2o + +!=============================================================================== +subroutine initialize_mice_fields_4_i2a + +implicit none + +muvel = 0. +mvvel = 0. + +maiu = 0. +maicen = 0. +mthikn = 0. +msnown = 0. + +mfoifr = 0. +mitopt = 0. +mitopk = 0. +mpndfn = 0. +mpndtn = 0. + +return +end subroutine initialize_mice_fields_4_i2a + +!=============================================================================== +subroutine initialize_mocn_fields_4_i2a + +implicit none + +msst = 0. +mssu = 0. +mssv = 0. +mco2 = 0. +mco2fx = 0. +msstfz = 0. + +return +end subroutine initialize_mocn_fields_4_i2a + +!=============================================================================== +subroutine time_average_ocn_fields_4_i2a + +implicit none + +msst(:,:,:) = msst(:,:,:) + ocn_sst(:,:,:) * coef_cpl +mssu(:,:,:) = mssu(:,:,:) + ocn_ssu(:,:,:) * coef_cpl +mssv(:,:,:) = mssv(:,:,:) + ocn_ssv(:,:,:) * coef_cpl +mco2(:,:,:) = mco2(:,:,:) + ocn_co2(:,:,:) * coef_cpl +mco2fx(:,:,:) = mco2fx(:,:,:) + ocn_co2fx(:,:,:) * coef_cpl +msstfz(:,:,:) = msstfz(:,:,:) + Tf(:,:,:) * coef_cpl + +return +end subroutine time_average_ocn_fields_4_i2a + +!=============================================================================== +!dhb599-20131002: resuming the old 'approach' (used before 20130420) which sets +!do_scale_fluxes = .t. and thus the "flatn_f/Lsub" terms is NOT used as part of +!'fresh' and passed into ocean...since the 'evaporation out of ice surface' is +!going into atmosphere, not supposed to change the ocean water volume! +!------------------------------------------------------------------------------- + +subroutine time_average_fields_4_i2o + +implicit none + +maice(:,:,:) = maice(:,:,:) + aice(:,:,:) * coef_io +mstrocnxT(:,:,:) = mstrocnxT(:,:,:) + strocnxT(:,:,:) * coef_io +mstrocnyT(:,:,:) = mstrocnyT(:,:,:) + strocnyT(:,:,:) * coef_io +!20130420: possible bug due to missing term "flatn_f/Lsub" in the last update for fresh +! use scale_fluxes=.f. to avoid flux scaling by /aice +! meaning fluxes are all grid-box-mean by the end of ice_step. +!mfresh(:,:,:) = mfresh(:,:,:) + fresh_gbm(:,:,:) * coef_io +!mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt_gbm(:,:,:) * coef_io +!mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn_gbm(:,:,:) * coef_io +!mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru_gbm(:,:,:) * coef_io +mfresh(:,:,:) = mfresh(:,:,:) + fresh(:,:,:) * coef_io +mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt(:,:,:) * coef_io +mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn(:,:,:) * coef_io +mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru(:,:,:) * coef_io +!--------------------------------------------------------------------------------------- +!--------------------------------------------------------------------------------------- +msicemass(:,:,:) = msicemass(:,:,:) + sicemass(:,:,:) * coef_io + +return +end subroutine time_average_fields_4_i2o + +!=============================================================================== +subroutine time_average_fields_4_i2o_20130420 + +implicit none + +maice(:,:,:) = maice(:,:,:) + aice(:,:,:) * coef_io +mstrocnxT(:,:,:) = mstrocnxT(:,:,:) + strocnxT(:,:,:) * coef_io +mstrocnyT(:,:,:) = mstrocnyT(:,:,:) + strocnyT(:,:,:) * coef_io +!20130420: possible bug due to missing term "flatn_f/Lsub" in the last update +!for fresh +! use scale_fluxes=.f. to avoid flux scaling by /aice +! meaning fluxes are all grid-box-mean by the end of ice_step. 
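+!NOTE (schematic sketch only, not actual driver code): the m* accumulators in
+! this module follow a zero / accumulate / use cycle over each ice-ocean
+! coupling interval; coef_io comes from cpl_parameters and is assumed here to
+! be 1/(number of ice steps per interval), so the running sums are time means:
+!
+! call initialize_mice_fields_4_i2o          ! zero at start of the interval
+! do n = 1, num_ice_steps_per_io_interval    ! every ice time step (name assumed)
+!   call time_average_fields_4_i2o           ! add field * coef_io
+! enddo
+! call get_i2o_fields                        ! convert the means to i2o coupling fields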
+!mfresh(:,:,:) = mfresh(:,:,:) + fresh_gbm(:,:,:) * coef_io +!mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt_gbm(:,:,:) * coef_io +!mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn_gbm(:,:,:) * coef_io +!mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru_gbm(:,:,:) * coef_io +mfresh(:,:,:) = mfresh(:,:,:) + fresh(:,:,:) * coef_io +mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt(:,:,:) * coef_io +mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn(:,:,:) * coef_io +mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru(:,:,:) * coef_io +!--------------------------------------------------------------------------------------- +msicemass(:,:,:) = msicemass(:,:,:) + sicemass(:,:,:) * coef_io + +return +end subroutine time_average_fields_4_i2o_20130420 + +!=============================================================================== +subroutine time_average_fields_4_i2a + +implicit none + +! ice fields: +muvel(:,:,:) = muvel(:,:,:) + uvel(:,:,:) * coef_ia +mvvel(:,:,:) = mvvel(:,:,:) + vvel(:,:,:) * coef_ia +maicen(:,:,:,:) = maicen(:,:,:,:) + aicen(:,:,:,:) * coef_ia !T cat. ice concentration +mthikn(:,:,:,:) = mthikn(:,:,:,:) + vicen(:,:,:,:) * coef_ia !T cat. ice thickness +msnown(:,:,:,:) = msnown(:,:,:,:) + vsnon(:,:,:,:) * coef_ia !T cat. snow thickness + +call to_ugrid(aice, aiiu) +maiu(:,:,:) = maiu(:,:,:) + aiiu(:,:,:) * coef_ia !U cell ice concentraction + +!BX: "First order" ice fraction (mfoifr, below) is required for GSI8 "Time-Travelling Ice" (TTI) +! coupling approach. It may be different than the "normal" ice fraction (maicen, above) if +! maicen is regridded with second order conservation scheme (as "proposed" in GC3). +! BUT, GC3 actually uses 1st order remapping for both of them, so they are identical! +! In ACCESS practice, no second order remapping has been appllied to any coupling field, and +! maicen and mfoifr are ALWAYS the same thing. +! We pass both of them to UM for "concictency" (thus keeping UM coupling code intact)! +mfoifr(:,:,:,:) = mfoifr(:,:,:,:) + aicen(:,:,:,:)* coef_ia !==maicen +mitopt(:,:,:,:) = mitopt(:,:,:,:) + Tn_top(:,:,:,:) * coef_ia +mitopk(:,:,:,:) = mitopk(:,:,:,:) + keffn_top(:,:,:,:) * coef_ia +mpndfn(:,:,:,:) = mpndfn(:,:,:,:) + apeffn(:,:,:,:) * coef_ia +mpndtn(:,:,:,:) = mpndtn(:,:,:,:) + trcrn(:,:,nt_hpnd,:,:) * coef_ia + +!add one more a-i interval mean field (integrated ice concentration), which, togthere with maicen, +!should be saved at the end of current run for use at the beginning of the continue run (e.g., +!converting ice fluxes into GBM. see routines "atm_icefluxes_back2GBM", and "get_sbc_ice")...... +!maice_ia(:,:,:) = maice_ia(:,:,:) + aice(:,:,:) * coef_ia + +!ocn fields: +!must be done after calling from_ocn so as to get the most recently updated ocn fields, +!therefore a separate call to "time_average_ocn_fields_4_i2a" is done for this purpose. + +return +end subroutine time_average_fields_4_i2a + +!=============================================================================== +subroutine check_i2a_fields(nstep) + +implicit none + +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ilout, ll, jf +integer(kind=int_kind), save :: ncid,currstep +data currstep/0/ + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
+ call create_ncfile('fields_i2a_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out)
+endif
+
+if (my_task == 0) then
+ write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', nstep
+ call ncheck( nf_open('fields_i2a_in_ice.nc',nf_write,ncid) )
+ call write_nc_1Dtime(real(nstep),currstep,'time',ncid)
+end if
+
+do jf = 1, nsend_i2a
+
+ select case(trim(cl_writ(jf)))
+ case('isst_ia'); vwork = ia_sst
+ case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:)
+ case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:)
+ case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:)
+ case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:)
+ case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:)
+ case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:)
+ case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:)
+ case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:)
+ case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:)
+ case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:)
+ case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:)
+ case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:)
+ case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:)
+ case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:)
+ case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:)
+ case('uvel_ia'); vwork = ia_uvel
+ case('vvel_ia'); vwork = ia_vvel
+ case('co2_i2'); vwork = ia_co2
+ case('co2fx_i2'); vwork = ia_co2fx
+ case('sstfz_ia'); vwork = ia_sstfz
+ case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:)
+ case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:)
+ case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:)
+ case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:)
+ case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:)
+ case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:)
+ case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:)
+ case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:)
+ case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:)
+ case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:)
+ case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:)
+ case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:)
+ case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:)
+ case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:)
+ case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:)
+ case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:)
+ case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:)
+ case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:)
+ case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:)
+ case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:)
+ case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:)
+ case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:)
+ case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:)
+ case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:)
+ case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:)
+ end select
+
+ call gather_global(gwork, vwork, master_task, distrb_info)
+
+ if (my_task == 0 ) then
+ call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out)
+ endif
+
+enddo
+
+if (my_task == 0) call ncheck(nf_close(ncid))
+
+return
+end subroutine check_i2a_fields
+
+!============================================================================
+subroutine check_a2i_fields(nstep)
+
+implicit none
+
+integer(kind=int_kind), intent(in) :: nstep
+character*80 :: ncfile='fields_a2i_in_ice_2.nc'
+integer(kind=int_kind) :: ncid, currstep, ll, ilout, jf
+data currstep/0/
+save currstep
+
+currstep=currstep+1
+
+if ( my_task == 0 .and. .not. file_exist(trim(ncfile)) ) then
+ call create_ncfile(trim(ncfile),ncid,il_im,il_jm,ll=1,ilout=il_out)
+endif
+
+if (my_task == 0) then
+ write(il_out,*) 'opening file ', trim(ncfile), ' at nstep = ', nstep
+ call ncheck( nf_open(trim(ncfile),nf_write,ncid) )
+ call write_nc_1Dtime(real(nstep),currstep,'time',ncid)
+end if
+
+do jf = 1, nrecv_a2i
+
+ select case (trim(cl_read(jf)))
+ case ('thflx_i'); vwork = um_thflx
+ case ('pswflx_i'); vwork = um_pswflx
+ case ('runoff_i'); vwork = um_runoff
+ case ('wme_i'); vwork = um_wme
+ case ('rain_i'); vwork = um_rain
+ case ('snow_i'); vwork = um_snow
+ case ('evap_i'); vwork = um_evap
+ case ('lhflx_i'); vwork = um_lhflx
+ case ('tmlt01'); vwork(:,:,:) = um_tmlt(:,:,1,:)
+ case ('tmlt02'); vwork(:,:,:) = um_tmlt(:,:,2,:)
+ case ('tmlt03'); vwork(:,:,:) = um_tmlt(:,:,3,:)
+ case ('tmlt04'); vwork(:,:,:) = um_tmlt(:,:,4,:)
+ case ('tmlt05'); vwork(:,:,:) = um_tmlt(:,:,5,:)
+ case ('bmlt01'); vwork(:,:,:) = um_bmlt(:,:,1,:)
+ case ('bmlt02'); vwork(:,:,:) = um_bmlt(:,:,2,:)
+ case ('bmlt03'); vwork(:,:,:) = um_bmlt(:,:,3,:)
+ case ('bmlt04'); vwork(:,:,:) = um_bmlt(:,:,4,:)
+ case ('bmlt05'); vwork(:,:,:) = um_bmlt(:,:,5,:)
+ case ('taux_i'); vwork = um_taux
+ case ('tauy_i'); vwork = um_tauy
+ case ('swflx_i'); vwork = um_swflx
+ case ('lwflx_i'); vwork = um_lwflx
+ case ('shflx_i'); vwork = um_shflx
+ case ('press_i'); vwork = um_press
+ case ('co2_ai'); vwork = um_co2
+ case ('wnd_ai'); vwork = um_wnd
+ case ('icenth_i'); vwork = um_icenth
+ case ('icesth_i'); vwork = um_icesth
+ case ('tsfice01'); vwork = um_tsfice(:,:,1,:)
+ case ('tsfice02'); vwork = um_tsfice(:,:,2,:)
+ case ('tsfice03'); vwork = um_tsfice(:,:,3,:)
+ case ('tsfice04'); vwork = um_tsfice(:,:,4,:)
+ case ('tsfice05'); vwork = um_tsfice(:,:,5,:)
+ case ('iceevp01'); vwork = um_iceevp(:,:,1,:)
+ case ('iceevp02'); vwork = um_iceevp(:,:,2,:)
+ case ('iceevp03'); vwork = um_iceevp(:,:,3,:)
+ case ('iceevp04'); vwork = um_iceevp(:,:,4,:)
+ case ('iceevp05'); vwork = um_iceevp(:,:,5,:)
+ end select
+
+ call gather_global(gwork, vwork, master_task, distrb_info)
+
+ if (my_task == 0) then
+ call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out)
+ endif
+
+enddo
+
+if (my_task == 0) call ncheck(nf_close(ncid))
+
+return
+end subroutine check_a2i_fields
+
+!============================================================================
+subroutine check_i2o_fields(nstep, scale)
+
+implicit none
+
+integer(kind=int_kind), intent(in) :: nstep
+real, intent(in) :: scale
+integer(kind=int_kind) :: ncid, currstep, ll, ilout, jf
+data currstep/0/
+save currstep
+
+currstep=currstep+1
+
+if (my_task == 0 .and. .not.
file_exist('fields_i2o_in_ice.nc') ) then + call create_ncfile('fields_i2o_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file fields_i2o_in_ice.nc at nstep = ', nstep + call ncheck( nf_open('fields_i2o_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = nsend_i2a + 1, jpfldout + + select case(trim(cl_writ(jf))) + case('strsu_io') + vwork = scale * io_strsu + case('strsv_io') + vwork = scale * io_strsv + case('rain_io') + vwork = scale * io_rain + case('snow_io') + vwork = scale * io_snow + case('stflx_io') + vwork = scale * io_stflx + case('htflx_io') + vwork = scale * io_htflx + case('swflx_io') + vwork = scale * io_swflx + case('qflux_io') + vwork = scale * io_qflux + case('shflx_io') + vwork = scale * io_shflx + case('lwflx_io') + vwork = scale * io_lwflx + case('runof_io') + vwork = scale * io_runof + case('press_io') + vwork = scale * io_press + case('aice_io') + vwork = scale * io_aice + case('form_io') + vwork = scale * io_form + case('melt_io') + vwork = scale * io_melt + case('co2_i1') + vwork = scale * io_co2 + case('wnd_i1') + vwork = scale * io_wnd + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0 ) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_i2o_fields + +!============================================================================ +subroutine check_o2i_fields(nstep) + +implicit none + +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid, currstep, ilout, ll, jf +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist('fields_o2i_in_ice.nc') ) then + call create_ncfile('fields_o2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file fields_o2i_in_ice.nc at nstep = ', nstep + call ncheck( nf_open('fields_o2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = nrecv_a2i + 1, jpfldin + + select case (trim(cl_read(jf))) + case ('sst_i'); vwork = ocn_sst + case ('sss_i'); vwork = ocn_sss + case ('ssu_i'); vwork = ocn_ssu + case ('ssv_i'); vwork = ocn_ssv + case ('sslx_i'); vwork = ocn_sslx + case ('ssly_i'); vwork = ocn_ssly + case ('pfmice_i'); vwork = ocn_pfmice + case ('co2_oi'); vwork = ocn_co2 + case ('co2fx_oi'); vwork = ocn_co2fx + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_o2i_fields + +!============================================================================ +subroutine check_frzmlt_sst(ncfilenm) + +!this is (mainly) used to check cice solo run frzmlt and sst ! +! (for comparison against a coupled run forcing into cice) + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, sst, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sst', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, frzmlt, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'frzmlt', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_frzmlt_sst + +!============================================================================ +subroutine check_sstsss(ncfilenm) + +!this is used to check cice sst/sss : temporary use (20091019) + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, sst, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sst', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, sss, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sss', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_sstsss + + +!============================================================================ +function file_exist (file_name) +! +character(len=*), intent(in) :: file_name +logical file_exist + +file_exist = .false. +if (len_trim(file_name) == 0) return +if (file_name(1:1) == ' ') return + +inquire (file=trim(file_name), exist=file_exist) + +end function file_exist + +!============================================================================ + +end module cpl_forcing_handler diff --git a/drivers/access/cpl_interface.F90 b/drivers/access/cpl_interface.F90 new file mode 120000 index 00000000..fef8a283 --- /dev/null +++ b/drivers/access/cpl_interface.F90 @@ -0,0 +1 @@ +cpl_interface.F90_uphalo \ No newline at end of file diff --git a/drivers/access/cpl_interface.F90_ggather b/drivers/access/cpl_interface.F90_ggather new file mode 100644 index 00000000..52307636 --- /dev/null +++ b/drivers/access/cpl_interface.F90_ggather @@ -0,0 +1,2063 @@ +!============================================================================ + module cpl_interface +!============================================================================ +! coupling interface between CICE and the oasis3_25 coupler (via MPI2) using +! the PRISM System Model Interface (PSMILe). 
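+!
+! Typical life cycle of this interface (indicative only -- the exact per-step
+! ordering is controlled by the calling CICE driver):
+!
+!   call prism_init              ! prism_init_comp_proto + local communicator
+!   call init_cpl               ! decomp_def, prism_def_var_proto, prism_enddef_proto
+!   ! ...each coupling interval:
+!   call from_atm(isteps)        ! prism_get_proto for the a2i fields
+!   call from_ocn(isteps)        ! prism_get_proto for the o2i fields
+!   ! ...CICE time stepping...
+!   call into_ocn(isteps)        ! prism_put_proto for the i2o fields
+!   call into_atm(isteps)        ! prism_put_proto for the i2a fields
+!   call coupler_termination     ! prism_terminate_proto + MPI_Finalize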
+!---------------------------------------------------------------------------- + + !prism stuff + use mod_prism + + !cice stuff + use ice_kinds_mod + use ice_communicate !, only : my_task, master_task + use ice_broadcast + use ice_blocks !, only : nx_block, ny_block, nghost + use ice_domain_size !, only : max_blocks, nx_global, ny_global, ncat + use ice_distribution, only : distrb, nprocsX, nprocsY + use ice_gather_scatter + use ice_constants + use ice_boundary, only : ice_HaloUpdate + use ice_domain !, only : distrb_info + use ice_grid, only : u2tgrid_vector + use ice_grid, only : ANGLE, ANGLET + use ice_exit, only : abort_ice + + !cpl stuff + use cpl_parameters + use cpl_netcdf_setup + use cpl_arrays_setup + use cpl_forcing_handler + + implicit none + + public :: prism_init, init_cpl, coupler_termination, get_time0_sstsss, & + from_atm, into_ocn, from_ocn, into_atm, save_restart_i2a + + private + + logical :: mpiflag + integer(kind=int_kind) :: ierror, ibou + character(len=9) :: chiceout + character(len=3) :: chout + logical :: ll_comparal ! paralell or mono-cpl coupling + integer(kind=int_kind) :: il_comp_id ! Component ID + integer(kind=int_kind) :: il_nbtotproc ! Total number of processes + integer(kind=int_kind) :: il_nbcplproc ! No of processes involved in coupling + integer(kind=int_kind) :: il_part_id ! Local partition ID + integer(kind=int_kind) :: il_length ! Size of partial field for each process + integer(kind=int_kind), dimension(2) :: il_var_nodims + integer(kind=int_kind), dimension(4) :: il_var_shape + + integer(kind=int_kind) :: l_ilo, l_ihi, l_jlo, l_jhi !local partition + integer(kind=int_kind) :: gh_ilo, gh_ihi, gh_jlo, gh_jhi !local ghost outline + integer :: sendsubarray, recvsubarray , resizedrecvsubarray + integer, dimension(:), allocatable :: counts, disps + + integer(kind=int_kind) :: il_flag ! Flag for grid writing + integer(kind=int_kind) :: il_status, il_fileid, il_varid + integer(kind=int_kind) :: io_size, ii, il_bufsize, il_real, il_bufsizebyt + integer(kind=int_kind) :: integer_byte_size, integer_io_size + real(kind=dbl_kind), dimension(:,:), allocatable :: rla_array + real(kind=dbl_kind), dimension(:), allocatable :: rla_bufsend + real(kind=dbl_kind), dimension(:,:), allocatable :: vwork2d + !local domain work array, 4 coupling data passing + contains + +!====================================================================== + subroutine prism_init +!-----------------------! + + include 'mpif.h' + + !----------------------------------- + ! 'define' the model global domain: + !----------------------------------- + il_im = nx_global + il_jm = ny_global + il_imjm = il_im * il_jm + + !allocate rla_array to be used below + allocate (rla_array(il_im,il_jm) ) + + !print *, 'CICE: (prism_init) dbl_kind, ip_realwp_p= ',dbl_kind, ip_realwp_p + + !------------------- + ! Initialize PSMILe. + !------------------- + + ! Initialise MPI + mpiflag = .FALSE. + call MPI_Initialized (mpiflag, ierror) + print *,'CICE: (prism_init) BF MPI_INIT, mpiflag = ',mpiflag + + if ( .not. mpiflag ) then + call MPI_INIT(ierror) + endif + + call MPI_Initialized (mpiflag, ierror) + print *, 'CICE: (prism_init) AF MPI_INIT, mpiflag = ',mpiflag + + print * + print *, 'CICE: (prism_init) calling prism_init_comp_proto...' + + call prism_init_comp_proto (il_comp_id, cp_modnam, ierror) + + if (ierror /= PRISM_Ok) then + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 1') + else + print *, 'CICE: (prism_init) called prism_init_comp_proto !' 
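+ ! (The il_comp_id returned by prism_init_comp_proto identifies this
+ ! component to OASIS; it is reused by the prism_abort_proto calls
+ ! throughout this module.)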
+ endif + + !B: the following part may not be really needed(?) + ! + ! Let's suppose the model attaches to a MPI buffer for its own use + ! + ! ! Sophisticated way to determine buffer size needed (without "kind") + ! ! Here one message containing rla_array + + integer_byte_size = BIT_SIZE(ii)/8 + inquire (iolength=io_size) ii + integer_io_size = io_size + inquire (iolength=io_size) rla_array(1,1) + il_real = io_size/integer_io_size*integer_byte_size + il_bufsize = il_imjm + MPI_BSEND_OVERHEAD/il_real + 1 + allocate (rla_bufsend(il_bufsize), stat = ierror) + il_bufsizebyt = il_bufsize * il_real + call MPI_Buffer_Attach(rla_bufsend, il_bufsizebyt, ierror) + + if (ierror /= PRISM_Ok) then + print *, 'CICE: (prism_init) Error in MPI_Buffer_Attach.' + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 2') + else + print *, 'CICE: (prism_init) MPI_Buffer_Attach ok!' + endif + ! + ! PSMILe attribution of local communicator. + ! + ! Either MPI_COMM_WORLD if MPI2 is used, + ! or a local communicator created by Oasis if MPI1 is used. + ! + call prism_get_localcomm_proto(il_commlocal, ierror) + ! + if (ierror /= PRISM_Ok) then + print *, 'CICE: Error in prism_get_localcomm_proto' + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 3') + else + print *, 'CICE: _get_localcomm_ OK! il_commlocal= ',il_commlocal + endif + + ! + ! Inquire if model is parallel or not and open the process log file + ! + ! print *, '* CICE: Entering init_cpl.....' + + print *, '* CICE (prism_init) calling MPI_Comm_Size ...' + call MPI_Comm_Size(il_commlocal, il_nbtotproc, ierror) + print *, '* CICE (prism_init) calling MPI_Comm_Rank ...' + call MPI_Comm_Rank(il_commlocal, my_task, ierror) + + print *, '* CICE (prism_init) il_commlocal, il_nbtotproc, my_task = ' + print *, '* CICE (prism_init) ', il_commlocal, il_nbtotproc, my_task + ! + il_nbcplproc = il_nbtotproc !multi-process coupling (real parallel cpl)! + !il_nbcplproc = 1 !mono process coupling + + if (il_nbtotproc /= 1 .and. il_nbcplproc == il_nbtotproc ) then + ll_comparal = .TRUE. ! multi-cpl coupling! + else + ll_comparal = .FALSE. !mono-cpl coupling! + endif + + print *, '* CICE: prism_init called OK!' + + end subroutine prism_init + +!======================================================================= + subroutine init_cpl + + use mpi + use ice_communicate +!--------------------! + integer(kind=int_kind) :: jf, jfs + integer(kind=int_kind), dimension(2) :: il_var_nodims ! see below + integer(kind=int_kind), dimension(4) :: il_var_shape ! see below + + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j, n + type (block) :: this_block ! block information for current block + + integer, dimension(2) :: starts,sizes,subsizes + integer(kind=mpi_address_kind) :: start, extent +! integer, dimension(:), allocatable :: counts, disps + real(kind=dbl_kind) :: realvalue + integer (int_kind) :: nprocs + integer (int_kind),dimension(:), allocatable :: vilo, vjlo + + nprocs = get_num_procs() + allocate(vilo(nprocs)) + allocate(vjlo(nprocs)) +!initialise partition to inexisting region + l_ilo=nx_global + l_ihi=0 + l_jlo=ny_global + l_jhi=0 + gh_ilo=nx_global + gh_ihi=0 + gh_jlo=ny_global + gh_jhi=0 + ! Open the process log file +!20100406 if (my_task == 0 .or. 
ll_comparal) then + il_out = 85 + my_task + write(chout,'(I3.3)')il_out + chiceout='iceout'//chout + open(il_out,file=chiceout,form='formatted') + + write(il_out,*) 'Number of processes:', il_nbtotproc + write(il_out,*) 'Local process number:', my_task + write(il_out,*) 'Local communicator is : ',il_commlocal + write(il_out,*) 'Grid layout: nx_global,ny_global= ',nx_global,ny_global + write(il_out,*) 'Grid decomposition: nx_block,ny_block,max_blocks= ',& + nx_block,ny_block,max_blocks +!20100406 endif + +! write(il_out,*) 'Number of blocks :', nblocks +! do iblk = 1, nblocks +! +! this_block = get_block(blocks_ice(iblk),iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +!! do j=this_block%jlo,this_block%jhi +!! do i=this_block%ilo,this_block%ihi +!! ARRAY_G(this_block%i_glob(i), & +!! this_block%j_glob(j)) = & +!! ARRAY(i,j,src_dist%blockLocalID(n)) +! +! write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi +! +! end do +!find out partition of this processor, which is done by init_domain_blocks + write(il_out,*) 'nblocks_x, nblocks_y, Number of tot blocks :', nblocks_x, nblocks_y, nblocks_tot +!!!!!!!!!!!! +! do iblk=1,nblocks_tot +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! !print local to global mapping +! write(il_out,*) 'block, local ilo ihi jlo jhi:', distrb_info%blockLocalID(iblk), ilo,ihi,jlo,jhi +! write(il_out,*) 'block global:', this_block%i_glob(ilo),this_block%i_glob(ihi), & +! this_block%j_glob(jlo),this_block%j_glob(jhi) +! endif +! end do + +!!debug + if (my_task == 0) then + write(il_out,*) "all block info:" + do iblk=1,nblocks_tot + this_block = get_block(iblk,iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + write(il_out,*) ' this block: cpu, iblock, jblock=', distrb_info%blockLocation(iblk)-1, this_block%iblock, this_block%jblock + write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + end do + end if + + do iblk=1,nblocks_tot + + if (distrb_info%blockLocation(iblk) == my_task+1) then + + this_block = get_block(iblk,iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + write(il_out,*) ' this block: iblock, jblock=', this_block%iblock, this_block%jblock +! write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi + write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + if (this_block%i_glob(ilo) < l_ilo) then + l_ilo = this_block%i_glob(ilo) + gh_ilo = this_block%i_glob(ilo-nghost) + endif + if (this_block%j_glob(jlo) < l_jlo) then + l_jlo = this_block%j_glob(jlo) + gh_jlo = this_block%j_glob(jlo-nghost) + endif + if (this_block%i_glob(ihi) > l_ihi) then + l_ihi = this_block%i_glob(ihi) + gh_ihi = this_block%i_glob(ihi+nghost) + endif + if (this_block%j_glob(jhi) > l_jhi) then + l_jhi = this_block%j_glob(jhi) + gh_jhi = this_block%j_glob(jhi+nghost) + endif +! l_ilo = min(l_ilo, this_block%i_glob(ilo)) +! l_ihi = max(l_ihi, this_block%i_glob(ihi)) +! l_jlo = min(l_jlo, this_block%j_glob(jlo)) +! l_jhi = max(l_jhi, this_block%j_glob(jhi)) +! else if (distrb_info%blockLocation(n) == 0) then +! 
write(il_out,*) ' land block:', n + + endif + end do + write(il_out,*) ' local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + write(il_out,*) ' partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 +!print ghost info + write(il_out,*) ' ghost global:',gh_ilo, gh_ihi, gh_jlo, gh_jhi + +!calculate partition using nprocsX and nprocsX + l_ilo=mod(my_task,nprocsX)*nx_global/nprocsX+1 + l_ihi=l_ilo + nx_global/nprocsX -1 + l_jlo=int(my_task/nprocsX) * ny_global/nprocsY+1 + l_jhi=l_jlo+ny_global/nprocsY - 1 + + write(il_out,*) ' 2local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + write(il_out,*) ' 2partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 + + call mpi_gather(l_ilo, 1, mpi_integer, vilo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) + call broadcast_array(vilo, 0) + call mpi_gather(l_jlo, 1, mpi_integer, vjlo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) + call broadcast_array(vjlo, 0) + +!create subarray of this rank + sizes(1)=l_ihi-l_ilo+1; sizes(2)=l_jhi-l_jlo+1 + subsizes(1)=l_ihi-l_ilo+1; subsizes(2)=l_jhi-l_jlo+1 + starts(1)=0; starts(2)=0 + call mpi_type_create_subarray(2, sizes, subsizes, starts, mpi_order_fortran, & + MPI_REAL8, sendsubarray, ierror) + call mpi_type_commit(sendsubarray,ierror) + if (my_task == 0) then ! create recv buffer in main cpu + sizes(1)=nx_global; sizes(2)=ny_global + subsizes(1)=l_ihi-l_ilo+1; subsizes(2)=l_jhi-l_jlo+1 + starts(1)=0; starts(2)=0 + call mpi_type_create_subarray(2, sizes, subsizes, starts, mpi_order_fortran, & + MPI_REAL8, recvsubarray, ierror) + call mpi_type_commit(recvsubarray, ierror) + extent = sizeof(realvalue) + start = 0 + call mpi_type_create_resized(recvsubarray, start, extent, resizedrecvsubarray, ierror) + call mpi_type_commit(resizedrecvsubarray,ierror) + end if + allocate(counts(nprocs),disps(nprocs)) + forall (n=1:nprocs) counts(n) = 1 + do n=1, nprocs + disps(n) = ((vjlo(n)-1)*nx_global + (vilo(n)-1)) + !disps(n) = ((vilo(n)-1)*ny_global + (vjlo(n)-1)) + end do + + write(il_out,*) ' vilo ', vilo + write(il_out,*) ' vjlo ', vjlo + write(il_out,*) ' counts ', counts + write(il_out,*) ' disps ', disps + +! if ( ll_comparal ) then +! il_im = l_ihi-l_ilo+1 !nx_global +! il_jm = l_jhi-l_jlo+1 !ny_global +! il_imjm = il_im * il_jm +! endif + if (ll_comparal) then + xdim=l_ihi-l_ilo+1 + ydim=l_jhi-l_jlo+1 + else + xdim=il_im + ydim=il_jm + endif + + +!----------------------------------------------------------------------- + if (my_task == 0 .or. ll_comparal) then + ! + ! The following steps need to be done: + ! -> by the process if cice is monoprocess; + ! -> only by the master process, if cice is parallel and only + ! master process is involved in the coupling; + ! -> by all processes, if cice is parallel and all processes + ! are involved in the coupling. + + call decomp_def (il_part_id, il_length, il_imjm, & + my_task, il_nbcplproc, ll_comparal, il_out) + + write(il_out,*)'(init_cpl) called decomp_def, my_task, ierror = ',my_task, ierror + + ! + ! PSMILe coupling fields declaration + ! + + il_var_nodims(1) = 2 ! rank of coupling field + il_var_nodims(2) = 1 ! number of bundles in coupling field (always 1) + !il_var_shape(1)= 1 ! min index for the coupling field local dim + !il_var_shape(2)= xdim !il_im ! max index for the coupling field local dim + !il_var_shape(3)= 1 + !il_var_shape(4)= ydim !il_jm + if (ll_comparal) then + il_var_shape(1)= 1 !l_ilo ! min index for the coupling field local dimension + il_var_shape(2)= l_ihi-l_ilo+1 ! max index for the coupling field local dim + il_var_shape(3)= 1 !l_jlo ! 
min index for the coupling field local dim + il_var_shape(4)= l_jhi-l_jlo+1 ! max index for the coupling field local dim + else + il_var_shape(1)= 1 ! min index for the coupling field local dimension + il_var_shape(2)= il_im ! max index for the coupling field local dim + il_var_shape(3)= 1 ! min index for the coupling field local dim + il_var_shape(4)= il_jm ! max index for the coupling field local dim + endif + + ! ?Does this help? + !il_var_shape(1)= 2 ! min index for the coupling field local dim + !il_var_shape(2)= il_im+1 ! max index for the coupling field local dim + !il_var_shape(3)= 2 + !il_var_shape(4)= il_jm+1 + + endif !my_task==0 .or. ll_comparal + + !*** ***! + !***B: we now define cl_writ/cl_read on all ranks! (20090403) ***! + !*** ***! + + ! + ! Define name (as in namcouple) and declare each field sent by ice + ! + + ! + ! ice ==> atm + ! + nsend_i2a = 0 + + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='isst_ia' + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'icecon',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'snwthk',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'icethk',jf + enddo + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='uvel_ia' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='vvel_ia' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='co2_i2' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='co2fx_i2' + ! new fields sending to UM GA7 + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='sstfz_ia' + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'foifr',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopt',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopk',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndfn',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndtn',jf + enddo + + if (my_task == 0) then + write(il_out,*) 'init_cpl: Number of fields sent to atm: ',nsend_i2a + endif + ! + ! ice ==> ocn + ! + nsend_i2o = nsend_i2a + + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='strsu_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='strsv_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='rain_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='snow_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='stflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='htflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='swflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='qflux_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='shflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='lwflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='runof_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='press_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='aice_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='melt_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='form_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='co2_i1' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='wnd_i1' + + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) 'init_cpl: Number of fields sent to ocn: ',nsend_i2o - nsend_i2a + + if (nsend_i2o /= jpfldout) then + write(il_out,*) + write(il_out,*)'!!! Fatal Error: (init_cpl) nsend = ',nsend_i2o + write(il_out,*)'!!! 
It should be nsend = ',jpfldout + call abort_ice('CICE: Number of outgoing coupling fields incorrect!') + endif + + write(il_out,*) 'init_cpl: Total number of fields sent from ice: ',jpfldout + + !jpfldout == nsend_i2o! + !---------------------! + + do jf=1, jpfldout + call prism_def_var_proto (il_var_id_out(jf),cl_writ(jf), il_part_id, & + il_var_nodims, PRISM_Out, il_var_shape, PRISM_Real, ierror) + enddo + + endif + + ! + ! Define name (as in namcouple) and declare each field received by ice + ! + + ! + ! atm ==> ice + ! + nrecv_a2i = 0 + + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'thflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'pswflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'runoff_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'wme_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'rain_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'snow_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'evap_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'lhflx_i' + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a4,i2.2,a2)')'tmlt',jf,'_i' + enddo + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a4,i2.2,a2)')'bmlt',jf,'_i' + enddo + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'taux_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'tauy_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'swflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'lwflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'shflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'press_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'co2_ai' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'wnd_ai' + ! new fields recving from UM GA7 + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'icenth_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'icesth_i' + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a6,i2.2)')'tsfice',jf + enddo + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a6,i2.2)')'iceevp',jf + enddo + + if (my_task==0 .or. ll_comparal) then + write(il_out,*) 'init_cpl: Number of fields rcvd from atm: ',nrecv_a2i + endif + + ! + ! ocn ==> ice + ! + nrecv_o2i = nrecv_a2i + + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sst_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sss_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssu_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssv_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sslx_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssly_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'pfmice_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'co2_oi' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'co2fx_oi' + + if (my_task==0 .or. ll_comparal) then + + write(il_out,*) 'init_cpl: Number of fields rcvd from ocn: ',nrecv_o2i-nrecv_a2i + + if (nrecv_o2i /= jpfldin) then + write(il_out,*) + write(il_out,*)'!!! Fatal Error: (init_cpl) nrecv = ',nrecv_o2i + write(il_out,*)'!!! It should be nrecv = ',jpfldin + call abort_ice('CICE: Number of incoming coupling fields incorrect!') + endif + !jpfldin == nrecv_o2i! + !--------------------! + + write(il_out,*) 'init_cpl: Total number of fields rcvd by ice: ',jpfldin + + do jf=1, jpfldin + call prism_def_var_proto (il_var_id_in(jf), cl_read(jf), il_part_id, & + il_var_nodims, PRISM_In, il_var_shape, PRISM_Real, ierror) + enddo + + ! + ! PSMILe end of declaration phase + ! 
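+ ! prism_enddef_proto ends the PSMILe definition phase: after it returns,
+ ! no further prism_def_var_proto declarations are accepted, and the
+ ! prism_get_proto/prism_put_proto exchanges used in from_*/into_* below
+ ! become available.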
+ call prism_enddef_proto (ierror) + + endif !my_task==0 + + ! + ! Allocate the 'coupling' fields (to be used) for EACH PROCESS:! + ! + + ! fields in: (local domain) + ! + ! from atm: + allocate (um_thflx(nx_block,ny_block,max_blocks)); um_thflx(:,:,:) = 0 + allocate (um_pswflx(nx_block,ny_block,max_blocks)); um_pswflx(:,:,:) = 0 + allocate (um_runoff(nx_block,ny_block,max_blocks)); um_runoff(:,:,:) = 0 + allocate (um_wme(nx_block,ny_block,max_blocks)); um_wme(:,:,:) = 0 + allocate (um_snow(nx_block,ny_block,max_blocks)); um_snow(:,:,:) = 0 + allocate (um_rain(nx_block,ny_block,max_blocks)); um_rain(:,:,:) = 0 + allocate (um_evap(nx_block,ny_block,max_blocks)); um_evap(:,:,:) = 0 + allocate (um_lhflx(nx_block,ny_block,max_blocks)); um_lhflx(:,:,:) = 0 + allocate (um_taux(nx_block,ny_block,max_blocks)); um_taux(:,:,:) = 0 + allocate (um_tauy(nx_block,ny_block,max_blocks)); um_tauy(:,:,:) = 0 + allocate (um_swflx(nx_block,ny_block,max_blocks)); um_swflx(:,:,:) = 0 + allocate (um_lwflx(nx_block,ny_block,max_blocks)); um_lwflx(:,:,:) = 0 + allocate (um_shflx(nx_block,ny_block,max_blocks)); um_shflx(:,:,:) = 0 + allocate (um_press(nx_block,ny_block,max_blocks)); um_press(:,:,:) = 0 + allocate (um_tmlt(nx_block,ny_block,ncat,max_blocks)); um_tmlt(:,:,:,:) = 0 + allocate (um_bmlt(nx_block,ny_block,ncat,max_blocks)); um_bmlt(:,:,:,:) = 0 + allocate (um_co2(nx_block,ny_block,max_blocks)); um_co2(:,:,:) = 0 + allocate (um_wnd(nx_block,ny_block,max_blocks)); um_wnd(:,:,:) = 0 + allocate (um_icenth(nx_block,ny_block,max_blocks)); um_icenth(:,:,:) = 0 + allocate (um_icesth(nx_block,ny_block,max_blocks)); um_icesth(:,:,:) = 0 + allocate (um_tsfice(nx_block,ny_block,ncat,max_blocks)); um_tsfice(:,:,:,:) = 0 + allocate (um_iceevp(nx_block,ny_block,ncat,max_blocks)); um_iceevp(:,:,:,:) = 0 + + ! + allocate ( core_runoff(nx_block,ny_block,max_blocks)); core_runoff(:,:,:) = 0. + ! + + ! from ocn: + allocate (ocn_sst(nx_block,ny_block,max_blocks)); ocn_sst(:,:,:) = 0 + allocate (ocn_sss(nx_block,ny_block,max_blocks)); ocn_sss(:,:,:) = 0 + allocate (ocn_ssu(nx_block,ny_block,max_blocks)); ocn_ssu(:,:,:) = 0 + allocate (ocn_ssv(nx_block,ny_block,max_blocks)); ocn_ssv(:,:,:) = 0 + allocate (ocn_sslx(nx_block,ny_block,max_blocks)); ocn_sslx(:,:,:) = 0 + allocate (ocn_ssly(nx_block,ny_block,max_blocks)); ocn_ssly(:,:,:) = 0 + allocate (ocn_pfmice(nx_block,ny_block,max_blocks)); ocn_pfmice(:,:,:) = 0 + allocate (ocn_co2(nx_block,ny_block,max_blocks)); ocn_co2(:,:,:) = 0 + allocate (ocn_co2fx(nx_block,ny_block,max_blocks)); ocn_co2fx(:,:,:) = 0 + + ! fields out: (local domain) + ! + ! 
to atm: + allocate (ia_sst(nx_block,ny_block,max_blocks)); ia_sst(:,:,:) = 0 + allocate (ia_uvel(nx_block,ny_block,max_blocks)); ia_uvel(:,:,:) = 0 + allocate (ia_vvel(nx_block,ny_block,max_blocks)); ia_vvel(:,:,:) = 0 + allocate (ia_aicen(nx_block,ny_block,ncat,max_blocks)); ia_aicen(:,:,:,:) = 0 + allocate (ia_snown(nx_block,ny_block,ncat,max_blocks)); ia_snown(:,:,:,:) = 0 + allocate (ia_thikn(nx_block,ny_block,ncat,max_blocks)); ia_thikn(:,:,:,:) = 0 + allocate (ia_co2(nx_block,ny_block,max_blocks)); ia_co2(:,:,:) = 0 + allocate (ia_co2fx(nx_block,ny_block,max_blocks)); ia_co2fx(:,:,:) = 0 + allocate (ia_sstfz(nx_block,ny_block,max_blocks)); ia_sstfz(:,:,:) = 0 + allocate (ia_foifr(nx_block,ny_block,ncat,max_blocks)); ia_foifr(:,:,:,:) = 0 + allocate (ia_itopt(nx_block,ny_block,ncat,max_blocks)); ia_itopt(:,:,:,:) = 0 + allocate (ia_itopk(nx_block,ny_block,ncat,max_blocks)); ia_itopk(:,:,:,:) = 0 + allocate (ia_pndfn(nx_block,ny_block,ncat,max_blocks)); ia_pndfn(:,:,:,:) = 0 + allocate (ia_pndtn(nx_block,ny_block,ncat,max_blocks)); ia_pndtn(:,:,:,:) = 0 + ! + ! to ocn: + allocate (io_strsu(nx_block,ny_block,max_blocks)); io_strsu(:,:,:) = 0 + allocate (io_strsv(nx_block,ny_block,max_blocks)); io_strsv(:,:,:) = 0 + allocate (io_rain (nx_block,ny_block,max_blocks)); io_rain (:,:,:) = 0 + allocate (io_snow (nx_block,ny_block,max_blocks)); io_snow (:,:,:) = 0 + allocate (io_stflx(nx_block,ny_block,max_blocks)); io_stflx(:,:,:) = 0 + allocate (io_htflx(nx_block,ny_block,max_blocks)); io_htflx(:,:,:) = 0 + allocate (io_swflx(nx_block,ny_block,max_blocks)); io_swflx(:,:,:) = 0 + allocate (io_qflux(nx_block,ny_block,max_blocks)); io_qflux(:,:,:) = 0 + allocate (io_lwflx(nx_block,ny_block,max_blocks)); io_lwflx(:,:,:) = 0 + allocate (io_shflx(nx_block,ny_block,max_blocks)); io_shflx(:,:,:) = 0 + allocate (io_runof(nx_block,ny_block,max_blocks)); io_runof(:,:,:) = 0 + allocate (io_press(nx_block,ny_block,max_blocks)); io_press(:,:,:) = 0 + allocate (io_aice(nx_block,ny_block,max_blocks)); io_aice(:,:,:) = 0 + allocate (io_melt(nx_block,ny_block,max_blocks)); io_melt(:,:,:) = 0 + allocate (io_form(nx_block,ny_block,max_blocks)); io_form(:,:,:) = 0 + allocate (io_co2(nx_block,ny_block,max_blocks)); io_co2(:,:,:) = 0 + allocate (io_wnd(nx_block,ny_block,max_blocks)); io_wnd(:,:,:) = 0 + + ! temporary arrays: + ! IO cpl int time-average + allocate (maice(nx_block,ny_block,max_blocks)); maice(:,:,:) = 0 + allocate (mstrocnxT(nx_block,ny_block,max_blocks)); mstrocnxT(:,:,:) = 0 + allocate (mstrocnyT(nx_block,ny_block,max_blocks)); mstrocnyT(:,:,:) = 0 + allocate (mfresh(nx_block,ny_block,max_blocks)); mfresh(:,:,:) = 0 + allocate (mfsalt(nx_block,ny_block,max_blocks)); mfsalt(:,:,:) = 0 + allocate (mfhocn(nx_block,ny_block,max_blocks)); mfhocn(:,:,:) = 0 + allocate (mfswthru(nx_block,ny_block,max_blocks)); mfswthru(:,:,:) = 0 + allocate (msicemass(nx_block,ny_block,max_blocks)); msicemass(:,:,:) = 0 + ! 
IA cpl int time-average (3D) + allocate (maiu(nx_block,ny_block,max_blocks)); maiu(:,:,:) = 0 + allocate (muvel(nx_block,ny_block,max_blocks)); muvel(:,:,:) = 0 + allocate (mvvel(nx_block,ny_block,max_blocks)); mvvel(:,:,:) = 0 + allocate (msst(nx_block,ny_block,max_blocks)); msst(:,:,:) = 0 + allocate (mssu(nx_block,ny_block,max_blocks)); mssu(:,:,:) = 0 + allocate (mssv(nx_block,ny_block,max_blocks)); mssv(:,:,:) = 0 + allocate (mco2(nx_block,ny_block,max_blocks)); mco2(:,:,:) = 0 + allocate (mco2fx(nx_block,ny_block,max_blocks)); mco2fx(:,:,:) = 0 + allocate (msstfz(nx_block,ny_block,max_blocks)); msstfz(:,:,:) = 0 + ! IA cpl int time-average (4D) + allocate (maicen(nx_block,ny_block,ncat,max_blocks)); maicen(:,:,:,:) = 0 + allocate (msnown(nx_block,ny_block,ncat,max_blocks)); msnown(:,:,:,:) = 0 + allocate (mthikn(nx_block,ny_block,ncat,max_blocks)); mthikn(:,:,:,:) = 0 + allocate (mfoifr(nx_block,ny_block,ncat,max_blocks)); mfoifr(:,:,:,:) = 0 + allocate (mitopt(nx_block,ny_block,ncat,max_blocks)); mitopt(:,:,:,:) = 0 + allocate (mitopk(nx_block,ny_block,ncat,max_blocks)); mitopk(:,:,:,:) = 0 + allocate (mpndfn(nx_block,ny_block,ncat,max_blocks)); mpndfn(:,:,:,:) = 0 + allocate (mpndtn(nx_block,ny_block,ncat,max_blocks)); mpndtn(:,:,:,:) = 0 +!BX: + allocate (maicen_saved(nx_block,ny_block,ncat,max_blocks)); maicen_saved(:,:,:,:) = 0 + + allocate (vwork(nx_block,ny_block,max_blocks)); vwork(:,:,:) = 0 + allocate (gwork(nx_global,ny_global)); gwork(:,:) = 0 + allocate (sicemass(nx_block,ny_block,max_blocks)); sicemass(:,:,:) = 0. + allocate (vwork2d(l_ilo:l_ihi, l_jlo:l_jhi)); vwork2d(:,:) = 0. !l_ihi-l_ilo+1, l_jhi-l_jlo+1 + end subroutine init_cpl + +!======================================================================= + subroutine from_atm(isteps) +!----------------------------! + + implicit none + + integer(kind=int_kind), intent(in) :: isteps + + real(kind=dbl_kind) :: tmpu, tmpv + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ncid,currstep,ll,ilout + + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(from_atm) receiving coupling fields at rtime= ', isteps + if (chk_a2i_fields) then + if ( .not. file_exist('fields_a2i_in_ice.nc') ) then + call create_ncfile('fields_a2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_a2i_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_a2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + write(il_out,*) + write(il_out,*) '(from_atm) Total number of fields to be rcvd: ', nrecv_a2i + endif + + write(il_out,*) "prism_get from_atm at sec: ", isteps + do jf = 1, nrecv_a2i + + if (my_task==0 .or. ll_comparal ) then + + !jf-th field in + write(il_out,*) + write(il_out,*) '*** receiving coupling field No. ', jf, cl_read(jf) + !call flush(il_out) + + if (ll_comparal) then + call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) !vwork(2:,2:,my_task+1), + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork,counts,disps,resizedrecvsubarray, & + 0,MPI_COMM_ICE,ierror) + call broadcast_array(gwork, 0) +! gwork(l_ilo:l_ihi, l_jlo:l_jhi) = vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) + else + call prism_get_proto (il_var_id_in(jf), isteps, gwork, ierror) + endif + + if ( ierror /= PRISM_Ok .and. 
ierror < PRISM_Recvd) then + write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror + call prism_abort_proto(il_comp_id, 'cice from_atm','stop 1') + else + write(il_out,*) + write(il_out,*)'(from_atm) rcvd at time with err: ',cl_read(jf),isteps,ierror + + if (ll_comparal .and. chk_a2i_fields) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps, resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_a2i_fields) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif + + if (.not. ll_comparal ) then + call scatter_global(vwork,gwork,master_task,distrb_info, & + field_loc_center, field_type_scalar) + else + call unpack_global_dbl(vwork,gwork,master_task,distrb_info, & + field_loc_center, field_type_scalar) + endif ! not ll_comparal + +#if (MXBLCKS != 1) +#error The following code assumes that max_blocks == 1 +#endif + + !***Note following "select case" works only if cl_read(:) is defined at ALL ranks***! + !-----------------------------------------------------------------------------------! + select case (trim(cl_read(jf))) + case ('thflx_i'); + um_thflx(:,:,:)=vwork(:,:,:) + case ('pswflx_i'); + um_pswflx(:,:,:) =vwork(:,:,:) + case ('runoff_i'); + um_runoff(:,:,:) =vwork(:,:,:) + case ('wme_i'); + um_wme(:,:,:) = vwork(:,:,:) +! case ('rain_i'); um_rain(:,:,:) = vwork(:,:,:) +! case ('snow_i'); um_snow(:,:,:) = vwork(:,:,:) +!---20100825 -- just be cauious: ------------------------- + case ('rain_i'); + um_rain(:,:,:) =max(0.0,vwork(:,:,:)) + case ('snow_i'); + um_snow(:,:,:) =max(0.0,vwork(:,:,:)) +!--------------------------------------------------------- + case ('evap_i');um_evap(:,:,:) = vwork(:,:,:) + case ('lhflx_i');um_lhflx(:,:,:) = vwork(:,:,:) + case ('tmlt01_i');um_tmlt(:,:,1,:) = vwork(:,:,:) + case ('tmlt02_i');um_tmlt(:,:,2,:) = vwork(:,:,:) + case ('tmlt03_i');um_tmlt(:,:,3,:) = vwork(:,:,:) + case ('tmlt04_i');um_tmlt(:,:,4,:) = vwork(:,:,:) + case ('tmlt05_i');um_tmlt(:,:,5,:) = vwork(:,:,:) + case ('bmlt01_i');um_bmlt(:,:,1,:) = vwork(:,:,:) + case ('bmlt02_i');um_bmlt(:,:,2,:) = vwork(:,:,:) + case ('bmlt03_i');um_bmlt(:,:,3,:) = vwork(:,:,:) + case ('bmlt04_i');um_bmlt(:,:,4,:) = vwork(:,:,:) + case ('bmlt05_i');um_bmlt(:,:,5,:) = vwork(:,:,:) + case ('taux_i');um_taux(:,:,:) = vwork(:,:,:) + case ('tauy_i');um_tauy(:,:,:) = vwork(:,:,:) + case ('swflx_i');um_swflx(:,:,:) = vwork(:,:,:) + case ('lwflx_i');um_lwflx(:,:,:) = vwork(:,:,:) + case ('shflx_i');um_shflx(:,:,:) = vwork(:,:,:) + case ('press_i');um_press(:,:,:) = vwork(:,:,:) + case ('co2_ai');um_co2(:,:,:) = vwork(:,:,:) + case ('wnd_ai');um_wnd(:,:,:) = vwork(:,:,:) + case ('icenth_i');um_icenth(:,:,:) = vwork(:,:,:) + case ('icesth_i');um_icesth(:,:,:) = vwork(:,:,:) + case ('tsfice01');um_tsfice(:,:,1,:) = vwork(:,:,:) + case ('tsfice02');um_tsfice(:,:,2,:) = vwork(:,:,:) + case ('tsfice03');um_tsfice(:,:,3,:) = vwork(:,:,:) + case ('tsfice04');um_tsfice(:,:,4,:) = vwork(:,:,:) + case ('tsfice05');um_tsfice(:,:,5,:) = vwork(:,:,:) + case ('iceevp01');um_iceevp(:,:,1,:) = vwork(:,:,:) + case ('iceevp02');um_iceevp(:,:,2,:) = vwork(:,:,:) + case ('iceevp03');um_iceevp(:,:,3,:) = vwork(:,:,:) + case ('iceevp04');um_iceevp(:,:,4,:) = vwork(:,:,:) + case ('iceevp05');um_iceevp(:,:,5,:) = vwork(:,:,:) + end select + + if (my_task == 0 .or. 
ll_comparal) then + write(il_out,*) + write(il_out,*)'(from_atm) done: ', jf, trim(cl_read(jf)) + endif + + enddo + !BX: 20160623...... avoid initial "remap transport: bad departure points" (e.g.@(332,776))? + if (isteps == 0) then + um_taux = um_taux * 0.1 + um_tauy = um_tauy * 0.1 + endif + !BX. + +! call ice_HaloUpdate(um_thflx, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_pswflx, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_runoff, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_wme, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(um_rain, halo_info, field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_snow, halo_info, field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_evap, halo_info, field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_lhflx, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_tmlt, halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_bmlt, halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_taux, halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_tauy, halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_swflx, halo_info,field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_lwflx, halo_info,field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_shflx, halo_info,field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_press, halo_info,field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_co2, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(um_wnd, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(um_icenth,halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_icesth,halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_tsfice,halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_iceevp,halo_info, field_loc_center,field_type_vector) + + IF (rotate_winds) THEN !rotate_winds=.t. means oasis does not do the vector rotation. + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + tmpu = um_taux(i,j,iblk) ! on geographical coord. (T cell) + tmpv = um_tauy(i,j,iblk) + um_taux(i,j,iblk) = tmpu*cos(ANGLET(i,j,iblk)) & ! converted onto model curvelear + + tmpv*sin(ANGLET(i,j,iblk)) ! coord. (T cell) + um_tauy(i,j,iblk) = tmpv*cos(ANGLET(i,j,iblk)) & ! + - tmpu*sin(ANGLET(i,j,iblk)) + enddo + enddo + + enddo + + ENDIF !rotate_winds + + ! need do t-grid to u-grid shift for vectors since all coupling occur on + ! t-grid points: <==No! actually CICE requires the input wind on T grid! + ! (see comment in code ice_flux.F) + !call t2ugrid(uwnd1) + !call t2ugrid(vwnd1) + + !------------------------------- + !if ( chk_a2i_fields ) then + ! call check_a2i_fields(isteps) + !endif + !------------------------------- + + if ( chk_a2i_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine from_atm + +!======================================================================= + subroutine from_ocn(isteps) +!----------------------------! + + integer(kind=int_kind), intent(in) :: isteps + + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! 
block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) '(from_ocn) receiving coupling fields at rtime: ', isteps + if (chk_o2i_fields) then + if ( .not. file_exist('fields_o2i_in_ice.nc') ) then + call create_ncfile('fields_o2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_o2i_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_o2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + write(il_out,*) "prism_get from_ocn at sec: ", isteps + do jf = nrecv_a2i + 1, jpfldin + + if (my_task==0 .or. ll_comparal) then + + !jf-th field in + write(il_out,*) + write(il_out,*) '*** receiving coupling fields No. ', jf, cl_read(jf) + if(ll_comparal) then + call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork,counts,disps,resizedrecvsubarray, & + 0,MPI_COMM_ICE,ierror) + call broadcast_array(gwork, 0) +! gwork(l_ilo:l_ihi, l_jlo:l_jhi) = vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) + else + call prism_get_proto (il_var_id_in(jf), isteps, gwork, ierror) + endif + + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Recvd) then + write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror + call prism_abort_proto(il_comp_id, 'cice from_ocn','stop 1') + else + write(il_out,*) + write(il_out,*)'(from_ocn) rcvd at time with err: ',cl_read(jf),isteps,ierror + if(ll_comparal .and. chk_o2i_fields) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_o2i_fields) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif + + if (.not. ll_comparal) then + call scatter_global(vwork, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) + else + call unpack_global_dbl(vwork, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) + endif + + !Q: 'field_type_scalar' all right for 'vector' (ssu/ssv, sslx/ssly))?! + select case (trim(cl_read(jf))) + case ('sst_i'); + ocn_sst = vwork + case ('sss_i'); + ocn_sss = vwork + case ('ssu_i'); + ocn_ssu = vwork + case ('ssv_i'); + ocn_ssv = vwork + case ('sslx_i'); + ocn_sslx = vwork + case ('ssly_i'); + ocn_ssly = vwork + case ('pfmice_i'); + ocn_pfmice =vwork + case ('co2_oi'); + ocn_co2 = vwork + case ('co2fx_oi'); + ocn_co2fx =vwork + end select + + enddo + +! call ice_HaloUpdate(ocn_sst, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ocn_sss, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ocn_ssu, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(ocn_ssv, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(ocn_sslx, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(ocn_ssly, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(ocn_pfmice, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(ocn_co2, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ocn_co2fx, halo_info, field_loc_center,field_type_scalar) + + !------------------------------- + !if (chk_o2i_fields) then + ! 
call check_o2i_fields(isteps) + !endif + !------------------------------- + + if ( chk_o2i_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine from_ocn + +!======================================================================= + subroutine into_ocn(isteps) +!-----------------------------------! + + implicit none + + integer(kind=int_kind), intent(in) :: isteps + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(into_ocn) sending coupling fields at stime= ', isteps + if (chk_i2o_fields) then + if ( .not. file_exist('fields_i2o_in_ice.nc') ) then + call create_ncfile('fields_i2o_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_i2o_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_i2o_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + write(il_out,*) "prism_put into_ocn at sec: ", isteps + do jf = nsend_i2a + 1, jpfldout + +!CH: make sure the 'LIMITS' are to be released! + + select case(trim(cl_writ(jf))) +!20100531 for MYM's test (iostress_factor) ............. + case('strsu_io'); vwork = io_strsu * iostress_factor + case('strsv_io'); vwork = io_strsv * iostress_factor +!....................................................... + case('rain_io'); vwork = io_rain + case('snow_io'); vwork = io_snow + !case('stflx_io'); vwork = io_stflx + case('stflx_io') + if (limit_stflx) then + vwork = max(-5.e-6, min(io_stflx, 5.e-6)) + else + vwork = io_stflx + endif + !case('htflx_io'); vwork = io_htflx + !case('htflx_io'); vwork = max(io_htflx, -450.0) + !Jan2010: + case('htflx_io'); vwork = min(io_htflx,0.0) + case('swflx_io'); vwork = io_swflx + case('qflux_io'); vwork = io_qflux + case('shflx_io'); vwork = io_shflx + case('lwflx_io'); vwork = io_lwflx + case('runof_io') + if (use_core_runoff) then + vwork = core_runoff + else + vwork = io_runof + endif + case('press_io'); vwork = io_press + case('aice_io'); vwork = io_aice + case('melt_io'); vwork = io_melt + case('form_io'); vwork = io_form + case('co2_i1'); vwork = io_co2 + case('wnd_i1'); vwork = io_wnd + end select + + if(.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) + else + call pack_global_dbl(gwork, vwork, master_task, distrb_info) + vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) +! do iblk=1,nblocks_tot +! +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! +! vwork2d(this_block%i_glob(ilo):this_block%i_glob(ihi), & +! this_block%j_glob(jlo):this_block%j_glob(jhi)) = & +! vwork(ilo:ihi,jlo:jhi,distrb_info%blockLocalID(iblk)) +! endif +! end do + + endif + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) + write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + if(ll_comparal) then + call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_put_proto(il_var_id_out(jf), isteps, gwork, ierror) + endif + if ( ierror /= PRISM_Ok .and. 
ierror < PRISM_Sent) then + write(il_out,*) + write(il_out,*) '(into_ocn) Err in _put_ ', cl_writ(jf), isteps, ierror + call prism_abort_proto(il_comp_id, 'cice into_ocn','STOP 1') + else + write(il_out,*) + write(il_out,*)'(into_ocn) sent: ', cl_writ(jf), isteps, ierror + if(chk_i2o_fields .and. ll_comparal) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_i2o_fields) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif !my_task == 0 + + enddo !jf + + !-------------------------------------- + !if (chk_i2o_fields) then + ! call check_i2o_fields(isteps) + !endif + !-------------------------------------- + + if ( chk_i2o_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine into_ocn + +!======================================================================= + subroutine into_atm(isteps) +!----------------------------! + + integer(kind=int_kind), intent(in) :: isteps + integer(kind=int_kind) :: jf + + real(kind=dbl_kind) :: tmpu, tmpv + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + +!debug hxy599 +!if (isteps==runtime-3600) then +! chk_i2a_fields=.true. !save the last step +! currstep = 1 +! write (il_out,*) 'hxy599 debug: save i2a fields at time ', isteps +!end if + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(into_atm) sending coupling fields at stime= ', isteps + if (chk_i2a_fields) then + if ( .not. file_exist('fields_i2a_in_ice.nc') ) then + call create_ncfile('fields_i2a_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + else + write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_i2a_in_ice.nc',nf_write,ncid) ) + end if + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + IF (rotate_winds) THEN + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + !note note uvel/vvel are on the U-cell here. + tmpu = ia_uvel(i,j,iblk); tmpv = ia_vvel(i,j,iblk) ! ice/ocn velocity, m/s + ia_uvel(i,j,iblk) = tmpu*cos(ANGLE(i,j,iblk)) & ! remapped on to geographical + - tmpv*sin(ANGLE(i,j,iblk)) ! grid. + ia_vvel(i,j,iblk) = tmpv*cos(ANGLE(i,j,iblk)) & ! they also need be shifted + + tmpu*sin(ANGLE(i,j,iblk)) ! on to T-cell (below). 
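+          ! Note: this rotation (ANGLE, on U points) is the inverse of the one
+          ! applied to the incoming stresses in from_atm (ANGLET, on T points):
+          ! it takes the model-grid velocity back to geographic east/north
+          ! components before the u2tgrid_vector shift that follows this loop.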
+ enddo + enddo + + enddo + + ENDIF !rotate_winds + + !shift ia_uvel/ia_vvel onto T points before passing into coupler + call u2tgrid_vector(ia_uvel) + call u2tgrid_vector(ia_vvel) + + write(il_out,*) "prism_put into_atm at sec: ", isteps + do jf = 1, nsend_i2a + + select case (trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) + case('uvel_ia'); vwork = ia_uvel * ocn_ssuv_factor !note ice u/v are also + case('vvel_ia'); vwork = ia_vvel * ocn_ssuv_factor ! included here. + case('sstfz_ia'); vwork = ia_sstfz + case('co2_i2'); vwork = ia_co2 + case('co2fx_i2'); vwork = ia_co2fx + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + + if (.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) + else + call pack_global_dbl(gwork, vwork, master_task, distrb_info) + vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) +! do iblk=1,nblocks_tot +! +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! +! vwork2d(this_block%i_glob(ilo):this_block%i_glob(ihi), & +! this_block%j_glob(jlo):this_block%j_glob(jhi)) = & +! vwork(ilo:ihi,jlo:jhi,distrb_info%blockLocalID(iblk)) +! endif +! end do + + end if + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) + write(il_out,*) '*** sending coupling field No. 
', jf, cl_writ(jf) + + !call prism_put_inquire_proto(il_var_id_out(jf),isteps,ierror) + + write(il_out,*) + write(il_out,*) '(into_atm) what to do with this var==> Err= ',ierror + if(ll_comparal) then + call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_put_proto(il_var_id_out(jf), isteps, gwork, ierror) + endif + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Sent) then + write(il_out,*) + write(il_out,*) '(into_atm) Err in _put_ ', cl_writ(jf), isteps, ierror + call prism_abort_proto(il_comp_id, 'cice into_atm','STOP 1') + else + write(il_out,*) + write(il_out,*)'(into_atm) sent: ', cl_writ(jf), isteps, ierror + if(chk_i2a_fields .and. ll_comparal) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_i2a_fields) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif !my_task == 0 + + enddo !jf = 1, jpfldout + + !------------------------------- + !if (chk_i2a_fields) then + ! call check_i2a_fields(isteps) + !endif + !------------------------------- + + if ( chk_i2a_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine into_atm + +!======================================================================= + subroutine coupler_termination +!-------------------------------! + ! + ! Detach from MPI buffer + ! + call MPI_Buffer_Detach(rla_bufsend, il_bufsize, ierror) + deallocate (rla_bufsend) + !deallocate all the coupling associated arrays... (no bother...) + ! + ! 9- PSMILe termination + ! + + call MPI_Barrier(MPI_COMM_ICE, ierror) + call prism_terminate_proto (ierror) + if (ierror /= PRISM_Ok) then + if (my_task == 0) then + write (il_out,*) 'An error occured in prism_terminate = ', ierror + endif + else + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '==================*** END ***=================' + close(il_out) + endif + endif + ! + print * + print *, '********** End of CICE **********' + print * + + call MPI_Finalize (ierror) + + end subroutine coupler_termination + +!======================================================================= + subroutine decomp_def(id_part_id, id_length, id_imjm, & + id_rank, id_nbcplproc, ld_comparal, ld_mparout) +!-------------------------------------------------------! + ! + !use mod_prism_proto + !use mod_prism_def_partition_proto + + implicit none + + integer(kind=int_kind), dimension(:), allocatable :: il_paral ! Decomposition for each proc + integer(kind=int_kind) :: ig_nsegments ! Number of segments of process decomposition + integer(kind=int_kind) :: ig_parsize ! Size of array decomposition + integer(kind=int_kind) :: id_nbcplproc ! Number of processes involved in the coupling + integer(kind=int_kind) :: id_part_id ! Local partition ID + integer(kind=int_kind) :: id_imjm ! Total grid dimension, ib, ierror, my_task + integer(kind=int_kind) :: id_length ! Size of partial field for each process + integer(kind=int_kind) :: id_rank ! Rank of process + integer(kind=int_kind) :: ld_mparout ! Unit of log file + logical :: ld_comparal + integer(kind=int_kind) :: ib, ierror + character(len=80), parameter :: cdec='BOX' + ! + integer(kind=int_kind) :: ilo, ihi, jlo, jhi + ! + ! + ! Refer to oasis/psmile/prism/modules/mod_prism_proto.F90 for integer(kind=int_kind) value + ! of clim_xxxx parameters + ! + if ( .not. 
ld_comparal .and. id_rank == 0) then + ! Monoprocess model, or parallel model with only master process involved + ! in coupling: the entire field will be exchanged by the process. + ig_nsegments = 1 + ig_parsize = 3 + allocate(il_paral(ig_parsize)) + ! + il_paral ( clim_strategy ) = clim_serial + il_paral ( clim_offset ) = 0 + il_paral ( clim_length ) = id_imjm + id_length = id_imjm + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else + ! Parallel atm with all process involved in the coupling + ! + if (cdec == 'APPLE') then + ! Each process is responsible for a part of field defined by + ! the number of grid points and the offset of the first point + ! + write (ld_mparout,*) 'APPLE partitioning' + ig_nsegments = 1 + ig_parsize = 3 + allocate(il_paral(ig_parsize)) + write(ld_mparout,*)'ig_parsize',ig_parsize + ! + if (id_rank .LT. (id_nbcplproc-1)) then + il_paral ( clim_strategy ) = clim_apple + il_paral ( clim_length ) = id_imjm/id_nbcplproc + il_paral ( clim_offset ) = id_rank*(id_imjm/id_nbcplproc) + else + il_paral ( clim_strategy ) = clim_apple + il_paral ( clim_length ) = id_imjm-(id_rank*(id_imjm/id_nbcplproc)) + il_paral ( clim_offset ) = id_rank*(id_imjm/id_nbcplproc) + endif + id_length = il_paral(clim_length) + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else if (cdec == 'BOX') then + !B: CICE uses a kind of Cartisian decomposition which actually may NOT + ! be simply taken as "BOX" decomposition described here !!! + ! (there is an issue associated with the 'halo' boundary for each + ! segment and may NOT be treated as what we do below! + ! It needs further consideration to make this work correctly + ! for 'paralell coupling' if really needed in the future ...) + ! + ! Each process is responsible for a rectangular box + ! + write (ld_mparout,*) 'BOX partitioning' + ig_parsize = 5 + allocate(il_paral(ig_parsize)) + write(ld_mparout,*)'ig_parsize',ig_parsize + + !ilo = 1 + nghost + !ihi = nx_block - nghost + !jlo = 1 + nghost + !jhi = ny_block - nghost + + il_paral ( clim_strategy ) = clim_Box + il_paral ( clim_offset ) = nx_global * (l_jlo-1) + (l_ilo-1) + !il_paral ( clim_offset ) = (l_ilo-1) + il_paral ( clim_SizeX ) = l_ihi-l_ilo+1 + il_paral ( clim_SizeY ) = l_jhi-l_jlo+1 + il_paral ( clim_LdX ) = nx_global + + write(ld_mparout,*)'il_paral=',il_paral + + id_length = il_paral(clim_sizeX) * il_paral(clim_sizeY) + + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else if (cdec == 'ORANGE') then + !B: NOT FOR COMMON USE! + ! Each process is responsible for arbitrarily distributed + ! pieces of the field (here two segments by process) + ! + write (ld_mparout,*) 'ORANGE partitioning' + ig_nsegments = 2 + ig_parsize = 2 * ig_nsegments + 2 + write(ld_mparout,*)'ig_parsize',ig_parsize + allocate(il_paral(ig_parsize)) + ! + il_paral ( clim_strategy ) = clim_orange + il_paral ( clim_segments ) = 2 + il_paral ( clim_segments+1 ) = id_rank*768 + il_paral ( clim_segments+2 ) = 768 + il_paral ( clim_segments+3 ) = (id_rank+3)*768 + il_paral ( clim_segments+4 ) = 768 + id_length = 0 + do ib=1,2*il_paral(clim_segments) + if (mod(ib,2).eq.0) then + id_length = id_length + il_paral(clim_segments+ib) + endif + enddo + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! 
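+      !Worked illustration of the BOX settings above (hypothetical layout, for
+      !clarity only): assume nx_global = 360, ny_global = 300 decomposed over
+      !nprocsX = 2, nprocsY = 2, and take the rank owning the upper-right
+      !quadrant, i.e. l_ilo = 181, l_ihi = 360, l_jlo = 151, l_jhi = 300. Then
+      !   il_paral(clim_offset) = nx_global*(l_jlo-1) + (l_ilo-1)
+      !                         = 360*150 + 180 = 54180
+      !   il_paral(clim_SizeX)  = l_ihi - l_ilo + 1 = 180
+      !   il_paral(clim_SizeY)  = l_jhi - l_jlo + 1 = 150
+      !   il_paral(clim_LdX)    = nx_global = 360
+      !so id_length = 180*150 = 27000 points of the 360x300 global field.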
+ else + write (ld_mparout,*) 'incorrect decomposition ' + endif + endif + + end subroutine decomp_def + +!============================================================================ + +!============================================================================ + subroutine unpack_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & + field_loc, field_type) + +! !DESCRIPTION: +! This subroutine scatters a global-sized array to a distributed array. +! +! !REVISION HISTORY: +! same as module +! +! !REMARKS: +! This is the specific interface for double precision arrays +! corresponding to the generic interface scatter_global. + +! !USES: + + include 'mpif.h' + +! !INPUT PARAMETERS: + + integer (int_kind), intent(in) :: & + src_task ! task from which array should be scattered + + type (distrb), intent(in) :: & + dst_dist ! distribution of resulting blocks + + real (dbl_kind), dimension(:,:), intent(in) :: & + ARRAY_G ! array containing global field on src_task + + integer (int_kind), intent(in) :: & + field_type, &! id for type of field (scalar, vector, angle) + field_loc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + +! !OUTPUT PARAMETERS: + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + ARRAY ! array containing distributed field +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n,bid, &! dummy loop indices + nrecvs, &! actual number of messages received + isrc, jsrc, &! source addresses + dst_block, &! location of block in dst array + xoffset, yoffset, &! offsets for tripole boundary conditions + yoffset2, &! + isign, &! sign factor for tripole boundary conditions + ierr ! MPI error flag + + type (block) :: & + this_block ! block info for current block + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + rcv_request ! request array for receives + + integer (int_kind), dimension(:,:), allocatable :: & + rcv_status ! status array for receives + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer ! buffer for sending blocks + +!----------------------------------------------------------------------- +! +! initialize return array to zero and set up tripole quantities +! +!----------------------------------------------------------------------- + ARRAY = c0 + + this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it + if (this_block%tripoleTFlag) then + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 2 + yoffset = 0 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 1 + yoffset = -1 + case (field_loc_Eface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_Nface) ! cell face location + xoffset = 2 + yoffset = -1 + case (field_loc_noupdate) ! ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + else + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 1 + yoffset = 1 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 0 + yoffset = 0 + case (field_loc_Eface) ! cell face location + xoffset = 0 + yoffset = 1 + case (field_loc_Nface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_noupdate) ! 
ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + endif + select case (field_type) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case (field_type_noupdate) ! ghost cells never used - use cell center + isign = 1 + case default + call abort_ice('Unknown field type in scatter') + end select + + + !*** copy any local blocks + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + +! do j=1,ny_block +! do i=1,nx_block +! ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& +! this_block%j_glob(j)) +! end do +! end do + ARRAY(1:nx_block,1:ny_block,dst_block) = & + ARRAY_G(this_block%i_glob(1):this_block%i_glob(nx_block), & + this_block%j_glob(1):this_block%j_glob(ny_block)) + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + ! for yoffset=0 or 1, yoffset2=0,0 + ! for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + ARRAY(i,j-yoffset2,dst_block) & + = isign * ARRAY_G(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + endif + end do + + !----------------------------------------------------------------- + ! Ensure unused ghost cell values are 0 + !----------------------------------------------------------------- + + if (field_loc == field_loc_noupdate) then + do n=1,nblocks_tot + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + if (dst_block > 0) then + + ! north edge + do j = this_block%jhi+1,ny_block + do i = 1, nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! east edge + do j = 1, ny_block + do i = this_block%ihi+1,nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! south edge + do j = 1, this_block%jlo-1 + do i = 1, nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! 
west edge + do j = 1, ny_block + do i = 1, this_block%ilo-1 + ARRAY (i,j,dst_block) = c0 + enddo + enddo + + endif + enddo + endif + + end subroutine unpack_global_dbl +!============================================================================ + +!============================================================================ + subroutine pack_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist) + +! !DESCRIPTION: +! This subroutine gathers a distributed array to a global-sized +! array on the processor dst_task. +! +! !REVISION HISTORY: +! same as module +! +! !REMARKS: +! This is the specific inteface for double precision arrays +! corresponding to the generic interface gather_global. It is shown +! to provide information on the generic interface (the generic +! interface is identical, but chooses a specific inteface based +! on the data type of the input argument). + + +! !USES: + + include 'mpif.h' + +! !INPUT PARAMETERS: + + integer (int_kind), intent(in) :: & + dst_task ! task to which array should be gathered + + type (distrb), intent(in) :: & + src_dist ! distribution of blocks in the source array + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + ARRAY ! array containing horizontal slab of distributed field + +! !OUTPUT PARAMETERS: + + + real (dbl_kind), dimension(:,:), intent(inout) :: & + ARRAY_G ! array containing global horizontal field on dst_task + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop counters + nsends ,&! number of actual sends + src_block ,&! block locator for send + ierr ! MPI error flag + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + snd_request + + integer (int_kind), dimension(:,:), allocatable :: & + snd_status + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer + + type (block) :: & + this_block ! block info for current block + + do n=1,nblocks_tot + + !*** copy local blocks + + if (src_dist%blockLocation(n) == my_task+1) then + + this_block = get_block(n,n) + +! do j=this_block%jlo,this_block%jhi +! do i=this_block%ilo,this_block%ihi +! ARRAY_G(this_block%i_glob(i), & +! this_block%j_glob(j)) = & +! ARRAY(i,j,src_dist%blockLocalID(n)) +! end do +! end do + ARRAY_G(this_block%i_glob(this_block%ilo):this_block%i_glob(this_block%ihi), & + this_block%j_glob(this_block%jlo):this_block%j_glob(this_block%jhi)) = & + ARRAY(this_block%ilo:this_block%ihi,this_block%jlo:this_block%jhi,src_dist%blockLocalID(n)) + + !*** fill land blocks with special values + + else if (src_dist%blockLocation(n) == 0) then + + this_block = get_block(n,n) + +! do j=this_block%jlo,this_block%jhi +! do i=this_block%ilo,this_block%ihi +! ARRAY_G(this_block%i_glob(i), & +! this_block%j_glob(j)) = spval_dbl +! end do +! end do + ARRAY_G(this_block%i_glob(this_block%ilo):this_block%i_glob(this_block%ihi), & + this_block%j_glob(this_block%jlo):this_block%j_glob(this_block%jhi)) = spval_dbl + endif + + end do + + end subroutine pack_global_dbl +!============================================================================ + +!============================================================================== +subroutine save_restart_i2a(fname, nstep) +! output the last i2a forcing data in cice by the end of the run, +! 
to be read in at the beginning of next run by cice and sent to atm + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call open_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) +endif + +do jf = 1, nsend_i2a + select case (trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) + case('uvel_ia'); vwork = ia_uvel !* ocn_ssuv_factor !note ice u/v are also + case('vvel_ia'); vwork = ia_vvel !* ocn_ssuv_factor ! included here. + case('sstfz_ia'); vwork = ia_sstfz + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + +! if (.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) +! else +! call pack_global_dbl(gwork, vwork, master_task, distrb_info) +! 
end if + if (my_task == 0) then + call modify_nc2D(ncid, cl_writ(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +end subroutine save_restart_i2a +!========================================================== + + end module cpl_interface diff --git a/drivers/access/cpl_interface.F90_maice_bad b/drivers/access/cpl_interface.F90_maice_bad new file mode 100644 index 00000000..de4a0f89 --- /dev/null +++ b/drivers/access/cpl_interface.F90_maice_bad @@ -0,0 +1,2081 @@ +!============================================================================ + module cpl_interface +!============================================================================ +! coupling interface between CICE and the oasis3_25 coupler (via MPI2) using +! the PRISM System Model Interface (PSMILe). +!---------------------------------------------------------------------------- + + !prism stuff + use mod_prism + + !cice stuff + use ice_kinds_mod + use ice_communicate !, only : my_task, master_task + use ice_broadcast + use ice_blocks !, only : nx_block, ny_block, nghost + use ice_domain_size !, only : max_blocks, nx_global, ny_global, ncat + use ice_distribution, only : distrb, nprocsX, nprocsY + use ice_gather_scatter + use ice_constants + use ice_boundary, only : ice_HaloUpdate + use ice_domain !, only : distrb_info + use ice_grid, only : u2tgrid_vector + use ice_grid, only : ANGLE, ANGLET + use ice_exit, only : abort_ice + + !cpl stuff + use cpl_parameters + use cpl_netcdf_setup + use cpl_arrays_setup + use cpl_forcing_handler + + implicit none + + public :: prism_init, init_cpl, coupler_termination, get_time0_sstsss, & + from_atm, into_ocn, from_ocn, into_atm, save_restart_i2a + + private + + logical :: mpiflag + integer(kind=int_kind) :: ierror, ibou + character(len=9) :: chiceout + character(len=3) :: chout + logical :: ll_comparal ! paralell or mono-cpl coupling + integer(kind=int_kind) :: il_comp_id ! Component ID + integer(kind=int_kind) :: il_nbtotproc ! Total number of processes + integer(kind=int_kind) :: il_nbcplproc ! No of processes involved in coupling + integer(kind=int_kind) :: il_part_id ! Local partition ID + integer(kind=int_kind) :: il_length ! Size of partial field for each process + integer(kind=int_kind), dimension(2) :: il_var_nodims + integer(kind=int_kind), dimension(4) :: il_var_shape + + integer(kind=int_kind) :: l_ilo, l_ihi, l_jlo, l_jhi !local partition + integer(kind=int_kind) :: gh_ilo, gh_ihi, gh_jlo, gh_jhi !local ghost outline + integer :: sendsubarray, recvsubarray , resizedrecvsubarray + integer, dimension(:), allocatable :: counts, disps + + integer(kind=int_kind) :: il_flag ! Flag for grid writing + integer(kind=int_kind) :: il_status, il_fileid, il_varid + integer(kind=int_kind) :: io_size, ii, il_bufsize, il_real, il_bufsizebyt + integer(kind=int_kind) :: integer_byte_size, integer_io_size + real(kind=dbl_kind), dimension(:,:), allocatable :: rla_array + real(kind=dbl_kind), dimension(:), allocatable :: rla_bufsend + real(kind=dbl_kind), dimension(:,:), allocatable :: vwork2d + !local domain work array, 4 coupling data passing + contains + +!====================================================================== + subroutine prism_init +!-----------------------! + + include 'mpif.h' + + !----------------------------------- + ! 
'define' the model global domain: + !----------------------------------- + il_im = nx_global + il_jm = ny_global + il_imjm = il_im * il_jm + + !allocate rla_array to be used below + allocate (rla_array(il_im,il_jm) ) + + !print *, 'CICE: (prism_init) dbl_kind, ip_realwp_p= ',dbl_kind, ip_realwp_p + + !------------------- + ! Initialize PSMILe. + !------------------- + + ! Initialise MPI + mpiflag = .FALSE. + call MPI_Initialized (mpiflag, ierror) + print *,'CICE: (prism_init) BF MPI_INIT, mpiflag = ',mpiflag + + if ( .not. mpiflag ) then + call MPI_INIT(ierror) + endif + + call MPI_Initialized (mpiflag, ierror) + print *, 'CICE: (prism_init) AF MPI_INIT, mpiflag = ',mpiflag + + print * + print *, 'CICE: (prism_init) calling prism_init_comp_proto...' + + call prism_init_comp_proto (il_comp_id, cp_modnam, ierror) + + if (ierror /= PRISM_Ok) then + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 1') + else + print *, 'CICE: (prism_init) called prism_init_comp_proto !' + endif + + !B: the following part may not be really needed(?) + ! + ! Let's suppose the model attaches to a MPI buffer for its own use + ! + ! ! Sophisticated way to determine buffer size needed (without "kind") + ! ! Here one message containing rla_array + + integer_byte_size = BIT_SIZE(ii)/8 + inquire (iolength=io_size) ii + integer_io_size = io_size + inquire (iolength=io_size) rla_array(1,1) + il_real = io_size/integer_io_size*integer_byte_size + il_bufsize = il_imjm + MPI_BSEND_OVERHEAD/il_real + 1 + allocate (rla_bufsend(il_bufsize), stat = ierror) + il_bufsizebyt = il_bufsize * il_real + call MPI_Buffer_Attach(rla_bufsend, il_bufsizebyt, ierror) + + if (ierror /= PRISM_Ok) then + print *, 'CICE: (prism_init) Error in MPI_Buffer_Attach.' + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 2') + else + print *, 'CICE: (prism_init) MPI_Buffer_Attach ok!' + endif + ! + ! PSMILe attribution of local communicator. + ! + ! Either MPI_COMM_WORLD if MPI2 is used, + ! or a local communicator created by Oasis if MPI1 is used. + ! + call prism_get_localcomm_proto(il_commlocal, ierror) + ! + if (ierror /= PRISM_Ok) then + print *, 'CICE: Error in prism_get_localcomm_proto' + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 3') + else + print *, 'CICE: _get_localcomm_ OK! il_commlocal= ',il_commlocal + endif + + ! + ! Inquire if model is parallel or not and open the process log file + ! + ! print *, '* CICE: Entering init_cpl.....' + + print *, '* CICE (prism_init) calling MPI_Comm_Size ...' + call MPI_Comm_Size(il_commlocal, il_nbtotproc, ierror) + print *, '* CICE (prism_init) calling MPI_Comm_Rank ...' + call MPI_Comm_Rank(il_commlocal, my_task, ierror) + + print *, '* CICE (prism_init) il_commlocal, il_nbtotproc, my_task = ' + print *, '* CICE (prism_init) ', il_commlocal, il_nbtotproc, my_task + ! + il_nbcplproc = il_nbtotproc !multi-process coupling (real parallel cpl)! + !il_nbcplproc = 1 !mono process coupling + + if (il_nbtotproc /= 1 .and. il_nbcplproc == il_nbtotproc ) then + ll_comparal = .TRUE. ! multi-cpl coupling! + else + ll_comparal = .FALSE. !mono-cpl coupling! + endif + + print *, '* CICE: prism_init called OK!' + + end subroutine prism_init + +!======================================================================= + subroutine init_cpl + + use mpi + use ice_communicate +!--------------------! + integer(kind=int_kind) :: jf, jfs + integer(kind=int_kind), dimension(2) :: il_var_nodims ! see below + integer(kind=int_kind), dimension(4) :: il_var_shape ! 
see below + + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j, n + type (block) :: this_block ! block information for current block + + integer, dimension(2) :: starts,sizes,subsizes + integer(kind=mpi_address_kind) :: start, extent +! integer, dimension(:), allocatable :: counts, disps + real(kind=dbl_kind) :: realvalue + integer (int_kind) :: nprocs + integer (int_kind),dimension(:), allocatable :: vilo, vjlo + + nprocs = get_num_procs() + allocate(vilo(nprocs)) + allocate(vjlo(nprocs)) +!initialise partition to inexisting region + l_ilo=nx_global + l_ihi=0 + l_jlo=ny_global + l_jhi=0 + gh_ilo=nx_global + gh_ihi=0 + gh_jlo=ny_global + gh_jhi=0 + ! Open the process log file +!20100406 if (my_task == 0 .or. ll_comparal) then + il_out = 85 + my_task + write(chout,'(I3.3)')il_out + chiceout='iceout'//chout + open(il_out,file=chiceout,form='formatted') + + write(il_out,*) 'Number of processes:', il_nbtotproc + write(il_out,*) 'Local process number:', my_task + write(il_out,*) 'Local communicator is : ',il_commlocal + write(il_out,*) 'Grid layout: nx_global,ny_global= ',nx_global,ny_global + write(il_out,*) 'Grid decomposition: nx_block,ny_block,max_blocks= ',& + nx_block,ny_block,max_blocks +!20100406 endif + +! write(il_out,*) 'Number of blocks :', nblocks +! do iblk = 1, nblocks +! +! this_block = get_block(blocks_ice(iblk),iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +!! do j=this_block%jlo,this_block%jhi +!! do i=this_block%ilo,this_block%ihi +!! ARRAY_G(this_block%i_glob(i), & +!! this_block%j_glob(j)) = & +!! ARRAY(i,j,src_dist%blockLocalID(n)) +! +! write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi +! +! end do +!find out partition of this processor, which is done by init_domain_blocks + write(il_out,*) 'nblocks_x, nblocks_y, Number of tot blocks :', nblocks_x, nblocks_y, nblocks_tot +!!!!!!!!!!!! +! do iblk=1,nblocks_tot +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! !print local to global mapping +! write(il_out,*) 'block, local ilo ihi jlo jhi:', distrb_info%blockLocalID(iblk), ilo,ihi,jlo,jhi +! write(il_out,*) 'block global:', this_block%i_glob(ilo),this_block%i_glob(ihi), & +! this_block%j_glob(jlo),this_block%j_glob(jhi) +! endif +! end do + +!!debug + if (my_task == 0) then + write(il_out,*) "all block info:" + do iblk=1,nblocks_tot + this_block = get_block(iblk,iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + write(il_out,*) ' this block: cpu, iblock, jblock=', distrb_info%blockLocation(iblk)-1, this_block%iblock, this_block%jblock + write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + end do + end if + + do iblk=1,nblocks_tot + + if (distrb_info%blockLocation(iblk) == my_task+1) then + + this_block = get_block(iblk,iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + write(il_out,*) ' this block: iblock, jblock=', this_block%iblock, this_block%jblock +! 
write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi + write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + if (this_block%i_glob(ilo) < l_ilo) then + l_ilo = this_block%i_glob(ilo) + gh_ilo = this_block%i_glob(ilo-nghost) + endif + if (this_block%j_glob(jlo) < l_jlo) then + l_jlo = this_block%j_glob(jlo) + gh_jlo = this_block%j_glob(jlo-nghost) + endif + if (this_block%i_glob(ihi) > l_ihi) then + l_ihi = this_block%i_glob(ihi) + gh_ihi = this_block%i_glob(ihi+nghost) + endif + if (this_block%j_glob(jhi) > l_jhi) then + l_jhi = this_block%j_glob(jhi) + gh_jhi = this_block%j_glob(jhi+nghost) + endif +! l_ilo = min(l_ilo, this_block%i_glob(ilo)) +! l_ihi = max(l_ihi, this_block%i_glob(ihi)) +! l_jlo = min(l_jlo, this_block%j_glob(jlo)) +! l_jhi = max(l_jhi, this_block%j_glob(jhi)) +! else if (distrb_info%blockLocation(n) == 0) then +! write(il_out,*) ' land block:', n + + endif + end do + write(il_out,*) ' local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + write(il_out,*) ' partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 +!print ghost info + write(il_out,*) ' ghost global:',gh_ilo, gh_ihi, gh_jlo, gh_jhi + +!calculate partition using nprocsX and nprocsX + l_ilo=mod(my_task,nprocsX)*nx_global/nprocsX+1 + l_ihi=l_ilo + nx_global/nprocsX -1 + l_jlo=int(my_task/nprocsX) * ny_global/nprocsY+1 + l_jhi=l_jlo+ny_global/nprocsY - 1 + + write(il_out,*) ' 2local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + write(il_out,*) ' 2partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 + + call mpi_gather(l_ilo, 1, mpi_integer, vilo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) + call broadcast_array(vilo, 0) + call mpi_gather(l_jlo, 1, mpi_integer, vjlo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) + call broadcast_array(vjlo, 0) + +!create subarray of this rank + sizes(1)=l_ihi-l_ilo+1; sizes(2)=l_jhi-l_jlo+1 + subsizes(1)=l_ihi-l_ilo+1; subsizes(2)=l_jhi-l_jlo+1 + starts(1)=0; starts(2)=0 + call mpi_type_create_subarray(2, sizes, subsizes, starts, mpi_order_fortran, & + MPI_REAL8, sendsubarray, ierror) + call mpi_type_commit(sendsubarray,ierror) + if (my_task == 0) then ! create recv buffer in main cpu + sizes(1)=nx_global; sizes(2)=ny_global + subsizes(1)=l_ihi-l_ilo+1; subsizes(2)=l_jhi-l_jlo+1 + starts(1)=0; starts(2)=0 + call mpi_type_create_subarray(2, sizes, subsizes, starts, mpi_order_fortran, & + MPI_REAL8, recvsubarray, ierror) + call mpi_type_commit(recvsubarray, ierror) + extent = sizeof(realvalue) + start = 0 + call mpi_type_create_resized(recvsubarray, start, extent, resizedrecvsubarray, ierror) + call mpi_type_commit(resizedrecvsubarray,ierror) + end if + allocate(counts(nprocs),disps(nprocs)) + forall (n=1:nprocs) counts(n) = 1 + do n=1, nprocs + disps(n) = ((vjlo(n)-1)*nx_global + (vilo(n)-1)) + !disps(n) = ((vilo(n)-1)*ny_global + (vjlo(n)-1)) + end do + + write(il_out,*) ' vilo ', vilo + write(il_out,*) ' vjlo ', vjlo + write(il_out,*) ' counts ', counts + write(il_out,*) ' disps ', disps + +! if ( ll_comparal ) then +! il_im = l_ihi-l_ilo+1 !nx_global +! il_jm = l_jhi-l_jlo+1 !ny_global +! il_imjm = il_im * il_jm +! endif + if (ll_comparal) then + xdim=l_ihi-l_ilo+1 + ydim=l_jhi-l_jlo+1 + else + xdim=il_im + ydim=il_jm + endif + + +!----------------------------------------------------------------------- + if (my_task == 0 .or. ll_comparal) then + ! + ! The following steps need to be done: + ! -> by the process if cice is monoprocess; + ! 
-> only by the master process, if cice is parallel and only + ! master process is involved in the coupling; + ! -> by all processes, if cice is parallel and all processes + ! are involved in the coupling. + + call decomp_def (il_part_id, il_length, il_imjm, & + my_task, il_nbcplproc, ll_comparal, il_out) + + write(il_out,*)'(init_cpl) called decomp_def, my_task, ierror = ',my_task, ierror + + ! + ! PSMILe coupling fields declaration + ! + + il_var_nodims(1) = 2 ! rank of coupling field + il_var_nodims(2) = 1 ! number of bundles in coupling field (always 1) + !il_var_shape(1)= 1 ! min index for the coupling field local dim + !il_var_shape(2)= xdim !il_im ! max index for the coupling field local dim + !il_var_shape(3)= 1 + !il_var_shape(4)= ydim !il_jm + if (ll_comparal) then + il_var_shape(1)= 1 !l_ilo ! min index for the coupling field local dimension + il_var_shape(2)= l_ihi-l_ilo+1 ! max index for the coupling field local dim + il_var_shape(3)= 1 !l_jlo ! min index for the coupling field local dim + il_var_shape(4)= l_jhi-l_jlo+1 ! max index for the coupling field local dim + else + il_var_shape(1)= 1 ! min index for the coupling field local dimension + il_var_shape(2)= il_im ! max index for the coupling field local dim + il_var_shape(3)= 1 ! min index for the coupling field local dim + il_var_shape(4)= il_jm ! max index for the coupling field local dim + endif + + ! ?Does this help? + !il_var_shape(1)= 2 ! min index for the coupling field local dim + !il_var_shape(2)= il_im+1 ! max index for the coupling field local dim + !il_var_shape(3)= 2 + !il_var_shape(4)= il_jm+1 + + endif !my_task==0 .or. ll_comparal + + !*** ***! + !***B: we now define cl_writ/cl_read on all ranks! (20090403) ***! + !*** ***! + + ! + ! Define name (as in namcouple) and declare each field sent by ice + ! + + ! + ! ice ==> atm + ! + nsend_i2a = 0 + + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='isst_ia' + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'icecon',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'snwthk',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'icethk',jf + enddo + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='uvel_ia' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='vvel_ia' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='co2_i2' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='co2fx_i2' + ! new fields sending to UM GA7 + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='sstfz_ia' + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'foifr',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopt',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopk',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndfn',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndtn',jf + enddo + + if (my_task == 0) then + write(il_out,*) 'init_cpl: Number of fields sent to atm: ',nsend_i2a + endif + ! + ! ice ==> ocn + ! 
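+  !Note on the category-resolved field names defined above (icecon01..icecon05,
+  !snwthk.., pndtn..) and in the a2i receive list further below (tmlt01_i..,
+  !tsfice01..): they are generated by formatted internal writes over the ice
+  !thickness categories. A minimal sketch, assuming ncat = 5 as the hard-coded
+  !case lists elsewhere in this module do (n and m are illustrative running
+  !offsets in the style of nsend_i2a / nrecv_a2i):
+  !   do jf = 1, ncat
+  !      write(cl_writ(n+jf), '(a6,i2.2)') 'icecon', jf        ! -> 'icecon01'..'icecon05'
+  !      write(cl_read(m+jf), '(a4,i2.2,a2)') 'tmlt', jf, '_i' ! -> 'tmlt01_i'..'tmlt05_i'
+  !   enddo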
+ nsend_i2o = nsend_i2a + + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='strsu_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='strsv_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='rain_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='snow_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='stflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='htflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='swflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='qflux_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='shflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='lwflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='runof_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='press_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='aice_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='melt_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='form_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='co2_i1' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='wnd_i1' + + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) 'init_cpl: Number of fields sent to ocn: ',nsend_i2o - nsend_i2a + + if (nsend_i2o /= jpfldout) then + write(il_out,*) + write(il_out,*)'!!! Fatal Error: (init_cpl) nsend = ',nsend_i2o + write(il_out,*)'!!! It should be nsend = ',jpfldout + call abort_ice('CICE: Number of outgoing coupling fields incorrect!') + endif + + write(il_out,*) 'init_cpl: Total number of fields sent from ice: ',jpfldout + + !jpfldout == nsend_i2o! + !---------------------! + + do jf=1, jpfldout + call prism_def_var_proto (il_var_id_out(jf),cl_writ(jf), il_part_id, & + il_var_nodims, PRISM_Out, il_var_shape, PRISM_Real, ierror) + enddo + + endif + + ! + ! Define name (as in namcouple) and declare each field received by ice + ! + + ! + ! atm ==> ice + ! + nrecv_a2i = 0 + + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'thflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'pswflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'runoff_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'wme_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'rain_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'snow_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'evap_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'lhflx_i' + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a4,i2.2,a2)')'tmlt',jf,'_i' + enddo + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a4,i2.2,a2)')'bmlt',jf,'_i' + enddo + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'taux_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'tauy_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'swflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'lwflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'shflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'press_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'co2_ai' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'wnd_ai' + ! new fields recving from UM GA7 + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'icenth_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'icesth_i' + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a6,i2.2)')'tsfice',jf + enddo + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a6,i2.2)')'iceevp',jf + enddo + + if (my_task==0 .or. ll_comparal) then + write(il_out,*) 'init_cpl: Number of fields rcvd from atm: ',nrecv_a2i + endif + + ! + ! 
ocn ==> ice + ! + nrecv_o2i = nrecv_a2i + + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sst_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sss_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssu_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssv_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sslx_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssly_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'pfmice_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'co2_oi' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'co2fx_oi' + + if (my_task==0 .or. ll_comparal) then + + write(il_out,*) 'init_cpl: Number of fields rcvd from ocn: ',nrecv_o2i-nrecv_a2i + + if (nrecv_o2i /= jpfldin) then + write(il_out,*) + write(il_out,*)'!!! Fatal Error: (init_cpl) nrecv = ',nrecv_o2i + write(il_out,*)'!!! It should be nrecv = ',jpfldin + call abort_ice('CICE: Number of incoming coupling fields incorrect!') + endif + !jpfldin == nrecv_o2i! + !--------------------! + + write(il_out,*) 'init_cpl: Total number of fields rcvd by ice: ',jpfldin + + do jf=1, jpfldin + call prism_def_var_proto (il_var_id_in(jf), cl_read(jf), il_part_id, & + il_var_nodims, PRISM_In, il_var_shape, PRISM_Real, ierror) + enddo + + ! + ! PSMILe end of declaration phase + ! + call prism_enddef_proto (ierror) + + endif !my_task==0 + + ! + ! Allocate the 'coupling' fields (to be used) for EACH PROCESS:! + ! + + ! fields in: (local domain) + ! + ! from atm: + allocate (um_thflx(nx_block,ny_block,max_blocks)); um_thflx(:,:,:) = 0 + allocate (um_pswflx(nx_block,ny_block,max_blocks)); um_pswflx(:,:,:) = 0 + allocate (um_runoff(nx_block,ny_block,max_blocks)); um_runoff(:,:,:) = 0 + allocate (um_wme(nx_block,ny_block,max_blocks)); um_wme(:,:,:) = 0 + allocate (um_snow(nx_block,ny_block,max_blocks)); um_snow(:,:,:) = 0 + allocate (um_rain(nx_block,ny_block,max_blocks)); um_rain(:,:,:) = 0 + allocate (um_evap(nx_block,ny_block,max_blocks)); um_evap(:,:,:) = 0 + allocate (um_lhflx(nx_block,ny_block,max_blocks)); um_lhflx(:,:,:) = 0 + allocate (um_taux(nx_block,ny_block,max_blocks)); um_taux(:,:,:) = 0 + allocate (um_tauy(nx_block,ny_block,max_blocks)); um_tauy(:,:,:) = 0 + allocate (um_swflx(nx_block,ny_block,max_blocks)); um_swflx(:,:,:) = 0 + allocate (um_lwflx(nx_block,ny_block,max_blocks)); um_lwflx(:,:,:) = 0 + allocate (um_shflx(nx_block,ny_block,max_blocks)); um_shflx(:,:,:) = 0 + allocate (um_press(nx_block,ny_block,max_blocks)); um_press(:,:,:) = 0 + allocate (um_tmlt(nx_block,ny_block,ncat,max_blocks)); um_tmlt(:,:,:,:) = 0 + allocate (um_bmlt(nx_block,ny_block,ncat,max_blocks)); um_bmlt(:,:,:,:) = 0 + allocate (um_co2(nx_block,ny_block,max_blocks)); um_co2(:,:,:) = 0 + allocate (um_wnd(nx_block,ny_block,max_blocks)); um_wnd(:,:,:) = 0 + allocate (um_icenth(nx_block,ny_block,max_blocks)); um_icenth(:,:,:) = 0 + allocate (um_icesth(nx_block,ny_block,max_blocks)); um_icesth(:,:,:) = 0 + allocate (um_tsfice(nx_block,ny_block,ncat,max_blocks)); um_tsfice(:,:,:,:) = 0 + allocate (um_iceevp(nx_block,ny_block,ncat,max_blocks)); um_iceevp(:,:,:,:) = 0 + + ! + allocate ( core_runoff(nx_block,ny_block,max_blocks)); core_runoff(:,:,:) = 0. + ! + + ! 
from ocn: + allocate (ocn_sst(nx_block,ny_block,max_blocks)); ocn_sst(:,:,:) = 0 + allocate (ocn_sss(nx_block,ny_block,max_blocks)); ocn_sss(:,:,:) = 0 + allocate (ocn_ssu(nx_block,ny_block,max_blocks)); ocn_ssu(:,:,:) = 0 + allocate (ocn_ssv(nx_block,ny_block,max_blocks)); ocn_ssv(:,:,:) = 0 + allocate (ocn_sslx(nx_block,ny_block,max_blocks)); ocn_sslx(:,:,:) = 0 + allocate (ocn_ssly(nx_block,ny_block,max_blocks)); ocn_ssly(:,:,:) = 0 + allocate (ocn_pfmice(nx_block,ny_block,max_blocks)); ocn_pfmice(:,:,:) = 0 + allocate (ocn_co2(nx_block,ny_block,max_blocks)); ocn_co2(:,:,:) = 0 + allocate (ocn_co2fx(nx_block,ny_block,max_blocks)); ocn_co2fx(:,:,:) = 0 + + ! fields out: (local domain) + ! + ! to atm: + allocate (ia_sst(nx_block,ny_block,max_blocks)); ia_sst(:,:,:) = 0 + allocate (ia_uvel(nx_block,ny_block,max_blocks)); ia_uvel(:,:,:) = 0 + allocate (ia_vvel(nx_block,ny_block,max_blocks)); ia_vvel(:,:,:) = 0 + allocate (ia_aicen(nx_block,ny_block,ncat,max_blocks)); ia_aicen(:,:,:,:) = 0 + allocate (ia_snown(nx_block,ny_block,ncat,max_blocks)); ia_snown(:,:,:,:) = 0 + allocate (ia_thikn(nx_block,ny_block,ncat,max_blocks)); ia_thikn(:,:,:,:) = 0 + allocate (ia_co2(nx_block,ny_block,max_blocks)); ia_co2(:,:,:) = 0 + allocate (ia_co2fx(nx_block,ny_block,max_blocks)); ia_co2fx(:,:,:) = 0 + allocate (ia_sstfz(nx_block,ny_block,max_blocks)); ia_sstfz(:,:,:) = 0 + allocate (ia_foifr(nx_block,ny_block,ncat,max_blocks)); ia_foifr(:,:,:,:) = 0 + allocate (ia_itopt(nx_block,ny_block,ncat,max_blocks)); ia_itopt(:,:,:,:) = 0 + allocate (ia_itopk(nx_block,ny_block,ncat,max_blocks)); ia_itopk(:,:,:,:) = 0 + allocate (ia_pndfn(nx_block,ny_block,ncat,max_blocks)); ia_pndfn(:,:,:,:) = 0 + allocate (ia_pndtn(nx_block,ny_block,ncat,max_blocks)); ia_pndtn(:,:,:,:) = 0 + ! + ! to ocn: + allocate (io_strsu(nx_block,ny_block,max_blocks)); io_strsu(:,:,:) = 0 + allocate (io_strsv(nx_block,ny_block,max_blocks)); io_strsv(:,:,:) = 0 + allocate (io_rain (nx_block,ny_block,max_blocks)); io_rain (:,:,:) = 0 + allocate (io_snow (nx_block,ny_block,max_blocks)); io_snow (:,:,:) = 0 + allocate (io_stflx(nx_block,ny_block,max_blocks)); io_stflx(:,:,:) = 0 + allocate (io_htflx(nx_block,ny_block,max_blocks)); io_htflx(:,:,:) = 0 + allocate (io_swflx(nx_block,ny_block,max_blocks)); io_swflx(:,:,:) = 0 + allocate (io_qflux(nx_block,ny_block,max_blocks)); io_qflux(:,:,:) = 0 + allocate (io_lwflx(nx_block,ny_block,max_blocks)); io_lwflx(:,:,:) = 0 + allocate (io_shflx(nx_block,ny_block,max_blocks)); io_shflx(:,:,:) = 0 + allocate (io_runof(nx_block,ny_block,max_blocks)); io_runof(:,:,:) = 0 + allocate (io_press(nx_block,ny_block,max_blocks)); io_press(:,:,:) = 0 + allocate (io_aice(nx_block,ny_block,max_blocks)); io_aice(:,:,:) = 0 + allocate (io_melt(nx_block,ny_block,max_blocks)); io_melt(:,:,:) = 0 + allocate (io_form(nx_block,ny_block,max_blocks)); io_form(:,:,:) = 0 + allocate (io_co2(nx_block,ny_block,max_blocks)); io_co2(:,:,:) = 0 + allocate (io_wnd(nx_block,ny_block,max_blocks)); io_wnd(:,:,:) = 0 + + ! temporary arrays: + ! 
IO cpl int time-average + allocate (maice(nx_block,ny_block,max_blocks)); maice(:,:,:) = 0 + allocate (mstrocnxT(nx_block,ny_block,max_blocks)); mstrocnxT(:,:,:) = 0 + allocate (mstrocnyT(nx_block,ny_block,max_blocks)); mstrocnyT(:,:,:) = 0 + allocate (mfresh(nx_block,ny_block,max_blocks)); mfresh(:,:,:) = 0 + allocate (mfsalt(nx_block,ny_block,max_blocks)); mfsalt(:,:,:) = 0 + allocate (mfhocn(nx_block,ny_block,max_blocks)); mfhocn(:,:,:) = 0 + allocate (mfswthru(nx_block,ny_block,max_blocks)); mfswthru(:,:,:) = 0 + allocate (msicemass(nx_block,ny_block,max_blocks)); msicemass(:,:,:) = 0 + !BX--extra one: + allocate (maice_saved(nx_block,ny_block,max_blocks)); maice_saved(:,:,:) = 0 + + ! IA cpl int time-average (3D) + allocate (maiu(nx_block,ny_block,max_blocks)); maiu(:,:,:) = 0 + allocate (muvel(nx_block,ny_block,max_blocks)); muvel(:,:,:) = 0 + allocate (mvvel(nx_block,ny_block,max_blocks)); mvvel(:,:,:) = 0 + allocate (msst(nx_block,ny_block,max_blocks)); msst(:,:,:) = 0 + allocate (mssu(nx_block,ny_block,max_blocks)); mssu(:,:,:) = 0 + allocate (mssv(nx_block,ny_block,max_blocks)); mssv(:,:,:) = 0 + allocate (mco2(nx_block,ny_block,max_blocks)); mco2(:,:,:) = 0 + allocate (mco2fx(nx_block,ny_block,max_blocks)); mco2fx(:,:,:) = 0 + allocate (msstfz(nx_block,ny_block,max_blocks)); msstfz(:,:,:) = 0 + ! IA cpl int time-average (4D) + allocate (maicen(nx_block,ny_block,ncat,max_blocks)); maicen(:,:,:,:) = 0 + allocate (msnown(nx_block,ny_block,ncat,max_blocks)); msnown(:,:,:,:) = 0 + allocate (mthikn(nx_block,ny_block,ncat,max_blocks)); mthikn(:,:,:,:) = 0 + allocate (mfoifr(nx_block,ny_block,ncat,max_blocks)); mfoifr(:,:,:,:) = 0 + allocate (mitopt(nx_block,ny_block,ncat,max_blocks)); mitopt(:,:,:,:) = 0 + allocate (mitopk(nx_block,ny_block,ncat,max_blocks)); mitopk(:,:,:,:) = 0 + allocate (mpndfn(nx_block,ny_block,ncat,max_blocks)); mpndfn(:,:,:,:) = 0 + allocate (mpndtn(nx_block,ny_block,ncat,max_blocks)); mpndtn(:,:,:,:) = 0 +!BX: + allocate (maicen_saved(nx_block,ny_block,ncat,max_blocks)); maicen_saved(:,:,:,:) = 0 + + allocate (vwork(nx_block,ny_block,max_blocks)); vwork(:,:,:) = 0 + allocate (gwork(nx_global,ny_global)); gwork(:,:) = 0 + allocate (sicemass(nx_block,ny_block,max_blocks)); sicemass(:,:,:) = 0. + allocate (vwork2d(l_ilo:l_ihi, l_jlo:l_jhi)); vwork2d(:,:) = 0. !l_ihi-l_ilo+1, l_jhi-l_jlo+1 + end subroutine init_cpl + +!======================================================================= + subroutine from_atm(isteps) +!----------------------------! + + implicit none + + integer(kind=int_kind), intent(in) :: isteps + + real(kind=dbl_kind) :: tmpu, tmpv + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ncid,currstep,ll,ilout + + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(from_atm) receiving coupling fields at rtime= ', isteps + if (chk_a2i_fields) then + if ( .not. 
file_exist('fields_a2i_in_ice.nc') ) then + call create_ncfile('fields_a2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_a2i_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_a2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + write(il_out,*) + write(il_out,*) '(from_atm) Total number of fields to be rcvd: ', nrecv_a2i + endif + + write(il_out,*) "prism_get from_atm at sec: ", isteps + do jf = 1, nrecv_a2i + + if (my_task==0 .or. ll_comparal ) then + + !jf-th field in + write(il_out,*) + write(il_out,*) '*** receiving coupling field No. ', jf, cl_read(jf) + !call flush(il_out) + + if (ll_comparal) then + call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) !vwork(2:,2:,my_task+1), + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork,counts,disps,resizedrecvsubarray, & + 0,MPI_COMM_ICE,ierror) + call broadcast_array(gwork, 0) +! gwork(l_ilo:l_ihi, l_jlo:l_jhi) = vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) + else + call prism_get_proto (il_var_id_in(jf), isteps, gwork, ierror) + endif + + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Recvd) then + write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror + call prism_abort_proto(il_comp_id, 'cice from_atm','stop 1') + else + write(il_out,*) + write(il_out,*)'(from_atm) rcvd at time with err: ',cl_read(jf),isteps,ierror + + if (ll_comparal .and. chk_a2i_fields) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps, resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_a2i_fields) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif + + if (.not. ll_comparal ) then + call scatter_global(vwork,gwork,master_task,distrb_info, & + field_loc_center, field_type_scalar) + else + call unpack_global_dbl(vwork,gwork,master_task,distrb_info, & + field_loc_center, field_type_scalar) + endif ! not ll_comparal + +#if (MXBLCKS != 1) +#error The following code assumes that max_blocks == 1 +#endif + + !***Note following "select case" works only if cl_read(:) is defined at ALL ranks***! + !-----------------------------------------------------------------------------------! + select case (trim(cl_read(jf))) + case ('thflx_i'); + um_thflx(:,:,:)=vwork(:,:,:) + case ('pswflx_i'); + um_pswflx(:,:,:) =vwork(:,:,:) + case ('runoff_i'); + um_runoff(:,:,:) =vwork(:,:,:) + case ('wme_i'); + um_wme(:,:,:) = vwork(:,:,:) +! case ('rain_i'); um_rain(:,:,:) = vwork(:,:,:) +! 
case ('snow_i'); um_snow(:,:,:) = vwork(:,:,:) +!---20100825 -- just be cauious: ------------------------- + case ('rain_i'); + um_rain(:,:,:) =max(0.0,vwork(:,:,:)) + case ('snow_i'); + um_snow(:,:,:) =max(0.0,vwork(:,:,:)) +!--------------------------------------------------------- + case ('evap_i');um_evap(:,:,:) = vwork(:,:,:) + case ('lhflx_i');um_lhflx(:,:,:) = vwork(:,:,:) + case ('tmlt01_i');um_tmlt(:,:,1,:) = vwork(:,:,:) + case ('tmlt02_i');um_tmlt(:,:,2,:) = vwork(:,:,:) + case ('tmlt03_i');um_tmlt(:,:,3,:) = vwork(:,:,:) + case ('tmlt04_i');um_tmlt(:,:,4,:) = vwork(:,:,:) + case ('tmlt05_i');um_tmlt(:,:,5,:) = vwork(:,:,:) + case ('bmlt01_i');um_bmlt(:,:,1,:) = vwork(:,:,:) + case ('bmlt02_i');um_bmlt(:,:,2,:) = vwork(:,:,:) + case ('bmlt03_i');um_bmlt(:,:,3,:) = vwork(:,:,:) + case ('bmlt04_i');um_bmlt(:,:,4,:) = vwork(:,:,:) + case ('bmlt05_i');um_bmlt(:,:,5,:) = vwork(:,:,:) + case ('taux_i');um_taux(:,:,:) = vwork(:,:,:) + case ('tauy_i');um_tauy(:,:,:) = vwork(:,:,:) + case ('swflx_i');um_swflx(:,:,:) = vwork(:,:,:) + case ('lwflx_i');um_lwflx(:,:,:) = vwork(:,:,:) + case ('shflx_i');um_shflx(:,:,:) = vwork(:,:,:) + case ('press_i');um_press(:,:,:) = vwork(:,:,:) + case ('co2_ai');um_co2(:,:,:) = vwork(:,:,:) + case ('wnd_ai');um_wnd(:,:,:) = vwork(:,:,:) + case ('icenth_i');um_icenth(:,:,:) = vwork(:,:,:) + case ('icesth_i');um_icesth(:,:,:) = vwork(:,:,:) + case ('tsfice01');um_tsfice(:,:,1,:) = vwork(:,:,:) + case ('tsfice02');um_tsfice(:,:,2,:) = vwork(:,:,:) + case ('tsfice03');um_tsfice(:,:,3,:) = vwork(:,:,:) + case ('tsfice04');um_tsfice(:,:,4,:) = vwork(:,:,:) + case ('tsfice05');um_tsfice(:,:,5,:) = vwork(:,:,:) + case ('iceevp01');um_iceevp(:,:,1,:) = vwork(:,:,:) + case ('iceevp02');um_iceevp(:,:,2,:) = vwork(:,:,:) + case ('iceevp03');um_iceevp(:,:,3,:) = vwork(:,:,:) + case ('iceevp04');um_iceevp(:,:,4,:) = vwork(:,:,:) + case ('iceevp05');um_iceevp(:,:,5,:) = vwork(:,:,:) + end select + + if (my_task == 0 .or. ll_comparal) then + write(il_out,*) + write(il_out,*)'(from_atm) done: ', jf, trim(cl_read(jf)) + endif + + enddo + !BX: 20160623...... avoid initial "remap transport: bad departure points" (e.g.@(332,776))? + if (isteps == 0) then + um_taux = um_taux * 0.1 + um_tauy = um_tauy * 0.1 + endif + !BX. + +! call ice_HaloUpdate(um_thflx, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_pswflx, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_runoff, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_wme, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(um_rain, halo_info, field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_snow, halo_info, field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_evap, halo_info, field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_lhflx, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(um_tmlt, halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_bmlt, halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_taux, halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_tauy, halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_swflx, halo_info,field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_lwflx, halo_info,field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_shflx, halo_info,field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_press, halo_info,field_loc_center,field_type_vector) +! 
call ice_HaloUpdate(um_co2, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(um_wnd, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(um_icenth,halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_icesth,halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_tsfice,halo_info, field_loc_center,field_type_vector) +! call ice_HaloUpdate(um_iceevp,halo_info, field_loc_center,field_type_vector) + + IF (rotate_winds) THEN !rotate_winds=.t. means oasis does not do the vector rotation. + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + tmpu = um_taux(i,j,iblk) ! on geographical coord. (T cell) + tmpv = um_tauy(i,j,iblk) + um_taux(i,j,iblk) = tmpu*cos(ANGLET(i,j,iblk)) & ! converted onto model curvelear + + tmpv*sin(ANGLET(i,j,iblk)) ! coord. (T cell) + um_tauy(i,j,iblk) = tmpv*cos(ANGLET(i,j,iblk)) & ! + - tmpu*sin(ANGLET(i,j,iblk)) + enddo + enddo + + enddo + + ENDIF !rotate_winds + + ! need do t-grid to u-grid shift for vectors since all coupling occur on + ! t-grid points: <==No! actually CICE requires the input wind on T grid! + ! (see comment in code ice_flux.F) + !call t2ugrid(uwnd1) + !call t2ugrid(vwnd1) + + !------------------------------- + !if ( chk_a2i_fields ) then + ! call check_a2i_fields(isteps) + !endif + !------------------------------- + + if ( chk_a2i_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine from_atm + +!======================================================================= + subroutine from_ocn(isteps) +!----------------------------! + + integer(kind=int_kind), intent(in) :: isteps + + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) '(from_ocn) receiving coupling fields at rtime: ', isteps + if (chk_o2i_fields) then + if ( .not. file_exist('fields_o2i_in_ice.nc') ) then + call create_ncfile('fields_o2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_o2i_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_o2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + write(il_out,*) "prism_get from_ocn at sec: ", isteps + do jf = nrecv_a2i + 1, jpfldin + + if (my_task==0 .or. ll_comparal) then + + !jf-th field in + write(il_out,*) + write(il_out,*) '*** receiving coupling fields No. ', jf, cl_read(jf) + if(ll_comparal) then + call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork,counts,disps,resizedrecvsubarray, & + 0,MPI_COMM_ICE,ierror) + call broadcast_array(gwork, 0) +! gwork(l_ilo:l_ihi, l_jlo:l_jhi) = vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) + else + call prism_get_proto (il_var_id_in(jf), isteps, gwork, ierror) + endif + + if ( ierror /= PRISM_Ok .and. 
ierror < PRISM_Recvd) then + write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror + call prism_abort_proto(il_comp_id, 'cice from_ocn','stop 1') + else + write(il_out,*) + write(il_out,*)'(from_ocn) rcvd at time with err: ',cl_read(jf),isteps,ierror + if(ll_comparal .and. chk_o2i_fields) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_o2i_fields) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif + + if (.not. ll_comparal) then + call scatter_global(vwork, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) + else + call unpack_global_dbl(vwork, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) + endif + + !Q: 'field_type_scalar' all right for 'vector' (ssu/ssv, sslx/ssly))?! + select case (trim(cl_read(jf))) + case ('sst_i'); + ocn_sst = vwork + case ('sss_i'); + ocn_sss = vwork + case ('ssu_i'); + ocn_ssu = vwork + case ('ssv_i'); + ocn_ssv = vwork + case ('sslx_i'); + ocn_sslx = vwork + case ('ssly_i'); + ocn_ssly = vwork + case ('pfmice_i'); + ocn_pfmice =vwork + case ('co2_oi'); + ocn_co2 = vwork + case ('co2fx_oi'); + ocn_co2fx =vwork + end select + + enddo + +! call ice_HaloUpdate(ocn_sst, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ocn_sss, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ocn_ssu, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(ocn_ssv, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(ocn_sslx, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(ocn_ssly, halo_info, field_loc_center, field_type_vector) +! call ice_HaloUpdate(ocn_pfmice, halo_info,field_loc_center,field_type_scalar) +! call ice_HaloUpdate(ocn_co2, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ocn_co2fx, halo_info, field_loc_center,field_type_scalar) + + !------------------------------- + !if (chk_o2i_fields) then + ! call check_o2i_fields(isteps) + !endif + !------------------------------- + + !------------------------------------------------------------------------------- + !B: calculate freezing point here (before "time_average_ocn_fields_4_i2a") ! + !----- should use eos formula to calculate Tf for "consistency" with GCx ----! + Tf (:,:,:) = -depressT*ocn_sss(:,:,:) ! (deg C) + ! + !May use different formula for Tf such as TEOS-10 formulation: + ! + !r1_S0 = 0.875/35.16504 + !zzs(:,:,:) = sqrt(abs(ocn_sss(:,:,:)) * r1_S0) + !Tf(:,:,:) = ((((1.46873e-03 * zzs(:,:,:) - 9.64972e-03) * zzs(:,:,:) + & + ! 2.28348e-02) * zzs(:,:,:) - 3.12775e-02) * zzs(:,:,:) + & + ! 2.07679e-02) * zzs(:,:,:) - 5.87701e-02 + !Tf(:,:,:) = Tf(:,:,:) * sss(:,:,:) ! - 7.53e-4 * 5.0 !!!5.0 is depth in meters + !--------------------------------------------------------------------------------- + + if ( chk_o2i_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine from_ocn + +!======================================================================= + subroutine into_ocn(isteps) +!-----------------------------------! + + implicit none + + integer(kind=int_kind), intent(in) :: isteps + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! 
block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(into_ocn) sending coupling fields at stime= ', isteps + if (chk_i2o_fields) then + if ( .not. file_exist('fields_i2o_in_ice.nc') ) then + call create_ncfile('fields_i2o_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_i2o_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_i2o_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + write(il_out,*) "prism_put into_ocn at sec: ", isteps + do jf = nsend_i2a + 1, jpfldout + +!CH: make sure the 'LIMITS' are to be released! + + select case(trim(cl_writ(jf))) +!20100531 for MYM's test (iostress_factor) ............. + case('strsu_io'); vwork = io_strsu * iostress_factor + case('strsv_io'); vwork = io_strsv * iostress_factor +!....................................................... + case('rain_io'); vwork = io_rain + case('snow_io'); vwork = io_snow + !case('stflx_io'); vwork = io_stflx + case('stflx_io') + if (limit_stflx) then + vwork = max(-5.e-6, min(io_stflx, 5.e-6)) + else + vwork = io_stflx + endif + !case('htflx_io'); vwork = io_htflx + !case('htflx_io'); vwork = max(io_htflx, -450.0) + !Jan2010: + case('htflx_io'); vwork = min(io_htflx,0.0) + case('swflx_io'); vwork = io_swflx + case('qflux_io'); vwork = io_qflux + case('shflx_io'); vwork = io_shflx + case('lwflx_io'); vwork = io_lwflx + case('runof_io') + if (use_core_runoff) then + vwork = core_runoff + else + vwork = io_runof + endif + case('press_io'); vwork = io_press + case('aice_io'); vwork = io_aice + case('melt_io'); vwork = io_melt + case('form_io'); vwork = io_form + case('co2_i1'); vwork = io_co2 + case('wnd_i1'); vwork = io_wnd + end select + + if(.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) + else + call pack_global_dbl(gwork, vwork, master_task, distrb_info) + vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) +! do iblk=1,nblocks_tot +! +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! +! vwork2d(this_block%i_glob(ilo):this_block%i_glob(ihi), & +! this_block%j_glob(jlo):this_block%j_glob(jhi)) = & +! vwork(ilo:ihi,jlo:jhi,distrb_info%blockLocalID(iblk)) +! endif +! end do + + endif + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) + write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + if(ll_comparal) then + call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_put_proto(il_var_id_out(jf), isteps, gwork, ierror) + endif + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Sent) then + write(il_out,*) + write(il_out,*) '(into_ocn) Err in _put_ ', cl_writ(jf), isteps, ierror + call prism_abort_proto(il_comp_id, 'cice into_ocn','STOP 1') + else + write(il_out,*) + write(il_out,*)'(into_ocn) sent: ', cl_writ(jf), isteps, ierror + if(chk_i2o_fields .and. ll_comparal) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. 
chk_i2o_fields) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif !my_task == 0 + + enddo !jf + + !-------------------------------------- + !if (chk_i2o_fields) then + ! call check_i2o_fields(isteps) + !endif + !-------------------------------------- + + if ( chk_i2o_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine into_ocn + +!======================================================================= + subroutine into_atm(isteps) +!----------------------------! + + integer(kind=int_kind), intent(in) :: isteps + integer(kind=int_kind) :: jf + + real(kind=dbl_kind) :: tmpu, tmpv + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + +!debug hxy599 +!if (isteps==runtime-3600) then +! chk_i2a_fields=.true. !save the last step +! currstep = 1 +! write (il_out,*) 'hxy599 debug: save i2a fields at time ', isteps +!end if + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(into_atm) sending coupling fields at stime= ', isteps + if (chk_i2a_fields) then + if ( .not. file_exist('fields_i2a_in_ice.nc') ) then + call create_ncfile('fields_i2a_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + else + write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_i2a_in_ice.nc',nf_write,ncid) ) + end if + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + IF (rotate_winds) THEN + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + !note note uvel/vvel are on the U-cell here. + tmpu = ia_uvel(i,j,iblk); tmpv = ia_vvel(i,j,iblk) ! ice/ocn velocity, m/s + ia_uvel(i,j,iblk) = tmpu*cos(ANGLE(i,j,iblk)) & ! remapped on to geographical + - tmpv*sin(ANGLE(i,j,iblk)) ! grid. + ia_vvel(i,j,iblk) = tmpv*cos(ANGLE(i,j,iblk)) & ! they also need be shifted + + tmpu*sin(ANGLE(i,j,iblk)) ! on to T-cell (below). 
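+              ! In matrix form, the two assignments above apply (sketch only;
+              ! u_model/v_model stand for the tmpu/tmpv values and theta for
+              ! ANGLE(i,j,iblk) at the U point):
+              !   ( u_geo )   ( cos(theta)  -sin(theta) ) ( u_model )
+              !   ( v_geo ) = ( sin(theta)   cos(theta) ) ( v_model )
+              ! i.e. the inverse of the ANGLET rotation applied to taux/tauy in
+              ! from_atm (apart from ANGLE vs ANGLET sitting on U and T points).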
+ enddo + enddo + + enddo + + ENDIF !rotate_winds + + !shift ia_uvel/ia_vvel onto T points before passing into coupler + call u2tgrid_vector(ia_uvel) + call u2tgrid_vector(ia_vvel) + + write(il_out,*) "prism_put into_atm at sec: ", isteps + do jf = 1, nsend_i2a + + select case (trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) + case('uvel_ia'); vwork = ia_uvel * ocn_ssuv_factor !note ice u/v are also + case('vvel_ia'); vwork = ia_vvel * ocn_ssuv_factor ! included here. + case('sstfz_ia'); vwork = ia_sstfz + case('co2_i2'); vwork = ia_co2 + case('co2fx_i2'); vwork = ia_co2fx + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + + if (.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) + else + call pack_global_dbl(gwork, vwork, master_task, distrb_info) + vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) +! do iblk=1,nblocks_tot +! +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! +! vwork2d(this_block%i_glob(ilo):this_block%i_glob(ihi), & +! this_block%j_glob(jlo):this_block%j_glob(jhi)) = & +! vwork(ilo:ihi,jlo:jhi,distrb_info%blockLocalID(iblk)) +! endif +! end do + + end if + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) + write(il_out,*) '*** sending coupling field No. 
', jf, cl_writ(jf) + + !call prism_put_inquire_proto(il_var_id_out(jf),isteps,ierror) + + write(il_out,*) + write(il_out,*) '(into_atm) what to do with this var==> Err= ',ierror + if(ll_comparal) then + call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_put_proto(il_var_id_out(jf), isteps, gwork, ierror) + endif + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Sent) then + write(il_out,*) + write(il_out,*) '(into_atm) Err in _put_ ', cl_writ(jf), isteps, ierror + call prism_abort_proto(il_comp_id, 'cice into_atm','STOP 1') + else + write(il_out,*) + write(il_out,*)'(into_atm) sent: ', cl_writ(jf), isteps, ierror + if(chk_i2a_fields .and. ll_comparal) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_i2a_fields) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif !my_task == 0 + + enddo !jf = 1, jpfldout + + !------------------------------- + !if (chk_i2a_fields) then + ! call check_i2a_fields(isteps) + !endif + !------------------------------- + + if ( chk_i2a_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine into_atm + +!======================================================================= + subroutine coupler_termination +!-------------------------------! + ! + ! Detach from MPI buffer + ! + call MPI_Buffer_Detach(rla_bufsend, il_bufsize, ierror) + deallocate (rla_bufsend) + !deallocate all the coupling associated arrays... (no bother...) + ! + ! 9- PSMILe termination + ! + + call MPI_Barrier(MPI_COMM_ICE, ierror) + call prism_terminate_proto (ierror) + if (ierror /= PRISM_Ok) then + if (my_task == 0) then + write (il_out,*) 'An error occured in prism_terminate = ', ierror + endif + else + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '==================*** END ***=================' + close(il_out) + endif + endif + ! + print * + print *, '********** End of CICE **********' + print * + + call MPI_Finalize (ierror) + + end subroutine coupler_termination + +!======================================================================= + subroutine decomp_def(id_part_id, id_length, id_imjm, & + id_rank, id_nbcplproc, ld_comparal, ld_mparout) +!-------------------------------------------------------! + ! + !use mod_prism_proto + !use mod_prism_def_partition_proto + + implicit none + + integer(kind=int_kind), dimension(:), allocatable :: il_paral ! Decomposition for each proc + integer(kind=int_kind) :: ig_nsegments ! Number of segments of process decomposition + integer(kind=int_kind) :: ig_parsize ! Size of array decomposition + integer(kind=int_kind) :: id_nbcplproc ! Number of processes involved in the coupling + integer(kind=int_kind) :: id_part_id ! Local partition ID + integer(kind=int_kind) :: id_imjm ! Total grid dimension, ib, ierror, my_task + integer(kind=int_kind) :: id_length ! Size of partial field for each process + integer(kind=int_kind) :: id_rank ! Rank of process + integer(kind=int_kind) :: ld_mparout ! Unit of log file + logical :: ld_comparal + integer(kind=int_kind) :: ib, ierror + character(len=80), parameter :: cdec='BOX' + ! + integer(kind=int_kind) :: ilo, ihi, jlo, jhi + ! + ! + ! Refer to oasis/psmile/prism/modules/mod_prism_proto.F90 for integer(kind=int_kind) value + ! of clim_xxxx parameters + ! + if ( .not. 
ld_comparal .and. id_rank == 0) then + ! Monoprocess model, or parallel model with only the master process involved + ! in coupling: the entire field will be exchanged by the process. + ig_nsegments = 1 + ig_parsize = 3 + allocate(il_paral(ig_parsize)) + ! + il_paral ( clim_strategy ) = clim_serial + il_paral ( clim_offset ) = 0 + il_paral ( clim_length ) = id_imjm + id_length = id_imjm + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else + ! Parallel atm with all processes involved in the coupling + ! + if (cdec == 'APPLE') then + ! Each process is responsible for a part of the field defined by + ! the number of grid points and the offset of the first point + ! + write (ld_mparout,*) 'APPLE partitioning' + ig_nsegments = 1 + ig_parsize = 3 + allocate(il_paral(ig_parsize)) + write(ld_mparout,*)'ig_parsize',ig_parsize + ! + if (id_rank .LT. (id_nbcplproc-1)) then + il_paral ( clim_strategy ) = clim_apple + il_paral ( clim_length ) = id_imjm/id_nbcplproc + il_paral ( clim_offset ) = id_rank*(id_imjm/id_nbcplproc) + else + il_paral ( clim_strategy ) = clim_apple + il_paral ( clim_length ) = id_imjm-(id_rank*(id_imjm/id_nbcplproc)) + il_paral ( clim_offset ) = id_rank*(id_imjm/id_nbcplproc) + endif + id_length = il_paral(clim_length) + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else if (cdec == 'BOX') then + !B: CICE uses a kind of Cartesian decomposition which actually may NOT + ! be simply taken as the "BOX" decomposition described here !!! + ! (there is an issue associated with the 'halo' boundary for each + ! segment, which may NOT be treated as we do below! + ! It needs further consideration to make this work correctly + ! for 'parallel coupling' if really needed in the future ...) + ! + ! Each process is responsible for a rectangular box + ! + write (ld_mparout,*) 'BOX partitioning' + ig_parsize = 5 + allocate(il_paral(ig_parsize)) + write(ld_mparout,*)'ig_parsize',ig_parsize + + !ilo = 1 + nghost + !ihi = nx_block - nghost + !jlo = 1 + nghost + !jhi = ny_block - nghost + + il_paral ( clim_strategy ) = clim_Box + il_paral ( clim_offset ) = nx_global * (l_jlo-1) + (l_ilo-1) + !il_paral ( clim_offset ) = (l_ilo-1) + il_paral ( clim_SizeX ) = l_ihi-l_ilo+1 + il_paral ( clim_SizeY ) = l_jhi-l_jlo+1 + il_paral ( clim_LdX ) = nx_global + + write(ld_mparout,*)'il_paral=',il_paral + + id_length = il_paral(clim_sizeX) * il_paral(clim_sizeY) + + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else if (cdec == 'ORANGE') then + !B: NOT FOR COMMON USE! + ! Each process is responsible for arbitrarily distributed + ! pieces of the field (here two segments per process) + ! + write (ld_mparout,*) 'ORANGE partitioning' + ig_nsegments = 2 + ig_parsize = 2 * ig_nsegments + 2 + write(ld_mparout,*)'ig_parsize',ig_parsize + allocate(il_paral(ig_parsize)) + ! + il_paral ( clim_strategy ) = clim_orange + il_paral ( clim_segments ) = 2 + il_paral ( clim_segments+1 ) = id_rank*768 + il_paral ( clim_segments+2 ) = 768 + il_paral ( clim_segments+3 ) = (id_rank+3)*768 + il_paral ( clim_segments+4 ) = 768 + id_length = 0 + do ib=1,2*il_paral(clim_segments) + if (mod(ib,2).eq.0) then + id_length = id_length + il_paral(clim_segments+ib) + endif + enddo + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + !
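+ ! A worked example for the 'BOX' branch above (illustrative numbers only,
+ ! not from any actual configuration): with nx_global = 360 and a rank whose
+ ! sub-domain starts at l_ilo = 181, l_jlo = 151,
+ !   clim_offset = 360*(151-1) + (181-1) = 54180,
+ ! i.e. the zero-based 1-D index of the box's first point in the global field;
+ ! clim_SizeX/clim_SizeY hold the box extent and clim_LdX is set to the
+ ! global x dimension (nx_global).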
+ else + write (ld_mparout,*) 'incorrect decomposition ' + endif + endif + + end subroutine decomp_def + +!============================================================================ + +!============================================================================ + subroutine unpack_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & + field_loc, field_type) + +! !DESCRIPTION: +! This subroutine scatters a global-sized array to a distributed array. +! +! !REVISION HISTORY: +! same as module +! +! !REMARKS: +! This is the specific interface for double precision arrays +! corresponding to the generic interface scatter_global. + +! !USES: + + include 'mpif.h' + +! !INPUT PARAMETERS: + + integer (int_kind), intent(in) :: & + src_task ! task from which array should be scattered + + type (distrb), intent(in) :: & + dst_dist ! distribution of resulting blocks + + real (dbl_kind), dimension(:,:), intent(in) :: & + ARRAY_G ! array containing global field on src_task + + integer (int_kind), intent(in) :: & + field_type, &! id for type of field (scalar, vector, angle) + field_loc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + +! !OUTPUT PARAMETERS: + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + ARRAY ! array containing distributed field +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n,bid, &! dummy loop indices + nrecvs, &! actual number of messages received + isrc, jsrc, &! source addresses + dst_block, &! location of block in dst array + xoffset, yoffset, &! offsets for tripole boundary conditions + yoffset2, &! + isign, &! sign factor for tripole boundary conditions + ierr ! MPI error flag + + type (block) :: & + this_block ! block info for current block + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + rcv_request ! request array for receives + + integer (int_kind), dimension(:,:), allocatable :: & + rcv_status ! status array for receives + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer ! buffer for sending blocks + +!----------------------------------------------------------------------- +! +! initialize return array to zero and set up tripole quantities +! +!----------------------------------------------------------------------- + ARRAY = c0 + + this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it + if (this_block%tripoleTFlag) then + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 2 + yoffset = 0 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 1 + yoffset = -1 + case (field_loc_Eface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_Nface) ! cell face location + xoffset = 2 + yoffset = -1 + case (field_loc_noupdate) ! ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + else + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 1 + yoffset = 1 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 0 + yoffset = 0 + case (field_loc_Eface) ! cell face location + xoffset = 0 + yoffset = 1 + case (field_loc_Nface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_noupdate) ! 
ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + endif + select case (field_type) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case (field_type_noupdate) ! ghost cells never used - use cell center + isign = 1 + case default + call abort_ice('Unknown field type in scatter') + end select + + + !*** copy any local blocks + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + +! do j=1,ny_block +! do i=1,nx_block +! ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& +! this_block%j_glob(j)) +! end do +! end do + ARRAY(1:nx_block,1:ny_block,dst_block) = & + ARRAY_G(this_block%i_glob(1):this_block%i_glob(nx_block), & + this_block%j_glob(1):this_block%j_glob(ny_block)) + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + ! for yoffset=0 or 1, yoffset2=0,0 + ! for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + ARRAY(i,j-yoffset2,dst_block) & + = isign * ARRAY_G(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + endif + end do + + !----------------------------------------------------------------- + ! Ensure unused ghost cell values are 0 + !----------------------------------------------------------------- + + if (field_loc == field_loc_noupdate) then + do n=1,nblocks_tot + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + if (dst_block > 0) then + + ! north edge + do j = this_block%jhi+1,ny_block + do i = 1, nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! east edge + do j = 1, ny_block + do i = this_block%ihi+1,nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! south edge + do j = 1, this_block%jlo-1 + do i = 1, nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! 
west edge + do j = 1, ny_block + do i = 1, this_block%ilo-1 + ARRAY (i,j,dst_block) = c0 + enddo + enddo + + endif + enddo + endif + + end subroutine unpack_global_dbl +!============================================================================ + +!============================================================================ + subroutine pack_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist) + +! !DESCRIPTION: +! This subroutine gathers a distributed array to a global-sized +! array on the processor dst_task. +! +! !REVISION HISTORY: +! same as module +! +! !REMARKS: +! This is the specific inteface for double precision arrays +! corresponding to the generic interface gather_global. It is shown +! to provide information on the generic interface (the generic +! interface is identical, but chooses a specific inteface based +! on the data type of the input argument). + + +! !USES: + + include 'mpif.h' + +! !INPUT PARAMETERS: + + integer (int_kind), intent(in) :: & + dst_task ! task to which array should be gathered + + type (distrb), intent(in) :: & + src_dist ! distribution of blocks in the source array + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + ARRAY ! array containing horizontal slab of distributed field + +! !OUTPUT PARAMETERS: + + + real (dbl_kind), dimension(:,:), intent(inout) :: & + ARRAY_G ! array containing global horizontal field on dst_task + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop counters + nsends ,&! number of actual sends + src_block ,&! block locator for send + ierr ! MPI error flag + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + snd_request + + integer (int_kind), dimension(:,:), allocatable :: & + snd_status + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer + + type (block) :: & + this_block ! block info for current block + + do n=1,nblocks_tot + + !*** copy local blocks + + if (src_dist%blockLocation(n) == my_task+1) then + + this_block = get_block(n,n) + +! do j=this_block%jlo,this_block%jhi +! do i=this_block%ilo,this_block%ihi +! ARRAY_G(this_block%i_glob(i), & +! this_block%j_glob(j)) = & +! ARRAY(i,j,src_dist%blockLocalID(n)) +! end do +! end do + ARRAY_G(this_block%i_glob(this_block%ilo):this_block%i_glob(this_block%ihi), & + this_block%j_glob(this_block%jlo):this_block%j_glob(this_block%jhi)) = & + ARRAY(this_block%ilo:this_block%ihi,this_block%jlo:this_block%jhi,src_dist%blockLocalID(n)) + + !*** fill land blocks with special values + + else if (src_dist%blockLocation(n) == 0) then + + this_block = get_block(n,n) + +! do j=this_block%jlo,this_block%jhi +! do i=this_block%ilo,this_block%ihi +! ARRAY_G(this_block%i_glob(i), & +! this_block%j_glob(j)) = spval_dbl +! end do +! end do + ARRAY_G(this_block%i_glob(this_block%ilo):this_block%i_glob(this_block%ihi), & + this_block%j_glob(this_block%jlo):this_block%j_glob(this_block%jhi)) = spval_dbl + endif + + end do + + end subroutine pack_global_dbl +!============================================================================ + +!============================================================================== +subroutine save_restart_i2a(fname, nstep) +! output the last i2a forcing data in cice by the end of the run, +! 
to be read in at the beginning of next run by cice and sent to atm + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call open_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) +endif + +do jf = 1, nsend_i2a + select case (trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) + case('uvel_ia'); vwork = ia_uvel !* ocn_ssuv_factor !note ice u/v are also + case('vvel_ia'); vwork = ia_vvel !* ocn_ssuv_factor ! included here. + case('sstfz_ia'); vwork = ia_sstfz + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + +! if (.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) +! else +! call pack_global_dbl(gwork, vwork, master_task, distrb_info) +! 
end if + if (my_task == 0) then + call modify_nc2D(ncid, cl_writ(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +end subroutine save_restart_i2a +!========================================================== + + end module cpl_interface diff --git a/drivers/access/cpl_interface.F90_uphalo b/drivers/access/cpl_interface.F90_uphalo new file mode 100644 index 00000000..714ef999 --- /dev/null +++ b/drivers/access/cpl_interface.F90_uphalo @@ -0,0 +1,1819 @@ +!============================================================================ + module cpl_interface +!============================================================================ +! coupling interface between CICE and the oasis3_25 coupler (via MPI2) using +! the PRISM System Model Interface (PSMILe). +!---------------------------------------------------------------------------- + + !prism stuff + use mod_prism + + !cice stuff + use ice_kinds_mod + use ice_communicate !, only : my_task, master_task + use ice_broadcast + use ice_blocks !, only : nx_block, ny_block, nghost + use ice_domain_size !, only : max_blocks, nx_global, ny_global, ncat + use ice_distribution, only : distrb, nprocsX, nprocsY + use ice_gather_scatter + use ice_constants + use ice_boundary, only : ice_HaloUpdate + use ice_domain !, only : distrb_info + use ice_grid, only : u2tgrid_vector + use ice_grid, only : ANGLE, ANGLET + use ice_exit, only : abort_ice + + !cpl stuff + use cpl_parameters + use cpl_netcdf_setup + use cpl_arrays_setup + use cpl_forcing_handler + + implicit none + + public :: prism_init, init_cpl, coupler_termination, get_time0_sstsss, & + from_atm, into_ocn, from_ocn, into_atm, save_restart_i2a + + private + + logical :: mpiflag + integer(kind=int_kind) :: ierror, ibou + character(len=9) :: chiceout + character(len=3) :: chout + logical :: ll_comparal ! paralell or mono-cpl coupling + integer(kind=int_kind) :: il_comp_id ! Component ID + integer(kind=int_kind) :: il_nbtotproc ! Total number of processes + integer(kind=int_kind) :: il_nbcplproc ! No of processes involved in coupling + integer(kind=int_kind) :: il_part_id ! Local partition ID + integer(kind=int_kind) :: il_length ! Size of partial field for each process + integer(kind=int_kind), dimension(2) :: il_var_nodims + integer(kind=int_kind), dimension(4) :: il_var_shape + + integer(kind=int_kind) :: l_ilo, l_ihi, l_jlo, l_jhi !local partition + integer(kind=int_kind) :: gh_ilo, gh_ihi, gh_jlo, gh_jhi !local ghost outline + integer :: sendsubarray, recvsubarray , resizedrecvsubarray + integer, dimension(:), allocatable :: counts, disps + + integer(kind=int_kind) :: il_flag ! Flag for grid writing + integer(kind=int_kind) :: il_status, il_fileid, il_varid + integer(kind=int_kind) :: io_size, ii, il_bufsize, il_real, il_bufsizebyt + integer(kind=int_kind) :: integer_byte_size, integer_io_size + real(kind=dbl_kind), dimension(:,:), allocatable :: rla_array + real(kind=dbl_kind), dimension(:), allocatable :: rla_bufsend + real(kind=dbl_kind), dimension(:,:), allocatable :: vwork2d + !local domain work array, 4 coupling data passing + contains + +!====================================================================== + subroutine prism_init +!-----------------------! + + include 'mpif.h' + + !----------------------------------- + ! 
'define' the model global domain: + !----------------------------------- + il_im = nx_global + il_jm = ny_global + il_imjm = il_im * il_jm + + !allocate rla_array to be used below + allocate (rla_array(il_im,il_jm) ) + + !print *, 'CICE: (prism_init) dbl_kind, ip_realwp_p= ',dbl_kind, ip_realwp_p + + !------------------- + ! Initialize PSMILe. + !------------------- + + ! Initialise MPI + mpiflag = .FALSE. + call MPI_Initialized (mpiflag, ierror) + print *,'CICE: (prism_init) BF MPI_INIT, mpiflag = ',mpiflag + + if ( .not. mpiflag ) then + call MPI_INIT(ierror) + endif + + call MPI_Initialized (mpiflag, ierror) + print *, 'CICE: (prism_init) AF MPI_INIT, mpiflag = ',mpiflag + + print * + print *, 'CICE: (prism_init) calling prism_init_comp_proto...' + + call prism_init_comp_proto (il_comp_id, cp_modnam, ierror) + + if (ierror /= PRISM_Ok) then + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 1') + else + print *, 'CICE: (prism_init) called prism_init_comp_proto !' + endif + + !B: the following part may not be really needed(?) + ! + ! Let's suppose the model attaches to a MPI buffer for its own use + ! + ! ! Sophisticated way to determine buffer size needed (without "kind") + ! ! Here one message containing rla_array + + integer_byte_size = BIT_SIZE(ii)/8 + inquire (iolength=io_size) ii + integer_io_size = io_size + inquire (iolength=io_size) rla_array(1,1) + il_real = io_size/integer_io_size*integer_byte_size + il_bufsize = il_imjm + MPI_BSEND_OVERHEAD/il_real + 1 + allocate (rla_bufsend(il_bufsize), stat = ierror) + il_bufsizebyt = il_bufsize * il_real + call MPI_Buffer_Attach(rla_bufsend, il_bufsizebyt, ierror) + + if (ierror /= PRISM_Ok) then + print *, 'CICE: (prism_init) Error in MPI_Buffer_Attach.' + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 2') + else + print *, 'CICE: (prism_init) MPI_Buffer_Attach ok!' + endif + ! + ! PSMILe attribution of local communicator. + ! + ! Either MPI_COMM_WORLD if MPI2 is used, + ! or a local communicator created by Oasis if MPI1 is used. + ! + call prism_get_localcomm_proto(il_commlocal, ierror) + ! + if (ierror /= PRISM_Ok) then + print *, 'CICE: Error in prism_get_localcomm_proto' + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 3') + else + print *, 'CICE: _get_localcomm_ OK! il_commlocal= ',il_commlocal + endif + + ! + ! Inquire if model is parallel or not and open the process log file + ! + ! print *, '* CICE: Entering init_cpl.....' + + print *, '* CICE (prism_init) calling MPI_Comm_Size ...' + call MPI_Comm_Size(il_commlocal, il_nbtotproc, ierror) + print *, '* CICE (prism_init) calling MPI_Comm_Rank ...' + call MPI_Comm_Rank(il_commlocal, my_task, ierror) + + print *, '* CICE (prism_init) il_commlocal, il_nbtotproc, my_task = ' + print *, '* CICE (prism_init) ', il_commlocal, il_nbtotproc, my_task + ! + il_nbcplproc = il_nbtotproc !multi-process coupling (real parallel cpl)! + !il_nbcplproc = 1 !mono process coupling + + if (il_nbtotproc /= 1 .and. il_nbcplproc == il_nbtotproc ) then + ll_comparal = .TRUE. ! multi-cpl coupling! + else + ll_comparal = .FALSE. !mono-cpl coupling! + endif + + print *, '* CICE: prism_init called OK!' + + end subroutine prism_init + +!======================================================================= + subroutine init_cpl + + use mpi + use ice_communicate +!--------------------! + integer(kind=int_kind) :: jf, jfs + integer(kind=int_kind), dimension(2) :: il_var_nodims ! see below + integer(kind=int_kind), dimension(4) :: il_var_shape ! 
see below + + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j, n + type (block) :: this_block ! block information for current block + + integer, dimension(2) :: starts,sizes,subsizes + integer(kind=mpi_address_kind) :: start, extent +! integer, dimension(:), allocatable :: counts, disps + real(kind=dbl_kind) :: realvalue + integer (int_kind) :: nprocs + integer (int_kind),dimension(:), allocatable :: vilo, vjlo + + nprocs = get_num_procs() + allocate(vilo(nprocs)) + allocate(vjlo(nprocs)) +!initialise partition to inexisting region + l_ilo=nx_global + l_ihi=0 + l_jlo=ny_global + l_jhi=0 + gh_ilo=nx_global + gh_ihi=0 + gh_jlo=ny_global + gh_jhi=0 + ! Open the process log file +!20100406 if (my_task == 0 .or. ll_comparal) then + il_out = 85 + my_task + write(chout,'(I3.3)')il_out + chiceout='iceout'//chout + open(il_out,file=chiceout,form='formatted') + + write(il_out,*) 'Number of processes:', il_nbtotproc + write(il_out,*) 'Local process number:', my_task + write(il_out,*) 'Local communicator is : ',il_commlocal + write(il_out,*) 'Grid layout: nx_global,ny_global= ',nx_global,ny_global + write(il_out,*) 'Grid decomposition: nx_block,ny_block,max_blocks= ',& + nx_block,ny_block,max_blocks +!20100406 endif + +! write(il_out,*) 'Number of blocks :', nblocks +! do iblk = 1, nblocks +! +! this_block = get_block(blocks_ice(iblk),iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +!! do j=this_block%jlo,this_block%jhi +!! do i=this_block%ilo,this_block%ihi +!! ARRAY_G(this_block%i_glob(i), & +!! this_block%j_glob(j)) = & +!! ARRAY(i,j,src_dist%blockLocalID(n)) +! +! write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi +! +! end do +!find out partition of this processor, which is done by init_domain_blocks + write(il_out,*) 'nblocks_x, nblocks_y, Number of tot blocks :', nblocks_x, nblocks_y, nblocks_tot +!!!!!!!!!!!! +! do iblk=1,nblocks_tot +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! !print local to global mapping +! write(il_out,*) 'block, local ilo ihi jlo jhi:', distrb_info%blockLocalID(iblk), ilo,ihi,jlo,jhi +! write(il_out,*) 'block global:', this_block%i_glob(ilo),this_block%i_glob(ihi), & +! this_block%j_glob(jlo),this_block%j_glob(jhi) +! endif +! end do + +!!debug + if (my_task == 0) then + write(il_out,*) "all block info:" + do iblk=1,nblocks_tot + this_block = get_block(iblk,iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + write(il_out,*) ' this block: cpu, iblock, jblock=', distrb_info%blockLocation(iblk)-1, this_block%iblock, this_block%jblock + write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + end do + end if + + do iblk=1,nblocks_tot + + if (distrb_info%blockLocation(iblk) == my_task+1) then + + this_block = get_block(iblk,iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + write(il_out,*) ' this block: iblock, jblock=', this_block%iblock, this_block%jblock +! 
write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi + write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + if (this_block%i_glob(ilo) < l_ilo) then + l_ilo = this_block%i_glob(ilo) + gh_ilo = this_block%i_glob(ilo-nghost) + endif + if (this_block%j_glob(jlo) < l_jlo) then + l_jlo = this_block%j_glob(jlo) + gh_jlo = this_block%j_glob(jlo-nghost) + endif + if (this_block%i_glob(ihi) > l_ihi) then + l_ihi = this_block%i_glob(ihi) + gh_ihi = this_block%i_glob(ihi+nghost) + endif + if (this_block%j_glob(jhi) > l_jhi) then + l_jhi = this_block%j_glob(jhi) + gh_jhi = this_block%j_glob(jhi+nghost) + endif +! l_ilo = min(l_ilo, this_block%i_glob(ilo)) +! l_ihi = max(l_ihi, this_block%i_glob(ihi)) +! l_jlo = min(l_jlo, this_block%j_glob(jlo)) +! l_jhi = max(l_jhi, this_block%j_glob(jhi)) +! else if (distrb_info%blockLocation(n) == 0) then +! write(il_out,*) ' land block:', n + + endif + end do + write(il_out,*) ' local partition, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + write(il_out,*) ' partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 +!print ghost info + write(il_out,*) ' ghost global:',gh_ilo, gh_ihi, gh_jlo, gh_jhi + +!calculate partition using nprocsX and nprocsY + l_ilo=mod(my_task,nprocsX)*nx_global/nprocsX+1 + l_ihi=l_ilo + nx_global/nprocsX -1 + l_jlo=int(my_task/nprocsX) * ny_global/nprocsY+1 + l_jhi=l_jlo+ny_global/nprocsY - 1 + + write(il_out,*) ' 2local partition, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + write(il_out,*) ' 2partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 + + call mpi_gather(l_ilo, 1, mpi_integer, vilo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) + call broadcast_array(vilo, 0) + call mpi_gather(l_jlo, 1, mpi_integer, vjlo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) + call broadcast_array(vjlo, 0) + +!create subarray of this rank + sizes(1)=l_ihi-l_ilo+1; sizes(2)=l_jhi-l_jlo+1 + subsizes(1)=l_ihi-l_ilo+1; subsizes(2)=l_jhi-l_jlo+1 + starts(1)=0; starts(2)=0 + call mpi_type_create_subarray(2, sizes, subsizes, starts, mpi_order_fortran, & + MPI_REAL8, sendsubarray, ierror) + call mpi_type_commit(sendsubarray,ierror) + if (my_task == 0) then ! create recv buffer in main cpu + sizes(1)=nx_global; sizes(2)=ny_global + subsizes(1)=l_ihi-l_ilo+1; subsizes(2)=l_jhi-l_jlo+1 + starts(1)=0; starts(2)=0 + call mpi_type_create_subarray(2, sizes, subsizes, starts, mpi_order_fortran, & + MPI_REAL8, recvsubarray, ierror) + call mpi_type_commit(recvsubarray, ierror) + extent = sizeof(realvalue) + start = 0 + call mpi_type_create_resized(recvsubarray, start, extent, resizedrecvsubarray, ierror) + call mpi_type_commit(resizedrecvsubarray,ierror) + end if + allocate(counts(nprocs),disps(nprocs)) + forall (n=1:nprocs) counts(n) = 1 + do n=1, nprocs + disps(n) = ((vjlo(n)-1)*nx_global + (vilo(n)-1)) + !disps(n) = ((vilo(n)-1)*ny_global + (vjlo(n)-1)) + end do + + write(il_out,*) ' vilo ', vilo + write(il_out,*) ' vjlo ', vjlo + write(il_out,*) ' counts ', counts + write(il_out,*) ' disps ', disps + +! if ( ll_comparal ) then +! il_im = l_ihi-l_ilo+1 !nx_global +! il_jm = l_jhi-l_jlo+1 !ny_global +! il_imjm = il_im * il_jm +! endif + if (ll_comparal) then + xdim=l_ihi-l_ilo+1 + ydim=l_jhi-l_jlo+1 + else + xdim=il_im + ydim=il_jm + endif + + +!----------------------------------------------------------------------- + if (my_task == 0 .or. ll_comparal) then + ! + ! The following steps need to be done: + ! -> by the process if cice is monoprocess; + !
-> only by the master process, if cice is parallel and only + ! master process is involved in the coupling; + ! -> by all processes, if cice is parallel and all processes + ! are involved in the coupling. + + call decomp_def (il_part_id, il_length, il_imjm, & + my_task, il_nbcplproc, ll_comparal, il_out) + + write(il_out,*)'(init_cpl) called decomp_def, my_task, ierror = ',my_task, ierror + + ! + ! PSMILe coupling fields declaration + ! + + il_var_nodims(1) = 2 ! rank of coupling field + il_var_nodims(2) = 1 ! number of bundles in coupling field (always 1) + !il_var_shape(1)= 1 ! min index for the coupling field local dim + !il_var_shape(2)= xdim !il_im ! max index for the coupling field local dim + !il_var_shape(3)= 1 + !il_var_shape(4)= ydim !il_jm + if (ll_comparal) then + il_var_shape(1)= 1 !l_ilo ! min index for the coupling field local dimension + il_var_shape(2)= l_ihi-l_ilo+1 ! max index for the coupling field local dim + il_var_shape(3)= 1 !l_jlo ! min index for the coupling field local dim + il_var_shape(4)= l_jhi-l_jlo+1 ! max index for the coupling field local dim + else + il_var_shape(1)= 1 ! min index for the coupling field local dimension + il_var_shape(2)= il_im ! max index for the coupling field local dim + il_var_shape(3)= 1 ! min index for the coupling field local dim + il_var_shape(4)= il_jm ! max index for the coupling field local dim + endif + + ! ?Does this help? + !il_var_shape(1)= 2 ! min index for the coupling field local dim + !il_var_shape(2)= il_im+1 ! max index for the coupling field local dim + !il_var_shape(3)= 2 + !il_var_shape(4)= il_jm+1 + + endif !my_task==0 .or. ll_comparal + + !*** ***! + !***B: we now define cl_writ/cl_read on all ranks! (20090403) ***! + !*** ***! + + ! + ! Define name (as in namcouple) and declare each field sent by ice + ! + + ! + ! ice ==> atm + ! + nsend_i2a = 0 + + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='isst_ia' + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'icecon',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'snwthk',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'icethk',jf + enddo + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='uvel_ia' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='vvel_ia' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='co2_i2' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='co2fx_i2' + ! new fields sending to UM GA7 + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='sstfz_ia' + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'foifr',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopt',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopk',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndfn',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndtn',jf + enddo + + if (my_task == 0) then + write(il_out,*) 'init_cpl: Number of fields sent to atm: ',nsend_i2a + endif + ! + ! ice ==> ocn + ! 
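+ ! Note: nsend_i2o below continues the same counter, so cl_writ(1:nsend_i2a)
+ ! holds the ice ==> atm names and cl_writ(nsend_i2a+1:jpfldout) the
+ ! ice ==> ocn names; into_atm loops over jf = 1, nsend_i2a and into_ocn over
+ ! jf = nsend_i2a+1, jpfldout accordingly. The numbered names are built with
+ ! the i2.2 edit descriptor, e.g. (sketch only; 'name' is any character(len=8)
+ ! variable) write(name,'(a6,i2.2)') 'icecon', 3 gives 'icecon03', and they
+ ! must match the corresponding namcouple entries exactly.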
+ nsend_i2o = nsend_i2a + + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='strsu_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='strsv_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='rain_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='snow_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='stflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='htflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='swflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='qflux_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='shflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='lwflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='runof_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='press_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='aice_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='melt_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='form_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='co2_i1' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='wnd_i1' + + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) 'init_cpl: Number of fields sent to ocn: ',nsend_i2o - nsend_i2a + + if (nsend_i2o /= jpfldout) then + write(il_out,*) + write(il_out,*)'!!! Fatal Error: (init_cpl) nsend = ',nsend_i2o + write(il_out,*)'!!! It should be nsend = ',jpfldout + call abort_ice('CICE: Number of outgoing coupling fields incorrect!') + endif + + write(il_out,*) 'init_cpl: Total number of fields sent from ice: ',jpfldout + + !jpfldout == nsend_i2o! + !---------------------! + + do jf=1, jpfldout + call prism_def_var_proto (il_var_id_out(jf),cl_writ(jf), il_part_id, & + il_var_nodims, PRISM_Out, il_var_shape, PRISM_Real, ierror) + enddo + + endif + + ! + ! Define name (as in namcouple) and declare each field received by ice + ! + + ! + ! atm ==> ice + ! + nrecv_a2i = 0 + + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'thflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'pswflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'runoff_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'wme_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'rain_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'snow_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'evap_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'lhflx_i' + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a4,i2.2,a2)')'tmlt',jf,'_i' + enddo + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a4,i2.2,a2)')'bmlt',jf,'_i' + enddo + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'taux_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'tauy_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'swflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'lwflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'shflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'press_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'co2_ai' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'wnd_ai' + ! new fields recving from UM GA7 + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'icenth_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'icesth_i' + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a6,i2.2)')'tsfice',jf + enddo + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a6,i2.2)')'iceevp',jf + enddo + + if (my_task==0 .or. ll_comparal) then + write(il_out,*) 'init_cpl: Number of fields rcvd from atm: ',nrecv_a2i + endif + + ! + ! 
ocn ==> ice + ! + nrecv_o2i = nrecv_a2i + + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sst_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sss_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssu_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssv_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sslx_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssly_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'pfmice_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'co2_oi' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'co2fx_oi' + + if (my_task==0 .or. ll_comparal) then + + write(il_out,*) 'init_cpl: Number of fields rcvd from ocn: ',nrecv_o2i-nrecv_a2i + + if (nrecv_o2i /= jpfldin) then + write(il_out,*) + write(il_out,*)'!!! Fatal Error: (init_cpl) nrecv = ',nrecv_o2i + write(il_out,*)'!!! It should be nrecv = ',jpfldin + call abort_ice('CICE: Number of incoming coupling fields incorrect!') + endif + !jpfldin == nrecv_o2i! + !--------------------! + + write(il_out,*) 'init_cpl: Total number of fields rcvd by ice: ',jpfldin + + do jf=1, jpfldin + call prism_def_var_proto (il_var_id_in(jf), cl_read(jf), il_part_id, & + il_var_nodims, PRISM_In, il_var_shape, PRISM_Real, ierror) + enddo + + ! + ! PSMILe end of declaration phase + ! + call prism_enddef_proto (ierror) + + endif !my_task==0 + + ! + ! Allocate the 'coupling' fields (to be used) for EACH PROCESS:! + ! + + ! fields in: (local domain) + ! + ! from atm: + allocate (um_thflx(nx_block,ny_block,max_blocks)); um_thflx(:,:,:) = 0 + allocate (um_pswflx(nx_block,ny_block,max_blocks)); um_pswflx(:,:,:) = 0 + allocate (um_runoff(nx_block,ny_block,max_blocks)); um_runoff(:,:,:) = 0 + allocate (um_wme(nx_block,ny_block,max_blocks)); um_wme(:,:,:) = 0 + allocate (um_snow(nx_block,ny_block,max_blocks)); um_snow(:,:,:) = 0 + allocate (um_rain(nx_block,ny_block,max_blocks)); um_rain(:,:,:) = 0 + allocate (um_evap(nx_block,ny_block,max_blocks)); um_evap(:,:,:) = 0 + allocate (um_lhflx(nx_block,ny_block,max_blocks)); um_lhflx(:,:,:) = 0 + allocate (um_taux(nx_block,ny_block,max_blocks)); um_taux(:,:,:) = 0 + allocate (um_tauy(nx_block,ny_block,max_blocks)); um_tauy(:,:,:) = 0 + allocate (um_swflx(nx_block,ny_block,max_blocks)); um_swflx(:,:,:) = 0 + allocate (um_lwflx(nx_block,ny_block,max_blocks)); um_lwflx(:,:,:) = 0 + allocate (um_shflx(nx_block,ny_block,max_blocks)); um_shflx(:,:,:) = 0 + allocate (um_press(nx_block,ny_block,max_blocks)); um_press(:,:,:) = 0 + allocate (um_tmlt(nx_block,ny_block,ncat,max_blocks)); um_tmlt(:,:,:,:) = 0 + allocate (um_bmlt(nx_block,ny_block,ncat,max_blocks)); um_bmlt(:,:,:,:) = 0 + allocate (um_co2(nx_block,ny_block,max_blocks)); um_co2(:,:,:) = 0 + allocate (um_wnd(nx_block,ny_block,max_blocks)); um_wnd(:,:,:) = 0 + allocate (um_icenth(nx_block,ny_block,max_blocks)); um_icenth(:,:,:) = 0 + allocate (um_icesth(nx_block,ny_block,max_blocks)); um_icesth(:,:,:) = 0 + allocate (um_tsfice(nx_block,ny_block,ncat,max_blocks)); um_tsfice(:,:,:,:) = 0 + allocate (um_iceevp(nx_block,ny_block,ncat,max_blocks)); um_iceevp(:,:,:,:) = 0 + + ! + allocate ( core_runoff(nx_block,ny_block,max_blocks)); core_runoff(:,:,:) = 0. + ! + + ! 
from ocn: + allocate (ocn_sst(nx_block,ny_block,max_blocks)); ocn_sst(:,:,:) = 0 + allocate (ocn_sss(nx_block,ny_block,max_blocks)); ocn_sss(:,:,:) = 0 + allocate (ocn_ssu(nx_block,ny_block,max_blocks)); ocn_ssu(:,:,:) = 0 + allocate (ocn_ssv(nx_block,ny_block,max_blocks)); ocn_ssv(:,:,:) = 0 + allocate (ocn_sslx(nx_block,ny_block,max_blocks)); ocn_sslx(:,:,:) = 0 + allocate (ocn_ssly(nx_block,ny_block,max_blocks)); ocn_ssly(:,:,:) = 0 + allocate (ocn_pfmice(nx_block,ny_block,max_blocks)); ocn_pfmice(:,:,:) = 0 + allocate (ocn_co2(nx_block,ny_block,max_blocks)); ocn_co2(:,:,:) = 0 + allocate (ocn_co2fx(nx_block,ny_block,max_blocks)); ocn_co2fx(:,:,:) = 0 + + ! fields out: (local domain) + ! + ! to atm: + allocate (ia_sst(nx_block,ny_block,max_blocks)); ia_sst(:,:,:) = 0 + allocate (ia_uvel(nx_block,ny_block,max_blocks)); ia_uvel(:,:,:) = 0 + allocate (ia_vvel(nx_block,ny_block,max_blocks)); ia_vvel(:,:,:) = 0 + allocate (ia_aicen(nx_block,ny_block,ncat,max_blocks)); ia_aicen(:,:,:,:) = 0 + allocate (ia_snown(nx_block,ny_block,ncat,max_blocks)); ia_snown(:,:,:,:) = 0 + allocate (ia_thikn(nx_block,ny_block,ncat,max_blocks)); ia_thikn(:,:,:,:) = 0 + allocate (ia_co2(nx_block,ny_block,max_blocks)); ia_co2(:,:,:) = 0 + allocate (ia_co2fx(nx_block,ny_block,max_blocks)); ia_co2fx(:,:,:) = 0 + allocate (ia_sstfz(nx_block,ny_block,max_blocks)); ia_sstfz(:,:,:) = 0 + allocate (ia_foifr(nx_block,ny_block,ncat,max_blocks)); ia_foifr(:,:,:,:) = 0 + allocate (ia_itopt(nx_block,ny_block,ncat,max_blocks)); ia_itopt(:,:,:,:) = 0 + allocate (ia_itopk(nx_block,ny_block,ncat,max_blocks)); ia_itopk(:,:,:,:) = 0 + allocate (ia_pndfn(nx_block,ny_block,ncat,max_blocks)); ia_pndfn(:,:,:,:) = 0 + allocate (ia_pndtn(nx_block,ny_block,ncat,max_blocks)); ia_pndtn(:,:,:,:) = 0 + ! + ! to ocn: + allocate (io_strsu(nx_block,ny_block,max_blocks)); io_strsu(:,:,:) = 0 + allocate (io_strsv(nx_block,ny_block,max_blocks)); io_strsv(:,:,:) = 0 + allocate (io_rain (nx_block,ny_block,max_blocks)); io_rain (:,:,:) = 0 + allocate (io_snow (nx_block,ny_block,max_blocks)); io_snow (:,:,:) = 0 + allocate (io_stflx(nx_block,ny_block,max_blocks)); io_stflx(:,:,:) = 0 + allocate (io_htflx(nx_block,ny_block,max_blocks)); io_htflx(:,:,:) = 0 + allocate (io_swflx(nx_block,ny_block,max_blocks)); io_swflx(:,:,:) = 0 + allocate (io_qflux(nx_block,ny_block,max_blocks)); io_qflux(:,:,:) = 0 + allocate (io_lwflx(nx_block,ny_block,max_blocks)); io_lwflx(:,:,:) = 0 + allocate (io_shflx(nx_block,ny_block,max_blocks)); io_shflx(:,:,:) = 0 + allocate (io_runof(nx_block,ny_block,max_blocks)); io_runof(:,:,:) = 0 + allocate (io_press(nx_block,ny_block,max_blocks)); io_press(:,:,:) = 0 + allocate (io_aice(nx_block,ny_block,max_blocks)); io_aice(:,:,:) = 0 + allocate (io_melt(nx_block,ny_block,max_blocks)); io_melt(:,:,:) = 0 + allocate (io_form(nx_block,ny_block,max_blocks)); io_form(:,:,:) = 0 + allocate (io_co2(nx_block,ny_block,max_blocks)); io_co2(:,:,:) = 0 + allocate (io_wnd(nx_block,ny_block,max_blocks)); io_wnd(:,:,:) = 0 + + ! temporary arrays: + ! 
IO cpl int time-average + allocate (maice(nx_block,ny_block,max_blocks)); maice(:,:,:) = 0 + allocate (mstrocnxT(nx_block,ny_block,max_blocks)); mstrocnxT(:,:,:) = 0 + allocate (mstrocnyT(nx_block,ny_block,max_blocks)); mstrocnyT(:,:,:) = 0 + allocate (mfresh(nx_block,ny_block,max_blocks)); mfresh(:,:,:) = 0 + allocate (mfsalt(nx_block,ny_block,max_blocks)); mfsalt(:,:,:) = 0 + allocate (mfhocn(nx_block,ny_block,max_blocks)); mfhocn(:,:,:) = 0 + allocate (mfswthru(nx_block,ny_block,max_blocks)); mfswthru(:,:,:) = 0 + allocate (msicemass(nx_block,ny_block,max_blocks)); msicemass(:,:,:) = 0 + ! IA cpl int time-average (3D) + allocate (maiu(nx_block,ny_block,max_blocks)); maiu(:,:,:) = 0 + allocate (muvel(nx_block,ny_block,max_blocks)); muvel(:,:,:) = 0 + allocate (mvvel(nx_block,ny_block,max_blocks)); mvvel(:,:,:) = 0 + allocate (msst(nx_block,ny_block,max_blocks)); msst(:,:,:) = 0 + allocate (mssu(nx_block,ny_block,max_blocks)); mssu(:,:,:) = 0 + allocate (mssv(nx_block,ny_block,max_blocks)); mssv(:,:,:) = 0 + allocate (mco2(nx_block,ny_block,max_blocks)); mco2(:,:,:) = 0 + allocate (mco2fx(nx_block,ny_block,max_blocks)); mco2fx(:,:,:) = 0 + allocate (msstfz(nx_block,ny_block,max_blocks)); msstfz(:,:,:) = 0 + ! IA cpl int time-average (4D) + allocate (maicen(nx_block,ny_block,ncat,max_blocks)); maicen(:,:,:,:) = 0 + allocate (msnown(nx_block,ny_block,ncat,max_blocks)); msnown(:,:,:,:) = 0 + allocate (mthikn(nx_block,ny_block,ncat,max_blocks)); mthikn(:,:,:,:) = 0 + allocate (mfoifr(nx_block,ny_block,ncat,max_blocks)); mfoifr(:,:,:,:) = 0 + allocate (mitopt(nx_block,ny_block,ncat,max_blocks)); mitopt(:,:,:,:) = 0 + allocate (mitopk(nx_block,ny_block,ncat,max_blocks)); mitopk(:,:,:,:) = 0 + allocate (mpndfn(nx_block,ny_block,ncat,max_blocks)); mpndfn(:,:,:,:) = 0 + allocate (mpndtn(nx_block,ny_block,ncat,max_blocks)); mpndtn(:,:,:,:) = 0 +!BX: + allocate (maicen_saved(nx_block,ny_block,ncat,max_blocks)); maicen_saved(:,:,:,:) = 0 + + allocate (vwork(nx_block,ny_block,max_blocks)); vwork(:,:,:) = 0 + allocate (gwork(nx_global,ny_global)); gwork(:,:) = 0 + allocate (sicemass(nx_block,ny_block,max_blocks)); sicemass(:,:,:) = 0. + allocate (vwork2d(l_ilo:l_ihi, l_jlo:l_jhi)); vwork2d(:,:) = 0. !l_ihi-l_ilo+1, l_jhi-l_jlo+1 + end subroutine init_cpl + +!======================================================================= + subroutine from_atm(isteps) +!----------------------------! + + implicit none + + integer(kind=int_kind), intent(in) :: isteps + + real(kind=dbl_kind) :: tmpu, tmpv + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ncid,currstep,ll,ilout + + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(from_atm) receiving coupling fields at rtime= ', isteps + if (chk_a2i_fields) then + if ( .not. file_exist('fields_a2i_in_ice.nc') ) then + call create_ncfile('fields_a2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_a2i_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_a2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + write(il_out,*) + write(il_out,*) '(from_atm) Total number of fields to be rcvd: ', nrecv_a2i + endif + + write(il_out,*) "prism_get from_atm at sec: ", isteps + do jf = 1, nrecv_a2i + + if (my_task==0 .or. 
ll_comparal ) then + + !jf-th field in + write(il_out,*) + write(il_out,*) '*** receiving coupling field No. ', jf, cl_read(jf) + !call flush(il_out) + + if (ll_comparal) then + call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_get_proto (il_var_id_in(jf), isteps, gwork, ierror) + endif + + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Recvd) then + write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror + call prism_abort_proto(il_comp_id, 'cice from_atm','stop 1') + else + write(il_out,*) + write(il_out,*)'(from_atm) rcvd at time with err: ',cl_read(jf),isteps,ierror + + if (ll_comparal .and. chk_a2i_fields) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps, resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_a2i_fields) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif + + if (.not. ll_comparal ) then + call scatter_global(vwork,gwork,master_task,distrb_info, & + field_loc_center, field_type_scalar) +! else +! call unpack_global_dbl(vwork,gwork,master_task,distrb_info, & +! field_loc_center, field_type_scalar) + endif ! not ll_comparal + +#if (MXBLCKS != 1) +#error The following code assumes that max_blocks == 1 +#endif + + !***Note following "select case" works only if cl_read(:) is defined at ALL ranks***! + !-----------------------------------------------------------------------------------! + select case (trim(cl_read(jf))) + case ('thflx_i'); + um_thflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1)=vwork2d(:,:) + case ('pswflx_i'); + um_pswflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =vwork2d(:,:) + case ('runoff_i'); + um_runoff(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =vwork2d(:,:) + case ('wme_i'); + um_wme(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) +! case ('rain_i'); um_rain(:,:,:) = vwork(:,:,:) +! 
case ('snow_i'); um_snow(:,:,:) = vwork(:,:,:) +!---20100825 -- just be cauious: ------------------------- + case ('rain_i'); + um_rain(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =max(0.0,vwork2d(:,:)) + case ('snow_i'); + um_snow(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =max(0.0,vwork2d(:,:)) +!--------------------------------------------------------- + case ('evap_i');um_evap(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('lhflx_i');um_lhflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('tmlt01_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) + case ('tmlt02_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) + case ('tmlt03_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) + case ('tmlt04_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) + case ('tmlt05_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) + case ('bmlt01_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) + case ('bmlt02_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) + case ('bmlt03_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) + case ('bmlt04_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) + case ('bmlt05_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) + case ('taux_i');um_taux(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('tauy_i');um_tauy(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('swflx_i');um_swflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('lwflx_i');um_lwflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('shflx_i');um_shflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('press_i');um_press(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('co2_ai');um_co2(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('wnd_ai');um_wnd(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('icenth_i');um_icenth(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('icesth_i');um_icesth(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('tsfice01');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) + case ('tsfice02');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) + case ('tsfice03');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) + case ('tsfice04');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) + case ('tsfice05');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) + case ('iceevp01');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) + case ('iceevp02');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) + case ('iceevp03');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) + case ('iceevp04');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) + case ('iceevp05');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) + end select + + if (my_task == 0 .or. 
ll_comparal) then + write(il_out,*) + write(il_out,*)'(from_atm) done: ', jf, trim(cl_read(jf)) + endif + + enddo + !BX: 20160623...... avoid initial "remap transport: bad departure points" (e.g.@(332,776))? + if (isteps == 0) then + um_taux = um_taux * 0.1 + um_tauy = um_tauy * 0.1 + endif + !BX. + + call ice_HaloUpdate(um_thflx, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_pswflx, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_runoff, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_wme, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(um_rain, halo_info, field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_snow, halo_info, field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_evap, halo_info, field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_lhflx, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_tmlt, halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_bmlt, halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_taux, halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_tauy, halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_swflx, halo_info,field_loc_center,field_type_vector) + call ice_HaloUpdate(um_lwflx, halo_info,field_loc_center,field_type_vector) + call ice_HaloUpdate(um_shflx, halo_info,field_loc_center,field_type_vector) + call ice_HaloUpdate(um_press, halo_info,field_loc_center,field_type_vector) + call ice_HaloUpdate(um_co2, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(um_wnd, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(um_icenth,halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_icesth,halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_tsfice,halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_iceevp,halo_info, field_loc_center,field_type_vector) + + IF (rotate_winds) THEN !rotate_winds=.t. means oasis does not do the vector rotation. + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + tmpu = um_taux(i,j,iblk) ! on geographical coord. (T cell) + tmpv = um_tauy(i,j,iblk) + um_taux(i,j,iblk) = tmpu*cos(ANGLET(i,j,iblk)) & ! converted onto model curvelear + + tmpv*sin(ANGLET(i,j,iblk)) ! coord. (T cell) + um_tauy(i,j,iblk) = tmpv*cos(ANGLET(i,j,iblk)) & ! + - tmpu*sin(ANGLET(i,j,iblk)) + enddo + enddo + + enddo + + ENDIF !rotate_winds + + ! need do t-grid to u-grid shift for vectors since all coupling occur on + ! t-grid points: <==No! actually CICE requires the input wind on T grid! + ! (see comment in code ice_flux.F) + !call t2ugrid(uwnd1) + !call t2ugrid(vwnd1) + + !------------------------------- + !if ( chk_a2i_fields ) then + ! call check_a2i_fields(isteps) + !endif + !------------------------------- + + if ( chk_a2i_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine from_atm + +!======================================================================= + subroutine from_ocn(isteps) +!----------------------------! + + integer(kind=int_kind), intent(in) :: isteps + + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! 
block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) '(from_ocn) receiving coupling fields at rtime: ', isteps + if (chk_o2i_fields) then + if ( .not. file_exist('fields_o2i_in_ice.nc') ) then + call create_ncfile('fields_o2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_o2i_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_o2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + write(il_out,*) "prism_get from_ocn at sec: ", isteps + do jf = nrecv_a2i + 1, jpfldin + + if (my_task==0 .or. ll_comparal) then + + !jf-th field in + write(il_out,*) + write(il_out,*) '*** receiving coupling fields No. ', jf, cl_read(jf) + if(ll_comparal) then + call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_get_proto (il_var_id_in(jf), isteps, gwork, ierror) + endif + + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Recvd) then + write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror + call prism_abort_proto(il_comp_id, 'cice from_ocn','stop 1') + else + write(il_out,*) + write(il_out,*)'(from_ocn) rcvd at time with err: ',cl_read(jf),isteps,ierror + if(ll_comparal .and. chk_o2i_fields) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_o2i_fields) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif + + if (.not. ll_comparal) then + call scatter_global(vwork, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) +! else +! call unpack_global_dbl(vwork, gwork, master_task, distrb_info, & +! field_loc_center, field_type_scalar) + endif + + !Q: 'field_type_scalar' all right for 'vector' (ssu/ssv, sslx/ssly))?! 
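+      !As for the a2i fields above, each received 2D tile is copied into the
+      ! interior (non-ghost) cells of local block 1 only; the ghost cells are
+      ! refreshed by the ice_HaloUpdate calls after this loop.  This block
+      ! therefore assumes a single block per task (max_blocks == 1), as the
+      ! compile-time check in from_atm already enforces.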
+ select case (trim(cl_read(jf))) + case ('sst_i'); + ocn_sst(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('sss_i'); + ocn_sss(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('ssu_i'); + ocn_ssu(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('ssv_i'); + ocn_ssv(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('sslx_i'); + ocn_sslx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('ssly_i'); + ocn_ssly(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('pfmice_i'); + ocn_pfmice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost, 1) =vwork2d + case ('co2_oi'); + ocn_co2(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('co2fx_oi'); + ocn_co2fx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost, 1) =vwork2d + end select + + enddo + + call ice_HaloUpdate(ocn_sst, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ocn_sss, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ocn_ssu, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(ocn_ssv, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(ocn_sslx, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(ocn_ssly, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(ocn_pfmice, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(ocn_co2, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ocn_co2fx, halo_info, field_loc_center,field_type_scalar) + + !------------------------------- + !if (chk_o2i_fields) then + ! call check_o2i_fields(isteps) + !endif + !------------------------------- + + !------------------------------------------------------------------------------- + !B: calculate freezing point here (before "time_average_ocn_fields_4_i2a") ! + !----- should use eos formula to calculate Tf for "consistency" with GCx ----! + Tf (:,:,:) = -depressT*ocn_sss(:,:,:) ! (deg C) + ! + !May use different formula for Tf such as TEOS-10 formulation: + ! + !r1_S0 = 0.875/35.16504 + !zzs(:,:,:) = sqrt(abs(ocn_sss(:,:,:)) * r1_S0) + !Tf(:,:,:) = ((((1.46873e-03 * zzs(:,:,:) - 9.64972e-03) * zzs(:,:,:) + & + ! 2.28348e-02) * zzs(:,:,:) - 3.12775e-02) * zzs(:,:,:) + & + ! 2.07679e-02) * zzs(:,:,:) - 5.87701e-02 + !Tf(:,:,:) = Tf(:,:,:) * sss(:,:,:) ! - 7.53e-4 * 5.0 !!!5.0 is depth in meters + !--------------------------------------------------------------------------------- + + if ( chk_o2i_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine from_ocn + +!======================================================================= + subroutine into_ocn(isteps) +!-----------------------------------! + + implicit none + + integer(kind=int_kind), intent(in) :: isteps + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(into_ocn) sending coupling fields at stime= ', isteps + if (chk_i2o_fields) then + if ( .not. 
file_exist('fields_i2o_in_ice.nc') ) then + call create_ncfile('fields_i2o_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_i2o_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_i2o_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + write(il_out,*) "prism_put into_ocn at sec: ", isteps + do jf = nsend_i2a + 1, jpfldout + +!CH: make sure the 'LIMITS' are to be released! + + select case(trim(cl_writ(jf))) +!20100531 for MYM's test (iostress_factor) ............. + case('strsu_io'); vwork = io_strsu * iostress_factor + case('strsv_io'); vwork = io_strsv * iostress_factor +!....................................................... + case('rain_io'); vwork = io_rain + case('snow_io'); vwork = io_snow + !case('stflx_io'); vwork = io_stflx + case('stflx_io') + if (limit_stflx) then + vwork = max(-5.e-6, min(io_stflx, 5.e-6)) + else + vwork = io_stflx + endif + !case('htflx_io'); vwork = io_htflx + !case('htflx_io'); vwork = max(io_htflx, -450.0) + !Jan2010: + case('htflx_io'); vwork = min(io_htflx,0.0) + case('swflx_io'); vwork = io_swflx + case('qflux_io'); vwork = io_qflux + case('shflx_io'); vwork = io_shflx + case('lwflx_io'); vwork = io_lwflx + case('runof_io') + if (use_core_runoff) then + vwork = core_runoff + else + vwork = io_runof + endif + case('press_io'); vwork = io_press + case('aice_io'); vwork = io_aice + case('melt_io'); vwork = io_melt + case('form_io'); vwork = io_form + case('co2_i1'); vwork = io_co2 + case('wnd_i1'); vwork = io_wnd + end select + + if(.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) + else + call pack_global_dbl(gwork, vwork, master_task, distrb_info) + vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) +! do iblk=1,nblocks_tot +! +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! +! vwork2d(this_block%i_glob(ilo):this_block%i_glob(ihi), & +! this_block%j_glob(jlo):this_block%j_glob(jhi)) = & +! vwork(ilo:ihi,jlo:jhi,distrb_info%blockLocalID(iblk)) +! endif +! end do + + endif + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) + write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + if(ll_comparal) then + call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_put_proto(il_var_id_out(jf), isteps, gwork, ierror) + endif + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Sent) then + write(il_out,*) + write(il_out,*) '(into_ocn) Err in _put_ ', cl_writ(jf), isteps, ierror + call prism_abort_proto(il_comp_id, 'cice into_ocn','STOP 1') + else + write(il_out,*) + write(il_out,*)'(into_ocn) sent: ', cl_writ(jf), isteps, ierror + if(chk_i2o_fields .and. ll_comparal) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_i2o_fields) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif !my_task == 0 + + enddo !jf + + !-------------------------------------- + !if (chk_i2o_fields) then + ! call check_i2o_fields(isteps) + !endif + !-------------------------------------- + + if ( chk_i2o_fields .and. 
my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine into_ocn + +!======================================================================= + subroutine into_atm(isteps) +!----------------------------! + + integer(kind=int_kind), intent(in) :: isteps + integer(kind=int_kind) :: jf + + real(kind=dbl_kind) :: tmpu, tmpv + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + +!debug hxy599 +!if (isteps==runtime-3600) then +! chk_i2a_fields=.true. !save the last step +! currstep = 1 +! write (il_out,*) 'hxy599 debug: save i2a fields at time ', isteps +!end if + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(into_atm) sending coupling fields at stime= ', isteps + if (chk_i2a_fields) then + if ( .not. file_exist('fields_i2a_in_ice.nc') ) then + call create_ncfile('fields_i2a_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + else + write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_i2a_in_ice.nc',nf_write,ncid) ) + end if + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + IF (rotate_winds) THEN + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + !note note uvel/vvel are on the U-cell here. + tmpu = ia_uvel(i,j,iblk); tmpv = ia_vvel(i,j,iblk) ! ice/ocn velocity, m/s + ia_uvel(i,j,iblk) = tmpu*cos(ANGLE(i,j,iblk)) & ! remapped on to geographical + - tmpv*sin(ANGLE(i,j,iblk)) ! grid. + ia_vvel(i,j,iblk) = tmpv*cos(ANGLE(i,j,iblk)) & ! they also need be shifted + + tmpu*sin(ANGLE(i,j,iblk)) ! on to T-cell (below). + enddo + enddo + + enddo + + ENDIF !rotate_winds + + !shift ia_uvel/ia_vvel onto T points before passing into coupler + call u2tgrid_vector(ia_uvel) + call u2tgrid_vector(ia_vvel) + + !hxy599 debug + !call read_restart_i2a("i2a.nc", 0) + + write(il_out,*) "prism_put into_atm at sec: ", isteps + do jf = 1, nsend_i2a + + select case (trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) + case('uvel_ia'); vwork = ia_uvel * ocn_ssuv_factor !note ice u/v are also + case('vvel_ia'); vwork = ia_vvel * ocn_ssuv_factor ! included here. 
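+      !ocn_ssuv_factor defaults to 1.0 (see cpl_parameters); setting it to
+      ! 0.0 in the input_ice.nml coupling namelist withholds the surface
+      ! current from the UM.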
+ case('sstfz_ia'); vwork = ia_sstfz + case('co2_i2'); vwork = ia_co2 + case('co2fx_i2'); vwork = ia_co2fx + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + + if (.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) + else + call pack_global_dbl(gwork, vwork, master_task, distrb_info) + vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) +! do iblk=1,nblocks_tot +! +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! +! vwork2d(this_block%i_glob(ilo):this_block%i_glob(ihi), & +! this_block%j_glob(jlo):this_block%j_glob(jhi)) = & +! vwork(ilo:ihi,jlo:jhi,distrb_info%blockLocalID(iblk)) +! endif +! end do + + end if + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) + write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + + !call prism_put_inquire_proto(il_var_id_out(jf),isteps,ierror) + + write(il_out,*) + write(il_out,*) '(into_atm) what to do with this var==> Err= ',ierror + if(ll_comparal) then + call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_put_proto(il_var_id_out(jf), isteps, gwork, ierror) + endif + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Sent) then + write(il_out,*) + write(il_out,*) '(into_atm) Err in _put_ ', cl_writ(jf), isteps, ierror + call prism_abort_proto(il_comp_id, 'cice into_atm','STOP 1') + else + write(il_out,*) + write(il_out,*)'(into_atm) sent: ', cl_writ(jf), isteps, ierror + if(chk_i2a_fields .and. ll_comparal) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_i2a_fields) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif !my_task == 0 + + enddo !jf = 1, jpfldout + + !------------------------------- + !if (chk_i2a_fields) then + ! call check_i2a_fields(isteps) + !endif + !------------------------------- + + if ( chk_i2a_fields .and. 
my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine into_atm + +!======================================================================= + subroutine coupler_termination +!-------------------------------! + ! + ! Detach from MPI buffer + ! + call MPI_Buffer_Detach(rla_bufsend, il_bufsize, ierror) + deallocate (rla_bufsend) + !deallocate all the coupling associated arrays... (no bother...) + ! + ! 9- PSMILe termination + ! + + call MPI_Barrier(MPI_COMM_ICE, ierror) + call prism_terminate_proto (ierror) + if (ierror /= PRISM_Ok) then + if (my_task == 0) then + write (il_out,*) 'An error occured in prism_terminate = ', ierror + endif + else + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '==================*** END ***=================' + close(il_out) + endif + endif + ! + print * + print *, '********** End of CICE **********' + print * + + call MPI_Finalize (ierror) + + end subroutine coupler_termination + +!======================================================================= + subroutine decomp_def(id_part_id, id_length, id_imjm, & + id_rank, id_nbcplproc, ld_comparal, ld_mparout) +!-------------------------------------------------------! + ! + !use mod_prism_proto + !use mod_prism_def_partition_proto + + implicit none + + integer(kind=int_kind), dimension(:), allocatable :: il_paral ! Decomposition for each proc + integer(kind=int_kind) :: ig_nsegments ! Number of segments of process decomposition + integer(kind=int_kind) :: ig_parsize ! Size of array decomposition + integer(kind=int_kind) :: id_nbcplproc ! Number of processes involved in the coupling + integer(kind=int_kind) :: id_part_id ! Local partition ID + integer(kind=int_kind) :: id_imjm ! Total grid dimension, ib, ierror, my_task + integer(kind=int_kind) :: id_length ! Size of partial field for each process + integer(kind=int_kind) :: id_rank ! Rank of process + integer(kind=int_kind) :: ld_mparout ! Unit of log file + logical :: ld_comparal + integer(kind=int_kind) :: ib, ierror + character(len=80), parameter :: cdec='BOX' + ! + integer(kind=int_kind) :: ilo, ihi, jlo, jhi + ! + ! + ! Refer to oasis/psmile/prism/modules/mod_prism_proto.F90 for integer(kind=int_kind) value + ! of clim_xxxx parameters + ! + if ( .not. ld_comparal .and. id_rank == 0) then + ! Monoprocess model, or parallel model with only master process involved + ! in coupling: the entire field will be exchanged by the process. + ig_nsegments = 1 + ig_parsize = 3 + allocate(il_paral(ig_parsize)) + ! + il_paral ( clim_strategy ) = clim_serial + il_paral ( clim_offset ) = 0 + il_paral ( clim_length ) = id_imjm + id_length = id_imjm + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else + ! Parallel atm with all process involved in the coupling + ! + if (cdec == 'APPLE') then + ! Each process is responsible for a part of field defined by + ! the number of grid points and the offset of the first point + ! + write (ld_mparout,*) 'APPLE partitioning' + ig_nsegments = 1 + ig_parsize = 3 + allocate(il_paral(ig_parsize)) + write(ld_mparout,*)'ig_parsize',ig_parsize + ! + if (id_rank .LT. (id_nbcplproc-1)) then + il_paral ( clim_strategy ) = clim_apple + il_paral ( clim_length ) = id_imjm/id_nbcplproc + il_paral ( clim_offset ) = id_rank*(id_imjm/id_nbcplproc) + else + il_paral ( clim_strategy ) = clim_apple + il_paral ( clim_length ) = id_imjm-(id_rank*(id_imjm/id_nbcplproc)) + il_paral ( clim_offset ) = id_rank*(id_imjm/id_nbcplproc) + endif + id_length = il_paral(clim_length) + ! 
+ call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else if (cdec == 'BOX') then + !B: CICE uses a kind of Cartisian decomposition which actually may NOT + ! be simply taken as "BOX" decomposition described here !!! + ! (there is an issue associated with the 'halo' boundary for each + ! segment and may NOT be treated as what we do below! + ! It needs further consideration to make this work correctly + ! for 'paralell coupling' if really needed in the future ...) + ! + ! Each process is responsible for a rectangular box + ! + write (ld_mparout,*) 'BOX partitioning' + ig_parsize = 5 + allocate(il_paral(ig_parsize)) + write(ld_mparout,*)'ig_parsize',ig_parsize + + !ilo = 1 + nghost + !ihi = nx_block - nghost + !jlo = 1 + nghost + !jhi = ny_block - nghost + + il_paral ( clim_strategy ) = clim_Box + il_paral ( clim_offset ) = nx_global * (l_jlo-1) + (l_ilo-1) + !il_paral ( clim_offset ) = (l_ilo-1) + il_paral ( clim_SizeX ) = l_ihi-l_ilo+1 + il_paral ( clim_SizeY ) = l_jhi-l_jlo+1 + il_paral ( clim_LdX ) = nx_global + + write(ld_mparout,*)'il_paral=',il_paral + + id_length = il_paral(clim_sizeX) * il_paral(clim_sizeY) + + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else if (cdec == 'ORANGE') then + !B: NOT FOR COMMON USE! + ! Each process is responsible for arbitrarily distributed + ! pieces of the field (here two segments by process) + ! + write (ld_mparout,*) 'ORANGE partitioning' + ig_nsegments = 2 + ig_parsize = 2 * ig_nsegments + 2 + write(ld_mparout,*)'ig_parsize',ig_parsize + allocate(il_paral(ig_parsize)) + ! + il_paral ( clim_strategy ) = clim_orange + il_paral ( clim_segments ) = 2 + il_paral ( clim_segments+1 ) = id_rank*768 + il_paral ( clim_segments+2 ) = 768 + il_paral ( clim_segments+3 ) = (id_rank+3)*768 + il_paral ( clim_segments+4 ) = 768 + id_length = 0 + do ib=1,2*il_paral(clim_segments) + if (mod(ib,2).eq.0) then + id_length = id_length + il_paral(clim_segments+ib) + endif + enddo + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else + write (ld_mparout,*) 'incorrect decomposition ' + endif + endif + + end subroutine decomp_def + +!============================================================================ + +!============================================================================ + subroutine pack_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist) + +! !DESCRIPTION: +! This subroutine gathers a distributed array to a global-sized +! array on the processor dst_task. +! +! !REVISION HISTORY: +! same as module +! +! !REMARKS: +! This is the specific inteface for double precision arrays +! corresponding to the generic interface gather_global. It is shown +! to provide information on the generic interface (the generic +! interface is identical, but chooses a specific inteface based +! on the data type of the input argument). + + +! !USES: + + include 'mpif.h' + +! !INPUT PARAMETERS: + + integer (int_kind), intent(in) :: & + dst_task ! task to which array should be gathered + + type (distrb), intent(in) :: & + src_dist ! distribution of blocks in the source array + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + ARRAY ! array containing horizontal slab of distributed field + +! !OUTPUT PARAMETERS: + + + real (dbl_kind), dimension(:,:), intent(inout) :: & + ARRAY_G ! array containing global horizontal field on dst_task + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! 
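+! Note: despite the gather-style description above, no MPI communication is
+! done here -- each rank only copies its locally owned blocks into the
+! global-sized array (land blocks are filled with spval_dbl); the callers then
+! put their own subarray vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) to the coupler.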
+!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop counters + nsends ,&! number of actual sends + src_block ,&! block locator for send + ierr ! MPI error flag + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + snd_request + + integer (int_kind), dimension(:,:), allocatable :: & + snd_status + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer + + type (block) :: & + this_block ! block info for current block + + do n=1,nblocks_tot + + !*** copy local blocks + + if (src_dist%blockLocation(n) == my_task+1) then + + this_block = get_block(n,n) + +! do j=this_block%jlo,this_block%jhi +! do i=this_block%ilo,this_block%ihi +! ARRAY_G(this_block%i_glob(i), & +! this_block%j_glob(j)) = & +! ARRAY(i,j,src_dist%blockLocalID(n)) +! end do +! end do + ARRAY_G(this_block%i_glob(this_block%ilo):this_block%i_glob(this_block%ihi), & + this_block%j_glob(this_block%jlo):this_block%j_glob(this_block%jhi)) = & + ARRAY(this_block%ilo:this_block%ihi,this_block%jlo:this_block%jhi,src_dist%blockLocalID(n)) + + !*** fill land blocks with special values + + else if (src_dist%blockLocation(n) == 0) then + + this_block = get_block(n,n) + +! do j=this_block%jlo,this_block%jhi +! do i=this_block%ilo,this_block%ihi +! ARRAY_G(this_block%i_glob(i), & +! this_block%j_glob(j)) = spval_dbl +! end do +! end do + ARRAY_G(this_block%i_glob(this_block%ilo):this_block%i_glob(this_block%ihi), & + this_block%j_glob(this_block%jlo):this_block%j_glob(this_block%jhi)) = spval_dbl + endif + + end do + + end subroutine pack_global_dbl +!============================================================================ + +!============================================================================== +subroutine save_restart_i2a(fname, nstep) +! output the last i2a forcing data in cice by the end of the run, +! to be read in at the beginning of next run by cice and sent to atm + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call open_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) +endif + +do jf = 1, nsend_i2a + select case (trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) + case('uvel_ia'); vwork = ia_uvel !* ocn_ssuv_factor !note ice u/v are also + case('vvel_ia'); vwork = ia_vvel !* ocn_ssuv_factor ! included here. 
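+    !note: the restart copy stores the velocities without the ocn_ssuv_factor
+    ! scaling used in into_atm (the factor is commented out above).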
+ case('sstfz_ia'); vwork = ia_sstfz + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + +! if (.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) +! else +! call pack_global_dbl(gwork, vwork, master_task, distrb_info) +! end if + if (my_task == 0) then + call modify_nc2D(ncid, cl_writ(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +end subroutine save_restart_i2a +!========================================================== + + end module cpl_interface diff --git a/drivers/access/cpl_netcdf_setup.F90 b/drivers/access/cpl_netcdf_setup.F90 new file mode 100644 index 00000000..2c15c30d --- /dev/null +++ b/drivers/access/cpl_netcdf_setup.F90 @@ -0,0 +1,367 @@ +module cpl_netcdf_setup + +use ice_kinds_mod +use ice_calendar, only: idate, sec + +implicit none + +include 'netcdf.inc' !define nf_* +!use netcdf !define nf90_* + +integer(kind=int_kind) :: pLonDimId, pLatDimId, timeDimId, pDepDimId +contains + +!========================================================================= +subroutine ncheck(status) + +implicit none + +integer(kind=int_kind), intent(in) :: status + +if (status /= nf_noerr) then + write(*,'(/a)') 'error - from NetCDF library' + write(*,'(a/)') trim(nf_strerror(status)) + stop +end if +end subroutine ncheck + +!============================================================================ +subroutine read_nc(vout,nx,ny,vname,fname) +! +!read in a 2D (X-Y) array with double precision !*** not needed anymore! *** +! instead, the more concise 'ice_read_nc' is use for all nc data reading! +! +!Note the compiling option ("idbl4" I think) does not allow the models to +! read in any single precision variables from a netcdf file such as the +! A2I_..nc etc. So we must feed the model with dbl precision vars in nc +! files. Similarly, we need write out dbl precision fields, NOT single +! precision arrays, into ncfiles! +! --- the above note is perhaps not true! --- +! if we define "real*4 vout", then nf_get_vara_real(...,vout) may work. 
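+! Whatever the trailing dimensions, only the first nx-by-ny slice of the
+! variable is read (start = 1 and count = 1 in every extra dimension).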
+ +implicit none + +real(kind=dbl_kind), dimension(nx,ny), intent(out) :: vout +integer(kind=int_kind), intent(in) :: nx, ny +character(len=*), intent(in) :: vname,fname + +!real, dimension(nx,ny) :: vtmp !single precision for 'conversion' +integer(kind=int_kind) :: varid, ndim, ncid +integer(kind=int_kind), dimension(:), allocatable :: count, start + +!Open file for read access +call ncheck( nf_open(fname, nf_nowrite, ncid) ) + +!Get variable ID +call ncheck(nf_inq_varid(ncid, vname, varid)) + +!Get number of dimensions +call ncheck(nf_inq_varndims(ncid, varid, ndim)) + +!Allocate count and start +allocate (count(ndim), start(ndim)) + +!Get the 2D array data out of a ndim-D variable +if (ndim == 2) then !currently only have ndim = 2! + start = (/ 1, 1 /) + count = (/ nx, ny /) +else if (ndim == 3) then + start = (/ 1, 1, 1/) + count = (/ nx, ny, 1 /) +else !* ndim = 4 *! + start = (/ 1, 1, 1, 1 /) + count = (/ nx, ny, 1, 1 /) +endif + +!call ncheck(nf_get_vara_real(ncid, varid, start, count, vtmp)) +call ncheck(nf_get_vara_double(ncid, varid, start, count, vout)) + +!Close file +call ncheck( nf_close(ncid) ) + +!vout = vtmp + +return +end subroutine read_nc + +!=========================================================================== +subroutine create_ncfile(ncfile, ncid, ii, jj, kk, ll, ilout) +! +!to create 2, 3,or 4D ncfile, depending on optional args (kk,ll)! +! + +implicit none + +integer(kind=int_kind), intent(in) :: ii,jj !x, y dimension size +!!!integer(kind=int_kind), optional, intent(in) :: kk, ll !z, t dimension size +!!!integer(kind=int_kind), optional, intent(in) :: ilout !format io file id +! * 'optional' att can NOT be with 'intent(in) *' +integer(kind=int_kind), optional :: kk, ll !z, t dimension size +integer(kind=int_kind), optional :: ilout !format io file id +character(len=*), intent(in) :: ncfile +integer(kind=int_kind), intent(out) :: ncid + +if (present(ilout)) write(ilout,*) 'creating a new netcdf file: ',ncfile + +!create a new NetCDF and define the grid: +call ncheck(nf_create(trim(ncfile),nf_write,ncid)) + +!define the dimensions +if (present(ll)) call ncheck(nf_def_dim(ncid,"time", nf_unlimited, timeDimId)) +if (present(kk)) call ncheck(nf_def_dim(ncid,"nz", kk, pDepDimId)) +call ncheck(nf_def_dim(ncid, "ny", jj, pLatDimId)) +call ncheck(nf_def_dim(ncid, "nx", ii, pLonDimId)) + +!end of the definition phase +call ncheck(nf_enddef(ncid)) + +!close NetCDF file +!call ncheck(nf_close(ncid)) +!do NOT close it here! +write(*,'(2a)') 'ncfile created: ',trim(ncfile) + +return +end subroutine create_ncfile + +!=========================================================================== +subroutine open_ncfile(ncfile, ncid, ii, jj, kk, ll, ilout) +! +!to open exsiting ncfile to modify it +! + +implicit none + +integer(kind=int_kind), intent(in) :: ii,jj !x, y dimension size +!!!integer(kind=int_kind), optional, intent(in) :: kk, ll !z, t dimension size +!!!integer(kind=int_kind), optional, intent(in) :: ilout !format io file id +! * 'optional' att can NOT be with 'intent(in) *' +integer(kind=int_kind), optional :: kk, ll !z, t dimension size +integer(kind=int_kind), optional :: ilout !format io file id +character(len=*), intent(in) :: ncfile +integer(kind=int_kind), intent(out) :: ncid + +if (present(ilout)) write(ilout,*) 'opening a existing netcdf file: ',ncfile + +!create a new NetCDF and define the grid: +call ncheck(nf_open(trim(ncfile),nf_write,ncid)) + +!close NetCDF file +!call ncheck(nf_close(ncid)) +!do NOT close it here! 
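+!(open_ncfile is used by save_restart_i2a to reopen the i2a restart file so
+! that modify_nc2D can overwrite the existing records in place.)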
+write(*,'(2a)') 'ncfile opened: ',trim(ncfile) + +return +end subroutine open_ncfile + +!=========================================================================== +subroutine create_nc(ncfile,ncid,ii,jj) +! +!not needed anymore 'cos its function is covered by 'create_ncfile' +! + +implicit none + +integer(kind=int_kind), intent(in) :: ii,jj +character(len=*), intent(in) :: ncfile +integer(kind=int_kind), intent(out) :: ncid + +print * +print *, 'creating a new netcdf file: ',ncfile + +!create a new NetCDF and define the grid: +call ncheck(nf_create(trim(ncfile),nf_write,ncid)) + +!define the dimensions +call ncheck(nf_def_dim(ncid,"ny", jj, pLatDimId)) +call ncheck(nf_def_dim(ncid,"nx", ii, pLonDimId)) +!B: note the sizes of dimensions ('jj' and 'ii') can't be missing! + +!end of the definition phase +call ncheck(nf_enddef(ncid)) + +!close NetCDF file +!call ncheck(nf_close(ncid)) +!do NOT close it here! +write(*,'(2a)') 'ncfile created: ',trim(ncfile) + +return +end subroutine create_nc + +!=========================================================================== +subroutine write_nc_1Dtime(vin, nt, vname, ncid) + +implicit none + +integer(kind=int_kind), intent(in) :: ncid,nt +integer(kind=int_kind) :: varid, ncstatus +integer(kind=int_kind), dimension(1:6) :: adate +!real(kind=dbl_kind), dimension(nt), intent(in) :: vin +real, intent(in) :: vin +! NOTE here real is default real*8 (which is actually the same as dbl_kind!) +! somehow the netcdf lib used here takes 'real' as real*4. therefore we need: +real*4 :: vtmp +character(len=*), intent(in) :: vname +character*80 ctimeatt + +vtmp = real(vin) ! isn't this real here real*8 ? +print *, 'write_nc_1Dtime: time to write field -- ', vtmp, vname +print *, 'write_nc_1Dtime: idate, sec -- ', idate, sec + +ncstatus=nf_inq_varid(ncid,vname,varid) + +if (ncstatus/=nf_noerr) then + adate(1) = idate/10000 + adate(2) = (idate - (idate/10000)*10000)/100 + adate(3) = idate - (idate/100)*100 + adate(4:6) = 0 !OK for 'whole-day' runs + call ncheck(nf_redef(ncid)) + call ncheck(nf_def_var(ncid,trim(vname),nf_real, 1, timeDimId, varid)) + write(ctimeatt, & + '("seconds since ",I4.4,"-",I2.2,"-",I2.2," ",I2.2,":",I2.2,":",I2.2)') adate(:) + !ctimeatt="hours since 0000-01-01 00:00:00" + call ncheck(nf_put_att_text(ncid,varid,"units",len_trim(ctimeatt),trim(ctimeatt))) + call ncheck(nf_enddef(ncid)) +end if + +!write values into the 1D array +!call ncheck(nf_put_vara_real(ncid,varid,nt,1,vin)) +call ncheck(nf_put_vara_real(ncid,varid,nt,1,vtmp)) +!B: must indicate the start point and number of the record, ie., nt and 1! + +return +end subroutine write_nc_1Dtime + +!=========================================================================== +subroutine write_nc2D(ncid, vname, vin, prcn, nx, ny, istep, ilout) +! +!to output a 2D array into a 3D field (with time dimension) +!with either single or double precisioin depending on argumnet 'prcn'! +! 
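+!A minimal usage sketch, as called from from_atm/from_ocn/into_ocn above:
+!   call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im, il_jm, currstep, ilout=il_out)
+!which writes record 'currstep' of the named field in single precision (prcn=1).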
+ +implicit none + +integer(kind=int_kind), intent(in) :: ncid +integer(kind=int_kind), intent(in) :: prcn !precision choice (1/2: signle/double) +character(len=*), intent(in) :: vname +integer(kind=int_kind), intent(in) :: nx, ny +integer(kind=int_kind), intent(in) :: istep !position in the time dim (No of record) +!!!integer(kind=int_kind), intent(in), optional :: ilout +integer(kind=int_kind), optional :: ilout +real(kind=dbl_kind), dimension(nx,ny), intent(in) :: vin + +integer(kind=int_kind) :: varid, ncstatus +real*4, dimension(nx,ny) :: vtmp !single precision + +if (present(ilout)) write(ilout,*) 'write_nc2D: handling var *** ',vname, ' rec: ', istep + +ncstatus=nf_inq_varid(ncid,vname,varid) +if (ncstatus/=nf_noerr) then + call ncheck(nf_redef(ncid)) + if (prcn == 1) then + call ncheck(nf_def_var(ncid,trim(vname),nf_real, 3, & + (/pLonDimId, pLatDimId, timeDimId/),varid)) + else + call ncheck(nf_def_var(ncid,trim(vname),nf_double, 3, & + (/pLonDimId, pLatDimId, timeDimId/),varid)) + endif + call ncheck(nf_enddef(ncid)) + if (present(ilout)) write(ilout,*) 'write_nc2D: defined new var ***', vname +else + if (present(ilout)) write(ilout,*) 'write_nc2D: found old var ***', vname +end if + +select case(prcn) + case (1) + vtmp = real(vin) !dbl precision to single precision + call ncheck(nf_put_vara_real(ncid,varid,(/1,1,istep/),(/nx,ny,1/),vtmp)) + case default !case (2) + call ncheck(nf_put_vara_double(ncid,varid,(/1,1,istep/),(/nx,ny,1/),vin)) +end select + +return +end subroutine write_nc2D + +!=========================================================================== +subroutine modify_nc2D(ncid, vname, vin, prcn, nx, ny, istep, ilout) +! +!to modify a 2D array into an existing 3D field (with time dimension) +!with either single or double precisioin depending on argumnet 'prcn'! +! + +implicit none + +integer(kind=int_kind), intent(in) :: ncid +integer(kind=int_kind), intent(in) :: prcn !precision choice (1/2: signle/double) +character(len=*), intent(in) :: vname +integer(kind=int_kind), intent(in) :: nx, ny +integer(kind=int_kind), intent(in) :: istep !position in the time dim (No of record) +!!!integer(kind=int_kind), intent(in), optional :: ilout +integer(kind=int_kind), optional :: ilout +real(kind=dbl_kind), dimension(nx,ny), intent(in) :: vin + +integer(kind=int_kind) :: varid, ncstatus +real*4, dimension(nx,ny) :: vtmp !single precision + +if (present(ilout)) write(ilout,*) 'modify_nc2D: handling var *** ',vname, ' rec: ', istep + +ncstatus=nf_inq_varid(ncid,vname,varid) +if (ncstatus/=nf_noerr) then + if (present(ilout)) write(ilout,*) 'modify_nc2D: Error- not found old var ***', vname + stop +else + if (present(ilout)) write(ilout,*) 'modify_nc2D: found old var ***', vname +end if + +select case(prcn) + case (1) + vtmp = real(vin) !dbl precision to single precision + !call ncheck(nf_put_vara_real(ncid,varid,(/1,1,istep/),(/nx,ny,1/),vtmp)) + call ncheck(nf_put_vara_real(ncid,varid,(/1,1/),(/nx,ny/),vtmp)) + case default !case (2) + !call ncheck(nf_put_vara_double(ncid,varid,(/1,1,istep/),(/nx,ny,1/),vin)) + call ncheck(nf_put_vara_double(ncid,varid,(/1,1/),(/nx,ny/),vin)) +end select + +return +end subroutine modify_nc2D + +!=========================================================================== +subroutine write_nc(vin, nx, ny, vname, ncid) +! +! *** its function is covered by write_nc2D and thus not used any more. *** +! 
+ +implicit none + +integer(kind=int_kind), intent(in) :: nx, ny, ncid +integer(kind=int_kind) :: varid +real(kind=dbl_kind), dimension(nx,ny), intent(in) :: vin +character(len=*), intent(in) :: vname + +!switch to define mode +call ncheck(nf_redef(ncid)) + +!define (2D) variable to be written +call ncheck(nf_def_var(ncid,trim(vname),nf_double, 2, & + (/pLonDimId, pLatDimId/),varid)) +!B: here '2' indicates the dimension of the to-be-written variable 'vname', +! and (/pLonDimId, pLatDimId/) the dimention. +! 'nf_real' determines the output precision! + +!leave define mode +call ncheck(nf_enddef(ncid)) + +!get varId and write to array +call ncheck(nf_inq_varid(ncid,trim(vname),varid)) + +!write values into the 2D array +call ncheck(nf_put_vara_double(ncid,varid,(/1,1/),(/nx,ny/),vin)) +!B: must indicate the start point and number of the record (ie. count as in +! routine read_nc above) (/1,1/) and (/nx,ny/) for the to-be-written vin! + +return +end subroutine write_nc + +!=========================================================================== + +end module cpl_netcdf_setup diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 new file mode 100644 index 00000000..1ffcf12a --- /dev/null +++ b/drivers/access/cpl_parameters.F90 @@ -0,0 +1,215 @@ +!============================================================================ +! +module cpl_parameters +! +!---------------------------------------------------------------------------- + +use ice_kinds_mod + +implicit none + +integer(kind=int_kind) :: il_im, il_jm, il_imjm ! il_im=nx_global, il_jm=ny_global + ! assigned in prism_init +integer (kind=int_kind) :: xdim, ydim +!integer(kind=int_kind), parameter :: nsend = 50 ! maxium no of flds sent allowed +!integer(kind=int_kind), parameter :: nrecv = 50 ! maxium no of flds rcvd allowed +integer(kind=int_kind) :: nsend_i2a, nsend_i2o +integer(kind=int_kind) :: nrecv_a2i, nrecv_o2i +integer(kind=int_kind), parameter :: jpfldout = 63 ! actual number of fields sent +integer(kind=int_kind), parameter :: jpfldin = 47 ! actual number of fields rcvd + +character(len=8), dimension(jpfldout) :: cl_writ ! Symb names fields sent +character(len=8), dimension(jpfldin) :: cl_read ! Symb names fields rcvd + +integer(kind=int_kind), dimension(jpfldout) :: il_var_id_out ! ID for fields sent +integer(kind=int_kind), dimension(jpfldin) :: il_var_id_in ! ID for fields rcvd + +character(len=6), parameter :: cp_modnam='cicexx' ! Component model name + +integer(kind=int_kind) :: il_out ! format io unit(s) for coupling cpu(s) +integer(kind=int_kind) :: il_commlocal ! Component internal communicator +!integer(kind=int_kind) :: il_rank ! local procesor id +!integer(kind=int_kind) :: il_master=0 ! master_task id + +integer(kind=int_kind) :: num_cpl_ai ! num of (a2i) cpl periods +integer(kind=int_kind) :: num_cpl_io ! num of (i2o) cpl periods +integer(kind=int_kind) :: num_ice_io ! ice time loop iter num per i2o cpl interval + +real(kind=dbl_kind) :: meltlimit = -200. !12/03/2008: set max melt +real(kind=dbl_kind) :: ocn_albedo = 0.06 ! for compability with AusCOM +character(len=256) :: inputdir = 'INPUT' +character(len=256) :: restartdir = 'RESTART' +logical :: & !pop_icediag is as that for ocn model, if true + pop_icediag = .true. , & ! ice formation from ocn is via POP approach + use_ocnslope = .false. , & !if .t. use the sea srf tilt passed from ocn + use_umask = .false. , & !if .t. use the pre-processed umask (be careful!) + ice_pressure_on = .true. 
, & + air_pressure_on = .false., & + ice_fwflux = .false. , & + rotate_winds = .false. , & !.t. if oasis sends U,V as scalars. 20090319 + limit_icemelt = .false. , & + limit_stflx = .false. , & !.t. set limit for the salt flux to ocn (switch 20111108) + use_core_runoff = .true. , & !.t. use core runoff data (remapped) 20090718 + cst_ocn_albedo = .true. , & !.t. use constant ocean albedo (e.g., 0.06, to 0.1) + gbm2pericearea = .true. , & !.t. do GBM to per ice area conversion in set_sfcflux + do_scale_fluxes = .true. , & !.t. call scale_fluxes in routine coupling_prep. + extreme_test = .false. , & !.t. output extreme forcing data (in set_sfcflux) + imsk_evap = .true. , & + chk_a2i_fields = .false. , & + chk_i2a_fields = .false. , & + chk_i2o_fields = .false. , & + chk_o2i_fields = .false. +integer(kind=int_kind) :: jobnum = 1 !1 for initial, >1 restart +integer(kind=int_kind) :: inidate = 01010101 !beginning date of this run (yyyymmdd) +integer(kind=int_kind) :: init_date = 00010101 !beginning date of this EXP (yyyymmdd) +integer(kind=int_kind) :: dt_cice = 3600 !time step of this model (seconds) +integer(kind=int_kind) :: dt_cpl_ai = 21600 !atm<==>ice coupling interval (seconds) +integer(kind=int_kind) :: dt_cpl_io = 21600 !ice<==>ocn coupling interval (seconds) +integer(kind=int_kind) :: caltype = 0 !calendar type: 0 (365daye/yr, 'Juilian' ) + ! 1 (365/366 days/yr, 'Gregorian') + ! n (n days/month) +!integer(kind=int_kind) :: runtime0 !accumulated run time by the end of last run (s) +real(kind=dbl_kind) :: runtime0 = 0.0 ! can be too large as int to read in correctly! +integer(kind=int_kind) :: runtime = 86400 !the time length for this run segment (s) + +!20100305: Harry Henden suggests turning off ocean current into UM might reduce the +! tropical cooling bias: +real(kind=dbl_kind) :: ocn_ssuv_factor = 1.0 ! 0.0 -- turn off the ocn_current into UM. +real(kind=dbl_kind) :: iostress_factor = 1.0 ! 0.0 -- turn off stresses into MOM4. + +namelist/coupling/ & + caltype, & + jobnum, & + inidate, & + init_date, & + runtime0, & + runtime, & + dt_cice, & + dt_cpl_ai, & + dt_cpl_io, & + inputdir, & + restartdir, & + pop_icediag, & + use_ocnslope, & + use_umask, & + ice_pressure_on, & + air_pressure_on, & + ice_fwflux, & + rotate_winds, & + limit_icemelt, & + limit_stflx, & + meltlimit, & + use_core_runoff, & + gbm2pericearea, & + do_scale_fluxes, & + extreme_test, & + imsk_evap, & + ocn_ssuv_factor,& + iostress_factor,& + chk_a2i_fields, & + chk_i2a_fields, & + chk_i2o_fields, & + chk_o2i_fields + +integer(kind=int_kind) :: iniday, inimon, iniyear !from inidate +real(kind=dbl_kind) :: coef_io !dt_ice/dt_cpl_io, for i2o fields tavg +real(kind=dbl_kind) :: coef_ia !dt_ice/dt_cpl_ai, for i2a fields tavg +real(kind=dbl_kind) :: coef_cpl !dt_cpl_io/dt_cpl_ai, for ocn fields tavg + +real(kind=dbl_kind) :: frazil_factor = 0.5 + !frazil_factor is associated with the difference between ocean + ! model and ice model time-stepping: for mom4, two-level frog-leap + ! is used and frazil heat flux is calculated and accumulated with + ! frazil_factor = 1, which is supposed to be used for a ice model + ! with the same two-level time-stepping scheme such as SIS. but + ! cice uses forward time-stepping, which means we need 'correct' + ! the received frazil energy by multiplying 0.5... 
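+
+! For orientation, a minimal "input_ice.nml" sketch (illustrative only, simply
+! repeating the default values declared above):
+!   &coupling
+!     caltype = 0, jobnum = 1, inidate = 01010101, init_date = 00010101,
+!     runtime0 = 0.0, runtime = 86400, dt_cice = 3600,
+!     dt_cpl_ai = 21600, dt_cpl_io = 21600
+!   /
+! With these values, get_cpl_timecontrol (below) derives:
+!   num_cpl_ai = 86400/21600 = 4, num_cpl_io = 21600/21600 = 1,
+!   num_ice_io = 21600/3600 = 6,
+!   coef_io = coef_ia = 3600/21600 = 1/6, coef_cpl = 21600/21600 = 1.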
+!--------------------------------------------------------------------------------------- + +contains + +!======================================================================================= +subroutine get_cpl_timecontrol_simple + +implicit none + +! all processors read the namelist-- +open(unit=99,file="input_ice.nml",form="formatted",status="old") +read (99, coupling) +close(unit=99) +! *** make sure dt_cpl_ai is multiple of dt_cpl_io, and dt_cpl_io if multiple of dt_ice *** +num_cpl_ai = runtime/dt_cpl_ai +num_cpl_io = dt_cpl_ai/dt_cpl_io +num_ice_io = dt_cpl_io/dt_cice + +coef_io = float(dt_cice)/float(dt_cpl_io) +coef_ia = float(dt_cice)/float(dt_cpl_ai) +coef_cpl = float(dt_cpl_io)/float(dt_cpl_ai) + +iniday = mod(inidate, 100) +inimon = mod( (inidate - iniday)/100, 100) +iniyear = inidate / 10000 + +return +end subroutine get_cpl_timecontrol_simple + +!=============================================================================== +subroutine get_cpl_timecontrol + +use ice_exit +use ice_fileunits + +implicit none + +integer (int_kind) :: nml_error ! namelist read error flag + +! all processors read the namelist-- + +call get_fileunit(nu_nml) +open(unit=nu_nml,file="input_ice.nml",form="formatted",status="old",iostat=nml_error) +! +write(6,*)'CICE: input_ice.nml opened at unit = ', nu_nml +! +if (nml_error /= 0) then + nml_error = -1 +else + nml_error = 1 +endif +do while (nml_error > 0) + read(nu_nml, nml=coupling,iostat=nml_error) + if (nml_error > 0) read(nu_nml,*) ! for Nagware compiler +end do +if (nml_error == 0) close(nu_nml) + +write(6,coupling) + +call release_fileunit(nu_nml) + +if (nml_error /= 0) then + !!!call abort_ice('ice: error reading coupling') + write(6, *) + write(6, *)'XXX Warning: after reading coupling, nml_error = ',nml_error + write(6, *) +endif + +! * make sure runtime is mutliple of dt_cpl_ai, dt_cpl_ai is mutliple of dt_cpl_io, +! * and dt_cpl_io is mutliple of dt_cice! +num_cpl_ai = runtime/dt_cpl_ai +num_cpl_io = dt_cpl_ai/dt_cpl_io +num_ice_io = dt_cpl_io/dt_cice + +coef_io = float(dt_cice)/float(dt_cpl_io) +coef_ia = float(dt_cice)/float(dt_cpl_ai) +coef_cpl = float(dt_cpl_io)/float(dt_cpl_ai) + +iniday = mod(inidate, 100) +inimon = mod( (inidate - iniday)/100, 100) +iniyear = inidate / 10000 + +!!idate = inidate + +return +end subroutine get_cpl_timecontrol + +!======================================================================================= +end module cpl_parameters diff --git a/drivers/access/ice_constants.F90 b/drivers/access/ice_constants.F90 new file mode 100644 index 00000000..4ded4cc6 --- /dev/null +++ b/drivers/access/ice_constants.F90 @@ -0,0 +1,243 @@ +! SVN:$Id: ice_constants.F90 700 2013-08-15 19:17:39Z eclare $ +!======================================================================= +! +! This module defines a variety of physical and numerical constants +! used throughout the ice model +! +! author Elizabeth C. Hunke, LANL + + module ice_constants + + use ice_kinds_mod + + implicit none + save + private + + !----------------------------------------------------------------- + ! physical constants + !----------------------------------------------------------------- + + real (kind=dbl_kind), parameter, public :: & + rhos = 330.0_dbl_kind ,&! density of snow (kg/m^3) + rhoi = 917.0_dbl_kind ,&! density of ice (kg/m^3) + rhow = 1026.0_dbl_kind ,&! density of seawater (kg/m^3) + cp_air = 1005.0_dbl_kind ,&! specific heat of air (J/kg/K) + ! (Briegleb JGR 97 11475-11485 July 1992) + emissivity= 0.95_dbl_kind ,&! 
emissivity of snow and ice + cp_ice = 2106._dbl_kind ,&! specific heat of fresh ice (J/kg/K) +!ars599: 11042014: add AusCOM +#ifdef AusCOM + cp_ocn = 3989._dbl_kind ,&! specific heat of ocn (J/kg/K) + ! freshwater value needed for enthalpy +#else + cp_ocn = 4218._dbl_kind ,&! specific heat of ocn (J/kg/K) + ! freshwater value needed for enthalpy +#endif + depressT = 0.054_dbl_kind ,&! Tf:brine salinity ratio (C/ppt) + +!ars599: 26032014 new code (CODE: dragio) +! use new code, mark out #ifndef AusCOM +#ifndef AusCOM + dragio = 0.00536_dbl_kind ,&! ice-ocn drag coefficient +#endif + albocn = 0.06_dbl_kind ! ocean albedo + + real (kind=dbl_kind), parameter, public :: & + gravit = 9.80665_dbl_kind ,&! gravitational acceleration (m/s^2) + omega = 7.292116e-5_dbl_kind,&! angular velocity of earth (rad/sec) + radius = 6.371229e6_dbl_kind ! earth radius (m) + + real (kind=dbl_kind), parameter, public :: & + secday = 86400.0_dbl_kind ,&! seconds in calendar day + viscosity_dyn = 1.79e-3_dbl_kind, & ! dynamic viscosity of brine (kg/m/s) +#ifndef AusCOM + Tocnfrz = -1.8_dbl_kind ,&! freezing temp of seawater (C), + ! used as Tsfcn for open water +#endif + rhofresh = 1000.0_dbl_kind ,&! density of fresh water (kg/m^3) + zvir = 0.606_dbl_kind ,&! rh2o/rair - 1.0 + vonkar = 0.4_dbl_kind ,&! von Karman constant + cp_wv = 1.81e3_dbl_kind ,&! specific heat of water vapor (J/kg/K) + stefan_boltzmann = 567.0e-10_dbl_kind,&! W/m^2/K^4 + Tffresh = 273.15_dbl_kind ,&! freezing temp of fresh ice (K) + Lsub = 2.835e6_dbl_kind ,&! latent heat, sublimation freshwater (J/kg) + Lvap = 2.501e6_dbl_kind ,&! latent heat, vaporization freshwater (J/kg) + Lfresh = Lsub-Lvap ,&! latent heat of melting of fresh ice (J/kg) + Timelt = 0.0_dbl_kind ,&! melting temperature, ice top surface (C) + Tsmelt = 0.0_dbl_kind ,&! melting temperature, snow top surface (C) + ice_ref_salinity = 5._dbl_kind, &!8._dbl_kind ,&! (ppt) +! ocn_ref_salinity = 34.7_dbl_kind,&! (ppt) + spval_dbl = 1.0e30_dbl_kind ! special value (double precision) + + real (kind=real_kind), parameter, public :: & + spval = 1.0e30_real_kind ! special value for netCDF output + + real (kind=dbl_kind), parameter, public :: & + iceruf = 0.0005_dbl_kind ,&! ice surface roughness (m) + + ! (Ebert, Schramm and Curry JGR 100 15965-15975 Aug 1995) + kappav = 1.4_dbl_kind ,&! vis extnctn coef in ice, wvlngth<700nm (1/m) + !kappan = 17.6_dbl_kind,&! vis extnctn coef in ice, wvlngth<700nm (1/m) + + ! kice is not used for mushy thermo + kice = 2.03_dbl_kind ,&! thermal conductivity of fresh ice(W/m/deg) + ! kseaice is used only for zero-layer thermo + kseaice= 2.00_dbl_kind ,&! thermal conductivity of sea ice (W/m/deg) + ! (used in zero layer thermodynamics option) + ksno = 0.31_dbl_kind ,&! thermal conductivity of snow (W/m/deg) + zref = 10._dbl_kind ,&! reference height for stability (m) +#ifndef AusCOM ! multilayers with the UM coupling + aicenmin_ml = 0.00001_dbl_kind, &! AEW: min aice we want to allow when using + snowpatch = 0.02_dbl_kind ! parameter for fractional snow area (m) +#else + aicenmin_ml = 0.00001_dbl_kind! AEW: min aice we want to allow when using +#endif +#ifdef AusCOM + ! in namelist therefore not parameter, which is counterintuitive, + ! since this modules name is ice_constants +!ars599: 26032014: change to public +!ars599: 24042015: remove dragio!! + real (kind=dbl_kind), public :: & + dragio , & ! ice-ocn drag coefficient + Tocnfrz ! freezing temp of seawater (C), + ! used as Tsfcn for open water +#endif + + ! weights for albedos + ! 
4 Jan 2007 BPB Following are appropriate for complete cloud + ! in a summer polar atmosphere with 1.5m bare sea ice surface: + ! .636/.364 vis/nir with only 0.5% direct for each band. +#ifdef AusCOM +!ars599: 26032014 change to public +! real (kind=dbl_kind) :: & ! currently used only + real (kind=dbl_kind), public :: & ! currently used only + awtvdr, &! visible, direct ! for history and + awtidr, &! near IR, direct ! diagnostics + awtvdf, &! visible, diffuse + awtidf ! near IR, diffuse +#else + real (kind=dbl_kind), parameter, public :: & ! currently used only + awtvdr = 0.00318_dbl_kind, &! visible, direct ! for history and + awtidr = 0.00182_dbl_kind, &! near IR, direct ! diagnostics + awtvdf = 0.63282_dbl_kind, &! visible, diffuse + awtidf = 0.36218_dbl_kind ! near IR, diffuse +#endif + + real (kind=dbl_kind), parameter, public :: & + qqqice = 11637800._dbl_kind ,&! for qsat over ice + TTTice = 5897.8_dbl_kind ,&! for qsat over ice + qqqocn = 627572.4_dbl_kind ,&! for qsat over ocn + TTTocn = 5107.4_dbl_kind ! for qsat over ocn + + ! these are currently set so as to have no effect on the decomposition + real (kind=dbl_kind), parameter, public :: & + shlat = 30.0_dbl_kind ,&! artificial masking edge (deg) + nhlat = -30.0_dbl_kind ! artificial masking edge (deg) + + !----------------------------------------------------------------- + ! numbers + !----------------------------------------------------------------- + + real (kind=dbl_kind), parameter, public :: & + c0 = 0.0_dbl_kind, & + c1 = 1.0_dbl_kind, & + c1p5 = 1.5_dbl_kind, & + c2 = 2.0_dbl_kind, & + c3 = 3.0_dbl_kind, & + c4 = 4.0_dbl_kind, & + c5 = 5.0_dbl_kind, & + c6 = 6.0_dbl_kind, & + c8 = 8.0_dbl_kind, & + c9 = 9.0_dbl_kind, & + c10 = 10.0_dbl_kind, & + c12 = 12.0_dbl_kind, & + c15 = 15.0_dbl_kind, & + c16 = 16.0_dbl_kind, & + c20 = 20.0_dbl_kind, & + c25 = 25.0_dbl_kind, & + c30 = 30.0_dbl_kind, & + c60 = 60.0_dbl_kind, & + c100 = 100.0_dbl_kind, & + c180 = 180.0_dbl_kind, & + c360 = 360.0_dbl_kind, & + c365 = 365.0_dbl_kind, & + c400 = 400.0_dbl_kind, & + c3600= 3600.0_dbl_kind, & + c1000= 1000.0_dbl_kind, & + p001 = 0.001_dbl_kind, & + p01 = 0.01_dbl_kind, & + p025 = 0.025_dbl_kind, & + p1 = 0.1_dbl_kind, & + p2 = 0.2_dbl_kind, & + p4 = 0.4_dbl_kind, & + p5 = 0.5_dbl_kind, & + p6 = 0.6_dbl_kind, & + p05 = 0.05_dbl_kind, & + p15 = 0.15_dbl_kind, & + p25 = 0.25_dbl_kind, & + p75 = 0.75_dbl_kind, & + p166 = c1/c6, & + p333 = c1/c3, & + p666 = c2/c3, & + p111 = c1/c9, & + p055 = p111*p5, & + p027 = p055*p5, & + p222 = c2/c9, & + puny = 1.0e-11_dbl_kind, & + eps13 = 1.0e-13_dbl_kind, & + eps16 = 1.0e-16_dbl_kind, & + bignum = 1.0e+30_dbl_kind, & + pi = 3.14159265358979323846_dbl_kind, & + pih = p5*pi, & + piq = p5*pih, & + pi2 = c2*pi + + !----------------------------------------------------------------- + ! location of fields for staggered grids + !----------------------------------------------------------------- + + integer (int_kind), parameter, public :: & + field_loc_unknown = 0, & + field_loc_noupdate = -1, & + field_loc_center = 1, & + field_loc_NEcorner = 2, & + field_loc_Nface = 3, & + field_loc_Eface = 4, & + field_loc_Wface = 5 + + !----------------------------------------------------------------- + ! field type attribute - necessary for handling + ! 
changes of direction across tripole boundary + !----------------------------------------------------------------- + + integer (int_kind), parameter, public :: & + field_type_unknown = 0, & + field_type_noupdate = -1, & + field_type_scalar = 1, & + field_type_vector = 2, & + field_type_angle = 3 + + !----------------------------------------------------------------- + ! conversion factors + !----------------------------------------------------------------- + + real (kind=dbl_kind), parameter, public :: & + cm_to_m = 0.01_dbl_kind ,&! cm to meters + m_to_cm = 100._dbl_kind ,&! meters to cm + m2_to_km2 = 1.e-6_dbl_kind ,&! m^2 to km^2 + kg_to_g = 1000._dbl_kind ,&! kilograms to grams + mps_to_cmpdy = 8.64e6_dbl_kind ,&! m per s to cm per day + rad_to_deg = 180._dbl_kind/pi ! degree-radian conversion + +#ifdef AusCOM +!ars599: 26032014: change to public + real (kind=dbl_kind), parameter, public :: & + rvgas = 461.50_dbl_kind ,&! gas constant for water vapour + rdgas = 287.04_dbl_kind ! gas constant for dry air +#endif +!======================================================================= + + end module ice_constants + +!======================================================================= diff --git a/drivers/access/ice_coupling.F90 b/drivers/access/ice_coupling.F90 new file mode 100644 index 00000000..e50bec57 --- /dev/null +++ b/drivers/access/ice_coupling.F90 @@ -0,0 +1,482 @@ +!======================================================================= +! +!BOP +! +! !MODULE: ice_coupling - contains coupling related routines used by Met Office +! +! !DESCRIPTION: +! +! Contains routines relating to coupling fields used by Met Office +! +! !REVISION HISTORY: +! SVN:$Id: +! +! authors: Alison McLaren, Met Office +! Feb 2014: Amended by Alex West for use in CICE 5.0. +! +! !INTERFACE: +! + module ice_coupling +! +! !USES: +! + use ice_constants + use ice_kinds_mod +! +!EOP +! + implicit none + save + +! !PUBLIC MEMBER FUNCTIONS: + + public :: sfcflux_to_ocn, set_sfcflux, top_layer_Tandk_init, top_layer_Tandk_run +! +!EOP +! +!======================================================================= + + contains + +!======================================================================= +!BOP +! +! !IROUTINE: sfcflux_to_ocn +! +! !DESCRIPTION: +! +! If surface heat fluxes are provided to CICE instead of CICE calculating +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! be provided at points which do not have ice. (This is could be due to +! the heat fluxes being calculated on a lower resolution grid or the +! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! conserve energy and water by passing these fluxes to the ocean. +! +! !INTERFACE: +! + subroutine sfcflux_to_ocn(nx_block, ny_block, & + tmask, aice, & + fsurfn_f, flatn_f, & + fresh, fhocn) +! +! !REVISION HISTORY: +! +! authors: A. McLaren, Met Office +! +! !USES: +! + use ice_domain_size, only: ncat +! +! !INPUT/OUTPUT PARAMETERS: + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in):: & + aice ! initial ice concentration + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(in) :: & + fsurfn_f, & ! net surface heat flux (provided as forcing) + flatn_f ! 
latent heat flux (provided as forcing) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout):: & + fresh , & ! fresh water flux to ocean (kg/m2/s) + fhocn ! actual ocn/ice heat flx (W/m**2) +! +!EOP +! +#ifdef CICE_IN_NEMO + integer (kind=int_kind) :: & + i, j, n ! horizontal indices + + real (kind=dbl_kind) :: & + rLsub ! 1/Lsub + + rLsub = c1 / Lsub + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j) .and. aice(i,j) <= puny) then + fhocn(i,j) = fhocn(i,j) & + + fsurfn_f(i,j,n) + flatn_f(i,j,n) + fresh(i,j) = fresh(i,j) & + + flatn_f(i,j,n) * rLsub + endif + enddo ! i + enddo ! j + enddo ! n + +#endif + end subroutine sfcflux_to_ocn + +!======================================================================= + +! If model is not calculating surface temperature, set the surface +! flux values using values read in from forcing data or supplied via +! coupling (stored in ice_flux). +! +! If CICE is running in NEMO environment, convert fluxes from GBM values +! to per unit ice area values. If model is not running in NEMO environment, +! the forcing is supplied as per unit ice area values. +! +! authors Alison McLaren, Met Office + + subroutine set_sfcflux (nx_block, ny_block, & + n, iblk, & + icells, & + indxi, indxj, & + aicen, & + flatn, & + fsensn, & + fsurfn, & + fcondtopn) + + use ice_fileunits, only: nu_diag + use ice_flux, only: fsurfn_f, fcondtopn_f, flatn_f, fsensn_f + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + n, & ! thickness category index + iblk, & ! block index + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + ! ice state variables + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aicen ! concentration of ice + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & + flatn , & ! latent heat flux (W/m^2) + fsensn , & ! sensible heat flux (W/m^2) + fsurfn , & ! net flux to top surface, not including fcondtopn + fcondtopn ! downward cond flux at top surface (W m-2) + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij ! horizontal indices, combine i and j loops + + real (kind=dbl_kind) :: & + raicen ! 1 or 1/aicen + + logical (kind=log_kind) :: & + extreme_flag ! flag for extreme forcing values + + logical (kind=log_kind), parameter :: & + extreme_test=.false. ! test and write out extreme forcing data + + raicen = c1 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + +#ifdef CICE_IN_NEMO +!---------------------------------------------------------------------- +! Convert fluxes from GBM values to per ice area values when +! running in NEMO environment. (When in standalone mode, fluxes +! are input as per ice area.) +!---------------------------------------------------------------------- + raicen = c1 / aicen(i,j) +#endif + fsurfn(i,j) = fsurfn_f(i,j,n,iblk)*raicen + fcondtopn(i,j)= fcondtopn_f(i,j,n,iblk)*raicen + flatn(i,j) = flatn_f(i,j,n,iblk)*raicen + fsensn(i,j) = fsensn_f(i,j,n,iblk)*raicen + + enddo + +!---------------------------------------------------------------- +! Flag up any extreme fluxes +!--------------------------------------------------------------- + + if (extreme_test) then + extreme_flag = .false. + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (fcondtopn(i,j) < -100.0_dbl_kind & + .or. 
fcondtopn(i,j) > 20.0_dbl_kind) then + extreme_flag = .true. + endif + + if (fsurfn(i,j) < -100.0_dbl_kind & + .or. fsurfn(i,j) > 80.0_dbl_kind) then + extreme_flag = .true. + endif + + if (flatn(i,j) < -20.0_dbl_kind & + .or. flatn(i,j) > 20.0_dbl_kind) then + extreme_flag = .true. + endif + + enddo ! ij + + if (extreme_flag) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (fcondtopn(i,j) < -100.0_dbl_kind & + .or. fcondtopn(i,j) > 20.0_dbl_kind) then + write(nu_diag,*) & + 'Extreme forcing: -100 > fcondtopn > 20' + write(nu_diag,*) & + 'i,j,n,iblk,aicen,fcondtopn = ', & + i,j,n,iblk,aicen(i,j),fcondtopn(i,j) + endif + + if (fsurfn(i,j) < -100.0_dbl_kind & + .or. fsurfn(i,j) > 80.0_dbl_kind) then + write(nu_diag,*) & + 'Extreme forcing: -100 > fsurfn > 40' + write(nu_diag,*) & + 'i,j,n,iblk,aicen,fsurfn = ', & + i,j,n,iblk,aicen(i,j),fsurfn(i,j) + endif + + if (flatn(i,j) < -20.0_dbl_kind & + .or. flatn(i,j) > 20.0_dbl_kind) then + write(nu_diag,*) & + 'Extreme forcing: -20 > flatn > 20' + write(nu_diag,*) & + 'i,j,n,iblk,aicen,flatn = ', & + i,j,n,iblk,aicen(i,j),flatn(i,j) + endif + + enddo ! ij + + endif ! extreme_flag + endif ! extreme_test + + + end subroutine set_sfcflux + +!======================================================================= +!======================================================================= +!BOP +! +! !ROUTINE: top_layer_Tandk_init +! +! !DESCRIPTION: +! +! Hacked version to be called upon initialisation (when we're not +! parallelised) +! Calculate the top layer temperature and conductivity for passing +! to atmosphere model or calculating Tsfc explicitly. +! +! This routine is only called if calc_Tsfc = F and heat_capacity = T. +! +! !REVISION HISTORY: +! +! authors: Alison McLaren, Met Office +! Feb 2014: Modified by Alex West to work in CICE 5.0 +! +! !INTERFACE: + + subroutine top_layer_Tandk_init +! +! !USES: +! + use ice_blocks + use ice_constants + use ice_domain, only: nblocks + use ice_domain_size + use ice_fileunits, only: nu_diag + use ice_flux, only: Tn_top, keffn_top + use ice_itd, only: hs_min + use ice_state, only: aicen, vicen, vsnon, trcrn, nt_qice, nt_sice, nt_qsno + use ice_therm_mushy, only: liquidus_temperature_mush + use ice_therm_shared, only: calculate_ki_from_Tin, calculate_Tin_from_qin, ktherm, Tmlt, conduct + +! +! !INPUT/OUTPUT PARAMETERS: +! +!EOP +! + + integer (kind=int_kind) :: & + iblk , & ! block index + n , & ! thickness category index + i,j ! horizontal indices + + real (kind=dbl_kind) :: & + rnslyr , & ! real(nslyr) + rnilyr , & ! real(nilyr) + hs1 , & ! thickness of top snow layer + ! (so we know whether the top layer is snow or ice) + hi1 , & ! thickness of top ice layer + Tmlt1 , & ! melting temperature of top ice layer + ki ! top ice layer conductivity + + + real (kind=dbl_kind) :: & + ki_hold, & + Ti_hold ! debugging variables + + keffn_top(:,:,:,:) = c0 ! 
initialise + Tn_top(:,:,:,:) = c0 + rnslyr = real(nslyr,kind=dbl_kind) + rnilyr = real(nilyr,kind=dbl_kind) + + do iblk = 1, max_blocks + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + + if (aicen(i,j,n,iblk) > puny) then + + hs1 = vsnon(i,j,n,iblk)/(aicen(i,j,n,iblk)*rnslyr) + + if (hs1 > hs_min/rnslyr) then + + !snow is top layer + Tn_top(i,j,n,iblk) = (Lfresh + trcrn(i,j,nt_qsno,n,iblk) / rhos)/cp_ice + keffn_top(i,j,n,iblk) = c2 * ksno / hs1 + + else + !ice is top layer + hi1 = vicen(i,j,n,iblk)/(aicen(i,j,n,iblk)*rnilyr) + if (ktherm == 2) then + Tmlt1 = liquidus_temperature_mush(trcrn(i,j,nt_sice,n,iblk)) + else + Tmlt1 = - trcrn(i,j,nt_sice,n,iblk) * depressT + endif + + Tn_top(i,j,n,iblk) = & + calculate_Tin_from_qin(trcrn(i,j,nt_qice,n,iblk),Tmlt1) + Ti_hold = calculate_Tin_from_qin(trcrn(i,j,nt_qice,n,iblk),Tmlt1) + ki_hold = calculate_ki_from_Tin(Tn_top(i,j,n,iblk),trcrn(i,j,nt_sice,n,iblk)) + ki = calculate_ki_from_Tin(Tn_top(i,j,n,iblk),trcrn(i,j,nt_sice,n,iblk)) + keffn_top(i,j,n,iblk) = c2 * ki / hi1 + endif + + endif ! aice > puny + enddo ! i + enddo ! i + enddo ! n + enddo ! iblk + + end subroutine top_layer_Tandk_init + +!======================================================================= +!BOP +! +! !ROUTINE: top_layer_Tandk_run +! +! !DESCRIPTION: +! +! Calculate the top layer temperature and conductivity for passing +! to atmosphere model or calculating Tsfc explicitly. +! +! This routine is only called if calc_Tsfc = F and heat_capacity = T. +! +! !REVISION HISTORY: +! +! authors: Alison McLaren, Met Office +! Feb 2014: Modified by Alex West to work in CICE 5.0 +! +! !INTERFACE: + + subroutine top_layer_Tandk_run (iblk) +! +! !USES: +! + use ice_blocks + use ice_constants + use ice_domain, only: nblocks + use ice_domain_size + use ice_fileunits, only: nu_diag + use ice_flux, only: Tn_top, keffn_top + use ice_itd, only: hs_min + use ice_state, only: aicen, vicen, vsnon, trcrn, nt_qice, nt_sice, nt_qsno + use ice_therm_mushy, only: liquidus_temperature_mush + use ice_therm_shared, only: calculate_ki_from_Tin, calculate_Tin_from_qin, ktherm, Tmlt, conduct + +! +! !INPUT/OUTPUT PARAMETERS: +! +!EOP +! + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + integer (kind=int_kind) :: & + n , & ! thickness category index + i,j ! horizontal indices + + real (kind=dbl_kind) :: & + rnslyr , & ! real(nslyr) + rnilyr , & ! real(nilyr) + hs1 , & ! thickness of top snow layer + ! (so we know whether the top layer is snow or ice) + hi1 , & ! thickness of top ice layer + Tmlt1 , & ! melting temperature of top ice layer + ki ! top ice layer conductivity + + + real (kind=dbl_kind) :: & + ki_hold, & + Ti_hold ! debugging variables + + keffn_top(:,:,:,:) = c0 ! 
initialise + Tn_top(:,:,:,:) = c0 + rnslyr = real(nslyr,kind=dbl_kind) + rnilyr = real(nilyr,kind=dbl_kind) + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + + if (aicen(i,j,n,iblk) > puny) then + + hs1 = vsnon(i,j,n,iblk)/(aicen(i,j,n,iblk)*rnslyr) + + if (hs1 > hs_min/rnslyr) then + + !snow is top layer + Tn_top(i,j,n,iblk) = (Lfresh + trcrn(i,j,nt_qsno,n,iblk) / rhos)/cp_ice + keffn_top(i,j,n,iblk) = c2 * ksno / hs1 + + else + !ice is top layer + hi1 = vicen(i,j,n,iblk)/(aicen(i,j,n,iblk)*rnilyr) + if (ktherm == 2) then + Tmlt1 = liquidus_temperature_mush(trcrn(i,j,nt_sice,n,iblk)) + else + Tmlt1 = - trcrn(i,j,nt_sice,n,iblk) * depressT + endif + + Tn_top(i,j,n,iblk) = & + calculate_Tin_from_qin(trcrn(i,j,nt_qice,n,iblk),Tmlt1) + Ti_hold = calculate_Tin_from_qin(trcrn(i,j,nt_qice,n,iblk),Tmlt1) + ki_hold = calculate_ki_from_Tin(Tn_top(i,j,n,iblk),trcrn(i,j,nt_sice,n,iblk)) + ki = calculate_ki_from_Tin(Tn_top(i,j,n,iblk),trcrn(i,j,nt_sice,n,iblk)) + keffn_top(i,j,n,iblk) = c2 * ki / hi1 + endif + + endif ! aice > puny + enddo ! i + enddo ! i + enddo ! n + + end subroutine top_layer_Tandk_run + +!====================================================================== + + end module ice_coupling + +!====================================================================== diff --git a/drivers/access/xxxx.f90 b/drivers/access/xxxx.f90 new file mode 100644 index 00000000..1c61776b --- /dev/null +++ b/drivers/access/xxxx.f90 @@ -0,0 +1,1446 @@ +MODULE cpl_forcing_handler +! +! It contains subroutines handling coupling fields. They are +! +! nullify_i2o_fluxes: +! tavg_i2o_fluxes: +! ............... +! ............... +! +use ice_blocks +use ice_forcing +use ice_read_write +use ice_domain_size +use ice_domain, only : distrb_info, nblocks +use ice_flux !forcing data definition (Tair, Qa, uocn, etc.) + !Tn_top, keffn_top ...(for multilayer configuration) +use ice_state, only : aice, aicen, trcr !ice concentration and tracers +use ice_state, only: uvel, vvel, vsnon, vicen +use ice_gather_scatter +!ars599: 11042014: use all ice_constants +!use ice_constants, only : gravit, Lvap, Lsub +use ice_constants +use ice_grid, only : tmask, to_ugrid +use ice_communicate, only : my_task, master_task +!use ice_ocean, only : cprho +use ice_exit, only : abort_ice + +use cpl_parameters +use cpl_netcdf_setup +use cpl_arrays_setup + +implicit none + +real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + aiiu ! ice fraction on u-grid + +contains + +!=============================================================================== +subroutine get_core_runoff(fname, vname, nrec) +! read in the remapped core runoff data (S.Marsland) which will be used to replace +! the ncep2 runoff sent from matm via coupler + +implicit none + +character*(*), intent(in) :: fname, vname +integer(kind=int_kind), intent(in) :: nrec +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. + +if ( file_exist(fname) ) then + call ice_open_nc(fname, ncid) + call ice_read_global_nc(ncid, nrec, vname, gwork, dbug) + call scatter_global(core_runoff, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task == 0) write(il_out,*) '(get_core_runoff) file doesnt exist: ', fname + stop 'CICE stopped: core runoff (remapped) file not found.' +endif + +return +end subroutine get_core_runoff + +!=============================================================================== +subroutine get_time0_sstsss(fname, nmonth) + +! 
This routine is to be used only once at the beginning at an exp. + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nmonth +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. +!dbug = .false. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) opening ncfile: ',fname + endif + call ice_open_nc(fname, ncid) + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) reading in initial SST...' + endif + call ice_read_nc(ncid, nmonth, 'TEMP', sst, dbug) + call gather_global(gwork, sst, master_task, distrb_info) + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) reading in initial SSS...' + endif + call ice_read_nc(ncid, nmonth, 'SALT', sss, dbug) + call gather_global(gwork, sss, master_task, distrb_info) + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) file doesnt exist: ', fname + endif + call abort_ice('CICE stopped--initial SST and SSS ncfile not found.') +endif + +return +end subroutine get_time0_sstsss + +!=============================================================================== +! temporary use ... +subroutine read_access_a2i_data(fname,nrec,istep) + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nrec,istep +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_access_a2i_data) opening ncfile: ',fname + endif + call ice_open_nc(fname, ncid) + if (my_task==0) then + write(il_out,*) '(read_access_a2i_data) reading a2i forcing data...' + endif + call ice_read_nc(ncid, nrec, 'thflx_i', um_thflx, dbug) + call ice_read_nc(ncid, nrec, 'pswflx_i', um_pswflx, dbug) + call ice_read_nc(ncid, nrec, 'runoff_i', um_runoff, dbug) + call ice_read_nc(ncid, nrec, 'wme_i', um_wme, dbug) + call ice_read_nc(ncid, nrec, 'rain_i', um_rain, dbug) + call ice_read_nc(ncid, nrec, 'snow_i', um_snow, dbug) + call ice_read_nc(ncid, nrec, 'evap_i', um_evap, dbug) + call ice_read_nc(ncid, nrec, 'lhflx_i', um_lhflx, dbug) + call ice_read_nc(ncid, nrec, 'tmlt01_i', um_tmlt(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt02_i', um_tmlt(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt03_i', um_tmlt(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt04_i', um_tmlt(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt05_i', um_tmlt(:,:,5,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt01_i', um_bmlt(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt02_i', um_bmlt(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt03_i', um_bmlt(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt04_i', um_bmlt(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt05_i', um_bmlt(:,:,5,:), dbug) + call ice_read_nc(ncid, nrec, 'taux_i', um_taux, dbug) + call ice_read_nc(ncid, nrec, 'tauy_i', um_tauy, dbug) + call ice_read_nc(ncid, nrec, 'swflx_i', um_swflx, dbug) + call ice_read_nc(ncid, nrec, 'lwflx_i', um_lwflx, dbug) + call ice_read_nc(ncid, nrec, 'shflx_i', um_shflx, dbug) + call ice_read_nc(ncid, nrec, 'press_i', um_press, dbug) + call ice_read_nc(ncid, nrec, 'co2_ai', um_co2, dbug) + call ice_read_nc(ncid, nrec, 'wnd_ai', um_wnd, dbug) + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task==0) then + write(il_out,*) '(ed_access_a2i_data file doesnt exist: ', fname + endif + call abort_ice('CICE stopped--ACCESS fields_a2i ncfile not found.') +endif + +call check_a2i_fields(istep) + +end subroutine 
read_access_a2i_data + +!============================================================================= +subroutine read_restart_i2a(fname, sec) !'i2a.nc', 0) + +! read ice to atm coupling fields from restart file, and send to atm module + +implicit none +character*(*), intent(in) :: fname +integer :: sec + +integer(kind=int_kind) :: ncid +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_restart_i2a) reading in i2a fields......' + endif + call ice_open_nc(fname, ncid) + call ice_read_nc(ncid, 1, 'icecon01', ia_aicen(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'icecon02', ia_aicen(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'icecon03', ia_aicen(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'icecon04', ia_aicen(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'icecon05', ia_aicen(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk01', ia_snown(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk02', ia_snown(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk03', ia_snown(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk04', ia_snown(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk05', ia_snown(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'icethk01', ia_thikn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'icethk02', ia_thikn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'icethk03', ia_thikn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'icethk04', ia_thikn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'icethk05', ia_thikn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'isst_ia', ia_sst, dbug) + call ice_read_nc(ncid, 1, 'uvel_ia', ia_uvel, dbug) + call ice_read_nc(ncid, 1, 'vvel_ia', ia_vvel, dbug) + if (my_task == master_task) then + call ice_close_nc(ncid) + write(il_out,*) '(read_restart_i2a) has read in 18 i2a fields.' + endif + +else + if (my_task==0) then + write(il_out,*) 'ERROR: (read_restart_i2a) not found file *** ',fname + endif + print *, 'CICE: (read_restart_i2a) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2a data file.') +endif +end subroutine read_restart_i2a + + +!============================================================================= +subroutine read_restart_i2asum(fname, sec) !'i2a.nc', 0) + +! read ice to atm coupling fields from restart file, and send to atm module + +implicit none +character*(*), intent(in) :: fname +integer :: sec + +integer(kind=int_kind) :: ncid +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_restart_i2asum) reading in i2a fields......' 
+ endif + call ice_open_nc(fname, ncid) + call ice_read_nc(ncid, 1, 'maicen1', maicen(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'maicen2', maicen(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'maicen3', maicen(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'maicen4', maicen(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'maicen5', maicen(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'msnown1', msnown(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'msnown2', msnown(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'msnown3', msnown(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'msnown4', msnown(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'msnown5', msnown(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn1', mthikn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn2', mthikn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn3', mthikn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn4', mthikn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn5', mthikn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'msst', msst, dbug) + call ice_read_nc(ncid, 1, 'mssu', mssu, dbug) + call ice_read_nc(ncid, 1, 'mssv', mssv, dbug) + call ice_read_nc(ncid, 1, 'muvel', muvel, dbug) + call ice_read_nc(ncid, 1, 'mvvel', mvvel, dbug) + call ice_read_nc(ncid, 1, 'maiu', maiu, dbug) + if (my_task == master_task) then + call ice_close_nc(ncid) + write(il_out,*) '(read_restart_i2asum) has read in 21 i2a fields.' + endif + +else + if (my_task==0) then + write(il_out,*) 'ERROR: (read_restart_i2asum) not found file *** ',fname + endif + print *, 'CICE: (read_restart_i2asum) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2a data file.') +endif +end subroutine read_restart_i2asum + +!============================================================================== +subroutine put_restart_i2a(fname, sec) +! call this subroutine after called get_restart_oi2 +! it uses ocn_sst etc to calculate average ocn fields which will be used to send +!to atm at the 1st step of continue run, because the ocn_sst cannot be sent to ice at the end of last run. +! average ice fields (done at end of last run) are ready by calling read_restart_i2asum() +! +implicit none + +character*(*), intent(in) :: fname +integer :: sec + + if ( file_exist('i2a.nc') ) then + write(il_out,*)' calling read_restart_i2a at time_sec = ',sec + call read_restart_i2a('i2a.nc', sec) + endif + if ( file_exist('i2asum.nc') ) then + write(il_out,*)' calling read_restart_i2asum at time_sec = ',sec + call read_restart_i2asum('i2asum.nc', sec) + + write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',sec + call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling + write(il_out,*) ' calling get_i2a_fields at time_sec =', sec + call get_i2a_fields + endif + +end subroutine put_restart_i2a + +!=============================================================================== +subroutine get_restart_o2i(fname) + +! To be called at beginning of each run trunk to read in restart o2i fields + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_o2i +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_restart_o2i) reading in o2i fields......' 
+ endif + call ice_open_nc(fname, ncid_o2i) + call ice_read_nc(ncid_o2i, 1, 'sst_i', ocn_sst, dbug) + call ice_read_nc(ncid_o2i, 1, 'sss_i', ocn_sss, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssu_i', ocn_ssu, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssv_i', ocn_ssv, dbug) + call ice_read_nc(ncid_o2i, 1, 'sslx_i', ocn_sslx, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssly_i', ocn_ssly, dbug) + call ice_read_nc(ncid_o2i, 1, 'pfmice_i', ocn_pfmice, dbug) + call ice_read_nc(ncid_o2i, 1, 'co2_oi', ocn_co2, dbug) + call ice_read_nc(ncid_o2i, 1, 'co2fx_oi', ocn_co2fx, dbug) + if (my_task == master_task) then + call ice_close_nc(ncid_o2i) + write(il_out,*) '(get_restart_o2i) has read in 7 o2i fields.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_restart_o2i) not found file *** ',fname + endif + print *, 'CICE: (get_restart_o2i) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 o2i data file.') +endif + +return +end subroutine get_restart_o2i + +!=============================================================================== +subroutine get_restart_mice(fname) + +! Called at beginning of the run to get 'last' IO cpl int T-M ice variables +! which are used together with the first received i2a fields to obtain the first +! i2o fields sent to ocn immediately as the 1st io cpl int forcing there. + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_o2i +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_restart_mice) reading in mice variables......' + endif + call ice_open_nc(fname, ncid_o2i) + call ice_read_nc(ncid_o2i, 1, 'maice', maice, dbug) + call ice_read_nc(ncid_o2i, 1, 'mstrocnxT', mstrocnxT, dbug) + call ice_read_nc(ncid_o2i, 1, 'mstrocnyT', mstrocnyT, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfresh', mfresh, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfsalt', mfsalt, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfhocn', mfhocn, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfswthru', mfswthru, dbug) + call ice_read_nc(ncid_o2i, 1, 'msicemass', msicemass, dbug) + + if (my_task == master_task) then + call ice_close_nc(ncid_o2i) + write(il_out,*) '(get_restart_mice) has read in 8 T-M variables.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_restart_mice) not found file *** ',fname + endif + print *, 'CICE: (get_restart_mice) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 mice data file.') +endif + +return +end subroutine get_restart_mice + +!=============================================================================== +subroutine get_restart_i2o(fname) + +! To be called at beginning of each run trunk to read in restart i2o fields + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_i2o, jf, jfs +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_time0_i2o_fields) reading in i2o fields......' 
+ endif + call ice_open_nc(fname, ncid_i2o) + do jf = nsend_i2a + 1, jpfldout + call ice_read_nc(ncid_i2o, 1, cl_writ(jf) , vwork, dbug) + select case(trim(cl_writ(jf))) + case ('strsu_io'); io_strsu = vwork + case ('strsv_io'); io_strsv = vwork + case ('rain_io'); io_rain = vwork + case ('snow_io'); io_snow = vwork + case ('stflx_io'); io_stflx = vwork + case ('htflx_io'); io_htflx = vwork + case ('swflx_io'); io_swflx = vwork + case ('qflux_io'); io_qflux = vwork + case ('shflx_io'); io_shflx = vwork + case ('lwflx_io'); io_lwflx = vwork + case ('runof_io'); io_runof = vwork + case ('press_io'); io_press = vwork + case ('aice_io'); io_aice = vwork + case ('melt_io'); io_melt = vwork + case ('form_io'); io_form = vwork + case ('co2_i1'); io_co2 = vwork + case ('wnd_i1'); io_wnd = vwork + end select + enddo + if (my_task == master_task) then + call ice_close_nc(ncid_i2o) + write(il_out,*) '(get_time0_i2o_fields) has read in 11 i2o fields.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_time0_i2o_fields) not found file *** ',fname + endif + print *, 'CICE: (get_time0_i2o_fields_old) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2o data file.') +endif + +return +end subroutine get_restart_i2o + +!=============================================================================== +subroutine set_sbc_ice +! +! Set coupling fields (in units of GMB, from UM and MOM4) needed for CICE +! +! Adapted from "subroutine cice_sbc_in" of HadGem3 Nemo "MODULE sbcice_cice" +! for the "nsbc = 5" case. +! +! It should be called after calling "from_atm" and "from_ocn". +!------------------------------------------------------------------------------- + +implicit none + +integer :: i,j,k,cat + +!*** Fields from UM (all on T cell center): + +!(1) windstress taux: +strax = um_taux * maice !*tmask + +!(2) windstress tauy: +stray = um_tauy * maice !*tmask + +!(3) surface downward latent heat flux (==> multi-category) +do j = 1, ny_block +do i = 1, nx_block + do k = 1, nblocks + if (maice(i,j,k)==0.0) then + do cat = 1, ncat + flatn_f(i,j,cat,k) = 0.0 + enddo + ! This will then be conserved in CICE (done in sfcflux_to_ocn) + flatn_f(i,j,1,k) = um_lhflx(i,j,k) + else + do cat = 1, ncat + !!!B: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * maicen(i,j,cat,k)/maice(i,j,k) + flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub !!!* maicen(i,j,cat,k)/maice(i,j,k) + enddo + endif + enddo +enddo +enddo + +! GBM conductive flux through ice: +!(4-8) top melting; (9-13) bottom belting ==> surface heatflux +do cat = 1, ncat + fcondtopn_f(:,:,cat,:) = um_bmlt(:,:,cat,:) + fsurfn_f (:,:,cat,:) = um_tmlt(:,:,cat,:) + um_bmlt(:,:,cat,:) +enddo + +!(14) snowfall +fsnow = max(maice * um_snow, 0.0) + +!(15) rainfall +frain = max(maice * um_rain, 0.0) + +!*** Fields from MOM4 (SSU/V and sslx/y are on U points): + +!(1) freezing/melting potential +frzmlt = ocn_pfmice +if (limit_icemelt) then + frzmlt(:,:,:) = max(frzmlt(:,:,:), meltlimit) +endif + +!(2) SST +!make sure SST is 'all right' K==>C +sst = ocn_sst +if (maxval(sst).gt.200) then + sst = sst -273.15 +endif + +!(3) SSS +sss = ocn_sss + +!(4) SSU +uocn = ocn_ssu + +!(5) SSV +vocn = ocn_ssv + +!(6) surface slope sslx +ss_tltx = ocn_sslx + +!(7) surface slope ssly +ss_tlty = ocn_ssly + +!B: =========== may use different formula for Tf =============== +!(as per S.O.) make sure Tf if properly initialized +Tf (:,:,:) = -depressT*sss(:,:,:) ! 
freezing temp (C) +!=============================================================== + +end subroutine set_sbc_ice + +!=============================================================================== +subroutine get_sbc_ice +! +! ** Purpose: set GBM coupling fields (from UM and MOM4) needed for CICE +! +! Adapted from "subroutine cice_sbc_in" of HadGem3 Nemo "MODULE sbcice_cice" +! for the "nsbc = 5" case. +! +! It should be called after calling "from_atm" and "from_ocn". +! +! *** This routine is used/called within ice time loop (itap) +! *** in case "set_sbc_ice" call (outside the itap loop) fails +! *** which is unfortunately the case the moment (Jan2010) ! +!------------------------------------------------------------------------------- + +implicit none + +integer :: i,j,k,cat + +! Fields from UM (all on T cell center): + +!(1) windstress taux: +strax = um_taux * aice !*tmask ? + +!(2) windstress tauy: +stray = um_tauy * aice !*tmask ? + +!(3) surface downward latent heat flux (==> multi_category) +!BX: where is flatn_f "used" in CICE? +do j = 1, ny_block +do i = 1, nx_block + do k = 1, nblocks + if (aice(i,j,k)==0.0) then + do cat = 1, ncat + flatn_f(i,j,cat,k) = 0.0 + enddo + ! This will then be conserved in CICE (done in sfcflux_to_ocn) + flatn_f(i,j,1,k) = um_lhflx(i,j,k) + else + do cat = 1, ncat + !!!BX: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * aicen(i,j,cat,k)/aice(i,j,k) + !!! Double check "Lsub" used here !!! + flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub !!!*aicen(i,j,cat,k)/aice(i,j,k) + enddo + endif + enddo +enddo +enddo + +! GBM conductive flux through ice: +!(4-8) top melting; (9-13) bottom belting ==> surface heatflux +do cat = 1, ncat + fcondtopn_f(:,:,cat,:) = um_bmlt(:,:,cat,:) + fsurfn_f (:,:,cat,:) = um_tmlt(:,:,cat,:) + um_bmlt(:,:,cat,:) +enddo + +!!! 20130419: Martin Dix's investigation suggests that frain and fsnow should NOT be scaled by +!!! aice here. This scaling would caused double-scaling with "fresh" calculation.. +!(14) snowfall +!!!fsnow = max(aice * um_snow,0.0) +!fsnow = max(um_snow,0.0) !no more scaling as per M.D.! +!(15) rainfall +!!!frain = max(aice * um_rain,0.0) +!frain = max(um_rain,0.0) !no more scaling as per M.D.! +!!! 20130420: I dug deeper and checked all the associated steps of "fresh" calculation, found +!!! the original weighting is CORRECT! so back to *aice: +fsnow = max(aice * um_snow,0.0) +frain = max(aice * um_rain,0.0) +!!!------------------------------------------------------------------------------------------ + +! Fields from MOM4 (SSU/V and sslx/y are on U points): + +!(1) freezing/melting potential +frzmlt = ocn_pfmice +!20080312: set maximum melting htflux allowed from ocn, (eg, -200 W/m^2) +! the artificial "meltlimit = -200 " is read in from input_ice.nml +!20090320: set option 'limit_icemelt' in case no limit needed if cice behaves! +if (limit_icemelt) then + frzmlt(:,:,:) = max(frzmlt(:,:,:), meltlimit) +endif + +!(2) SST +sst = ocn_sst -273.15 + +!(3) SSS +sss = ocn_sss + +!(4) SSU +uocn = ocn_ssu + +!(5) SSV +vocn = ocn_ssv +!(6) surface slope sslx + +ss_tltx = ocn_sslx + +!(7) surface slope ssly +ss_tlty = ocn_ssly + +! * (as per S. O'Farrel) make sure Tf if properly initialized +sss = ocn_sss +Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) + +end subroutine get_sbc_ice + +!=============================================================================== +subroutine save_restart_o2i(fname, nstep) + +! output the last o2i forcing data received in cice by the end of the run, +! 
to be read in at the beginning of next run by cice + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) + call write_nc_1Dtime(real(nstep), 1, 'time', ncid) +endif + +do jf = nrecv_a2i + 1, jpfldin + + select case (trim(cl_read(jf))) + case('sst_i'); vwork = ocn_sst + case('sss_i'); vwork = ocn_sss + case('ssu_i'); vwork = ocn_ssu + case('ssv_i'); vwork = ocn_ssv + case('sslx_i'); vwork = ocn_sslx + case('ssly_i'); vwork = ocn_ssly + case('pfmice_i'); vwork = ocn_pfmice + case('co2_oi'); vwork = ocn_co2 + case('co2fx_oi'); vwork = ocn_co2fx + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + if (my_task == 0) then + call write_nc2D(ncid, cl_read(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +return +end subroutine save_restart_o2i + +!============================================================================== +subroutine save_restart_i2asum(fname, nstep) +! output the last i2a forcing data in cice at the end of the run, +! to be read in at the beginning of next run by cice and sent to atm + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +integer(kind=int_kind), parameter :: sumfldin = 21 +character(len=8), dimension(sumfldin) :: sumfld + +sumfld(1)='msst' +sumfld(2)='mssu' +sumfld(3)='mssv' +sumfld(4)='muvel' +sumfld(5)='mvvel' +sumfld(6)='maiu' +sumfld(7)='maicen1' +sumfld(8)='maicen2' +sumfld(9)='maicen3' +sumfld(10)='maicen4' +sumfld(11)='maicen5' +sumfld(12)='mthikn1' +sumfld(13)='mthikn2' +sumfld(14)='mthikn3' +sumfld(15)='mthikn4' +sumfld(16)='mthikn5' +sumfld(17)='msnown1' +sumfld(18)='msnown2' +sumfld(19)='msnown3' +sumfld(20)='msnown4' +sumfld(21)='msnown5' + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) +endif + +do jf = 1, sumfldin + select case (trim(sumfld(jf))) + case('msst'); vwork = msst + case('mssu'); vwork = mssu + case('mssv'); vwork = mssv + case('muvel'); vwork = muvel + case('mvvel'); vwork = mvvel + case('maiu'); vwork = maiu + case('maicen1'); vwork = maicen(:,:,1,:) + case('maicen2'); vwork =maicen(:,:,2,:) + case('maicen3'); vwork =maicen(:,:,3,:) + case('maicen4'); vwork =maicen(:,:,4,:) + case('maicen5'); vwork =maicen(:,:,5,:) + case('mthikn1'); vwork =mthikn(:,:,1,:) + case('mthikn2'); vwork =mthikn(:,:,2,:) + case('mthikn3'); vwork =mthikn(:,:,3,:) + case('mthikn4'); vwork =mthikn(:,:,4,:) + case('mthikn5'); vwork =mthikn(:,:,5,:) + case('msnown1'); vwork =msnown(:,:,1,:) + case('msnown2'); vwork =msnown(:,:,2,:) + case('msnown3'); vwork =msnown(:,:,3,:) + case('msnown4'); vwork =msnown(:,:,4,:) + case('msnown5'); vwork =msnown(:,:,5,:) + end select + call gather_global(gwork, vwork, master_task, distrb_info) + if (my_task == 0) then + call write_nc2D(ncid, sumfld(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +end subroutine save_restart_i2asum + +!=============================================================================== +subroutine save_restart_mice(fname, nstep) + +! output ice variable averaged over the last IO cpl int of this run, +! 
cice reads in these vars at the beginning of next run, uses them with the first +! received a2i fields to obtain the first i2o fields to be sent to ocn + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) + call write_nc_1Dtime(real(nstep), 1, 'time', ncid) +endif + +vwork = maice +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maice', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mstrocnxT +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnxT', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mstrocnyT +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnyT', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfresh +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfresh', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfsalt +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfsalt', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfhocn +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfhocn', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfswthru +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfswthru', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = msicemass +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'msicemass', gwork, 2, il_im, il_jm, 1, ilout=il_out) + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +return +end subroutine save_restart_mice + +!=============================================================================== +subroutine get_i2a_fields + +implicit none + +! all fields (except for vector) obtained here are all on T cell center + +!(1) ocean surface temperature +ia_sst(:,:,:) = msst(:,:,:) + +!(2-3) ice/ocn combined surface velocity +!CH: should use "aiu", not aice! +ia_uvel(:,:,:) = mssu(:,:,:) * (1. - maiu(:,:,:)) + muvel(:,:,:) * maiu(:,:,:) +ia_vvel(:,:,:) = mssv(:,:,:) * (1. - maiu(:,:,:)) + mvvel(:,:,:) * maiu(:,:,:) + +!(4-8) ice concentration +ia_aicen(:,:,:,:) = maicen(:,:,:,:) + +!(9-13) ice thickness +ia_thikn(:,:,:,:) = mthikn(:,:,:,:) + +!(14-18) snow thickness +ia_snown(:,:,:,:) = msnown(:,:,:,:) + +!(19-20) co2 flux stuff +ia_co2 = mco2 +ia_co2fx = mco2fx + +!(21) ocean surface freezing temperature +ia_sstfz(:,:,:) = msstfz(:,:,:) + +!(22-26) first order ice concentration +ia_foifr(:,:,:,:) = mfoifr(:,:,:,:) + +!(27-31) ice top layer temperature +ia_itopt(:,:,:,:) = mitopt(:,:,:,:) + +!(32-36) ice top layer effective conductivity +ia_itopk(:,:,:,:) = mitopk(:,:,:,:) + +!(37-41) ice melt pond concentration +ia_pndfn(:,:,:,:) = mpndfn(:,:,:,:) + +!(42-46) ice melt pond thickness +ia_pndtn(:,:,:,:) = mpndtn(:,:,:,:) + +return +end subroutine get_i2a_fields + +!=============================================================================== +subroutine get_i2o_fields + +! All fluxes should be in GBM units before passing into coupler. +! e.g., io_htflx(:,:,:) = fhocn_gbm(:,:,:) + +implicit none + +real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: pice + +! 
Fields obtained here are all at T cell center. before being sent to MOM4, vector +! (Taux, Tauy) should be shifted on to U point as required +!------------------------------------------------------------------------------- + +!(1-2) air/ice - sea stress TAUX/TAUY +! Caution: in nemo, "strocnx/y" are NOT weighted by aice here, 'cos strocnx/y +! have already been 'weighted' using aice (when calculated in "evp_finish". +! But, this weight has been removed in strocnx/yT (see "evp_finish"), therfore +! we need put it on again here. +io_strsu = um_taux * (1. - maice) - mstrocnxT * maice +io_strsv = um_tauy * (1. - maice) - mstrocnyT * maice + +!(3) freshwater flux to ocean: rainfall (+ ice melting water flux ?) +io_rain = um_rain * (1. - maice) +!!CH: confirmed: +!!if (ice_fwflux) io_rain = io_rain + mfresh !always .t. +!!NOTE mfresh is now splitted into melt (14) and form (15) and passed into ocn seperately. + +!(4) freshwater flux to ocean: snowfall +io_snow = um_snow * (1. - maice) + +!(5) salt flux to ocean +io_stflx = mfsalt + +!(6) ice/snow melting heatflux into ocean +io_htflx = mfhocn + +!(7) short wave radiation +!(CH: the (1-aice) weight should not be here 'cos all fluxes passed in from +! UM have already been aice-weighted when they are calculated there!!!) +!io_swflx = um_swflx * (1. - maice) + mfswthru +io_swflx = um_swflx + mfswthru +!!!20100616: test for more swflx +!!!io_swflx = 1.064 * um_swflx + mfswthru + +!(8) latent heat flux (positive out of ocean as required by MOM4) +io_qflux = um_evap * Lvap !Note it's already weighted in UM for open sea. +!20101210: NOT sure about the ice weghting in UM, 'cos the ice area does see +! non-zero (positive) evap. +!if (imsk_evap) then +! io_qflux = um_evap * Lvap * (1. - maice) +!endif + +!(9) sensible heat flux (positive out of ocean as required by MOM4) +!io_shflx = um_shflx * (1. - maice) +io_shflx = um_shflx + +!(10) net long wave radiation positive down +!io_lwflx = um_lwflx * (1. - maice) +io_lwflx = um_lwflx + +!(11) runoff (!check the incoming field! pattern? remapping ok? conserved? ...) +io_runof = um_runoff +! CHECK with SM about the annual cycle of core-runoff! (we only have annual mean) + +!(12) pressure +pice = gravit * msicemass +!---------------------------------------------------------------------------- +!sicemass = rho_ice x hi + rho_snow x hs (in m) +! +! Should we set limit to the ovelying ice pressure as suggested in MOM4 code? +!(see ocean_sbc.F90) if yes, we may use following +!pice(i,j) = min(pice(i,j), gravit*rhow*max_ice_thickness) +! (note rhow = 1026 kg/m^3 here, but mom4 instead uses rho0 = 1035 kg/m^3) +! No, let mom4 handle it (see ocean_sbc.F90) +! +!as GFDL SIS, we use patm 'anormaly' and then add in the ice/snow pressure! +!29/11/2007 +!---------------------------------------------------------------------------- +if (ice_pressure_on) then + io_press = pice * maice +endif +if (air_pressure_on) then + !as GFDL SIS, we use patm anormaly, i.e., taking off 1.e5 Pa ! + io_press(:,:,:) = io_press(:,:,:) + um_press(:,:,:) - 1.0e5 +endif +!(13) ice concentration +io_aice = maice +!(14) ice melt fwflux +io_melt = max(0.0,mfresh(:,:,:)) +!(15) ice form fwflux +io_form = min(0.0,mfresh(:,:,:)) + +io_co2 = um_co2 +io_wnd = um_wnd + +return +end subroutine get_i2o_fields + +!=============================================================================== +subroutine initialize_mice_fields_4_i2o + +implicit none + +maice = 0. +mstrocnxT = 0. +mstrocnyT = 0. +mfresh = 0. +mfsalt = 0. +mfhocn = 0. +mfswthru = 0. 
+msicemass = 0. + +return +end subroutine initialize_mice_fields_4_i2o + +!=============================================================================== +subroutine initialize_mice_fields_4_i2a + +implicit none + +muvel = 0. +mvvel = 0. + +maiu = 0. +maicen = 0. +mthikn = 0. +msnown = 0. + +mfoifr = 0. +mitopt = 0. +mitopk = 0. +mpndfn = 0. +mpndtn = 0. + +return +end subroutine initialize_mice_fields_4_i2a + +!=============================================================================== +subroutine initialize_mocn_fields_4_i2a + +implicit none + +msst = 0. +mssu = 0. +mssv = 0. +mco2 = 0. +mco2fx = 0. +msstfz = 0. + +return +end subroutine initialize_mocn_fields_4_i2a + +!=============================================================================== +subroutine time_average_ocn_fields_4_i2a + +implicit none + +msst(:,:,:) = msst(:,:,:) + ocn_sst(:,:,:) * coef_cpl +mssu(:,:,:) = mssu(:,:,:) + ocn_ssu(:,:,:) * coef_cpl +mssv(:,:,:) = mssv(:,:,:) + ocn_ssv(:,:,:) * coef_cpl +mco2(:,:,:) = mco2(:,:,:) + ocn_co2(:,:,:) * coef_cpl +mco2fx(:,:,:) = mco2fx(:,:,:) + ocn_co2fx(:,:,:) * coef_cpl +msstfz(:,:,:) = msstfz(:,:,:) + Tf(:,:,:) * coef_cpl + +return +end subroutine time_average_ocn_fields_4_i2a + +!=============================================================================== +!dhb599-20131002: resuming the old 'approach' (used before 20130420) which sets +!do_scale_fluxes = .t. and thus the "flatn_f/Lsub" terms is NOT used as part of +!'fresh' and passed into ocean...since the 'evaporation out of ice surface' is +!going into atmosphere, not supposed to change the ocean water volume! +!------------------------------------------------------------------------------- + +subroutine time_average_fields_4_i2o + +implicit none + +maice(:,:,:) = maice(:,:,:) + aice(:,:,:) * coef_io +mstrocnxT(:,:,:) = mstrocnxT(:,:,:) + strocnxT(:,:,:) * coef_io +mstrocnyT(:,:,:) = mstrocnyT(:,:,:) + strocnyT(:,:,:) * coef_io +!20130420: possible bug due to missing term "flatn_f/Lsub" in the last update for fresh +! use scale_fluxes=.f. to avoid flux scaling by /aice +! meaning fluxes are all grid-box-mean by the end of ice_step. +!mfresh(:,:,:) = mfresh(:,:,:) + fresh_gbm(:,:,:) * coef_io +!mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt_gbm(:,:,:) * coef_io +!mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn_gbm(:,:,:) * coef_io +!mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru_gbm(:,:,:) * coef_io +mfresh(:,:,:) = mfresh(:,:,:) + fresh(:,:,:) * coef_io +mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt(:,:,:) * coef_io +mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn(:,:,:) * coef_io +mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru(:,:,:) * coef_io +!--------------------------------------------------------------------------------------- +!--------------------------------------------------------------------------------------- +msicemass(:,:,:) = msicemass(:,:,:) + sicemass(:,:,:) * coef_io + +return +end subroutine time_average_fields_4_i2o + +!=============================================================================== +subroutine time_average_fields_4_i2o_20130420 + +implicit none + +maice(:,:,:) = maice(:,:,:) + aice(:,:,:) * coef_io +mstrocnxT(:,:,:) = mstrocnxT(:,:,:) + strocnxT(:,:,:) * coef_io +mstrocnyT(:,:,:) = mstrocnyT(:,:,:) + strocnyT(:,:,:) * coef_io +!20130420: possible bug due to missing term "flatn_f/Lsub" in the last update +!for fresh +! use scale_fluxes=.f. to avoid flux scaling by /aice +! meaning fluxes are all grid-box-mean by the end of ice_step. 
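Editorial aside: the time_average_* routines above add the instantaneous CICE fields into the m* buffers once per ice step, so the buffers hold a coupling-interval mean by the time the i2o/i2a fields are assembled. The sketch below shows that accumulate-then-reset pattern; it assumes the weight (coef_io/coef_ia) equals 1/N for N ice steps per coupling interval, which is how it appears to be used here, but the actual values are set elsewhere in the coupling code and are not shown in this patch.

program coupling_mean_sketch
  implicit none
  integer, parameter :: nsteps_per_cpl = 4          ! assumed ice steps per coupling interval
  real    :: coef, inst_field, mean_field
  integer :: istep

  coef = 1.0 / real(nsteps_per_cpl)                 ! stand-in for coef_io / coef_ia
  mean_field = 0.0                                  ! what the initialize_* routines do

  do istep = 1, nsteps_per_cpl
    inst_field = real(istep)                        ! stand-in for aice, fresh, fhocn, ...
    mean_field = mean_field + inst_field * coef     ! what the time_average_* routines do
  end do

  print *, 'coupling-interval mean sent to the other model: ', mean_field
  mean_field = 0.0                                  ! reset before the next interval
end program coupling_mean_sketch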
+!mfresh(:,:,:) = mfresh(:,:,:) + fresh_gbm(:,:,:) * coef_io
+!mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt_gbm(:,:,:) * coef_io
+!mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn_gbm(:,:,:) * coef_io
+!mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru_gbm(:,:,:) * coef_io
+mfresh(:,:,:) = mfresh(:,:,:) + fresh(:,:,:) * coef_io
+mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt(:,:,:) * coef_io
+mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn(:,:,:) * coef_io
+mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru(:,:,:) * coef_io
+!---------------------------------------------------------------------------------------
+msicemass(:,:,:) = msicemass(:,:,:) + sicemass(:,:,:) * coef_io
+
+return
+end subroutine time_average_fields_4_i2o_20130420
+
+!===============================================================================
+subroutine time_average_fields_4_i2a
+
+implicit none
+
+! ice fields:
+muvel(:,:,:) = muvel(:,:,:) + uvel(:,:,:) * coef_ia
+mvvel(:,:,:) = mvvel(:,:,:) + vvel(:,:,:) * coef_ia
+maicen(:,:,:,:) = maicen(:,:,:,:) + aicen(:,:,:,:) * coef_ia !T cat. ice concentration
+mthikn(:,:,:,:) = mthikn(:,:,:,:) + vicen(:,:,:,:) * coef_ia !T cat. ice thickness
+msnown(:,:,:,:) = msnown(:,:,:,:) + vsnon(:,:,:,:) * coef_ia !T cat. snow thickness
+
+call to_ugrid(aice, aiiu)
+maiu(:,:,:) = maiu(:,:,:) + aiiu(:,:,:) * coef_ia !U cell ice concentration
+
+mfoifr(:,:,:,:) = mfoifr(:,:,:,:) + aicen(:,:,:,:) * coef_ia
+mitopt(:,:,:,:) = mitopt(:,:,:,:) + Tn_top(:,:,:,:) * coef_ia
+mitopk(:,:,:,:) = mitopk(:,:,:,:) + keffn_top(:,:,:,:) * coef_ia
+mpndfn(:,:,:,:) = mpndfn(:,:,:,:) + apeffn(:,:,:,:) * coef_ia
+mpndtn(:,:,:,:) = mpndtn(:,:,:,:) + trcrn(:,:,:,:) * coef_ia
+
+
+!ocn fields:
+!must be done after calling from_ocn so as to get the most recently updated ocn fields,
+!therefore a separate call to "time_average_ocn_fields_4_i2a" is done for this purpose.
+
+return
+end subroutine time_average_fields_4_i2a
+
+!===============================================================================
+subroutine check_i2a_fields(nstep)
+
+implicit none
+
+integer(kind=int_kind), intent(in) :: nstep
+integer(kind=int_kind) :: ilout, ll, jf
+integer(kind=int_kind), save :: ncid,currstep
+data currstep/0/
+
+currstep=currstep+1
+
+if (my_task == 0 .and. .not. file_exist('fields_i2a_in_ice.nc') ) then
+  call create_ncfile('fields_i2a_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out)
+endif
+
+if (my_task == 0) then
+  write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', nstep
+  call ncheck( nf_open('fields_i2a_in_ice.nc',nf_write,ncid) )
+  call write_nc_1Dtime(real(nstep),currstep,'time',ncid)
+end if
+
+do jf = 1, nsend_i2a
+
+  select case(trim(cl_writ(jf)))
+  case('isst_ia');  vwork = ia_sst
+  case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:)
+  case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:)
+  case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:)
+  case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:)
+  case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:)
+  case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:)
+  case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:)
+  case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:)
+  case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:)
+  case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:)
+  case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:)
+  case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:)
+  case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:)
+  case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:)
+  case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:)
+  case('uvel_ia');  vwork = ia_uvel
+  case('vvel_ia');  vwork = ia_vvel
+  case('co2_i2');   vwork = ia_co2
+  case('co2fx_i2'); vwork = ia_co2fx
+  end select
+
+  call gather_global(gwork, vwork, master_task, distrb_info)
+
+  if (my_task == 0 ) then
+    call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out)
+  endif
+
+enddo
+
+if (my_task == 0) call ncheck(nf_close(ncid))
+
+return
+end subroutine check_i2a_fields
+
+!============================================================================
+subroutine check_a2i_fields(nstep)
+
+implicit none
+
+integer(kind=int_kind), intent(in) :: nstep
+character*80 :: ncfile='fields_a2i_in_ice_2.nc'
+integer(kind=int_kind) :: ncid, currstep, ll, ilout, jf
+data currstep/0/
+save currstep
+
+currstep=currstep+1
+
+if ( my_task == 0 .and. .not.
file_exist(trim(ncfile)) ) then + call create_ncfile(trim(ncfile),ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file ', trim(ncfile), ' at nstep = ', nstep + call ncheck( nf_open(trim(ncfile),nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = 1, nrecv_a2i + + select case (trim(cl_read(jf))) + case ('thflx_i'); vwork = um_thflx + case ('pswflx_i'); vwork = um_pswflx + case ('runoff_i'); vwork = um_runoff + case ('wme_i'); vwork = um_wme + case ('rain_i'); vwork = um_rain + case ('snow_i'); vwork = um_snow + case ('evap_i'); vwork = um_evap + case ('lhflx_i'); vwork = um_lhflx + case ('tmlt01'); vwork(:,:,:) = um_tmlt(:,:,1,:) + case ('tmlt02'); vwork(:,:,:) = um_tmlt(:,:,2,:) + case ('tmlt03'); vwork(:,:,:) = um_tmlt(:,:,3,:) + case ('tmlt04'); vwork(:,:,:) = um_tmlt(:,:,4,:) + case ('tmlt05'); vwork(:,:,:) = um_tmlt(:,:,5,:) + case ('bmlt01'); vwork(:,:,:) = um_tmlt(:,:,1,:) + case ('bmlt02'); vwork(:,:,:) = um_tmlt(:,:,2,:) + case ('bmlt03'); vwork(:,:,:) = um_tmlt(:,:,3,:) + case ('bmlt04'); vwork(:,:,:) = um_tmlt(:,:,4,:) + case ('bmlt05'); vwork(:,:,:) = um_tmlt(:,:,5,:) + case ('taux_i'); vwork = um_taux + case ('tauy_i'); vwork = um_tauy + case ('swflx_i'); vwork = um_swflx + case ('lwflx_i'); vwork = um_lwflx + case ('shflx_i'); vwork = um_shflx + case ('press_i'); vwork = um_press + case ('co2_ai'); vwork = um_co2 + case ('wnd_ai'); vwork = um_wnd + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_a2i_fields + +!============================================================================ +subroutine check_i2o_fields(nstep, scale) + +implicit none + +integer(kind=int_kind), intent(in) :: nstep +real, intent(in) :: scale +integer(kind=int_kind) :: ncid, currstep, ll, ilout, jf +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist('fields_i2o_in_ice.nc') ) then + call create_ncfile('fields_i2o_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file fields_i2o_in_ice.nc at nstep = ', nstep + call ncheck( nf_open('fields_i2o_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = nsend_i2a + 1, jpfldout + + select case(trim(cl_writ(jf))) + case('strsu_io') + vwork = scale * io_strsu + case('strsv_io') + vwork = scale * io_strsv + case('rain_io') + vwork = scale * io_rain + case('snow_io') + vwork = scale * io_snow + case('stflx_io') + vwork = scale * io_stflx + case('htflx_io') + vwork = scale * io_htflx + case('swflx_io') + vwork = scale * io_swflx + case('qflux_io') + vwork = scale * io_qflux + case('shflx_io') + vwork = scale * io_shflx + case('lwflx_io') + vwork = scale * io_lwflx + case('runof_io') + vwork = scale * io_runof + case('press_io') + vwork = scale * io_press + case('aice_io') + vwork = scale * io_aice + case('form_io') + vwork = scale * io_form + case('melt_io') + vwork = scale * io_melt + case('co2_i1') + vwork = scale * io_co2 + case('wnd_i1') + vwork = scale * io_wnd + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0 ) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_i2o_fields + +!============================================================================ +subroutine check_o2i_fields(nstep) + +implicit none + +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid, currstep, ilout, ll, jf +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist('fields_o2i_in_ice.nc') ) then + call create_ncfile('fields_o2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file fields_o2i_in_ice.nc at nstep = ', nstep + call ncheck( nf_open('fields_o2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = nrecv_a2i + 1, jpfldin + + select case (trim(cl_read(jf))) + case ('sst_i'); vwork = ocn_sst + case ('sss_i'); vwork = ocn_sss + case ('ssu_i'); vwork = ocn_ssu + case ('ssv_i'); vwork = ocn_ssv + case ('sslx_i'); vwork = ocn_sslx + case ('ssly_i'); vwork = ocn_ssly + case ('pfmice_i'); vwork = ocn_pfmice + case ('co2_oi'); vwork = ocn_co2 + case ('co2fx_oi'); vwork = ocn_co2fx + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_o2i_fields + +!============================================================================ +subroutine check_frzmlt_sst(ncfilenm) + +!this is (mainly) used to check cice solo run frzmlt and sst ! +! (for comparison against a coupled run forcing into cice) + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, sst, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sst', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, frzmlt, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'frzmlt', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_frzmlt_sst + +!============================================================================ +subroutine check_sstsss(ncfilenm) + +!this is used to check cice sst/sss : temporary use (20091019) + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, sst, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sst', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, sss, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sss', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_sstsss + + +!============================================================================ +function file_exist (file_name) +! +character(len=*), intent(in) :: file_name +logical file_exist + +file_exist = .false. +if (len_trim(file_name) == 0) return +if (file_name(1:1) == ' ') return + +inquire (file=trim(file_name), exist=file_exist) + +end function file_exist + +!============================================================================ + +end module cpl_forcing_handler diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 new file mode 100644 index 00000000..1174afb3 --- /dev/null +++ b/io_netcdf/ice_history_write.F90 @@ -0,0 +1,1308 @@ +! SVN:$Id: ice_history_write.F90 567 2013-01-07 02:57:36Z eclare $ +!======================================================================= +! +! Writes history in netCDF format +! +! authors Tony Craig and Bruce Briegleb, NCAR +! Elizabeth C. Hunke and William H. Lipscomb, LANL +! C. M. Bitz, UW +! +! 2004 WHL: Block structure added +! 2006 ECH: Accepted some CCSM code into mainstream CICE +! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. +! Added histfreq_n and histfreq='h' options, removed histfreq='w' +! Converted to free source form (F90) +! Added option for binary output instead of netCDF +! 2009 D Bailey and ECH: Generalized for multiple frequency output +! 2010 Alison McLaren and ECH: Added 3D capability +! 2013 ECH split from ice_history.F90 + + module ice_history_write + + implicit none + private + public :: ice_write_hist + save + +!======================================================================= + + contains + +!======================================================================= +! +! write average ice quantities or snapshots +! +! 
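Editorial aside: the check_* diagnostic routines above all follow the same create-once, append-one-record-per-call pattern: test file_exist, create the file with create_ncfile on the first call only, then reopen it and write record number currstep. The single-task sketch below shows that pattern using the standard netCDF-Fortran 90 API directly rather than the patch's wrapper routines; the file and variable names are illustrative and status checks are omitted for brevity.

program append_time_record_sketch
  use netcdf
  implicit none
  character(len=*), parameter :: fname = 'diag_records_sketch.nc'
  logical :: exists
  integer :: ncid, timid, varid, status, currstep

  currstep = 1                                  ! the patch increments this on every call

  ! first call only: create the file and define an unlimited time dimension
  inquire (file=fname, exist=exists)
  if (.not. exists) then
     status = nf90_create(fname, nf90_clobber, ncid)
     status = nf90_def_dim(ncid, 'time', nf90_unlimited, timid)
     status = nf90_def_var(ncid, 'time', nf90_float, (/timid/), varid)
     status = nf90_enddef(ncid)
     status = nf90_close(ncid)
  endif

  ! every call: reopen the file and append record number currstep
  status = nf90_open(fname, nf90_write, ncid)
  status = nf90_inq_varid(ncid, 'time', varid)
  status = nf90_put_var(ncid, varid, (/real(currstep)/), start=(/currstep/))
  status = nf90_close(ncid)
end program append_time_record_sketch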
author: Elizabeth C. Hunke, LANL + + subroutine ice_write_hist (ns) + + use ice_kinds_mod +#ifdef ncdf + use ice_blocks, only: nx_block, ny_block + use ice_broadcast, only: broadcast_scalar + use ice_calendar, only: time, sec, idate, idate0, write_ic, & + histfreq, dayyr, days_per_year, use_leap_years + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c360, secday, spval, rad_to_deg + use ice_domain, only: distrb_info + use ice_domain_size, only: nx_global, ny_global, max_nstrm, max_blocks + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: gather_global + use ice_grid, only: TLON, TLAT, ULON, ULAT, hm, bm, tarea, uarea, & + dxu, dxt, dyu, dyt, HTN, HTE, ANGLE, ANGLET, & + lont_bounds, latt_bounds, lonu_bounds, latu_bounds + use ice_history_shared + use ice_itd, only: hin_max + use ice_restart_shared, only: runid + use netcdf +#endif + + integer (kind=int_kind), intent(in) :: ns + + ! local variables + +#ifdef ncdf + real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 + real (kind=real_kind), dimension(:,:), allocatable :: work_gr + real (kind=real_kind), dimension(:,:,:), allocatable :: work_gr3 + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & + work1 + + integer (kind=int_kind) :: i,k,ic,n,nn, & + ncid,status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid,varid, & + nvertexid,ivertex + integer (kind=int_kind), dimension(3) :: dimid + integer (kind=int_kind), dimension(4) :: dimidz + integer (kind=int_kind), dimension(5) :: dimidcz + integer (kind=int_kind), dimension(3) :: dimid_nverts + integer (kind=int_kind), dimension(4) :: dimidex + real (kind=real_kind) :: ltime + character (char_len) :: title + character (char_len_long) :: ncfile(max_nstrm) + + integer (kind=int_kind) :: ind,boundid + + character (char_len) :: start_time,current_date,current_time + character (len=8) :: cdate + + ! 4 coordinate variables: TLON, TLAT, ULON, ULAT + INTEGER (kind=int_kind), PARAMETER :: ncoord = 4 + + ! 4 vertices in each grid cell + INTEGER (kind=int_kind), PARAMETER :: nverts = 4 + + ! 4 variables describe T, U grid boundaries: + ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds + INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 4 + + TYPE coord_attributes ! netcdf coordinate attributes + character (len=11) :: short_name + character (len=45) :: long_name + character (len=20) :: units + END TYPE coord_attributes + + TYPE req_attributes ! req'd netcdf attributes + type (coord_attributes) :: req + character (len=20) :: coordinates + END TYPE req_attributes + + TYPE(req_attributes), dimension(nvar) :: var + TYPE(coord_attributes), dimension(ncoord) :: coord_var + TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts + TYPE(coord_attributes), dimension(nvarz) :: var_nz + CHARACTER (char_len), dimension(ncoord) :: coord_bounds + + if (my_task == master_task) then + + ltime=time/int(secday) + + call construct_filename(ncfile(ns),'nc',ns) + + ! add local directory path name to ncfile + if (write_ic) then + ncfile(ns) = trim(incond_dir)//ncfile(ns) + else + ncfile(ns) = trim(history_dir)//ncfile(ns) + endif + + ! create file + status = nf90_create(ncfile(ns), nf90_clobber, ncid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error creating history ncfile '//ncfile(ns)) + + !----------------------------------------------------------------- + ! 
define dimensions + !----------------------------------------------------------------- + + if (hist_avg) then + status = nf90_def_dim(ncid,'d2',2,boundid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim d2') + endif + + status = nf90_def_dim(ncid,'ni',nx_global,imtid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim ni') + + status = nf90_def_dim(ncid,'nj',ny_global,jmtid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim nj') + + status = nf90_def_dim(ncid,'nc',ncat_hist,cmtid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim nc') + + status = nf90_def_dim(ncid,'nkice',nzilyr,kmtidi) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim nki') + + status = nf90_def_dim(ncid,'nksnow',nzslyr,kmtids) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim nks') + + status = nf90_def_dim(ncid,'nkbio',nzblyr,kmtidb) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim nkb') + + status = nf90_def_dim(ncid,'time',NF90_UNLIMITED,timid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim time') + + status = nf90_def_dim(ncid,'nvertices',nverts,nvertexid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining dim nverts') + + !----------------------------------------------------------------- + ! define coordinate variables + !----------------------------------------------------------------- + + status = nf90_def_var(ncid,'time',nf90_float,timid,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining var time') + + status = nf90_put_att(ncid,varid,'long_name','model time') + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: time long_name') + + write(cdate,'(i8.8)') idate0 + write(title,'(a,a,a,a,a,a,a,a)') 'days since ', & + cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' 00:00:00' + status = nf90_put_att(ncid,varid,'units',title) + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: time units') + + if (days_per_year == 360) then + status = nf90_put_att(ncid,varid,'calendar','360_day') + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: time calendar') + elseif (days_per_year == 365 .and. .not.use_leap_years ) then + status = nf90_put_att(ncid,varid,'calendar','NoLeap') + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: time calendar') + elseif (use_leap_years) then + status = nf90_put_att(ncid,varid,'calendar','Gregorian') + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: time calendar') + else + call abort_ice( 'ice Error: invalid calendar settings') + endif + + if (hist_avg) then + status = nf90_put_att(ncid,varid,'bounds','time_bounds') + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: time bounds') + endif + + !----------------------------------------------------------------- + ! 
Define attributes for time bounds if hist_avg is true + !----------------------------------------------------------------- + + if (hist_avg) then + dimid(1) = boundid + dimid(2) = timid + status = nf90_def_var(ncid,'time_bounds',nf90_float,dimid(1:2),varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining var time_bounds') + status = nf90_put_att(ncid,varid,'long_name', & + 'boundaries for time-averaging interval') + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: time_bounds long_name') + write(cdate,'(i8.8)') idate0 + write(title,'(a,a,a,a,a,a,a,a)') 'days since ', & + cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' 00:00:00' + status = nf90_put_att(ncid,varid,'units',title) + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: time_bounds units') + endif + + !----------------------------------------------------------------- + ! define information for required time-invariant variables + !----------------------------------------------------------------- + + ind = 0 + ind = ind + 1 + coord_var(ind) = coord_attributes('TLON', & + 'T grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lont_bounds' + ind = ind + 1 + coord_var(ind) = coord_attributes('TLAT', & + 'T grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latt_bounds' + ind = ind + 1 + coord_var(ind) = coord_attributes('ULON', & + 'U grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lonu_bounds' + ind = ind + 1 + coord_var(ind) = coord_attributes('ULAT', & + 'U grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latu_bounds' + + var_nz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') + var_nz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') + var_nz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') + var_nz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') + + !----------------------------------------------------------------- + ! define information for optional time-invariant variables + !----------------------------------------------------------------- + + var(n_tarea)%req = coord_attributes('tarea', & + 'area of T grid cells', 'm^2') + var(n_tarea)%coordinates = 'TLON TLAT' + var(n_uarea)%req = coord_attributes('uarea', & + 'area of U grid cells', 'm^2') + var(n_uarea)%coordinates = 'ULON ULAT' + var(n_dxt)%req = coord_attributes('dxt', & + 'T cell width through middle', 'm') + var(n_dxt)%coordinates = 'TLON TLAT' + var(n_dyt)%req = coord_attributes('dyt', & + 'T cell height through middle', 'm') + var(n_dyt)%coordinates = 'TLON TLAT' + var(n_dxu)%req = coord_attributes('dxu', & + 'U cell width through middle', 'm') + var(n_dxu)%coordinates = 'ULON ULAT' + var(n_dyu)%req = coord_attributes('dyu', & + 'U cell height through middle', 'm') + var(n_dyu)%coordinates = 'ULON ULAT' + var(n_HTN)%req = coord_attributes('HTN', & + 'T cell width on North side','m') + var(n_HTN)%coordinates = 'TLON TLAT' + var(n_HTE)%req = coord_attributes('HTE', & + 'T cell width on East side', 'm') + var(n_HTE)%coordinates = 'TLON TLAT' + var(n_ANGLE)%req = coord_attributes('ANGLE', & + 'angle grid makes with latitude line on U grid', & + 'radians') + var(n_ANGLE)%coordinates = 'ULON ULAT' + var(n_ANGLET)%req = coord_attributes('ANGLET', & + 'angle grid makes with latitude line on T grid', & + 'radians') + var(n_ANGLET)%coordinates = 'TLON TLAT' + + ! These fields are required for CF compliance + ! 
dimensions (nx,ny,nverts) + var_nverts(n_lont_bnds) = coord_attributes('lont_bounds', & + 'longitude boundaries of T cells', 'degrees_east') + var_nverts(n_latt_bnds) = coord_attributes('latt_bounds', & + 'latitude boundaries of T cells', 'degrees_north') + var_nverts(n_lonu_bnds) = coord_attributes('lonu_bounds', & + 'longitude boundaries of U cells', 'degrees_east') + var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & + 'latitude boundaries of U cells', 'degrees_north') + + !----------------------------------------------------------------- + ! define attributes for time-invariant variables + !----------------------------------------------------------------- + + dimid(1) = imtid + dimid(2) = jmtid + dimid(3) = timid + + do i = 1, ncoord + status = nf90_def_var(ncid, coord_var(i)%short_name, nf90_float, & + dimid(1:2), varid) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining short_name for '//coord_var(i)%short_name) + status = nf90_put_att(ncid,varid,'long_name',coord_var(i)%long_name) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//coord_var(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', coord_var(i)%units) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//coord_var(i)%short_name) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//coord_var(i)%short_name) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//coord_var(i)%short_name) + if (coord_var(i)%short_name == 'ULAT') then + status = nf90_put_att(ncid,varid,'comment', & + 'Latitude of NE corner of T grid cell') + if (status /= nf90_noerr) call abort_ice( & + 'Error defining comment for '//coord_var(i)%short_name) + endif + if (f_bounds) then + status = nf90_put_att(ncid, varid, 'bounds', coord_bounds(i)) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining bounds for '//coord_var(i)%short_name) + endif + enddo + + ! Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR) + dimidex(1)=cmtid + dimidex(2)=kmtidi + dimidex(3)=kmtids + dimidex(4)=kmtidb + + do i = 1, nvarz + if (igrdz(i)) then + status = nf90_def_var(ncid, var_nz(i)%short_name, & + nf90_float, dimidex(i), varid) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining short_name for '//var_nz(i)%short_name) + status = nf90_put_att(ncid,varid,'long_name',var_nz(i)%long_name) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//var_nz(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_nz(i)%units) + if (Status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//var_nz(i)%short_name) + endif + enddo + + ! 
Attributes for tmask, blkmask defined separately, since they have no units + if (igrd(n_tmask)) then + status = nf90_def_var(ncid, 'tmask', nf90_float, dimid(1:2), varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining var tmask') + status = nf90_put_att(ncid,varid, 'long_name', 'ocean grid mask') + if (status /= nf90_noerr) call abort_ice('ice Error: tmask long_name') + status = nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT') + if (status /= nf90_noerr) call abort_ice('ice Error: tmask units') + status = nf90_put_att(ncid,varid,'comment', '0 = land, 1 = ocean') + if (status /= nf90_noerr) call abort_ice('ice Error: tmask comment') + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice('Error defining missing_value for tmask') + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice('Error defining _FillValue for tmask') + endif + + if (igrd(n_blkmask)) then + status = nf90_def_var(ncid, 'blkmask', nf90_float, dimid(1:2), varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error defining var blkmask') + status = nf90_put_att(ncid,varid, 'long_name', 'ice grid block mask') + if (status /= nf90_noerr) call abort_ice('ice Error: blkmask long_name') + status = nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT') + if (status /= nf90_noerr) call abort_ice('ice Error: blkmask units') + status = nf90_put_att(ncid,varid,'comment', 'mytask + iblk/100') + if (status /= nf90_noerr) call abort_ice('ice Error: blkmask comment') + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice('Error defining missing_value for blkmask') + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice('Error defining _FillValue for blkmask') + endif + + do i = 3, nvar ! note n_tmask=1, n_blkmask=2 + if (igrd(i)) then + status = nf90_def_var(ncid, var(i)%req%short_name, & + nf90_float, dimid(1:2), varid) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//var(i)%req%short_name) + status = nf90_put_att(ncid,varid, 'long_name', var(i)%req%long_name) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//var(i)%req%short_name) + status = nf90_put_att(ncid, varid, 'units', var(i)%req%units) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//var(i)%req%short_name) + status = nf90_put_att(ncid, varid, 'coordinates', var(i)%coordinates) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining coordinates for '//var(i)%req%short_name) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//var(i)%req%short_name) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//var(i)%req%short_name) + endif + enddo + + ! 
Fields with dimensions (nverts,nx,ny) + dimid_nverts(1) = nvertexid + dimid_nverts(2) = imtid + dimid_nverts(3) = jmtid + do i = 1, nvar_verts + if (f_bounds) then + status = nf90_def_var(ncid, var_nverts(i)%short_name, & + nf90_float,dimid_nverts, varid) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//var_nverts(i)%short_name) + status = nf90_put_att(ncid,varid, 'long_name', var_nverts(i)%long_name) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//var_nverts(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_nverts(i)%units) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//var_nverts(i)%short_name) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//var_nverts(i)%short_name) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//var_nverts(i)%short_name) + endif + enddo + + do n=1,num_avail_hist_fields_2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + nf90_float, dimid, varid) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & + avail_hist_fields(n)%vunit) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid, 'long_name', & + avail_hist_fields(n)%vdesc) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'coordinates', & + avail_hist_fields(n)%vcoord) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining coordinates for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'cell_measures', & + avail_hist_fields(n)%vcellmeas) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell measures for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//avail_hist_fields(n)%vname) + + !----------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !----------------------------------------------------------------- + if (hist_avg) then + if (TRIM(avail_hist_fields(n)%vname)/='sig1' & + .or.TRIM(avail_hist_fields(n)%vname)/='sig2') then + status = nf90_put_att(ncid,varid,'cell_methods','time: mean') + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell methods for '//avail_hist_fields(n)%vname) + endif + endif + + if (histfreq(ns) == '1' .or. .not. hist_avg & + .or. n==n_divu(ns) .or. n==n_shear(ns) & ! snapshots + .or. n==n_sig1(ns) .or. n==n_sig2(ns) & + .or. n==n_trsig(ns) & + .or. n==n_mlt_onset(ns) .or. n==n_frz_onset(ns) & + .or. n==n_hisnap(ns) .or. n==n_aisnap(ns)) then + status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + else + status = nf90_put_att(ncid,varid,'time_rep','averaged') + endif + endif + enddo ! 
num_avail_hist_fields_2D + + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = cmtid + dimidz(4) = timid + + do n = n2D + 1, n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + nf90_float, dimidz, varid) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & + avail_hist_fields(n)%vunit) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid, 'long_name', & + avail_hist_fields(n)%vdesc) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'coordinates', & + avail_hist_fields(n)%vcoord) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining coordinates for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'cell_measures', & + avail_hist_fields(n)%vcellmeas) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell measures for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//avail_hist_fields(n)%vname) + + !----------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !----------------------------------------------------------------- + if (hist_avg) then + status = nf90_put_att(ncid,varid,'cell_methods','time: mean') + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell methods for '//avail_hist_fields(n)%vname) + endif + + if (histfreq(ns) == '1' .or. .not. hist_avg) then + status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + else + status = nf90_put_att(ncid,varid,'time_rep','averaged') + endif + endif + enddo ! num_avail_hist_fields_3Dc + + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = kmtidi + dimidz(4) = timid + + do n = n3Dccum + 1, n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + nf90_float, dimidz, varid) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & + avail_hist_fields(n)%vunit) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid, 'long_name', & + avail_hist_fields(n)%vdesc) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'coordinates', & + avail_hist_fields(n)%vcoord) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining coordinates for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'cell_measures', & + avail_hist_fields(n)%vcellmeas) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell measures for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//avail_hist_fields(n)%vname) + + endif + enddo ! num_avail_hist_fields_3Dz + + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = kmtidb + dimidz(4) = timid + + do n = n3Dzcum + 1, n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + nf90_float, dimidz, varid) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & + avail_hist_fields(n)%vunit) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid, 'long_name', & + avail_hist_fields(n)%vdesc) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'coordinates', & + avail_hist_fields(n)%vcoord) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining coordinates for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'cell_measures', & + avail_hist_fields(n)%vcellmeas) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell measures for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//avail_hist_fields(n)%vname) + + endif + enddo ! num_avail_hist_fields_3Db + + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = kmtidi + dimidcz(4) = cmtid + dimidcz(5) = timid + + do n = n3Dbcum + 1, n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & +! nf90_float, dimidcz, varid) + nf90_float, dimidcz(1:4), varid) ! 
ferret + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & + avail_hist_fields(n)%vunit) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid, 'long_name', & + avail_hist_fields(n)%vdesc) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'coordinates', & + avail_hist_fields(n)%vcoord) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining coordinates for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'cell_measures', & + avail_hist_fields(n)%vcellmeas) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell measures for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//avail_hist_fields(n)%vname) + + !----------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !----------------------------------------------------------------- + if (hist_avg) then + status = nf90_put_att(ncid,varid,'cell_methods','time: mean') + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell methods for '//avail_hist_fields(n)%vname) + endif + + if (histfreq(ns) == '1' .or. .not. hist_avg) then + status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + else + status = nf90_put_att(ncid,varid,'time_rep','averaged') + endif + endif + enddo ! num_avail_hist_fields_4Di + + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = kmtids + dimidcz(4) = cmtid + dimidcz(5) = timid + + do n = n4Dicum + 1, n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & +! nf90_float, dimidcz, varid) + nf90_float, dimidcz(1:4), varid) ! ferret + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & + avail_hist_fields(n)%vunit) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid, 'long_name', & + avail_hist_fields(n)%vdesc) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'coordinates', & + avail_hist_fields(n)%vcoord) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining coordinates for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'cell_measures', & + avail_hist_fields(n)%vcellmeas) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell measures for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//avail_hist_fields(n)%vname) + + !----------------------------------------------------------------- + ! 
Add cell_methods attribute to variables if averaged + !----------------------------------------------------------------- + if (hist_avg) then + status = nf90_put_att(ncid,varid,'cell_methods','time: mean') + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell methods for '//avail_hist_fields(n)%vname) + endif + + if (histfreq(ns) == '1' .or. .not. hist_avg) then + status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + else + status = nf90_put_att(ncid,varid,'time_rep','averaged') + endif + endif + enddo ! num_avail_hist_fields_4Ds + + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = kmtidb + dimidcz(4) = cmtid + dimidcz(5) = timid + + do n = n4Dscum + 1, n4Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & +! nf90_float, dimidcz, varid) + nf90_float, dimidcz(1:4), varid) ! ferret + if (status /= nf90_noerr) call abort_ice( & + 'Error defining variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & + avail_hist_fields(n)%vunit) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining units for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid, 'long_name', & + avail_hist_fields(n)%vdesc) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining long_name for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'coordinates', & + avail_hist_fields(n)%vcoord) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining coordinates for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'cell_measures', & + avail_hist_fields(n)%vcellmeas) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell measures for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'missing_value',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining missing_value for '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'_FillValue',spval) + if (status /= nf90_noerr) call abort_ice( & + 'Error defining _FillValue for '//avail_hist_fields(n)%vname) + + !----------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !----------------------------------------------------------------- + if (hist_avg) then + status = nf90_put_att(ncid,varid,'cell_methods','time: mean') + if (status /= nf90_noerr) call abort_ice( & + 'Error defining cell methods for '//avail_hist_fields(n)%vname) + endif + + if (histfreq(ns) == '1' .or. .not. hist_avg) then + status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + else + status = nf90_put_att(ncid,varid,'time_rep','averaged') + endif + endif + enddo ! num_avail_hist_fields_4Db + + !----------------------------------------------------------------- + ! global attributes + !----------------------------------------------------------------- + ! ... the user should change these to something useful ... 
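Editorial aside: ice_write_hist follows the standard netCDF define-mode/data-mode split. Every nf90_def_dim, nf90_def_var and nf90_put_att call, including the global attributes written just below, must happen before nf90_enddef, and nf90_put_var may only be called after it. The stripped-down sketch below shows that ordering; the dimension size, attribute values and file name are made up and status checks are omitted for brevity.

program def_mode_sketch
  use netcdf
  implicit none
  integer :: ncid, dimid, varid, status
  real    :: aice_sample(5)

  aice_sample = 0.5

  ! define mode: dimensions, variables and their attributes come first
  status = nf90_create('hist_sketch.nc', nf90_clobber, ncid)
  status = nf90_def_dim(ncid, 'ni', 5, dimid)
  status = nf90_def_var(ncid, 'aice', nf90_float, (/dimid/), varid)
  status = nf90_put_att(ncid, varid, 'units', '1')
  status = nf90_put_att(ncid, varid, 'long_name', 'ice area fraction')
  status = nf90_put_att(ncid, nf90_global, 'conventions', 'CF-1.0')

  ! leave define mode; data can only be written after this point
  status = nf90_enddef(ncid)
  status = nf90_put_var(ncid, varid, aice_sample)
  status = nf90_close(ncid)
end program def_mode_sketch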
+ !----------------------------------------------------------------- +#ifdef CCSMCOUPLED + status = nf90_put_att(ncid,nf90_global,'title',runid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error in global attribute title') +#else + title = 'sea ice model output for CICE' + status = nf90_put_att(ncid,nf90_global,'title',title) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error in global attribute title') +#endif + title = 'Diagnostic and Prognostic Variables' + status = nf90_put_att(ncid,nf90_global,'contents',title) + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: global attribute contents') + + title = 'Los Alamos Sea Ice Model (CICE) Version 5' + status = nf90_put_att(ncid,nf90_global,'source',title) + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: global attribute source') + + if (use_leap_years) then + write(title,'(a,i3,a)') 'This year has ',int(dayyr),' days' + else + write(title,'(a,i3,a)') 'All years have exactly ',int(dayyr),' days' + endif + status = nf90_put_att(ncid,nf90_global,'comment',title) + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: global attribute comment') + + write(title,'(a,i8.8)') 'File written on model date ',idate + status = nf90_put_att(ncid,nf90_global,'comment2',title) + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: global attribute date1') + + write(title,'(a,i6)') 'seconds elapsed into model date: ',sec + status = nf90_put_att(ncid,nf90_global,'comment3',title) + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: global attribute date2') + + title = 'CF-1.0' + status = & + nf90_put_att(ncid,nf90_global,'conventions',title) + if (status /= nf90_noerr) call abort_ice( & + 'Error in global attribute conventions') + + call date_and_time(date=current_date, time=current_time) + write(start_time,1000) current_date(1:4), current_date(5:6), & + current_date(7:8), current_time(1:2), & + current_time(3:4), current_time(5:8) +1000 format('This dataset was created on ', & + a,'-',a,'-',a,' at ',a,':',a,':',a) + + status = nf90_put_att(ncid,nf90_global,'history',start_time) + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: global attribute history') + + status = nf90_put_att(ncid,nf90_global,'io_flavor','io_netcdf') + if (status /= nf90_noerr) call abort_ice( & + 'ice Error: global attribute io_flavor') + + !----------------------------------------------------------------- + ! end define mode + !----------------------------------------------------------------- + + status = nf90_enddef(ncid) + if (status /= nf90_noerr) call abort_ice('ice: Error in nf90_enddef') + + !----------------------------------------------------------------- + ! write time variable + !----------------------------------------------------------------- + + status = nf90_inq_varid(ncid,'time',varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting time varid') + status = nf90_put_var(ncid,varid,ltime) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing time variable') + + !----------------------------------------------------------------- + ! 
write time_bounds info + !----------------------------------------------------------------- + + if (hist_avg) then + status = nf90_inq_varid(ncid,'time_bounds',varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting time_bounds id') + status = nf90_put_var(ncid,varid,time_beg(ns),start=(/1/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing time_beg') + status = nf90_put_var(ncid,varid,time_end(ns),start=(/2/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing time_end') + endif + + endif ! master_task + + if (my_task==master_task) then + allocate(work_g1(nx_global,ny_global)) + allocate(work_gr(nx_global,ny_global)) + else + allocate(work_gr(1,1)) ! to save memory + allocate(work_g1(1,1)) + endif + + work_g1(:,:) = c0 + + !----------------------------------------------------------------- + ! write coordinate variables + !----------------------------------------------------------------- + + do i = 1,ncoord + call broadcast_scalar(coord_var(i)%short_name,master_task) + SELECT CASE (coord_var(i)%short_name) + CASE ('TLON') + ! Convert T grid longitude from -180 -> 180 to 0 to 360 + work1 = TLON*rad_to_deg + c360 + where (work1 > c360) work1 = work1 - c360 + where (work1 < c0 ) work1 = work1 + c360 + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('TLAT') + work1 = TLAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ULON') + work1 = ULON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ULAT') + work1 = ULAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + END SELECT + + if (my_task == master_task) then + work_gr = work_g1 + status = nf90_inq_varid(ncid, coord_var(i)%short_name, varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//coord_var(i)%short_name) + status = nf90_put_var(ncid,varid,work_gr) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing'//coord_var(i)%short_name) + endif + enddo + + ! Extra dimensions (NCAT, VGRD*) + + do i = 1, nvarz + if (igrdz(i)) then + call broadcast_scalar(var_nz(i)%short_name,master_task) + if (my_task == master_task) then + status = nf90_inq_varid(ncid, var_nz(i)%short_name, varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//var_nz(i)%short_name) + SELECT CASE (var_nz(i)%short_name) + CASE ('NCAT') + status = nf90_put_var(ncid,varid,hin_max(1:ncat_hist)) + CASE ('VGRDi') ! index - needed for Met Office analysis code + status = nf90_put_var(ncid,varid,(/(k, k=1,nzilyr)/)) + CASE ('VGRDs') ! index - needed for Met Office analysis code + status = nf90_put_var(ncid,varid,(/(k, k=1,nzslyr)/)) + CASE ('VGRDb') + status = nf90_put_var(ncid,varid,(/(k, k=1,nzblyr)/)) + END SELECT + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing'//var_nz(i)%short_name) + endif + endif + enddo + + !----------------------------------------------------------------- + ! 
write grid masks, area and rotation angle + !----------------------------------------------------------------- + + if (igrd(n_tmask)) then + call gather_global(work_g1, hm, master_task, distrb_info) + if (my_task == master_task) then + work_gr=work_g1 + status = nf90_inq_varid(ncid, 'tmask', varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for tmask') + status = nf90_put_var(ncid,varid,work_gr) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable tmask') + endif + endif + + if (igrd(n_blkmask)) then + call gather_global(work_g1, bm, master_task, distrb_info) + if (my_task == master_task) then + work_gr=work_g1 + status = nf90_inq_varid(ncid, 'blkmask', varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for blkmask') + status = nf90_put_var(ncid,varid,work_gr) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable blkmask') + endif + endif + + do i = 3, nvar ! note n_tmask=1, n_blkmask=2 + if (igrd(i)) then + call broadcast_scalar(var(i)%req%short_name,master_task) + SELECT CASE (var(i)%req%short_name) + CASE ('tarea') + call gather_global(work_g1, tarea, master_task, distrb_info) + CASE ('uarea') + call gather_global(work_g1, uarea, master_task, distrb_info) + CASE ('dxu') + call gather_global(work_g1, dxu, master_task, distrb_info) + CASE ('dyu') + call gather_global(work_g1, dyu, master_task, distrb_info) + CASE ('dxt') + call gather_global(work_g1, dxt, master_task, distrb_info) + CASE ('dyt') + call gather_global(work_g1, dyt, master_task, distrb_info) + CASE ('HTN') + call gather_global(work_g1, HTN, master_task, distrb_info) + CASE ('HTE') + call gather_global(work_g1, HTE, master_task, distrb_info) + CASE ('ANGLE') + call gather_global(work_g1, ANGLE, master_task, distrb_info) + CASE ('ANGLET') + call gather_global(work_g1, ANGLET,master_task, distrb_info) + END SELECT + + if (my_task == master_task) then + work_gr=work_g1 + status = nf90_inq_varid(ncid, var(i)%req%short_name, varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//var(i)%req%short_name) + status = nf90_put_var(ncid,varid,work_gr) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//var(i)%req%short_name) + endif + endif + enddo + + deallocate(work_gr) + + !---------------------------------------------------------------- + ! Write coordinates of grid box vertices + !---------------------------------------------------------------- + + if (f_bounds) then + if (my_task==master_task) then + allocate(work_gr3(nverts,nx_global,ny_global)) + else + allocate(work_gr3(1,1,1)) ! 
to save memory + endif + + work_gr3(:,:,:) = c0 + work1 (:,:,:) = c0 + + do i = 1, nvar_verts + call broadcast_scalar(var_nverts(i)%short_name,master_task) + SELECT CASE (var_nverts(i)%short_name) + CASE ('lont_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lont_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work_gr3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('latt_bounds') + do ivertex = 1, nverts + work1(:,:,:) = latt_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work_gr3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('lonu_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lonu_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work_gr3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('latu_bounds') + do ivertex = 1, nverts + work1(:,:,:) = latu_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work_gr3(ivertex,:,:) = work_g1(:,:) + enddo + END SELECT + + if (my_task == master_task) then + status = nf90_inq_varid(ncid, var_nverts(i)%short_name, varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//var_nverts(i)%short_name) + status = nf90_put_var(ncid,varid,work_gr3) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//var_nverts(i)%short_name) + endif + enddo + deallocate(work_gr3) + endif + + !----------------------------------------------------------------- + ! write variable data + !----------------------------------------------------------------- + + if (my_task==master_task) then + allocate(work_gr(nx_global,ny_global)) + else + allocate(work_gr(1,1)) ! to save memory + endif + work_gr(:,:) = c0 + work_g1(:,:) = c0 + + do n=1,num_avail_hist_fields_2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + call gather_global(work_g1, a2D(:,:,n,:), & + master_task, distrb_info) + if (my_task == master_task) then + work_gr(:,:) = work_g1(:,:) + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + status = nf90_put_var(ncid,varid,work_gr(:,:), & + count=(/nx_global,ny_global/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//avail_hist_fields(n)%vname) + endif + endif + enddo ! num_avail_hist_fields_2D + + work_gr(:,:) = c0 + work_g1(:,:) = c0 + + do n = n2D + 1, n3Dccum + nn = n - n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + endif + do k = 1, ncat_hist + call gather_global(work_g1, a3Dc(:,:,k,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + status = nf90_put_var(ncid,varid,work_gr(:,:), & + start=(/ 1, 1,k/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + endif + enddo ! 
num_avail_hist_fields_3Dc + + work_gr(:,:) = c0 + work_g1(:,:) = c0 + + do n = n3Dccum+1, n3Dzcum + nn = n - n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + endif + do k = 1, nzilyr + call gather_global(work_g1, a3Dz(:,:,k,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_gr(:,:), & + start=(/ 1, 1,k/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + endif + enddo ! num_avail_hist_fields_3Dz + + work_gr(:,:) = c0 + work_g1(:,:) = c0 + + do n = n3Dzcum+1, n3Dbcum + nn = n - n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + endif + do k = 1, nzblyr + call gather_global(work_g1, a3Db(:,:,k,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_gr(:,:), & + start=(/ 1, 1,k/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + endif + enddo ! num_avail_hist_fields_3Db + + work_gr(:,:) = c0 + work_g1(:,:) = c0 + + do n = n3Dbcum+1, n4Dicum + nn = n - n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + endif + do ic = 1, ncat_hist + do k = 1, nzilyr + call gather_global(work_g1, a4Di(:,:,k,ic,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_gr(:,:), & + start=(/ 1, 1,k,ic/), & + count=(/nx_global,ny_global,1, 1/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + enddo ! ic + endif + enddo ! num_avail_hist_fields_4Di + + work_gr(:,:) = c0 + work_g1(:,:) = c0 + + do n = n4Dicum+1, n4Dscum + nn = n - n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + endif + do ic = 1, ncat_hist + do k = 1, nzslyr + call gather_global(work_g1, a4Ds(:,:,k,ic,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_gr(:,:), & + start=(/ 1, 1,k,ic/), & + count=(/nx_global,ny_global,1, 1/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + enddo ! ic + endif + enddo ! 
num_avail_hist_fields_4Ds + + work_gr(:,:) = c0 + work_g1(:,:) = c0 + + do n = n4Dscum+1, n4Dbcum + nn = n - n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + endif + do ic = 1, ncat_hist + do k = 1, nzblyr + call gather_global(work_g1, a4Db(:,:,k,ic,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_gr(:,:), & + start=(/ 1, 1,k,ic/), & + count=(/nx_global,ny_global,1, 1/)) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + enddo ! ic + endif + enddo ! num_avail_hist_fields_4Db + + deallocate(work_gr) + deallocate(work_g1) + + !----------------------------------------------------------------- + ! close output dataset + !----------------------------------------------------------------- + + if (my_task == master_task) then + status = nf90_close(ncid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error closing netCDF history file') + write(nu_diag,*) ' ' + write(nu_diag,*) 'Finished writing ',trim(ncfile(ns)) + endif +#endif + + end subroutine ice_write_hist + +!======================================================================= + + end module ice_history_write + +!======================================================================= diff --git a/io_netcdf/ice_restart.F90 b/io_netcdf/ice_restart.F90 new file mode 100644 index 00000000..d5de5455 --- /dev/null +++ b/io_netcdf/ice_restart.F90 @@ -0,0 +1,563 @@ +! SVN:$Id: ice_restart.F90 607 2013-03-29 15:49:42Z eclare $ +!======================================================================= + +! Read and write ice model restart files using netCDF or binary +! interfaces. +! authors David A Bailey, NCAR + + module ice_restart + + use ice_broadcast + use ice_exit, only: abort_ice + use ice_kinds_mod + use netcdf + use ice_restart_shared, only: & + restart, restart_ext, restart_dir, restart_file, pointer_file, & + runid, runtype, use_restart_time, restart_format, lcdf64, lenstr + + implicit none + private + public :: init_restart_write, init_restart_read, & + read_restart_field, write_restart_field, final_restart + + integer (kind=int_kind) :: ncid + +!======================================================================= + + contains + +!======================================================================= + +! Sets up restart file for reading. +! author David A Bailey, NCAR + + subroutine init_restart_read(ice_ic) + + use ice_calendar, only: sec, month, mday, nyr, istep0, istep1, & + time, time_forc, year_init, npt + use ice_communicate, only: my_task, master_task + use ice_domain, only: nblocks + use ice_fileunits, only: nu_diag, nu_rst_pointer + + character(len=char_len_long), intent(in), optional :: ice_ic + + ! 
local variables + + character(len=char_len_long) :: & + filename, filename0 + + integer (kind=int_kind) :: status + + if (present(ice_ic)) then + filename = trim(ice_ic) + else + if (my_task == master_task) then + open(nu_rst_pointer,file=pointer_file) + read(nu_rst_pointer,'(a)') filename0 + filename = trim(filename0) + close(nu_rst_pointer) + write(nu_diag,*) 'Read ',pointer_file(1:lenstr(pointer_file)) + endif + call broadcast_scalar(filename, master_task) + endif + + if (my_task == master_task) then + write(nu_diag,*) 'Using restart dump=', trim(filename) + + status = nf90_open(trim(filename), nf90_nowrite, ncid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error reading restart ncfile '//trim(filename)) + + if (use_restart_time) then + status = nf90_get_att(ncid, nf90_global, 'istep1', istep0) + status = nf90_get_att(ncid, nf90_global, 'time', time) + status = nf90_get_att(ncid, nf90_global, 'time_forc', time_forc) + status = nf90_get_att(ncid, nf90_global, 'nyr', nyr) + if (status == nf90_noerr) then + status = nf90_get_att(ncid, nf90_global, 'month', month) + status = nf90_get_att(ncid, nf90_global, 'mday', mday) + status = nf90_get_att(ncid, nf90_global, 'sec', sec) + endif + endif ! use namelist values if use_restart_time = F + + write(nu_diag,*) 'Restart read at istep=',istep0,time,time_forc + endif + + call broadcast_scalar(istep0,master_task) + call broadcast_scalar(time,master_task) + call broadcast_scalar(time_forc,master_task) + + istep1 = istep0 + + ! if runid is bering then need to correct npt for istep0 + if (trim(runid) == 'bering') then + npt = npt - istep0 + endif + + end subroutine init_restart_read + +!======================================================================= + +! Sets up restart file for writing. +! author David A Bailey, NCAR + + subroutine init_restart_write(filename_spec) + + use ice_blocks, only: nghost + use ice_calendar, only: sec, month, mday, nyr, istep1, & + time, time_forc, year_init + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: nx_global, ny_global, ncat, nilyr, nslyr, & + n_aero + use ice_dyn_shared, only: kdyn + use ice_fileunits, only: nu_diag, nu_rst_pointer + use ice_ocean, only: oceanmixed_ice + use ice_state, only: tr_iage, tr_FY, tr_lvl, tr_aero, tr_pond_cesm, & + tr_pond_topo, tr_pond_lvl, tr_brine + use ice_zbgc_shared, only: tr_bgc_N_sk, tr_bgc_C_sk, tr_bgc_Nit_sk, & + tr_bgc_Sil_sk, tr_bgc_DMSPp_sk, tr_bgc_DMS_sk, & + tr_bgc_chl_sk, tr_bgc_DMSPd_sk, tr_bgc_Am_sk, & + skl_bgc + + character(len=char_len_long), intent(in), optional :: filename_spec + + ! local variables + + integer (kind=int_kind) :: & + k, & ! index + nx, ny, & ! global array size + iyear, imonth, iday ! year, month, day + + character(len=char_len_long) :: filename + + integer (kind=int_kind), allocatable :: dims(:) + + integer (kind=int_kind) :: & + dimid_ni, & ! netCDF identifiers + dimid_nj, & ! + dimid_ncat, & ! + iflag, & ! netCDF creation flag + status ! status variable from netCDF routine + + character (len=3) :: nchar + + ! construct path/file + if (present(filename_spec)) then + filename = trim(filename_spec) + else + iyear = nyr + year_init - 1 + imonth = month + iday = mday + + write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & + restart_dir(1:lenstr(restart_dir)), & + restart_file(1:lenstr(restart_file)),'.', & + iyear,'-',month,'-',mday,'-',sec + end if + + ! 
write pointer (path/file) + if (my_task == master_task) then + filename = trim(filename) // '.nc' + open(nu_rst_pointer,file=pointer_file) + write(nu_rst_pointer,'(a)') filename + close(nu_rst_pointer) + + iflag = 0 + if (lcdf64) iflag = nf90_64bit_offset + status = nf90_create(trim(filename), iflag, ncid) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error creating restart ncfile '//trim(filename)) + + status = nf90_put_att(ncid,nf90_global,'istep1',istep1) + status = nf90_put_att(ncid,nf90_global,'time',time) + status = nf90_put_att(ncid,nf90_global,'time_forc',time_forc) + status = nf90_put_att(ncid,nf90_global,'nyr',nyr) + status = nf90_put_att(ncid,nf90_global,'month',month) + status = nf90_put_att(ncid,nf90_global,'mday',mday) + status = nf90_put_att(ncid,nf90_global,'sec',sec) + + nx = nx_global + ny = ny_global + if (restart_ext) then + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + endif + status = nf90_def_dim(ncid,'ni',nx,dimid_ni) + status = nf90_def_dim(ncid,'nj',ny,dimid_nj) + + status = nf90_def_dim(ncid,'ncat',ncat,dimid_ncat) + + !----------------------------------------------------------------- + ! 2D restart fields + !----------------------------------------------------------------- + + allocate(dims(2)) + + dims(1) = dimid_ni + dims(2) = dimid_nj + + call define_rest_field(ncid,'uvel',dims) + call define_rest_field(ncid,'vvel',dims) + +#ifdef CCSMCOUPLED + call define_rest_field(ncid,'coszen',dims) +#endif + call define_rest_field(ncid,'scale_factor',dims) + call define_rest_field(ncid,'swvdr',dims) + call define_rest_field(ncid,'swvdf',dims) + call define_rest_field(ncid,'swidr',dims) + call define_rest_field(ncid,'swidf',dims) + + call define_rest_field(ncid,'strocnxT',dims) + call define_rest_field(ncid,'strocnyT',dims) + + call define_rest_field(ncid,'stressp_1',dims) + call define_rest_field(ncid,'stressp_2',dims) + call define_rest_field(ncid,'stressp_3',dims) + call define_rest_field(ncid,'stressp_4',dims) + + call define_rest_field(ncid,'stressm_1',dims) + call define_rest_field(ncid,'stressm_2',dims) + call define_rest_field(ncid,'stressm_3',dims) + call define_rest_field(ncid,'stressm_4',dims) + + call define_rest_field(ncid,'stress12_1',dims) + call define_rest_field(ncid,'stress12_2',dims) + call define_rest_field(ncid,'stress12_3',dims) + call define_rest_field(ncid,'stress12_4',dims) + + call define_rest_field(ncid,'iceumask',dims) + + if (oceanmixed_ice) then + call define_rest_field(ncid,'sst',dims) + call define_rest_field(ncid,'frzmlt',dims) + endif + + if (tr_FY) then + call define_rest_field(ncid,'frz_onset',dims) + endif + + if (kdyn == 2) then + call define_rest_field(ncid,'a11_1',dims) + call define_rest_field(ncid,'a11_2',dims) + call define_rest_field(ncid,'a11_3',dims) + call define_rest_field(ncid,'a11_4',dims) + call define_rest_field(ncid,'a12_1',dims) + call define_rest_field(ncid,'a12_2',dims) + call define_rest_field(ncid,'a12_3',dims) + call define_rest_field(ncid,'a12_4',dims) + endif + + if (tr_pond_lvl) then + call define_rest_field(ncid,'fsnow',dims) + endif + + if (skl_bgc) then + call define_rest_field(ncid,'algalN',dims) + call define_rest_field(ncid,'nit' ,dims) + if (tr_bgc_Am_sk) & + call define_rest_field(ncid,'amm' ,dims) + if (tr_bgc_Sil_sk) & + call define_rest_field(ncid,'sil' ,dims) + if (tr_bgc_DMSPp_sk) & + call define_rest_field(ncid,'dmsp' ,dims) + if (tr_bgc_DMS_sk) & + call define_rest_field(ncid,'dms' ,dims) + endif + + deallocate(dims) + + 
!----------------------------------------------------------------- + ! 3D restart fields (ncat) + !----------------------------------------------------------------- + + allocate(dims(3)) + + dims(1) = dimid_ni + dims(2) = dimid_nj + dims(3) = dimid_ncat + + call define_rest_field(ncid,'aicen',dims) + call define_rest_field(ncid,'vicen',dims) + call define_rest_field(ncid,'vsnon',dims) + call define_rest_field(ncid,'Tsfcn',dims) + + if (tr_iage) then + call define_rest_field(ncid,'iage',dims) + end if + + if (tr_FY) then + call define_rest_field(ncid,'FY',dims) + end if + + if (tr_lvl) then + call define_rest_field(ncid,'alvl',dims) + call define_rest_field(ncid,'vlvl',dims) + end if + + if (tr_pond_cesm) then + call define_rest_field(ncid,'apnd',dims) + call define_rest_field(ncid,'hpnd',dims) + end if + + if (tr_pond_topo) then + call define_rest_field(ncid,'apnd',dims) + call define_rest_field(ncid,'hpnd',dims) + call define_rest_field(ncid,'ipnd',dims) + end if + + if (tr_pond_lvl) then + call define_rest_field(ncid,'apnd',dims) + call define_rest_field(ncid,'hpnd',dims) + call define_rest_field(ncid,'ipnd',dims) + call define_rest_field(ncid,'dhs',dims) + call define_rest_field(ncid,'ffrac',dims) + end if + + if (tr_brine) then + call define_rest_field(ncid,'fbrn',dims) + call define_rest_field(ncid,'first_ice',dims) + endif + + if (skl_bgc) then + call define_rest_field(ncid,'bgc_N_sk' ,dims) + call define_rest_field(ncid,'bgc_Nit_sk' ,dims) + if (tr_bgc_C_sk) & + call define_rest_field(ncid,'bgc_C_sk' ,dims) + if (tr_bgc_chl_sk) & + call define_rest_field(ncid,'bgc_chl_sk' ,dims) + if (tr_bgc_Am_sk) & + call define_rest_field(ncid,'bgc_Am_sk' ,dims) + if (tr_bgc_Sil_sk) & + call define_rest_field(ncid,'bgc_Sil_sk' ,dims) + if (tr_bgc_DMSPp_sk) & + call define_rest_field(ncid,'bgc_DMSPp_sk',dims) + if (tr_bgc_DMSPd_sk) & + call define_rest_field(ncid,'bgc_DMSPd_sk',dims) + if (tr_bgc_DMS_sk) & + call define_rest_field(ncid,'bgc_DMS_sk' ,dims) + endif + + !----------------------------------------------------------------- + ! 4D restart fields, written as layers of 3D + !----------------------------------------------------------------- + + do k=1,nilyr + write(nchar,'(i3.3)') k + call define_rest_field(ncid,'sice'//trim(nchar),dims) + call define_rest_field(ncid,'qice'//trim(nchar),dims) + enddo + + do k=1,nslyr + write(nchar,'(i3.3)') k + call define_rest_field(ncid,'qsno'//trim(nchar),dims) + enddo + + if (tr_aero) then + do k=1,n_aero + write(nchar,'(i3.3)') k + call define_rest_field(ncid,'aerosnossl'//trim(nchar),dims) + call define_rest_field(ncid,'aerosnoint'//trim(nchar),dims) + call define_rest_field(ncid,'aeroicessl'//trim(nchar),dims) + call define_rest_field(ncid,'aeroiceint'//trim(nchar),dims) + enddo + endif + + deallocate(dims) + status = nf90_enddef(ncid) + + write(nu_diag,*) 'Writing ',filename(1:lenstr(filename)) + endif ! master_task + + end subroutine init_restart_write + +!======================================================================= + +! Reads a single restart field +! author David A Bailey, NCAR + + subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3, & + diag, field_loc, field_type) + + use ice_blocks, only: nx_block, ny_block + use ice_domain_size, only: max_blocks, ncat + use ice_fileunits, only: nu_diag + use ice_read_write, only: ice_read, ice_read_nc + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number (not used for netcdf) + ndim3 , & ! third dimension + nrec ! 
record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_block,ny_block,ndim3,max_blocks), & + intent(inout) :: & + work ! input array (real, 8-byte) + + character (len=4), intent(in) :: & + atype ! format for output array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + character (len=*), intent(in) :: vname + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + ! local variables + + integer (kind=int_kind) :: & + n, & ! number of dimensions for variable + varid, & ! variable id + status ! status variable from netCDF routine + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & + work2 ! input array (real, 8-byte) + + if (present(field_loc)) then + if (ndim3 == ncat) then + if (restart_ext) then + call ice_read_nc(ncid,1,vname,work,diag, & + field_loc=field_loc,field_type=field_type,restart_ext=restart_ext) + else + call ice_read_nc(ncid,1,vname,work,diag,field_loc,field_type) + endif + elseif (ndim3 == 1) then + if (restart_ext) then + call ice_read_nc(ncid,1,vname,work2,diag, & + field_loc=field_loc,field_type=field_type,restart_ext=restart_ext) + else + call ice_read_nc(ncid,1,vname,work2,diag,field_loc,field_type) + endif + work(:,:,1,:) = work2(:,:,:) + else + write(nu_diag,*) 'ndim3 not supported ',ndim3 + endif + else + if (ndim3 == ncat) then + if (restart_ext) then + call ice_read_nc(ncid, 1, vname, work, diag, restart_ext=restart_ext) + else + call ice_read_nc(ncid, 1, vname, work, diag) + endif + elseif (ndim3 == 1) then + if (restart_ext) then + call ice_read_nc(ncid, 1, vname, work2, diag, restart_ext=restart_ext) + else + call ice_read_nc(ncid, 1, vname, work2, diag) + endif + work(:,:,1,:) = work2(:,:,:) + else + write(nu_diag,*) 'ndim3 not supported ',ndim3 + endif + endif + + end subroutine read_restart_field + +!======================================================================= + +! Writes a single restart field. +! author David A Bailey, NCAR + + subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) + + use ice_blocks, only: nx_block, ny_block + use ice_domain_size, only: max_blocks, ncat + use ice_fileunits, only: nu_diag + use ice_read_write, only: ice_write, ice_write_nc + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + ndim3 , & ! third dimension + nrec ! record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_block,ny_block,ndim3,max_blocks), & + intent(in) :: & + work ! input array (real, 8-byte) + + character (len=4), intent(in) :: & + atype ! format for output array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + character (len=*), intent(in) :: vname + + ! local variables + + integer (kind=int_kind) :: & + n, & ! dimension counter + varid, & ! variable id + status ! status variable from netCDF routine + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & + work2 ! 
input array (real, 8-byte) + + status = nf90_inq_varid(ncid,trim(vname),varid) + if (ndim3 == ncat) then + if (restart_ext) then + call ice_write_nc(ncid, 1, varid, work, diag, restart_ext) + else + call ice_write_nc(ncid, 1, varid, work, diag) + endif + elseif (ndim3 == 1) then + work2(:,:,:) = work(:,:,1,:) + if (restart_ext) then + call ice_write_nc(ncid, 1, varid, work2, diag, restart_ext) + else + call ice_write_nc(ncid, 1, varid, work2, diag) + endif + else + write(nu_diag,*) 'ndim3 not supported',ndim3 + endif + + end subroutine write_restart_field + +!======================================================================= + +! Finalize the restart file. +! author David A Bailey, NCAR + + subroutine final_restart() + + use ice_calendar, only: istep1, time, time_forc + use ice_communicate, only: my_task, master_task + use ice_fileunits, only: nu_diag + + integer (kind=int_kind) :: status + + status = nf90_close(ncid) + + if (my_task == master_task) & + write(nu_diag,*) 'Restart read/written ',istep1,time,time_forc + + end subroutine final_restart + +!======================================================================= + +! Defines a restart field +! author David A Bailey, NCAR + + subroutine define_rest_field(ncid, vname, dims) + + character (len=*) , intent(in) :: vname + integer (kind=int_kind), intent(in) :: dims(:) + integer (kind=int_kind), intent(in) :: ncid + + integer (kind=int_kind) :: varid + + integer (kind=int_kind) :: & + status ! status variable from netCDF routine + + status = nf90_def_var(ncid,trim(vname),nf90_double,dims,varid) + + end subroutine define_rest_field + +!======================================================================= + + end module ice_restart + +!======================================================================= diff --git a/mpi/ice_boundary.F90 b/mpi/ice_boundary.F90 new file mode 100644 index 00000000..7fde84c7 --- /dev/null +++ b/mpi/ice_boundary.F90 @@ -0,0 +1,6791 @@ +! SVN:$Id: ice_boundary.F90 843 2014-10-02 19:54:30Z eclare $ +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| + + module ice_boundary + +! This module contains data types and routines for updating halo +! regions (ghost cells) using MPI calls +! +! 2007-07-19: Phil Jones, Yoshi Yoshida, John Dennis +! new naming conventions, optimizations during +! initialization, true multi-dimensional updates +! (rather than serial call to two-dimensional updates), +! fixes for non-existent blocks +! 2008-01-28: Elizabeth Hunke replaced old routines with new POP +! infrastructure + + use ice_kinds_mod + use ice_communicate, only: my_task, mpiR4, mpiR8, mpitagHalo + use ice_constants, only: field_type_scalar, & + field_type_vector, field_type_angle, & + field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface + use ice_global_reductions, only: global_maxval + use ice_exit, only: abort_ice + + use ice_blocks, only: nx_block, ny_block, nghost, & + nblocks_tot, ice_blocksNorth, & + ice_blocksSouth, ice_blocksEast, ice_blocksWest, & + ice_blocksEast2, ice_blocksWest2, & + ice_blocksNorthEast, ice_blocksNorthWest, & + ice_blocksEastNorthEast, ice_blocksWestNorthWest, & + ice_blocksSouthEast, ice_blocksSouthWest, & + ice_blocksGetNbrID, get_block_parameter + use ice_distribution, only: distrb, & + ice_distributionGetBlockLoc, ice_distributionGet + + implicit none + private + save + + include 'mpif.h' + + type, public :: ice_halo + integer (int_kind) :: & + communicator, &! communicator to use for update messages + numMsgSend, &! 
number of messages to send halo update + numMsgRecv, &! number of messages to recv halo update + numLocalCopies, &! num local copies for halo update + tripoleRows ! number of rows in tripole buffer + + logical (log_kind) :: & + tripoleTFlag ! NS boundary is a tripole T-fold + + integer (int_kind), dimension(:), pointer :: & + recvTask, &! task from which to recv each msg + sendTask, &! task to which to send each msg + sizeSend, &! size of each sent message + sizeRecv, &! size of each recvd message + tripSend, &! send msg tripole flag, 0=non-zipper block + tripRecv ! recv msg tripole flag, for masked halos + + integer (int_kind), dimension(:,:), pointer :: & + srcLocalAddr, &! src addresses for each local copy + dstLocalAddr ! dst addresses for each local copy + + integer (int_kind), dimension(:,:,:), pointer :: & + sendAddr, &! src addresses for each sent message + recvAddr ! dst addresses for each recvd message + + end type + + public :: ice_HaloCreate, & + ice_HaloMask, & + ice_HaloUpdate, & + ice_HaloUpdate_stress, & + ice_HaloExtrapolate, & + ice_HaloDestroy + + interface ice_HaloUpdate ! generic interface + module procedure ice_HaloUpdate2DR8, & + ice_HaloUpdate2DR4, & + ice_HaloUpdate2DI4, & + ice_HaloUpdate3DR8, & + ice_HaloUpdate3DR4, & + ice_HaloUpdate3DI4, & + ice_HaloUpdate4DR8, & + ice_HaloUpdate4DR4, & + ice_HaloUpdate4DI4 + end interface + + interface ice_HaloExtrapolate ! generic interface + module procedure ice_HaloExtrapolate2DR8 !, & +! ice_HaloExtrapolate2DR4, & ! not yet +! ice_HaloExtrapolate2DI4, & ! implemented + end interface + +!----------------------------------------------------------------------- +! +! to prevent frequent allocate-deallocate for 2d halo updates, create +! a static 2d buffer to be allocated once at creation. if future +! creation needs larger buffer, resize during the creation. +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + bufSizeSend, &! max buffer size for send messages + bufSizeRecv ! max buffer size for recv messages + + integer (int_kind), dimension(:,:), allocatable :: & + bufSendI4, &! buffer for use to send in 2D i4 halo updates + bufRecvI4 ! buffer for use to recv in 2D i4 halo updates + + real (real_kind), dimension(:,:), allocatable :: & + bufSendR4, &! buffer for use to send in 2D r4 halo updates + bufRecvR4 ! buffer for use to recv in 2D r4 halo updates + + real (dbl_kind), dimension(:,:), allocatable :: & + bufSendR8, &! buffer for use to send in 2D r8 halo updates + bufRecvR8 ! buffer for use to recv in 2D r8 halo updates + +!----------------------------------------------------------------------- +! +! global buffers for tripole boundary +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:), allocatable :: & + bufTripoleI4 + + real (real_kind), dimension(:,:), allocatable :: & + bufTripoleR4 + + real (dbl_kind), dimension(:,:), allocatable :: & + bufTripoleR8 + +!*********************************************************************** + +contains + +!*********************************************************************** + + function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & + nxGlobal) result(halo) + +! This routine creates a halo type with info necessary for +! performing a halo (ghost cell) update. This info is computed +! based on the input block distribution. + + type (distrb), intent(in) :: & + dist ! distribution of blocks across procs + + character (*), intent(in) :: & + nsBoundaryType, &! 
type of boundary to use in logical ns dir + ewBoundaryType ! type of boundary to use in logical ew dir + + integer (int_kind), intent(in) :: & + nxGlobal ! global grid extent for tripole grids + + type (ice_halo) :: & + halo ! a new halo type with info for halo updates + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + istat, &! allocate status flag + numProcs, &! num of processors involved + communicator, &! communicator for message passing + iblock, &! block counter + eastBlock, westBlock, &! block id east, west neighbors + northBlock, southBlock, &! block id north, south neighbors + neBlock, nwBlock, &! block id northeast, northwest nbrs + seBlock, swBlock, &! block id southeast, southwest nbrs + srcProc, dstProc, &! source, dest processor locations + srcLocalID, dstLocalID, &! local block index of src,dst blocks + maxTmp, &! temp for global maxval + blockSizeX, &! size of default physical domain in X + blockSizeY, &! size of default physical domain in Y + maxSizeSend, maxSizeRecv, &! max buffer sizes + numMsgSend, numMsgRecv, &! number of messages for this halo + eastMsgSize, westMsgSize, &! nominal sizes for e-w msgs + northMsgSize, southMsgSize, &! nominal sizes for n-s msgs + tripoleMsgSize, &! size for tripole messages + tripoleMsgSizeOut, &! size for tripole messages + tripoleRows, &! number of rows in tripole buffer + cornerMsgSize, msgSize ! nominal size for corner msg + + integer (int_kind), dimension(:), allocatable :: & + sendCount, recvCount ! count number of words to each proc + + logical (log_kind) :: & + resize, &! flag for resizing buffers + tripoleFlag, &! flag for allocating tripole buffers + tripoleBlock, &! flag for identifying north tripole blocks + tripoleTFlag ! flag for processing tripole buffer as T-fold + +!----------------------------------------------------------------------- +! +! Initialize some useful variables and return if this task not +! in the current distribution. +! +!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + nprocs = numProcs, & + communicator = communicator) + + if (my_task >= numProcs) return + + halo%communicator = communicator + + blockSizeX = nx_block - 2*nghost + blockSizeY = ny_block - 2*nghost + eastMsgSize = nghost*blockSizeY + westMsgSize = nghost*blockSizeY + southMsgSize = nghost*blockSizeX + northMsgSize = nghost*blockSizeX + cornerMsgSize = nghost*nghost + tripoleRows = nghost+1 + + if (nsBoundaryType == 'tripole' .or. nsBoundaryType == 'tripoleT') then + tripoleFlag = .true. + tripoleTFlag = (nsBoundaryType == 'tripoleT') + if (tripoleTflag) tripoleRows = tripoleRows+1 + + !*** allocate tripole message buffers if not already done + + if (.not. allocated(bufTripoleR8)) then + allocate (bufTripoleI4(nxGlobal, tripoleRows), & + bufTripoleR4(nxGlobal, tripoleRows), & + bufTripoleR8(nxGlobal, tripoleRows), & + stat=istat) + + if (istat > 0) then + call abort_ice( & + 'ice_HaloCreate: error allocating tripole buffers') + return + endif + endif + + else + tripoleFlag = .false. + tripoleTFlag = .false. + endif + halo%tripoleTFlag = tripoleTFlag + halo%tripoleRows = tripoleRows + tripoleMsgSize = tripoleRows*blockSizeX + tripoleMsgSizeOut = tripoleRows*nx_block + +!----------------------------------------------------------------------- +! +! Count the number of messages to send/recv from each processor +! 
and number of words in each message. These quantities are +! necessary for allocating future arrays. +! +!----------------------------------------------------------------------- + + allocate (sendCount(numProcs), recvCount(numProcs), stat=istat) + + if (istat > 0) then + call abort_ice( & + 'ice_HaloCreate: error allocating count arrays') + return + endif + + sendCount = 0 + recvCount = 0 + + msgCountLoop: do iblock=1,nblocks_tot + + call ice_distributionGetBlockLoc(dist, iblock, srcProc, & + srcLocalID) + + !*** find north neighbor block and add to message count + !*** also set tripole block flag for later special cases + + northBlock = ice_blocksGetNbrID(iblock, ice_blocksNorth, & + ewBoundaryType, nsBoundaryType) + if (northBlock > 0) then + tripoleBlock = .false. + msgSize = northMsgSize + call ice_distributionGetBlockLoc(dist, northBlock, dstProc, & + dstLocalID) + else if (northBlock < 0) then ! tripole north row, count block + tripoleBlock = .true. + msgSize = tripoleMsgSize + call ice_distributionGetBlockLoc(dist, abs(northBlock), & + dstProc, dstLocalID) + else + tripoleBlock = .false. + msgSize = northMsgSize + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, msgSize) + + !*** if a tripole boundary block, also create a local + !*** message into and out of tripole buffer + + if (tripoleBlock) then + !*** copy out of tripole buffer - includes halo + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, srcProc, & + tripoleMsgSizeOut) + + !*** copy in only required if dstProc not same as srcProc + if (dstProc /= srcProc) then + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, srcProc, & + msgSize) + endif + endif + + !*** find south neighbor block and add to message count + + southBlock = ice_blocksGetNbrID(iblock, ice_blocksSouth, & + ewBoundaryType, nsBoundaryType) + + if (southBlock > 0) then + call ice_distributionGetBlockLoc(dist, southBlock, dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, southMsgSize) + + !*** find east neighbor block and add to message count + + eastBlock = ice_blocksGetNbrID(iblock, ice_blocksEast, & + ewBoundaryType, nsBoundaryType) + + if (eastBlock > 0) then + call ice_distributionGetBlockLoc(dist, eastBlock, dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, eastMsgSize) + + !*** if a tripole boundary block, non-local east neighbor + !*** needs a chunk of the north boundary, so add a message + !*** for that + + if (tripoleBlock .and. dstProc /= srcProc) then + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, tripoleMsgSize) + endif + + !*** find west neighbor block and add to message count + + westBlock = ice_blocksGetNbrID(iblock, ice_blocksWest, & + ewBoundaryType, nsBoundaryType) + + if (westBlock > 0) then + call ice_distributionGetBlockLoc(dist, westBlock, dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, westMsgSize) + + !*** if a tripole boundary block, non-local west neighbor + !*** needs a chunk of the north boundary, so add a message + !*** for that + + if (tripoleBlock .and. 
dstProc /= srcProc) then + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, tripoleMsgSize) + endif + + !*** find northeast neighbor block and add to message count + + neBlock = ice_blocksGetNbrID(iblock, ice_blocksNorthEast, & + ewBoundaryType, nsBoundaryType) + + if (neBlock > 0) then + msgSize = cornerMsgSize ! normal corner message + + call ice_distributionGetBlockLoc(dist, neBlock, dstProc, & + dstLocalID) + + else if (neBlock < 0) then ! tripole north row + msgSize = tripoleMsgSize ! tripole needs whole top row of block + + call ice_distributionGetBlockLoc(dist, abs(neBlock), dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, msgSize) + + !*** find northwest neighbor block and add to message count + + nwBlock = ice_blocksGetNbrID(iblock, ice_blocksNorthWest, & + ewBoundaryType, nsBoundaryType) + + if (nwBlock > 0) then + msgSize = cornerMsgSize ! normal NE corner update + + call ice_distributionGetBlockLoc(dist, nwBlock, dstProc, & + dstLocalID) + + else if (nwBlock < 0) then ! tripole north row, count block + msgSize = tripoleMsgSize ! tripole NE corner update - entire row needed + + call ice_distributionGetBlockLoc(dist, abs(nwBlock), dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, msgSize) + + !*** find southeast neighbor block and add to message count + + seBlock = ice_blocksGetNbrID(iblock, ice_blocksSouthEast, & + ewBoundaryType, nsBoundaryType) + + if (seBlock > 0) then + call ice_distributionGetBlockLoc(dist, seBlock, dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, cornerMsgSize) + + !*** find southwest neighbor block and add to message count + + swBlock = ice_blocksGetNbrID(iblock, ice_blocksSouthWest, & + ewBoundaryType, nsBoundaryType) + + if (swBlock > 0) then + call ice_distributionGetBlockLoc(dist, swBlock, dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, cornerMsgSize) + + !*** for tripole grids with padded domain, padding will + !*** prevent tripole buffer from getting all the info + !*** it needs - must extend footprint at top boundary + + if (tripoleBlock .and. & !tripole + mod(nxGlobal,blockSizeX) /= 0) then !padding + + !*** find east2 neighbor block and add to message count + + eastBlock = ice_blocksGetNbrID(iBlock, ice_blocksEast2, & + ewBoundaryType, nsBoundaryType) + + if (eastBlock > 0) then + call ice_distributionGetBlockLoc(dist, eastBlock, dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + if (dstProc /= srcProc) then + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, tripoleMsgSize) + endif + + !*** find EastNorthEast neighbor block and add to message count + + neBlock = ice_blocksGetNbrID(iBlock, ice_blocksEastNorthEast, & + ewBoundaryType, nsBoundaryType) + + if (neBlock < 0) then ! tripole north row + msgSize = tripoleMsgSize ! 
tripole needs whole top row of block + + call ice_distributionGetBlockLoc(dist, abs(neBlock), dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + if (dstProc /= srcProc) then + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, msgSize) + endif + + !*** find west2 neighbor block and add to message count + + westBlock = ice_blocksGetNbrID(iBlock, ice_blocksWest2, & + ewBoundaryType, nsBoundaryType) + + if (westBlock > 0) then + call ice_distributionGetBlockLoc(dist, westBlock, dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + if (dstProc /= srcProc) then + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, tripoleMsgSize) + endif + + !*** find WestNorthWest neighbor block and add to message count + + nwBlock = ice_blocksGetNbrID(iBlock, ice_blocksWestNorthWest, & + ewBoundaryType, nsBoundaryType) + + if (nwBlock < 0) then ! tripole north row + msgSize = tripoleMsgSize ! tripole needs whole top row of block + + call ice_distributionGetBlockLoc(dist, abs(nwBlock), dstProc, & + dstLocalID) + else + dstProc = 0 + dstLocalID = 0 + endif + + if (dstProc /= srcProc) then + call ice_HaloIncrementMsgCount(sendCount, recvCount, & + srcProc, dstProc, msgSize) + endif + + endif + + end do msgCountLoop + +!----------------------------------------------------------------------- +! +! if messages are received from the same processor, the message is +! actually a local copy - count them and reset to zero +! +!----------------------------------------------------------------------- + + halo%numLocalCopies = recvCount(my_task+1) + + sendCount(my_task+1) = 0 + recvCount(my_task+1) = 0 + +!----------------------------------------------------------------------- +! +! now count the number of actual messages to be sent and received +! +!----------------------------------------------------------------------- + + numMsgSend = count(sendCount /= 0) + numMsgRecv = count(recvCount /= 0) + halo%numMsgSend = numMsgSend + halo%numMsgRecv = numMsgRecv + +!----------------------------------------------------------------------- +! +! allocate buffers for 2-d halo updates to save time later +! if the buffers are already allocated by previous create call, +! check to see if they need to be re-sized +! +!----------------------------------------------------------------------- + + maxTmp = maxval(sendCount) + maxSizeSend = global_maxval(maxTmp, dist) + maxTmp = maxval(recvCount) + maxSizeRecv = global_maxval(maxTmp, dist) + + if (.not. allocated(bufSendR8)) then + + bufSizeSend = maxSizeSend + bufSizeRecv = maxSizeRecv + + allocate(bufSendI4(bufSizeSend, numMsgSend), & + bufRecvI4(bufSizeRecv, numMsgRecv), & + bufSendR4(bufSizeSend, numMsgSend), & + bufRecvR4(bufSizeRecv, numMsgRecv), & + bufSendR8(bufSizeSend, numMsgSend), & + bufRecvR8(bufSizeRecv, numMsgRecv), stat=istat) + + if (istat > 0) then + call abort_ice( & + 'ice_HaloCreate: error allocating 2d buffers') + return + endif + + else + + resize = .false. + + if (maxSizeSend > bufSizeSend) then + resize = .true. + bufSizeSend = maxSizeSend + endif + if (maxSizeRecv > bufSizeRecv) then + resize = .true. + bufSizeRecv = maxSizeRecv + endif + + if (numMsgSend > size(bufSendR8,dim=2)) resize = .true. + if (numMsgRecv > size(bufRecvR8,dim=2)) resize = .true. 
+ + if (resize) then + deallocate(bufSendI4, bufRecvI4, bufSendR4, & + bufRecvR4, bufSendR8, bufRecvR8, stat=istat) + + if (istat > 0) then + call abort_ice( & + 'ice_HaloCreate: error deallocating 2d buffers') + return + endif + + allocate(bufSendI4(bufSizeSend, numMsgSend), & + bufRecvI4(bufSizeRecv, numMsgRecv), & + bufSendR4(bufSizeSend, numMsgSend), & + bufRecvR4(bufSizeRecv, numMsgRecv), & + bufSendR8(bufSizeSend, numMsgSend), & + bufRecvR8(bufSizeRecv, numMsgRecv), stat=istat) + + if (istat > 0) then + call abort_ice( & + 'ice_HaloCreate: error reallocating 2d buffers') + return + endif + + endif + + endif + +!----------------------------------------------------------------------- +! +! allocate arrays for message information and initialize +! +!----------------------------------------------------------------------- + + allocate(halo%sendTask(numMsgSend), & + halo%recvTask(numMsgRecv), & + halo%sizeSend(numMsgSend), & + halo%sizeRecv(numMsgRecv), & + halo%tripSend(numMsgSend), & + halo%tripRecv(numMsgRecv), & + halo%sendAddr(3,bufSizeSend,numMsgSend), & + halo%recvAddr(3,bufSizeRecv,numMsgRecv), & + halo%srcLocalAddr(3,halo%numLocalCopies), & + halo%dstLocalAddr(3,halo%numLocalCopies), & + stat = istat) + + if (istat > 0) then + call abort_ice( & + 'ice_HaloCreate: error allocating halo message info arrays') + return + endif + + halo%sendTask = 0 + halo%recvTask = 0 + halo%sizeSend = 0 + halo%sizeRecv = 0 + halo%tripSend = 0 + halo%tripRecv = 0 + halo%sendAddr = 0 + halo%recvAddr = 0 + halo%srcLocalAddr = 0 + halo%dstLocalAddr = 0 + + deallocate(sendCount, recvCount, stat=istat) + + if (istat > 0) then + call abort_ice( & + 'ice_HaloCreate: error deallocating count arrays') + return + endif + +!----------------------------------------------------------------------- +! +! repeat loop through blocks but this time, determine all the +! required message information for each message or local copy +! +!----------------------------------------------------------------------- + + !*** reset halo scalars to use as counters + + halo%numMsgSend = 0 + halo%numMsgRecv = 0 + halo%numLocalCopies = 0 + + msgConfigLoop: do iblock=1,nblocks_tot + + call ice_distributionGetBlockLoc(dist, iblock, srcProc, & + srcLocalID) + + !*** find north neighbor block and set msg info + !*** also set tripole block flag for later special cases + + northBlock = ice_blocksGetNbrID(iblock, ice_blocksNorth, & + ewBoundaryType, nsBoundaryType) + + if (northBlock > 0) then + tripoleBlock = .false. + call ice_distributionGetBlockLoc(dist, northBlock, dstProc, & + dstLocalID) + else if (northBlock < 0) then ! tripole north row, count block + tripoleBlock = .true. + call ice_distributionGetBlockLoc(dist, abs(northBlock), & + dstProc, dstLocalID) + else + tripoleBlock = .false. 
+ dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + northBlock, dstProc, dstLocalID, & + 'north') + + !*** if a tripole boundary block, also create a local + !*** message into and out of tripole buffer + + if (tripoleBlock) then + !*** copy out of tripole buffer - includes halo + call ice_HaloMsgCreate(halo,-iblock, srcProc, srcLocalID, & + iblock, srcProc, srcLocalID, & + 'north') + + !*** copy in only required if dstProc not same as srcProc + if (dstProc /= srcProc) then + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + -iblock, srcProc, srcLocalID, & + 'north') + + endif + endif + + !*** find south neighbor block and add to message count + + southBlock = ice_blocksGetNbrID(iblock, ice_blocksSouth, & + ewBoundaryType, nsBoundaryType) + + if (southBlock > 0) then + call ice_distributionGetBlockLoc(dist, southBlock, dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + southBlock, dstProc, dstLocalID, & + 'south') + + !*** find east neighbor block and add to message count + + eastBlock = ice_blocksGetNbrID(iblock, ice_blocksEast, & + ewBoundaryType, nsBoundaryType) + + if (eastBlock > 0) then + call ice_distributionGetBlockLoc(dist, eastBlock, dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + eastBlock, dstProc, dstLocalID, & + 'east') + + !*** if a tripole boundary block, non-local east neighbor + !*** needs a chunk of the north boundary, so add a message + !*** for that + + if (tripoleBlock .and. dstProc /= srcProc) then + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + -eastBlock, dstProc, dstLocalID, & + 'north') + + endif + + !*** find west neighbor block and add to message count + + westBlock = ice_blocksGetNbrID(iblock, ice_blocksWest, & + ewBoundaryType, nsBoundaryType) + + if (westBlock > 0) then + call ice_distributionGetBlockLoc(dist, westBlock, dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + westBlock, dstProc, dstLocalID, & + 'west') + + + !*** if a tripole boundary block, non-local west neighbor + !*** needs a chunk of the north boundary, so add a message + !*** for that + + if (tripoleBlock .and. 
dstProc /= srcProc) then + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + -westBlock, dstProc, dstLocalID, & + 'north') + + endif + + !*** find northeast neighbor block and add to message count + + neBlock = ice_blocksGetNbrID(iblock, ice_blocksNorthEast, & + ewBoundaryType, nsBoundaryType) + + if (neBlock /= 0) then + call ice_distributionGetBlockLoc(dist, abs(neBlock), dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + neBlock, dstProc, dstLocalID, & + 'northeast') + + !*** find northwest neighbor block and add to message count + + nwBlock = ice_blocksGetNbrID(iblock, ice_blocksNorthWest, & + ewBoundaryType, nsBoundaryType) + + if (nwBlock /= 0) then + call ice_distributionGetBlockLoc(dist, abs(nwBlock), dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + nwBlock, dstProc, dstLocalID, & + 'northwest') + + !*** find southeast neighbor block and add to message count + + seBlock = ice_blocksGetNbrID(iblock, ice_blocksSouthEast, & + ewBoundaryType, nsBoundaryType) + + if (seBlock > 0) then + call ice_distributionGetBlockLoc(dist, seBlock, dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + seBlock, dstProc, dstLocalID, & + 'southeast') + + !*** find southwest neighbor block and add to message count + + swBlock = ice_blocksGetNbrID(iblock, ice_blocksSouthWest, & + ewBoundaryType, nsBoundaryType) + + if (swBlock > 0) then + call ice_distributionGetBlockLoc(dist, swBlock, dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + swBlock, dstProc, dstLocalID, & + 'southwest') + + !*** for tripole grids with padded domain, padding will + !*** prevent tripole buffer from getting all the info + !*** it needs - must extend footprint at top boundary + + if (tripoleBlock .and. & !tripole + mod(nxGlobal,blockSizeX) /= 0) then !padding + + !*** find east2 neighbor block and add to message count + + eastBlock = ice_blocksGetNbrID(iBlock, ice_blocksEast2, & + ewBoundaryType, nsBoundaryType) + + if (eastBlock > 0) then + call ice_distributionGetBlockLoc(dist, eastBlock, dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + if (dstProc /= srcProc) then + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + -eastBlock, dstProc, dstLocalID, & + 'north') + + endif + + !*** find EastNorthEast neighbor block and add to message count + + neBlock = ice_blocksGetNbrID(iBlock, ice_blocksEastNorthEast, & + ewBoundaryType, nsBoundaryType) + + if (neBlock < 0) then ! tripole north row + msgSize = tripoleMsgSize ! 
tripole needs whole top row of block + + call ice_distributionGetBlockLoc(dist, abs(neBlock), dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + if (dstProc /= srcProc) then + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + neBlock, dstProc, dstLocalID, & + 'north') + endif + + !*** find west2 neighbor block and add to message count + + westBlock = ice_blocksGetNbrID(iBlock, ice_blocksWest2, & + ewBoundaryType, nsBoundaryType) + + if (westBlock > 0) then + call ice_distributionGetBlockLoc(dist, westBlock, dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + if (dstProc /= srcProc) then + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + -westBlock, dstProc, dstLocalID, & + 'north') + + endif + + !*** find WestNorthWest neighbor block and add to message count + + nwBlock = ice_blocksGetNbrID(iBlock, ice_blocksWestNorthWest, & + ewBoundaryType, nsBoundaryType) + + if (nwBlock < 0) then ! tripole north row + msgSize = tripoleMsgSize ! tripole needs whole top row of block + + call ice_distributionGetBlockLoc(dist, abs(nwBlock), dstProc, & + dstLocalID) + + else + dstProc = 0 + dstLocalID = 0 + endif + + if (dstProc /= srcProc) then + call ice_HaloMsgCreate(halo, iblock, srcProc, srcLocalID, & + nwBlock, dstProc, dstLocalID, & + 'north') + + endif + + endif + + end do msgConfigLoop + +!----------------------------------------------------------------------- + + end function ice_HaloCreate + +!*********************************************************************** + + subroutine ice_HaloMask(halo, basehalo, mask) + +! This routine creates a halo type with info necessary for +! performing a halo (ghost cell) update. This info is computed +! based on a base halo already initialized and a mask + + use ice_domain_size, only: max_blocks + + type (ice_halo) :: & + basehalo ! basehalo to mask + integer (int_kind), intent(in) :: & + mask(nx_block,ny_block,max_blocks) ! mask of live points + + type (ice_halo) :: & + halo ! a new halo type with info for halo updates + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + n,nmsg,scnt, &! counters + icel,jcel,nblock, &! gridcell index + istat, &! allocate status flag + communicator, &! communicator for message passing + numMsgSend, numMsgRecv, &! number of messages for this halo + numLocalCopies, &! num local copies for halo update + tripoleRows, &! number of rows in tripole buffer + lbufSizeSend, &! buffer size for send messages + lbufSizeRecv ! buffer size for recv messages + logical (log_kind) :: & + tripoleTFlag, & ! flag for processing tripole buffer as T-fold + tmpflag ! temporary flag for setting halomask along T-fold + +!----------------------------------------------------------------------- +! +! allocate and initialize halo +! always keep tripole zipper msgs +! 
+!----------------------------------------------------------------------- + + communicator = basehalo%communicator + tripoleRows = basehalo%tripoleRows + tripoleTFlag = basehalo%tripoleTFlag + numMsgSend = basehalo%numMsgSend + numMsgRecv = basehalo%numMsgRecv + numLocalCopies = basehalo%numLocalCopies + lbufSizeSend = size(basehalo%sendAddr,dim=2) + lbufSizeRecv = size(basehalo%recvAddr,dim=2) + + allocate(halo%sendTask(numMsgSend), & + halo%recvTask(numMsgRecv), & + halo%sizeSend(numMsgSend), & + halo%sizeRecv(numMsgRecv), & + halo%tripSend(numMsgSend), & + halo%tripRecv(numMsgRecv), & + halo%sendAddr(3,lbufSizeSend,numMsgSend), & + halo%recvAddr(3,lbufSizeRecv,numMsgRecv), & + halo%srcLocalAddr(3,numLocalCopies), & + halo%dstLocalAddr(3,numLocalCopies), & + stat = istat) + + if (istat > 0) then + call abort_ice( & + 'ice_HaloMask: error allocating halo message info arrays') + return + endif + + halo%communicator = communicator + halo%tripoleRows = tripoleRows + halo%tripoleTFlag = tripoleTFlag + halo%numLocalCopies = numLocalCopies + + halo%srcLocalAddr = basehalo%srcLocalAddr(:,1:numLocalCopies) + halo%dstLocalAddr = basehalo%dstLocalAddr(:,1:numLocalCopies) + + numMsgSend = 0 + do nmsg=1,basehalo%numMsgSend + scnt = 0 + do n=1,basehalo%sizeSend(nmsg) + icel = basehalo%sendAddr(1,n,nmsg) + jcel = basehalo%sendAddr(2,n,nmsg) + nblock = basehalo%sendAddr(3,n,nmsg) +! the following line fails bounds check for mask when tripSend /= 0 +! if (mask(icel,jcel,abs(nblock)) /= 0 .or. basehalo%tripSend(nmsg) /= 0) then + tmpflag = .false. + if (basehalo%tripSend(nmsg) /= 0) then + tmpflag = .true. + elseif (mask(icel,jcel,abs(nblock)) /= 0) then + tmpflag = .true. + endif + + if (tmpflag) then + scnt = scnt + 1 + if (scnt == 1) then + numMsgSend = numMsgSend + 1 + halo%sendTask(numMsgSend) = basehalo%sendTask(nmsg) + halo%tripSend(numMsgSend) = basehalo%tripSend(nmsg) + endif + halo%sendAddr(1,scnt,numMsgSend) = icel + halo%sendAddr(2,scnt,numMsgSend) = jcel + halo%sendAddr(3,scnt,numMsgSend) = nblock + halo%sizeSend(numMsgSend) = scnt + endif + enddo + enddo + halo%numMsgSend = numMsgSend + + numMsgRecv = 0 + do nmsg=1,basehalo%numMsgRecv + scnt = 0 + do n=1,basehalo%sizeRecv(nmsg) + icel = basehalo%recvAddr(1,n,nmsg) + jcel = basehalo%recvAddr(2,n,nmsg) + nblock = basehalo%recvAddr(3,n,nmsg) +! the following line fails bounds check for mask when tripRecv /= 0 +! if (mask(icel,jcel,abs(nblock)) /= 0 .or. basehalo%tripRecv(nmsg) /= 0) then + tmpflag = .false. + if (basehalo%tripRecv(nmsg) /= 0) then + tmpflag = .true. + elseif (mask(icel,jcel,abs(nblock)) /= 0) then + tmpflag = .true. + endif + + if (tmpflag) then + scnt = scnt + 1 + if (scnt == 1) then + numMsgRecv = numMsgRecv + 1 + halo%recvTask(numMsgRecv) = basehalo%recvTask(nmsg) + halo%tripRecv(numMsgRecv) = basehalo%tripRecv(nmsg) + endif + halo%recvAddr(1,scnt,numMsgRecv) = icel + halo%recvAddr(2,scnt,numMsgRecv) = jcel + halo%recvAddr(3,scnt,numMsgRecv) = nblock + halo%sizeRecv(numMsgRecv) = scnt + endif + enddo + enddo + halo%numMsgRecv = numMsgRecv + +!----------------------------------------------------------------------- + + end subroutine ice_HaloMask + +!*********************************************************************** + + subroutine ice_HaloUpdate2DR8(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! 
for 2d horizontal arrays of double precision. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + real (dbl_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + real (dbl_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + integer (int_kind) :: len ! length of messages + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0.0_dbl_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleR8)) then + nxGlobal = size(bufTripoleR8,dim=1) + bufTripoleR8 = fill + endif + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate2DR8: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg) + call MPI_IRECV(bufRecvR8(1:len,nmsg), len, mpiR8, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + bufSendR8(n,nmsg) = array(iSrc,jSrc,srcBlock) + end do + do n=halo%sizeSend(nmsg)+1,bufSizeSend + bufSendR8(n,nmsg) = fill ! 
fill remainder of buffer + end do + + len = halo%SizeSend(nmsg) + call MPI_ISEND(bufSendR8(1:len,nmsg), len, mpiR8, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:) = fill + array(1:nx_block,ny_block-j+1,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:) = fill + array(nx_block-i+1,1:ny_block,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + array(iDst,jDst,dstBlock) = & + array(iSrc,jSrc,srcBlock) + else if (dstBlock < 0) then ! tripole copy into buffer + bufTripoleR8(iDst,jDst) = & + array(iSrc,jSrc,srcBlock) + endif + else if (srcBlock == 0) then + array(iDst,jDst,dstBlock) = fill + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + array(iDst,jDst,dstBlock) = bufRecvR8(n,nmsg) + else if (dstBlock < 0) then !tripole + bufTripoleR8(iDst,jDst) = bufRecvR8(n,nmsg) + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate2DR8: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! 
cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripoleR8(i ,halo%tripoleRows) + x2 = bufTripoleR8(iDst,halo%tripoleRows) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripoleR8(i ,halo%tripoleRows) = xavg + bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripoleR8(i ,halo%tripoleRows) + x2 = bufTripoleR8(iDst,halo%tripoleRows) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripoleR8(i ,halo%tripoleRows) = xavg + bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate2DR8: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripoleR8(i ,halo%tripoleRows) + x2 = bufTripoleR8(iDst,halo%tripoleRows) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripoleR8(i ,halo%tripoleRows) = xavg + bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripoleR8(i ,halo%tripoleRows) + x2 = bufTripoleR8(iDst,halo%tripoleRows) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripoleR8(i ,halo%tripoleRows) = xavg + bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg + end do + + case default + call abort_ice( & + 'ice_HaloUpdate2DR8: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + array(iDst,jDst,dstBlock) = isign*bufTripoleR8(iSrc,jSrc) + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! 
wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate2DR8: error deallocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DR8 + +!*********************************************************************** + + subroutine ice_HaloUpdate2DR4(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 2d horizontal arrays of single precision. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + real (real_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + real (real_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + real (real_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + integer (int_kind) :: len ! length of messages + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0.0_real_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleR4)) then + nxGlobal = size(bufTripoleR4,dim=1) + bufTripoleR4 = fill + endif + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! 
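+! A brief note on the sizes used below: MPI_WAITALL expects one status
+! entry per request, and in Fortran each status is an integer array of
+! length MPI_STATUS_SIZE, so the status arrays are dimensioned
+! (MPI_STATUS_SIZE, number of messages) while the request arrays hold
+! one handle per posted send or receive.
+!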
+!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate2DR4: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg) + call MPI_IRECV(bufRecvR4(1:len,nmsg), len, mpiR4, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + bufSendR4(n,nmsg) = array(iSrc,jSrc,srcBlock) + end do + do n=halo%sizeSend(nmsg)+1,bufSizeSend + bufSendR4(n,nmsg) = fill ! fill remainder of buffer + end do + + len = halo%SizeSend(nmsg) + call MPI_ISEND(bufSendR4(1:len,nmsg), len, mpiR4, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:) = fill + array(1:nx_block,ny_block-j+1,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:) = fill + array(nx_block-i+1,1:ny_block,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + array(iDst,jDst,dstBlock) = & + array(iSrc,jSrc,srcBlock) + else if (dstBlock < 0) then ! tripole copy into buffer + bufTripoleR4(iDst,jDst) = & + array(iSrc,jSrc,srcBlock) + endif + else if (srcBlock == 0) then + array(iDst,jDst,dstBlock) = fill + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! 
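+! Illustrative sketch of the unpack step below: once MPI_WAITALL
+! returns, every posted receive has completed, so bufRecvR4(1:len,nmsg)
+! holds valid data.  Entry n of message nmsg is copied to the ghost
+! cell (or tripole buffer point) whose address is stored in
+! recvAddr(1:3,n,nmsg); e.g. a message with sizeRecv(nmsg) = 3 scatters
+! bufRecvR4(1:3,nmsg) to the three destinations listed in
+! recvAddr(:,1:3,nmsg).
+!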
+!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + array(iDst,jDst,dstBlock) = bufRecvR4(n,nmsg) + else if (dstBlock < 0) then !tripole + bufTripoleR4(iDst,jDst) = bufRecvR4(n,nmsg) + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate2DR4: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripoleR4(i ,halo%tripoleRows) + x2 = bufTripoleR4(iDst,halo%tripoleRows) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripoleR4(i ,halo%tripoleRows) = xavg + bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripoleR4(i ,halo%tripoleRows) + x2 = bufTripoleR4(iDst,halo%tripoleRows) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripoleR4(i ,halo%tripoleRows) = xavg + bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate2DR4: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripoleR4(i ,halo%tripoleRows) + x2 = bufTripoleR4(iDst,halo%tripoleRows) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripoleR4(i ,halo%tripoleRows) = xavg + bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! 
cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripoleR4(i ,halo%tripoleRows) + x2 = bufTripoleR4(iDst,halo%tripoleRows) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripoleR4(i ,halo%tripoleRows) = xavg + bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg + end do + + case default + call abort_ice( & + 'ice_HaloUpdate2DR4: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + array(iDst,jDst,dstBlock) = isign*bufTripoleR4(iSrc,jSrc) + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate2DR4: error deallocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DR4 + +!*********************************************************************** + + subroutine ice_HaloUpdate2DI4(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 2d horizontal integer arrays. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + integer (int_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + iSrc,jSrc, &! 
source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + integer (int_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + integer (int_kind) :: len ! length of messages + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0_int_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleI4)) then + nxGlobal = size(bufTripoleI4,dim=1) + bufTripoleI4 = fill + endif + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate2DI4: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg) + call MPI_IRECV(bufRecvI4(1:len,nmsg), len, MPI_INTEGER, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + bufSendI4(n,nmsg) = array(iSrc,jSrc,srcBlock) + end do + do n=halo%sizeSend(nmsg)+1,bufSizeSend + bufSendI4(n,nmsg) = fill ! fill remainder of buffer + end do + + len = halo%SizeSend(nmsg) + call MPI_ISEND(bufSendI4(1:len,nmsg), len, MPI_INTEGER, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:) = fill + array(1:nx_block,ny_block-j+1,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:) = fill + array(nx_block-i+1,1:ny_block,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! 
if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + array(iDst,jDst,dstBlock) = & + array(iSrc,jSrc,srcBlock) + else if (dstBlock < 0) then ! tripole copy into buffer + bufTripoleI4(iDst,jDst) = & + array(iSrc,jSrc,srcBlock) + endif + else if (srcBlock == 0) then + array(iDst,jDst,dstBlock) = fill + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + array(iDst,jDst,dstBlock) = bufRecvI4(n,nmsg) + else if (dstBlock < 0) then !tripole + bufTripoleI4(iDst,jDst) = bufRecvI4(n,nmsg) + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate2DI4: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripoleI4(i ,halo%tripoleRows) + x2 = bufTripoleI4(iDst,halo%tripoleRows) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripoleI4(i ,halo%tripoleRows) = xavg + bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripoleI4(i ,halo%tripoleRows) + x2 = bufTripoleI4(iDst,halo%tripoleRows) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripoleI4(i ,halo%tripoleRows) = xavg + bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate2DI4: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! 
cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripoleI4(i ,halo%tripoleRows) + x2 = bufTripoleI4(iDst,halo%tripoleRows) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripoleI4(i ,halo%tripoleRows) = xavg + bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripoleI4(i ,halo%tripoleRows) + x2 = bufTripoleI4(iDst,halo%tripoleRows) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripoleI4(i ,halo%tripoleRows) = xavg + bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg + end do + + case default + call abort_ice( & + 'ice_HaloUpdate2DI4: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + array(iDst,jDst,dstBlock) = isign*bufTripoleI4(iSrc,jSrc) + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate2DI4: error deallocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DI4 + +!*********************************************************************** + + subroutine ice_HaloUpdate3DR8(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 3d horizontal arrays of double precision. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + real (dbl_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! 
where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + real (dbl_kind), dimension(:,:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,k,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + nz, &! size of array in 3rd dimension + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + real (dbl_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + real (dbl_kind), dimension(:,:), allocatable :: & + bufSend, bufRecv ! 3d send,recv buffers + + real (dbl_kind), dimension(:,:,:), allocatable :: & + bufTripole ! 3d tripole buffer + + integer (int_kind) :: len ! length of message + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0.0_dbl_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleR8)) nxGlobal = size(bufTripoleR8,dim=1) + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DR8: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! allocate 3D buffers +! +!----------------------------------------------------------------------- + + nz = size(array, dim=3) + + allocate(bufSend(bufSizeSend*nz, halo%numMsgSend), & + bufRecv(bufSizeRecv*nz, halo%numMsgRecv), & + bufTripole(nxGlobal, halo%tripoleRows, nz), & + stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DR8: error allocating buffers') + return + endif + + bufTripole = fill + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg)*nz + call MPI_IRECV(bufRecv(1:len,nmsg), len, mpiR8, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! 
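+! Buffer layout produced by the packing loop below (illustrative,
+! assuming nz = 3 and a message containing two horizontal points):
+!
+!    bufSend(1:3,nmsg) = array(i1,j1,1:3,b1)
+!    bufSend(4:6,nmsg) = array(i2,j2,1:3,b2)
+!
+! i.e. the vertical index varies fastest, the message length is
+! sizeSend(nmsg)*nz, and the receiver unpacks in the same order.
+!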
+!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + i=0 + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + do k=1,nz + i = i + 1 + bufSend(i,nmsg) = array(iSrc,jSrc,k,srcBlock) + end do + end do + do n=i+1,bufSizeSend*nz + bufSend(n,nmsg) = fill ! fill remainder of buffer + end do + + len = halo%SizeSend(nmsg)*nz + call MPI_ISEND(bufSend(1:len,nmsg), len, mpiR8, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:,:) = fill + array(1:nx_block,ny_block-j+1,:,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:,:) = fill + array(nx_block-i+1,1:ny_block,:,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = & + array(iSrc,jSrc,k,srcBlock) + end do + else if (dstBlock < 0) then ! tripole copy into buffer + do k=1,nz + bufTripole(iDst,jDst,k) = & + array(iSrc,jSrc,k,srcBlock) + end do + endif + else if (srcBlock == 0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = fill + end do + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + i = 0 + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + do k=1,nz + i = i + 1 + array(iDst,jDst,k,dstBlock) = bufRecv(i,nmsg) + end do + else if (dstBlock < 0) then !tripole + do k=1,nz + i = i + 1 + bufTripole(iDst,jDst,k) = bufRecv(i,nmsg) + end do + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! 
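+! Worked example of the fold averaging applied below (illustrative
+! values only): on the u-fold with field_loc_NEcorner, global point i
+! is paired with iDst = nxGlobal - i, and the degenerate top row is
+! replaced by xavg = 0.5*(x1 + isign*x2).  For a vector field
+! (isign = -1) with x1 = 2.0 and x2 = -2.0 this gives xavg = 2.0, and
+! the two points are reset to 2.0 and -2.0, an antisymmetric pair as
+! required across the fold; for a scalar field (isign = 1) the two
+! points are simply replaced by their mean.
+!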
+!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate3DR8: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate3DR8: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case default + call abort_ice( & + 'ice_HaloUpdate3DR8: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! 
local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = isign* & + bufTripole(iSrc,jSrc,k) + end do + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DR8: error deallocating req,status arrays') + return + endif + + deallocate(bufSend, bufRecv, bufTripole, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DR8: error deallocating 3d buffers') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate3DR8 + +!*********************************************************************** + + subroutine ice_HaloUpdate3DR4(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 3d horizontal arrays of single precision. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + real (real_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + real (real_kind), dimension(:,:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,k,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + nz, &! size of array in 3rd dimension + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + real (real_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + real (real_kind), dimension(:,:), allocatable :: & + bufSend, bufRecv ! 
3d send,recv buffers + + real (real_kind), dimension(:,:,:), allocatable :: & + bufTripole ! 3d tripole buffer + + integer (int_kind) :: len ! length of message + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0.0_real_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleR4)) nxGlobal = size(bufTripoleR4,dim=1) + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DR4: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! allocate 3D buffers +! +!----------------------------------------------------------------------- + + nz = size(array, dim=3) + + allocate(bufSend(bufSizeSend*nz, halo%numMsgSend), & + bufRecv(bufSizeRecv*nz, halo%numMsgRecv), & + bufTripole(nxGlobal, halo%tripoleRows, nz), & + stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DR4: error allocating buffers') + return + endif + + bufTripole = fill + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg)*nz + call MPI_IRECV(bufRecv(1:len,nmsg), len, mpiR4, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + i=0 + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + do k=1,nz + i = i + 1 + bufSend(i,nmsg) = array(iSrc,jSrc,k,srcBlock) + end do + end do + do n=i+1,bufSizeSend*nz + bufSend(n,nmsg) = fill ! fill remainder of buffer + end do + + len = halo%SizeSend(nmsg)*nz + call MPI_ISEND(bufSend(1:len,nmsg), len, mpiR4, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:,:) = fill + array(1:nx_block,ny_block-j+1,:,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:,:) = fill + array(nx_block-i+1,1:ny_block,:,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! if srcBlock is less than zero, the message is a copy out of the +! 
tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = & + array(iSrc,jSrc,k,srcBlock) + end do + else if (dstBlock < 0) then ! tripole copy into buffer + do k=1,nz + bufTripole(iDst,jDst,k) = & + array(iSrc,jSrc,k,srcBlock) + end do + endif + else if (srcBlock == 0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = fill + end do + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + i = 0 + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + do k=1,nz + i = i + 1 + array(iDst,jDst,k,dstBlock) = bufRecv(i,nmsg) + end do + else if (dstBlock < 0) then !tripole + do k=1,nz + i = i + 1 + bufTripole(iDst,jDst,k) = bufRecv(i,nmsg) + end do + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate3DR4: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate3DR4: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! 
cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case default + call abort_ice( & + 'ice_HaloUpdate3DR4: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = isign* & + bufTripole(iSrc,jSrc,k) + end do + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DR4: error deallocating req,status arrays') + return + endif + + deallocate(bufSend, bufRecv, bufTripole, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DR4: error deallocating 3d buffers') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate3DR4 + +!*********************************************************************** + + subroutine ice_HaloUpdate3DI4(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 3d horizontal arrays of double precision. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! 
information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + integer (int_kind), dimension(:,:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,k,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + nz, &! size of array in 3rd dimension + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + integer (int_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + integer (int_kind), dimension(:,:), allocatable :: & + bufSend, bufRecv ! 3d send,recv buffers + + integer (int_kind), dimension(:,:,:), allocatable :: & + bufTripole ! 3d tripole buffer + + integer (int_kind) :: len ! length of message + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0_int_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleI4)) nxGlobal = size(bufTripoleI4,dim=1) + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DI4: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! allocate 3D buffers +! +!----------------------------------------------------------------------- + + nz = size(array, dim=3) + + allocate(bufSend(bufSizeSend*nz, halo%numMsgSend), & + bufRecv(bufSizeRecv*nz, halo%numMsgRecv), & + bufTripole(nxGlobal, halo%tripoleRows, nz), & + stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DI4: error allocating buffers') + return + endif + + bufTripole = fill + +!----------------------------------------------------------------------- +! +! post receives +! 
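+! Note on message matching in the exchange below: each receive is
+! posted with tag mpitagHalo + recvTask(nmsg), while the matching send
+! on the remote task uses mpitagHalo + my_task, so the tags agree and
+! a task exchanging halos with several neighbours can safely post all
+! of its receives before any of the corresponding sends complete.
+!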
+!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg)*nz + call MPI_IRECV(bufRecv(1:len,nmsg), len, MPI_INTEGER, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + i=0 + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + do k=1,nz + i = i + 1 + bufSend(i,nmsg) = array(iSrc,jSrc,k,srcBlock) + end do + end do + do n=i+1,bufSizeSend*nz + bufSend(n,nmsg) = fill ! fill remainder of buffer + end do + + len = halo%SizeSend(nmsg)*nz + call MPI_ISEND(bufSend(1:len,nmsg), len, MPI_INTEGER, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:,:) = fill + array(1:nx_block,ny_block-j+1,:,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:,:) = fill + array(nx_block-i+1,1:ny_block,:,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = & + array(iSrc,jSrc,k,srcBlock) + end do + else if (dstBlock < 0) then ! tripole copy into buffer + do k=1,nz + bufTripole(iDst,jDst,k) = & + array(iSrc,jSrc,k,srcBlock) + end do + endif + else if (srcBlock == 0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = fill + end do + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + i = 0 + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + do k=1,nz + i = i + 1 + array(iDst,jDst,k,dstBlock) = bufRecv(i,nmsg) + end do + else if (dstBlock < 0) then !tripole + do k=1,nz + i = i + 1 + bufTripole(iDst,jDst,k) = bufRecv(i,nmsg) + end do + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! 
bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate3DI4: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate3DI4: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k) + x2 = bufTripole(iDst,halo%tripoleRows,k) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripole(i ,halo%tripoleRows,k) = xavg + bufTripole(iDst,halo%tripoleRows,k) = isign*xavg + end do + end do + + case default + call abort_ice( & + 'ice_HaloUpdate3DI4: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! 
local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + do k=1,nz + array(iDst,jDst,k,dstBlock) = isign* & + bufTripole(iSrc,jSrc,k) + end do + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DI4: error deallocating req,status arrays') + return + endif + + deallocate(bufSend, bufRecv, bufTripole, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate3DI4: error deallocating 3d buffers') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate3DI4 + +!*********************************************************************** + + subroutine ice_HaloUpdate4DR8(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 4d horizontal arrays of double precision. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + real (dbl_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + real (dbl_kind), dimension(:,:,:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,k,l,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + nz, nt, &! size of array in 3rd,4th dimensions + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + real (dbl_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + real (dbl_kind), dimension(:,:), allocatable :: & + bufSend, bufRecv ! 
4d send,recv buffers + + real (dbl_kind), dimension(:,:,:,:), allocatable :: & + bufTripole ! 4d tripole buffer + + integer (int_kind) :: len ! length of message + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0.0_dbl_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleR8)) nxGlobal = size(bufTripoleR8,dim=1) + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DR8: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! allocate 4D buffers +! +!----------------------------------------------------------------------- + + nz = size(array, dim=3) + nt = size(array, dim=4) + + allocate(bufSend(bufSizeSend*nz*nt, halo%numMsgSend), & + bufRecv(bufSizeRecv*nz*nt, halo%numMsgRecv), & + bufTripole(nxGlobal, halo%tripoleRows, nz, nt), & + stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DR8: error allocating buffers') + return + endif + + bufTripole = fill + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg)*nz*nt + call MPI_IRECV(bufRecv(1:len,nmsg), len, mpiR8, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + i=0 + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + do l=1,nt + do k=1,nz + i = i + 1 + bufSend(i,nmsg) = array(iSrc,jSrc,k,l,srcBlock) + end do + end do + end do + + do n=i+1,bufSizeSend*nz*nt + bufSend(n,nmsg) = fill ! fill remainder of buffer + end do + + len = halo%SizeSend(nmsg)*nz*nt + call MPI_ISEND(bufSend(1:len,nmsg), len, mpiR8, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:,:,:) = fill + array(1:nx_block,ny_block-j+1,:,:,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:,:,:) = fill + array(nx_block-i+1,1:ny_block,:,:,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! 
if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = & + array(iSrc,jSrc,k,l,srcBlock) + end do + end do + else if (dstBlock < 0) then ! tripole copy into buffer + do l=1,nt + do k=1,nz + bufTripole(iDst,jDst,k,l) = & + array(iSrc,jSrc,k,l,srcBlock) + end do + end do + endif + else if (srcBlock == 0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = fill + end do + end do + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + i = 0 + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + do l=1,nt + do k=1,nz + i = i + 1 + array(iDst,jDst,k,l,dstBlock) = bufRecv(i,nmsg) + end do + end do + else if (dstBlock < 0) then !tripole + do l=1,nt + do k=1,nz + i = i + 1 + bufTripole(iDst,jDst,k,l) = bufRecv(i,nmsg) + end do + end do + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate4DR8: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_Nface) ! 
cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate4DR8: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = 0.5_dbl_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case default + call abort_ice( & + 'ice_HaloUpdate4DR8: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = isign* & + bufTripole(iSrc,jSrc,k,l) + end do + end do + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DR8: error deallocating req,status arrays') + return + endif + + deallocate(bufSend, bufRecv, bufTripole, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DR8: error deallocating 4d buffers') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate4DR8 + +!*********************************************************************** + + subroutine ice_HaloUpdate4DR4(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! 
This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 4d horizontal arrays of single precision. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + real (real_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + real (real_kind), dimension(:,:,:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,k,l,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + nz, nt, &! size of array in 3rd,4th dimensions + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + real (real_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + real (real_kind), dimension(:,:), allocatable :: & + bufSend, bufRecv ! 4d send,recv buffers + + real (real_kind), dimension(:,:,:,:), allocatable :: & + bufTripole ! 4d tripole buffer + + integer (int_kind) :: len ! length of message + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0.0_real_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleR4)) nxGlobal = size(bufTripoleR4,dim=1) + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DR4: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! allocate 4D buffers +! 
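+! note: the send/recv buffers are sized as bufSizeSend*nz*nt and
+! bufSizeRecv*nz*nt because each halo point carries all nz*nt values
+! of the 3rd and 4th array dimensions; bufTripole likewise keeps the
+! full (nz,nt) block for every point along the global tripole rows
+!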
+!----------------------------------------------------------------------- + + nz = size(array, dim=3) + nt = size(array, dim=4) + + allocate(bufSend(bufSizeSend*nz*nt, halo%numMsgSend), & + bufRecv(bufSizeRecv*nz*nt, halo%numMsgRecv), & + bufTripole(nxGlobal, halo%tripoleRows, nz, nt), & + stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DR4: error allocating buffers') + return + endif + + bufTripole = fill + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg)*nz*nt + call MPI_IRECV(bufRecv(1:len,nmsg), len, mpiR4, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + i=0 + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + do l=1,nt + do k=1,nz + i = i + 1 + bufSend(i,nmsg) = array(iSrc,jSrc,k,l,srcBlock) + end do + end do + end do + + do n=i+1,bufSizeSend*nz*nt + bufSend(n,nmsg) = fill ! fill remainder of buffer + end do + + len = halo%SizeSend(nmsg)*nz*nt + call MPI_ISEND(bufSend(1:len,nmsg), len, mpiR4, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:,:,:) = fill + array(1:nx_block,ny_block-j+1,:,:,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:,:,:) = fill + array(nx_block-i+1,1:ny_block,:,:,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = & + array(iSrc,jSrc,k,l,srcBlock) + end do + end do + else if (dstBlock < 0) then ! tripole copy into buffer + do l=1,nt + do k=1,nz + bufTripole(iDst,jDst,k,l) = & + array(iSrc,jSrc,k,l,srcBlock) + end do + end do + endif + else if (srcBlock == 0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = fill + end do + end do + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! 
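+! note: the MPI_WAITALL below completes only the receive requests;
+! the matching wait on the send requests is deferred to the end of
+! the routine, just before the message buffers are deallocated
+!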
+!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + i = 0 + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + do l=1,nt + do k=1,nz + i = i + 1 + array(iDst,jDst,k,l,dstBlock) = bufRecv(i,nmsg) + end do + end do + else if (dstBlock < 0) then !tripole + do l=1,nt + do k=1,nz + i = i + 1 + bufTripole(iDst,jDst,k,l) = bufRecv(i,nmsg) + end do + end do + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate4DR4: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate4DR4: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! 
cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = 0.5_real_kind*(x1 + isign*x2) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case default + call abort_ice( & + 'ice_HaloUpdate4DR4: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = isign* & + bufTripole(iSrc,jSrc,k,l) + end do + end do + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DR4: error deallocating req,status arrays') + return + endif + + deallocate(bufSend, bufRecv, bufTripole, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DR4: error deallocating 4d buffers') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate4DR4 + +!*********************************************************************** + + subroutine ice_HaloUpdate4DI4(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 4d horizontal integer arrays. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + integer (int_kind), dimension(:,:,:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! 
local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,k,l,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + nz, nt, &! size of array in 3rd,4th dimensions + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + integer (int_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + integer (int_kind), dimension(:,:), allocatable :: & + bufSend, bufRecv ! 4d send,recv buffers + + integer (int_kind), dimension(:,:,:,:), allocatable :: & + bufTripole ! 4d tripole buffer + + integer (int_kind) :: len ! length of messages + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! +!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0_int_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleI4)) nxGlobal = size(bufTripoleI4,dim=1) + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DI4: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! allocate 4D buffers +! +!----------------------------------------------------------------------- + + nz = size(array, dim=3) + nt = size(array, dim=4) + + allocate(bufSend(bufSizeSend*nz*nt, halo%numMsgSend), & + bufRecv(bufSizeRecv*nz*nt, halo%numMsgRecv), & + bufTripole(nxGlobal, halo%tripoleRows, nz, nt), & + stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DI4: error allocating buffers') + return + endif + + bufTripole = fill + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg)*nz*nt + call MPI_IRECV(bufRecv(1:len,nmsg), len, MPI_INTEGER, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + i=0 + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + do l=1,nt + do k=1,nz + i = i + 1 + bufSend(i,nmsg) = array(iSrc,jSrc,k,l,srcBlock) + end do + end do + end do + + do n=i+1,bufSizeSend*nz*nt + bufSend(n,nmsg) = fill ! 
fill remainder of buffer + end do + + len = halo%SizeSend(nmsg)*nz*nt + call MPI_ISEND(bufSend(1:len,nmsg), len, MPI_INTEGER, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, fill out halo region +! needed for masked halos to ensure halo values are filled for +! halo grid cells that are not updated +! +!----------------------------------------------------------------------- + + do j = 1,nghost + array(1:nx_block, j,:,:,:) = fill + array(1:nx_block,ny_block-j+1,:,:,:) = fill + enddo + do i = 1,nghost + array(i, 1:ny_block,:,:,:) = fill + array(nx_block-i+1,1:ny_block,:,:,:) = fill + enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock > 0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = & + array(iSrc,jSrc,k,l,srcBlock) + end do + end do + else if (dstBlock < 0) then ! tripole copy into buffer + do l=1,nt + do k=1,nz + bufTripole(iDst,jDst,k,l) = & + array(iSrc,jSrc,k,l,srcBlock) + end do + end do + endif + else if (srcBlock == 0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = fill + end do + end do + endif + end do + +!----------------------------------------------------------------------- +! +! wait for receives to finish and then unpack the recv buffer into +! ghost cells +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + i = 0 + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock > 0) then + do l=1,nt + do k=1,nz + i = i + 1 + array(iDst,jDst,k,l,dstBlock) = bufRecv(i,nmsg) + end do + end do + else if (dstBlock < 0) then !tripole + do l=1,nt + do k=1,nz + i = i + 1 + bufTripole(iDst,jDst,k,l) = bufRecv(i,nmsg) + end do + end do + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows +! (T-fold) of physical domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate4DI4: Unknown field kind') + end select + + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! 
cell center location + + ioffset = -1 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 2,nxGlobal/2 + iDst = nxGlobal - i + 2 + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 0 + joffset = 1 + + case (field_loc_Eface) ! cell center location + + ioffset = 0 + joffset = 0 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = -1 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate4DI4: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 - 1 + iDst = nxGlobal - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case (field_loc_Eface) ! cell center location + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) ! cell corner (velocity) location + + ioffset = 0 + joffset = 1 + + !*** top row is degenerate, so must enforce symmetry + !*** use average of two degenerate points for value + + do l=1,nt + do k=1,nz + do i = 1,nxGlobal/2 + iDst = nxGlobal + 1 - i + x1 = bufTripole(i ,halo%tripoleRows,k,l) + x2 = bufTripole(iDst,halo%tripoleRows,k,l) + xavg = nint(0.5_dbl_kind*(x1 + isign*x2)) + bufTripole(i ,halo%tripoleRows,k,l) = xavg + bufTripole(iDst,halo%tripoleRows,k,l) = isign*xavg + end do + end do + end do + + case default + call abort_ice( & + 'ice_HaloUpdate4DI4: Unknown field location') + end select + + endif + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! 
local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal + + !*** for center and Eface on u-fold, and NE corner and Nface + !*** on T-fold, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then + do l=1,nt + do k=1,nz + array(iDst,jDst,k,l,dstBlock) = isign* & + bufTripole(iSrc,jSrc,k,l) + end do + end do + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DI4: error deallocating req,status arrays') + return + endif + + deallocate(bufSend, bufRecv, bufTripole, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate4DI4: error deallocating 4d buffers') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate4DI4 + +!*********************************************************************** +! This routine updates ghost cells for an input array using +! a second array as needed by the stress fields. + + subroutine ice_HaloUpdate_stress(array1, array2, halo, & + fieldLoc, fieldKind, & + fillValue) + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + real (dbl_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + array1 ,& ! array containing field for which halo + ! needs to be updated + array2 ! array containing field for which halo + ! in array1 needs to be updated + +! local variables + + integer (int_kind) :: & + i,j,n,nmsg, &! dummy loop indices + ierr, &! error or status flag for MPI,alloc + nxGlobal, &! global domain size in x (tripole) + iSrc,jSrc, &! source addresses for message + iDst,jDst, &! dest addresses for message + srcBlock, &! local block number for source + dstBlock, &! local block number for destination + ioffset, joffset, &! address shifts for tripole + isign ! sign factor for tripole grids + + integer (int_kind), dimension(:), allocatable :: & + sndRequest, &! MPI request ids + rcvRequest ! MPI request ids + + integer (int_kind), dimension(:,:), allocatable :: & + sndStatus, &! MPI status flags + rcvStatus ! MPI status flags + + real (dbl_kind) :: & + fill, &! value to use for unknown points + x1,x2,xavg ! scalars for enforcing symmetry at U pts + + integer (int_kind) :: len ! length of messages + +!----------------------------------------------------------------------- +! +! initialize error code and fill value +! 
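+! note: unlike the 3d/4d updates above, this stress variant does not
+! allocate per-call message buffers; it appears to reuse the
+! module-level bufSendR8, bufRecvR8 and bufTripoleR8 arrays and only
+! refreshes the tripole zipper values needed by the stress fields
+!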
+!----------------------------------------------------------------------- + + if (present(fillValue)) then + fill = fillValue + else + fill = 0.0_dbl_kind + endif + + nxGlobal = 0 + if (allocated(bufTripoleR8)) then + nxGlobal = size(bufTripoleR8,dim=1) + bufTripoleR8 = fill + endif + +!----------------------------------------------------------------------- +! +! allocate request and status arrays for messages +! +!----------------------------------------------------------------------- + + allocate(sndRequest(halo%numMsgSend), & + rcvRequest(halo%numMsgRecv), & + sndStatus(MPI_STATUS_SIZE,halo%numMsgSend), & + rcvStatus(MPI_STATUS_SIZE,halo%numMsgRecv), stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate_stress: error allocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- +! +! post receives +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgRecv + + len = halo%SizeRecv(nmsg) + call MPI_IRECV(bufRecvR8(1:len,nmsg), len, mpiR8, & + halo%recvTask(nmsg), & + mpitagHalo + halo%recvTask(nmsg), & + halo%communicator, rcvRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! fill send buffer and post sends +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numMsgSend + + do n=1,halo%sizeSend(nmsg) + iSrc = halo%sendAddr(1,n,nmsg) + jSrc = halo%sendAddr(2,n,nmsg) + srcBlock = halo%sendAddr(3,n,nmsg) + + bufSendR8(n,nmsg) = array2(iSrc,jSrc,srcBlock) + end do + do n=halo%sizeSend(nmsg)+1,bufSizeSend + bufSendR8(n,nmsg) = fill ! fill remainder of buffer + end do + + len = halo%SizeSend(nmsg) + call MPI_ISEND(bufSendR8(1:len,nmsg), len, mpiR8, & + halo%sendTask(nmsg), & + mpitagHalo + my_task, & + halo%communicator, sndRequest(nmsg), ierr) + end do + +!----------------------------------------------------------------------- +! +! while messages are being communicated, +! do NOT zero the halo out, this halo update just updates +! the tripole zipper as needed for stresses. if you zero +! it out, all halo values will be wiped out. +!----------------------------------------------------------------------- +! do j = 1,nghost +! array1(1:nx_block, j,:) = fill +! array1(1:nx_block,ny_block-j+1,:) = fill +! enddo +! do i = 1,nghost +! array1(i, 1:ny_block,:) = fill +! array1(nx_block-i+1,1:ny_block,:) = fill +! enddo + +!----------------------------------------------------------------------- +! +! do local copies while waiting for messages to complete +! if srcBlock is zero, that denotes an eliminated land block or a +! closed boundary where ghost cell values are undefined +! if srcBlock is less than zero, the message is a copy out of the +! tripole buffer and will be treated later +! +!----------------------------------------------------------------------- + + do nmsg=1,halo%numLocalCopies + iSrc = halo%srcLocalAddr(1,nmsg) + jSrc = halo%srcLocalAddr(2,nmsg) + srcBlock = halo%srcLocalAddr(3,nmsg) + iDst = halo%dstLocalAddr(1,nmsg) + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + if (srcBlock > 0) then + if (dstBlock < 0) then ! tripole copy into buffer + bufTripoleR8(iDst,jDst) = & + array2(iSrc,jSrc,srcBlock) + endif + else if (srcBlock == 0) then + array1(iDst,jDst,dstBlock) = fill + endif + end do + +!----------------------------------------------------------------------- +! +! 
wait for receives to finish and then unpack the recv buffer into +! ghost cells +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgRecv, rcvRequest, rcvStatus, ierr) + + do nmsg=1,halo%numMsgRecv + do n=1,halo%sizeRecv(nmsg) + iDst = halo%recvAddr(1,n,nmsg) + jDst = halo%recvAddr(2,n,nmsg) + dstBlock = halo%recvAddr(3,n,nmsg) + + if (dstBlock < 0) then !tripole + bufTripoleR8(iDst,jDst) = bufRecvR8(n,nmsg) + endif + end do + end do + +!----------------------------------------------------------------------- +! +! take care of northern boundary in tripole case +! bufTripole array contains the top haloWidth+1 rows of physical +! domain for entire (global) top row +! +!----------------------------------------------------------------------- + + if (nxGlobal > 0) then + + select case (fieldKind) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case default + call abort_ice( & + 'ice_HaloUpdate_stress: Unknown field kind') + end select + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + case (field_loc_Eface) + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) + + ioffset = 0 + joffset = 1 + + case default + call abort_ice( & + 'ice_HaloUpdate_stress: Unknown field location') + end select + + !*** copy out of global tripole buffer into local + !*** ghost cells + + !*** look through local copies to find the copy out + !*** messages (srcBlock < 0) + + do nmsg=1,halo%numLocalCopies + srcBlock = halo%srcLocalAddr(3,nmsg) + + if (srcBlock < 0) then + + iSrc = halo%srcLocalAddr(1,nmsg) ! tripole buffer addr + jSrc = halo%srcLocalAddr(2,nmsg) + + iDst = halo%dstLocalAddr(1,nmsg) ! local block addr + jDst = halo%dstLocalAddr(2,nmsg) + dstBlock = halo%dstLocalAddr(3,nmsg) + + !*** correct for offsets + iSrc = iSrc - ioffset + jSrc = jSrc - joffset + if (iSrc == 0) iSrc = nxGlobal + + !*** for center and Eface, do not need to replace + !*** top row of physical domain, so jSrc should be + !*** out of range and skipped + !*** otherwise do the copy + + if (jSrc <= nghost+1) then + array1(iDst,jDst,dstBlock) = isign*bufTripoleR8(iSrc,jSrc) + endif + + endif + end do + + endif + +!----------------------------------------------------------------------- +! +! wait for sends to complete and deallocate arrays +! +!----------------------------------------------------------------------- + + call MPI_WAITALL(halo%numMsgSend, sndRequest, sndStatus, ierr) + + deallocate(sndRequest, rcvRequest, sndStatus, rcvStatus, stat=ierr) + + if (ierr > 0) then + call abort_ice( & + 'ice_HaloUpdate_stress: error deallocating req,status arrays') + return + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate_stress + +!*********************************************************************** + + subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & + srcProc, dstProc, msgSize) + +! This is a utility routine to increment the arrays for counting +! whether messages are required. It checks the source and destination +! task to see whether the current task needs to send, receive or +! copy messages to fill halo regions (ghost cells). + + integer (int_kind), intent(in) :: & + srcProc, &! source processor for communication + dstProc, &! destination processor for communication + msgSize ! 
number of words for this message + + integer (int_kind), dimension(:), intent(inout) :: & + sndCounter, &! array for counting messages to be sent + rcvCounter ! array for counting messages to be received + +!----------------------------------------------------------------------- +! +! error check +! +!----------------------------------------------------------------------- + + if (srcProc < 0 .or. dstProc < 0 .or. & + srcProc > size(sndCounter) .or. & + dstProc > size(rcvCounter)) then + call abort_ice( & + 'ice_HaloIncrementMsgCount: invalid processor number') + return + endif + +!----------------------------------------------------------------------- +! +! if destination all land or outside closed boundary (dstProc = 0), +! then no send is necessary, so do the rest only for dstProc /= 0 +! +!----------------------------------------------------------------------- + + if (dstProc == 0) return + +!----------------------------------------------------------------------- +! +! if the current processor is the source, must send data +! local copy if dstProc = srcProc +! +!----------------------------------------------------------------------- + + if (srcProc == my_task + 1) sndCounter(dstProc) = & + sndCounter(dstProc) + msgSize + +!----------------------------------------------------------------------- +! +! if the current processor is the destination, must receive data +! local copy if dstProc = srcProc +! +!----------------------------------------------------------------------- + + if (dstProc == my_task + 1) then + + if (srcProc > 0) then + !*** the source block has ocean points + !*** count as a receive from srcProc + + rcvCounter(srcProc) = rcvCounter(srcProc) + msgSize + + else + !*** if the source block has been dropped, create + !*** a local copy to fill halo with a fill value + + rcvCounter(dstProc) = rcvCounter(dstProc) + msgSize + + endif + endif +!----------------------------------------------------------------------- + + end subroutine ice_HaloIncrementMsgCount + +!*********************************************************************** + + subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & + dstBlock, dstProc, dstLocalID, & + direction) + +! This is a utility routine to determine the required address and +! message information for a particular pair of blocks. + + integer (int_kind), intent(in) :: & + srcBlock, dstBlock, & ! source,destination block id + srcProc, dstProc, & ! source,destination processor location + srcLocalID, dstLocalID ! source,destination local index + + character (*), intent(in) :: & + direction ! direction of neighbor block + ! (north,south,east,west, + ! and NE, NW, SE, SW) + + type (ice_halo), intent(inout) :: & + halo ! data structure containing halo info + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + msgIndx, &! message counter and index into msg array + blockIndx, &! block counter and index into msg array + bufSize, &! size of message buffer + ibSrc, ieSrc, jbSrc, jeSrc, &! phys domain info for source block + ibDst, ieDst, jbDst, jeDst, &! phys domain info for dest block + nxGlobal, &! size of global domain in e-w direction + i,j,n ! dummy loop index + + integer (int_kind), dimension(:), pointer :: & + iGlobal ! global i index for location in tripole + +!----------------------------------------------------------------------- +! +! initialize +! 
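+! note: nxGlobal is only assigned when bufTripoleR8 has been
+! allocated, so it should be referenced below only on the tripole
+! branches (negative srcBlock or dstBlock) where that buffer exists
+!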
+!----------------------------------------------------------------------- + + if (allocated(bufTripoleR8)) nxGlobal = size(bufTripoleR8,dim=1) + +!----------------------------------------------------------------------- +! +! if destination all land or outside closed boundary (dstProc = 0), +! then no send is necessary, so do the rest only for dstProc /= 0 +! +!----------------------------------------------------------------------- + + if (dstProc == 0) return + +!----------------------------------------------------------------------- +! +! get block information if either block is local +! +!----------------------------------------------------------------------- + + if (srcProc == my_task+1 .or. dstProc == my_task+1) then + + if (srcBlock >= 0 .and. dstBlock >= 0) then + call get_block_parameter(srcBlock, & + ilo=ibSrc, ihi=ieSrc, & + jlo=jbSrc, jhi=jeSrc) + else ! tripole - need iGlobal info + call get_block_parameter(abs(srcBlock), & + ilo=ibSrc, ihi=ieSrc, & + jlo=jbSrc, jhi=jeSrc, & + i_glob=iGlobal) + + endif + + if (dstBlock /= 0) then + call get_block_parameter(abs(dstBlock), & + ilo=ibDst, ihi=ieDst, & + jlo=jbDst, jhi=jeDst) + endif + + endif + +!----------------------------------------------------------------------- +! +! if both blocks are local, create a local copy to fill halo +! +!----------------------------------------------------------------------- + + if (srcProc == my_task+1 .and. & + dstProc == my_task+1) then + + !*** compute addresses based on direction + + msgIndx = halo%numLocalCopies + + if (msgIndx > size(halo%srcLocalAddr,dim=2) .or. & + msgIndx > size(halo%dstLocalAddr,dim=2)) then + call abort_ice( & + 'ice_HaloMsgCreate: msg count > array size') + return + endif + + select case (direction) + case ('east') + + !*** copy easternmost physical domain of src + !*** into westernmost halo of dst + + do j=1,jeSrc-jbSrc+1 + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ieSrc - nghost + i + halo%srcLocalAddr(2,msgIndx) = jbSrc + j - 1 + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = i + halo%dstLocalAddr(2,msgIndx) = jbDst + j - 1 + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case ('west') + + !*** copy westernmost physical domain of src + !*** into easternmost halo of dst + + do j=1,jeSrc-jbSrc+1 + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 + halo%srcLocalAddr(2,msgIndx) = jbSrc + j - 1 + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = ieDst + i + halo%dstLocalAddr(2,msgIndx) = jbDst + j - 1 + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case ('north') + + !*** copy northern physical domain of src + !*** into southern halo of dst + + if (srcBlock > 0 .and. dstBlock > 0) then ! normal north boundary + + do j=1,nghost + do i=1,ieSrc-ibSrc+1 + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 + halo%srcLocalAddr(2,msgIndx) = jeSrc - nghost + j + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = ibDst + i - 1 + halo%dstLocalAddr(2,msgIndx) = j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + else if (srcBlock > 0 .and. 
dstBlock < 0) then + + !*** tripole grid - copy info into tripole buffer + !*** copy physical domain of top halo+1 rows + !*** into global buffer at src location + + !*** perform an error check to make sure the + !*** block has enough points to perform a tripole + !*** update + + if (jeSrc - jbSrc + 1 < halo%tripoleRows) then + call abort_ice( & + 'ice_HaloMsgCreate: not enough points in block for tripole') + return + endif + + do j=1,halo%tripoleRows + do i=1,ieSrc-ibSrc+1 + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 + halo%srcLocalAddr(2,msgIndx) = jeSrc-halo%tripoleRows+j + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = iGlobal(ibSrc + i - 1) + halo%dstLocalAddr(2,msgIndx) = j + halo%dstLocalAddr(3,msgIndx) = -dstLocalID + + end do + end do + + else if (srcBlock < 0 .and. dstBlock > 0) then + + !*** tripole grid - set up for copying out of + !*** tripole buffer into ghost cell domains + !*** include e-w ghost cells + + do j=1,halo%tripoleRows + do i=1,ieSrc+nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = nxGlobal - iGlobal(i) + 1 + halo%srcLocalAddr(2,msgIndx) = nghost + 3 - j + halo%srcLocalAddr(3,msgIndx) = -srcLocalID + + halo%dstLocalAddr(1,msgIndx) = i + if (j.gt.nghost+1) then + halo%dstLocalAddr(2,msgIndx) = -1 ! never used + else + halo%dstLocalAddr(2,msgIndx) = jeSrc + j - 1 + endif + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + endif + + case ('south') + + !*** copy southern physical domain of src + !*** into northern halo of dst + + do j=1,nghost + do i=1,ieSrc-ibSrc+1 + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 + halo%srcLocalAddr(2,msgIndx) = jbSrc + j - 1 + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = ibDst + i - 1 + halo%dstLocalAddr(2,msgIndx) = jeDst + j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case ('northeast') + + !*** normal northeast boundary - just copy NE corner + !*** of physical domain into SW halo of NE nbr block + + if (dstBlock > 0) then + + do j=1,nghost + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ieSrc - nghost + i + halo%srcLocalAddr(2,msgIndx) = jeSrc - nghost + j + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = i + halo%dstLocalAddr(2,msgIndx) = j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + else + + !*** tripole grid - this local copy should already + !*** have taken place for the north boundary + + endif + + case ('northwest') + + !*** normal northeast boundary - just copy NW corner + !*** of physical domain into SE halo of NW nbr block + + if (dstBlock > 0) then + + do j=1,nghost + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 + halo%srcLocalAddr(2,msgIndx) = jeSrc - nghost + j + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = ieDst + i + halo%dstLocalAddr(2,msgIndx) = j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + else + + !*** tripole grid - this local copy should already + !*** have taken place for the north boundary + + endif + + case ('southeast') + + !*** copy southeastern corner of src physical domain + !*** into northwestern halo of dst + + do j=1,nghost + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ieSrc - nghost + i + halo%srcLocalAddr(2,msgIndx) = jbSrc + j - 1 + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = i + 
halo%dstLocalAddr(2,msgIndx) = jeDst + j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case ('southwest') + + !*** copy southwestern corner of src physical domain + !*** into northeastern halo of dst + + do j=1,nghost + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 + halo%srcLocalAddr(2,msgIndx) = jbSrc + j - 1 + halo%srcLocalAddr(3,msgIndx) = srcLocalID + + halo%dstLocalAddr(1,msgIndx) = ieDst + i + halo%dstLocalAddr(2,msgIndx) = jeDst + j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case default + + call abort_ice( & + 'ice_HaloMsgCreate: unknown direction local copy') + return + + end select + + halo%numLocalCopies = msgIndx + + if (msgIndx > size(halo%srcLocalAddr,dim=2) .or. & + msgIndx > size(halo%dstLocalAddr,dim=2)) then + call abort_ice( & + 'ice_HaloMsgCreate: msg count > array size') + return + endif + +!----------------------------------------------------------------------- +! +! if dest block is local and source block does not exist, create a +! local copy to fill halo with a fill value +! +!----------------------------------------------------------------------- + + else if (srcProc == 0 .and. dstProc == my_task+1) then + + msgIndx = halo%numLocalCopies + + if (msgIndx > size(halo%srcLocalAddr,dim=2) .or. & + msgIndx > size(halo%dstLocalAddr,dim=2)) then + call abort_ice( & + 'ice_HaloMsgCreate: msg count > array size') + return + endif + + !*** compute addresses based on direction + + select case (direction) + case ('east') + + !*** copy easternmost physical domain of src + !*** into westernmost halo of dst + + do j=1,jeSrc-jbSrc+1 + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = 0 + halo%srcLocalAddr(2,msgIndx) = 0 + halo%srcLocalAddr(3,msgIndx) = 0 + + halo%dstLocalAddr(1,msgIndx) = i + halo%dstLocalAddr(2,msgIndx) = jbDst + j - 1 + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case ('west') + + !*** copy westernmost physical domain of src + !*** into easternmost halo of dst + + do j=1,jeSrc-jbSrc+1 + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = 0 + halo%srcLocalAddr(2,msgIndx) = 0 + halo%srcLocalAddr(3,msgIndx) = 0 + + halo%dstLocalAddr(1,msgIndx) = ieDst + i + halo%dstLocalAddr(2,msgIndx) = jbDst + j - 1 + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case ('north') + + !*** copy northern physical domain of src + !*** into southern halo of dst + + if (dstBlock > 0) then ! 
normal north boundary + + do j=1,nghost + do i=1,ieSrc-ibSrc+1 + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = 0 + halo%srcLocalAddr(2,msgIndx) = 0 + halo%srcLocalAddr(3,msgIndx) = 0 + + halo%dstLocalAddr(1,msgIndx) = ibDst + i - 1 + halo%dstLocalAddr(2,msgIndx) = j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + endif + + case ('south') + + !*** copy southern physical domain of src + !*** into northern halo of dst + + do j=1,nghost + do i=1,ieSrc-ibSrc+1 + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = 0 + halo%srcLocalAddr(2,msgIndx) = 0 + halo%srcLocalAddr(3,msgIndx) = 0 + + halo%dstLocalAddr(1,msgIndx) = ibDst + i - 1 + halo%dstLocalAddr(2,msgIndx) = jeDst + j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case ('northeast') + + !*** normal northeast boundary - just copy NE corner + !*** of physical domain into SW halo of NE nbr block + + if (dstBlock > 0) then + + do j=1,nghost + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = 0 + halo%srcLocalAddr(2,msgIndx) = 0 + halo%srcLocalAddr(3,msgIndx) = 0 + + halo%dstLocalAddr(1,msgIndx) = i + halo%dstLocalAddr(2,msgIndx) = j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + endif + + case ('northwest') + + !*** normal northeast boundary - just copy NW corner + !*** of physical domain into SE halo of NW nbr block + + if (dstBlock > 0) then + + do j=1,nghost + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = 0 + halo%srcLocalAddr(2,msgIndx) = 0 + halo%srcLocalAddr(3,msgIndx) = 0 + + halo%dstLocalAddr(1,msgIndx) = ieDst + i + halo%dstLocalAddr(2,msgIndx) = j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + endif + + case ('southeast') + + !*** copy southeastern corner of src physical domain + !*** into northwestern halo of dst + + do j=1,nghost + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = 0 + halo%srcLocalAddr(2,msgIndx) = 0 + halo%srcLocalAddr(3,msgIndx) = 0 + + halo%dstLocalAddr(1,msgIndx) = i + halo%dstLocalAddr(2,msgIndx) = jeDst + j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case ('southwest') + + !*** copy southwestern corner of src physical domain + !*** into northeastern halo of dst + + do j=1,nghost + do i=1,nghost + + msgIndx = msgIndx + 1 + + halo%srcLocalAddr(1,msgIndx) = 0 + halo%srcLocalAddr(2,msgIndx) = 0 + halo%srcLocalAddr(3,msgIndx) = 0 + + halo%dstLocalAddr(1,msgIndx) = ieDst + i + halo%dstLocalAddr(2,msgIndx) = jeDst + j + halo%dstLocalAddr(3,msgIndx) = dstLocalID + + end do + end do + + case default + + call abort_ice( & + 'ice_HaloMsgCreate: unknown direction local copy') + return + + end select + + halo%numLocalCopies = msgIndx + + if (msgIndx > size(halo%srcLocalAddr,dim=2) .or. & + msgIndx > size(halo%dstLocalAddr,dim=2)) then + call abort_ice( & + 'ice_HaloMsgCreate: msg count > array size') + return + endif + +!----------------------------------------------------------------------- +! +! if source block local and dest block remote, send a message +! +!----------------------------------------------------------------------- + + else if (srcProc == my_task+1 .and. & + dstProc /= my_task+1 .and. 
dstProc > 0) then + + !*** first check to see if a message to this processor has + !*** already been defined + !*** if not, update counters and indices + + msgIndx = 0 + + srchSend: do n=1,halo%numMsgSend + if (halo%sendTask(n) == dstProc - 1) then + msgIndx = n + bufSize = halo%sizeSend(n) + exit srchSend + endif + end do srchSend + + if (msgIndx == 0) then + msgIndx = halo%numMsgSend + 1 + halo%numMsgSend = msgIndx + halo%sendTask(msgIndx) = dstProc - 1 + bufSize = 0 + endif + + !*** now compute message info based on msg direction + + select case (direction) + case ('east') + + !*** send easternmost physical domain of src + !*** into westernmost halo of dst + + do j=1,jeSrc-jbSrc+1 + do i=1,nghost + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx) = ieSrc - nghost + i + halo%sendAddr(2,bufSize,msgIndx) = jbSrc + j - 1 + halo%sendAddr(3,bufSize,msgIndx) = srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + case ('west') + + !*** copy westernmost physical domain of src + !*** into easternmost halo of dst + + do j=1,jeSrc-jbSrc+1 + do i=1,nghost + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx) = ibSrc + i - 1 + halo%sendAddr(2,bufSize,msgIndx) = jbSrc + j - 1 + halo%sendAddr(3,bufSize,msgIndx) = srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + case ('north') + + if (dstBlock > 0) then + + !*** copy northern physical domain of src + !*** into southern halo of dst + + do j=1,nghost + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx) = ibSrc + i - 1 + halo%sendAddr(2,bufSize,msgIndx) = jeSrc-nghost+j + halo%sendAddr(3,bufSize,msgIndx) = srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + else + + !*** tripole block - send top halo%tripoleRows rows of phys domain + + halo%tripSend(msgIndx) = 1 + do j=1,halo%tripoleRows + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx)=ibSrc + i - 1 + halo%sendAddr(2,bufSize,msgIndx)=jeSrc-halo%tripoleRows+j + halo%sendAddr(3,bufSize,msgIndx)=srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + endif + + case ('south') + + !*** copy southern physical domain of src + !*** into northern halo of dst + + do j=1,nghost + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx) = ibSrc + i - 1 + halo%sendAddr(2,bufSize,msgIndx) = jbSrc + j - 1 + halo%sendAddr(3,bufSize,msgIndx) = srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + case ('northeast') + + + if (dstBlock > 0) then + + !*** normal northeast corner + !*** copy northeast corner of src physical domain + !*** into southwestern halo of dst + + do j=1,nghost + do i=1,nghost + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx) = ieSrc-nghost+i + halo%sendAddr(2,bufSize,msgIndx) = jeSrc-nghost+j + halo%sendAddr(3,bufSize,msgIndx) = srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + else + + !*** tripole block - send top halo%tripoleRows rows of phys domain + + halo%tripSend(msgIndx) = 1 + do j=1,halo%tripoleRows + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx)=ibSrc + i - 1 + halo%sendAddr(2,bufSize,msgIndx)=jeSrc-halo%tripoleRows+j + halo%sendAddr(3,bufSize,msgIndx)=srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + endif + + case ('northwest') + + if (dstBlock > 0) then + + !*** normal northwest corner + !*** copy northwest corner of src physical domain + !*** into southeastern halo of dst + + do 
j=1,nghost + do i=1,nghost + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx) = ibSrc + i - 1 + halo%sendAddr(2,bufSize,msgIndx) = jeSrc-nghost+j + halo%sendAddr(3,bufSize,msgIndx) = srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + else + + !*** tripole block - send top halo%tripoleRows rows of phys domain + + halo%tripSend(msgIndx) = 1 + do j=1,halo%tripoleRows + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx)=ibSrc + i - 1 + halo%sendAddr(2,bufSize,msgIndx)=jeSrc-halo%tripoleRows+j + halo%sendAddr(3,bufSize,msgIndx)=srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + endif + + case ('southeast') + + !*** copy southeastern corner of src physical domain + !*** into northwestern halo of dst + + do j=1,nghost + do i=1,nghost + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx) = ieSrc - nghost + i + halo%sendAddr(2,bufSize,msgIndx) = jbSrc + j - 1 + halo%sendAddr(3,bufSize,msgIndx) = srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + case ('southwest') + + !*** copy southwestern corner of src physical domain + !*** into northeastern halo of dst + + do j=1,nghost + do i=1,nghost + + bufSize = bufSize + 1 + + halo%sendAddr(1,bufSize,msgIndx) = ibSrc + i - 1 + halo%sendAddr(2,bufSize,msgIndx) = jbSrc + j - 1 + halo%sendAddr(3,bufSize,msgIndx) = srcLocalID + + end do + end do + + halo%sizeSend(msgIndx) = bufSize + + case default + + !*** already checked in previous case construct + + end select + +!----------------------------------------------------------------------- +! +! if source block remote and dest block local, recv a message +! +!----------------------------------------------------------------------- + + else if (dstProc == my_task+1 .and. & + srcProc /= my_task+1 .and. 
srcProc > 0) then + + !*** first check to see if a message from this processor has + !*** already been defined + !*** if not, update counters and indices + + msgIndx = 0 + + srchRecv: do n=1,halo%numMsgRecv + if (halo%recvTask(n) == srcProc - 1) then + msgIndx = n + bufSize = halo%sizeRecv(n) + exit srchRecv + endif + end do srchRecv + + if (msgIndx == 0) then + msgIndx = halo%numMsgRecv + 1 + halo%numMsgRecv = msgIndx + halo%recvTask(msgIndx) = srcProc - 1 + bufSize = 0 + endif + + !*** now compute message info based on msg direction + + select case (direction) + case ('east') + + !*** send easternmost physical domain of src + !*** into westernmost halo of dst + + do j=1,jeSrc-jbSrc+1 + do i=1,nghost + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = i + halo%recvAddr(2,bufSize,msgIndx) = jbDst + j - 1 + halo%recvAddr(3,bufSize,msgIndx) = dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + case ('west') + + !*** copy westernmost physical domain of src + !*** into easternmost halo of dst + + do j=1,jeSrc-jbSrc+1 + do i=1,nghost + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = ieDst + i + halo%recvAddr(2,bufSize,msgIndx) = jbDst + j - 1 + halo%recvAddr(3,bufSize,msgIndx) = dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + case ('north') + + if (dstBlock > 0) then + + !*** copy northern physical domain of src + !*** into southern halo of dst + + do j=1,nghost + do i=1,ieDst-ibDst+1 + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = ibDst + i - 1 + halo%recvAddr(2,bufSize,msgIndx) = j + halo%recvAddr(3,bufSize,msgIndx) = dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + else + + !*** tripole block - receive into tripole buffer + + halo%tripRecv(msgIndx) = 1 + do j=1,halo%tripoleRows + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = iGlobal(ibSrc + i - 1) + halo%recvAddr(2,bufSize,msgIndx) = j + halo%recvAddr(3,bufSize,msgIndx) = -dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + endif + + case ('south') + + !*** copy southern physical domain of src + !*** into northern halo of dst + + do j=1,nghost + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = ibDst + i - 1 + halo%recvAddr(2,bufSize,msgIndx) = jeDst + j + halo%recvAddr(3,bufSize,msgIndx) = dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + case ('northeast') + + if (dstBlock > 0) then + + !*** normal northeast neighbor + !*** copy northeast physical domain into + !*** into southwest halo of dst + + do j=1,nghost + do i=1,nghost + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = i + halo%recvAddr(2,bufSize,msgIndx) = j + halo%recvAddr(3,bufSize,msgIndx) = dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + else + + !*** tripole block - receive into tripole buffer + + halo%tripRecv(msgIndx) = 1 + do j=1,halo%tripoleRows + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = iGlobal(ibSrc + i - 1) + halo%recvAddr(2,bufSize,msgIndx) = j + halo%recvAddr(3,bufSize,msgIndx) = -dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + endif + + case ('northwest') + + if (dstBlock > 0) then + + !*** normal northwest neighbor + !*** copy northwest physical domain into + !*** into southeast halo of dst + + do j=1,nghost + do i=1,nghost + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = ieDst + i + halo%recvAddr(2,bufSize,msgIndx) = j 
+ halo%recvAddr(3,bufSize,msgIndx) = dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + else + + !*** tripole block - receive into tripole buffer + + halo%tripRecv(msgIndx) = 1 + do j=1,halo%tripoleRows + do i=1,ieSrc-ibSrc+1 + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = iGlobal(ibSrc + i - 1) + halo%recvAddr(2,bufSize,msgIndx) = j + halo%recvAddr(3,bufSize,msgIndx) = -dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + endif + + case ('southeast') + + !*** copy southeastern corner of src physical domain + !*** into northwestern halo of dst + + do j=1,nghost + do i=1,nghost + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = i + halo%recvAddr(2,bufSize,msgIndx) = jeDst + j + halo%recvAddr(3,bufSize,msgIndx) = dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + case ('southwest') + + !*** copy southwestern corner of src physical domain + !*** into northeastern halo of dst + + do j=1,nghost + do i=1,nghost + + bufSize = bufSize + 1 + + halo%recvAddr(1,bufSize,msgIndx) = ieDst + i + halo%recvAddr(2,bufSize,msgIndx) = jeDst + j + halo%recvAddr(3,bufSize,msgIndx) = dstLocalID + + end do + end do + + halo%sizeRecv(msgIndx) = bufSize + + case default + + !*** already checked in previous case construct + + end select + +!----------------------------------------------------------------------- +! +! if none of the cases above, no message info required for this +! block pair +! +!----------------------------------------------------------------------- + + endif + +!----------------------------------------------------------------------- + + end subroutine ice_HaloMsgCreate + +!*********************************************************************** + + subroutine ice_HaloExtrapolate2DR8(ARRAY,dist,ew_bndy_type,ns_bndy_type) + +! This subroutine extrapolates ARRAY values into the first row or column +! of ghost cells, and is intended for grid variables whose ghost cells +! would otherwise be set using the default boundary conditions (Dirichlet +! or Neumann). +! Note: This routine will need to be modified for nghost > 1. +! We assume padding occurs only on east and north edges. +! +! This is the specific interface for double precision arrays +! corresponding to the generic interface ice_HaloExtrapolate + + use ice_blocks, only: block, nblocks_x, nblocks_y, get_block + use ice_constants, only: c2 + use ice_distribution, only: ice_distributionGetBlockID + + character (char_len) :: & + ew_bndy_type, &! type of domain bndy in each logical + ns_bndy_type ! direction (ew is i, ns is j) + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + ARRAY ! array containing distributed field + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,iblk, &! dummy loop indices + numBlocks, &! number of local blocks + blockID, &! block location + ibc ! ghost cell column or row + + type (block) :: & + this_block ! block info for current block + +!----------------------------------------------------------------------- +! +! Linear extrapolation +! 
+!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks) + + do iblk = 1, numBlocks + call ice_distributionGetBlockID(dist, iblk, blockID) + this_block = get_block(blockID, blockID) + + if (this_block%iblock == 1) then ! west edge + if (trim(ew_bndy_type) /= 'cyclic') then + do j = 1, ny_block + ARRAY(1,j,iblk) = c2*ARRAY(2,j,iblk) - ARRAY(3,j,iblk) + enddo + endif + endif + + if (this_block%iblock == nblocks_x) then ! east edge + if (trim(ew_bndy_type) /= 'cyclic') then + ! locate ghost cell column (avoid padding) + ibc = nx_block + do i = nx_block, nghost + 1, -1 + if (this_block%i_glob(i) == 0) ibc = ibc - 1 + enddo + do j = 1, ny_block + ARRAY(ibc,j,iblk) = c2*ARRAY(ibc-1,j,iblk) - ARRAY(ibc-2,j,iblk) + enddo + endif + endif + + if (this_block%jblock == 1) then ! south edge + if (trim(ns_bndy_type) /= 'cyclic') then + do i = 1, nx_block + ARRAY(i,1,iblk) = c2*ARRAY(i,2,iblk) - ARRAY(i,3,iblk) + enddo + endif + endif + + if (this_block%jblock == nblocks_y) then ! north edge + if (trim(ns_bndy_type) /= 'cyclic' .and. & + trim(ns_bndy_type) /= 'tripole' .and. & + trim(ns_bndy_type) /= 'tripoleT' ) then + ! locate ghost cell row (avoid padding) + ibc = ny_block + do j = ny_block, nghost + 1, -1 + if (this_block%j_glob(j) == 0) ibc = ibc - 1 + enddo + do i = 1, nx_block + ARRAY(i,ibc,iblk) = c2*ARRAY(i,ibc-1,iblk) - ARRAY(i,ibc-2,iblk) + enddo + endif + endif + + enddo ! iblk + +!----------------------------------------------------------------------- + + end subroutine ice_HaloExtrapolate2DR8 + +!*********************************************************************** + + subroutine ice_HaloDestroy(halo) + +! This routine frees the memory associated with a halo structure +! by deallocating the message buffers and address arrays that were +! created for performing halo (ghost cell) updates. + + type (ice_halo) :: & + halo ! halo structure to be destroyed + + integer (int_kind) :: & + istat ! error or status flag for MPI,alloc +!----------------------------------------------------------------------- + + deallocate(halo%sendTask, stat=istat) + deallocate(halo%recvTask, stat=istat) + deallocate(halo%sizeSend, stat=istat) + deallocate(halo%sizeRecv, stat=istat) + deallocate(halo%tripSend, stat=istat) + deallocate(halo%tripRecv, stat=istat) + deallocate(halo%srcLocalAddr, stat=istat) + deallocate(halo%dstLocalAddr, stat=istat) + deallocate(halo%sendAddr, stat=istat) + deallocate(halo%recvAddr, stat=istat) + +end subroutine ice_HaloDestroy + +!*********************************************************************** + +end module ice_boundary + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/mpi/ice_broadcast.F90 b/mpi/ice_broadcast.F90 new file mode 100644 index 00000000..fedf8699 --- /dev/null +++ b/mpi/ice_broadcast.F90 @@ -0,0 +1,745 @@ +! SVN:$Id: ice_broadcast.F90 700 2013-08-15 19:17:39Z eclare $ +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| + + module ice_broadcast + +! This module contains all the broadcast routines. This +! particular version contains MPI versions of these routines. +! +! author: Phil Jones, LANL +! Oct. 2004: Adapted from POP version by William H. Lipscomb, LANL + + use ice_kinds_mod + use ice_communicate, only: mpiR8, mpir4, MPI_COMM_ICE + + implicit none + private + save + + public :: broadcast_scalar, & + broadcast_array + +!----------------------------------------------------------------------- +! +! 
generic interfaces for module procedures +! +!----------------------------------------------------------------------- + + interface broadcast_scalar + module procedure broadcast_scalar_dbl, & + broadcast_scalar_real, & + broadcast_scalar_int, & + broadcast_scalar_log, & + broadcast_scalar_char + end interface + + interface broadcast_array + module procedure broadcast_array_dbl_1d, & + broadcast_array_real_1d, & + broadcast_array_int_1d, & + broadcast_array_log_1d, & + broadcast_array_dbl_2d, & + broadcast_array_real_2d, & + broadcast_array_int_2d, & + broadcast_array_log_2d, & + broadcast_array_dbl_3d, & + broadcast_array_real_3d, & + broadcast_array_int_3d, & + broadcast_array_log_3d + end interface + +!*********************************************************************** + + contains + +!*********************************************************************** + + subroutine broadcast_scalar_dbl(scalar, root_pe) + +! Broadcasts a scalar dbl variable from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_scalar interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + real (dbl_kind), intent(inout) :: & + scalar ! scalar to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + call MPI_BCAST(scalar, 1, mpiR8, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + +end subroutine broadcast_scalar_dbl + +!*********************************************************************** + +subroutine broadcast_scalar_real(scalar, root_pe) + +! Broadcasts a scalar real variable from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_scalar interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + real (real_kind), intent(inout) :: & + scalar ! scalar to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + call MPI_BCAST(scalar, 1, mpiR4, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_scalar_real + +!*********************************************************************** + +subroutine broadcast_scalar_int(scalar, root_pe) + +! Broadcasts a scalar integer variable from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_scalar interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + integer (int_kind), intent(inout) :: & + scalar ! scalar to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! 
+!----------------------------------------------------------------------- + + integer (int_kind) :: ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + call MPI_BCAST(scalar, 1, MPI_INTEGER, root_pe, MPI_COMM_ICE,ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_scalar_int + +!*********************************************************************** + +subroutine broadcast_scalar_log(scalar, root_pe) + +! Broadcasts a scalar logical variable from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_scalar interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + logical (log_kind), intent(inout) :: & + scalar ! scalar to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + itmp, &! local temporary + ierr ! MPI error flag + +!----------------------------------------------------------------------- + + if (scalar) then + itmp = 1 + else + itmp = 0 + endif + + call MPI_BCAST(itmp, 1, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + + if (itmp == 1) then + scalar = .true. + else + scalar = .false. + endif + +!----------------------------------------------------------------------- + + end subroutine broadcast_scalar_log + +!*********************************************************************** + +subroutine broadcast_scalar_char(scalar, root_pe) + +! Broadcasts a scalar character variable from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_scalar interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + character (*), intent(inout) :: & + scalar ! scalar to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + clength, &! length of character + ierr ! MPI error flag + +!----------------------------------------------------------------------- + + clength = len(scalar) + + call MPI_BCAST(scalar, clength, MPI_CHARACTER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!-------------------------------------------------------------------- + + end subroutine broadcast_scalar_char + +!*********************************************************************** + +subroutine broadcast_array_dbl_1d(array, root_pe) + +! Broadcasts a vector dbl variable from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + real (dbl_kind), dimension(:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array + ierr ! 
local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_dbl_1d + +!*********************************************************************** + +subroutine broadcast_array_real_1d(array, root_pe) + +! Broadcasts a real vector from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + real (real_kind), dimension(:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_real_1d + +!*********************************************************************** + +subroutine broadcast_array_int_1d(array, root_pe) + +! Broadcasts an integer vector from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + integer (int_kind), dimension(:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_int_1d + +!*********************************************************************** + +subroutine broadcast_array_log_1d(array, root_pe) + +! Broadcasts a logical vector from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + logical (log_kind), dimension(:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:), allocatable :: & + array_int ! temporary array for MPI bcast + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! 
local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + allocate(array_int(nelements)) + + where (array) + array_int = 1 + elsewhere + array_int = 0 + end where + + call MPI_BCAST(array_int, nelements, MPI_INTEGER, root_pe, & + MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + + where (array_int == 1) + array = .true. + elsewhere + array = .false. + end where + + deallocate(array_int) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_log_1d + +!*********************************************************************** + + subroutine broadcast_array_dbl_2d(array, root_pe) + +! Broadcasts a dbl 2d array from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + real (dbl_kind), dimension(:,:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_dbl_2d + +!*********************************************************************** + + subroutine broadcast_array_real_2d(array, root_pe) + +! Broadcasts a real 2d array from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + real (real_kind), dimension(:,:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_real_2d + +!*********************************************************************** + + subroutine broadcast_array_int_2d(array, root_pe) + +! Broadcasts a 2d integer array from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + integer (int_kind), dimension(:,:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! 
+!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_int_2d + +!*********************************************************************** + + subroutine broadcast_array_log_2d(array, root_pe) + +! Broadcasts a logical 2d array from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + logical (log_kind), dimension(:,:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:), allocatable :: & + array_int ! temporary array for MPI bcast + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + allocate(array_int(size(array,dim=1),size(array,dim=2))) + + where (array) + array_int = 1 + elsewhere + array_int = 0 + end where + + call MPI_BCAST(array_int, nelements, MPI_INTEGER, root_pe, & + MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + + where (array_int == 1) + array = .true. + elsewhere + array = .false. + end where + + deallocate(array_int) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_log_2d + +!*********************************************************************** + + subroutine broadcast_array_dbl_3d(array, root_pe) + +! Broadcasts a double 3d array from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_dbl_3d + +!*********************************************************************** + + subroutine broadcast_array_real_3d(array, root_pe) + +! Broadcasts a real 3d array from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! 
processor number to broadcast from + + real (real_kind), dimension(:,:,:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_real_3d + +!*********************************************************************** + + subroutine broadcast_array_int_3d(array, root_pe) + +! Broadcasts an integer 3d array from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + integer (int_kind), dimension(:,:,:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + + call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_int_3d + +!*********************************************************************** + + subroutine broadcast_array_log_3d(array, root_pe) + +! Broadcasts a logical 3d array from one processor (root_pe) +! to all other processors. This is a specific instance of the generic +! broadcast\_array interface. + + include 'mpif.h' ! MPI Fortran include file + + integer (int_kind), intent(in) :: & + root_pe ! processor number to broadcast from + + logical (log_kind), dimension(:,:,:), intent(inout) :: & + array ! array to be broadcast + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), allocatable :: & + array_int ! temporary array for MPI bcast + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + +!----------------------------------------------------------------------- + + nelements = size(array) + allocate(array_int(size(array,dim=1), & + size(array,dim=2), & + size(array,dim=3))) + + where (array) + array_int = 1 + elsewhere + array_int = 0 + end where + + call MPI_BCAST(array_int, nelements, MPI_INTEGER, root_pe, & + MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + + where (array_int == 1) + array = .true. + elsewhere + array = .false. 
+ end where + + deallocate(array_int) + +!----------------------------------------------------------------------- + + end subroutine broadcast_array_log_3d + +!*********************************************************************** + + end module ice_broadcast + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/mpi/ice_communicate.F90 b/mpi/ice_communicate.F90 new file mode 100644 index 00000000..713c224a --- /dev/null +++ b/mpi/ice_communicate.F90 @@ -0,0 +1,216 @@ +! SVN:$Id: ice_communicate.F90 918 2015-02-10 20:37:08Z eclare $ +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| + + module ice_communicate + +! This module contains the necessary routines and variables for +! communicating between processors. +! +! author: Phil Jones, LANL +! Oct. 2004: Adapted from POP version by William H. Lipscomb, LANL + + use ice_kinds_mod + +#if defined key_oasis3 || key_oasis3mct + use cpl_oasis3 +#endif + +#if defined key_oasis4 + use cpl_oasis4 +#endif + +#if defined key_iomput + use lib_mpp, only: mpi_comm_opa ! MPP library +#endif + + implicit none + private + save + + public :: init_communicate, & + get_num_procs, & + create_communicator + + integer (int_kind), public :: & + MPI_COMM_ICE, &! MPI communicator for ice comms + mpiR16, &! MPI type for r16_kind + mpiR8, &! MPI type for dbl_kind + mpiR4, &! MPI type for real_kind + my_task, &! MPI task number for this task + master_task ! task number of master task + + integer (int_kind), parameter, public :: & + mpitagHalo = 1, &! MPI tags for various + mpitag_gs = 1000 ! communication patterns + +!*********************************************************************** + + contains + +!*********************************************************************** + + subroutine init_communicate(mpicom) + +! This routine sets up MPI environment and defines ice +! communicator. + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + include 'mpif.h' ! MPI Fortran include file + + integer (kind=int_kind), optional, intent(in) :: mpicom ! specified communicator + + integer (int_kind) :: ierr ! MPI error flag + logical :: flag ! MPI logical flag + integer (int_kind) :: ice_comm + +!----------------------------------------------------------------------- +! +! initiate mpi environment and create communicator for internal +! ice communications +! +!----------------------------------------------------------------------- + +!ars599: 01042014: use cice4.1 mpi ice_communicate otherwise +! will have MPI_Comm_f2c issue +!04042014: this part was copied from ACCESS-OM cice4.1 +! add in key_oasis3mct and try again. +!04092014: AusCOm didnt use this bit. MPI_INIT will calll in OASIS +! only use master_task = 0 +#ifndef AusCOM + +#if (defined key_oasis3 || defined key_oasis3mct || defined key_oasis4) +!#if (defined key_oasis3 || defined key_oasis4) + ice_comm = localComm ! communicator from NEMO/OASISn +#else + ice_comm = MPI_COMM_WORLD ! Global communicator +#endif + +#if (defined CCSM) || (defined SEQ_MCT) + ! CCSM standard coupled mode + call cpl_interface_init(cpl_fields_icename, MPI_COMM_ICE) +#else + +#if (defined popcice || defined CICE_IN_NEMO) + ! 
MPI_INIT is called elsewhere in coupled configuration +#else + call MPI_INIT(ierr) +#endif + call MPI_BARRIER (ice_comm, ierr) + call MPI_COMM_DUP(ice_comm, MPI_COMM_ICE, ierr) + master_task = 0 + +#endif + + call MPI_COMM_RANK (MPI_COMM_ICE, my_task, ierr) + +#else + master_task = 0 +#endif + + mpiR16 = MPI_REAL16 + mpiR8 = MPI_REAL8 + mpiR4 = MPI_REAL4 + +!----------------------------------------------------------------------- + + end subroutine init_communicate + +!*********************************************************************** + + function get_num_procs() + +! This function returns the number of processor assigned to +! MPI_COMM_ICE + + integer (int_kind) :: get_num_procs + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: ierr + +!----------------------------------------------------------------------- + + call MPI_COMM_SIZE(MPI_COMM_ICE, get_num_procs, ierr) + +!----------------------------------------------------------------------- + + end function get_num_procs + +!*********************************************************************** + + subroutine create_communicator(new_comm, num_procs) + +! This routine creates a separate communicator for a subset of +! processors under default ice communicator. +! +! this routine should be called from init_domain1 when the +! domain configuration (e.g. nprocs_btrop) has been determined + + include 'mpif.h' + + integer (int_kind), intent(in) :: & + num_procs ! num of procs in new distribution + + integer (int_kind), intent(out) :: & + new_comm ! new communicator for this distribution + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + MPI_GROUP_ICE, &! group of processors assigned to ice + MPI_GROUP_NEW ! group of processors assigned to new dist + + integer (int_kind) :: & + ierr ! error flag for MPI comms + + integer (int_kind), dimension(3) :: & + range ! range of tasks assigned to new dist + ! (assumed 0,num_procs-1) + +!----------------------------------------------------------------------- +! +! determine group of processes assigned to distribution +! +!----------------------------------------------------------------------- + + call MPI_COMM_GROUP (MPI_COMM_ICE, MPI_GROUP_ICE, ierr) + + range(1) = 0 + range(2) = num_procs-1 + range(3) = 1 + +!----------------------------------------------------------------------- +! +! create subroup and communicator for new distribution +! note: MPI_COMM_CREATE must be called by all procs in MPI_COMM_ICE +! +!----------------------------------------------------------------------- + + call MPI_GROUP_RANGE_INCL(MPI_GROUP_ICE, 1, range, & + MPI_GROUP_NEW, ierr) + + call MPI_COMM_CREATE (MPI_COMM_ICE, MPI_GROUP_NEW, & + new_comm, ierr) + +!----------------------------------------------------------------------- + + end subroutine create_communicator + +!*********************************************************************** + + end module ice_communicate + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/mpi/ice_exit.F90 b/mpi/ice_exit.F90 new file mode 100644 index 00000000..70f74148 --- /dev/null +++ b/mpi/ice_exit.F90 @@ -0,0 +1,75 @@ +! 
SVN:$Id: ice_exit.F90 820 2014-08-26 19:08:29Z eclare $ +!======================================================================= +! +! Exit the model. +! authors William H. Lipscomb (LANL) +! Elizabeth C. Hunke (LANL) +! 2006 ECH: separated serial and mpi functionality + + module ice_exit + + use ice_kinds_mod + + implicit none + public + +!======================================================================= + + contains + +!======================================================================= + + subroutine abort_ice(error_message) + +! This routine aborts the ice model and prints an error message. + +#if (defined CCSMCOUPLED) + use ice_fileunits, only: nu_diag, flush_fileunit + use shr_sys_mod +#else + use ice_fileunits, only: nu_diag, ice_stderr, flush_fileunit + include 'mpif.h' ! MPI Fortran include file +#endif + + character (len=*), intent(in) :: error_message + + ! local variables + +#ifndef CCSMCOUPLED + integer (int_kind) :: ierr ! MPI error flag +#endif + +#if (defined CCSMCOUPLED) + call flush_fileunit(nu_diag) + write (nu_diag,*) error_message + call flush_fileunit(nu_diag) + call shr_sys_abort(error_message) +#else + call flush_fileunit(nu_diag) + + write (ice_stderr,*) error_message + call flush_fileunit(ice_stderr) + + call MPI_ABORT(MPI_COMM_WORLD, ierr) + stop +#endif + + end subroutine abort_ice + +!======================================================================= + + subroutine end_run + +! Ends run by calling MPI_FINALIZE. + + integer (int_kind) :: ierr ! MPI error flag + + call MPI_FINALIZE(ierr) + + end subroutine end_run + +!======================================================================= + + end module ice_exit + +!======================================================================= diff --git a/mpi/ice_gather_scatter.F90 b/mpi/ice_gather_scatter.F90 new file mode 100644 index 00000000..baea56a3 --- /dev/null +++ b/mpi/ice_gather_scatter.F90 @@ -0,0 +1,2739 @@ +! SVN:$Id: ice_gather_scatter.F90 700 2013-08-15 19:17:39Z eclare $ +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| + + module ice_gather_scatter + +! This module contains routines for gathering data to a single +! processor from a distributed array, and scattering data from a +! single processor to a distributed array. +! +! NOTE: The arrays gathered and scattered are assumed to have +! horizontal dimensions (nx_block, ny_block). +! +! author: Phil Jones, LANL +! Oct. 2004: Adapted from POP version by William H. Lipscomb, LANL +! Jan. 2008: Elizabeth Hunke replaced old routines with new POP +! infrastructure, added specialized routine scatter_global_stress + + use ice_kinds_mod + use ice_communicate, only: my_task, mpiR8, mpiR4, mpitag_gs, MPI_COMM_ICE + use ice_constants, only: spval_dbl, c0, & + field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & + field_loc_noupdate, & + field_type_scalar, field_type_vector, field_type_angle, & + field_type_noupdate + use ice_blocks, only: block, nx_block, ny_block, nblocks_tot, get_block, & + nblocks_x, nblocks_y, nghost + use ice_distribution, only: distrb + use ice_domain_size, only: nx_global, ny_global + use ice_exit, only: abort_ice + + implicit none + private + save + + public :: gather_global, & + gather_global_ext, & + scatter_global, & + scatter_global_ext, & + scatter_global_stress + +!----------------------------------------------------------------------- +! +! overload module functions +! 
+!----------------------------------------------------------------------- + + interface gather_global + module procedure gather_global_dbl, & + gather_global_real, & + gather_global_int + end interface + + interface scatter_global + module procedure scatter_global_dbl, & + scatter_global_real, & + scatter_global_int + end interface + +!----------------------------------------------------------------------- +! +! module variables +! +!----------------------------------------------------------------------- + +!*********************************************************************** + + contains + +!*********************************************************************** + + subroutine gather_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) + +! This subroutine gathers a distributed array to a global-sized +! array on the processor dst_task. +! +! This is the specific inteface for double precision arrays +! corresponding to the generic interface gather_global. It is shown +! to provide information on the generic interface (the generic +! interface is identical, but chooses a specific inteface based +! on the data type of the input argument). + + include 'mpif.h' + + integer (int_kind), intent(in) :: & + dst_task ! task to which array should be gathered + + type (distrb), intent(in) :: & + src_dist ! distribution of blocks in the source array + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + ARRAY ! array containing horizontal slab of distributed field + + real (dbl_kind), intent(in), optional :: & + spc_val + + real (dbl_kind), dimension(:,:), intent(inout) :: & + ARRAY_G ! array containing global horizontal field on dst_task + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop counters + nsends ,&! number of actual sends + src_block ,&! block locator for send + ierr ! MPI error flag + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + snd_request + + integer (int_kind), dimension(:,:), allocatable :: & + snd_status + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer + + real (dbl_kind) :: & + special_value + + type (block) :: & + this_block ! block info for current block + + if (present(spc_val)) then + special_value = spc_val + else + special_value = spval_dbl + endif + +!----------------------------------------------------------------------- +! +! if this task is the dst_task, copy local blocks into the global +! array and post receives for non-local blocks. +! 
+!----------------------------------------------------------------------- + + if (my_task == dst_task) then + + do n=1,nblocks_tot + + !*** copy local blocks + + if (src_dist%blockLocation(n) == my_task+1) then + + this_block = get_block(n,n) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = & + ARRAY(i,j,src_dist%blockLocalID(n)) + end do + end do + + !*** fill land blocks with special values + + else if (src_dist%blockLocation(n) == 0) then + + this_block = get_block(n,n) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = special_value + end do + end do + endif + + end do + + !*** receive blocks to fill up the rest + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (src_dist%blockLocation(n) > 0 .and. & + src_dist%blockLocation(n) /= my_task+1) then + + this_block = get_block(n,n) + + call MPI_RECV(msg_buffer, size(msg_buffer), & + mpiR8, src_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = msg_buffer(i,j) + end do + end do + endif + end do + + deallocate(msg_buffer) + +!----------------------------------------------------------------------- +! +! otherwise send data to dst_task +! +!----------------------------------------------------------------------- + + else + + allocate(snd_request(nblocks_tot), & + snd_status (MPI_STATUS_SIZE, nblocks_tot)) + + nsends = 0 + do n=1,nblocks_tot + if (src_dist%blockLocation(n) == my_task+1) then + + nsends = nsends + 1 + src_block = src_dist%blockLocalID(n) + call MPI_ISEND(ARRAY(1,1,src_block), nx_block*ny_block, & + mpiR8, dst_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, snd_request(nsends), ierr) + endif + end do + + if (nsends > 0) & + call MPI_WAITALL(nsends, snd_request, snd_status, ierr) + deallocate(snd_request, snd_status) + + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine gather_global_dbl + +!*********************************************************************** + + subroutine gather_global_real(ARRAY_G, ARRAY, dst_task, src_dist) + +!----------------------------------------------------------------------- +! +! This subroutine gathers a distributed array to a global-sized +! array on the processor dst_task. +! +!----------------------------------------------------------------------- + + include 'mpif.h' + +!----------------------------------------------------------------------- +! +! input variables +! +!----------------------------------------------------------------------- + + integer (int_kind), intent(in) :: & + dst_task ! task to which array should be gathered + + type (distrb), intent(in) :: & + src_dist ! distribution of blocks in the source array + + real (real_kind), dimension(:,:,:), intent(in) :: & + ARRAY ! array containing distributed field + +!----------------------------------------------------------------------- +! +! output variables +! +!----------------------------------------------------------------------- + + real (real_kind), dimension(:,:), intent(inout) :: & + ARRAY_G ! array containing global field on dst_task + +!----------------------------------------------------------------------- +! +! local variables +! 
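As the header comment of gather_global_dbl notes, gather_global is a generic interface that selects the double, real or integer routine from the type of its arguments. A typical call pattern, sketched here with assumed names (distrb_info from ice_domain, aice from ice_state, and a locally allocated work_g), gathers a block-decomposed field onto master_task:

      subroutine gather_aice_sketch
        ! Sketch only: 'distrb_info' (ice_domain) and 'aice' (ice_state)
        ! are assumed names from the wider model, not part of this file.
        use ice_kinds_mod, only: dbl_kind
        use ice_communicate, only: my_task, master_task
        use ice_domain_size, only: nx_global, ny_global
        use ice_domain, only: distrb_info      ! assumed
        use ice_state, only: aice              ! assumed
        use ice_gather_scatter, only: gather_global
        implicit none
        real (dbl_kind), dimension(:,:), allocatable :: work_g
        if (my_task == master_task) then
           allocate (work_g(nx_global,ny_global))
        else
           allocate (work_g(1,1))    ! not used on other tasks
        endif
        call gather_global(work_g, aice, master_task, distrb_info)
        ! on master_task, work_g now holds the full global field;
        ! land blocks were filled with spval_dbl (or spc_val if supplied)
        deallocate (work_g)
      end subroutine gather_aice_sketch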
+!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop counters + nsends ,&! number of actual sends + src_block ,&! block locator for send + ierr ! MPI error flag + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + snd_request + + integer (int_kind), dimension(:,:), allocatable :: & + snd_status + + real (real_kind), dimension(:,:), allocatable :: & + msg_buffer + + type (block) :: & + this_block ! block info for current block + +!----------------------------------------------------------------------- +! +! if this task is the dst_task, copy local blocks into the global +! array and post receives for non-local blocks. +! +!----------------------------------------------------------------------- + + if (my_task == dst_task) then + + do n=1,nblocks_tot + + !*** copy local blocks + + if (src_dist%blockLocation(n) == my_task+1) then + + this_block = get_block(n,n) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = & + ARRAY(i,j,src_dist%blockLocalID(n)) + end do + end do + + !*** fill land blocks with zeroes + + else if (src_dist%blockLocation(n) == 0) then + + this_block = get_block(n,n) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = 0._real_kind + end do + end do + endif + + end do + + !*** receive blocks to fill up the rest + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (src_dist%blockLocation(n) > 0 .and. & + src_dist%blockLocation(n) /= my_task+1) then + + this_block = get_block(n,n) + + call MPI_RECV(msg_buffer, size(msg_buffer), & + mpiR4, src_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = msg_buffer(i,j) + end do + end do + endif + end do + + deallocate(msg_buffer) + +!----------------------------------------------------------------------- +! +! otherwise send data to dst_task +! +!----------------------------------------------------------------------- + + else + + allocate(snd_request(nblocks_tot), & + snd_status (MPI_STATUS_SIZE, nblocks_tot)) + + nsends = 0 + do n=1,nblocks_tot + if (src_dist%blockLocation(n) == my_task+1) then + + nsends = nsends + 1 + src_block = src_dist%blockLocalID(n) + call MPI_ISEND(ARRAY(1,1,src_block), nx_block*ny_block, & + mpiR4, dst_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, snd_request(nsends), ierr) + endif + end do + + if (nsends > 0) & + call MPI_WAITALL(nsends, snd_request, snd_status, ierr) + deallocate(snd_request, snd_status) + + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine gather_global_real + +!*********************************************************************** + + subroutine gather_global_int(ARRAY_G, ARRAY, dst_task, src_dist) + +!----------------------------------------------------------------------- +! +! This subroutine gathers a distributed array to a global-sized +! array on the processor dst_task. +! +!----------------------------------------------------------------------- + + include 'mpif.h' + +!----------------------------------------------------------------------- +! +! input variables +! 
+!----------------------------------------------------------------------- + + integer (int_kind), intent(in) :: & + dst_task ! task to which array should be gathered + + type (distrb), intent(in) :: & + src_dist ! distribution of blocks in the source array + + integer (int_kind), dimension(:,:,:), intent(in) :: & + ARRAY ! array containing distributed field + +!----------------------------------------------------------------------- +! +! output variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:), intent(inout) :: & + ARRAY_G ! array containing global field on dst_task + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop counters + nsends ,&! number of actual sends + src_block ,&! block locator for send + ierr ! MPI error flag + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + snd_request + + integer (int_kind), dimension(:,:), allocatable :: & + snd_status + + integer (int_kind), dimension(:,:), allocatable :: & + msg_buffer + + type (block) :: & + this_block ! block info for current block + +!----------------------------------------------------------------------- +! +! if this task is the dst_task, copy local blocks into the global +! array and post receives for non-local blocks. +! +!----------------------------------------------------------------------- + + if (my_task == dst_task) then + + do n=1,nblocks_tot + + !*** copy local blocks + + if (src_dist%blockLocation(n) == my_task+1) then + + this_block = get_block(n,n) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = & + ARRAY(i,j,src_dist%blockLocalID(n)) + end do + end do + + !*** fill land blocks with zeroes + + else if (src_dist%blockLocation(n) == 0) then + + this_block = get_block(n,n) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = 0 + end do + end do + endif + + end do + + !*** receive blocks to fill up the rest + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (src_dist%blockLocation(n) > 0 .and. & + src_dist%blockLocation(n) /= my_task+1) then + + this_block = get_block(n,n) + + call MPI_RECV(msg_buffer, size(msg_buffer), & + mpi_integer, src_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i), & + this_block%j_glob(j)) = msg_buffer(i,j) + end do + end do + endif + end do + + deallocate(msg_buffer) + +!----------------------------------------------------------------------- +! +! otherwise send data to dst_task +! 
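The same generic name also resolves to gather_global_int when it is handed integer arrays, so integer fields are collected with an identical call. In this fragment every name other than the grid dimensions (imask, imask_g, max_blocks, distrb_info) is hypothetical:

      ! Sketch only: a hypothetical integer field gathered onto master_task.
      integer (int_kind), dimension(nx_block,ny_block,max_blocks) :: imask
      integer (int_kind), dimension(nx_global,ny_global)          :: imask_g
      call gather_global(imask_g, imask, master_task, distrb_info)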
+!----------------------------------------------------------------------- + + else + + allocate(snd_request(nblocks_tot), & + snd_status (MPI_STATUS_SIZE, nblocks_tot)) + + nsends = 0 + do n=1,nblocks_tot + if (src_dist%blockLocation(n) == my_task+1) then + + nsends = nsends + 1 + src_block = src_dist%blockLocalID(n) + call MPI_ISEND(ARRAY(1,1,src_block), nx_block*ny_block, & + mpi_integer, dst_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, snd_request(nsends), ierr) + endif + end do + + if (nsends > 0) & + call MPI_WAITALL(nsends, snd_request, snd_status, ierr) + deallocate(snd_request, snd_status) + + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine gather_global_int + +!*********************************************************************** + + subroutine gather_global_ext(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) + +! This subroutine gathers a distributed array to a global-sized +! array on the processor dst_task, including ghost cells. + + include 'mpif.h' + + integer (int_kind), intent(in) :: & + dst_task ! task to which array should be gathered + + type (distrb), intent(in) :: & + src_dist ! distribution of blocks in the source array + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + ARRAY ! array containing horizontal slab of distributed field + + real (dbl_kind), dimension(:,:), intent(inout) :: & + ARRAY_G ! array containing global horizontal field on dst_task + + real (dbl_kind), intent(in), optional :: & + spc_val + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop counters + nx, ny ,&! global dimensions + nsends ,&! number of actual sends + src_block ,&! block locator for send + ierr ! MPI error flag + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + snd_request + + integer (int_kind), dimension(:,:), allocatable :: & + snd_status + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer + + real (dbl_kind) :: & + special_value + + type (block) :: & + this_block ! block info for current block + + if (present(spc_val)) then + special_value = spc_val + else + special_value = spval_dbl + endif + + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + +!----------------------------------------------------------------------- +! +! if this task is the dst_task, copy local blocks into the global +! array and post receives for non-local blocks. +! +!----------------------------------------------------------------------- + + if (my_task == dst_task) then + + do n=1,nblocks_tot + + !*** copy local blocks + + if (src_dist%blockLocation(n) == my_task+1) then + + this_block = get_block(n,n) + + ! interior + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i)+nghost, & + this_block%j_glob(j)+nghost) = & + ARRAY (i,j,src_dist%blockLocalID(n)) + end do + end do + + ! fill ghost cells + if (this_block%jblock == 1) then + ! south block + do j=1, nghost + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i)+nghost,j) = & + ARRAY (i,j,src_dist%blockLocalID(n)) + end do + end do + if (this_block%iblock == 1) then + ! 
southwest corner + do j=1, nghost + do i=1, nghost + ARRAY_G(i,j) = & + ARRAY (i,j,src_dist%blockLocalID(n)) + end do + end do + endif + endif + if (this_block%jblock == nblocks_y) then + ! north block + do j=1, nghost + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i)+nghost, & + ny_global + nghost + j) = & + ARRAY (i,this_block%jhi+nghost-j+1,src_dist%blockLocalID(n)) + end do + end do + if (this_block%iblock == nblocks_x) then + ! northeast corner + do j=1, nghost + do i=1, nghost + ARRAY_G(nx-i+1, ny-j+1) = & + ARRAY (this_block%ihi+nghost-i+1, & + this_block%jhi+nghost-j+1, & + src_dist%blockLocalID(n)) + end do + end do + endif + endif + if (this_block%iblock == 1) then + ! west block + do j=this_block%jlo,this_block%jhi + do i=1, nghost + ARRAY_G(i,this_block%j_glob(j)+nghost) = & + ARRAY (i,j,src_dist%blockLocalID(n)) + end do + end do + if (this_block%jblock == nblocks_y) then + ! northwest corner + do j=1, nghost + do i=1, nghost + ARRAY_G(i, ny-j+1) = & + ARRAY (i,this_block%jhi+nghost-j+1,src_dist%blockLocalID(n)) + end do + end do + endif + endif + if (this_block%iblock == nblocks_x) then + ! east block + do j=this_block%jlo,this_block%jhi + do i=1, nghost + ARRAY_G(nx_global + nghost + i, & + this_block%j_glob(j)+nghost) = & + ARRAY (this_block%ihi+nghost-i+1,j,src_dist%blockLocalID(n)) + end do + end do + if (this_block%jblock == 1) then + ! southeast corner + do j=1, nghost + do i=1, nghost + ARRAY_G( nx-i+1,j) = & + ARRAY (this_block%ihi+nghost-i+1,j,src_dist%blockLocalID(n)) + end do + end do + endif + endif + + !*** fill land blocks with special values + + else if (src_dist%blockLocation(n) == 0) then + + this_block = get_block(n,n) + + ! interior + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i)+nghost, & + this_block%j_glob(j)+nghost) = special_value + end do + end do + +#ifdef CICE_IN_NEMO +!echmod: this code is temporarily wrapped for nemo pending further testing elsewhere + ! fill ghost cells + if (this_block%jblock == 1) then + ! south block + do j=1, nghost + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i)+nghost,j) = special_value + end do + end do + if (this_block%iblock == 1) then + ! southwest corner + do j=1, nghost + do i=1, nghost + ARRAY_G(i,j) = special_value + end do + end do + endif + endif + if (this_block%jblock == nblocks_y) then + ! north block + do j=1, nghost + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i)+nghost, & + ny_global + nghost + j) = special_value + end do + end do + if (this_block%iblock == nblocks_x) then + ! northeast corner + do j=1, nghost + do i=1, nghost + ARRAY_G(nx-i+1, ny-j+1) = special_value + end do + end do + endif + endif + if (this_block%iblock == 1) then + ! west block + do j=this_block%jlo,this_block%jhi + do i=1, nghost + ARRAY_G(i,this_block%j_glob(j)+nghost) = special_value + end do + end do + if (this_block%jblock == nblocks_y) then + ! northwest corner + do j=1, nghost + do i=1, nghost + ARRAY_G(i, ny-j+1) = special_value + end do + end do + endif + endif + if (this_block%iblock == nblocks_x) then + ! east block + do j=this_block%jlo,this_block%jhi + do i=1, nghost + ARRAY_G(nx_global + nghost + i, & + this_block%j_glob(j)+nghost) = special_value + end do + end do + if (this_block%jblock == 1) then + ! 
southeast corner + do j=1, nghost + do i=1, nghost + ARRAY_G( nx-i+1,j) = special_value + end do + end do + endif + endif +#endif + + endif + + end do + + !*** receive blocks to fill up the rest + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (src_dist%blockLocation(n) > 0 .and. & + src_dist%blockLocation(n) /= my_task+1) then + + this_block = get_block(n,n) + + call MPI_RECV(msg_buffer, size(msg_buffer), & + mpiR8, src_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + ! block interior + do j=this_block%jlo,this_block%jhi + do i=this_block%ilo,this_block%ihi + ARRAY_G(this_block%i_glob(i)+nghost, & + this_block%j_glob(j)+nghost) = msg_buffer(i,j) + end do + end do + if (this_block%jblock == 1) then + ! south block + do j=1, nghost + do i=this_block%ilo,this_block%ihi + ARRAY_G (this_block%i_glob(i)+nghost,j) = & + msg_buffer(i,j) + end do + end do + if (this_block%iblock == 1) then + ! southwest corner + do j=1, nghost + do i=1, nghost + ARRAY_G(i,j) = msg_buffer(i,j) + end do + end do + endif + endif + if (this_block%jblock == nblocks_y) then + ! north block + do j=1, nghost + do i=this_block%ilo,this_block%ihi + ARRAY_G (this_block%i_glob(i)+nghost, & + ny_global + nghost + j) = & + msg_buffer(i, this_block%jhi+j) + end do + end do + if (this_block%iblock == nblocks_x) then + ! northeast corner + do j=1, nghost + do i=1, nghost + ARRAY_G (nx-i+1, ny-j+1) = & + msg_buffer(this_block%ihi+nghost-i+1,& + this_block%jhi+nghost-j+1) + end do + end do + endif + endif + if (this_block%iblock == 1) then + ! west block + do j=this_block%jlo,this_block%jhi + do i=1, nghost + ARRAY_G (i, this_block%j_glob(j)+nghost) = & + msg_buffer(i, j) + end do + end do + if (this_block%jblock == nblocks_y) then + ! northwest corner + do j=1, nghost + do i=1, nghost + ARRAY_G (i, ny-j+1) = & + msg_buffer(i, this_block%jhi+nghost-j+1) + end do + end do + endif + endif + if (this_block%iblock == nblocks_x) then + ! east block + do j=this_block%jlo,this_block%jhi + do i=1, nghost + ARRAY_G (nx_global+nghost+i, & + this_block%j_glob(j)+nghost) = & + msg_buffer(this_block%ihi+i, j) + end do + end do + if (this_block%jblock == 1) then + ! southeast corner + do j=1, nghost + do i=1, nghost + ARRAY_G (nx-i+1, j) = & + msg_buffer(this_block%ihi+nghost-i+1, j) + end do + end do + endif + endif + endif + end do + + deallocate(msg_buffer) + +!----------------------------------------------------------------------- +! +! otherwise send data to dst_task +! +!----------------------------------------------------------------------- + + else + + allocate(snd_request(nblocks_tot), & + snd_status (MPI_STATUS_SIZE, nblocks_tot)) + + nsends = 0 + do n=1,nblocks_tot + if (src_dist%blockLocation(n) == my_task+1) then + + nsends = nsends + 1 + src_block = src_dist%blockLocalID(n) + call MPI_ISEND(ARRAY(1,1,src_block), nx_block*ny_block, & + mpiR8, dst_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, snd_request(nsends), ierr) + endif + end do + + if (nsends > 0) & + call MPI_WAITALL(nsends, snd_request, snd_status, ierr) + deallocate(snd_request, snd_status) + + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine gather_global_ext + +!*********************************************************************** + + subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & + field_loc, field_type) + +! 
This subroutine scatters a global-sized array to a distributed array. +! +! This is the specific interface for double precision arrays +! corresponding to the generic interface scatter_global. + + include 'mpif.h' + + integer (int_kind), intent(in) :: & + src_task ! task from which array should be scattered + + type (distrb), intent(in) :: & + dst_dist ! distribution of resulting blocks + + real (dbl_kind), dimension(:,:), intent(in) :: & + ARRAY_G ! array containing global field on src_task + + integer (int_kind), intent(in) :: & + field_type, &! id for type of field (scalar, vector, angle) + field_loc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + ARRAY ! array containing distributed field + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n, &! dummy loop indices + nrecvs, &! actual number of messages received + isrc, jsrc, &! source addresses + dst_block, &! location of block in dst array + xoffset, yoffset, &! offsets for tripole boundary conditions + yoffset2, &! + isign, &! sign factor for tripole boundary conditions + ierr ! MPI error flag + + type (block) :: & + this_block ! block info for current block + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + rcv_request ! request array for receives + + integer (int_kind), dimension(:,:), allocatable :: & + rcv_status ! status array for receives + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer ! buffer for sending blocks + +!----------------------------------------------------------------------- +! +! initialize return array to zero and set up tripole quantities +! +!----------------------------------------------------------------------- + + ARRAY = c0 + + this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it + if (this_block%tripoleTFlag) then + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 2 + yoffset = 0 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 1 + yoffset = -1 + case (field_loc_Eface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_Nface) ! cell face location + xoffset = 2 + yoffset = -1 + case (field_loc_noupdate) ! ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + else + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 1 + yoffset = 1 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 0 + yoffset = 0 + case (field_loc_Eface) ! cell face location + xoffset = 0 + yoffset = 1 + case (field_loc_Nface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_noupdate) ! ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + endif + + select case (field_type) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case (field_type_noupdate) ! ghost cells never used - use cell center + isign = 1 + case default + call abort_ice('Unknown field type in scatter') + end select + +!----------------------------------------------------------------------- +! +! if this task is the src_task, copy blocks of global array into +! message buffer and send to other processors. also copy local blocks +! 
+!----------------------------------------------------------------------- + + if (my_task == src_task) then + + !*** send non-local blocks away + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) > 0 .and. & + dst_dist%blockLocation(n)-1 /= my_task) then + + msg_buffer = c0 + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + + do j=1,ny_block + do i=1,nx_block + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + end do + end do + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + + ! for yoffset=0 or 1, yoffset2=0,0 + ! for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + msg_buffer(i,j-yoffset2) = isign * ARRAY_G(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + + call MPI_SEND(msg_buffer, nx_block*ny_block, & + mpiR8, dst_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + endif + end do + + deallocate(msg_buffer) + + !*** copy any local blocks + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + + do j=1,ny_block + do i=1,nx_block + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + end do + end do + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! 
normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + + ! for yoffset=0 or 1, yoffset2=0,0 + ! for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + ARRAY(i,j-yoffset2,dst_block) & + = isign * ARRAY_G(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + endif + end do + +!----------------------------------------------------------------------- +! +! otherwise receive data from src_task +! +!----------------------------------------------------------------------- + + else + + allocate (rcv_request(nblocks_tot), & + rcv_status(MPI_STATUS_SIZE, nblocks_tot)) + + rcv_request = 0 + rcv_status = 0 + + nrecvs = 0 + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + nrecvs = nrecvs + 1 + dst_block = dst_dist%blockLocalID(n) + call MPI_IRECV(ARRAY(1,1,dst_block), nx_block*ny_block, & + mpiR8, src_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, rcv_request(nrecvs), ierr) + endif + end do + + if (nrecvs > 0) & + call MPI_WAITALL(nrecvs, rcv_request, rcv_status, ierr) + + deallocate(rcv_request, rcv_status) + endif + + !----------------------------------------------------------------- + ! Ensure unused ghost cell values are 0 + !----------------------------------------------------------------- + + if (field_loc == field_loc_noupdate) then + do n=1,nblocks_tot + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + if (dst_block > 0) then + + ! north edge + do j = this_block%jhi+1,ny_block + do i = 1, nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! east edge + do j = 1, ny_block + do i = this_block%ihi+1,nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! south edge + do j = 1, this_block%jlo-1 + do i = 1, nx_block + ARRAY (i,j,dst_block) = c0 + enddo + enddo + ! west edge + do j = 1, ny_block + do i = 1, this_block%ilo-1 + ARRAY (i,j,dst_block) = c0 + enddo + enddo + + endif + enddo + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine scatter_global_dbl + +!*********************************************************************** + + subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, & + field_loc, field_type) + +!----------------------------------------------------------------------- +! +! This subroutine scatters a global-sized array to a distributed array. +! +!----------------------------------------------------------------------- + + include 'mpif.h' + +!----------------------------------------------------------------------- +! +! input variables +! +!----------------------------------------------------------------------- + + integer (int_kind), intent(in) :: & + src_task ! task from which array should be scattered + + type (distrb), intent(in) :: & + dst_dist ! distribution of resulting blocks + + real (real_kind), dimension(:,:), intent(in) :: & + ARRAY_G ! array containing global field on src_task + + integer (int_kind), intent(in) :: & + field_type, &! id for type of field (scalar, vector, angle) + field_loc ! 
id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + +!----------------------------------------------------------------------- +! +! output variables +! +!----------------------------------------------------------------------- + + real (real_kind), dimension(:,:,:), intent(inout) :: & + ARRAY ! array containing distributed field + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n, &! dummy loop indices + nrecvs, &! actual number of messages received + isrc, jsrc, &! source addresses + dst_block, &! location of block in dst array + xoffset, yoffset, &! offsets for tripole boundary conditions + yoffset2, &! + isign, &! sign factor for tripole boundary conditions + ierr ! MPI error flag + + type (block) :: & + this_block ! block info for current block + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + rcv_request ! request array for receives + + integer (int_kind), dimension(:,:), allocatable :: & + rcv_status ! status array for receives + + real (real_kind), dimension(:,:), allocatable :: & + msg_buffer ! buffer for sending blocks + +!----------------------------------------------------------------------- +! +! initialize return array to zero and set up tripole quantities +! +!----------------------------------------------------------------------- + + ARRAY = 0._real_kind + + this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it + if (this_block%tripoleTFlag) then + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 2 + yoffset = 0 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 1 + yoffset = 1 + case (field_loc_Eface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_Nface) ! cell face location + xoffset = 2 + yoffset = 1 + case (field_loc_noupdate) ! ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + else + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 1 + yoffset = 1 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 0 + yoffset = 0 + case (field_loc_Eface) ! cell face location + xoffset = 0 + yoffset = 1 + case (field_loc_Nface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_noupdate) ! ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + endif + + select case (field_type) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case (field_type_noupdate) ! ghost cells never used - use cell center + isign = 1 + case default + call abort_ice('Unknown field type in scatter') + end select + +!----------------------------------------------------------------------- +! +! if this task is the src_task, copy blocks of global array into +! message buffer and send to other processors. also copy local blocks +! +!----------------------------------------------------------------------- + + if (my_task == src_task) then + + !*** send non-local blocks away + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) > 0 .and. 
& + dst_dist%blockLocation(n)-1 /= my_task) then + + msg_buffer = 0._real_kind + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + + do j=1,ny_block + do i=1,nx_block + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + end do + end do + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + + ! for yoffset=0 or 1, yoffset2=0,0 + ! for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + msg_buffer(i,j-yoffset2) = isign * ARRAY_G(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + + call MPI_SEND(msg_buffer, nx_block*ny_block, & + mpiR4, dst_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + endif + end do + + deallocate(msg_buffer) + + !*** copy any local blocks + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + + do j=1,ny_block + do i=1,nx_block + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + end do + end do + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + + ! for yoffset=0 or 1, yoffset2=0,0 + ! 
for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + ARRAY(i,j-yoffset2,dst_block) & + = isign * ARRAY_G(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + endif + end do + +!----------------------------------------------------------------------- +! +! otherwise receive data from src_task +! +!----------------------------------------------------------------------- + + else + + allocate (rcv_request(nblocks_tot), & + rcv_status(MPI_STATUS_SIZE, nblocks_tot)) + + rcv_request = 0 + rcv_status = 0 + + nrecvs = 0 + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + nrecvs = nrecvs + 1 + dst_block = dst_dist%blockLocalID(n) + call MPI_IRECV(ARRAY(1,1,dst_block), nx_block*ny_block, & + mpiR4, src_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, rcv_request(nrecvs), ierr) + endif + end do + + if (nrecvs > 0) & + call MPI_WAITALL(nrecvs, rcv_request, rcv_status, ierr) + + deallocate(rcv_request, rcv_status) + endif + + !----------------------------------------------------------------- + ! Ensure unused ghost cell values are 0 + !----------------------------------------------------------------- + + if (field_loc == field_loc_noupdate) then + do n=1,nblocks_tot + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + if (dst_block > 0) then + + ! north edge + do j = this_block%jhi+1,ny_block + do i = 1, nx_block + ARRAY (i,j,dst_block) = 0._real_kind + enddo + enddo + ! east edge + do j = 1, ny_block + do i = this_block%ihi+1,nx_block + ARRAY (i,j,dst_block) = 0._real_kind + enddo + enddo + ! south edge + do j = 1, this_block%jlo-1 + do i = 1, nx_block + ARRAY (i,j,dst_block) = 0._real_kind + enddo + enddo + ! west edge + do j = 1, ny_block + do i = 1, this_block%ilo-1 + ARRAY (i,j,dst_block) = 0._real_kind + enddo + enddo + + endif + enddo + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine scatter_global_real + +!*********************************************************************** + + subroutine scatter_global_int(ARRAY, ARRAY_G, src_task, dst_dist, & + field_loc, field_type) + +!----------------------------------------------------------------------- +! +! This subroutine scatters a global-sized array to a distributed array. +! +!----------------------------------------------------------------------- + + include 'mpif.h' + +!----------------------------------------------------------------------- +! +! input variables +! +!----------------------------------------------------------------------- + + integer (int_kind), intent(in) :: & + src_task ! task from which array should be scattered + + integer (int_kind), intent(in) :: & + field_type, &! id for type of field (scalar, vector, angle) + field_loc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + type (distrb), intent(in) :: & + dst_dist ! distribution of resulting blocks + + integer (int_kind), dimension(:,:), intent(in) :: & + ARRAY_G ! array containing global field on src_task + +!----------------------------------------------------------------------- +! +! output variables +! 
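A typical pair of calls to the generic scatter_global interface distributes a global field held on master_task out to the block decomposition; the field_loc/field_type constants set up above control how the tripole ghost rows are filled, with cell-centred scalars copied directly and corner-point vector components picking up the isign = -1 flip. The names work_g, aice, uvel and distrb_info are assumed here, not defined in this file:

      ! Sketch only: work_g holds a global field read on master_task.
      call scatter_global(aice, work_g, master_task, distrb_info, &
                          field_loc_center, field_type_scalar)
      ! velocity components sit at NE corners and change sign across
      ! the tripole seam:
      call scatter_global(uvel, work_g, master_task, distrb_info, &
                          field_loc_NEcorner, field_type_vector)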
+!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), intent(inout) :: & + ARRAY ! array containing distributed field + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n, &! dummy loop indices + nrecvs, &! actual number of messages received + isrc, jsrc, &! source addresses + dst_block, &! location of block in dst array + xoffset, yoffset, &! offsets for tripole boundary conditions + yoffset2, &! + isign, &! sign factor for tripole boundary conditions + ierr ! MPI error flag + + type (block) :: & + this_block ! block info for current block + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + rcv_request ! request array for receives + + integer (int_kind), dimension(:,:), allocatable :: & + rcv_status ! status array for receives + + integer (int_kind), dimension(:,:), allocatable :: & + msg_buffer ! buffer for sending blocks + +!----------------------------------------------------------------------- +! +! initialize return array to zero and set up tripole quantities +! +!----------------------------------------------------------------------- + + ARRAY = 0 + + this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it + if (this_block%tripoleTFlag) then + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 2 + yoffset = 0 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 1 + yoffset = 1 + case (field_loc_Eface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_Nface) ! cell face location + xoffset = 2 + yoffset = 1 + case (field_loc_noupdate) ! ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + else + select case (field_loc) + case (field_loc_center) ! cell center location + xoffset = 1 + yoffset = 1 + case (field_loc_NEcorner) ! cell corner (velocity) location + xoffset = 0 + yoffset = 0 + case (field_loc_Eface) ! cell face location + xoffset = 0 + yoffset = 1 + case (field_loc_Nface) ! cell face location + xoffset = 1 + yoffset = 0 + case (field_loc_noupdate) ! ghost cells never used - use cell center + xoffset = 1 + yoffset = 1 + end select + endif + + select case (field_type) + case (field_type_scalar) + isign = 1 + case (field_type_vector) + isign = -1 + case (field_type_angle) + isign = -1 + case (field_type_noupdate) ! ghost cells never used - use cell center + isign = 1 + case default + call abort_ice('Unknown field type in scatter') + end select + +!----------------------------------------------------------------------- +! +! if this task is the src_task, copy blocks of global array into +! message buffer and send to other processors. also copy local blocks +! +!----------------------------------------------------------------------- + + if (my_task == src_task) then + + !*** send non-local blocks away + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) > 0 .and. & + dst_dist%blockLocation(n)-1 /= my_task) then + + msg_buffer = 0 + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. 
& + this_block%jblock < nblocks_y) then + + do j=1,ny_block + do i=1,nx_block + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + end do + end do + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + + ! for yoffset=0 or 1, yoffset2=0,0 + ! for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + msg_buffer(i,j-yoffset2) = isign * ARRAY_G(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + + call MPI_SEND(msg_buffer, nx_block*ny_block, & + mpi_integer, dst_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + endif + end do + + deallocate(msg_buffer) + + !*** copy any local blocks + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + + do j=1,ny_block + do i=1,nx_block + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + end do + end do + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + + ! for yoffset=0 or 1, yoffset2=0,0 + ! 
for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + ARRAY(i,j-yoffset2,dst_block) & + = isign * ARRAY_G(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + endif + end do + +!----------------------------------------------------------------------- +! +! otherwise receive data from src_task +! +!----------------------------------------------------------------------- + + else + + allocate (rcv_request(nblocks_tot), & + rcv_status(MPI_STATUS_SIZE, nblocks_tot)) + + rcv_request = 0 + rcv_status = 0 + + nrecvs = 0 + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + nrecvs = nrecvs + 1 + dst_block = dst_dist%blockLocalID(n) + call MPI_IRECV(ARRAY(1,1,dst_block), nx_block*ny_block, & + mpi_integer, src_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, rcv_request(nrecvs), ierr) + endif + end do + + if (nrecvs > 0) & + call MPI_WAITALL(nrecvs, rcv_request, rcv_status, ierr) + + deallocate(rcv_request, rcv_status) + endif + + !----------------------------------------------------------------- + ! Ensure unused ghost cell values are 0 + !----------------------------------------------------------------- + + if (field_loc == field_loc_noupdate) then + do n=1,nblocks_tot + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + if (dst_block > 0) then + + ! north edge + do j = this_block%jhi+1,ny_block + do i = 1, nx_block + ARRAY (i,j,dst_block) = 0 + enddo + enddo + ! east edge + do j = 1, ny_block + do i = this_block%ihi+1,nx_block + ARRAY (i,j,dst_block) = 0 + enddo + enddo + ! south edge + do j = 1, this_block%jlo-1 + do i = 1, nx_block + ARRAY (i,j,dst_block) = 0 + enddo + enddo + ! west edge + do j = 1, ny_block + do i = 1, this_block%ilo-1 + ARRAY (i,j,dst_block) = 0 + enddo + enddo + + endif + enddo + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine scatter_global_int + +!*********************************************************************** + + subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) + +! This subroutine scatters a global-sized array to a distributed array. +! +! This is the specific interface for double precision arrays +! corresponding to the generic interface scatter_global. + + include 'mpif.h' + + integer (int_kind), intent(in) :: & + src_task ! task from which array should be scattered + + type (distrb), intent(in) :: & + dst_dist ! distribution of resulting blocks + + real (dbl_kind), dimension(:,:), intent(in) :: & + ARRAY_G ! array containing global field on src_task + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + ARRAY ! array containing distributed field + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n, &! dummy loop indices + iblk, jblk, &! block indices + iglb, jglb, &! global indices + nrecvs, &! actual number of messages received + isrc, jsrc, &! source addresses + dst_block, &! location of block in dst array + ierr ! MPI error flag + + type (block) :: & + this_block ! 
block info for current block + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + rcv_request ! request array for receives + + integer (int_kind), dimension(:,:), allocatable :: & + rcv_status ! status array for receives + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer ! buffer for sending blocks + +!----------------------------------------------------------------------- +! +! initialize return array to zero +! +!----------------------------------------------------------------------- + + ARRAY = c0 + +!----------------------------------------------------------------------- +! +! if this task is the src_task, copy blocks of global array into +! message buffer and send to other processors. also copy local blocks +! +!----------------------------------------------------------------------- + + if (my_task == src_task) then + + !*** send non-local blocks away + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) > 0 .and. & + dst_dist%blockLocation(n)-1 /= my_task) then + + msg_buffer = c0 + this_block = get_block(n,n) + + ! interior + do j = 1, ny_block + do i = 1, nx_block + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i)+nghost,& + this_block%j_glob(j)+nghost) + end do + end do + + if (this_block%jblock == 1) then + ! south edge + do j = 1, nghost + do i = this_block%ilo,this_block%ihi + msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i)+nghost,j) + enddo + do i = 1, nghost + ! southwest corner + iblk = i + jblk = j + iglb = this_block%i_glob(this_block%ilo)+i-1 + jglb = j + msg_buffer(iblk,jblk) = ARRAY_G(iglb,jglb) + ! southeast corner + iblk = this_block%ihi+i + iglb = this_block%i_glob(this_block%ihi)+nghost+i + msg_buffer(iblk,jblk) = ARRAY_G(iglb,jglb) + enddo + enddo + endif + if (this_block%jblock == nblocks_y) then + ! north edge + do j = 1, nghost + do i = this_block%ilo,this_block%ihi + msg_buffer(i,this_block%jhi+j) = ARRAY_G(this_block%i_glob(i)+nghost,& + ny_global+nghost+j) + enddo + do i = 1, nghost + ! northwest corner + iblk = i + jblk = this_block%jhi+j + iglb = this_block%i_glob(this_block%ilo)+i-1 + jglb = ny_global+nghost+j + msg_buffer(iblk,jblk) = ARRAY_G(iglb,jglb) + ! northeast corner + iblk = this_block%ihi+i + iglb = this_block%i_glob(this_block%ihi)+nghost+i + msg_buffer(iblk,jblk) = ARRAY_G(iglb,jglb) + enddo + enddo + endif + if (this_block%iblock == 1) then + ! west edge + do j = this_block%jlo,this_block%jhi + do i = 1, nghost + msg_buffer(i,j) = ARRAY_G(i,this_block%j_glob(j)+nghost) + enddo + enddo + do j = 1, nghost + do i = 1, nghost + ! northwest corner + iblk = i + jblk = this_block%jhi+j + iglb = i + jglb = this_block%j_glob(this_block%jhi)+nghost+j + msg_buffer(iblk,jblk) = ARRAY_G(iglb,jglb) + ! southwest corner + jblk = j + jglb = this_block%j_glob(this_block%jlo)+j-1 + msg_buffer(iblk,jblk) = ARRAY_G(iglb,jglb) + enddo + enddo + endif + if (this_block%iblock == nblocks_x) then + ! east edge + do j = this_block%jlo,this_block%jhi + do i = 1, nghost + msg_buffer(this_block%ihi+i,j) = ARRAY_G(nx_global+nghost+i, & + this_block%j_glob(j)+nghost) + enddo + enddo + do j = 1, nghost + do i = 1, nghost + ! northeast corner + iblk = this_block%ihi+i + jblk = this_block%jhi+j + iglb = nx_global+nghost+i + jglb = this_block%j_glob(this_block%jhi)+nghost+j + msg_buffer(iblk,jblk) = ARRAY_G(iglb,jglb) + ! 
southeast corner + jblk = j + jglb = this_block%j_glob(this_block%jlo)+j-1 + msg_buffer(iblk,jblk) = ARRAY_G(iglb,jglb) + enddo + enddo + endif + + call MPI_SEND(msg_buffer, nx_block*ny_block, & + mpiR8, dst_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + endif + end do + + deallocate(msg_buffer) + + !*** copy any local blocks + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + ! interior + do j = 1, ny_block + do i = 1, nx_block + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i)+nghost,& + this_block%j_glob(j)+nghost) + end do + end do + + if (this_block%jblock == 1) then + ! south edge + do j = 1, nghost + do i = this_block%ilo,this_block%ihi + ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i)+nghost,j) + enddo + do i = 1, nghost + ! southwest corner + iblk = i + jblk = j + iglb = this_block%i_glob(this_block%ilo)+i-1 + jglb = j + ARRAY(iblk,jblk,dst_block) = ARRAY_G(iglb,jglb) + ! southeast corner + iblk = this_block%ihi+i + iglb = this_block%i_glob(this_block%ihi)+nghost+i + ARRAY(iblk,jblk,dst_block) = ARRAY_G(iglb,jglb) + enddo + enddo + endif + if (this_block%jblock == nblocks_y) then + ! north edge + do j = 1, nghost + do i = this_block%ilo,this_block%ihi + ARRAY(i,this_block%jhi+j,dst_block) = ARRAY_G(this_block%i_glob(i)+nghost,& + ny_global+nghost+j) + enddo + do i = 1, nghost + ! northwest corner + iblk = i + jblk = this_block%jhi+j + iglb = this_block%i_glob(this_block%ilo)+i-1 + jglb = ny_global+nghost+j + ARRAY(iblk,jblk,dst_block) = ARRAY_G(iglb,jglb) + ! northeast corner + iblk = this_block%ihi+i + iglb = this_block%i_glob(this_block%ihi)+nghost+i + ARRAY(iblk,jblk,dst_block) = ARRAY_G(iglb,jglb) + enddo + enddo + endif + if (this_block%iblock == 1) then + ! west edge + do j = this_block%jlo,this_block%jhi + do i = 1, nghost + ARRAY(i,j,dst_block) = ARRAY_G(i,this_block%j_glob(j)+nghost) + enddo + enddo + do j = 1, nghost + do i = 1, nghost + ! northwest corner + iblk = i + jblk = this_block%jhi+j + iglb = i + jglb = this_block%j_glob(this_block%jhi)+nghost+j + ARRAY(iblk,jblk,dst_block) = ARRAY_G(iglb,jglb) + ! southwest corner + jblk = j + jglb = this_block%j_glob(this_block%jlo)+j-1 + ARRAY(iblk,jblk,dst_block) = ARRAY_G(iglb,jglb) + enddo + enddo + endif + if (this_block%iblock == nblocks_x) then + ! east edge + do j = this_block%jlo,this_block%jhi + do i = 1, nghost + ARRAY(this_block%ihi+i,j,dst_block) = ARRAY_G(nx_global+nghost+i, & + this_block%j_glob(j)+nghost) + enddo + enddo + do j = 1, nghost + do i = 1, nghost + ! northeast corner + iblk = this_block%ihi+i + jblk = this_block%jhi+j + iglb = nx_global+nghost+i + jglb = this_block%j_glob(this_block%jhi)+nghost+j + ARRAY(iblk,jblk,dst_block) = ARRAY_G(iglb,jglb) + ! southeast corner + jblk = j + jglb = this_block%j_glob(this_block%jlo)+j-1 + ARRAY(iblk,jblk,dst_block) = ARRAY_G(iglb,jglb) + enddo + enddo + endif + + endif + end do + +!----------------------------------------------------------------------- +! +! otherwise receive data from src_task +! 
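+! (each task other than src_task posts one non-blocking MPI_IRECV per
+!  block assigned to it in dst_dist, then waits on all of them with
+!  MPI_WAITALL)
+!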
+!----------------------------------------------------------------------- + + else + + allocate (rcv_request(nblocks_tot), & + rcv_status(MPI_STATUS_SIZE, nblocks_tot)) + + rcv_request = 0 + rcv_status = 0 + + nrecvs = 0 + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + nrecvs = nrecvs + 1 + dst_block = dst_dist%blockLocalID(n) + call MPI_IRECV(ARRAY(1,1,dst_block), nx_block*ny_block, & + mpiR8, src_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, rcv_request(nrecvs), ierr) + endif + end do + + if (nrecvs > 0) & + call MPI_WAITALL(nrecvs, rcv_request, rcv_status, ierr) + + deallocate(rcv_request, rcv_status) + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine scatter_global_ext + +!*********************************************************************** + + subroutine scatter_global_stress(ARRAY, ARRAY_G1, ARRAY_G2, & + src_task, dst_dist) + +! This subroutine scatters global stresses to a distributed array. +! +! Ghost cells in the stress tensor must be handled separately on tripole +! grids, because matching the corner values requires 2 different arrays. + + include 'mpif.h' + + integer (int_kind), intent(in) :: & + src_task ! task from which array should be scattered + + type (distrb), intent(in) :: & + dst_dist ! distribution of resulting blocks + + real (dbl_kind), dimension(:,:), intent(in) :: & + ARRAY_G1, &! array containing global field on src_task + ARRAY_G2 ! array containing global field on src_task + + real (dbl_kind), dimension(:,:,:), intent(inout) :: & + ARRAY ! array containing distributed field + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n, &! dummy loop indices + nrecvs, &! actual number of messages received + isrc, jsrc, &! source addresses + dst_block, &! location of block in dst array + xoffset, yoffset, &! offsets for tripole boundary conditions + yoffset2, &! + isign, &! sign factor for tripole boundary conditions + ierr ! MPI error flag + + type (block) :: & + this_block ! block info for current block + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + rcv_request ! request array for receives + + integer (int_kind), dimension(:,:), allocatable :: & + rcv_status ! status array for receives + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer ! buffer for sending blocks + +!----------------------------------------------------------------------- +! +! initialize return array to zero and set up tripole quantities +! +!----------------------------------------------------------------------- + + ARRAY = c0 + + this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it + if (this_block%tripoleTFlag) then + xoffset = 2 ! treat stresses as cell-centered scalars (they are not + yoffset = 0 ! shared with neighboring grid cells) + else + xoffset = 1 ! treat stresses as cell-centered scalars (they are not + yoffset = 1 ! shared with neighboring grid cells) + endif + isign = 1 + +!----------------------------------------------------------------------- +! +! if this task is the src_task, copy blocks of global array into +! message buffer and send to other processors. also copy local blocks +! 
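+! (interior and ordinary edge blocks are filled from ARRAY_G1; rows that
+!  map across the northern tripole fold are filled from ARRAY_G2 instead,
+!  as noted in the header above)
+!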
+!----------------------------------------------------------------------- + + if (my_task == src_task) then + + !*** send non-local blocks away + + allocate (msg_buffer(nx_block,ny_block)) + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) > 0 .and. & + dst_dist%blockLocation(n)-1 /= my_task) then + + msg_buffer = c0 + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + + do j=1,ny_block + do i=1,nx_block + msg_buffer(i,j) = ARRAY_G1(this_block%i_glob(i),& + this_block%j_glob(j)) + end do + end do + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + msg_buffer(i,j) = ARRAY_G1(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + msg_buffer(i,j) = ARRAY_G1(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + + jsrc = ny_global + yoffset + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + msg_buffer(i,j) = isign * ARRAY_G2(isrc,jsrc) + endif + end do + + endif + end do + + endif + + call MPI_SEND(msg_buffer, nx_block*ny_block, & + mpiR8, dst_dist%blockLocation(n)-1, 3*mpitag_gs+n, & + MPI_COMM_ICE, status, ierr) + + endif + end do + + deallocate(msg_buffer) + + !*** copy any local blocks + + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + dst_block = dst_dist%blockLocalID(n) + this_block = get_block(n,n) + + !*** if this is an interior block, then there is no + !*** padding or update checking required + + if (this_block%iblock > 1 .and. & + this_block%iblock < nblocks_x .and. & + this_block%jblock > 1 .and. & + this_block%jblock < nblocks_y) then + + do j=1,ny_block + do i=1,nx_block + ARRAY(i,j,dst_block) = ARRAY_G1(this_block%i_glob(i),& + this_block%j_glob(j)) + end do + end do + + !*** if this is an edge block but not a northern edge + !*** we only need to check for closed boundaries and + !*** padding (global index = 0) + + else if (this_block%jblock /= nblocks_y) then + + do j=1,ny_block + if (this_block%j_glob(j) /= 0) then + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G1(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + endif + end do + + !*** if this is a northern edge block, we need to check + !*** for and properly deal with tripole boundaries + + else + + do j=1,ny_block + if (this_block%j_glob(j) > 0) then ! normal boundary + + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + ARRAY(i,j,dst_block) = ARRAY_G1(this_block%i_glob(i),& + this_block%j_glob(j)) + endif + end do + + else if (this_block%j_glob(j) < 0) then ! tripole + + ! for yoffset=0 or 1, yoffset2=0,0 + ! 
for yoffset=-1, yoffset2=0,1, for u-rows on T-fold grid + do yoffset2=0,max(yoffset,0)-yoffset + jsrc = ny_global + yoffset + yoffset2 + & + (this_block%j_glob(j) + ny_global) + do i=1,nx_block + if (this_block%i_glob(i) /= 0) then + isrc = nx_global + xoffset - this_block%i_glob(i) + if (isrc < 1) isrc = isrc + nx_global + if (isrc > nx_global) isrc = isrc - nx_global + ARRAY(i,j-yoffset2,dst_block) & + = isign * ARRAY_G2(isrc,jsrc) + endif + end do + end do + + endif + end do + + endif + endif + end do + +!----------------------------------------------------------------------- +! +! otherwise receive data from src_task +! +!----------------------------------------------------------------------- + + else + + allocate (rcv_request(nblocks_tot), & + rcv_status(MPI_STATUS_SIZE, nblocks_tot)) + + rcv_request = 0 + rcv_status = 0 + + nrecvs = 0 + do n=1,nblocks_tot + if (dst_dist%blockLocation(n) == my_task+1) then + nrecvs = nrecvs + 1 + dst_block = dst_dist%blockLocalID(n) + call MPI_IRECV(ARRAY(1,1,dst_block), nx_block*ny_block, & + mpiR8, src_task, 3*mpitag_gs+n, & + MPI_COMM_ICE, rcv_request(nrecvs), ierr) + endif + end do + + if (nrecvs > 0) & + call MPI_WAITALL(nrecvs, rcv_request, rcv_status, ierr) + + deallocate(rcv_request, rcv_status) + endif + +#ifdef gather_scatter_barrier + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + + end subroutine scatter_global_stress + +!*********************************************************************** + + end module ice_gather_scatter + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/mpi/ice_global_reductions.F90 b/mpi/ice_global_reductions.F90 new file mode 100644 index 00000000..8dcdd68a --- /dev/null +++ b/mpi/ice_global_reductions.F90 @@ -0,0 +1,2374 @@ +! SVN:$Id: ice_global_reductions.F90 843 2014-10-02 19:54:30Z eclare $ +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| + + module ice_global_reductions + +! This module contains all the routines for performing global +! reductions like global sums, minvals, maxvals, etc. +! +! author: Phil Jones, LANL +! Oct. 2004: Adapted from POP version by William H. Lipscomb, LANL +! Feb. 2008: Updated from POP version by Elizabeth C. Hunke, LANL +! Aug. 2014: Added bit-for-bit reproducible options for global_sum_dbl +! and global_sum_prod_dbl by T Craig NCAR + + use ice_kinds_mod + use ice_blocks, only: block, get_block, nblocks_tot, nx_block, ny_block + use ice_communicate, only: my_task, mpiR8, mpiR4, master_task + use ice_constants, only: field_loc_Nface, field_loc_NEcorner + use ice_fileunits, only: bfbflag + use ice_distribution, only: distrb, ice_distributionGet, & + ice_distributionGetBlockID + use ice_domain_size, only: nx_global, ny_global, max_blocks + use ice_gather_scatter, only: gather_global + + implicit none + private + save + + include 'mpif.h' + + public :: global_sum, & + global_sum_prod, & + global_maxval, & + global_minval + +!----------------------------------------------------------------------- +! +! generic interfaces for module procedures +! 
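+! (each generic name resolves to a type-specific routine from its
+!  argument types; for example, a call of the form
+!     asum = global_sum(work1, distrb_info, field_loc_center, tarea)
+!  dispatches to global_sum_dbl when the arrays are double precision;
+!  the variable and constant names in this illustration are placeholders)
+!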
+!----------------------------------------------------------------------- + + interface global_sum + module procedure global_sum_dbl, & + global_sum_real, & + global_sum_int, & + global_sum_scalar_dbl, & + global_sum_scalar_real, & + global_sum_scalar_int + end interface + + interface global_sum_prod + module procedure global_sum_prod_dbl, & + global_sum_prod_real, & + global_sum_prod_int + end interface + + interface global_maxval + module procedure global_maxval_dbl, & + global_maxval_real, & + global_maxval_int, & + global_maxval_scalar_dbl, & + global_maxval_scalar_real, & + global_maxval_scalar_int + end interface + + interface global_minval + module procedure global_minval_dbl, & + global_minval_real, & + global_minval_int, & + global_minval_scalar_dbl, & + global_minval_scalar_real, & + global_minval_scalar_int + end interface + +!*********************************************************************** + + contains + +!*********************************************************************** + + function global_sum_dbl(array, dist, field_loc, mMask, lMask) & + result(globalSum) + +! Computes the global sum of the physical domain of a 2-d array. +! +! This is actually the specific interface for the generic global_sum +! function corresponding to double precision arrays. The generic +! interface is identical but will handle real and integer 2-d slabs +! and real, integer, and double precision scalars. + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + array ! array to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + integer (int_kind), intent(in) :: & + field_loc ! location of field on staggered grid + + real (dbl_kind), dimension(:,:,:), intent(in), optional :: & + mMask ! optional multiplicative mask + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + real (dbl_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + real (dbl_kind), dimension(:), allocatable :: & + blockSum, &! sum of local block domain + globalSumTmp ! higher precision global sum + + integer (int_kind) :: & + i,j,iblock,n, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + blockID, &! block location + numProcs, &! number of processor participating + numBlocks, &! number of local blocks + communicator, &! communicator for this distribution + nreduce, &! mpi count + maxiglob ! maximum non-redundant value of i_global + + logical (log_kind) :: & + Nrow ! this field is on a N row (a velocity row) + + real (dbl_kind), dimension(:,:), allocatable :: & + workg ! temporary global array + real (dbl_kind), dimension(:,:,:), allocatable :: & + work ! temporary local array + + type (block) :: & + this_block ! 
holds local block information + +!----------------------------------------------------------------------- + + if (bfbflag) then + allocate(work(nx_block,ny_block,max_blocks)) + work = 0.0_dbl_kind + if (my_task == master_task) then + allocate(workg(nx_global,ny_global)) + else + allocate(workg(1,1)) + endif + workg = 0.0_dbl_kind + else +#ifdef REPRODUCIBLE + nreduce = nblocks_tot +#else + nreduce = 1 +#endif + allocate(blockSum(nreduce), & + globalSumTmp(nreduce)) + blockSum = 0.0_dbl_kind + globalSumTmp = 0.0_dbl_kind + globalSum = 0.0_dbl_kind + endif + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + +#ifdef REPRODUCIBLE + n = blockID +#else + n = 1 +#endif + + if (present(mMask)) then + do j=jb,je + do i=ib,ie + if (bfbflag) then + work(i,j,iblock) = array(i,j,iblock)*mMask(i,j,iblock) + else + blockSum(n) = & + blockSum(n) + array(i,j,iblock)*mMask(i,j,iblock) + endif + end do + end do + else if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + if (bfbflag) then + work(i,j,iblock) = array(i,j,iblock) + else + blockSum(n) = & + blockSum(n) + array(i,j,iblock) + endif + endif + end do + end do + else + do j=jb,je + do i=ib,ie + if (bfbflag) then + work(i,j,iblock) = array(i,j,iblock) + else + blockSum(n) = blockSum(n) + array(i,j,iblock) + endif + end do + end do + endif + + !*** if this row along or beyond tripole boundary + !*** must eliminate redundant points from global sum + + if (.not.bfbflag) then + if (this_block%tripole) then + Nrow=(field_loc == field_loc_Nface .or. & + field_loc == field_loc_NEcorner) + if (Nrow .and. this_block%tripoleTFlag) then + maxiglob = 0 ! entire u-row on T-fold grid + elseif (Nrow .or. this_block%tripoleTFlag) then + maxiglob = nx_global/2 ! half T-row on T-fold and u-row on u-fold + else + maxiglob = -1 ! nothing to do for T-row on u-fold + endif + + if (maxiglob > 0) then + + j = je + + if (present(mMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum(n) = & + blockSum(n) - array(i,j,iblock)*mMask(i,j,iblock) + endif + end do + else if (present(lMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + if (lMask(i,j,iblock)) & + blockSum(n) = blockSum(n) - array(i,j,iblock) + endif + end do + else + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum(n) = blockSum(n) - array(i,j,iblock) + endif + end do + endif + + endif ! maxiglob + endif ! tripole + endif ! 
bfbflag + end do + + if (bfbflag) then + call gather_global(workg, work, master_task, dist, spc_val=0.0_dbl_kind) + globalSum = 0.0_dbl_kind + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + globalSum = globalSum + workg(i,j) + enddo + enddo + endif + call MPI_BCAST(globalSum,1,mpiR8,master_task,communicator,ierr) + deallocate(workg,work) + else + if (my_task < numProcs) then + call MPI_ALLREDUCE(blockSum, globalSumTmp, nreduce, & + mpiR8, MPI_SUM, communicator, ierr) + endif + + do n=1,nreduce + globalSum = globalSum + globalSumTmp(n) + enddo + deallocate(blockSum, globalSumTmp) + endif + +!----------------------------------------------------------------------- + + end function global_sum_dbl + +!*********************************************************************** + + function global_sum_real(array, dist, field_loc, mMask, lMask) & + result(globalSum) + +! Computes the global sum of the physical domain of a 2-d array. +! +! This is actually the specific interface for the generic global_sum +! function corresponding to real arrays. The generic +! interface is identical but will handle real and integer 2-d slabs +! and real, integer, and double precision scalars. + + real (real_kind), dimension(:,:,:), intent(in) :: & + array ! array to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + integer (int_kind), intent(in) :: & + field_loc ! location of field on staggered grid + + real (real_kind), dimension(:,:,:), intent(in), optional :: & + mMask ! optional multiplicative mask + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + real (real_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + +#ifdef REPRODUCIBLE + real (dbl_kind) :: & + blockSum, &! sum of local block domain + localSum, &! sum of all local block domains + globalSumTmp ! higher precision global sum +#else + real (real_kind) :: & + blockSum, &! sum of local block domain + localSum ! sum of all local block domains +#endif + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + blockID, &! block location + numProcs, &! number of processor participating + numBlocks, &! number of local blocks + communicator, &! communicator for this distribution + maxiglob ! maximum non-redundant value of i_global + + logical (log_kind) :: & + Nrow ! this field is on a N row (a velocity row) + + type (block) :: & + this_block ! 
holds local block information + +!----------------------------------------------------------------------- + +#ifdef REPRODUCIBLE + localSum = 0.0_dbl_kind +#else + localSum = 0.0_real_kind +#endif + globalSum = 0.0_real_kind + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + +#ifdef REPRODUCIBLE + blockSum = 0.0_dbl_kind +#else + blockSum = 0.0_real_kind +#endif + + if (present(mMask)) then + do j=jb,je + do i=ib,ie + blockSum = & + blockSum + array(i,j,iblock)*mMask(i,j,iblock) + end do + end do + else if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockSum = & + blockSum + array(i,j,iblock) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockSum = blockSum + array(i,j,iblock) + end do + end do + endif + + !*** if this row along or beyond tripole boundary + !*** must eliminate redundant points from global sum + + if (this_block%tripole) then + Nrow=(field_loc == field_loc_Nface .or. & + field_loc == field_loc_NEcorner) + if (Nrow .and. this_block%tripoleTFlag) then + maxiglob = 0 ! entire u-row on T-fold grid + elseif (Nrow .or. this_block%tripoleTFlag) then + maxiglob = nx_global/2 ! half T-row on T-fold and u-row on u-fold + else + maxiglob = -1 ! nothing to do for T-row on u-fold + endif + + if (maxiglob > 0) then + + j = je + + if (present(mMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum = & + blockSum - array(i,j,iblock)*mMask(i,j,iblock) + endif + end do + else if (present(lMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + if (lMask(i,j,iblock)) & + blockSum = blockSum - array(i,j,iblock) + endif + end do + else + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum = blockSum - array(i,j,iblock) + endif + end do + endif + + endif + endif + + !*** now add block sum to global sum + + localSum = localSum + blockSum + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local sum to global sum +! +!----------------------------------------------------------------------- + +#ifdef REPRODUCIBLE + if (my_task < numProcs) then + call MPI_ALLREDUCE(localSum, globalSumTmp, 1, & + mpiR8, MPI_SUM, communicator, ierr) + globalSum = globalSumTmp + endif +#else + if (my_task < numProcs) then + call MPI_ALLREDUCE(localSum, globalSum, 1, & + mpiR4, MPI_SUM, communicator, ierr) + endif +#endif + +!----------------------------------------------------------------------- + + end function global_sum_real + +!*********************************************************************** + + function global_sum_int(array, dist, field_loc, mMask, lMask) & + result(globalSum) + +! Computes the global sum of the physical domain of a 2-d array. +! +! This is actually the specific interface for the generic global_sum +! function corresponding to integer arrays. The generic +! interface is identical but will handle real and integer 2-d slabs +! and real, integer, and double precision scalars. + + integer (int_kind), dimension(:,:,:), intent(in) :: & + array ! array to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + integer (int_kind), intent(in) :: & + field_loc ! 
location of field on staggered grid + + integer (int_kind), dimension(:,:,:), intent(in), optional :: & + mMask ! optional multiplicative mask + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + integer (int_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + blockSum, &! sum of local block domain + localSum ! sum of all local block domains + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + blockID, &! block location + numProcs, &! number of processor participating + numBlocks, &! number of local blocks + communicator, &! communicator for this distribution + maxiglob ! maximum non-redundant value of i_global + + logical (log_kind) :: & + Nrow ! this field is on a N row (a velocity row) + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + + localSum = 0_int_kind + globalSum = 0_int_kind + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + blockSum = 0 + + if (present(mMask)) then + do j=jb,je + do i=ib,ie + blockSum = & + blockSum + array(i,j,iblock)*mMask(i,j,iblock) + end do + end do + else if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockSum = & + blockSum + array(i,j,iblock) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockSum = blockSum + array(i,j,iblock) + end do + end do + endif + + !*** if this row along or beyond tripole boundary + !*** must eliminate redundant points from global sum + + if (this_block%tripole) then + Nrow=(field_loc == field_loc_Nface .or. & + field_loc == field_loc_NEcorner) + if (Nrow .and. this_block%tripoleTFlag) then + maxiglob = 0 ! entire u-row on T-fold grid + elseif (Nrow .or. this_block%tripoleTFlag) then + maxiglob = nx_global/2 ! half T-row on T-fold and u-row on u-fold + else + maxiglob = -1 ! nothing to do for T-row on u-fold + endif + + if (maxiglob > 0) then + + j = je + + if (present(mMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum = & + blockSum - array(i,j,iblock)*mMask(i,j,iblock) + endif + end do + else if (present(lMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + if (lMask(i,j,iblock)) & + blockSum = blockSum - array(i,j,iblock) + endif + end do + else + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum = blockSum - array(i,j,iblock) + endif + end do + endif + + endif + endif + + !*** now add block sum to global sum + + localSum = localSum + blockSum + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local sum to global sum +! 
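+! (integer fields are reduced with MPI_INTEGER; only tasks belonging to
+!  this distribution, my_task < numProcs, take part in the reduction)
+!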
+!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(localSum, globalSum, 1, & + MPI_INTEGER, MPI_SUM, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_sum_int + +!*********************************************************************** + + function global_sum_scalar_dbl(scalar, dist) & + result(globalSum) + +! Computes the global sum of a set of scalars distributed across +! a parallel machine. +! +! This is actually the specific interface for the generic global_sum +! function corresponding to double precision scalars. The generic +! interface is identical but will handle real and integer 2-d slabs +! and real, integer, and double precision scalars. + + real (dbl_kind), intent(in) :: & + scalar ! scalar to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + real (dbl_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr, &! mpi error flag + numProcs, &! number of processor participating + numBlocks, &! number of local blocks + communicator ! communicator for this distribution + +!#ifdef REPRODUCIBLE +! real (r16_kind) :: & +! scalarTmp, globalSumTmp ! higher precision for reproducibility +!#endif + +!----------------------------------------------------------------------- +! +! get communicator for MPI calls +! +!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local sum to global sum +! REPRODUCIBLE option is commented out because MPI does not handle +! REAL16 correctly. +! +!----------------------------------------------------------------------- + +!#ifdef REPRODUCIBLE +! if (my_task < numProcs) then +! scalarTmp = scalar +! call MPI_ALLREDUCE(scalarTmp, globalSumTmp, 1, & +! mpiR16, MPI_SUM, communicator, ierr) +! globalSum = globalSumTmp +! endif +!#else + if (my_task < numProcs) then + call MPI_ALLREDUCE(scalar, globalSum, 1, & + mpiR8, MPI_SUM, communicator, ierr) + endif +!#endif + +!----------------------------------------------------------------------- + + end function global_sum_scalar_dbl + +!*********************************************************************** + + function global_sum_scalar_real(scalar, dist) & + result(globalSum) + +! Computes the global sum of a set of scalars distributed across +! a parallel machine. +! +! This is actually the specific interface for the generic global_sum +! function corresponding to real scalars. The generic +! interface is identical but will handle real and integer 2-d slabs +! and real, integer, and double precision scalars. + + real (real_kind), intent(in) :: & + scalar ! scalar to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + real (real_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr, &! mpi error flag + numProcs, &! 
number of processor participating + numBlocks, &! number of local blocks + communicator ! communicator for this distribution + +#ifdef REPRODUCIBLE + real (dbl_kind) :: & + scalarTmp, globalSumTmp ! higher precision for reproducibility +#endif + +!----------------------------------------------------------------------- +! +! get communicator for MPI calls +! +!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local sum to global sum +! +!----------------------------------------------------------------------- + +#ifdef REPRODUCIBLE + if (my_task < numProcs) then + scalarTmp = scalar + call MPI_ALLREDUCE(scalarTmp, globalSumTmp, 1, & + mpiR8, MPI_SUM, communicator, ierr) + globalSum = globalSumTmp + endif +#else + if (my_task < numProcs) then + call MPI_ALLREDUCE(scalar, globalSum, 1, & + mpiR4, MPI_SUM, communicator, ierr) + endif +#endif + +!----------------------------------------------------------------------- + + end function global_sum_scalar_real + +!*********************************************************************** + + function global_sum_scalar_int(scalar, dist) & + result(globalSum) + +! Computes the global sum of a set of scalars distributed across +! a parallel machine. +! +! This is actually the specific interface for the generic global_sum +! function corresponding to integer scalars. The generic +! interface is identical but will handle real and integer 2-d slabs +! and real, integer, and double precision scalars. + + integer (int_kind), intent(in) :: & + scalar ! scalar to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + integer (int_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr, &! mpi error flag + numProcs, &! number of processor participating + numBlocks, &! number of local blocks + communicator ! communicator for this distribution + +!----------------------------------------------------------------------- +! +! get communicator for MPI calls +! +!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local sum to global sum +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(scalar, globalSum, 1, & + MPI_INTEGER, MPI_SUM, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_sum_scalar_int + +!*********************************************************************** + + function global_sum_prod_dbl (array1, array2, dist, field_loc, & + mMask, lMask) & + result(globalSum) + +! Computes the global sum of the physical domain of a product of +! two 2-d arrays. +! +! This is actually the specific interface for the generic +! global_sum_prod function corresponding to double precision arrays. +! The generic interface is identical but will handle real and integer +! 
2-d slabs. + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + array1, array2 ! arrays whose product is to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for arrays + + integer (int_kind), intent(in) :: & + field_loc ! location of field on staggered grid + + real (dbl_kind), dimension(:,:,:), intent(in), optional :: & + mMask ! optional multiplicative mask + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + real (dbl_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + real (dbl_kind), dimension(:), allocatable :: & + blockSum, &! sum of local block domain + globalSumTmp ! higher precision global sum + + integer (int_kind) :: & + i,j,iblock,n, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + blockID, &! block location + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + nreduce, &! mpi count + maxiglob ! maximum non-redundant value of i_global + + logical (log_kind) :: & + Nrow ! this field is on a N row (a velocity row) + + real (dbl_kind), dimension(:,:), allocatable :: & + workg ! temporary global array + real (dbl_kind), dimension(:,:,:), allocatable :: & + work ! tempoerary local array + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + + if (bfbflag) then + allocate(work(nx_block,ny_block,max_blocks)) + work = 0.0_dbl_kind + if (my_task == master_task) then + allocate(workg(nx_global,ny_global)) + else + allocate(workg(1,1)) + endif + workg = 0.0_dbl_kind + else +#ifdef REPRODUCIBLE + nreduce = nblocks_tot +#else + nreduce = 1 +#endif + allocate(blockSum(nreduce), & + globalSumTmp(nreduce)) + blockSum = 0.0_dbl_kind + globalSumTmp = 0.0_dbl_kind + globalSum = 0.0_dbl_kind + endif + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + +#ifdef REPRODUCIBLE + n = blockID +#else + n = 1 +#endif + + if (present(mMask)) then + do j=jb,je + do i=ib,ie + if (bfbflag) then + work(i,j,iblock) = array1(i,j,iblock)*array2(i,j,iblock)* & + mMask(i,j,iblock) + else + blockSum(n) = & + blockSum(n) + array1(i,j,iblock)*array2(i,j,iblock)* & + mMask(i,j,iblock) + endif + end do + end do + else if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + if (bfbflag) then + work(i,j,iblock) = array1(i,j,iblock)*array2(i,j,iblock) + else + blockSum(n) = & + blockSum(n) + array1(i,j,iblock)*array2(i,j,iblock) + endif + endif + end do + end do + else + do j=jb,je + do i=ib,ie + if (bfbflag) then + work(i,j,iblock) = array1(i,j,iblock)*array2(i,j,iblock) + else + blockSum(n) = blockSum(n) + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + end do + endif + + !*** if this row along or beyond tripole boundary + !*** must eliminate redundant points from global sum + + if (.not.bfbflag) then + if (this_block%tripole) then + Nrow=(field_loc == field_loc_Nface .or. & + field_loc == field_loc_NEcorner) + if (Nrow .and. 
this_block%tripoleTFlag) then + maxiglob = 0 ! entire u-row on T-fold grid + elseif (Nrow .or. this_block%tripoleTFlag) then + maxiglob = nx_global/2 ! half T-row on T-fold and u-row on u-fold + else + maxiglob = -1 ! nothing to do for T-row on u-fold + endif + + if (maxiglob > 0) then + + j = je + + if (present(mMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum(n) = & + blockSum(n) - array1(i,j,iblock)*array2(i,j,iblock)* & + mMask(i,j,iblock) + endif + end do + else if (present(lMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + if (lMask(i,j,iblock)) & + blockSum(n) = blockSum(n) - & + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + else + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum(n) = blockSum(n) - & + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + endif + + endif ! maxiglob + endif ! tripole + endif ! bfbflag + end do + + if (bfbflag) then + call gather_global(workg, work, master_task, dist, spc_val=0.0_dbl_kind) + globalSum = 0.0_dbl_kind + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + globalSum = globalSum + workg(i,j) + enddo + enddo + endif + call MPI_BCAST(globalSum,1,mpiR8,master_task,communicator,ierr) + deallocate(workg,work) + else + if (my_task < numProcs) then + call MPI_ALLREDUCE(blockSum, globalSumTmp, nreduce, & + mpiR8, MPI_SUM, communicator, ierr) + endif + + do n=1,nreduce + globalSum = globalSum + globalSumTmp(n) + enddo + deallocate(blockSum, globalSumTmp) + endif + +!----------------------------------------------------------------------- + + end function global_sum_prod_dbl + +!*********************************************************************** + + function global_sum_prod_real (array1, array2, dist, field_loc, & + mMask, lMask) & + result(globalSum) + +! Computes the global sum of the physical domain of a product of +! two 2-d arrays. +! +! This is actually the specific interface for the generic +! global_sum_prod function corresponding to single precision arrays. +! The generic interface is identical but will handle real and integer +! 2-d slabs. + + real (real_kind), dimension(:,:,:), intent(in) :: & + array1, array2 ! arrays whose product is to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for arrays + + integer (int_kind), intent(in) :: & + field_loc ! location of field on staggered grid + + real (real_kind), dimension(:,:,:), intent(in), optional :: & + mMask ! optional multiplicative mask + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + real (real_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + +#ifdef REPRODUCIBLE + real (dbl_kind) :: & + blockSum, &! sum of local block domain + localSum, &! sum of all local block domains + globalSumTmp ! higher precision for reproducibility +#else + real (real_kind) :: & + blockSum, &! sum of local block domain + localSum ! sum of all local block domains +#endif + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + blockID, &! block location + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + maxiglob ! 
maximum non-redundant value of i_global + + logical (log_kind) :: & + Nrow ! this field is on a N row (a velocity row) + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + +#ifdef REPRODUCIBLE + localSum = 0.0_dbl_kind +#else + localSum = 0.0_real_kind +#endif + globalSum = 0.0_real_kind + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + +#ifdef REPRODUCIBLE + blockSum = 0.0_dbl_kind +#else + blockSum = 0.0_real_kind +#endif + + if (present(mMask)) then + do j=jb,je + do i=ib,ie + blockSum = & + blockSum + array1(i,j,iblock)*array2(i,j,iblock)* & + mMask(i,j,iblock) + end do + end do + else if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockSum = & + blockSum + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockSum = blockSum + array1(i,j,iblock)*array2(i,j,iblock) + end do + end do + endif + + !*** if this row along or beyond tripole boundary + !*** must eliminate redundant points from global sum + + if (this_block%tripole) then + Nrow=(field_loc == field_loc_Nface .or. & + field_loc == field_loc_NEcorner) + if (Nrow .and. this_block%tripoleTFlag) then + maxiglob = 0 ! entire u-row on T-fold grid + elseif (Nrow .or. this_block%tripoleTFlag) then + maxiglob = nx_global/2 ! half T-row on T-fold and u-row on u-fold + else + maxiglob = -1 ! nothing to do for T-row on u-fold + endif + + if (maxiglob > 0) then + + j = je + + if (present(mMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum = & + blockSum - array1(i,j,iblock)*array2(i,j,iblock)* & + mMask(i,j,iblock) + endif + end do + else if (present(lMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + if (lMask(i,j,iblock)) & + blockSum = blockSum - & + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + else + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum = blockSum - & + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + endif + + endif + endif + + !*** now add block sum to global sum + + localSum = localSum + blockSum + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local sum to global sum +! +!----------------------------------------------------------------------- + +#ifdef REPRODUCIBLE + if (my_task < numProcs) then + call MPI_ALLREDUCE(localSum, globalSumTmp, 1, & + mpiR8, MPI_SUM, communicator, ierr) + globalSum = globalSumTmp + endif +#else + if (my_task < numProcs) then + call MPI_ALLREDUCE(localSum, globalSum, 1, & + mpiR4, MPI_SUM, communicator, ierr) + endif +#endif + +!----------------------------------------------------------------------- + + end function global_sum_prod_real + +!*********************************************************************** + + function global_sum_prod_int (array1, array2, dist, field_loc, & + mMask, lMask) & + result(globalSum) + +! Computes the global sum of the physical domain of a product of +! two 2-d arrays. +! +! This is actually the specific interface for the generic +! global_sum_prod function corresponding to integer arrays. +! 
The generic interface is identical but will handle real and integer +! 2-d slabs. + + integer (int_kind), dimension(:,:,:), intent(in) :: & + array1, array2 ! arrays whose product is to be summed + + type (distrb), intent(in) :: & + dist ! block distribution for arrays + + integer (int_kind), intent(in) :: & + field_loc ! location of field on staggered grid + + integer (int_kind), dimension(:,:,:), intent(in), optional :: & + mMask ! optional multiplicative mask + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + integer (int_kind) :: & + globalSum ! resulting global sum + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + blockSum, &! sum of local block domain + localSum ! sum of all local block domains + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + blockID, &! block location + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + maxiglob ! maximum non-redundant value of i_global + + logical (log_kind) :: & + Nrow ! this field is on a N row (a velocity row) + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + + localSum = 0_int_kind + globalSum = 0_int_kind + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + blockSum = 0 + + if (present(mMask)) then + do j=jb,je + do i=ib,ie + blockSum = & + blockSum + array1(i,j,iblock)*array2(i,j,iblock)* & + mMask(i,j,iblock) + end do + end do + else if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockSum = & + blockSum + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockSum = blockSum + array1(i,j,iblock)*array2(i,j,iblock) + end do + end do + endif + + !*** if this row along or beyond tripole boundary + !*** must eliminate redundant points from global sum + + if (this_block%tripole) then + Nrow=(field_loc == field_loc_Nface .or. & + field_loc == field_loc_NEcorner) + if (Nrow .and. this_block%tripoleTFlag) then + maxiglob = 0 ! entire u-row on T-fold grid + elseif (Nrow .or. this_block%tripoleTFlag) then + maxiglob = nx_global/2 ! half T-row on T-fold and u-row on u-fold + else + maxiglob = -1 ! 
nothing to do for T-row on u-fold + endif + + if (maxiglob > 0) then + + j = je + + if (present(mMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum = & + blockSum - array1(i,j,iblock)*array2(i,j,iblock)* & + mMask(i,j,iblock) + endif + end do + else if (present(lMask)) then + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + if (lMask(i,j,iblock)) & + blockSum = blockSum - & + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + else + do i=ib,ie + if (this_block%i_glob(i) > maxiglob) then + blockSum = blockSum - & + array1(i,j,iblock)*array2(i,j,iblock) + endif + end do + endif + + endif + endif + + !*** now add block sum to global sum + + localSum = localSum + blockSum + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local sum to global sum +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(localSum, globalSum, 1, & + MPI_INTEGER, MPI_SUM, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_sum_prod_int + +!*********************************************************************** + + function global_maxval_dbl (array, dist, lMask) & + result(globalMaxval) + +! Computes the global maximum value of the physical domain of a 2-d field +! +! This is actually the specific interface for the generic global_maxval +! function corresponding to double precision arrays. + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + array ! array for which max value needed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + real (dbl_kind) :: & + globalMaxval ! resulting maximum value of array + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + real (dbl_kind) :: & + blockMaxval, &! sum of local block domain + localMaxval ! sum of all local block domains + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + blockID ! block location + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + + localMaxval = -HUGE(0.0_dbl_kind) + globalMaxval = -HUGE(0.0_dbl_kind) + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + blockMaxval = -HUGE(0.0_dbl_kind) + + if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockMaxval = max(blockMaxval,array(i,j,iblock)) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockMaxval = max(blockMaxval,array(i,j,iblock)) + end do + end do + endif + + localMaxval = max(localMaxval,blockMaxval) + + end do + +!----------------------------------------------------------------------- +! +! 
now use MPI global reduction to reduce local maxval to global maxval +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, & + mpiR8, MPI_MAX, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_maxval_dbl + +!*********************************************************************** + + function global_maxval_real (array, dist, lMask) & + result(globalMaxval) + +! Computes the global maximum value of the physical domain of a 2-d field +! +! This is actually the specific interface for the generic global_maxval +! function corresponding to single precision arrays. + + real (real_kind), dimension(:,:,:), intent(in) :: & + array ! array for which max value needed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + real (real_kind) :: & + globalMaxval ! resulting maximum value of array + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + real (real_kind) :: & + blockMaxval, &! sum of local block domain + localMaxval ! sum of all local block domains + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + blockID ! block location + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + + localMaxval = -HUGE(0.0_real_kind) + globalMaxval = -HUGE(0.0_real_kind) + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + blockMaxval = -HUGE(0.0_real_kind) + + if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockMaxval = max(blockMaxval,array(i,j,iblock)) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockMaxval = max(blockMaxval,array(i,j,iblock)) + end do + end do + endif + + localMaxval = max(localMaxval,blockMaxval) + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local maxval to global maxval +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, & + mpiR4, MPI_MAX, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_maxval_real + +!*********************************************************************** + + function global_maxval_int (array, dist, lMask) & + result(globalMaxval) + +! Computes the global maximum value of the physical domain of a 2-d field +! +! This is actually the specific interface for the generic global_maxval +! function corresponding to integer arrays. + + integer (int_kind), dimension(:,:,:), intent(in) :: & + array ! 
array for which max value needed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + integer (int_kind) :: & + globalMaxval ! resulting maximum value of array + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + blockMaxval, &! sum of local block domain + localMaxval ! sum of all local block domains + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + blockID ! block location + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + + localMaxval = -HUGE(0_int_kind) + globalMaxval = -HUGE(0_int_kind) + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + blockMaxval = -HUGE(0_int_kind) + + if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockMaxval = max(blockMaxval,array(i,j,iblock)) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockMaxval = max(blockMaxval,array(i,j,iblock)) + end do + end do + endif + + localMaxval = max(localMaxval,blockMaxval) + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local maxval to global maxval +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, & + MPI_INTEGER, MPI_MAX, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_maxval_int + +!*********************************************************************** + + function global_maxval_scalar_dbl (scalar, dist) & + result(globalMaxval) + +! Computes the global maximum value of a scalar value across +! a distributed machine. +! +! This is actually the specific interface for the generic global_maxval +! function corresponding to double precision scalars. + + real (dbl_kind), intent(in) :: & + scalar ! scalar for which max value needed + + type (distrb), intent(in) :: & + dist ! block distribution + + real (dbl_kind) :: & + globalMaxval ! resulting maximum value + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr, &! mpi error flag + numProcs, &! number of processor participating + communicator ! communicator for this distribution + +!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + nprocs = numProcs, & + communicator = communicator) + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local maxval to global maxval +! 
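+! (a single MPI_ALLREDUCE with MPI_MAX compares the scalar from every
+!  participating task and returns the same maximum on all of them)
+!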
+!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(scalar, globalMaxval, 1, & + mpiR8, MPI_MAX, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_maxval_scalar_dbl + +!*********************************************************************** + + function global_maxval_scalar_real (scalar, dist) & + result(globalMaxval) + +! Computes the global maximum value of a scalar value across +! a distributed machine. +! +! This is actually the specific interface for the generic global_maxval +! function corresponding to single precision scalars. + + real (real_kind), intent(in) :: & + scalar ! scalar for which max value needed + + type (distrb), intent(in) :: & + dist ! block distribution + + real (real_kind) :: & + globalMaxval ! resulting maximum value + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr, &! mpi error flag + numProcs, &! number of processor participating + communicator ! communicator for this distribution + +!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + nprocs = numProcs, & + communicator = communicator) + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local maxval to global maxval +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(scalar, globalMaxval, 1, & + mpiR4, MPI_MAX, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_maxval_scalar_real + +!*********************************************************************** + + function global_maxval_scalar_int (scalar, dist) & + result(globalMaxval) + +! Computes the global maximum value of a scalar value across +! a distributed machine. +! +! This is actually the specific interface for the generic global_maxval +! function corresponding to single precision scalars. + + integer (int_kind), intent(in) :: & + scalar ! scalar for which max value needed + + type (distrb), intent(in) :: & + dist ! block distribution + + integer (int_kind) :: & + globalMaxval ! resulting maximum value + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr, &! mpi error flag + numProcs, &! number of processor participating + communicator ! communicator for this distribution + +!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + nprocs = numProcs, & + communicator = communicator) + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local maxval to global maxval +! 
+!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(scalar, globalMaxval, 1, & + MPI_INTEGER, MPI_MAX, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_maxval_scalar_int + +!*********************************************************************** + + function global_minval_dbl (array, dist, lMask) & + result(globalMinval) + +! Computes the global minimum value of the physical domain of a 2-d field +! +! This is actually the specific interface for the generic global_minval +! function corresponding to double precision arrays. + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + array ! array for which min value needed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + real (dbl_kind) :: & + globalMinval ! resulting minimum value of array + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + real (dbl_kind) :: & + blockMinval, &! sum of local block domain + localMinval ! sum of all local block domains + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + blockID ! block location + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + + localMinval = HUGE(0.0_dbl_kind) + globalMinval = HUGE(0.0_dbl_kind) + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + blockMinval = HUGE(0.0_dbl_kind) + + if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockMinval = min(blockMinval,array(i,j,iblock)) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockMinval = min(blockMinval,array(i,j,iblock)) + end do + end do + endif + + localMinval = min(localMinval,blockMinval) + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local minval to global minval +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(localMinval, globalMinval, 1, & + mpiR8, MPI_MIN, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_minval_dbl + +!*********************************************************************** + + function global_minval_real (array, dist, lMask) & + result(globalMinval) + +! Computes the global minimum value of the physical domain of a 2-d field +! +! This is actually the specific interface for the generic global_minval +! function corresponding to single precision arrays. + + real (real_kind), dimension(:,:,:), intent(in) :: & + array ! array for which min value needed + + type (distrb), intent(in) :: & + dist ! 
block distribution for array X + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + real (real_kind) :: & + globalMinval ! resulting minimum value of array + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + real (real_kind) :: & + blockMinval, &! sum of local block domain + localMinval ! sum of all local block domains + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + blockID ! block location + + type (block) :: & + this_block ! holds local block information + +!----------------------------------------------------------------------- + + localMinval = HUGE(0.0_real_kind) + globalMinval = HUGE(0.0_real_kind) + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + blockMinval = HUGE(0.0_real_kind) + + if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockMinval = min(blockMinval,array(i,j,iblock)) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockMinval = min(blockMinval,array(i,j,iblock)) + end do + end do + endif + + localMinval = min(localMinval,blockMinval) + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local minval to global minval +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(localMinval, globalMinval, 1, & + mpiR4, MPI_MIN, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_minval_real + +!*********************************************************************** + + function global_minval_int (array, dist, lMask) & + result(globalMinval) + +! Computes the global minimum value of the physical domain of a 2-d field +! +! This is actually the specific interface for the generic global_minval +! function corresponding to integer arrays. + + integer (int_kind), dimension(:,:,:), intent(in) :: & + array ! array for which min value needed + + type (distrb), intent(in) :: & + dist ! block distribution for array X + + logical (log_kind), dimension(:,:,:), intent(in), optional :: & + lMask ! optional logical mask + + integer (int_kind) :: & + globalMinval ! resulting minimum value of array + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + blockMinval, &! sum of local block domain + localMinval ! sum of all local block domains + + integer (int_kind) :: & + i,j,iblock, &! local counters + ib,ie,jb,je, &! beg,end of physical domain + ierr, &! mpi error flag + numBlocks, &! number of local blocks + numProcs, &! number of processor participating + communicator, &! communicator for this distribution + blockID ! block location + + type (block) :: & + this_block ! 
holds local block information + +!----------------------------------------------------------------------- + + localMinval = HUGE(0_int_kind) + globalMinval = HUGE(0_int_kind) + + call ice_distributionGet(dist, & + numLocalBlocks = numBlocks, & + nprocs = numProcs, & + communicator = communicator) + + do iblock=1,numBlocks + call ice_distributionGetBlockID(dist, iblock, blockID) + + this_block = get_block(blockID, blockID) + + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + blockMinval = HUGE(0_int_kind) + + if (present(lMask)) then + do j=jb,je + do i=ib,ie + if (lMask(i,j,iblock)) then + blockMinval = min(blockMinval,array(i,j,iblock)) + endif + end do + end do + else + do j=jb,je + do i=ib,ie + blockMinval = min(blockMinval,array(i,j,iblock)) + end do + end do + endif + + localMinval = min(localMinval,blockMinval) + + end do + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local minval to global minval +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(localMinval, globalMinval, 1, & + MPI_INTEGER, MPI_MIN, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_minval_int + +!*********************************************************************** + + function global_minval_scalar_dbl (scalar, dist) & + result(globalMinval) + +! Computes the global minimum value of a scalar value across +! a distributed machine. +! +! This is actually the specific interface for the generic global_minval +! function corresponding to double precision scalars. + + real (dbl_kind), intent(in) :: & + scalar ! scalar for which min value needed + + type (distrb), intent(in) :: & + dist ! block distribution + + real (dbl_kind) :: & + globalMinval ! resulting minimum value + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr, &! mpi error flag + numProcs, &! number of processor participating + communicator ! communicator for this distribution + +!----------------------------------------------------------------------- + + call ice_distributionGet(dist, & + nprocs = numProcs, & + communicator = communicator) + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local minval to global minval +! +!----------------------------------------------------------------------- + + if (my_task < numProcs) then + call MPI_ALLREDUCE(scalar, globalMinval, 1, & + mpiR8, MPI_MIN, communicator, ierr) + endif + +!----------------------------------------------------------------------- + + end function global_minval_scalar_dbl + +!*********************************************************************** + + function global_minval_scalar_real (scalar, dist) & + result(globalMinval) + +! Computes the global minimum value of a scalar value across +! a distributed machine. +! +! This is actually the specific interface for the generic global_minval +! function corresponding to single precision scalars. + + real (real_kind), intent(in) :: & + scalar ! scalar for which min value needed + + type (distrb), intent(in) :: & + dist ! block distribution + + real (real_kind) :: & + globalMinval ! 
resulting minimum value
+
+!-----------------------------------------------------------------------
+!
+! local variables
+!
+!-----------------------------------------------------------------------
+
+ integer (int_kind) :: &
+ ierr, &! mpi error flag
+ numProcs, &! number of processors participating
+ communicator ! communicator for this distribution
+
+!-----------------------------------------------------------------------
+
+ call ice_distributionGet(dist, &
+ nprocs = numProcs, &
+ communicator = communicator)
+
+!-----------------------------------------------------------------------
+!
+! now use MPI global reduction to reduce local minval to global minval
+!
+!-----------------------------------------------------------------------
+
+ if (my_task < numProcs) then
+ call MPI_ALLREDUCE(scalar, globalMinval, 1, &
+ mpiR4, MPI_MIN, communicator, ierr)
+ endif
+
+!-----------------------------------------------------------------------
+
+ end function global_minval_scalar_real
+
+!***********************************************************************
+
+ function global_minval_scalar_int (scalar, dist) &
+ result(globalMinval)
+
+! Computes the global minimum value of a scalar value across
+! a distributed machine.
+!
+! This is actually the specific interface for the generic global_minval
+! function corresponding to integer scalars.
+
+ integer (int_kind), intent(in) :: &
+ scalar ! scalar for which min value needed
+
+ type (distrb), intent(in) :: &
+ dist ! block distribution
+
+ integer (int_kind) :: &
+ globalMinval ! resulting minimum value
+
+!-----------------------------------------------------------------------
+!
+! local variables
+!
+!-----------------------------------------------------------------------
+
+ integer (int_kind) :: &
+ ierr, &! mpi error flag
+ numProcs, &! number of processors participating
+ communicator ! communicator for this distribution
+
+!-----------------------------------------------------------------------
+
+ call ice_distributionGet(dist, &
+ nprocs = numProcs, &
+ communicator = communicator)
+
+!-----------------------------------------------------------------------
+!
+! now use MPI global reduction to reduce local minval to global minval
+!
+!-----------------------------------------------------------------------
+
+ if (my_task < numProcs) then
+ call MPI_ALLREDUCE(scalar, globalMinval, 1, &
+ MPI_INTEGER, MPI_MIN, communicator, ierr)
+ endif
+
+!-----------------------------------------------------------------------
+
+ end function global_minval_scalar_int
+
+!***********************************************************************
+
+ end module ice_global_reductions
+
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
diff --git a/mpi/ice_timers.F90 b/mpi/ice_timers.F90
new file mode 100644
index 00000000..835e7675
--- /dev/null
+++ b/mpi/ice_timers.F90
@@ -0,0 +1,725 @@
+! SVN:$Id: ice_timers.F90 820 2014-08-26 19:08:29Z eclare $
+!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+
+ module ice_timers
+
+! This module contains routines for supporting multiple CPU timers
+! and accumulating time for each individual block and node (task).
+!
+! 2005: Adapted from POP by William Lipscomb
+! Replaced 'stdout' by 'nu_diag'
+! 2006 ECH: Replaced 'system_clock' timing mechanism by 'MPI_WTIME'
+! for MPI runs. Single-processor runs still use system_clock.
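+!
+! A typical calling sequence (an illustrative sketch only; the timer ids
+! and routines are the public entities declared below, but the driver
+! code around them is assumed):
+!
+!   call init_ice_timers                    ! define the standard timers
+!   call ice_timer_start(timer_dynamics)    ! start outside a block loop
+!   ...                                     ! timed work
+!   call ice_timer_stop(timer_dynamics)
+!   call ice_timer_print_all(stats=.true.)  ! summary written to nu_diag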
+ + use ice_kinds_mod + use ice_constants, only: c0, c1, bignum + use ice_domain, only: nblocks, distrb_info + use ice_global_reductions, only: global_minval, global_maxval, global_sum + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_diag + use ice_communicate, only: my_task, master_task + + implicit none + private + save + + public :: init_ice_timers, & + get_ice_timer, & + ice_timer_clear, & + ice_timer_start, & + ice_timer_stop, & + ice_timer_print, & + ice_timer_print_all, & + ice_timer_check + +!----------------------------------------------------------------------- +! public timers +!----------------------------------------------------------------------- + + integer (int_kind), public :: & + timer_total, &! total time + timer_step, &! time stepping + timer_dynamics, &! dynamics + timer_advect, &! horizontal advection + timer_column, &! column + timer_thermo, &! thermodynamics + timer_sw, &! radiative transfer + timer_ponds, &! melt ponds + timer_ridge, &! ridging + timer_catconv, &! category conversions + timer_couple, &! coupling + timer_readwrite, &! read/write + timer_diags, &! diagnostics/history + timer_hist, &! diagnostics/history +#if (defined CCSMCOUPLED) + timer_cplrecv, &! receive from coupler + timer_rcvsnd, &! time between receive to send + timer_cplsend, &! send to coupled + timer_sndrcv, &! time between send to receive +#endif + timer_bound, &! boundary updates + timer_bgc ! biogeochemistry +! timer_tmp ! for temporary timings + +!----------------------------------------------------------------------- +! +! module variables +! +!----------------------------------------------------------------------- + + integer (int_kind), parameter :: & + max_timers = 50 ! max number of timers + + type timer_data + character (char_len) :: & + name ! timer name + + logical (log_kind) :: & + in_use, &! true if timer initialized + node_started ! true if any thread has started timer + + integer (int_kind) :: & + num_blocks, &! number of blocks using this timer + num_nodes, &! number of nodes using this timer + num_starts, &! number of start requests + num_stops ! number of stop requests + + real (dbl_kind) :: & + node_cycles1, &! cycle number at start for node timer + node_cycles2 ! cycle number at stop for node timer + + real (dbl_kind) :: & + node_accum_time ! accumulated time for node timer + + logical (log_kind), dimension(:), pointer :: & + block_started ! true if block timer started + + real (dbl_kind), dimension(:), pointer :: & + block_cycles1, &! cycle number at start for block timers + block_cycles2 ! cycle number at stop for block timers + + real (dbl_kind), dimension(:), pointer :: & + block_accum_time ! accumulated time for block timers + + end type + + type (timer_data), dimension(max_timers) :: & + all_timers ! timer data for all timers + + real (dbl_kind) :: & + clock_rate ! clock rate in seconds for each cycle + +!*********************************************************************** + + contains + +!*********************************************************************** + + subroutine init_ice_timers + +! This routine initializes machine parameters and timer structures +! for computing cpu time from F90 intrinsic timer functions. + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: n ! dummy loop index + +!----------------------------------------------------------------------- +! +! initialize timer structures +! 
+!----------------------------------------------------------------------- + + clock_rate = c1 + + do n=1,max_timers + all_timers(n)%name = 'unknown_timer_name' + + all_timers(n)%in_use = .false. + all_timers(n)%node_started = .false. + + all_timers(n)%num_blocks = 0 + all_timers(n)%num_nodes = 0 + all_timers(n)%num_starts = 0 + all_timers(n)%num_stops = 0 + all_timers(n)%node_cycles1 = c0 + all_timers(n)%node_cycles2 = c0 + + all_timers(n)%node_accum_time = c0 + + nullify(all_timers(n)%block_started) + nullify(all_timers(n)%block_cycles1) + nullify(all_timers(n)%block_cycles2) + nullify(all_timers(n)%block_accum_time) + end do + + call get_ice_timer(timer_total, 'Total', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_step, 'TimeLoop', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_dynamics, 'Dynamics', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_advect, 'Advection',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_column, 'Column', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_thermo, 'Thermo', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_sw, 'Shortwave',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_ponds, 'Meltponds',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_ridge, 'Ridging', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_catconv, 'Cat Conv', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_couple, 'Coupling', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_readwrite,'ReadWrite',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_diags, 'Diags ',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_hist, 'History ',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_bound, 'Bound', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_bgc, 'BGC', nblocks,distrb_info%nprocs) +#if (defined CCSMCOUPLED) + call get_ice_timer(timer_cplrecv, 'Cpl-recv', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_rcvsnd, 'Rcv->Snd', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_cplsend, 'Cpl-Send', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_sndrcv, 'Snd->Rcv', nblocks,distrb_info%nprocs) +#endif +! call get_ice_timer(timer_tmp, ' ',nblocks,distrb_info%nprocs) + +!----------------------------------------------------------------------- + + end subroutine init_ice_timers + +!*********************************************************************** + + subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) + +! This routine initializes a timer with a given name and returns a +! timer id. + + character (*), intent(in) :: & + name_choice ! input name for this timer + + integer (int_kind), intent(in) :: & + num_nodes, &! number of nodes(tasks) using this timer + num_blocks ! number of blocks using this timer + ! (can be =1 if timer called outside + ! threaded region) + + integer (int_kind), intent(out) :: & + timer_id ! timer number assigned to this timer + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + n, &! dummy loop index + srch_error ! error flag for search + +!----------------------------------------------------------------------- +! +! search for next free timer +! +!----------------------------------------------------------------------- + + srch_error = 1 + + srch_loop: do n=1,max_timers + if (.not. 
all_timers(n)%in_use) then + srch_error = 0 + timer_id = n + + all_timers(n)%name = ' ' + all_timers(n)%name = name_choice + all_timers(n)%in_use = .true. + all_timers(n)%num_blocks = num_blocks + all_timers(n)%num_nodes = num_nodes + + allocate(all_timers(n)%block_started (num_blocks), & + all_timers(n)%block_cycles1 (num_blocks), & + all_timers(n)%block_cycles2 (num_blocks), & + all_timers(n)%block_accum_time(num_blocks)) + + all_timers(n)%block_started = .false. + all_timers(n)%block_cycles1 = c0 + all_timers(n)%block_cycles2 = c0 + all_timers(n)%block_accum_time = c0 + + exit srch_loop + endif + end do srch_loop + + if (srch_error /= 0) & + call abort_ice('get_ice_timer: Exceeded maximum number of timers') + + +!----------------------------------------------------------------------- + + end subroutine get_ice_timer + +!*********************************************************************** + + subroutine ice_timer_clear(timer_id) + +! This routine resets the time for a timer which has already been +! defined. NOTE: This routine must be called from outside a threaded +! region to ensure correct reset of block timers. + + integer (int_kind), intent(in) :: & + timer_id ! timer number + +!----------------------------------------------------------------------- +! +! if the timer has been defined, reset all times to 0 +! otherwise exit with an error +! +!----------------------------------------------------------------------- + + if (all_timers(timer_id)%in_use) then + all_timers(timer_id)%node_started = .false. + all_timers(timer_id)%num_starts = 0 + all_timers(timer_id)%num_stops = 0 + all_timers(timer_id)%node_cycles1 = c0 + all_timers(timer_id)%node_cycles2 = c0 + + all_timers(timer_id)%node_accum_time = c0 + + all_timers(timer_id)%block_started(:) = .false. + all_timers(timer_id)%block_cycles1(:) = c0 + all_timers(timer_id)%block_cycles2(:) = c0 + all_timers(timer_id)%block_accum_time(:) = c0 + else + call abort_ice & + ('ice_timer_clear: attempt to reset undefined timer') + + endif + +!----------------------------------------------------------------------- + + end subroutine ice_timer_clear + +!*********************************************************************** + + subroutine ice_timer_start(timer_id, block_id) + +! This routine starts a given node timer if it has not already +! been started by another thread. If block information is available, +! the appropriate block timer is also started. + + integer (int_kind), intent(in) :: & + timer_id ! timer number + + integer (int_kind), intent(in), optional :: & + block_id ! optional block id for this block + ! this must be the actual local address + ! of the block in the distribution + ! from which it is called + ! (if timer called outside of block + ! region, no block info required) + + double precision MPI_WTIME + external MPI_WTIME + +!----------------------------------------------------------------------- +! +! if timer is defined, start it up +! +!----------------------------------------------------------------------- + + if (all_timers(timer_id)%in_use) then + + !*** + !*** if called from within a block loop, start block timers + !*** + + if (present(block_id)) then + + !*** if block timer already started, stop it first + + if (all_timers(timer_id)%block_started(block_id)) & + call ice_timer_stop(timer_id, block_id) + + !*** start block timer + + all_timers(timer_id)%block_started(block_id) = .true. + all_timers(timer_id)%block_cycles1(block_id) = MPI_WTIME() + + !*** start node timer if not already started by + !*** another thread. 
if already started, keep track + !*** of number of start requests in order to match + !*** start and stop requests + + !$OMP CRITICAL + + if (.not. all_timers(timer_id)%node_started) then + all_timers(timer_id)%node_started = .true. + all_timers(timer_id)%num_starts = 1 + all_timers(timer_id)%num_stops = 0 + all_timers(timer_id)%node_cycles1 = MPI_WTIME() + else + all_timers(timer_id)%num_starts = & + all_timers(timer_id)%num_starts + 1 + endif + + !$OMP END CRITICAL + + !*** + !*** if called from outside a block loop, start node timer + !*** + + else + + !*** stop timer if already started + if (all_timers(timer_id)%node_started) & + call ice_timer_stop(timer_id) + + !*** start node timer + + all_timers(timer_id)%node_started = .true. + all_timers(timer_id)%node_cycles1 = MPI_WTIME() + + endif + else + call abort_ice & + ('ice_timer_start: attempt to start undefined timer') + + endif + +!----------------------------------------------------------------------- + + end subroutine ice_timer_start + +!*********************************************************************** + + subroutine ice_timer_stop(timer_id, block_id) + +! This routine stops a given node timer if appropriate. If block +! information is available the appropriate block timer is also stopped. + + integer (int_kind), intent(in) :: & + timer_id ! timer number + + integer (int_kind), intent(in), optional :: & + block_id ! optional block id for this block + ! this must be the actual local address + ! of the block in the distribution + ! from which it is called + ! (if timer called outside of block + ! region, no block info required) + + double precision MPI_WTIME + external MPI_WTIME + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + real (dbl_kind) :: & + cycles1, cycles2 ! temps to hold cycle info before correction + +!----------------------------------------------------------------------- +! +! get end cycles +! +!----------------------------------------------------------------------- + + cycles2 = MPI_WTIME() + +!----------------------------------------------------------------------- +! +! if timer is defined, stop it +! +!----------------------------------------------------------------------- + + if (all_timers(timer_id)%in_use) then + + !*** + !*** if called from within a block loop, stop block timer + !*** + + if (present(block_id)) then + + all_timers(timer_id)%block_started(block_id) = .false. + + cycles1 = all_timers(timer_id)%block_cycles1(block_id) + all_timers(timer_id)%block_accum_time(block_id) = & + all_timers(timer_id)%block_accum_time(block_id) + & + clock_rate*(cycles2 - cycles1) + + !*** stop node timer if number of requested stops + !*** matches the number of starts (to avoid stopping + !*** a node timer started by multiple threads) + + cycles1 = all_timers(timer_id)%node_cycles1 + + !$OMP CRITICAL + + all_timers(timer_id)%num_stops = & + all_timers(timer_id)%num_stops + 1 + + if (all_timers(timer_id)%num_starts == & + all_timers(timer_id)%num_stops) then + + all_timers(timer_id)%node_started = .false. + all_timers(timer_id)%node_accum_time = & + all_timers(timer_id)%node_accum_time + & + clock_rate*(cycles2 - cycles1) + + all_timers(timer_id)%num_starts = 0 + all_timers(timer_id)%num_stops = 0 + + endif + + !$OMP END CRITICAL + + !*** + !*** if called from outside a block loop, stop node timer + !*** + + else + + all_timers(timer_id)%node_started = .false. 
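+ ! cycles1 and cycles2 hold MPI_WTIME values in seconds; clock_rate is
+ ! set to c1 in init_ice_timers, so the product below simply adds the
+ ! elapsed wall-clock time to the accumulated node time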
+ cycles1 = all_timers(timer_id)%node_cycles1 + + all_timers(timer_id)%node_accum_time = & + all_timers(timer_id)%node_accum_time + & + clock_rate*(cycles2 - cycles1) + + endif + else + call abort_ice & + ('ice_timer_stop: attempt to stop undefined timer') + + endif + +!----------------------------------------------------------------------- + + end subroutine ice_timer_stop + +!*********************************************************************** + + subroutine ice_timer_print(timer_id,stats) + +! Prints the accumulated time for a given timer and optional +! statistics for that timer. It is assumed that this routine +! is called outside of a block loop. + + integer (int_kind), intent(in) :: & + timer_id ! timer number + + logical (log_kind), intent(in), optional :: & + stats ! if true, print statistics for node + ! and block times for this timer + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + n,icount, & ! dummy loop index and counter + nBlocks + + logical (log_kind) :: & + lrestart_timer ! flag to restart timer if timer is running + ! when this routine is called + + real (dbl_kind) :: & + local_time, &! temp space for holding local timer results + min_time, &! minimum accumulated time + max_time, &! maximum accumulated time + mean_time ! mean accumulated time + + character (41), parameter :: & + timer_format = "('Timer ',i3,': ',a9,f11.2,' seconds')" + + character (49), parameter :: & + stats_fmt1 = "(' Timer stats (node): min = ',f11.2,' seconds')",& + stats_fmt2 = "(' max = ',f11.2,' seconds')",& + stats_fmt3 = "(' mean= ',f11.2,' seconds')",& + stats_fmt4 = "(' Timer stats(block): min = ',f11.2,' seconds')" + +!----------------------------------------------------------------------- +! +! if timer has been defined, check to see whether it is currently +! running. If it is, stop the timer and print the info. +! +!----------------------------------------------------------------------- + + if (all_timers(timer_id)%in_use) then + if (all_timers(timer_id)%node_started) then + call ice_timer_stop(timer_id) + lrestart_timer = .true. + else + lrestart_timer = .false. 
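+ ! (if the timer was running it has been stopped above so that
+ !  node_accum_time is read as a consistent snapshot; lrestart_timer
+ !  triggers a restart once printing is complete)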
+ endif + + !*** Find max node time and print that time as default timer + !*** result + + if (my_task < all_timers(timer_id)%num_nodes) then + local_time = all_timers(timer_id)%node_accum_time + else + local_time = c0 + endif + max_time = global_maxval(local_time,distrb_info) + + if (my_task == master_task) then + write (nu_diag,timer_format) timer_id, & + trim(all_timers(timer_id)%name),max_time + endif + + if (present(stats)) then + if (stats) then + + !*** compute and print statistics for node timer + + min_time = global_minval(local_time,distrb_info) + mean_time = global_sum(local_time,distrb_info)/ & + real(all_timers(timer_id)%num_nodes,kind=dbl_kind) + if (my_task == master_task) then + write (nu_diag,stats_fmt1) min_time + write (nu_diag,stats_fmt2) max_time + write (nu_diag,stats_fmt3) mean_time + endif + + !*** compute and print statistics for block timers + !*** min block time + + local_time = bignum + do n=1,all_timers(timer_id)%num_blocks + local_time = min(local_time, & + all_timers(timer_id)%block_accum_time(n)) + end do + min_time = global_minval(local_time,distrb_info) + if (min_time == bignum) min_time = c0 + + !*** max block time + + local_time = -bignum + do n=1,all_timers(timer_id)%num_blocks + local_time = max(local_time, & + all_timers(timer_id)%block_accum_time(n)) + end do + max_time = global_maxval(local_time,distrb_info) + if (max_time == -bignum) min_time = c0 + + !*** mean block time + + local_time = c0 + nBlocks = all_timers(timer_id)%num_blocks + do n=1,nBlocks + local_time = local_time + & + all_timers(timer_id)%block_accum_time(n) + end do + icount = global_sum(nBlocks, distrb_info) + if (icount > 0) mean_time=global_sum(local_time,distrb_info)& + /real(icount,kind=dbl_kind) + + if (my_task == master_task) then + write (nu_diag,stats_fmt4) min_time + write (nu_diag,stats_fmt2) max_time + write (nu_diag,stats_fmt3) mean_time + endif + + endif + endif + + if (lrestart_timer) call ice_timer_start(timer_id) + else + call abort_ice & + ('ice_timer_print: attempt to print undefined timer') + + endif + +!----------------------------------------------------------------------- + + end subroutine ice_timer_print + +!*********************************************************************** + + subroutine ice_timer_print_all(stats) + +! Prints the accumulated time for a all timers and optional +! statistics for that timer. It is assumed that this routine +! is called outside of a block loop. + + logical (log_kind), intent(in), optional :: & + stats ! if true, print statistics for node + ! and block times for this timer + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: n ! dummy loop index + +!----------------------------------------------------------------------- +! +! loop through timers anc call timer_print for each defined timer +! +!----------------------------------------------------------------------- + + if (my_task == master_task) then + write(nu_diag,'(/,a19,/)') 'Timing information:' + endif + + do n=1,max_timers + if (all_timers(n)%in_use) then + if (present(stats)) then + call ice_timer_print(n,stats) + else + call ice_timer_print(n) + endif + endif + end do + +!----------------------------------------------------------------------- + + end subroutine ice_timer_print_all + +!*********************************************************************** + + subroutine ice_timer_check(timer_id,block_id) + +! 
This routine checks a given timer by stopping and restarting the +! timer. This is primarily used to periodically accumulate time in +! the timer to prevent timer cycles from wrapping around max_cycles. + + integer (int_kind), intent(in) :: & + timer_id ! timer number + + integer (int_kind), intent(in), optional :: & + block_id ! optional block id for this block + ! this must be the actual local address + ! of the block in the distribution + ! from which it is called + ! (if timer called outside of block + ! region, no block info required) + +!----------------------------------------------------------------------- +! +! stop and restart the requested timer +! +!----------------------------------------------------------------------- + + if (present(block_id)) then + call ice_timer_stop (timer_id,block_id) + call ice_timer_start(timer_id,block_id) + else + call ice_timer_stop (timer_id) + call ice_timer_start(timer_id) + endif + +!----------------------------------------------------------------------- + + end subroutine ice_timer_check + +!*********************************************************************** + + end module ice_timers + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/source/ice_aerosol.F90 b/source/ice_aerosol.F90 new file mode 100755 index 00000000..e3e8821b --- /dev/null +++ b/source/ice_aerosol.F90 @@ -0,0 +1,736 @@ +! SVN:$Id: ice_aerosol.F90 820 2014-08-26 19:08:29Z eclare $ +!======================================================================= + +! Aerosol tracer within sea ice +! +! authors Marika Holland, NCAR +! David Bailey, NCAR + + module ice_aerosol + + use ice_kinds_mod + use ice_constants + use ice_fileunits, only: nu_diag + use ice_restart_shared, only: lenstr, restart_dir, restart_file, & + pointer_file, runtype + use ice_communicate, only: my_task, master_task + use ice_exit, only: abort_ice + + implicit none + + private + public :: init_aerosol, faero_default, update_aerosol, write_restart_aero + + logical (kind=log_kind), public :: & + restart_aero ! if .true., read aerosol tracer restart file + +!======================================================================= + + contains + +!======================================================================= + +! Initialize ice aerosol tracer (call prior to reading restart data) + + subroutine init_aerosol + + use ice_domain_size, only: n_aero + use ice_state, only: trcrn, nt_aero + + if (trim(runtype) == 'continue') restart_aero = .true. + if (restart_aero) then + call read_restart_aero + else + trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,:) = c0 + endif + + end subroutine init_aerosol + +!======================================================================= + +! constant values for atmospheric aerosols +! +! authors: Elizabeth Hunke, LANL + + subroutine faero_default + + use ice_flux, only: faero_atm + + faero_atm(:,:,1,:) = 1.e-15_dbl_kind ! W/m^2 s + faero_atm(:,:,2,:) = 1.e-13_dbl_kind + faero_atm(:,:,3,:) = 1.e-11_dbl_kind + + end subroutine faero_default + +!======================================================================= + +! read atmospheric aerosols +! +! authors: Elizabeth Hunke, LANL + + subroutine faero_data + + use ice_calendar, only: month, mday, istep, sec + use ice_domain_size, only: max_blocks + use ice_blocks, only: nx_block, ny_block + use ice_flux, only: faero_atm + use ice_forcing, only: interp_coeff_monthly, read_clim_data_nc, interpolate_data + +#ifdef ncdf + ! 
local parameters + + real (kind=dbl_kind), dimension(nx_block,ny_block,2,max_blocks), & + save :: & + aero1_data , & ! field values at 2 temporal data points + aero2_data , & ! field values at 2 temporal data points + aero3_data ! field values at 2 temporal data points + + character (char_len_long) :: & + aero_file, & ! netcdf filename + fieldname ! field name in netcdf file + + integer (kind=int_kind) :: & + ixm,ixp , & ! record numbers for neighboring months + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth ! middle day of month + + logical (kind=log_kind) :: readm + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + midmonth = 15 ! data is given on 15th of every month +! midmonth = fix(p5 * real(daymo(month))) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = 99 ! other two points will be used + if (mday < midmonth) ixp = 99 + + ! Determine whether interpolation will use values 1:2 or 2:3 + ! recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + ! Read 2 monthly values + readm = .false. + if (istep==1 .or. (mday==midmonth .and. sec==0)) readm = .true. + +! aero_file = trim(atm_data_dir)//'faero.nc' + aero_file = '/usr/projects/climate/eclare/DATA/gx1v3/faero.nc' + + fieldname='faero_atm001' + call read_clim_data_nc (readm, 0, ixm, month, ixp, & + aero_file, fieldname, aero1_data, & + field_loc_center, field_type_scalar) + + fieldname='faero_atm002' + call read_clim_data_nc (readm, 0, ixm, month, ixp, & + aero_file, fieldname, aero2_data, & + field_loc_center, field_type_scalar) + + fieldname='faero_atm003' + call read_clim_data_nc (readm, 0, ixm, month, ixp, & + aero_file, fieldname, aero3_data, & + field_loc_center, field_type_scalar) + + call interpolate_data (aero1_data, faero_atm(:,:,1,:)) ! W/m^2 s + call interpolate_data (aero2_data, faero_atm(:,:,2,:)) + call interpolate_data (aero3_data, faero_atm(:,:,3,:)) + + where (faero_atm(:,:,:,:) > 1.e20) faero_atm(:,:,:,:) = c0 + +#endif + + end subroutine faero_data + +!======================================================================= + +! Increase aerosol in ice or snow surface due to deposition +! and vertical cycling + + subroutine update_aerosol (nx_block, ny_block, & + dt, icells, & + indxi, indxj, & + meltt, melts, & + meltb, congel, & + snoice, & + fsnow, & + trcrn, & + aice_old, & + vice_old, vsno_old, & + vicen, vsnon, aicen, & + faero_atm, faero_ocn) + + use ice_domain_size, only: max_ntrcr, nilyr, nslyr, n_aero, max_aero + use ice_itd, only: hs_min + use ice_state, only: nt_aero + use ice_shortwave, only: hi_ssl, hs_ssl + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with ice present + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + real (kind=dbl_kind), intent(in) :: & + dt ! 
time step + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in) :: & + meltt, & ! thermodynamic melt/growth rates + melts, & + meltb, & + congel, & + snoice, & + fsnow, & + vicen, & ! ice volume (m) + vsnon, & ! snow volume (m) + aicen, & ! ice area fraction + aice_old, & ! values prior to thermodynamic changes + vice_old, & + vsno_old + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_aero), & + intent(in) :: & + faero_atm ! aerosol deposition rate (W/m^2 s) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_aero), & + intent(inout) :: & + faero_ocn ! aerosol flux to ocean (W/m^2 s) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_ntrcr), & + intent(inout) :: & + trcrn ! ice/snow tracer array + + ! local variables + + integer (kind=int_kind) :: i, j, ij, k, n + + real (kind=dbl_kind) :: & + dzssl, dzssl_new, & ! snow ssl thickness + dzint, dzint_new, & ! snow interior thickness + dzssli, dzssli_new, & ! ice ssl thickness + dzinti, dzinti_new, & ! ice interior thickness + dznew, & ! tracks thickness changes + hs, hi, & ! snow/ice thickness (m) + dhs_evap, dhi_evap, & ! snow/ice thickness change due to evap + dhs_melts, dhi_meltt, & ! ... due to surface melt + dhs_snoice, dhi_snoice, & ! ... due to snow-ice formation + dhi_congel, dhi_meltb, & ! ... due to bottom growth, melt + hslyr, hilyr, & ! snow, ice layer thickness (m) + hslyr_old, hilyr_old, & ! old snow, ice layer thickness (m) + hs_old, hi_old, & ! old snow, ice thickness (m) + sloss1, sloss2, & ! aerosol mass loss (kg/m^2) + ar ! 1/aicen(i,j) + + real (kind=dbl_kind), dimension(max_aero) :: & + kscav, kscavsi , & ! scavenging by melt water + aerotot, aerotot0, & ! for conservation check + focn_old ! for conservation check + + real (kind=dbl_kind), dimension(max_aero,2) :: & + aerosno, aeroice, & ! kg/m^2 + aerosno0, aeroice0 ! for diagnostic prints + + data kscav / .03_dbl_kind, .20_dbl_kind,& + .02_dbl_kind,.02_dbl_kind,.01_dbl_kind,.01_dbl_kind / + data kscavsi / .03_dbl_kind, .20_dbl_kind,& + .02_dbl_kind,.02_dbl_kind,.01_dbl_kind,.01_dbl_kind / + + ! loop over grid cells with ice at beginning of time step + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !------------------------------------------------------------------- + ! initialize + !------------------------------------------------------------------- + focn_old(:) = faero_ocn(i,j,:) + aerosno (:,:) = c0 + aeroice (:,:) = c0 + aerosno0(:,:) = c0 + aeroice0(:,:) = c0 + + hs_old = vsno_old(i,j)/aice_old(i,j) + hi_old = vice_old(i,j)/aice_old(i,j) + hslyr_old = hs_old/real(nslyr,kind=dbl_kind) + hilyr_old = hi_old/real(nilyr,kind=dbl_kind) + + dzssl = min(hslyr_old/c2, hs_ssl) + dzssli = min(hilyr_old/c2, hi_ssl) + dzint = hs_old - dzssl + dzinti = hi_old - dzssli + + if (aicen(i,j) > c0) then + ar = c1/aicen(i,j) + hs = vsnon(i,j)*ar + hi = vicen(i,j)*ar + dhs_melts = -melts(i,j)*ar + dhi_snoice = snoice(i,j)*ar + dhs_snoice = dhi_snoice*rhoi/rhos + dhi_meltt = -meltt(i,j)*ar + dhi_meltb = -meltb(i,j)*ar + dhi_congel = congel(i,j)*ar + else ! 
ice disappeared during time step + hs = vsnon(i,j)/aice_old(i,j) + hi = vicen(i,j)/aice_old(i,j) + dhs_melts = -melts(i,j)/aice_old(i,j) + dhi_snoice = snoice(i,j)/aice_old(i,j) + dhs_snoice = dhi_snoice*rhoi/rhos + dhi_meltt = -meltt(i,j)/aice_old(i,j) + dhi_meltb = -meltb(i,j)/aice_old(i,j) + dhi_congel = congel(i,j)/aice_old(i,j) + endif + + dhs_evap = hs - (hs_old + dhs_melts - dhs_snoice & + + fsnow(i,j)/rhos*dt) + dhi_evap = hi - (hi_old + dhi_meltt + dhi_meltb & + + dhi_congel + dhi_snoice) + + ! trcrn(nt_aero) has units kg/m^3 + do k=1,n_aero + aerosno (k,:) = & + trcrn(i,j,nt_aero+(k-1)*4 :nt_aero+(k-1)*4+1)*vsno_old(i,j) + aeroice (k,:) = & + trcrn(i,j,nt_aero+(k-1)*4+2:nt_aero+(k-1)*4+3)*vice_old(i,j) + aerosno0(k,:) = aerosno(k,:) + aeroice0(k,:) = aeroice(k,:) + aerotot0(k) = aerosno(k,2) + aerosno(k,1) & + + aeroice(k,2) + aeroice(k,1) + enddo + + !------------------------------------------------------------------- + ! evaporation + !------------------------------------------------------------------- + dzint = dzint + min(dzssl + dhs_evap, c0) + dzinti = dzinti + min(dzssli + dhi_evap, c0) + dzssl = max(dzssl + dhs_evap, c0) + dzssli = max(dzssli + dhi_evap, c0) + + !------------------------------------------------------------------- + ! basal ice growth + !------------------------------------------------------------------- + dzinti = dzinti + dhi_congel + + !------------------------------------------------------------------- + ! surface snow melt + !------------------------------------------------------------------- + if (-dhs_melts > puny) then + do k = 1, n_aero + sloss1 = c0 + sloss2 = c0 + if (dzssl > puny) & + sloss1 = kscav(k)*aerosno(k,1) & + *min(-dhs_melts,dzssl)/dzssl + aerosno(k,1) = aerosno(k,1) - sloss1 + if (dzint > puny) & + sloss2 = kscav(k)*aerosno(k,2) & + *max(-dhs_melts-dzssl,c0)/dzint + aerosno(k,2) = aerosno(k,2) - sloss2 + faero_ocn(i,j,k) = faero_ocn(i,j,k) + (sloss1+sloss2)/dt + enddo ! n_aero + + ! update snow thickness + dzint=dzint+min(dzssl+dhs_melts, c0) + dzssl=max(dzssl+dhs_melts, c0) + + if ( dzssl <= puny ) then ! ssl melts away + aerosno(:,2) = aerosno(:,1) + aerosno(:,2) + aerosno(:,1) = c0 + dzssl = max(dzssl, c0) + endif + if (dzint <= puny ) then ! all snow melts away + aeroice(:,1) = aeroice(:,1) & + + aerosno(:,1) + aerosno(:,2) + aerosno(:,:) = c0 + dzint = max(dzint, c0) + endif + endif + + !------------------------------------------------------------------- + ! surface ice melt + !------------------------------------------------------------------- + if (-dhi_meltt > puny) then + do k = 1, n_aero + sloss1 = c0 + sloss2 = c0 + if (dzssli > puny) & + sloss1 = kscav(k)*aeroice(k,1) & + *min(-dhi_meltt,dzssli)/dzssli + aeroice(k,1) = aeroice(k,1) - sloss1 + if (dzinti > puny) & + sloss2 = kscav(k)*aeroice(k,2) & + *max(-dhi_meltt-dzssli,c0)/dzinti + aeroice(k,2) = aeroice(k,2) - sloss2 + faero_ocn(i,j,k) = faero_ocn(i,j,k) + (sloss1+sloss2)/dt + enddo + + dzinti = dzinti + min(dzssli+dhi_meltt, c0) + dzssli = max(dzssli+dhi_meltt, c0) + if (dzssli <= puny) then ! ssl ice melts away + do k = 1, n_aero + aeroice(k,2) = aeroice(k,1) + aeroice(k,2) + aeroice(k,1) = c0 + enddo + dzssli = max(dzssli, c0) + endif + if (dzinti <= puny) then ! all ice melts away + do k = 1, n_aero + faero_ocn(i,j,k) = faero_ocn(i,j,k) & + + (aeroice(k,1)+aeroice(k,2))/dt + aeroice(k,:)=c0 + enddo + dzinti = max(dzinti, c0) + endif + endif + + !------------------------------------------------------------------- + ! basal ice melt. 
Assume all aero lost in basal melt + !------------------------------------------------------------------- + if (-dhi_meltb > puny) then + do k=1,n_aero + sloss1=c0 + sloss2=c0 + if (dzssli > puny) & + sloss1 = max(-dhi_meltb-dzinti, c0) & + *aeroice(k,1)/dzssli + aeroice(k,1) = aeroice(k,1) - sloss1 + if (dzinti > puny) & + sloss2 = min(-dhi_meltb, dzinti) & + *aeroice(k,2)/dzinti + aeroice(k,2) = aeroice(k,2) - sloss2 + faero_ocn(i,j,k) = faero_ocn(i,j,k) + (sloss1+sloss2)/dt + enddo + + dzssli = dzssli + min(dzinti+dhi_meltb, c0) + dzinti = max(dzinti+dhi_meltb, c0) + endif + + !------------------------------------------------------------------- + ! snowfall + !------------------------------------------------------------------- + if (fsnow(i,j) > c0) dzssl = dzssl + fsnow(i,j)/rhos*dt + + !------------------------------------------------------------------- + ! snow-ice formation + !------------------------------------------------------------------- + if (dhs_snoice > puny) then + do k = 1, n_aero + sloss1 = c0 + sloss2 = c0 + if (dzint > puny) & + sloss2 = min(dhs_snoice, dzint) & + *aerosno(k,2)/dzint + aerosno(k,2) = aerosno(k,2) - sloss2 + if (dzssl > puny) & + sloss1 = max(dhs_snoice-dzint, c0) & + *aerosno(k,1)/dzssl + aerosno(k,1) = aerosno(k,1) - sloss1 + aeroice(k,1) = aeroice(k,1) & + + (c1-kscavsi(k))*(sloss2+sloss1) + faero_ocn(i,j,k) = faero_ocn(i,j,k) & + + kscavsi(k)*(sloss2+sloss1)/dt + enddo + dzssl = dzssl - max(dhs_snoice-dzint, c0) + dzint = max(dzint-dhs_snoice, c0) + dzssli = dzssli + dhi_snoice + endif + + !------------------------------------------------------------------- + ! aerosol deposition + !------------------------------------------------------------------- + if (aicen(i,j) > c0) then + hs = vsnon(i,j) * ar + else + hs = c0 + endif + if (hs > hs_min) then ! should this really be hs_min or 0? + ! should use same hs_min value as in radiation + do k=1,n_aero + aerosno(k,1) = aerosno(k,1) & + + faero_atm(i,j,k)*dt*aicen(i,j) + enddo + else + do k=1,n_aero + aeroice(k,1) = aeroice(k,1) & + + faero_atm(i,j,k)*dt*aicen(i,j) + enddo + endif + + !------------------------------------------------------------------- + ! redistribute aerosol within vertical layers + !------------------------------------------------------------------- + if (aicen(i,j) > c0) then + hs = vsnon(i,j) * ar ! new snow thickness + hi = vicen(i,j) * ar ! new ice thickness + else + hs = c0 + hi = c0 + endif + if (dzssl <= puny) then ! nothing in SSL + do k=1,n_aero + aerosno(k,2) = aerosno(k,2) + aerosno(k,1) + aerosno(k,1) = c0 + enddo + endif + if (dzint <= puny) then ! nothing in Snow Int + do k = 1, n_aero + aeroice(k,1) = aeroice(k,1) + aerosno(k,2) + aerosno(k,2) = c0 + enddo + endif + if (dzssli <= puny) then ! nothing in Ice SSL + do k = 1, n_aero + aeroice(k,2) = aeroice(k,2) + aeroice(k,1) + aeroice(k,1) = c0 + enddo + endif + + if (dzinti <= puny) then ! nothing in Ice INT + do k = 1, n_aero + faero_ocn(i,j,k) = faero_ocn(i,j,k) & + + (aeroice(k,1)+aeroice(k,2))/dt + aeroice(k,:)=c0 + enddo + endif + + hslyr = hs/real(nslyr,kind=dbl_kind) + hilyr = hi/real(nilyr,kind=dbl_kind) + dzssl_new = min(hslyr/c2, hs_ssl) + dzssli_new = min(hilyr/c2, hi_ssl) + dzint_new = hs - dzssl_new + dzinti_new = hi - dzssli_new + + if (hs > hs_min) then + do k = 1, n_aero + dznew = min(dzssl_new-dzssl, c0) + sloss1 = c0 + if (dzssl > puny) & + sloss1 = dznew*aerosno(k,1)/dzssl ! 
not neccesarily a loss + dznew = max(dzssl_new-dzssl, c0) + if (dzint > puny) & + sloss1 = sloss1 + aerosno(k,2)*dznew/dzint + aerosno(k,1) = aerosno(k,1) + sloss1 + aerosno(k,2) = aerosno(k,2) - sloss1 + enddo + else + aeroice(:,1) = aeroice(:,1) & + + aerosno(:,1) + aerosno(:,2) + aerosno(:,:) = c0 + endif + + if (vicen(i,j) > puny) then ! may want a limit on hi instead? + do k = 1, n_aero + sloss2 = c0 + dznew = min(dzssli_new-dzssli, c0) + if (dzssli > puny) & + sloss2 = dznew*aeroice(k,1)/dzssli + dznew = max(dzssli_new-dzssli, c0) + if (dzinti > puny) & + sloss2 = sloss2 + aeroice(k,2)*dznew/dzinti + aeroice(k,1) = aeroice(k,1) + sloss2 + aeroice(k,2) = aeroice(k,2) - sloss2 + enddo + else + faero_ocn(i,j,:) = faero_ocn(i,j,:) + (aeroice(:,1)+aeroice(:,2))/dt + aeroice(:,:) = c0 + endif + + !------------------------------------------------------------------- + ! check conservation + !------------------------------------------------------------------- + do k = 1, n_aero + aerotot(k) = aerosno(k,2) + aerosno(k,1) & + + aeroice(k,2) + aeroice(k,1) + if ((aerotot(k)-aerotot0(k)) & + - ( faero_atm(i,j,k)*aicen(i,j) & + - (faero_ocn(i,j,k)-focn_old(k)) )*dt > puny) then + + write(nu_diag,*) 'aerosol tracer: ',k + write(nu_diag,*) 'aerotot-aerotot0 ',aerotot(k)-aerotot0(k) + write(nu_diag,*) 'faero_atm-faero_ocn ', & + (faero_atm(i,j,k)*aicen(i,j)-(faero_ocn(i,j,k)-focn_old(k)))*dt + endif + enddo + + !------------------------------------------------------------------- + ! reload tracers + !------------------------------------------------------------------- + if (vicen(i,j) > puny) & + aeroice(:,:) = aeroice(:,:)/vicen(i,j) + if (vsnon(i,j) > puny) & + aerosno(:,:) = aerosno(:,:)/vsnon(i,j) + do k = 1, n_aero + do n = 1,2 + trcrn(i,j,nt_aero+(k-1)*4+n-1)=aerosno(k,n) + trcrn(i,j,nt_aero+(k-1)*4+n+1)=aeroice(k,n) + enddo + enddo + + !------------------------------------------------------------------- + ! check for negative values + !------------------------------------------------------------------- + if (trcrn(i,j,nt_aero ) < -puny .or. & + trcrn(i,j,nt_aero+1) < -puny .or. & + trcrn(i,j,nt_aero+2) < -puny .or. 
& + trcrn(i,j,nt_aero+3) < -puny) then + + write(nu_diag,*) 'MH aerosol negative in aerosol code' + write(nu_diag,*) 'MH INT neg in aerosol my_task = ',& + my_task, & + ' printing point = ',n, & + ' i and j = ',i,j + write(nu_diag,*) 'MH Int Neg aero snowssl= ' ,aerosno0(1,1) + write(nu_diag,*) 'MH Int Neg aero new snowssl= ',aerosno (1,1) + write(nu_diag,*) 'MH Int Neg aero snowint= ' ,aerosno0(1,2) + write(nu_diag,*) 'MH Int Neg aero new snowint= ',aerosno (1,2) + write(nu_diag,*) 'MH Int Neg aero ice_ssl= ' ,aeroice0(1,1) + write(nu_diag,*) 'MH Int Neg aero new ice_ssl= ',aeroice (1,1) + write(nu_diag,*) 'MH Int Neg aero ice_int= ' ,aeroice0(1,2) + write(nu_diag,*) 'MH Int Neg aero new ice_int= ',aeroice (1,2) + write(nu_diag,*) 'MH Int Neg aero aicen= ' ,aicen (i,j) + write(nu_diag,*) 'MH Int Neg aero vicen= ' ,vicen (i,j) + write(nu_diag,*) 'MH Int Neg aero vsnon= ' ,vsnon (i,j) + write(nu_diag,*) 'MH Int Neg aero viceold= ' ,vice_old(i,j) + write(nu_diag,*) 'MH Int Neg aero vsnoold= ' ,vsno_old(i,j) + write(nu_diag,*) 'MH Int Neg aero melts= ' ,melts (i,j) + write(nu_diag,*) 'MH Int Neg aero meltt= ' ,meltt (i,j) + write(nu_diag,*) 'MH Int Neg aero meltb= ' ,meltb (i,j) + write(nu_diag,*) 'MH Int Neg aero congel= ' ,congel (i,j) + write(nu_diag,*) 'MH Int Neg aero snoice= ' ,snoice (i,j) + write(nu_diag,*) 'MH Int Neg aero evap sno?= ' ,dhs_evap + write(nu_diag,*) 'MH Int Neg aero evap ice?= ' ,dhi_evap + write(nu_diag,*) 'MH Int Neg aero fsnow= ' ,fsnow (i,j) + write(nu_diag,*) 'MH Int Neg aero faero_atm= ' ,faero_atm(i,j,1) + write(nu_diag,*) 'MH Int Neg aero faero_ocn= ' ,faero_ocn(i,j,1) + + trcrn(i,j,nt_aero ) = max(trcrn(i,j,nt_aero ), c0) + trcrn(i,j,nt_aero+1) = max(trcrn(i,j,nt_aero+1), c0) + trcrn(i,j,nt_aero+2) = max(trcrn(i,j,nt_aero+2), c0) + trcrn(i,j,nt_aero+3) = max(trcrn(i,j,nt_aero+3), c0) + endif + enddo + + end subroutine update_aerosol + +!======================================================================= +!---! these subroutines write/read Fortran unformatted data files .. +!======================================================================= + +! Dumps all values needed for restarting +! +! authors Elizabeth Hunke, LANL (original version) +! David Bailey, NCAR +! Marika Holland, NCAR + + subroutine write_restart_aero() + + use ice_domain_size, only: ncat, n_aero + use ice_restart, only: write_restart_field + use ice_state, only: trcrn, nt_aero + use ice_fileunits, only: nu_dump_aero + + ! local variables + + integer (kind=int_kind) :: & + k ! loop indices + + logical (kind=log_kind) :: diag + + character (len=3) :: nchar + + !----------------------------------------------------------------- + + if (my_task == master_task) write(nu_diag,*) 'write_restart_aero (aerosols)' + + diag = .true. + + do k = 1, n_aero + write(nchar,'(i3.3)') k + call write_restart_field(nu_dump_aero,0, & + trcrn(:,:,nt_aero +(k-1)*4,:,:),'ruf8','aerosnossl'//nchar, & + ncat,diag) + call write_restart_field(nu_dump_aero,0, & + trcrn(:,:,nt_aero+1+(k-1)*4,:,:),'ruf8','aerosnoint'//nchar, & + ncat,diag) + call write_restart_field(nu_dump_aero,0, & + trcrn(:,:,nt_aero+2+(k-1)*4,:,:),'ruf8','aeroicessl'//nchar, & + ncat,diag) + call write_restart_field(nu_dump_aero,0, & + trcrn(:,:,nt_aero+3+(k-1)*4,:,:),'ruf8','aeroiceint'//nchar, & + ncat,diag) + enddo + + end subroutine write_restart_aero + +!======================================================================= + +! Reads all values needed for an ice aerosol restart +! +! authors Elizabeth Hunke, LANL (original version) +! 
David Bailey, NCAR +! Marika Holland, NCAR + + subroutine read_restart_aero() + + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: n_aero, ncat + use ice_restart, only: read_restart_field + use ice_state, only: trcrn, nt_aero + use ice_fileunits, only: nu_restart_aero + + ! local variables + + integer (kind=int_kind) :: & + k ! loop indices + + logical (kind=log_kind) :: & + diag + + character (len=3) :: nchar + + !----------------------------------------------------------------- + + if (my_task == master_task) write(nu_diag,*) 'read_restart_aero (aerosols)' + + diag = .true. + + do k = 1, n_aero + write(nchar,'(i3.3)') k + call read_restart_field(nu_restart_aero,0, & + trcrn(:,:,nt_aero +(k-1)*4,:,:),'ruf8','aerosnossl'//trim(nchar), & + ncat,diag,field_type=field_type_scalar,field_loc=field_loc_center) + call read_restart_field(nu_restart_aero,0, & + trcrn(:,:,nt_aero+1+(k-1)*4,:,:),'ruf8','aerosnoint'//trim(nchar), & + ncat,diag,field_type=field_type_scalar,field_loc=field_loc_center) + call read_restart_field(nu_restart_aero,0, & + trcrn(:,:,nt_aero+2+(k-1)*4,:,:),'ruf8','aeroicessl'//trim(nchar), & + ncat,diag,field_type=field_type_scalar,field_loc=field_loc_center) + call read_restart_field(nu_restart_aero,0, & + trcrn(:,:,nt_aero+3+(k-1)*4,:,:),'ruf8','aeroiceint'//trim(nchar), & + ncat,diag,field_type=field_type_scalar,field_loc=field_loc_center) + enddo + + end subroutine read_restart_aero + +!======================================================================= + + end module ice_aerosol + +!======================================================================= diff --git a/source/ice_age.F90 b/source/ice_age.F90 new file mode 100755 index 00000000..731ce099 --- /dev/null +++ b/source/ice_age.F90 @@ -0,0 +1,134 @@ +! SVN:$Id: ice_age.F90 746 2013-09-28 22:47:56Z eclare $ +!======================================================================= +! +! authors Elizabeth Hunke + + module ice_age + + use ice_kinds_mod + use ice_constants + + implicit none + + private + public :: init_age, increment_age, write_restart_age, read_restart_age + + logical (kind=log_kind), public :: & + restart_age ! if .true., read age tracer restart file + +!======================================================================= + + contains + +!======================================================================= + +! Initialize ice age tracer (call prior to reading restart data) + + subroutine init_age(nx_block, ny_block, ncat, iage) + + integer(kind=int_kind), intent(in) :: & + nx_block , & + ny_block , & + ncat + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(out) :: iage + + iage(:,:,:) = c0 + + end subroutine init_age + +!======================================================================= + +! Increase ice age tracer by timestep length. + + subroutine increment_age (nx_block, ny_block, & + dt, icells, & + indxi, indxj, & + iage) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with ice present + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + iage + + ! 
local variables + + integer (kind=int_kind) :: i, j, ij + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + iage(i,j) = iage(i,j) + dt + enddo + + end subroutine increment_age + +!======================================================================= + +! Dumps all values needed for restarting +! author Elizabeth C. Hunke, LANL + + subroutine write_restart_age() + + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_diag, nu_dump_age + use ice_state, only: trcrn, nt_iage + use ice_restart,only: write_restart_field + + ! local variables + + logical (kind=log_kind) :: diag + + diag = .true. + + !----------------------------------------------------------------- + + call write_restart_field(nu_dump_age,0,trcrn(:,:,nt_iage,:,:),'ruf8', & + 'iage',ncat,diag) + + end subroutine write_restart_age + +!======================================================================= + +! Reads all values needed for an ice age restart +! author Elizabeth C. Hunke, LANL + + subroutine read_restart_age() + + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_diag, nu_restart_age + use ice_state, only: trcrn, nt_iage + use ice_restart,only: read_restart_field + + ! local variables + + logical (kind=log_kind) :: & + diag + + diag = .true. + + if (my_task == master_task) write(nu_diag,*) 'min/max age (s)' + + call read_restart_field(nu_restart_age,0,trcrn(:,:,nt_iage,:,:),'ruf8', & + 'iage',ncat,diag,field_loc_center,field_type_scalar) + + end subroutine read_restart_age + +!======================================================================= + + end module ice_age + +!======================================================================= diff --git a/source/ice_algae.F90 b/source/ice_algae.F90 new file mode 100755 index 00000000..5c8ead35 --- /dev/null +++ b/source/ice_algae.F90 @@ -0,0 +1,1264 @@ +! SVN:$Id: ice_algae.F90 745 2013-09-28 18:22:36Z eclare $ +!======================================================================= +! +! Compute biogeochemistry in the skeletal layer +! +! authors: Nicole Jeffery, LANL +! Scott Elliot, LANL +! Elizabeth C. Hunke, LANL +! + module ice_algae + + use ice_kinds_mod + use ice_domain_size, only: nblyr, nilyr, max_blocks + use ice_blocks, only: nx_block, ny_block + use ice_fileunits, only: nu_diag, nu_restart_bgc, nu_rst_pointer, & + nu_dump_bgc, flush_fileunit + use ice_read_write, only: ice_open, ice_read, ice_write + use ice_communicate, only: my_task, master_task + use ice_exit, only: abort_ice + use ice_zbgc_shared ! everything + use ice_state, only: vicen, vice, trcr, ntrcr, nt_bgc_Am_sk, & + nt_bgc_C_sk, nt_bgc_chl_sk, nt_bgc_DMS_sk, nt_bgc_DMSPd_sk, & + nt_bgc_DMSPp_sk, nt_bgc_N_sk, nt_bgc_Nit_sk, nt_bgc_Sil_sk + + implicit none + + private + public :: get_forcing_bgc, bgc_diags, write_restart_bgc, & + algal_dyn, read_restart_bgc, & + skl_biogeochemistry + + real (kind=dbl_kind), parameter, private :: & + R_Si2N = 1.5_dbl_kind ! algal Si to N (mole/mole) + +!======================================================================= + + contains + +!======================================================================= +! +! Read and interpolate annual climatologies of silicate and nitrate. +! Restore model quantities to data if desired. +! +! author: Elizabeth C. 
Hunke, LANL + + subroutine get_forcing_bgc + + use ice_calendar, only: dt, istep, mday, month, sec + use ice_constants, only: field_loc_center, field_type_scalar + use ice_domain, only: nblocks + use ice_flux, only: sss + use ice_forcing, only: trestore, trest, & + read_clim_data, interpolate_data, interp_coeff_monthly + + integer (kind=int_kind) :: & + i, j, iblk , & ! horizontal indices + ixm,ixp , & ! record numbers for neighboring months + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth ! middle day of month + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & + nitdat , & ! data value toward which nitrate is restored + sildat ! data value toward which silicate is restored + + real (kind=dbl_kind), dimension(nx_block,ny_block,2,max_blocks) :: & + nit_data, & ! field values at 2 temporal data points + sil_data + + logical (kind=log_kind) :: readm + + if (trim(nit_data_type) == 'clim'.or. & + trim(sil_data_type) == 'clim') then + + nit_file = trim(bgc_data_dir)//'nitrate_WOA2005_surface_monthly' ! gx1 only + sil_file = trim(bgc_data_dir)//'silicate_WOA2005_surface_monthly' ! gx1 only + + if (my_task == master_task .and. istep == 1) then + if (trim(sil_data_type)=='clim' .AND. tr_bgc_Sil_sk) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'silicate data interpolated to timestep:' + write (nu_diag,*) trim(sil_file) + endif + if (trim(nit_data_type)=='clim' .AND. tr_bgc_Nit_sk) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'nitrate data interpolated to timestep:' + write (nu_diag,*) trim(nit_file) + if (restore_bgc) write (nu_diag,*) & + 'bgc restoring timescale (days) =', trestore + endif + endif ! my_task, istep + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + midmonth = 15 ! data is given on 15th of every month +!!! midmonth = fix(p5 * real(daymo(month))) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = -99 ! other two points will be used + if (mday < midmonth) ixp = -99 + + ! Determine whether interpolation will use values 1:2 or 2:3 + ! recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + readm = .false. + if (istep==1 .or. (mday==midmonth .and. sec==0)) readm = .true. + + endif ! sil/nit_data_type + + !------------------------------------------------------------------- + ! Read two monthly silicate values and interpolate. + ! Restore toward interpolated value. + !------------------------------------------------------------------- + + if (trim(sil_data_type)=='clim' .AND. 
tr_bgc_Sil_sk) then + call read_clim_data (readm, 0, ixm, month, ixp, & + sil_file, sil_data, & + field_loc_center, field_type_scalar) + call interpolate_data (sil_data, sildat) + + if (restore_bgc) then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sil(i,j,iblk) = sil(i,j,iblk) & + + (sildat(i,j,iblk)-sil(i,j,iblk))*dt/trest + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + + endif + + !------------------------------------------------------------------- + ! Read two monthly nitrate values and interpolate. + ! Restore toward interpolated value. + !------------------------------------------------------------------- + + if (trim(nit_data_type)=='clim' .AND. tr_bgc_Nit_sk) then + call read_clim_data (readm, 0, ixm, month, ixp, & + nit_file, nit_data, & + field_loc_center, field_type_scalar) + call interpolate_data (nit_data, nitdat) + + if (restore_bgc) then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + nit(i,j,iblk) = nit(i,j,iblk) & + + (nitdat(i,j,iblk)-nit(i,j,iblk))*dt/trest + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + + elseif (trim(nit_data_type) == 'sss' .AND. tr_bgc_Nit_sk) then + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + nit(i,j,iblk) = sss(i,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + endif + + end subroutine get_forcing_bgc + +!======================================================================= +! +! skeletal layer biochemistry +! + subroutine skl_biogeochemistry (nx_block, ny_block, & + icells, dt, & + indxi, indxj, & + nbtrcr, & + flux_bio, ocean_bio, & + hmix, aicen, & + meltb, congel, & + fswthru, first_ice, & + trcrn, grow_Cn) + + use ice_constants, only: p5, p05, p1, c1, c0, puny + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of cells with aicen > puny + nbtrcr ! number of bgc tracers + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + first_ice ! initialized values should be used + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + hmix , & ! mixed layer depth (m) + aicen , & ! ice area + meltb , & ! bottom ice melt (m) + congel , & ! bottom ice growth (m) + fswthru ! shortwave passing through ice to ocean + + real (kind=dbl_kind), dimension(nx_block,ny_block,ntrcr), & + intent(inout) :: & + trcrn ! tracer array + + ! history variables + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + grow_Cn ! specific growth (1/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nbtrcr), & + intent(inout) :: & + flux_bio,& ! ocean tracer flux (mmol/m^2/s) positive into ocean + ocean_bio ! ocean tracer concentration (mmol/m^3) + + ! local variables + + integer (kind=int_kind) :: i, j, ij, nn + + real (kind=dbl_kind), dimension(icells,nbtrcr):: & + react , & ! biological sources and sinks (mmol/m^3) + cinit , & ! initial brine concentration (mmol/m^2) + congel_alg ! congelation flux contribution to ice algae (mmol/m^2 s) + + real (kind=dbl_kind), dimension (nbtrcr):: & + flux_bio_temp, & ! tracer flux to ocean (mmol/m^2 s) + PVflag , & ! 1 for tracers that flow with the brine, 0 otherwise + cling ! 
1 for tracers that cling, 0 otherwise + + real (kind=dbl_kind), parameter :: & + PVc = 1.e-6_dbl_kind , & ! type 'constant' piston velocity (m/s) + PV_scale_growth = p5 , & ! scale factor in Jin PV during ice growth + PV_scale_melt = p05 , & ! scale factor in Jin PV during ice melt + MJ1 = 9.667e-9_dbl_kind, & ! coefficients in Jin 2008 (m/s) + MJ2 = 38.8_dbl_kind , & ! 4.49e-4_dbl_kind*secday (unitless) + MJ3 = 1.04e7_dbl_kind , & ! 1.39e-3_dbl_kind*secday^2 (s/m) + PV_frac_max = 0.9_dbl_kind ! max piston velocity coefficient + + real (kind=dbl_kind), dimension(icells) :: & + PVt , & ! type 'Jin2006' piston velocity (m/s) + ice_growth, & ! Jin2006 defn: congel rate or bottom melt rate (m/s) + f_meltn ! vertical melt fraction of skeletal layer in dt + + real (kind=dbl_kind):: & + rphi_sk , & ! 1 / skeletal layer porosity + cinit_tmp ! temporary variable for concentration (mmol/m^2) + + !------------------------------------------------------------------- + ! Initialize + !------------------------------------------------------------------- + + do nn = 1, nbtrcr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + cinit (ij,nn) = c0 + congel_alg(ij,nn) = c0 + react (ij,nn) = c0 + enddo + PVflag(nn) = c1 + if (bgc_tracer_type(nn) < p5) PVflag(nn) = c0 + cling(nn) = c0 + enddo + cling(nlt_bgc_N) = c1 + + rphi_sk = c1/phi_sk + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + PVt (ij) = c0 + f_meltn (ij) = c0 + ice_growth(ij) = (congel(i,j)-meltb(i,j))/dt + + if (first_ice(i,j)) then + trcrn(i,j,nt_bgc_N_sk) = ocean_bio(i,j,nlt_bgc_N) *sk_l*rphi_sk + if (tr_bgc_Nit_sk) & + trcrn(i,j,nt_bgc_Nit_sk) = ocean_bio(i,j,nlt_bgc_NO) *sk_l*rphi_sk + if (tr_bgc_Am_sk) & + trcrn(i,j,nt_bgc_Am_sk) = ocean_bio(i,j,nlt_bgc_NH) *sk_l*rphi_sk + if (tr_bgc_Sil_sk) & + trcrn(i,j,nt_bgc_Sil_sk) = ocean_bio(i,j,nlt_bgc_Sil) *sk_l*rphi_sk + if (tr_bgc_C_sk) & + trcrn(i,j,nt_bgc_C_sk) = ocean_bio(i,j,nlt_bgc_C) *sk_l*rphi_sk + if (tr_bgc_chl_sk) & + trcrn(i,j,nt_bgc_chl_sk) = ocean_bio(i,j,nlt_bgc_chl) *sk_l*rphi_sk + if (tr_bgc_DMSPp_sk) & + trcrn(i,j,nt_bgc_DMSPp_sk) = ocean_bio(i,j,nlt_bgc_DMSPp)*sk_l*rphi_sk + if (tr_bgc_DMSPd_sk) & + trcrn(i,j,nt_bgc_DMSPd_sk) = ocean_bio(i,j,nlt_bgc_DMSPd)*sk_l*rphi_sk + if (tr_bgc_DMS_sk) & + trcrn(i,j,nt_bgc_DMS_sk) = ocean_bio(i,j,nlt_bgc_DMS) *sk_l*rphi_sk + endif ! first_ice + + cinit(ij,nlt_bgc_N) = trcrn(i,j,nt_bgc_N_sk) + if (tr_bgc_Nit_sk) cinit(ij,nlt_bgc_NO) = trcrn(i,j,nt_bgc_Nit_sk) + if (tr_bgc_Am_sk) cinit(ij,nlt_bgc_NH) = trcrn(i,j,nt_bgc_Am_sk) + if (tr_bgc_Sil_sk) cinit(ij,nlt_bgc_Sil) = trcrn(i,j,nt_bgc_Sil_sk) + if (tr_bgc_C_sk) cinit(ij,nlt_bgc_C) = trcrn(i,j,nt_bgc_C_sk) + if (tr_bgc_chl_sk) cinit(ij,nlt_bgc_chl) = trcrn(i,j,nt_bgc_chl_sk) + if (tr_bgc_DMSPp_sk) cinit(ij,nlt_bgc_DMSPp) = trcrn(i,j,nt_bgc_DMSPp_sk) + if (tr_bgc_DMSPd_sk) cinit(ij,nlt_bgc_DMSPd) = trcrn(i,j,nt_bgc_DMSPd_sk) + if (tr_bgc_DMS_sk) cinit(ij,nlt_bgc_DMS) = trcrn(i,j,nt_bgc_DMS_sk) + + enddo ! ij + + do nn = 1, nbtrcr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + if (cinit(ij,nn) < c0) then + write(nu_diag,*)'initial sk_bgc < 0, ij,nn,nbtrcr,cinit(ij,nn)', & + ij,nn,nbtrcr,cinit(ij,nn) + call abort_ice ('ice_bgc.F90: BGC error1') + endif + enddo + enddo + + !------------------------------------------------------------------- + ! 'Jin2006': + ! 1. congel/melt dependent piston velocity (PV) for growth and melt + ! 2. 
If congel > melt use 'congel'; if melt > congel use 'melt' + ! 3. For algal N, PV for ice growth only provides a seeding concentration + ! 4. Melt affects nutrients and algae in the same manner through PV(melt) + !------------------------------------------------------------------- + + if (trim(bgc_flux_type) == 'Jin2006') then + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (ice_growth(ij) > c0) then ! ice_growth(ij) = congel(i,j)/dt + PVt(ij) = -min(abs(PV_scale_growth*(MJ1 + MJ2*ice_growth(ij) & + - MJ3*ice_growth(ij)**2)), & + PV_frac_max*sk_l/dt) + else ! ice_growth(ij) = -meltb(i,j)/dt + PVt(ij) = min(abs(PV_scale_melt *( MJ2*ice_growth(ij) & + - MJ3*ice_growth(ij)**2)), & + PV_frac_max*sk_l/dt) + endif + + if (ice_growth(ij) < c0) then ! flux from ice to ocean + ! Algae melt like nutrients + f_meltn(ij) = PVt(ij)*cinit(ij,nlt_bgc_N)/sk_l ! for algae only + elseif (ice_growth(ij) > c0 .AND. & + cinit(ij,nlt_bgc_N) < ocean_bio(i,j,nlt_bgc_N)*sk_l/phi_sk) then + ! Growth only contributes to seeding from ocean + congel_alg(ij,nlt_bgc_N) = (ocean_bio(i,j,nlt_bgc_N)*sk_l/phi_sk & + - cinit(ij,nlt_bgc_N))/dt + endif ! PVt > c0 + enddo ! ij + + !---------------------------------------------------------------------- + ! 'constant': + ! 1. Constant PV for congel > melt + ! 2. For algae, PV for ice growth only provides a seeding concentration + ! 3. Melt loss (f_meltn) affects algae only and is proportional to melt + !----------------------------------------------------------------------- + + else ! bgc_flux_type = 'constant' + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (ice_growth(ij) > c0) PVt(ij) = -PVc + if (ice_growth(ij) >= c0 .AND. & + cinit(ij,nlt_bgc_N)/sk_l < ocean_bio(i,j,nlt_bgc_N)/phi_sk) then + congel_alg(ij,nlt_bgc_N) = (ocean_bio(i,j,nlt_bgc_N)*sk_l/phi_sk & + - cinit(ij,nlt_bgc_N))/dt + elseif (ice_growth(ij) < c0) then + f_meltn(ij) = min(c1, meltb(i,j)/sk_l)*cinit(ij,nlt_bgc_N)/dt + endif + enddo ! ij + + endif ! bgc_flux_type + + !----------------------------------------------------------------------- + ! begin building biogeochemistry terms + !----------------------------------------------------------------------- + + call algal_dyn (nx_block, ny_block, & + icells, dt, & + indxi, indxj, & + fswthru, react, & + cinit, nbtrcr, & + grow_Cn, & + tr_bgc_N_sk, tr_bgc_Nit_sk, & + tr_bgc_Am_sk, tr_bgc_Sil_sk, & + tr_bgc_C_sk, tr_bgc_chl_sk, & + tr_bgc_DMSPp_sk, tr_bgc_DMSPd_sk, & + tr_bgc_DMS_sk) + + !----------------------------------------------------------------------- + ! compute new tracer concencentrations + !----------------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + do nn = 1, nbtrcr + + !----------------------------------------------------------------------- + ! if PVt(ij) > 0, ie melt, then ocean_bio term drops out (MJ2006) + ! 
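+! Illustrative magnitude for the 'Jin2006' piston velocity computed above,
+! before it enters the boundary flux below (assumed, typical values --
+! not taken from this file): for a congelation rate of 1 cm/day,
+! ice_growth ~ 1.16e-7 m/s, so
+!   MJ1 + MJ2*ice_growth - MJ3*ice_growth**2
+!     ~ 9.67e-9 + 4.49e-6 - 1.39e-7 ~ 4.4e-6 m/s
+! and |PVt| ~ PV_scale_growth*4.4e-6 ~ 2.2e-6 m/s (about 0.19 m/day),
+! well below the cap PV_frac_max*sk_l/dt ~ 7.5e-6 m/s for sk_l = 0.03 m
+! and dt = 3600 s (both assumed here).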
Combine boundary fluxes + !----------------------------------------------------------------------- + + PVflag(nn) = SIGN(PVflag(nn),PVt(ij)) + cinit_tmp = max(c0, cinit(ij,nn) + react(ij,nn)*sk_l) + flux_bio_temp(nn) = (PVflag(nn)*PVt(ij)*cinit_tmp/sk_l & + - PVflag(nn)*min(c0,PVt(ij))*ocean_bio(i,j,nn)) & + + f_meltn(ij)*cling(nn) - congel_alg(ij,nn) + + if (cinit_tmp < flux_bio_temp(nn)*dt) then + flux_bio_temp(nn) = cinit_tmp/dt*(c1-puny) + endif + + cinit(ij,nn) = cinit_tmp - flux_bio_temp(nn)*dt + flux_bio(i,j,nn) = flux_bio(i,j,nn) + flux_bio_temp(nn)*phi_sk + + ! Uncomment to update ocean concentration + ! Currently not coupled with ocean biogeochemistry + !ocean_bio(i,j,nn) = ocean_bio(i,j,nn) & + ! + flux_bio(i,j,nn)/hmix(i,j)*aicen(i,j) + + if (cinit(ij,nn) < c0) then + write(nu_diag,*) 'sk_bgc < 0 after algal fluxes, ij,nn,cinit,flux_bio',& + ij,nn,cinit(ij,nn),flux_bio(i,j,nn) + write(nu_diag,*) 'cinit_tmp,flux_bio_temp,f_meltn,congel_alg,PVt,PVflag: ' + write(nu_diag,*) cinit_tmp,flux_bio_temp(nn),f_meltn(ij), & + congel_alg(ij,nn),PVt(ij),PVflag(nn) + write(nu_diag,*) 'congel, meltb: ',congel(i,j),meltb(i,j) + call abort_ice ('ice_bgc.F90: BGC error3') + endif + + enddo ! nbtrcr + + !----------------------------------------------------------------------- + ! reload tracer array + !----------------------------------------------------------------------- + + trcrn(i,j,nt_bgc_N_sk) = cinit(ij,nlt_bgc_N) + if (tr_bgc_Nit_sk) trcrn(i,j,nt_bgc_Nit_sk) = cinit(ij,nlt_bgc_NO) + if (tr_bgc_Am_sk) trcrn(i,j,nt_bgc_Am_sk) = cinit(ij,nlt_bgc_NH) + if (tr_bgc_Sil_sk) trcrn(i,j,nt_bgc_Sil_sk) = cinit(ij,nlt_bgc_Sil) + if (tr_bgc_C_sk) trcrn(i,j,nt_bgc_C_sk) = trcrn(i,j,nt_bgc_N_sk)*R_C2N + if (tr_bgc_chl_sk) trcrn(i,j,nt_bgc_chl_sk) = trcrn(i,j,nt_bgc_N_sk)*R_chl2N + if (tr_bgc_DMSPp_sk) trcrn(i,j,nt_bgc_DMSPp_sk) = cinit(ij,nlt_bgc_DMSPp) + if (tr_bgc_DMSPd_sk) trcrn(i,j,nt_bgc_DMSPd_sk) = cinit(ij,nlt_bgc_DMSPd) + if (tr_bgc_DMS_sk) trcrn(i,j,nt_bgc_DMS_sk) = cinit(ij,nlt_bgc_DMS) + + enddo ! icells + + end subroutine skl_biogeochemistry + +!======================================================================= +! +! Do biogeochemistry from subroutine algal_dynamics +! authors: Scott Elliott, LANL +! Nicole Jeffery, LANL + + subroutine algal_dyn (nx_block, ny_block, & + icells, dt, & + indxi, indxj, & + fswthru, reactb, & + ltrcrn, nbtrcr, & + growN, & + tr_bio_N, tr_bio_NO, & + tr_bio_NH, tr_bio_Sil, & + tr_bio_C, tr_bio_chl, & + tr_bio_DMSPp, tr_bio_DMSPd, & + tr_bio_DMS) + + use ice_constants, only: p1, p5, c0, c1, secday + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of cells with aicen > puny + nbtrcr ! number of layer tracers + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + fswthru ! average shortwave passing through current ice layer (W/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + growN ! algal specific growth rate (1/s) + + real (kind=dbl_kind), dimension(icells,nbtrcr), intent(inout) :: & + reactb ! biological reaction terms (mmol/m^3) + + real (kind=dbl_kind), dimension(icells,nbtrcr), intent(in) :: & + ltrcrn ! concentrations in layer * sk_l + + ! tracer flags for vertical or skeletal layer bgc + logical (kind=log_kind), intent(in):: & + tr_bio_N , & ! 
algal nitrogen + tr_bio_NO , & ! algal nitrate + tr_bio_NH , & ! algal ammonium + tr_bio_Sil , & ! algal silicate + tr_bio_C , & ! algal carbon + tr_bio_chl , & ! algal chlorophyll + tr_bio_DMSPp, & ! DMSP particulate + tr_bio_DMSPd, & ! DMSP dissolved + tr_bio_DMS ! DMS + + ! local variables + + real (kind=dbl_kind), parameter :: & + T_bot = -1.8_dbl_kind , & ! interface close to freezing (C) + chlabs = 9.e-4_dbl_kind, & ! chlorophyll absorption (1/(mg/m^3)) + ! assuming skeletal layer thickness = 3 cm + mu_max = 1.5_dbl_kind, & ! maximum growth rate 'Jin2006' (1/day) + T_max = -1.8_dbl_kind, & ! maximum growth at Tmax (C) + op_dep_min = 0.1 , & ! optical depth above which light attentuates + grow_Tdep = 0.0633_dbl_kind,& ! and its T dependence (1/C) + fr_graze = p1 , & ! A93 val for S, set to zero in Jin 06 + fr_graze_s = 0.5_dbl_kind , & ! fraction of grazing spilled or slopped + fr_graze_a = 0.5_dbl_kind , & ! fraction of grazing assimilated + fr_graze_e = 0.5_dbl_kind , & ! fraction of assimilation excreted + alpha2max = 0.8_dbl_kind, & ! light limitation (1/(W/m^2)) + !beta2max = 0.018_dbl_kind, & ! corresponding light inhibition (1/W/m^2) + K_Nit = c1 , & ! nitrate half saturation (mmol/m^3) + K_Am = c1 , & ! ammonium half saturation (mmol/m^3) + K_Sil = 4.0_dbl_kind , & ! silicon half saturation (mmol/m^3) + mort_pre = 0.0208_dbl_kind,& ! prefix to mortality (1/day) + mort_Tdep = 0.03_dbl_kind , & ! T dependence of mortality (1/C) + fr_mort2min= c1 , & ! fractionation to remin + !t_nitrif = 67.0_dbl_kind , & ! nitrification time scale (days) + max_loss = 0.9 ! restrict uptake to 90% of remaining value + + real (kind=dbl_kind), parameter :: & + fr_excrt_2S= c1 , & ! excretion is efficient initially + y_sk_DMS = c1 , & ! and conversion given high yield + t_sk_conv = 10.0_dbl_kind, & ! at a Stefels rate (days) + t_sk_ox = 10.0_dbl_kind ! DMS in turn oxidizes slowly (days) + + ! real (kind=dbl_kind), parameter :: & + ! pr_l = 10.0_dbl_kind, & ! product layer thickness (m) + ! chl_pr_v = 0.1_dbl_kind , & ! fixed nondiatom chlorophyll in ml (mg/m^3) + ! R_chl2N_nd = 3.0_dbl_kind , & ! shade adaptation below (mg/millimole) + ! R_C2N_nd = 7.0_dbl_kind , & ! open water ratio (mole/mole) + ! t_pr_dsrp = 10.0_dbl_kind ! disruption time scale (days) + + ! real (kind=dbl_kind), parameter :: & + ! R_S2N_nd = 0.03_dbl_kind, & ! open water ratio nondiatoms (mole/mole) + ! y_pr_DMS = c1 , & ! but we begin again with unit yield + ! t_pr_conv = 10.0_dbl_kind, & ! and a similar conversion (days) + ! t_pr_ox = 10.0_dbl_kind ! plus round final time scale (days) + + integer (kind=int_kind) :: i, j, ij + + real (kind=dbl_kind) :: & + Nin , & ! algal nitrogen concentration on volume (mmol/m^3) + Cin , & ! algal carbon concentration on volume (mmol/m^3) + chlin , & ! algal chlorophyll concentration on volume (mg/m^3) + NOin , & ! nitrate concentration on volume (mmol/m^3) + NHin , & ! ammonia/um concentration on volume (mmol/m^3) + Silin , & ! silicon concentration on volume (mmol/m^3) + DMSPpin , & ! DMSPp concentration on volume (mmol/m^3) + DMSPdin , & ! DMSPd concentration on volume (mmol/m^3) + DMSin ! DMS concentration on volume (mmol/m^3) + + real (kind=dbl_kind) :: & + op_dep , & ! bottom layer attenuation exponent (optical depth) + Iavg_loc ! bottom layer attenuated Fswthru (W/m^2) + + real (kind=dbl_kind) :: & + L_lim , & ! overall light limitation + Nit_lim , & ! overall nitrate limitation + Am_lim , & ! overall ammonium limitation + N_lim , & ! overall nitrogen species limitation + Sil_lim , & ! 
overall silicon limitation + fr_Nit , & ! fraction of local ecological growth as nitrate + fr_Am , & ! fraction of local ecological growth as ammonia + growmax_N, & ! maximum growth rate in N currency (mmol/m^3/s) + grow_N , & ! true growth rate in N currency (mmol/m^3/s) + potU_Nit , & ! potential nitrate uptake (mmol/m^3/s) + potU_Am , & ! potential ammonium uptake (mmol/m^3/s) + U_Nit , & ! actual nitrate uptake (mmol/m^3/s) + U_Am , & ! actual ammonium uptake (mmol/m^3/s) + U_Sil ! actual silicon uptake (mmol/m^3/s) + + real (kind=dbl_kind) :: & + resp , & ! respiration (mmol/m^3/s) + graze , & ! grazing (mmol/m^3/s) + mort , & ! sum of mortality and excretion (mmol/m^3/s) + nitrif ! nitrification (mmol/m^3/s) + + ! source terms underscore s, removal underscore r + + real (kind=dbl_kind) :: & + N_s_p , & ! algal nitrogen photosynthesis (mmol/m^3) + N_r_g , & ! algal nitrogen losses to grazing (mmol/m^3) + N_r_r , & ! algal nitrogen losses to respiration (mmol/m^3) + N_r_mo , & ! algal nitrogen losses to mortality (mmol/m^3) + N_s , & ! net algal nitrogen sources (mmol/m^3) + N_r , & ! net algal nitrogen removal (mmol/m^3) + C_s , & ! net algal carbon sources (mmol/m^3) + C_r , & ! net algal carbon removal (mmol/m^3) + NO_s_n , & ! skl nitrate from nitrification (mmol/m^3) + NO_r_p , & ! skl nitrate uptake by algae (mmol/m^3) + NO_s , & ! net skl nitrate sources (mmol/m^3) + NO_r , & ! net skl nitrate removal (mmol/m^3) + NH_s_e , & ! skl ammonium source from excretion (mmol/m^3) + NH_s_r , & ! skl ammonium source from respiration (mmol/m^3) + NH_s_mo , & ! skl ammonium source from mort/remin (mmol/m^3) + NH_r_p , & ! skl ammonium uptake by algae (mmol/m^3) + NH_r_n , & ! skl ammonium removal to nitrification (mmol/m^3) + NH_s , & ! net skl ammonium sources (mmol/m^3) + NH_r , & ! net skl ammonium removal (mmol/m^3) + Sil_r_p , & ! skl silicon uptake by algae (mmol/m^3) + Sil_s , & ! net skl silicon sources (mmol/m^3) + Sil_r ! net skl silicon removal (mmol/m^3) + + real (kind=dbl_kind) :: & + DMSPd_s_s , & ! skl dissolved DMSP from grazing spillage (mmol/m^3) + DMSPd_s_e , & ! skl dissolved DMSP from zooplankton excretion (mmol/m^3) + DMSPd_s_mo, & ! skl dissolved DMSP from MBJ algal mortexc (mmol/m^3) + DMSPd_r_c , & ! skl dissolved DMSP conversion (mmol/m^3) + DMSPd_s , & ! net skl dissolved DMSP sources (mmol/m^3) + DMSPd_r , & ! net skl dissolved DMSP removal (mmol/m^3) + DMS_s_c , & ! skl DMS source via conversion (mmol/m^3) + DMS_r_o , & ! skl DMS losses due to oxidation (mmol/m^3) + DMS_s , & ! net skl DMS sources (mmol/m^3) + DMS_r ! net skl DMS removal (mmol/m^3) + + ! real (kind=dbl_kind) :: & + ! DMSP_pr_s_nd , & ! product layer dissolved DMSP from local bio (mmol/m^3) + ! DMSP_pr_s_me , & ! product layer dissolved DMSP from melting (mmol/m^3) + ! DMSP_pr_r_c , & ! product layer dissolved DMSP conversion (mmol/m^3) + ! DMSP_pr_s , & ! net product dissolved DMSP sources (mmol/m^3) + ! DMSP_pr_r , & ! net product dissolved DMSP removal (mmol/m^3) + ! DMS_pr_s_c , & ! product layer DMS source via conversion (mmol/m^3) + ! DMS_pr_r_o , & ! product layer DMS losses due to oxidation (mmol/m^3) + ! DMS_pr_s , & ! net product DMS sources (mmol/m^3) + ! DMS_pr_r ! net product DMS removal (mmol/m^3) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !----------------------------------------------------------------------- + ! 
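+! Illustrative worked values for the limitation factors evaluated further
+! down in this loop (inputs assumed, not from this file): with
+! NOin = 1 mmol/m^3 and K_Nit = 1 mmol/m^3, Nit_lim = 1/(1+1) = 0.5;
+! with Iavg_loc = 1 W/m^2 and alpha2max = 0.8, the light limitation is
+! L_lim = 1 - exp(-0.8) ~ 0.55; growth then scales the temperature-
+! dependent maximum rate by min(L_lim, N_lim, Sil_lim).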
Initialize + !----------------------------------------------------------------------- + + Nin = c0 + Cin = c0 + chlin = c0 + NOin = c0 + NHin = c0 + Silin = c0 + DMSPpin = c0 + DMSPdin = c0 + DMSin = c0 + + NOin = ltrcrn(ij,nlt_bgc_NO) /sk_l + if (tr_bio_N) Nin = ltrcrn(ij,nlt_bgc_N) /sk_l + if (tr_bio_C) Cin = ltrcrn(ij,nlt_bgc_C) /sk_l + if (tr_bio_NH) NHin = ltrcrn(ij,nlt_bgc_NH) /sk_l + if (tr_bio_Sil) Silin = ltrcrn(ij,nlt_bgc_Sil) /sk_l + if (tr_bio_DMSPp) DMSPpin = ltrcrn(ij,nlt_bgc_DMSPp)/sk_l + if (tr_bio_DMSPd) DMSPdin = ltrcrn(ij,nlt_bgc_DMSPd)/sk_l + if (tr_bio_DMS) DMSin = ltrcrn(ij,nlt_bgc_DMS) /sk_l + chlin = R_chl2N * Nin + + !----------------------------------------------------------------------- + ! Light limitation + !----------------------------------------------------------------------- + + op_dep = chlabs * chlin + + ! Okhotsk maxima causes a reevaluation. + ! The new concept is, late algae at the bottom of the bottom strongly + ! attenuated. Since k about 0.03 1/m(mg/m3), efold at about 30 mg/m3 + ! values of order hundreds will be shut down... + ! Newest algae experience super aborption above because they sit low. + ! More than perhaps two efolds and light falls below half value. + + if (op_dep > op_dep_min) then + Iavg_loc = fswthru(i,j) * (c1 - exp(-op_dep)) / op_dep + else + Iavg_loc = fswthru(i,j) + endif + + ! With light inhibition + ! L_lim = (c1 - exp(-alpha2max*Iavg_loc)) * exp(-beta2max*Iavg_loc) + + ! Without light inhibition + L_lim = (c1 - exp(-alpha2max*Iavg_loc)) + + !----------------------------------------------------------------------- + ! Nutrient limitation + !----------------------------------------------------------------------- + + Nit_lim = NOin/(NOin + K_Nit) + Am_lim = c0 + N_lim = Nit_lim + if (tr_bio_NH) then + Am_lim = NHin/(NHin + K_Am) + N_lim = min(c1, Nit_lim + Am_lim) + endif + Sil_lim = c1 + if (tr_bio_Sil) Sil_lim = Silin/(Silin + K_Sil) + + ! Growth and uptake computed within the bottom layer + ! Note here per A93 discussions and MBJ model, salinity is a universal + ! restriction. Comparison with available column nutrients inserted + ! but in tests had no effect. + ! Primary production reverts to SE form, see MBJ below and be careful + + growmax_N = mu_max / secday * exp(grow_Tdep * (T_bot - T_max))* Nin + grow_N = min(L_lim, N_lim, Sil_lim) * growmax_N + potU_Nit = Nit_lim * growmax_N + potU_Am = Am_lim * growmax_N + U_Am = min(grow_N, potU_Am) + U_Nit = grow_N - U_Am + U_Sil = R_Si2N * grow_N + + if (tr_bio_Sil) U_Sil = min(U_Sil, max_loss * Silin/dt) + U_Nit = min(U_Nit, max_loss * NOin/dt) + U_Am = min(U_Am, max_loss * NHin/dt) + + grow_N = min(U_Sil/R_Si2N,U_Nit + U_Am) + fr_Am = c0 + if (tr_bio_NH) then + fr_Am = p5 + if (grow_N > c0) fr_Am = min(U_Am/grow_N, c1) + endif + fr_Nit = c1 - fr_Am + U_Nit = fr_Nit * grow_N + U_Am = fr_Am * grow_N + U_Sil = R_Si2N * grow_N + + resp = fr_resp * grow_N + graze = fr_graze * grow_N + mort = mort_pre * exp(mort_Tdep*(T_bot-T_max)) * Nin / secday + nitrif = c0 ! (NHin / t_nitrif) / secday + + ! history variables + + growN(i,j) = grow_N + if (Nin > c0) growN(i,j) = grow_N/Nin ! specific growth rate (per s) + + !----------------------------------------------------------------------- + ! Define reaction terms + !----------------------------------------------------------------------- + + ! Since the framework remains incomplete at this point, + ! it is assumed as a starting expedient that + ! DMSP loss to melting results in 10% conversion to DMS + ! 
which is then given a ten day removal constant. + ! Grazing losses are channeled into rough spillage and assimilation + ! then following ammonia there is some recycling. + + !-------------------------------------------------------------------- + ! Algal reaction term + ! N_react = (grow_N*(c1 -fr_resp - fr_graze) - mort)*dt + !-------------------------------------------------------------------- + + N_s_p = grow_N * dt + N_r_g = graze * dt + N_r_r = resp * dt + N_r_mo = mort * dt + N_s = N_s_p + N_r = N_r_g + N_r_r + N_r_mo + + !-------------------------------------------------------------------- + ! Carbon chemistry + ! C_react = R_C2N * N_react + !-------------------------------------------------------------------- + + C_s = R_C2N * N_s + C_r = R_C2N * N_r + + !-------------------------------------------------------------------- + ! Nitrate reaction term + ! NO_react = (nitrif - fr_Nit*grow_N)*dt + !-------------------------------------------------------------------- + + NO_s_n = nitrif * dt + NO_r_p = U_Nit * dt + NO_s = NO_s_n + NO_r = NO_r_p + + !-------------------------------------------------------------------- + ! Ammonium reaction term + ! NH_react = (-nitrif - (c1-fr_Nit - fr_resp + ! - fr_graze*fr_graze_e*fr_graze_a)*grow_N + mort*fr_mort2min)*dt + !-------------------------------------------------------------------- + + NH_s_r = N_r_r + NH_s_e = fr_graze_e * fr_graze_a * N_r_g + NH_s_mo = fr_mort2min * N_r_mo + NH_r_p = U_Am * dt + NH_r_n = nitrif * dt + NH_s = NH_s_r + NH_s_e + NH_s_mo + NH_r = NH_r_p + NH_r_n + + !-------------------------------------------------------------------- + ! Silica reaction term + ! Sil_react = - R_Si2N * grow_N * dt + !-------------------------------------------------------------------- + + Sil_r_p = U_Sil * dt + Sil_s = c0 + Sil_r = Sil_r_p + + !-------------------------------------------------------------------- + ! Sulfur cycle begins here + !-------------------------------------------------------------------- + ! Grazing losses are channeled into rough spillage and assimilation + ! then onward and the MBJ mortality channel is included + ! It is assumed as a starting expedient that + ! DMSP loss to melting gives partial conversion to DMS in product layer + ! which then undergoes Stefels removal. + + !-------------------------------------------------------------------- + ! DMSPd reaction term + ! DMSPd_react = R_S2N*((fr_graze_s+fr_excrt_2S*fr_graze_e*fr_graze_a) + ! *fr_graze*grow_N + fr_mort2min*mort)*dt + ! - [\DMSPd]/t_sk_conv*dt + !-------------------------------------------------------------------- + + DMSPd_s_s = fr_graze_s * R_S2N * N_r_g + DMSPd_s_e = fr_excrt_2S * fr_graze_e * fr_graze_a * R_S2N * N_r_g + DMSPd_s_mo = fr_mort2min * R_S2N * N_r_mo + DMSPd_r_c = DMSPdin * dt / (t_sk_conv * secday) + DMSPd_s = DMSPd_s_s + DMSPd_s_e + DMSPd_s_mo + DMSPd_r = DMSPd_r_c + + !-------------------------------------------------------------------- + ! DMS reaction term + ! DMS_react = ([\DMSPd]*y_sk_DMS/t_sk_conv - c1/t_sk_ox *[\DMS])*dt + !-------------------------------------------------------------------- + + DMS_s_c = y_sk_DMS * DMSPd_r_c + DMS_r_o = DMSin * dt / (t_sk_ox * secday) + DMS_s = DMS_s_c + DMS_r = DMS_r_o + + ! for mixed layer sulfur chemistry, fluxes kept separate for ice + ! area weighting + ! units are different here, but not sure if they need to be changed + ! no fluxes into the product layer here + + ! DMSP_pr_s_nd = chl_pr_v*pr_l * R_S2N_nd/R_chl2N_nd * dt & + ! / (t_pr_dsrp * secday) + ! 
DMSP_pr_s_me = fr_melt_2S * DMSPp_sk_r_me + ! DMSP_pr_r_c = dmsp(i,j,iblk)*pr_l * dt / (t_pr_conv * secday) + ! DMSP_pr_f = F_DMSP * dt + ! DMSP_pr_s = DMSP_pr_s_nd ! + DMSP_pr_s_me + DMSP_pr_f + ! DMSP_pr_r = DMSP_pr_r_c + + ! DMS_pr_s_c = y_pr_DMS * DMSP_pr_r_c + ! DMS_pr_r_o = dms(i,j,iblk)*pr_l * dt / (t_pr_ox * secday) + ! DMS_pr_f = F_DMS * dt + ! DMS_pr_s = DMS_pr_s_c ! + DMS_pr_f + ! DMS_pr_r = DMS_pr_r_o + + !----------------------------------------------------------------------- + ! Load reaction array + !----------------------------------------------------------------------- + + reactb(ij,nlt_bgc_NO) = NO_s - NO_r + if (tr_bio_N) reactb(ij,nlt_bgc_N) = N_s - N_r + if (tr_bio_C) reactb(ij,nlt_bgc_C) = C_s - C_r + if (tr_bio_NH) reactb(ij,nlt_bgc_NH) = NH_s - NH_r + if (tr_bio_Sil) reactb(ij,nlt_bgc_Sil) = Sil_s - Sil_r + if (tr_bio_DMSPd) reactb(ij,nlt_bgc_DMSPd) = DMSPd_s - DMSPd_r + if (tr_bio_DMS) reactb(ij,nlt_bgc_DMS) = DMS_s - DMS_r + + enddo + + end subroutine algal_dyn + +!======================================================================= +! +! Writes diagnostic info (max, min, global sums, etc) to standard out +! +! authors: Elizabeth C. Hunke, LANL +! Nicole Jeffery, LANL + + subroutine bgc_diags (dt) + + use ice_broadcast, only: broadcast_scalar + use ice_constants, only: c0, mps_to_cmpdy, c100 + use ice_diagnostics, only: npnt, print_points, pmloc, piloc, pjloc, pbloc + use ice_timers, only: timer_bgc, ice_timer_start, ice_timer_stop + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i, j, n, iblk + + ! fields at diagnostic points + real (kind=dbl_kind), dimension(npnt) :: & + pN_sk, pNit_sk, pAm_sk, pSil_sk, & + pDMSPp_sk, pDMSPd_sk, pDMS_sk, pN_ac, & + pNit_ac, pAm_ac, pSil_ac, pDMSP_ac, pDMS_ac, & + pflux_NO, pflux_N, pflux_Sil, pflux_NH + + call ice_timer_start(timer_bgc) ! biogeochemistry + + if (print_points) then + + !----------------------------------------------------------------- + ! biogeochemical + ! state of the ice and associated fluxes for 2 defined points + ! NOTE these are computed for the last timestep only (not avg) + !----------------------------------------------------------------- + + do n = 1, npnt + if (my_task == pmloc(n)) then + i = piloc(n) + j = pjloc(n) + iblk = pbloc(n) + pAm_ac(n) = c0 + pSil_ac(n) = c0 + pDMSP_ac(n) = c0 + pDMS_ac(n) = c0 + + pN_ac(n) = ocean_bio(i,j,nlt_bgc_N,iblk) ! algalN(i,j,iblk) + pNit_ac(n) = ocean_bio(i,j,nlt_bgc_NO,iblk) ! nit(i,j,iblk) + if (tr_bgc_Am_sk) & + pAm_ac(n) = ocean_bio(i,j,nlt_bgc_NH,iblk) ! amm(i,j,iblk) + if (tr_bgc_Sil_sk) & + pSil_ac(n) = ocean_bio(i,j,nlt_bgc_Sil,iblk) ! sil(i,j,iblk) + if (tr_bgc_DMS_sk) then + pDMSP_ac(n) = ocean_bio(i,j,nlt_bgc_DMSPp,iblk)! dmsp(i,j,iblk) + pDMS_ac(n) = ocean_bio(i,j,nlt_bgc_DMS,iblk) ! dms(i,j,iblk) + endif + + ! fluxes in mmol/m^2/d + ! 
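+! Sketch of the two unit conversions used in the skl_bgc block below
+! (a reading of the code; the constant value is assumed): trcr holds the
+! skeletal-layer tracer, so trcr*phi_sk/sk_l (porosity over layer
+! thickness in m) gives a bulk concentration in mmol/m^3, while flux_bio
+! is in mmol/m^2/s and mps_to_cmpdy/c100 reduces to ~86400 s/day
+! (assuming mps_to_cmpdy = 8.64e6), giving fluxes in mmol/m^2/d.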
concentrations are bulk in mmol/m^3 + + if (skl_bgc) then + pNit_sk(n) = c0 + pAm_sk(n) = c0 + pSil_sk(n) = c0 + pDMSPp_sk(n) = c0 + pDMSPd_sk(n) = c0 + pDMS_sk(n) = c0 + + pN_sk(n) = trcr (i,j, nt_bgc_N_sk, iblk)*phi_sk/sk_l + pflux_N(n) = flux_bio(i,j,nlt_bgc_N, iblk)*mps_to_cmpdy/c100 + if (tr_bgc_Nit_sk) then + pNit_sk(n) = trcr (i,j, nt_bgc_Nit_sk, iblk)*phi_sk/sk_l + pflux_NO(n) = flux_bio(i,j,nlt_bgc_NO, iblk)*mps_to_cmpdy/c100 + endif + if (tr_bgc_Am_sk) then + pAm_sk(n) = trcr (i,j, nt_bgc_Am_sk, iblk)*phi_sk/sk_l + pflux_NH(n) = flux_bio(i,j,nlt_bgc_NH, iblk)*mps_to_cmpdy/c100 + endif + if (tr_bgc_Sil_sk) then + pSil_sk(n) = trcr (i,j, nt_bgc_Sil_sk, iblk)*phi_sk/sk_l + pflux_Sil(n)= flux_bio(i,j,nlt_bgc_Sil, iblk)*mps_to_cmpdy/c100 + endif + if (tr_bgc_DMSPp_sk) & + pDMSPp_sk(n) = trcr (i,j,nt_bgc_DMSPp_sk,iblk)*phi_sk/sk_l + if (tr_bgc_DMSPd_sk) & + pDMSPd_sk(n) = trcr (i,j,nt_bgc_DMSPd_sk,iblk)*phi_sk/sk_l + if (tr_bgc_DMS_sk) & + pDMS_sk (n) = trcr (i,j,nt_bgc_DMS_sk, iblk)*phi_sk/sk_l + endif + + endif ! my_task = pmloc + + call broadcast_scalar(pN_ac (n), pmloc(n)) + call broadcast_scalar(pNit_ac (n), pmloc(n)) + call broadcast_scalar(pAm_ac (n), pmloc(n)) + call broadcast_scalar(pSil_ac (n), pmloc(n)) + call broadcast_scalar(pDMSP_ac (n), pmloc(n)) + call broadcast_scalar(pDMS_ac (n), pmloc(n)) + call broadcast_scalar(pflux_N (n), pmloc(n)) + call broadcast_scalar(pflux_NO (n), pmloc(n)) + call broadcast_scalar(pflux_NH (n), pmloc(n)) + call broadcast_scalar(pflux_Sil(n), pmloc(n)) + + if (skl_bgc) then ! skl_bgc + call broadcast_scalar(pN_sk (n), pmloc(n)) + call broadcast_scalar(pNit_sk (n), pmloc(n)) + call broadcast_scalar(pAm_sk (n), pmloc(n)) + call broadcast_scalar(pSil_sk (n), pmloc(n)) + call broadcast_scalar(pDMSPp_sk(n), pmloc(n)) + call broadcast_scalar(pDMSPd_sk(n), pmloc(n)) + call broadcast_scalar(pDMS_sk (n), pmloc(n)) + endif + + enddo ! npnt + endif ! print_points + + !----------------------------------------------------------------- + ! start spewing + !----------------------------------------------------------------- + + if (my_task == master_task) then + + call flush_fileunit(nu_diag) + + !----------------------------------------------------------------- + ! diagnostics for Arctic and Antarctic points + !----------------------------------------------------------------- + + if (print_points) then + if (skl_bgc) then + + write(nu_diag,*) '----------BGC----------' + + write(nu_diag,*) '------bulk skl bgc-----' + write(nu_diag,900) 'nitrogen (mmol/m^3) = ',pN_sk(1),pN_sk(2) + write(nu_diag,900) 'nitrate (mmol/m^3) = ',pNit_sk(1),pNit_sk(2) + if (tr_bgc_Am_sk) & + write(nu_diag,900) 'ammonia/um (mmol/m^3) = ',pAm_sk(1),pAm_sk(2) + if (tr_bgc_Sil_sk) & + write(nu_diag,900) 'silicon (mmol/m^3) = ',pSil_sk(1),pSil_sk(2) + if (tr_bgc_DMS_sk) then + write(nu_diag,900) 'DMSPp (mmol/m^3) = ',pDMSPp_sk(1),pDMSPp_sk(2) + write(nu_diag,900) 'DMSPd (mmol/m^3) = ',pDMSPd_sk(1),pDMSPd_sk(2) + write(nu_diag,900) 'DMS (mmol/m^3) = ',pDMS_sk(1),pDMS_sk(2) + endif + + write(nu_diag,*) '---ice-ocean fluxes----' + write(nu_diag,900) 'algalN flx(mmol/m^2/d) = ',pflux_N(1),pflux_N(2) + write(nu_diag,900) 'nit. flux (mmol/m^2/d) = ',pflux_NO(1),pflux_NO(2) + if (tr_bgc_Am_sk) & + write(nu_diag,900) 'amm. flux (mmol/m^2/d) = ',pflux_NH(1),pflux_NH(2) + if (tr_bgc_Sil_sk) & + write(nu_diag,900) 'sil. 
flux (mmol/m^2/d) = ',pflux_Sil(1),pflux_Sil(2) + + write(nu_diag,*) '---ocean mixed layer---' + write(nu_diag,900) 'algal N (mmol/m^3) = ',pN_ac(1),pN_ac(2) + write(nu_diag,900) 'nitrate (mmol/m^3) = ',pNit_ac(1),pNit_ac(2) + if (tr_bgc_Am_sk) & + write(nu_diag,900) 'ammonia/um (mmol/m^3) = ',pAm_ac(1),pAm_ac(2) + if (tr_bgc_Sil_sk) & + write(nu_diag,900) 'silicon (mmol/m^3) = ',pSil_ac(1),pSil_ac(2) + if (tr_bgc_DMS_sk) then + write(nu_diag,900) 'DMSP (mmol/m^3) = ',pDMSP_ac(1),pDMSP_ac(2) + write(nu_diag,900) 'DMS (mmol/m^3) = ',pDMS_ac(1),pDMS_ac(2) + endif + + endif ! skl_bgc + endif ! print_points + + endif ! my_task = master_task + + call ice_timer_stop(timer_bgc) ! biogeochemistry + + 900 format (a25,2x,f24.17,2x,f24.17) + + end subroutine bgc_diags + +!======================================================================= +!---! these subroutines write/read Fortran unformatted data files .. +!======================================================================= +! +! Dumps all values needed for a bgc restart +! +! author Elizabeth C. Hunke, LANL + + subroutine write_restart_bgc() + + use ice_domain_size, only: ncat + use ice_state, only: trcrn + use ice_restart,only: write_restart_field + + ! local variables + + logical (kind=log_kind) :: diag + + diag = .true. + + !----------------------------------------------------------------- + ! Skeletal layer BGC + !----------------------------------------------------------------- + + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_N_sk,:,:), & + 'ruf8','bgc_N_sk',ncat,diag) + if (tr_bgc_C_sk) & + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_C_sk,:,:), & + 'ruf8','bgc_C_sk',ncat,diag) + if (tr_bgc_chl_sk) & + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_chl_sk,:,:), & + 'ruf8','bgc_chl_sk',ncat,diag) + if (tr_bgc_Nit_sk) & + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_Nit_sk,:,:), & + 'ruf8','bgc_Nit_sk',ncat,diag) + if (tr_bgc_Am_sk) & + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_Am_sk,:,:), & + 'ruf8','bgc_Am_sk',ncat,diag) + if (tr_bgc_Sil_sk) & + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_Sil_sk,:,:), & + 'ruf8','bgc_Sil_sk',ncat,diag) + if (tr_bgc_DMSPp_sk) & + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_DMSPp_sk,:,:), & + 'ruf8','bgc_DMSPp_sk',ncat,diag) + if (tr_bgc_DMSPd_sk) & + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_DMSPd_sk,:,:), & + 'ruf8','bgc_DMSPd_sk',ncat,diag) + if (tr_bgc_DMS_sk) & + call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_DMS_sk,:,:), & + 'ruf8','bgc_DMS_sk',ncat,diag) + + !----------------------------------------------------------------- + ! Ocean BGC + !----------------------------------------------------------------- + + if (tr_bgc_N_sk) & + call write_restart_field(nu_dump_bgc,0,algalN,'ruf8','algalN',1,diag) + if (tr_bgc_Nit_sk) & + call write_restart_field(nu_dump_bgc,0,nit, 'ruf8','nit', 1,diag) + if (tr_bgc_Am_sk) & + call write_restart_field(nu_dump_bgc,0,amm, 'ruf8','amm', 1,diag) + if (tr_bgc_Sil_sk) & + call write_restart_field(nu_dump_bgc,0,sil, 'ruf8','sil', 1,diag) + if (tr_bgc_DMSPp_sk) & + call write_restart_field(nu_dump_bgc,0,dmsp, 'ruf8','dmsp', 1,diag) + if (tr_bgc_DMS_sk) & + call write_restart_field(nu_dump_bgc,0,dms, 'ruf8','dms', 1,diag) + + end subroutine write_restart_bgc + +!======================================================================= +! +! Reads all values needed for a bgc restart +! +! author Elizabeth C. 
Hunke, LANL + + subroutine read_restart_bgc() + + use ice_constants, only: field_loc_center, field_type_scalar + use ice_domain_size, only: ncat + use ice_state, only: trcrn + use ice_restart,only: read_restart_field + + ! local variables + + logical (kind=log_kind) :: diag + + diag = .true. + + !----------------------------------------------------------------- + ! Skeletal Layer BGC + !----------------------------------------------------------------- + + if (my_task == master_task) write(nu_diag,*) 'skl bgc restart' + + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_N_sk,:,:), & + 'ruf8','bgc_N_sk',ncat,diag,field_loc_center,field_type_scalar) + if (tr_bgc_C_sk) & + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_C_sk,:,:), & + 'ruf8','bgc_C_sk',ncat,diag,field_loc_center,field_type_scalar) + if (tr_bgc_chl_sk) & + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_chl_sk,:,:), & + 'ruf8','bgc_chl_sk',ncat,diag,field_loc_center,field_type_scalar) + if (tr_bgc_Nit_sk) & + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_Nit_sk,:,:), & + 'ruf8','bgc_Nit_sk',ncat,diag,field_loc_center,field_type_scalar) + if (tr_bgc_Am_sk) & + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_Am_sk,:,:), & + 'ruf8','bgc_Am_sk',ncat,diag,field_loc_center,field_type_scalar) + if (tr_bgc_Sil_sk) & + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_Sil_sk,:,:), & + 'ruf8','bgc_Sil_sk',ncat,diag,field_loc_center,field_type_scalar) + if(tr_bgc_DMSPp_sk) & + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_DMSPp_sk,:,:), & + 'ruf8','bgc_DMSPp_sk',ncat,diag,field_loc_center,field_type_scalar) + if (tr_bgc_DMSPd_sk) & + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_DMSPd_sk,:,:), & + 'ruf8','bgc_DMSPd_sk',ncat,diag,field_loc_center,field_type_scalar) + if (tr_bgc_DMS_sk) & + call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_DMS_sk,:,:), & + 'ruf8','bgc_DMS_sk',ncat,diag,field_loc_center,field_type_scalar) + + !----------------------------------------------------------------- + ! Ocean BGC + !----------------------------------------------------------------- + + if (my_task == master_task) write(nu_diag,*) 'mixed layer ocean bgc restart' + + if (tr_bgc_N_sk) & + call read_restart_field(nu_restart_bgc,0,algalN,'ruf8','algalN',& + 1,diag,field_loc_center,field_type_scalar) + if (tr_bgc_Nit_sk) & + call read_restart_field(nu_restart_bgc,0,nit ,'ruf8','nit' ,& + 1,diag,field_loc_center,field_type_scalar) + if (tr_bgc_Am_sk) & + call read_restart_field(nu_restart_bgc,0,amm ,'ruf8','amm' ,& + 1,diag,field_loc_center,field_type_scalar) + if (tr_bgc_Sil_sk) & + call read_restart_field(nu_restart_bgc,0,sil ,'ruf8','sil' ,& + 1,diag,field_loc_center,field_type_scalar) + if (tr_bgc_DMSPp_sk) & + call read_restart_field(nu_restart_bgc,0,dmsp ,'ruf8','dmsp' ,& + 1,diag,field_loc_center,field_type_scalar) + if (tr_bgc_DMS_sk) & + call read_restart_field(nu_restart_bgc,0,dms ,'ruf8','dms' ,& + 1,diag,field_loc_center,field_type_scalar) + + end subroutine read_restart_bgc + +!======================================================================= + + end module ice_algae + +!======================================================================= diff --git a/source/ice_atmo.F90 b/source/ice_atmo.F90 new file mode 100755 index 00000000..cce06397 --- /dev/null +++ b/source/ice_atmo.F90 @@ -0,0 +1,1005 @@ +! SVN:$Id: ice_atmo.F90 936 2015-03-17 15:46:44Z eclare $ +!======================================================================= + +! 
Atmospheric boundary interface (stability based flux calculations) + +! author: Elizabeth C. Hunke, LANL +! +! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb +! 2004: Block structure added by William Lipscomb +! 2006: Converted to free source form (F90) by Elizabeth Hunke +! 2013: Form drag routine added (neutral_drag_coeffs) by David Schroeder +! 2014: Adjusted form drag and added high frequency coupling by Andrew Roberts + + module ice_atmo + + use ice_kinds_mod + use ice_blocks, only: nx_block, ny_block + use ice_constants + use ice_domain_size, only: max_blocks + + implicit none + save + + private + public :: atmo_boundary_layer, atmo_boundary_const, neutral_drag_coeffs + + character (len=char_len), public :: & + atmbndy ! atmo boundary method, 'default' ('ccsm3') or 'constant' + + logical (kind=log_kind), public :: & + calc_strair, & ! if true, calculate wind stress components + formdrag, & ! if true, calculate form drag + highfreq ! if true, use high frequency coupling + + integer (kind=int_kind), public :: & + natmiter ! number of iterations for boundary layer calculations + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + Cdn_atm , & ! atm drag coefficient + Cdn_ocn , & ! ocn drag coefficient + ! form drag + hfreebd, & ! freeboard (m) + hdraft, & ! draft of ice + snow column (Stoessel1993) + hridge, & ! ridge height + distrdg, & ! distance between ridges + hkeel, & ! keel depth + dkeel, & ! distance between keels + lfloe, & ! floe length + dfloe, & ! distance between floes + Cdn_atm_skin, & ! neutral skin drag coefficient + Cdn_atm_floe, & ! neutral floe edge drag coefficient + Cdn_atm_pond, & ! neutral pond edge drag coefficient + Cdn_atm_rdg, & ! neutral ridge drag coefficient + Cdn_ocn_skin, & ! skin drag coefficient + Cdn_ocn_floe, & ! floe edge drag coefficient + Cdn_ocn_keel, & ! keel drag coefficient + Cdn_atm_ratio ! ratio drag atm / neutral drag atm + +!======================================================================= + + contains + +!======================================================================= + +! Compute coefficients for atm/ice fluxes, stress, and reference +! temperature and humidity. NOTE: \\ +! (1) All fluxes are positive downward, \\ +! (2) Here, tstar = (WT)/U*, and qstar = (WQ)/U*, \\ +! (3a) wind speeds should all be above a minimum speed (eg. 1.0 m/s). \\ +! +! ASSUME: +! The saturation humidity of air at T(K): qsat(T) (kg/m**3) +! +! Code originally based on CSM1 + + subroutine atmo_boundary_layer (nx_block, ny_block, & + sfctype, icells, & + indxi, indxj, & + Tsf, potT, & + uatm, vatm, & + wind, zlvl, & + Qa, rhoa, & + strx, stry, & + Tref, Qref, & + delt, delq, & + lhcoef, shcoef, & + Cdn_atm, & + Cdn_atm_ratio_n, & + uice, vice, & + Uref ) + + + use ice_fileunits, only: nu_diag + use ice_communicate, only: my_task, master_task + use ice_exit, only: abort_ice + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells that require atmo fluxes + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i and j indices + + character (len=3), intent(in) :: & + sfctype ! ice or ocean + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tsf , & ! surface temperature of ice or ocean + potT , & ! air potential temperature (K) + uatm , & ! x-direction wind speed (m/s) + vatm , & ! y-direction wind speed (m/s) + wind , & ! wind speed (m/s) + zlvl , & ! 
atm level height (m) + Qa , & ! specific humidity (kg/kg) + rhoa ! air density (kg/m^3) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + Cdn_atm ! neutral drag coefficient + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + Cdn_atm_ratio_n ! ratio drag coeff / neutral drag coeff (atm) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + strx , & ! x surface stress (N) + stry ! y surface stress (N) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + Tref , & ! reference height temperature (K) + Qref , & ! reference height specific humidity (kg/kg) + delt , & ! potential T difference (K) + delq , & ! humidity difference (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef ! transfer coefficient for latent heat + + real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(out) :: & + Uref ! reference height wind speed (m/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(in) :: & + uice , & ! x-direction ice speed (m/s) + vice ! y-direction ice speed (m/s) + + ! local variables + + logical (kind=log_kind), save :: & + firstpass=.true. ! high frequency checks on first pass + + integer (kind=int_kind) :: & + k , & ! iteration index + i, j , & ! horizontal indices + ij ! combined ij index + + real (kind=dbl_kind) :: & + TsfK , & ! surface temperature in Kelvin (K) + xqq , & ! temporary variable + psimh , & ! stability function at zlvl (momentum) + tau , & ! stress at zlvl + fac , & ! interpolation factor + al2 , & ! ln(z10 /zTrf) + psix2 , & ! stability function at zTrf (heat and water) + psimhs, & ! stable profile + ssq , & ! sat surface humidity (kg/kg) + qqq , & ! for qsat, dqsfcdt + TTT , & ! for qsat, dqsfcdt + qsat , & ! the saturation humidity of air (kg/m^3) + Lheat , & ! Lvap or Lsub, depending on surface type + umin ! minimum wind speed (m/s) + + real (kind=dbl_kind), dimension (icells) :: & + ustar , & ! ustar (m/s) + tstar , & ! tstar + qstar , & ! qstar + rdn , & ! sqrt of neutral exchange coefficient (momentum) + rhn , & ! sqrt of neutral exchange coefficient (heat) + ren , & ! sqrt of neutral exchange coefficient (water) + rd , & ! sqrt of exchange coefficient (momentum) + re , & ! sqrt of exchange coefficient (water) + rh , & ! sqrt of exchange coefficient (heat) + vmag , & ! surface wind magnitude (m/s) + alz , & ! ln(zlvl /z10) + thva , & ! virtual temperature (K) + cp , & ! specific heat of moist air + hol , & ! H (at zlvl ) over L + stable, & ! stability factor + psixh ! stability function at zlvl (heat and water) + + real (kind=dbl_kind), parameter :: & + cpvir = cp_wv/cp_air-c1, & ! defined as cp_wv/cp_air - 1. + zTrf = c2 ! reference height for air temp (m) + + ! local functions + real (kind=dbl_kind) :: & + xd , & ! dummy argument + psimhu, & ! unstable part of psimh + psixhu ! unstable part of psimx + + !------------------------------------------------------------ + ! Define functions + !------------------------------------------------------------ + + psimhu(xd) = log((c1+xd*(c2+xd))*(c1+xd*xd)/c8) & + - c2*atan(xd) + pih +!ech - c2*atan(xd) + 1.571_dbl_kind + + psixhu(xd) = c2 * log((c1 + xd*xd)/c2) + + al2 = log(zref/zTrf) + + !------------------------------------------------------------ + ! Initialize + !------------------------------------------------------------ + + if (highfreq) then + + ! high frequency coupling follows Roberts et al. 
(2014) + if (my_task==master_task.and.firstpass.and.sfctype(1:3)=='ice') then + if (present(uice) .and. present(vice)) then + write(nu_diag,*)'Using high frequency RASM atmospheric coupling' + else + call abort_ice('High frequency RASM coupling missing uice and vice') + endif + firstpass = .false. + endif + + umin = p5 ! minumum allowable wind-ice speed difference of 0.5 m/s + + else + + umin = c1 ! minumum allowable wind speed of 1m/s + + endif + + do j = 1, ny_block + do i = 1, nx_block + if (present(Uref)) then + Uref(i,j) = c0 + endif + Tref(i,j) = c0 + Qref(i,j) = c0 + delt(i,j) = c0 + delq(i,j) = c0 + shcoef(i,j) = c0 + lhcoef(i,j) = c0 + enddo + enddo + + !------------------------------------------------------------ + ! Compute turbulent flux coefficients, wind stress, and + ! reference temperature and humidity. + !------------------------------------------------------------ + + !------------------------------------------------------------ + ! define variables that depend on surface type + !------------------------------------------------------------ + + if (sfctype(1:3)=='ice') then + + qqq = qqqice ! for qsat + TTT = TTTice ! for qsat + Lheat = Lsub ! ice to vapor + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (highfreq) then + vmag(ij) = max(umin, sqrt( (uatm(i,j)-uice(i,j))**2 + & + (vatm(i,j)-vice(i,j))**2) ) + else + vmag(ij) = max(umin, wind(i,j)) + endif + + if (formdrag .and. Cdn_atm(i,j) > puny) then + rdn(ij) = sqrt(Cdn_atm(i,j)) + else + rdn(ij) = vonkar/log(zref/iceruf) ! neutral coefficient + Cdn_atm(i,j) = rdn(ij) * rdn(ij) + endif + + enddo ! ij + + elseif (sfctype(1:3)=='ocn') then + + qqq = qqqocn + TTT = TTTocn + Lheat = Lvap ! liquid to vapor + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + vmag(ij) = max(umin, wind(i,j)) + + rdn(ij) = sqrt(0.0027_dbl_kind/vmag(ij) & + + .000142_dbl_kind + .0000764_dbl_kind*vmag(ij)) + + enddo ! ij + + endif ! sfctype + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !------------------------------------------------------------ + ! define some more needed variables + !------------------------------------------------------------ + + TsfK = Tsf(i,j) + Tffresh ! surface temp (K) + qsat = qqq * exp(-TTT/TsfK) ! saturation humidity (kg/m^3) + ssq = qsat / rhoa(i,j) ! sat surf hum (kg/kg) + + thva(ij) = potT(i,j) * (c1 + zvir * Qa(i,j)) ! virtual pot temp (K) + delt(i,j) = potT(i,j) - TsfK ! pot temp diff (K) + delq(i,j) = Qa(i,j) - ssq ! spec hum dif (kg/kg) + alz(ij) = log(zlvl(i,j)/zref) + cp(ij) = cp_air*(c1 + cpvir*ssq) + + !------------------------------------------------------------ + ! first estimate of Z/L and ustar, tstar and qstar + !------------------------------------------------------------ + + ! neutral coefficients, z/L = 0.0 + rhn(ij) = rdn(ij) + ren(ij) = rdn(ij) + + ! ustar,tstar,qstar + ustar(ij) = rdn(ij) * vmag(ij) + tstar(ij) = rhn(ij) * delt(i,j) + qstar(ij) = ren(ij) * delq(i,j) + + enddo ! ij + + !------------------------------------------------------------ + ! iterate to converge on Z/L, ustar, tstar and qstar + !------------------------------------------------------------ + + do k = 1, natmiter + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! compute stability & evaluate all stability functions + hol(ij) = vonkar * gravit * zlvl(i,j) & + * (tstar(ij)/thva(ij) & + + qstar(ij)/(c1/zvir+Qa(i,j))) & + / ustar(ij)**2 + hol(ij) = sign( min(abs(hol(ij)),c10), hol(ij) ) + stable(ij) = p5 + sign(p5 , hol(ij)) + xqq = max(sqrt(abs(c1 - c16*hol(ij))) , c1) + xqq = sqrt(xqq) + + ! 
Jordan et al 1999 + psimhs = -(0.7_dbl_kind*hol(ij) & + + 0.75_dbl_kind*(hol(ij)-14.3_dbl_kind) & + * exp(-0.35_dbl_kind*hol(ij)) + 10.7_dbl_kind) + psimh = psimhs*stable(ij) & + + (c1 - stable(ij))*psimhu(xqq) + psixh(ij) = psimhs*stable(ij) & + + (c1 - stable(ij))*psixhu(xqq) + + ! shift all coeffs to measurement height and stability + rd(ij) = rdn(ij) / (c1+rdn(ij)/vonkar*(alz(ij)-psimh)) + rh(ij) = rhn(ij) / (c1+rhn(ij)/vonkar*(alz(ij)-psixh(ij))) + re(ij) = ren(ij) / (c1+ren(ij)/vonkar*(alz(ij)-psixh(ij))) + + ! update ustar, tstar, qstar using updated, shifted coeffs + ustar(ij) = rd(ij) * vmag(ij) + tstar(ij) = rh(ij) * delt(i,j) + qstar(ij) = re(ij) * delq(i,j) + + enddo ! ij + enddo ! end iteration + + if (calc_strair) then + + ! initialize + do j = 1, ny_block + do i = 1, nx_block + strx(i,j) = c0 + stry(i,j) = c0 + enddo + enddo + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (highfreq .and. sfctype(1:3)=='ice') then + + !------------------------------------------------------------ + ! momentum flux for RASM + !------------------------------------------------------------ + ! tau = rhoa(i,j) * rd * rd + ! strx = tau * |Uatm-U| * (uatm-u) + ! stry = tau * |Uatm-U| * (vatm-v) + !------------------------------------------------------------ + + tau = rhoa(i,j) * rd(ij) * rd(ij) ! not the stress at zlvl(i,j) + + ! high frequency momentum coupling following Roberts et al. (2014) + strx(i,j) = tau * sqrt((uatm(i,j)-uice(i,j))**2 + & + (vatm(i,j)-vice(i,j))**2) * & + (uatm(i,j)-uice(i,j)) + stry(i,j) = tau * sqrt((uatm(i,j)-uice(i,j))**2 + & + (vatm(i,j)-vice(i,j))**2) * & + (vatm(i,j)-vice(i,j)) + + else + + !------------------------------------------------------------ + ! momentum flux + !------------------------------------------------------------ + ! tau = rhoa(i,j) * ustar * ustar + ! strx = tau * uatm(i,j) / vmag + ! stry = tau * vatm(i,j) / vmag + !------------------------------------------------------------ + + tau = rhoa(i,j) * ustar(ij) * rd(ij) ! not the stress at zlvl(i,j) + strx(i,j) = tau * uatm(i,j) + stry(i,j) = tau * vatm(i,j) + + endif + + Cdn_atm_ratio_n(i,j) = rd(ij) * rd(ij) / rdn(ij) / rdn(ij) + + enddo ! ij + + endif ! calc_strair + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !------------------------------------------------------------ + ! coefficients for turbulent flux calculation + !------------------------------------------------------------ + ! add windless coefficient for sensible heat flux + ! as in Jordan et al (JGR, 1999) + !------------------------------------------------------------ + + shcoef(i,j) = rhoa(i,j) * ustar(ij) * cp(ij) * rh(ij) + c1 + lhcoef(i,j) = rhoa(i,j) * ustar(ij) * Lheat * re(ij) + + !------------------------------------------------------------ + ! Compute diagnostics: 2m ref T, Q, U + !------------------------------------------------------------ + + hol(ij) = hol(ij)*zTrf/zlvl(i,j) + xqq = max( c1, sqrt(abs(c1-c16*hol(ij))) ) + xqq = sqrt(xqq) + psix2 = -c5*hol(ij)*stable(ij) + (c1-stable(ij))*psixhu(xqq) + fac = (rh(ij)/vonkar) & + * (alz(ij) + al2 - psixh(ij) + psix2) + Tref(i,j)= potT(i,j) - delt(i,j)*fac + Tref(i,j)= Tref(i,j) - p01*zTrf ! pot temp to temp correction + fac = (re(ij)/vonkar) & + * (alz(ij) + al2 - psixh(ij) + psix2) + Qref(i,j)= Qa(i,j) - delq(i,j)*fac + if (present(Uref)) then + if (highfreq .and. 
sfctype(1:3)=='ice') then + Uref(i,j) = sqrt((uatm(i,j)-uice(i,j))**2 + & + (vatm(i,j)-vice(i,j))**2) * & + rd(ij) / rdn(ij) + else + Uref(i,j) = vmag(ij) * rd(ij) / rdn(ij) + endif + endif ! (present(Uref)) + enddo ! ij + + end subroutine atmo_boundary_layer + +!======================================================================= + +! Compute coefficients for atm/ice fluxes, stress +! NOTE: \\ +! (1) all fluxes are positive downward, \\ +! (2) reference temperature and humidity are NOT computed + + subroutine atmo_boundary_const (nx_block, ny_block, & + sfctype, icells, & + indxi, indxj, & + uatm, vatm, & + wind, rhoa, & + strx, stry, & + Tsf, potT, & + Qa, & + delt, delq, & + lhcoef, shcoef, & + Cdn_atm) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells that require atmo fluxes + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i and j indices + + character (len=3), intent(in) :: & + sfctype ! ice or ocean + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tsf , & ! surface temperature of ice or ocean + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + uatm , & ! x-direction wind speed (m/s) + vatm , & ! y-direction wind speed (m/s) + wind , & ! wind speed (m/s) + rhoa ! air density (kg/m^3) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Cdn_atm ! neutral drag coefficient + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout):: & + strx , & ! x surface stress (N) + stry ! y surface stress (N) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & + delt , & ! potential T difference (K) + delq , & ! humidity difference (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef ! transfer coefficient for latent heat + + ! local variables + + integer (kind=int_kind) :: & + i, j, & ! horizontal indices + ij ! combined ij index + + real (kind=dbl_kind) :: & + TsfK, & ! surface temperature in Kelvin (K) + qsat, & ! the saturation humidity of air (kg/m^3) + ssq , & ! sat surface humidity (kg/kg) + tau, & ! stress at zlvl + Lheat ! Lvap or Lsub, depending on surface type + + !------------------------------------------------------------ + ! Initialize + !------------------------------------------------------------ + + do j = 1, ny_block + do i = 1, nx_block + delt(i,j) = c0 + delq(i,j) = c0 + shcoef(i,j) = c0 + lhcoef(i,j) = c0 + enddo + enddo + + if (calc_strair) then + + do j = 1, ny_block + do i = 1, nx_block + strx(i,j) = c0 + stry(i,j) = c0 + enddo + enddo + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !------------------------------------------------------------ + ! momentum flux + !------------------------------------------------------------ + tau = rhoa(i,j) * 0.0012_dbl_kind * wind(i,j) +!AOMIP tau = rhoa(i,j) * (1.10_dbl_kind + c4*p01*wind(i,j)) & +!AOMIP * wind(i,j) * p001 + strx(i,j) = tau * uatm(i,j) + stry(i,j) = tau * vatm(i,j) + + enddo ! ij + + endif ! calc_strair + + !------------------------------------------------------------ + ! define variables that depend on surface type + !------------------------------------------------------------ + + if (sfctype(1:3)=='ice') then + Lheat = Lsub ! ice to vapor + elseif (sfctype(1:3)=='ocn') then + Lheat = Lvap ! liquid to vapor + endif ! 
sfctype + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !------------------------------------------------------------ + ! potential temperature and specific humidity differences + !------------------------------------------------------------ + + TsfK = Tsf(i,j) + Tffresh ! surface temp (K) + qsat = qqqocn * exp(-TTTocn/TsfK) ! sat humidity (kg/m^3) + ssq = qsat / rhoa(i,j) ! sat surf hum (kg/kg) + + delt(i,j)= potT(i,j) - TsfK ! pot temp diff (K) + delq(i,j)= Qa(i,j) - ssq ! spec hum dif (kg/kg) + + !------------------------------------------------------------ + ! coefficients for turbulent flux calculation + !------------------------------------------------------------ + + shcoef(i,j) = (1.20e-3_dbl_kind)*cp_air*rhoa(i,j)*wind(i,j) + lhcoef(i,j) = (1.50e-3_dbl_kind)*Lheat *rhoa(i,j)*wind(i,j) + + enddo ! ij + + end subroutine atmo_boundary_const + +!======================================================================= + +! Neutral drag coefficients for ocean and atmosphere also compute the +! intermediate necessary variables ridge height, distance, floe size +! based upon Tsamados et al. (2014), JPO, DOI: 10.1175/JPO-D-13-0215.1. +! Places where the code varies from the paper are commented. +! +! authors: Michel Tsamados, CPOM +! David Schroeder, CPOM +! +! changes: Andrew Roberts, NPS (RASM/CESM coupling and documentation) + + + subroutine neutral_drag_coeffs (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + apnd, hpnd, & + ipnd, & + alvl, vlvl, & + aice, vice, & + vsno, aicen, & + vicen, vsnon, & + Cdn_ocn, Cdn_ocn_skin, & + Cdn_ocn_floe, Cdn_ocn_keel,& + Cdn_atm, Cdn_atm_skin, & + Cdn_atm_floe, Cdn_atm_pond,& + Cdn_atm_rdg, hfreebd, & + hdraft, hridge, & + distrdg, hkeel, & + dkeel, lfloe, & + dfloe, ncat) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + ncat + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! compressed i and j indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + apnd ,& ! melt pond fraction of sea ice + hpnd ,& ! mean melt pond depth over sea ice + ipnd ,& ! mean ice pond depth over sea ice in cat n + alvl ,& ! level ice area fraction (of grid cell ?) + vlvl ! level ice mean thickness + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aice , & ! concentration of ice + vice , & ! volume per unit area of ice + vsno ! volume per unit area of snow + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(in) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + hfreebd , & ! freeboard (m) + hdraft , & ! draught of ice + snow column (Stoessel1993) + hridge , & ! ridge height + distrdg , & ! distance between ridges + hkeel , & ! keel depth + dkeel , & ! distance between keels + lfloe , & ! floe length (m) + dfloe , & ! distance between floes + Cdn_ocn , & ! ocean-ice neutral drag coefficient + Cdn_ocn_skin , & ! drag coefficient due to skin drag + Cdn_ocn_floe , & ! drag coefficient due to floe edges + Cdn_ocn_keel , & ! drag coefficient dur to keels + Cdn_atm , & ! ice-atmosphere drag coefficient + Cdn_atm_skin , & ! drag coefficient due to skin drag + Cdn_atm_floe , & ! drag coefficient due to floe edges + Cdn_atm_pond , & ! 
drag coefficient dur to ponds + Cdn_atm_rdg ! drag coefficient dur to ridges + + real (kind=dbl_kind), parameter :: & + ! [,] = range of values that can be tested + csw = 0.002_dbl_kind ,&! ice-ocn drag coefficient [0.0005,0.005] + csa = 0.0005_dbl_kind,&! ice-air drag coefficient [0.0001,0.001] + dragia = 0.0012_dbl_kind,&! ice-air drag coefficient [0.0005,0.002] + mrdg = c20 ,&! screening effect see Lu2011 [5,50] + mrdgo = c10 ,&! screening effect see Lu2011 [5,50] + beta = p5 ,&! power exponent appearing in astar and + ! L=Lmin(A*/(A*-A))**beta [0,1] + Lmin = c8 ,&! min length of floe (m) [5,100] + Lmax = 300._dbl_kind ,&! max length of floe (m) [30,3000] + Lmoy = 300._dbl_kind ,&! average length of floe (m) [30,1000] + cfa = p2 ,&! Eq. 12 ratio of local from drag over + ! geometrical parameter [0,1] + cfw = p2 ,&! Eq. 15 ratio of local from drag over + ! geometrical parameter [0,1] + cpa = p2 ,&! Eq. 16 ratio of local form drag over + ! geometrical parameter [0,1] + cra = p2 ,&! Eq. 10 local form drag coefficient [0,1] + crw = p2 ,&! Eq. 11 local form drag coefficient [0,1] + sl = 22._dbl_kind ,&! Sheltering parameter Lupkes2012 [10,30] + lpmin = 2.26_dbl_kind ,&! min pond length (m) see Eq. 17 [1,10] + lpmax = 24.63_dbl_kind ,&! max pond length (m) see Eq. 17 [10,100] + tanar = p4 ,&! 0.25 sail slope = 14 deg [0.4,1] + tanak = p4 ,&! 0.58 keel slope = 30 deg [0.4,1] + invsqrte = 0.6065_dbl_kind,&! + phir = 0.8_dbl_kind ,&! porosity of ridges [0.4,1] + phik = 0.8_dbl_kind ,&! porosity of keels [0.4,1] + hkoverhr = c4 ,&! hkeel/hridge ratio [4,8] + dkoverdr = c1 ,&! dkeel/distrdg ratio [1,5] + sHGB = 0.18_dbl_kind ,&! Lupkes2012 Eq. 28, Hanssen1988, + ! Steele1989 suggest instead 0.18 + alpha2 = c0 ,&! weight functions for area of + beta2 = p75 ! ridged ice [0,1] + + integer (kind=int_kind) :: & + icells, & ! number of cells that require atmo fluxes + n , & ! category index + i, j , & ! horizontal indices + ij ! combined ij index + + real (kind=dbl_kind) :: & + astar, & ! new constant for form drag + ctecaf, & ! constante + ctecwf, & ! constante + sca, & ! wind attenuation function + scw, & ! ocean attenuation function + lp, & ! pond length (m) + ctecar, & + ctecwk, & + ai, aii, & ! ice area and its inverse + tmp1 ! temporary + + real (kind=dbl_kind) :: & + apond , & ! melt pond fraction of grid cell + vpond , & ! mean melt pond depth over grid cell + ipond , & ! mean melt pond ice depth over grid cell + ardg , & ! ridged ice area fraction of grid cell + vrdg ! ridged ice mean thickness + + real (kind=dbl_kind), parameter :: & + ocnruf = 0.000327_dbl_kind, & ! ocean surface roughness (m) + ocnrufi = c1/ocnruf, & ! inverse ocean roughness + icerufi = c1/iceruf ! inverse ice roughness + + real (kind=dbl_kind), parameter :: & + camax = 0.02_dbl_kind , & ! Maximum for atmospheric drag + cwmax = 0.06_dbl_kind ! Maximum for ocean drag + + astar = c1/(c1-(Lmin/Lmax)**(c1/beta)) + + + !----------------------------------------------------------------- + ! 
Initialize across entire grid + !----------------------------------------------------------------- + + hfreebd(:,:)=c0 + hdraft (:,:)=c0 + hridge (:,:)=c0 + distrdg(:,:)=c0 + hkeel (:,:)=c0 + dkeel (:,:)=c0 + lfloe (:,:)=c0 + dfloe (:,:)=c0 + Cdn_ocn(:,:)=dragio + Cdn_ocn_skin(:,:)=c0 + Cdn_ocn_floe(:,:)=c0 + Cdn_ocn_keel(:,:)=c0 + Cdn_atm(:,:) = (vonkar/log(zref/iceruf)) * (vonkar/log(zref/iceruf)) + Cdn_atm_skin(:,:)=c0 + Cdn_atm_floe(:,:)=c0 + Cdn_atm_pond(:,:)=c0 + Cdn_atm_rdg (:,:)=c0 + + !----------------------------------------------------------------- + ! Identify cells with nonzero ice area + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j) > p001) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !------------------------------------------------------------ + ! Initialize inside loop where concentration > 0.1% + !------------------------------------------------------------ + + Cdn_atm_skin(i,j) = csa + Cdn_ocn_skin(i,j) = csw + + ai = aice(i,j) + aii = c1/ai + + !------------------------------------------------------------ + ! Compute average quantities + !------------------------------------------------------------ + + ! ponds + apond = c0 + vpond = c0 + ipond = c0 + do n = 1,ncat + ! area of pond per unit area of grid cell + apond = apond+apnd(i,j,n)*aicen(i,j,n) + ! volume of pond per unit area of grid cell + vpond = vpond+apnd(i,j,n)*hpnd(i,j,n)*aicen(i,j,n) + ! volume of lid per unit area of grid cell + ipond = ipond+apnd(i,j,n)*ipnd(i,j,n)*aicen(i,j,n) + enddo + + ! draft and freeboard (see Eq. 27) + hdraft(i,j) = (rhoi*vice(i,j)+rhos*vsno(i,j))*aii/rhow ! without ponds + hfreebd(i,j) = (vice(i,j)+vsno(i,j))*aii-hdraft(i,j) + + ! Do not allow draft larger than ice thickness (see Eq. 28) + if (hdraft(i,j) >= vice(i,j)*aii) then + ! replace excess snow with ice so hi~=hdraft + hfreebd(i,j) = (hdraft(i,j)*ai*(c1-rhoi/rhow) + & + (vsno(i,j)-(vice(i,j)-hdraft(i,j)*ai)*rhoi/rhos) * & + (c1-rhos/rhow))*aii ! Stoessel1993 + endif + + ! floe size parameterization see Eq. 13 + lfloe(i,j) = Lmin * (astar / (astar - ai))**beta + + ! distance between floes parameterization see Eq. 14 + dfloe(i,j) = lfloe(i,j) * (c1/sqrt(ai) - c1) + + ! Relate ridge height and distance between ridges to + ! ridged ice area fraction and ridged ice mean thickness + ! Assumes total volume of ridged ice is split into ridges and keels. + ! Then assume total ridges volume over total area of ridges = + ! volume of one average ridge / area of one average ridge + ! Same for keels. + + ardg=c0 + vrdg=c0 + do n=1,ncat + ! ridged ice area fraction over grid cell + ardg=ardg+(c1-alvl(i,j,n))*aicen(i,j,n) + ! total ridged ice volume per unit grid cell area + vrdg=vrdg+(c1-vlvl(i,j,n))*vicen(i,j,n) + enddo + + ! hridge, hkeel, distrdg and dkeel estimates from CICE for + ! simple triangular geometry + + if (ardg > p001) then + + ! see Eq. 25 and Eq. 26 + hridge(i,j) = vrdg/ardg*c2 & + * (alpha2+beta2*hkoverhr/dkoverdr*tanar/tanak) & + / (phir*c1+phik*tanar/tanak*hkoverhr**c2/dkoverdr) + distrdg(i,j) = c2*hridge(i,j)*ai/ardg & + * (alpha2/tanar+beta2/tanak*hkoverhr/dkoverdr) + hkeel(i,j) = hkoverhr * hridge(i,j) + dkeel(i,j) = dkoverdr * distrdg(i,j) + + ! Use the height of ridges relative to the mean freeboard of + ! the pack. 
Therefore skin drag and ridge drag differ in + ! this code as compared to Tsamados et al. (2014) equations + ! 10 and 18, which reference both to sea level. + tmp1 = max(c0,hridge(i,j) - hfreebd(i,j)) + + !------------------------------------------------------------ + ! Skin drag (atmo) + !------------------------------------------------------------ + + Cdn_atm_skin(i,j) = csa*(c1 - mrdg*tmp1/distrdg(i,j)) + Cdn_atm_skin(i,j) = max(min(Cdn_atm_skin(i,j),camax),c0) + + !------------------------------------------------------------ + ! Ridge effect (atmo) + !------------------------------------------------------------ + + if (tmp1 > puny) then + sca = c1 - exp(-sHGB*distrdg(i,j)/tmp1) ! see Eq. 9 + ctecar = cra*p5 + Cdn_atm_rdg(i,j) = ctecar*tmp1/distrdg(i,j)*sca* & + (log(tmp1*icerufi)/log(zref*icerufi))**c2 + Cdn_atm_rdg(i,j) = min(Cdn_atm_rdg(i,j),camax) + endif + + ! Use the depth of keels relative to the mean draft of + ! the pack. Therefore skin drag and keel drag differ in + ! this code as compared to Tsamados et al. (2014) equations + ! 11 and 19, which reference both to sea level. In some + ! circumstances, hkeel can be less than hdraft because hkoverhr + ! is constant, and max(c0,...) temporarily addresses this. + tmp1 = max(c0,hkeel(i,j) - hdraft(i,j)) + + !------------------------------------------------------------ + ! Skin drag bottom ice (ocean) + !------------------------------------------------------------ + + Cdn_ocn_skin(i,j) = csw * (c1 - mrdgo*tmp1/dkeel(i,j)) + Cdn_ocn_skin(i,j) = max(min(Cdn_ocn_skin(i,j),cwmax), c0) + + !------------------------------------------------------------ + ! Keel effect (ocean) + !------------------------------------------------------------ + + if (tmp1 > puny) then + scw = c1 - exp(-sHGB*dkeel(i,j)/tmp1) + ctecwk = crw*p5 + Cdn_ocn_keel(i,j) = ctecwk*tmp1/dkeel(i,j)*scw* & + (log(tmp1*icerufi)/log(zref*icerufi))**c2 + Cdn_ocn_keel(i,j) = max(min(Cdn_ocn_keel(i,j),cwmax),c0) + endif + + endif ! ardg > 0.001 + + !------------------------------------------------------------ + ! Floe edge drag effect (atmo) + !------------------------------------------------------------ + + if (hfreebd(i,j) > puny) then + sca = c1 - exp(-sl*beta*(c1-ai)) + ctecaf = cfa*p5*(log(hfreebd(i,j)*ocnrufi)/log(zref*ocnrufi))**c2*sca + Cdn_atm_floe(i,j) = ctecaf * hfreebd(i,j) / lfloe(i,j) + Cdn_atm_floe(i,j) = max(min(Cdn_atm_floe(i,j),camax),c0) + endif + + !------------------------------------------------------------ + ! Pond edge effect (atmo) + !------------------------------------------------------------ + + if (hfreebd(i,j) > puny) then + sca = (apond)**(c1/(zref*beta)) + lp = lpmin*(1-apond)+lpmax*apond + Cdn_atm_pond(i,j) = cpa*p5*sca*apond*hfreebd(i,j)/lp & + * (log(hfreebd(i,j)*ocnrufi)/log(zref*ocnrufi))**c2 + Cdn_atm_pond(i,j) = min(Cdn_atm_pond(i,j),camax) + endif + + !------------------------------------------------------------ + ! Floe edge drag effect (ocean) + !------------------------------------------------------------ + + if (hdraft(i,j) > puny) then + scw = c1 - exp(-sl*beta*(c1-ai)) + ctecwf = cfw*p5*(log(hdraft(i,j)*ocnrufi)/log(zref*ocnrufi))**c2*scw + Cdn_ocn_floe(i,j) = ctecwf * hdraft(i,j) / lfloe(i,j) + Cdn_ocn_floe(i,j) = max(min(Cdn_ocn_floe(i,j),cwmax),c0) + endif + + !------------------------------------------------------------ + ! 
Total drag coefficient (atmo) + !------------------------------------------------------------ + + Cdn_atm(i,j) = Cdn_atm_skin(i,j) + Cdn_atm_floe(i,j) + & + Cdn_atm_pond(i,j) + Cdn_atm_rdg(i,j) + Cdn_atm(i,j) = min(Cdn_atm(i,j),camax) + + !------------------------------------------------------------ + ! Total drag coefficient (ocean) + !------------------------------------------------------------ + + Cdn_ocn(i,j) = Cdn_ocn_skin(i,j) + Cdn_ocn_floe(i,j) + & + Cdn_ocn_keel(i,j) + Cdn_ocn(i,j) = min(Cdn_ocn(i,j),cwmax) + + enddo ! ij + + end subroutine neutral_drag_coeffs + +!======================================================================= + + end module ice_atmo + +!======================================================================= diff --git a/source/ice_blocks.F90 b/source/ice_blocks.F90 new file mode 100755 index 00000000..b9ce5a79 --- /dev/null +++ b/source/ice_blocks.F90 @@ -0,0 +1,872 @@ +! SVN:$Id: ice_blocks.F90 700 2013-08-15 19:17:39Z eclare $ +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| + + module ice_blocks + +! This module contains data types and tools for decomposing a global +! horizontal domain into a set of blocks. It contains a data type +! for describing each block and contains routines for creating and +! querying the block decomposition for a global domain. +! +! author: Phil Jones, LANL +! Oct. 2004: Adapted from POP by William H. Lipscomb, LANL + + use ice_kinds_mod + use ice_domain_size, only: block_size_x, block_size_y + use ice_exit, only: abort_ice + + implicit none + private + save + + type, public :: block ! block data type + integer (int_kind) :: & + block_id ,&! global block number + local_id ,&! local address of block in current distrib + ilo, ihi, jlo, jhi ,&! begin, end indices for physical domain + iblock, jblock ! cartesian i,j position for block + + logical (log_kind) :: & + tripole, & ! flag is true if block is at tripole bndy + tripoleTFlag ! tripole boundary is a T-fold + + integer (int_kind), dimension(:), pointer :: & + i_glob, j_glob ! global domain location for each point + end type + + public :: create_blocks ,& + get_block ,& + get_block_parameter ,& + ice_blocksGetNbrID + + integer (int_kind), parameter, public :: & + nghost = 1 ! number of ghost cells around each block + + integer (int_kind), parameter, public :: &! size of block domain in + nx_block = block_size_x + 2*nghost, &! x,y dir including ghost + ny_block = block_size_y + 2*nghost ! cells + + ! predefined directions for neighbor id routine + ! Note: the directions that are commented out are implemented in + ! POP but not in CICE. If the tripole cut were in the south + ! instead of the north, these would need to be used (and also + ! implemented in ice_boundary.F90). + integer (int_kind), parameter, public :: & + ice_blocksNorth = 1, & ! (i ,j+1) + ice_blocksSouth = 2, & ! (i ,j-1) + ice_blocksEast = 3, & ! (i+1,j ) + ice_blocksWest = 4, & ! (i-1,j ) + ice_blocksNorthEast = 5, & ! (i+1,j+1) + ice_blocksNorthWest = 6, & ! (i-1,j+1) + ice_blocksSouthEast = 7, & ! (i+1,j-1) + ice_blocksSouthWest = 8 ! (i-1,j-1) + integer (int_kind), parameter, public :: & +! ice_blocksNorth2 = 9, & ! (i ,j+2) +! ice_blocksSouth2 = 10, & ! (i ,j-2) + ice_blocksEast2 = 11, & ! (i+2,j ) + ice_blocksWest2 = 12 ! (i-2,j ) +! ice_blocksNorthEast2 = 13, & ! (i+2,j+2) +! ice_blocksNorthWest2 = 14, & ! (i-2,j+2) +! ice_blocksSouthEast2 = 15, & ! (i+2,j-2) +! ice_blocksSouthWest2 = 16 ! (i-2,j-2) + integer (int_kind), parameter, public :: & + ice_blocksEastNorthEast = 17, & ! 
(i+2,j+1) +! ice_blocksEastSouthEast = 18, & ! (i+2,j-1) + ice_blocksWestNorthWest = 19 ! (i-2,j+1) +! ice_blocksWestSouthWest = 20, & ! (i-2,j-1) +! ice_blocksNorthNorthEast = 21, & ! (i+1,j-2) +! ice_blocksSouthSouthEast = 22, & ! (i+1,j-2) +! ice_blocksNorthNorthWest = 23, & ! (i-1,j+2) +! ice_blocksSouthSouthWest = 24 ! (i-1,j-2) + + integer (int_kind), public :: & + nblocks_tot ,&! total number of blocks in decomposition + nblocks_x ,&! tot num blocks in i direction + nblocks_y ! tot num blocks in j direction + +!----------------------------------------------------------------------- +! +! module private data +! +!----------------------------------------------------------------------- + + type (block), dimension(:), allocatable :: & + all_blocks ! block information for all blocks in domain + + integer (int_kind), dimension(:,:),allocatable :: & + all_blocks_ij ! block index stored in Cartesian order + ! useful for determining block index + ! of neighbor blocks + + integer (int_kind), dimension(:,:), allocatable, target :: & + i_global, &! global i index for each point in each block + j_global ! global j index for each point in each block + +!*********************************************************************** + +contains + +!*********************************************************************** + + subroutine create_blocks(nx_global, ny_global, ew_boundary_type, & + ns_boundary_type) + +! This subroutine decomposes the global domain into blocks and +! fills the data structures with all the necessary block information. + + use ice_fileunits, only: nu_diag + use ice_communicate, only: my_task, master_task + + integer (int_kind), intent(in) :: & + nx_global, ny_global ! global domain size in x,y + + character (*), intent(in) :: & + ew_boundary_type, &! type of boundary in logical east-west dir + ns_boundary_type ! type of boundary in logical north-south dir + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + i, j, n ,&! loop indices + iblock, jblock ,&! block loop indices + is, ie, js, je ! temp start, end indices + + logical (log_kind) :: dbug + +!---------------------------------------------------------------------- +! +! compute number of blocks and cartesian decomposition +! if the requested block size does not divide the global domain +! size evenly, add additional block space to accomodate padding +! +!---------------------------------------------------------------------- + + nblocks_x = (nx_global-1)/block_size_x + 1 + nblocks_y = (ny_global-1)/block_size_y + 1 + nblocks_tot = nblocks_x*nblocks_y + +!---------------------------------------------------------------------- +! +! allocate block arrays +! +!---------------------------------------------------------------------- + + allocate(all_blocks(nblocks_tot)) + allocate(i_global(nx_block,nblocks_tot), & + j_global(ny_block,nblocks_tot)) + allocate(all_blocks_ij(nblocks_x,nblocks_y)) + +!---------------------------------------------------------------------- +! +! fill block data structures for all blocks in domain +! +!---------------------------------------------------------------------- + + n = 0 + do jblock=1,nblocks_y + js = (jblock-1)*block_size_y + 1 + if (js > ny_global) call abort_ice(& + 'ice: create_blocks: Bad block decomp: ny_block too large?') + je = js + block_size_y - 1 + if (je > ny_global) je = ny_global ! pad array + + do iblock=1,nblocks_x + n = n + 1 ! 
global block id + + is = (iblock-1)*block_size_x + 1 + if (is > nx_global) call abort_ice(& + 'ice: create_blocks: Bad block decomp: nx_block too large?') + ie = is + block_size_x - 1 + if (ie > nx_global) ie = nx_global + + all_blocks(n)%block_id = n + all_blocks(n)%iblock = iblock + all_blocks(n)%jblock = jblock + all_blocks(n)%ilo = nghost + 1 + all_blocks(n)%jlo = nghost + 1 + all_blocks(n)%ihi = nx_block - nghost ! default value + all_blocks(n)%jhi = ny_block - nghost ! default value + + if (jblock == nblocks_y .and. & + (ns_boundary_type == 'tripole' .or. & + ns_boundary_type == 'tripoleT')) then + all_blocks(n)%tripole = .true. + else + all_blocks(n)%tripole = .false. + endif + all_blocks(n)%tripoleTFlag = (ns_boundary_type == 'tripoleT') + + all_blocks_ij(iblock,jblock) = n + + do j=1,ny_block + j_global(j,n) = js - nghost + j - 1 + + !*** southern ghost cells + + if (j_global(j,n) < 1) then + select case (ns_boundary_type) + case ('cyclic') + j_global(j,n) = j_global(j,n) + ny_global + case ('open') + j_global(j,n) = nghost - j + 1 + case ('closed') + j_global(j,n) = 0 + case ('tripole') + j_global(j,n) = nghost - j + 1 ! open + case ('tripoleT') + j_global(j,n) = -j_global(j,n) + 1 ! open + case default + call abort_ice(& + 'ice: create_blocks: unknown n-s bndy type') + end select + endif + + !*** padding required + + if (j_global(j,n) > ny_global + nghost) then + j_global(j,n) = 0 ! padding + + !*** northern ghost cells + + else if (j_global(j,n) > ny_global) then + select case (ns_boundary_type) + case ('cyclic') + j_global(j,n) = j_global(j,n) - ny_global + case ('open') + j_global(j,n) = 2*ny_global - j_global(j,n) + 1 + case ('closed') + j_global(j,n) = 0 + case ('tripole') + j_global(j,n) = -j_global(j,n) + case ('tripoleT') + j_global(j,n) = -j_global(j,n) + case default + call abort_ice(& + 'ice: create_blocks: unknown n-s bndy type') + end select + + !*** set last physical point if padded domain + + else if (j_global(j,n) == ny_global .and. & + j > all_blocks(n)%jlo .and. & + j < all_blocks(n)%jhi) then + all_blocks(n)%jhi = j ! last physical point in padded domain + endif + end do + + all_blocks(n)%j_glob => j_global(:,n) + + do i=1,nx_block + i_global(i,n) = is - nghost + i - 1 + + !*** western ghost cells + + if (i_global(i,n) < 1) then + select case (ew_boundary_type) + case ('cyclic') + i_global(i,n) = i_global(i,n) + nx_global + case ('open') + i_global(i,n) = nghost - i + 1 + case ('closed') + i_global(i,n) = 0 + case default + call abort_ice(& + 'ice: create_blocks: unknown e-w bndy type') + end select + endif + + !*** padded domain - fill padded region with zero + + if (i_global(i,n) > nx_global + nghost) then + i_global(i,n) = 0 + + !*** eastern ghost cells + + else if (i_global(i,n) > nx_global) then + select case (ew_boundary_type) + case ('cyclic') + i_global(i,n) = i_global(i,n) - nx_global + case ('open') + i_global(i,n) = 2*nx_global - i_global(i,n) + 1 + case ('closed') + i_global(i,n) = 0 + case default + call abort_ice(& + 'ice: create_blocks: unknown e-w bndy type') + end select + + !*** last physical point in padded domain + + else if (i_global(i,n) == nx_global .and. & + i > all_blocks(n)%ilo .and. & + i < all_blocks(n)%ihi) then + all_blocks(n)%ihi = i + endif + end do + + all_blocks(n)%i_glob => i_global(:,n) + + end do + end do + +! dbug = .true. + dbug = .false. 
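+! dbug enables the optional diagnostic print below: when it is set to
+! .true., the master task writes each block's global id and its
+! cartesian (iblock,jblock) position to nu_diag, which can help when
+! checking a new block decomposition.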
+ if (dbug) then + if (my_task == master_task) then + write(nu_diag,*) 'block i,j locations' + do n = 1, nblocks_tot + write(nu_diag,*) 'block id, iblock, jblock:', & + all_blocks(n)%block_id, & + all_blocks(n)%iblock, & + all_blocks(n)%jblock + enddo + endif + endif + +!---------------------------------------------------------------------- + +end subroutine create_blocks + +!*********************************************************************** + + function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & + result (nbrID) + +! This function returns the block id of a neighboring block in a +! requested direction. Directions: +! ice\_blocksNorth (i ,j+1) +! ice\_blocksSouth (i ,j-1) +! ice\_blocksEast (i+1,j ) +! ice\_blocksWest (i-1,j ) +! ice\_blocksNorthEast (i+1,j+1) +! ice\_blocksNorthWest (i-1,j+1) +! ice\_blocksSouthEast (i ,j-1) +! ice\_blocksSouthWest (i-1,j-1) +! ice\_blocksNorth2 (i ,j+2) +! ice\_blocksSouth2 (i ,j-2) +! ice\_blocksEast2 (i+2,j ) +! ice\_blocksWest2 (i-2,j ) +! ice\_blocksNorthEast2 (i+2,j+2) +! ice\_blocksNorthWest2 (i-2,j+2) +! ice\_blocksSouthEast2 (i+2,j-2) +! ice\_blocksSouthWest2 (i-2,j-2) +! ice\_blocksEastNorthEast (i+2,j+1) +! ice\_blocksEastSouthEast (i+2,j-1) +! ice\_blocksWestNorthWest (i-2,j+1) +! ice\_blocksWestSouthWest (i-2,j-1) +! ice\_blocksNorthNorthEast (i+1,j-2) +! ice\_blocksSouthSouthEast (i+1,j-2) +! ice\_blocksNorthNorthWest (i-1,j+2) +! ice\_blocksSouthSouthWest (i-1,j-2) +! + + integer (int_kind), intent(in) :: & + blockID, &! id of block for which neighbor id requested + direction ! direction for which to look for neighbor - + ! must be one of the predefined module + ! variables for block direction + + character (*), intent(in) :: & + iBoundary, &! determines what to do at edges of domain + jBoundary ! options are - open, closed, cyclic, tripole, tripoleT + + integer (int_kind) :: & + nbrID ! block ID of neighbor in requested dir + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + iBlock, jBlock, &! i,j block location of current block + inbr, jnbr ! i,j block location of neighboring block + +!---------------------------------------------------------------------- +! +! retrieve info for current block +! +!---------------------------------------------------------------------- + + call get_block_parameter(blockID, iblock=iBlock, jblock=jBlock) + +!---------------------------------------------------------------------- +! +! compute i,j block location of neighbor +! +!---------------------------------------------------------------------- + + select case(direction) + + case (ice_blocksNorth) + + inbr = iBlock + jnbr = jBlock + 1 + if (jnbr > nblocks_y) then + select case(jBoundary) + case ('open') + jnbr = 0 ! 
do not write into the neighbor's ghost cells + case ('closed') + jnbr = 0 + case ('cyclic') + jnbr = 1 + case ('tripole':'tripoleT') + !*** return negative j value to flag tripole + !*** i index of main northern neighbor across the + !*** tripole cut - may also need i+1,i-1 to get + !*** other points if there has been padding or + !*** if the block size does not divide the domain + !*** evenly + inbr = nblocks_x - iBlock + 1 + jnbr = -jBlock + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown north boundary') + end select + endif + + case (ice_blocksSouth) + + inbr = iBlock + jnbr = jBlock - 1 + if (jnbr < 1) then + select case(jBoundary) + case ('open') + jnbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + jnbr = 0 + case ('cyclic') + jnbr = nblocks_y + case ('tripole') + jnbr = 0 ! do not write into the neighbor's ghost cells + case ('tripoleT') + jnbr = 0 ! do not write into the neighbor's ghost cells + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown south boundary') + end select + endif + + case (ice_blocksEast ) + + inbr = iBlock + 1 + jnbr = jBlock + if (inbr > nblocks_x) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = 1 + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown east boundary') + end select + endif + + case (ice_blocksWest ) + + jnbr = jBlock + inbr = iBlock - 1 + if (inbr < 1) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = nblocks_x + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown west boundary') + end select + endif + + case (ice_blocksNorthEast) + + inbr = iBlock + 1 + jnbr = jBlock + 1 + if (inbr > nblocks_x) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = 1 + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown east boundary') + end select + endif + if (jnbr > nblocks_y) then + select case(jBoundary) + case ('open') + jnbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + jnbr = 0 + case ('cyclic') + jnbr = 1 + case ('tripole':'tripoleT') + !*** return negative j value to flag tripole + !*** i index of main northern neighbor across the + !*** tripole cut - may also need i+1,i-1 to get + !*** other points if there has been padding or + !*** if the block size does not divide the domain + !*** evenly + inbr = nblocks_x - iBlock + if (inbr == 0) inbr = nblocks_x + jnbr = -jBlock + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown north boundary') + end select + endif + + case (ice_blocksNorthWest) + + inbr = iBlock - 1 + jnbr = jBlock + 1 + if (inbr < 1) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = nblocks_x + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown west boundary') + end select + endif + if (jnbr > nblocks_y) then + select case(jBoundary) + case ('open') + jnbr = 0 ! 
do not write into the neighbor's ghost cells + case ('closed') + jnbr = 0 + case ('cyclic') + jnbr = 1 + case ('tripole':'tripoleT') + !*** return negative j value to flag tripole + !*** i index of main northern neighbor across the + !*** tripole cut - may also need i+1,i-1 to get + !*** other points if there has been padding or + !*** if the block size does not divide the domain + !*** evenly + inbr = nblocks_x - iBlock + 2 + if (inbr > nblocks_x) inbr = 1 + jnbr = -jBlock + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown north boundary') + end select + endif + + case (ice_blocksSouthEast ) + + inbr = iBlock + 1 + jnbr = jBlock - 1 + if (inbr > nblocks_x) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = 1 + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown east boundary') + end select + endif + if (jnbr < 1) then + select case(jBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + jnbr = 0 + case ('cyclic') + jnbr = nblocks_y + case ('tripole') + jnbr = 0 ! do not write into the neighbor's ghost cells + case ('tripoleT') + jnbr = 0 ! do not write into the neighbor's ghost cells + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown south boundary') + end select + endif + + case (ice_blocksSouthWest ) + inbr = iBlock - 1 + jnbr = jBlock - 1 + if (inbr < 1) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = nblocks_x + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown west boundary') + end select + endif + if (jnbr < 1) then + select case(jBoundary) + case ('open') + jnbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + jnbr = 0 + case ('cyclic') + jnbr = nblocks_y + case ('tripole') + jnbr = 0 ! do not write into the neighbor's ghost cells + case ('tripoleT') + jnbr = 0 ! do not write into the neighbor's ghost cells + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown south boundary') + end select + endif + + case (ice_blocksEast2) + + inbr = iBlock + 2 + jnbr = jBlock + if (inbr > nblocks_x) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = inbr - nblocks_x + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown east boundary') + end select + endif + + case (ice_blocksWest2) + jnbr = jBlock + inbr = iBlock - 2 + if (inbr < 1) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = nblocks_x + inbr + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown west boundary') + end select + endif + + case (ice_blocksEastNorthEast) + + inbr = iBlock + 2 + jnbr = jBlock + 1 + if (inbr > nblocks_x) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = inbr - nblocks_x + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown east boundary') + end select + endif + if (jnbr > nblocks_y) then + select case(jBoundary) + case ('open') + jnbr = 0 ! 
do not write into the neighbor's ghost cells + case ('closed') + jnbr = 0 + case ('cyclic') + jnbr = jnbr - nblocks_y + case ('tripole':'tripoleT') + !*** return negative j value to flag tripole + !*** i index of main northern neighbor across the + !*** tripole cut - may also need i+1,i-1 to get + !*** other points if there has been padding or + !*** if the block size does not divide the domain + !*** evenly + inbr = nblocks_x - iBlock - 1 + if (inbr <= 0) inbr = inbr + nblocks_x + jnbr = -jBlock + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown north boundary') + end select + endif + + case (ice_blocksWestNorthWest) + + inbr = iBlock - 2 + jnbr = jBlock + 1 + if (inbr < 1) then + select case(iBoundary) + case ('open') + inbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + inbr = 0 + case ('cyclic') + inbr = nblocks_x + inbr + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown west boundary') + end select + endif + if (jnbr > nblocks_y) then + select case(jBoundary) + case ('open') + jnbr = 0 ! do not write into the neighbor's ghost cells + case ('closed') + jnbr = 0 + case ('cyclic') + jnbr = jnbr + nblocks_y + case ('tripole':'tripoleT') + !*** return negative j value to flag tripole + !*** i index of main northern neighbor across the + !*** tripole cut - may also need i+1,i-1 to get + !*** other points if there has been padding or + !*** if the block size does not divide the domain + !*** evenly + inbr = nblocks_x - iBlock + 3 + if (inbr > nblocks_x) inbr = inbr - nblocks_x + jnbr = -jBlock + case default + call abort_ice( & + 'ice_blocksGetNbrID: unknown north boundary') + end select + endif + + case default + + call abort_ice( & + 'ice_blocksGetNbrID: unknown direction') + return + + end select + +!---------------------------------------------------------------------- +! +! now get block id for this neighbor block +! +!---------------------------------------------------------------------- + + if (inbr > 0 .and. jnbr > 0) then + nbrID = all_blocks_ij(inbr,jnbr) + else if (inbr > 0 .and. jnbr < 0) then ! tripole upper boundary + !*** return negative value to flag tripole + nbrID = -all_blocks_ij(inbr,abs(jnbr)) + else + nbrID = 0 ! neighbor outside domain + endif + +!---------------------------------------------------------------------- + + end function ice_blocksGetNbrID + +!********************************************************************** + + function get_block(block_id,local_id) + +! This function returns the block data structure for the block +! associated with the input block id. + + integer (int_kind), intent(in) :: & + block_id, &! global block id for requested block info + local_id ! local block id to assign to this block + + type (block) :: & + get_block ! block information returned for requested block + +!---------------------------------------------------------------------- +! +! check for valid id. if valid, return block info for requested block +! +!---------------------------------------------------------------------- + + if (block_id < 1 .or. block_id > nblocks_tot) then + call abort_ice('ice: get_block: invalid block_id') + endif + + get_block = all_blocks(block_id) + get_block%local_id = local_id + +!---------------------------------------------------------------------- + + end function get_block + +!********************************************************************** + + subroutine get_block_parameter(block_id, local_id, & + ilo, ihi, jlo, jhi, & + iblock, jblock, tripole, & + i_glob, j_glob) + +! 
This routine returns requested parts of the block data type +! for the block associated with the input block id + + integer (int_kind), intent(in) :: & + block_id ! global block id for which parameters are requested + + !(optional) parts of block data type to extract if requested + + integer (int_kind), intent(out), optional :: & + local_id ,&! local id assigned to block in current distrb + ilo, ihi, jlo, jhi ,&! begin,end indices for physical domain + iblock, jblock ! cartesian i,j position for bloc + + logical (log_kind), intent(out), optional :: & + tripole ! flag is true if block on tripole bndy + + integer (int_kind), dimension(:), pointer, optional :: & + i_glob, j_glob ! global domain location for each point + +!---------------------------------------------------------------------- +! +! extract each component of data type if requested +! +!---------------------------------------------------------------------- + + if (block_id < 1 .or. block_id > nblocks_tot) then + call abort_ice('ice: get_block_parameter: invalid block_id') + endif + + if (present(local_id)) local_id = all_blocks(block_id)%local_id + if (present(ilo )) ilo = all_blocks(block_id)%ilo + if (present(ihi )) ihi = all_blocks(block_id)%ihi + if (present(jlo )) jlo = all_blocks(block_id)%jlo + if (present(jhi )) jhi = all_blocks(block_id)%jhi + if (present(iblock )) iblock = all_blocks(block_id)%iblock + if (present(jblock )) jblock = all_blocks(block_id)%jblock + if (present(i_glob )) i_glob => all_blocks(block_id)%i_glob + if (present(j_glob )) j_glob => all_blocks(block_id)%j_glob + if (present(tripole )) tripole = all_blocks(block_id)%tripole + +!---------------------------------------------------------------------- + + end subroutine get_block_parameter + +!********************************************************************** + + end module ice_blocks + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/source/ice_brine.F90 b/source/ice_brine.F90 new file mode 100755 index 00000000..d199ea5c --- /dev/null +++ b/source/ice_brine.F90 @@ -0,0 +1,828 @@ +! SVN:$Id: ice_brine.F90 744 2013-09-27 22:53:24Z eclare $ +!======================================================================= +! +! Computes ice microstructural information for use in biogeochemistry +! +! authors: Nicole Jeffery, LANL +! + module ice_brine + + use ice_kinds_mod + use ice_constants + use ice_domain_size, only: nilyr, nblyr, max_blocks, ncat + use ice_fileunits, only: nu_diag, nu_rst_pointer, nu_dump_hbrine, & + nu_restart_hbrine, flush_fileunit + use ice_blocks, only: nx_block, ny_block + use ice_communicate, only: my_task, master_task + use ice_exit, only: abort_ice + use ice_state, only: ntrcr, nt_qice, nt_sice + use ice_zbgc_shared, only: cgrid, bgrid, igrid, exp_h, k_o, rhosi, & + hbr_min, thinS, min_salin, igrid, remap_layers_bgc, & + phi_snow, restart_hbrine, first_ice + + implicit none + + private + public :: preflushing_changes, compute_microS_mushy, & + update_hbrine, init_hbrine, write_restart_hbrine, & + hbrine_diags + + real (kind=dbl_kind), parameter :: & + maxhbr = 1.25_dbl_kind , & ! brine overflows if hbr > maxhbr*hin + viscos = 2.1e-6_dbl_kind, & ! kinematic viscosity (m^2/s) + ! Brine salinity as a cubic function of temperature + a1 = -21.4_dbl_kind , & ! (psu/C) + a2 = -0.886_dbl_kind, & ! (psu/C^2) + a3 = -0.012_dbl_kind, & ! (psu/C^3) + ! Brine density as a quadratic of brine salinity + b1 = 1000.0_dbl_kind, & ! (kg/m^3) + b2 = 0.8_dbl_kind ! 
(kg/m^3/ppt) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat,max_blocks) :: & + first_ice_real ! .true. = c1, .false. = c0 + +!======================================================================= + + contains + +!======================================================================= + +! Initialize brine height tracer + + subroutine init_hbrine + + use ice_state, only: nt_fbri, trcrn + + integer (kind=int_kind) :: & + k ! vertical index + + real (kind=dbl_kind) :: & + zspace ! grid spacing for CICE vertical grid + + !----------------------------------------------------------------- + ! Calculate bio gridn: 0 to 1 corresponds to ice top to bottom + !----------------------------------------------------------------- + + bgrid(:) = c0 ! bgc grid points + bgrid(nblyr+2) = c1 ! bottom value + igrid(:) = c0 ! bgc interface grid points + igrid(1) = c0 ! ice top + igrid(nblyr+1) = c1 ! ice bottom + + zspace = c1/max(c1,(real(nblyr,kind=dbl_kind))) + do k = 2, nblyr+1 + bgrid(k) = zspace*(real(k,kind=dbl_kind) - c1p5) + enddo + + do k = 2, nblyr + igrid(k) = p5*(bgrid(k+1)+bgrid(k)) + enddo + + !----------------------------------------------------------------- + ! Calculate CICE cgrid for interpolation ice top (0) to bottom (1) + !----------------------------------------------------------------- + + cgrid(1) = c0 ! CICE vertical grid top point + zspace = c1/(real(nilyr,kind=dbl_kind)) ! CICE grid spacing + + do k = 2, nilyr+1 + cgrid(k) = zspace * (real(k,kind=dbl_kind) - c1p5) + enddo + + !----------------------------------------------------------------- + ! initialize restart variables + !----------------------------------------------------------------- + + if (restart_hbrine) then + call read_restart_hbrine + else + first_ice(:,:,:,:) = .true. + trcrn(:,:,nt_fbri,:,:) = c1 + endif + + end subroutine init_hbrine + +!======================================================================= + +! Computes the top and bottom brine boundary changes for flushing +! works for zsalinity and tr_salinity +! +! NOTE: In this subroutine, trcrn(nt_fbri) is the volume fraction of ice with +! dynamic salinity or the height ratio == hbr/vicen*aicen, where hbr is the +! height of the brine surface relative to the bottom of the ice. This volume fraction +! may be > 1 in which case there is brine above the ice surface (meltponds). + + subroutine preflushing_changes (nx_block, ny_block, & + icells, n_cat, & + indxii, indxjj, & + aicen, vicen, vsnon, & + meltb, meltt, congel, & + snoice, hice_old, fbri, & + dhbr_top, dhbr_bot, & + hbr_old, hin,hsn, firstice) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of cells with aicen > 0 + n_cat ! category + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxii, indxjj ! compressed indices for icells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon , & ! volume per unit area of snow (m) + meltb , & ! bottom ice melt (m) + meltt , & ! top ice melt (m) + congel , & ! bottom ice growth (m) + snoice ! top ice growth from flooding (m) + + real (kind=dbl_kind), dimension(nx_block*ny_block), intent(inout) :: & + hin , & ! ice thickness (m) + hsn , & ! snow thickness (m) + hbr_old ! old brine height (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + fbri , & ! trcrn(i,j,nt_fbri) + dhbr_top , & ! 
brine change in top for diagnostics (m) + dhbr_bot , & ! brine change in bottom for diagnostics (m) + hice_old ! old ice thickness (m) + + logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & + firstice ! if true, initialized values should be used + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij ! horizontal index, combines i and j loops + + real (kind=dbl_kind) :: & + hin_old ! ice thickness before current melt/growth (m) + + real (kind=dbl_kind):: & + dhice ! Change in hin due to subl/cond (m) + + !----------------------------------------------------------------- + ! initialize + !----------------------------------------------------------------- + + dhbr_top(:,:) = c0 + dhbr_bot(:,:) = c0 + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + + i = indxii(ij) + j = indxjj(ij) + if (fbri(i,j) <= c0) then + write(nu_diag, *) 'preflushing: fbri <= c0:',i,j + write(nu_diag, *) 'vicen, aicen', vicen(i,j), aicen(i,j) + write(nu_diag, *) 'fbri, hice_old', fbri(i,j), hice_old(i,j) + call abort_ice ('ice_brine error') + endif + + hin(ij) = vicen(i,j) / aicen(i,j) + hsn(ij) = vsnon(i,j) / aicen(i,j) + hin_old = max(c0, hin(ij) + meltb (i,j) + meltt (i,j) & + - congel(i,j) - snoice(i,j)) + dhice = hin_old - hice_old(i,j) ! change due to subl/cond + dhbr_top(i,j) = meltt (i,j) - dhice - snoice(i,j) + dhbr_bot(i,j) = congel(i,j) - meltb(i,j) + + if ((hice_old(i,j) < puny) .OR. (hin_old < puny) & + .OR. firstice(i,j)) then + hin_old = hin(ij) + dhbr_top (i,j) = c0 + dhbr_bot (i,j) = c0 + fbri (i,j) = c1 + endif + + hbr_old(ij) = fbri(i,j) * hice_old(i,j) + + enddo ! ij + + end subroutine preflushing_changes + +!======================================================================= + +! Computes ice microstructural properties for updating hbrine +! +! NOTE: This subroutine uses thermosaline_vertical output to compute +! average ice permeability and the surface ice porosity + + subroutine compute_microS_mushy (nx_block, ny_block, & + icells, n_cat, & + indxii, indxjj, & + trcrn, hice_old, hbr_old, & + sss, sst, bTin, & + bphin, kperm, zphi_min, & + bSin, brine_sal, brine_rho, & + iphin, ibrine_rho, ibrine_sal) + + use ice_therm_mushy, only: temperature_mush, liquid_fraction, permeability + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of cells with aicen > 0 + n_cat ! ice category + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxii, indxjj ! compressed indices for icells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + hice_old , & ! previous timestep ice height (m) + sss , & ! ocean salinity (ppt) + sst ! ocean temperature (C) + + real (kind=dbl_kind), dimension(nx_block,ny_block,ntrcr), & + intent(in) :: & + trcrn + + real (kind=dbl_kind), dimension(nx_block*ny_block), intent(out) :: & + kperm , & ! average ice permeability (m^2) + zphi_min ! surface porosity + + real (kind=dbl_kind), dimension (nx_block*ny_block), intent(inout) :: & + hbr_old ! previous timestep brine height (m) + + real (kind=dbl_kind), dimension (nx_block*ny_block,nblyr+1), & + intent(inout) :: & + iphin , & ! porosity on the igrid + ibrine_rho , & ! brine rho on interface + ibrine_sal ! brine sal on interface + + real (kind=dbl_kind), dimension (nx_block*ny_block,nblyr+2), & + intent(inout) :: & + bSin , & ! bulk salinity (ppt) on bgrid + brine_sal , & ! 
equilibrium brine salinity (ppt) + brine_rho ! internal brine density (kg/m^3) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nblyr+2), & + intent(inout) :: & + bTin , & ! temperature on bgrid + bphin ! porosity on bgrid + + ! local variables + + real (kind=dbl_kind), dimension (nx_block*ny_block,nilyr) :: & + cSin , & ! bulk salinity (ppt) + cqin ! enthalpy () + + real (kind=dbl_kind), dimension (nx_block*ny_block,nblyr+2) :: & + zTin , & ! Temperature of ice layers on bgrid (C) + zSin , & ! Salinity of ice layers on bgrid (C) + bqin ! enthalpy on the bgrid () + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k ! vertical biology layer index + + real (kind=dbl_kind), dimension(icells) :: & + surface_S , & ! salinity of ice above hin > hbr + hinc_old , & ! mean ice thickness before current melt/growth (m) + hbrc_old ! mean brine thickness before current melt/growth (m) + + real (kind=dbl_kind), dimension (nx_block*ny_block,nblyr+2) :: & + trtmp_s , & ! temporary, remapped tracers + trtmp_q ! temporary, remapped tracers + + !----------------------------------------------------------------- + ! Define ice salinity and temperature on bgrid + !----------------------------------------------------------------- + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxii(ij) + j = indxjj(ij) + cSin(ij,k) = trcrn(i,j,nt_sice+k-1) + cqin(ij,k) = trcrn(i,j,nt_qice+k-1) + enddo + enddo + + trtmp_s(:,:) = c0 + trtmp_q(:,:) = c0 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells ! map Sin and qin profiles to bgc grid + i = indxii(ij) + j = indxjj(ij) + hbr_old (ij) = min(hbr_old(ij), maxhbr*hice_old(i,j)) + hinc_old (ij) = hice_old(i,j) + hbrc_old (ij) = hbr_old (ij) + + call remap_layers_bgc (ntrcr, nilyr, & + nt_sice, & + trcrn(i,j,:), trtmp_s(ij,:), & + 0, nblyr+1, & + hinc_old(ij), hinc_old(ij), & + cgrid(2:nilyr+1), & + bgrid(1:nblyr+1), surface_S(ij)) + + call remap_layers_bgc (ntrcr, nilyr, & + nt_qice, & + trcrn(i,j,:), trtmp_q(ij,:), & + 0, nblyr+1, & + hinc_old(ij), hinc_old(ij), & + cgrid(2:nilyr+1), & + bgrid(1:nblyr+1), surface_S(ij)) + enddo + + do k = 1, nblyr+1 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxii(ij) + j = indxjj(ij) + bqin (ij, k) = min(c0, trtmp_q(ij,k)) + bSin (ij, k) = max(min_salin, trtmp_s(ij,k)) + bTin (i,j,k) = temperature_mush(bqin(ij, k), bSin(ij,k)) + bphin(i,j,k) = liquid_fraction (bTin(i,j,k), bSin(ij,k)) + enddo ! ij + enddo ! k + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxii(ij) + j = indxjj(ij) + bSin (ij, nblyr+2) = sss(i,j) + bTin (i,j,nblyr+2) = sst(i,j) + bphin(i,j,nblyr+2) = c1 + enddo ! ij + + !----------------------------------------------------------------- + ! 
Define ice multiphase structure + !----------------------------------------------------------------- + + call prepare_hbrine (icells, indxii, indxjj, & + bSin, bTin, & + brine_sal, brine_rho, & + ibrine_sal, ibrine_rho, & + bphin, iphin, & + kperm, zphi_min, & + igrid, sss) + + end subroutine compute_microS_mushy + +!======================================================================= + + subroutine prepare_hbrine (icells, indxi, indxj, & + bSin, bTin, & + brine_sal, brine_rho, & + ibrine_sal, ibrine_rho, & + bphin, iphin, & + kperm, zphi_min, & + igrid, sss) + + use ice_therm_shared, only: calculate_Tin_from_qin + + integer (kind=int_kind), intent(in) :: & + icells ! number of cells with aicen > 0 + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for icells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block*ny_block,nblyr+2), & + intent(in) :: & + bSin ! salinity of ice layers on bio grid (ppt) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nblyr+2), & + intent(in) :: & + bTin ! temperature of ice layers on bio grid for history (C) + + real (kind=dbl_kind), dimension (nx_block*ny_block,nblyr+2), & + intent(inout) :: & + brine_sal , & ! equilibrium brine salinity (ppt) + brine_rho ! internal brine density (kg/m^3) + + real (kind=dbl_kind), dimension (nx_block*ny_block,nblyr+1), & + intent(inout) :: & + ibrine_rho , & ! brine density on interface (kg/m^3) + ibrine_sal , & ! brine salinity on interface (ppt) + iphin ! porosity on interface + + real (kind=dbl_kind), dimension (nx_block,ny_block,nblyr+2), & + intent(inout) :: & + bphin ! porosity of layers + + real (kind=dbl_kind), dimension (nblyr+1), intent(in):: & + igrid ! biology grid interface points + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + sss ! sea surface salinity (ppt) + + real (kind=dbl_kind), dimension (nx_block*ny_block), intent(out) :: & + kperm , & ! harmonic average permeability (m^2) + zphi_min ! minimum porosity + + ! local variables + + real (kind=dbl_kind), dimension(icells, nblyr+1) :: & + kin ! permeability (m^2) + + real (kind=dbl_kind), dimension(icells) :: & + k_min, ktemp + + real (kind=dbl_kind) :: & + igrp, igrm, rigr ! grid finite differences + + integer (kind=int_kind) :: & + k, i, j, ij ! tracer indices + + !----------------------------------------------------------------- + ! calculate equilibrium brine density and gradients + !----------------------------------------------------------------- + + do k = 1, nblyr+1 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1,icells + i = indxi(ij) + j = indxj(ij) + + brine_sal(ij, k) = a1*bTin(i,j,k) & + + a2*bTin(i,j,k)**2 & + + a3*bTin(i,j,k)**3 + brine_rho(ij, k) = b1 + b2*brine_sal(ij,k) + bphin (i,j,k) = min(c1, max(puny, bSin(ij,k)*rhosi & + /(brine_sal(ij,k)*brine_rho(ij,k)))) + kin (ij, k) = k_o*bphin(i,j,k)**exp_h + enddo ! ij + enddo ! k + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + brine_sal (ij, nblyr+2) = sss (i,j) + brine_rho (ij, nblyr+2) = rhow + bphin (i,j,nblyr+2) = c1 + ibrine_sal(ij, 1) = brine_sal (ij, 2) + ibrine_sal(ij, nblyr+1) = brine_sal (ij, nblyr+2) + ibrine_rho(ij, 1) = brine_rho (ij, 2) + ibrine_rho(ij, nblyr+1) = brine_rho (ij, nblyr+2) + iphin (ij, 1) = bphin (i,j,2) + iphin (ij, nblyr+1) = bphin (i,j,nblyr+1) + zphi_min (ij) = bphin (i,j,2) + k_min (ij) = MINVAL(kin(ij, 2:nblyr+1)) + kperm (ij) = c0 ! 
initialize + ktemp (ij) = c0 + enddo ! ij + + do k = 2, nblyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (k_min(ij) > c0) then + ktemp(ij) = ktemp(ij) + c1/kin(ij,k) + kperm(ij) = k_min(ij) + endif + + igrp = igrid(k+1) - igrid(k ) + igrm = igrid(k ) - igrid(k-1) + rigr = c1 / (igrid(k+1)-igrid(k-1)) + + ibrine_sal(ij,k) = (brine_sal(ij,k+1)*igrp & + + brine_sal(ij,k )*igrm) * rigr + ibrine_rho(ij,k) = (brine_rho(ij,k+1)*igrp & + + brine_rho(ij,k )*igrm) * rigr + iphin (ij,k) = min(c1, max(puny, & + (bphin(i,j,k+1)*igrp & + + bphin(i,j,k )*igrm) * rigr)) + enddo ! ij + enddo ! k + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (k_min(ij) > c0) then + ktemp(ij) = ktemp(ij) + c1/kin(ij,nblyr+1) + kperm(ij) = real(nblyr,kind=dbl_kind)/ktemp(ij) + endif + enddo ! ij + + end subroutine prepare_hbrine + +!======================================================================= + +! Changes include brine height increases from ice and snow surface melt, +! congelation growth, and upward pressure driven flow from snow loading. +! +! Decreases arise from downward flushing and bottom melt. +! +! NOTE: In this subroutine, trcrn(nt_fbri) is the volume fraction of ice +! with dynamic salinity or the height ratio == hbr/vicen*aicen, where +! hbr is the height of the brine surface relative to the bottom of the +! ice. This volume fraction may be > 1 in which case there is brine +! above the ice surface (ponds). + + subroutine update_hbrine (meltb, meltt, & + melts, dt, & + hin, hsn, & + hin_old, & + hbr, hbr_old, & + fbri, & + dhS_top, dhS_bottom, & + kperm, zphi_min, & + darcy_V) + + real (kind=dbl_kind), intent(in) :: & + dt ! timestep + + real (kind=dbl_kind), intent(in):: & + meltb, & ! bottom melt over dt (m) + meltt, & ! true top melt over dt (m) + melts, & ! true snow melt over dt (m) + hin, & ! ice thickness (m) + hsn, & ! snow thickness (m) + hin_old, & ! past timestep ice thickness (m) + hbr_old, & ! previous timestep hbr + kperm ! avg ice permeability + + real (kind=dbl_kind), intent(inout):: & + darcy_V , & ! Darcy velocity: m/s + dhS_top , & ! change in top hbr before darcy flow + dhS_bottom , & ! change in bottom hbr initially before darcy flow + hbr , & ! thickness of brine (m) + fbri , & ! brine height ratio tracer (hbr/hin) + zphi_min ! surface porosity + + ! local variables + + real (kind=dbl_kind) :: & + hbr_min , & ! thinS or hin + dhbr_hin , & ! hbr-hin + hbrocn , & ! brine height above sea level (m) hbr-h_ocn + dhbr , & ! change in brine surface + h_ocn , & ! new ocean surface from ice bottom (m) + darcy_coeff, & ! magnitude of the Darcy velocity/hbrocn (1/s) + hbrocn_new ! hbrocn after flushing + + real (kind=dbl_kind), parameter :: & + dh_min = p001, & ! brine remains within dh_min of sea level + ! when ice thickness is less than thinS +!echmod USE NAMELIST PARAMETERS rfracmin, rfracmax + run_off = c0 ! fraction of melt that runs off directly to the ocean + + hbrocn = c0 + darcy_V = c0 + hbrocn_new = c0 + h_ocn = rhosi/rhow*hin + rhos/rhow*hsn + + if (hbr_old > thinS .AND. hin_old > thinS) then + + dhS_top = -max(c0, min(hin_old-hbr_old, meltt)) * rhoi/rhow + dhS_top = dhS_top - max(c0, melts) * rhos/rhow + dhS_top = (c1 - run_off) * dhS_top + dhbr = dhS_bottom - dhS_top + hbr = max(hbr_min, hbr_old + dhbr) + hbrocn = hbr - h_ocn + darcy_coeff = max(c0, kperm*gravit/(viscos*hbr_old)) + + if (hbrocn > c0 .AND. 
hbr > thinS ) then + hbrocn_new = hbrocn*exp(-darcy_coeff/zphi_min*dt) + hbr = max(thinS, h_ocn + hbrocn_new) + elseif (hbrocn < c0) then + if (hbr >= hin) zphi_min = phi_snow + hbrocn_new = hbrocn*exp(-darcy_coeff/zphi_min*dt) + hbr = max(hbr_min, h_ocn + hbrocn_new) + endif + + hbrocn_new = hbr - h_ocn + darcy_V = -SIGN((hbrocn-hbrocn_new)/dt*zphi_min, hbrocn) + dhS_top = dhS_top + SIGN((hbrocn-hbrocn_new), hbrocn) + + else ! very thin brine height + hbr_min = min(thinS, hin) + hbr = max(hbr_min, hbr_old+dhS_bottom-dhS_top) + dhbr_hin = hbr - h_ocn + if (abs(dhbr_hin) > dh_min) & + hbr = max(hbr_min, h_ocn + SIGN(dh_min,dhbr_hin)) + endif + + fbri = hbr/hin + + end subroutine update_hbrine + +!======================================================================= + + subroutine read_restart_hbrine() + +! Reads all values needed for hbrine +! author Elizabeth C. Hunke, LANL + + use ice_communicate, only: my_task, master_task + use ice_domain, only: nblocks + use ice_fileunits, only: nu_diag, nu_restart_hbrine + use ice_state, only: trcrn, nt_fbri + use ice_restart,only: read_restart_field + + ! local variables + + integer (kind=int_kind) :: & + i, j, n, iblk ! counting indices + + logical (kind=log_kind) :: & + diag + + diag = .true. + + if (my_task == master_task) write(nu_diag,*) 'brine restart' + + call read_restart_field(nu_restart_hbrine,0,trcrn(:,:,nt_fbri,:,:),'ruf8', & + 'fbrn',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_hbrine,0,first_ice_real(:,:,:,:),'ruf8', & + 'first_ice',ncat,diag,field_loc_center,field_type_scalar) + + do iblk = 1, nblocks + do n = 1,ncat + do j = 1, ny_block + do i = 1, nx_block + if (first_ice_real(i,j,n,iblk) >= p5) then + first_ice (i,j,n,iblk) = .true. + else + first_ice (i,j,n,iblk) = .false. + endif + enddo + enddo + enddo + enddo + + end subroutine read_restart_hbrine + +!======================================================================= + + subroutine write_restart_hbrine() + +! Dumps all values needed for a hbrine restart +! author Elizabeth C. Hunke, LANL + + use ice_domain, only: nblocks + use ice_state, only: trcrn, nt_fbri + use ice_restart,only: write_restart_field + + ! local variables + + integer (kind=int_kind) :: & + i, j, n, iblk + + logical (kind=log_kind) :: diag + + diag = .true. + + do iblk = 1, nblocks + do n = 1,ncat + do j = 1, ny_block + do i = 1, nx_block + if (first_ice(i,j,n,iblk)) then + first_ice_real(i,j,n,iblk) = c1 + else + first_ice_real(i,j,n,iblk) = c0 + endif + enddo + enddo + enddo + enddo + + call write_restart_field(nu_dump_hbrine,0,trcrn(:,:,nt_fbri,:,:),'ruf8', & + 'fbrn',ncat,diag) + call write_restart_field(nu_dump_hbrine,0,first_ice_real(:,:,:,:),'ruf8', & + 'first_ice',ncat,diag) + + end subroutine write_restart_hbrine + +!======================================================================= +! +! Writes diagnostic info (max, min, global sums, etc) to standard out +! +! authors: Elizabeth C. Hunke, LANL +! Bruce P. Briegleb, NCAR +! Cecilia M. Bitz, UW +! Nicole Jeffery, LANL + + subroutine hbrine_diags (dt) + + use ice_broadcast, only: broadcast_scalar + use ice_diagnostics, only: npnt, print_points, pmloc, piloc, pjloc, pbloc, & + plat, plon + use ice_domain_size, only: ncat + use ice_state, only: aice, aicen, vicen, vice, trcr, nt_fbri, & + trcrn, nt_sice + use ice_zbgc_shared, only: darcy_V + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i, j, k, n, iblk + + ! 
fields at diagnostic points + real (kind=dbl_kind), dimension(npnt) :: & + phinS, phinS1, pdarcy_V, pfbri + + real (kind=dbl_kind), dimension(npnt,nilyr) :: & + pSin + + !----------------------------------------------------------------- + ! Dynamic brine height + !----------------------------------------------------------------- + + if (print_points) then + + !----------------------------------------------------------------- + ! state of the ice and associated fluxes for 2 defined points + ! NOTE these are computed for the last timestep only (not avg) + !----------------------------------------------------------------- + + do n = 1, npnt + if (my_task == pmloc(n)) then + i = piloc(n) + j = pjloc(n) + iblk = pbloc(n) + phinS1(n) = c0 + phinS(n) = c0 + pfbri(n) = trcrn(i,j,nt_fbri,1,iblk) + pdarcy_V(n) = darcy_V(i,j,1,iblk) + if (aice(i,j,iblk) > c0) & + phinS (n) = trcr(i,j,nt_fbri,iblk) & + * vice(i,j,iblk)/aice(i,j,iblk) + if (aicen(i,j,1,iblk)> c0) & + phinS1(n) = trcrn(i,j,nt_fbri,1,iblk) & + * vicen(i,j,1,iblk)/aicen(i,j,1,iblk) + do k = 1,nilyr + pSin(n,k) = trcr(i,j,nt_sice+k-1,iblk) + enddo + endif ! my_task = pmloc + + do k = 1,nilyr + call broadcast_scalar(pSin(n,k), pmloc(n)) + enddo + call broadcast_scalar(pfbri(n), pmloc(n)) + call broadcast_scalar(phinS1(n), pmloc(n)) + call broadcast_scalar(phinS(n), pmloc(n)) + call broadcast_scalar(pdarcy_V(n), pmloc(n)) + enddo ! npnt + endif ! print_points + + !----------------------------------------------------------------- + ! start spewing + !----------------------------------------------------------------- + + if (my_task == master_task) then + + call flush_fileunit(nu_diag) + + !----------------------------------------------------------------- + ! diagnostics for Arctic and Antarctic points + !----------------------------------------------------------------- + + if (print_points) then + write(nu_diag,*) '-------- hbrine -------' + write(nu_diag,900) 'hbrine, (m) = ',phinS(1),phinS(2) + write(nu_diag,900) 'fbri, cat1 (m) = ',pfbri(1),pfbri(2) + write(nu_diag,900) 'hbrine cat1, (m) = ',phinS1(1),phinS1(2) + write(nu_diag,900) 'darcy_V cat1, (m/s) = ',pdarcy_V(1),pdarcy_V(2) + do k = 1, nilyr + write(nu_diag,900) 'salinity profile (ppt) = ',pSin(1,k),pSin(2,k) + enddo + endif ! print_points + endif ! my_task = master_task + + 900 format (a25,2x,f24.17,2x,f24.17) + + end subroutine hbrine_diags + +!======================================================================= + + end module ice_brine + +!======================================================================= diff --git a/source/ice_calendar.F90 b/source/ice_calendar.F90 new file mode 100755 index 00000000..dbd7ded9 --- /dev/null +++ b/source/ice_calendar.F90 @@ -0,0 +1,686 @@ +! $Id: ice_calendar.F90 943 2015-03-19 22:56:23Z eclare $ +!======================================================================= + +! Calendar routines for managing time +! +! authors: Elizabeth C. Hunke, LANL +! Tony Craig, NCAR +! Craig MacLachlan, UK Met Office +! +! 2006 ECH: Removed 'w' option for history; added 'h' and histfreq_n. +! Converted to free form source (F90). +! 2010 CM : Fixed support for Gregorian calendar: subroutines +! sec2time, time2sec and set_calendar added. 
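+!
+! A brief sketch of the Gregorian day-count bookkeeping used by the
+! time2sec/sec2time pair below, derived from the days_per_* parameters
+! declared in this module (the worked numbers are illustrative):
+!
+!   leap(year) = (mod(year,4)==0 .and. mod(year,100)/=0) .or. mod(year,400)==0
+!
+!   days_per_4y = 4*365   + 1 = 1461     ! one leap day per 4-year block
+!   days_per_c  = 25*1461 - 1 = 36524    ! century years drop the leap day
+!   days_per_4c = 4*36524 + 1 = 146097   ! years divisible by 400 keep it
+!
+! time2sec peels whole 400-, 100-, 4- and 1-year blocks off the elapsed
+! whole years in that order, adds daycal(month) + day - 1 for the current
+! year, and multiplies the resulting day count by secday; sec2time
+! inverts the same steps to recover year, month and day.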
+ + module ice_calendar + + use ice_kinds_mod + use ice_constants, only: c0, c1, c100, c30, c360, c365, c3600, & + c4, c400, secday + use ice_domain_size, only: max_nstrm + use ice_exit, only: abort_ice +#ifdef AusCOM + use cpl_parameters, only : inidate, iniday, inimon, iniyear, init_date + use cpl_parameters, only : il_out, caltype + use cpl_parameters, only : runtime0 !accumulated runtime by the end of last run +#endif + + implicit none + private + save + + public :: init_calendar, calendar, time2sec, sec2time + + integer (kind=int_kind), public :: & + days_per_year , & ! number of days in one year + daymo(12) , & ! number of days in each month + daycal(13) ! day number at end of month + + ! 360-day year data + integer (kind=int_kind) :: & + daymo360(12) , & ! number of days in each month + daycal360(13) ! day number at end of month + data daymo360 / 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30/ + data daycal360/ 0,30, 60, 90,120,150,180,210,240,270,300,330,360/ + + ! 365-day year data + integer (kind=int_kind) :: & + daymo365(12) , & ! number of days in each month + daycal365(13) ! day number at end of month + data daymo365 / 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31/ + data daycal365/ 0,31, 59, 90,120,151,181,212,243,273,304,334,365/ + + ! 366-day year data (leap year) + integer (kind=int_kind) :: & + daymo366(12) , & ! number of days in each month + daycal366(13) ! day number at end of month + data daymo366 / 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31/ + data daycal366/ 0,31, 60, 91,121,152,182,213,244,274,305,335,366/ + + real (kind=dbl_kind), parameter :: & + days_per_4c = 146097.0_dbl_kind, & + days_per_c = 36524.0_dbl_kind, & + days_per_4y = 1461.0_dbl_kind, & + days_per_y = 365.0_dbl_kind + + integer (kind=int_kind), public :: & + istep , & ! local step counter for time loop + istep0 , & ! counter, number of steps taken in previous run + istep1 , & ! counter, number of steps at current timestep + mday , & ! day of the month + hour , & ! hour of the year + month , & ! month number, 1 to 12 + monthp , & ! last month + year_init, & ! initial year + nyr , & ! year number + idate , & ! date (yyyymmdd) + idate0 , & ! initial date (yyyymmdd) + sec , & ! elapsed seconds into date + npt , & ! total number of time steps (dt) + ndtd , & ! number of dynamics subcycles: dt_dyn=dt/ndtd + stop_now , & ! if 1, end program execution + write_restart, & ! if 1, write restart now + diagfreq , & ! diagnostic output frequency (10 = once per 10 dt) + dumpfreq_n , & ! restart output frequency (10 = once per 10 d,m,y) + nstreams , & ! number of history output streams + histfreq_n(max_nstrm) ! history output frequency + + real (kind=dbl_kind), public :: & + dt , & ! thermodynamics timestep (s) + dt_dyn , & ! dynamics/transport/ridging timestep (s) + time , & ! total elapsed time (s) + time_forc , & ! time of last forcing update (s) + yday , & ! day of the year + tday , & ! absolute day number + dayyr , & ! number of days per year + nextsw_cday , & ! julian day of next shortwave calculation + basis_seconds ! Seconds since calendar zero + + logical (kind=log_kind), public :: & + new_year , & ! new year = .true. + new_month , & ! new month = .true. + new_day , & ! new day = .true. + new_hour , & ! new hour = .true. + use_leap_years , & ! use leap year functionality if true + write_ic , & ! write initial condition now + dump_last , & ! write restart file on last time step + force_restart_now, & ! force a restart now + write_history(max_nstrm) ! 
write history now + + character (len=1), public :: & + histfreq(max_nstrm), & ! history output frequency, 'y','m','d','h','1' + dumpfreq ! restart frequency, 'y','m','d' + + character (len=char_len),public :: calendar_type + +!======================================================================= + + contains + +!======================================================================= + +! Initialize calendar variables +! +! authors: Elizabeth C. Hunke, LANL +! Tony Craig, NCAR +! Craig MacLachlan, UK Met Office + + subroutine init_calendar + + use ice_fileunits, only: nu_diag + + istep = 0 ! local timestep number + time=istep0*dt ! s + yday=c0 ! absolute day number + mday=0 ! day of the month + month=0 ! month + nyr=0 ! year + idate=00000101 ! date + sec=0 ! seconds into date + istep1 = istep0 ! number of steps at current timestep + ! real (dumped) or imagined (use to set calendar) + stop_now = 0 ! end program execution if stop_now=1 + dt_dyn = dt/real(ndtd,kind=dbl_kind) ! dynamics et al timestep + force_restart_now = .false. + + ! Check that the number of days per year is set correctly when using + ! leap years. If not, set days_per_year correctly and warn the user. + if (use_leap_years .and. days_per_year /= 365) then + days_per_year = 365 + write(nu_diag,*) 'Warning: days_per_year has been set to 365', & + ' because use_leap_years = .true.' + end if + +#ifdef AusCOM + if ((days_year(year_init) == 366) .and. (caltype == 1)) days_per_year = 366 +#endif + + write(*,*)'CICE (calendar) days_per_year = ', days_per_year + + dayyr = real(days_per_year, kind=dbl_kind) + if (days_per_year == 360) then + daymo = daymo360 + daycal = daycal360 + elseif (days_per_year == 365) then + daymo = daymo365 + daycal = daycal365 +#ifdef AusCOM + elseif (days_per_year == 366) then + daymo = daymo366 + daycal = daycal366 +#endif + else +#ifdef AusCOM + call abort_ice('ice: days_per_year must be 360, 365 or 366') +#else + call abort_ice('ice: days_per_year must be 360 or 365') +#endif + endif + + ! Get the time in seconds from calendar zero to start of initial year + call time2sec(year_init,1,1,basis_seconds) + + ! determine initial date (assumes namelist year_init, istep0 unchanged) + sec = mod(time,secday) ! elapsed seconds into date at + ! end of dt + tday = (time-sec)/secday + c1 ! absolute day number + + ! Convert the current timestep into a calendar date + call sec2time(nyr,month,mday,basis_seconds+sec) + + yday = mday + daycal(month) ! day of the year + nyr = nyr - year_init + 1 ! year number + + idate0 = (nyr+year_init-1)*10000 + month*100 + mday ! date (yyyymmdd) + +#ifdef AusCOM + write(il_out,*) '(init_calendar) istep0, dt, time, sec = ', istep0, dt, time, sec + write(il_out,*) '(init_calendar) tday, yday, mday, nyr = ', tday, yday, mday, nyr + write(il_out,*) '(init_calendar) idate0 = ', idate0 + + idate0 = init_date + write(il_out,*) '(init_calendar) idate0 (-corrected-) = ',idate0 + print *, 'CICE (init_calendar) idate0 = ', idate0 +#endif + end subroutine init_calendar + +!======================================================================= + +! Determine the date at the end of the time step +! +! authors: Elizabeth C. Hunke, LANL +! Tony Craig, NCAR +! Craig MacLachlan, UK Met Office + + subroutine calendar(ttime) + + use ice_fileunits, only: nu_diag + use ice_communicate, only: my_task, master_task + + real (kind=dbl_kind), intent(in) :: & + ttime ! time variable + + ! local variables + + integer (kind=int_kind) :: & + ns , & ! loop index + nyrp,mdayp,hourp , & ! 
previous year, day, hour + elapsed_days , & ! since beginning this run + elapsed_months , & ! since beginning this run + elapsed_hours , & ! since beginning this run + month0 + +#ifdef AusCOM + integer (kind=int_kind) :: & + newh, newd, newm, newy !date by the end of this step +#endif + + nyrp=nyr + monthp=month + mdayp=mday + hourp=hour + new_year=.false. + new_month=.false. + new_day=.false. + new_hour=.false. + write_history(:)=.false. + write_restart=0 + +#ifdef AusCOM + write(il_out,*) '(calendar) ttime = ', ttime +#endif + sec = mod(ttime,secday) ! elapsed seconds into date at + ! end of dt +#ifdef AusCOM + call get_idate(ttime, newh, newd, newm, newy) + ! + !note ttime is seconds accumulated from the beginning of this run only. + !the following stuff is required here or there in other routines ... + ! + yday = (ttime-sec)/secday + c1 ! day of the year + hour = newh + mday = newd + month = newm + nyr = newy - year_init + 1 + ! + elapsed_months = (nyr - 1)*12 + month - 1 + tday = (ttime+runtime0 - mod(ttime+runtime0,secday))/secday + c1 + elapsed_days = int(yday) - 1 + elapsed_hours = int(ttime/3600) +#else + tday = (ttime-sec)/secday + c1 ! absolute day number + + ! Deterime the current date from the timestep + call sec2time(nyr,month,mday,basis_seconds+ttime) + + yday = mday + daycal(month) ! day of the year + nyr = nyr - year_init + 1 ! year number + + hour = int((ttime)/c3600) + c1 ! hour + + month0 = int((idate0 - int(idate0 / 10000) * 10000) / 100) + + elapsed_months = (nyr - 1)*12 + (month - month0) + elapsed_days = int((istep * dt) / secday) + elapsed_hours = int(ttime/3600) +#endif + + idate = (nyr+year_init-1)*10000 + month*100 + mday ! date (yyyymmdd) + +#ifdef AusCOM + write(il_out,*) '(calendar) runtime0 = ', runtime0 + write(il_out,*) '(calendar) nyr, year_init, month, mday = ', nyr, year_init, month, mday + write(il_out,*) '(calendar) idate = ', idate +#endif + if (istep >= npt+1) stop_now = 1 + if (istep == npt .and. dump_last) write_restart = 1 ! last timestep + if (nyr /= nyrp) new_year = .true. + if (month /= monthp) new_month = .true. + if (mday /= mdayp) new_day = .true. + if (hour /= hourp) new_hour = .true. + + + do ns = 1, nstreams + if (histfreq(ns)=='1' .and. histfreq_n(ns)/=0) then + if (mod(istep1, histfreq_n(ns))==0) & + write_history(ns)=.true. + endif + enddo + + if (istep > 1) then + + do ns = 1, nstreams + + select case (histfreq(ns)) + case ("y", "Y") + if (new_year .and. histfreq_n(ns)/=0) then + if (mod(nyr, histfreq_n(ns))==0) & + write_history(ns) = .true. + endif + case ("m", "M") + if (new_month .and. histfreq_n(ns)/=0) then + if (mod(elapsed_months,histfreq_n(ns))==0) & + write_history(ns) = .true. + endif + case ("d", "D") + if (new_day .and. histfreq_n(ns)/=0) then + if (mod(elapsed_days,histfreq_n(ns))==0) & + write_history(ns) = .true. + endif + case ("h", "H") + if (new_hour .and. histfreq_n(ns)/=0) then + if (mod(elapsed_hours,histfreq_n(ns))==0) & + write_history(ns) = .true. + endif + end select + + enddo ! nstreams + + select case (dumpfreq) + case ("y", "Y") + if (new_year .and. mod(nyr, dumpfreq_n)==0) & + write_restart = 1 + case ("m", "M") + if (new_month .and. mod(elapsed_months,dumpfreq_n)==0) & + write_restart = 1 + case ("d", "D") + if (new_day .and. mod(elapsed_days, dumpfreq_n)==0) & + write_restart = 1 + end select + + if (force_restart_now) write_restart = 1 + + endif ! istep > 1 + + if (my_task == master_task .and. mod(istep,diagfreq) == 0 & + .and. 
stop_now /= 1) then + write(nu_diag,*) ' ' + write(nu_diag,'(a7,i10,4x,a6,i10,4x,a4,i10)') & + 'istep1:', istep1, 'idate:', idate, 'sec:', sec + endif + + end subroutine calendar + +!======================================================================= + +! Convert the date to seconds since calendar zero. +! ** This is based on the UM routine TIME2SEC ** +! +! authors: Craig MacLachlan, UK Met Office + + subroutine time2sec(year,month,day,tsec) + + integer (kind=int_kind), intent(in) :: year ! year + integer (kind=int_kind), intent(in) :: month ! month + integer (kind=int_kind), intent(in) :: day ! year + real (kind=dbl_kind), intent(out) :: tsec ! seconds since calendar zero + + ! local variables + + real (kind=dbl_kind) :: days_since_calz ! days since calendar zero + integer (kind=int_kind) :: years_since_calz ! days since calendar zero + + if (dayyr == 360) then + days_since_calz = c360*year + c30*(month-1) + day - c1 + tsec = secday * days_since_calz + + else + + if (use_leap_years) then + + call set_calendar(year) + + ! Add on the days from this year + days_since_calz = day + daycal(month) - c1 + + ! Subtract a year because we only want to count whole years + years_since_calz = year - 1 + + ! Add days from preceeding years + days_since_calz = days_since_calz & + + int(years_since_calz/c400)*days_per_4c + years_since_calz = years_since_calz & + - int(years_since_calz/c400)*400 + + days_since_calz = days_since_calz & + + int(years_since_calz/c100)*days_per_c + years_since_calz = years_since_calz & + - int(years_since_calz/c100)*100 + + days_since_calz = days_since_calz & + + int(years_since_calz/c4)*days_per_4y + years_since_calz = years_since_calz & + - int(years_since_calz/c4)*4 + + days_since_calz = days_since_calz & + + years_since_calz*days_per_y + + tsec = secday * days_since_calz + + else ! Using fixed 365-day calendar + + days_since_calz = c365*year + daycal365(month) + day - c1 + tsec = secday * days_since_calz + + end if + + end if + + end subroutine time2sec + +!======================================================================= + +! Convert the time in seconds since calendar zero to a date. +! +! authors: Craig MacLachlan, UK Met Office + + subroutine sec2time(year,month,day,tsec) + + integer (kind=int_kind), intent(out) :: year ! year + integer (kind=int_kind), intent(out) :: month ! month + integer (kind=int_kind), intent(out) :: day ! year + real (kind=dbl_kind), intent(in) :: tsec ! seconds since calendar zero + + ! local variables + + real (kind=dbl_kind) :: days_since_calz ! days since calendar zero + integer (kind=int_kind) :: k ! 
counter + + days_since_calz = int(tsec/secday) + + if (dayyr == 360) then + + year = int(days_since_calz/c360) + month = mod(int(days_since_calz/c30),12) + 1 + day = mod(int(days_since_calz),30) + 1 + + else + + if (use_leap_years) then + + year = int(days_since_calz/days_per_4c)*400 + days_since_calz = days_since_calz & + - int(days_since_calz/days_per_4c)*days_per_4c + + if (days_since_calz == 4*days_per_c) then + year = year + 400 + days_since_calz = days_per_y + 1 + else + year = year + int(days_since_calz/days_per_c)*100 + days_since_calz = days_since_calz & + - int(days_since_calz/days_per_c)*days_per_c + + year = year + int(days_since_calz/days_per_4y)*4 + days_since_calz = days_since_calz & + - int(days_since_calz/days_per_4y)*days_per_4y + + if (days_since_calz == 4*days_per_y) then + year = year + 4 + days_since_calz = days_per_y + 1 + else + year = year + int(days_since_calz/days_per_y) + 1 + days_since_calz = days_since_calz & + - int(days_since_calz/days_per_y)*days_per_y + c1 + endif + endif + + ! Ensure the calendar variables are correct for this year. + call set_calendar(year) + + ! Calculate the month + month = 1 + do k = 1, 12 + if (days_since_calz > daycal(k)) month = k + enddo + + ! Calculate the day of the month + day = days_since_calz - daycal(month) + + else ! Using fixed 365-day calendar + + year = int(days_since_calz/c365) + days_since_calz = days_since_calz - year*365 + 1 + + ! Calculate the month + month = 1 + do k = 1, 12 + if (days_since_calz > daycal365(k)) month = k + enddo + + ! Calculate the day of the month + day = days_since_calz - daycal365(month) + + end if + + end if + + end subroutine sec2time + +!======================================================================= + +! Set the "days per month", "days per year", etc variables for the +! current year. +! +! authors: Craig MacLachlan, UK Met Office + + subroutine set_calendar(year) + + integer (kind=int_kind), intent(in) :: year ! current year + + ! Internal variable + logical (kind=log_kind) :: isleap ! Leap year logical + + isleap = .false. ! not a leap year + if (mod(year, 4) == 0) isleap = .true. + if (mod(year,100) == 0) isleap = .false. + if (mod(year,400) == 0) isleap = .true. + + ! Ensure the calendar is set correctly + if (isleap) then + daycal = daycal366 + daymo = daymo366 + dayyr=real(daycal(13), kind=dbl_kind) + days_per_year=int(dayyr) + else + daycal = daycal365 + daymo = daymo365 + dayyr=real(daycal(13), kind=dbl_kind) + days_per_year=int(dayyr) + endif + + end subroutine set_calendar + +#ifdef AusCOM +!======================================================================= +subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) + +use cpl_parameters + +implicit none + +real (kind=dbl_kind), intent(in) :: ttime +integer, intent(out) :: khfin, kdfin, kmfin, kyfin + +integer :: klmo(12) !length of the months +integer :: inc_day !increment of days since the beginning of this run +integer :: jm, jd + +logical :: lleap + +inc_day = int ((ttime + 0.5)/86400. ) +khfin = (ttime - inc_day*86400)/3600 + +IF (caltype .eq. 0 .or. caltype .eq. 1) THEN + + ! + ! 1. Length of the months + ! + DO jm = 1, 12 + klmo(jm) = 31 + if ( (jm-4)*(jm-6)*(jm-9)*(jm-11) == 0) klmo(jm) = 30 + IF (jm .eq. 2) THEN + ! + !* Leap years + ! + lleap = .FALSE. + IF (caltype .eq. 1) THEN + IF (MOD(iniyear, 4) .eq. 0) lleap = .TRUE. + IF (MOD(iniyear,100) .eq. 0) lleap = .FALSE. + IF (MOD(iniyear,400) .eq. 0) lleap = .TRUE. 
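+ ! Note: only caltype = 1 applies the Gregorian leap-year test above;
+ ! with caltype = 0, lleap stays .FALSE., February keeps 28 days and
+ ! every year has 365 days.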
+ ENDIF + klmo(jm) = 28 + if (lleap) klmo(jm) = 29 + ENDIF + ENDDO !jm=1,12 + + kdfin = iniday + kmfin = inimon + kyfin = iniyear + + ! + ! 2. Loop on the days + ! + + DO 210 jd = 1, inc_day + kdfin = kdfin + 1 + IF (kdfin .le. klmo(kmfin)) GOTO 210 + kdfin = 1 + kmfin = kmfin + 1 + IF (kmfin .le. 12) GOTO 210 + kmfin = 1 + kyfin = kyfin + 1 + ! + !* Leap years + ! + lleap = .FALSE. + IF (caltype .eq. 1) THEN + IF (MOD(kyfin, 4) .eq. 0) lleap = .TRUE. + IF (MOD(kyfin,100) .eq. 0) lleap = .FALSE. + IF (MOD(kyfin,400) .eq. 0) lleap = .TRUE. + ENDIF + klmo(2) = 28 + if (lleap) klmo(2) = 29 +210 CONTINUE + +ELSE !for years with constant length of months + + ! + ! 1. Calculate month lengths for current year + ! + DO jm = 1, 12 + klmo(jm) = caltype + ENDDO + kdfin = iniday + kmfin = inimon + kyfin = iniyear + + ! + ! 2. Loop on the days + ! + + DO 410 jd = 1, inc_day + kdfin = kdfin + 1 + IF (kdfin .le. klmo(kmfin)) GOTO 410 + kdfin = 1 + kmfin = kmfin + 1 + IF (kmfin .le. 12) GOTO 410 + kmfin = 1 + kyfin = kyfin + 1 +410 CONTINUE + +ENDIF + +end subroutine get_idate + +!======================================================================= +function days_year(year) + +use cpl_parameters, only : caltype + +implicit none + +integer, intent(in) :: year +real (kind=dbl_kind) :: days_year +logical :: lleap + +IF (caltype .eq. 0 .or. caltype .eq. 1) THEN + lleap = .FALSE. + days_year = 365. + IF (caltype .eq. 1) THEN + IF (MOD(year, 4) .eq. 0) lleap = .TRUE. + IF (MOD(year,100) .eq. 0) lleap = .FALSE. + IF (MOD(year,400) .eq. 0) lleap = .TRUE. + ENDIF + if (lleap) days_year = 366. +ELSE + days_year = dayyr +ENDIF + +return +end function days_year +#endif +!======================================================================= + + end module ice_calendar + +!======================================================================= diff --git a/source/ice_diagnostics.F90 b/source/ice_diagnostics.F90 new file mode 100755 index 00000000..0b5e5b63 --- /dev/null +++ b/source/ice_diagnostics.F90 @@ -0,0 +1,1601 @@ +! SVN:$Id: ice_diagnostics.F90 825 2014-08-29 15:37:09Z eclare $ +!======================================================================= + +! Diagnostic information output during run +! +! authors: Elizabeth C. Hunke, LANL +! Bruce P. Briegleb, NCAR +! +! 2004: Block structure added by William Lipscomb +! 2006: Converted to free source form (F90) by Elizabeth Hunke + + module ice_diagnostics + + use ice_kinds_mod + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0 + use ice_calendar, only: diagfreq, istep1, istep + use ice_domain_size, only: max_aero + use ice_fileunits, only: nu_diag + + implicit none + private + public :: runtime_diags, init_mass_diags, init_diags, print_state, print_points_state + + save + + ! diagnostic output file + character (len=char_len), public :: diag_file + + ! point print data + + logical (kind=log_kind), public :: & + print_points , & ! if true, print point data + print_global ! if true, print global data + + integer (kind=int_kind), parameter, public :: & + npnt = 2 ! total number of points to be printed + + ! Set to true to identify unstable fast-moving ice. + logical (kind=log_kind), parameter :: & + check_umax = .false. ! if true, check for speed > umax_stab + + real (kind=dbl_kind), parameter :: & + umax_stab = 1.0_dbl_kind , & ! ice speed threshold for instability (m/s) + aice_extmin = 0.15_dbl_kind ! min aice value for ice extent calc + + real (kind=dbl_kind), dimension(npnt), public :: & + latpnt , & ! 
latitude of diagnostic points + lonpnt ! longitude of diagnostic points + + integer (kind=int_kind) :: & + iindx , & ! i index for points + jindx , & ! j index for points + bindx ! block index for points + + ! for water and heat budgets + real (kind=dbl_kind), dimension(npnt) :: & + pdhi , & ! change in mean ice thickness (m) + pdhs , & ! change in mean snow thickness (m) + pde ! change in ice and snow energy (W m-2) + + real (kind=dbl_kind), dimension(npnt), public :: & + plat, plon ! latitude, longitude of points + + integer (kind=int_kind), dimension(npnt), public :: & + piloc, pjloc, pbloc, pmloc ! location of diagnostic points + + ! for hemispheric water and heat budgets + real (kind=dbl_kind) :: & + totmn , & ! total ice/snow water mass (nh) + totms , & ! total ice/snow water mass (sh) + totmin , & ! total ice water mass (nh) + totmis , & ! total ice water mass (sh) + toten , & ! total ice/snow energy (J) + totes ! total ice/snow energy (J) + + real (kind=dbl_kind), dimension(max_aero) :: & + totaeron , & ! total aerosol mass + totaeros ! total aerosol mass + + ! printing info for routine print_state + ! iblkp, ip, jp, mtask identify the grid cell to print + character (char_len) :: plabel + integer (kind=int_kind), parameter, public :: & + check_step = 999999999, & ! begin printing at istep1=check_step + iblkp = 1, & ! block number + ip = 3, & ! i index + jp = 5, & ! j index + mtask = 0 ! my_task + +!======================================================================= + + contains + +!======================================================================= + +! Writes diagnostic info (max, min, global sums, etc) to standard out +! +! authors: Elizabeth C. Hunke, LANL +! Bruce P. Briegleb, NCAR +! Cecilia M. Bitz, UW + + subroutine runtime_diags (dt) + + use ice_blocks, only: nx_block, ny_block + use ice_broadcast, only: broadcast_scalar + use ice_constants, only: c1, c1000, c2, p001, p5, puny, rhoi, rhos, rhow, & + rhofresh, Tffresh, Lfresh, Lvap, ice_ref_salinity, field_loc_center, & + m2_to_km2, awtvdr, awtidr, awtvdf, awtidf + use ice_domain, only: distrb_info, nblocks + use ice_domain_size, only: ncat, n_aero, max_blocks + use ice_fileunits, only: flush_fileunit + use ice_flux, only: alvdr, alidr, alvdf, alidf, evap, fsnow, frazil, & + fswabs, fswthru, flw, flwout, fsens, fsurf, flat, frzmlt_init, frain, fpond, & + coszen, faero_atm, faero_ocn, fhocn_ai, fsalt_ai, fresh_ai, & + update_ocn_f, Tair, Qa, fsw, fcondtop, meltt, meltb, meltl, snoice, & + dsnow, congel, sst, sss, Tf, fhocn + use ice_global_reductions, only: global_sum, global_sum_prod, global_maxval + use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_type + use ice_state ! everything + use ice_therm_shared, only: calc_Tsfc + use ice_zbgc_shared, only: rhosi +#ifdef CCSMCOUPLED + use ice_prescribed_mod, only: prescribed_ice +#endif + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i, j, n, iblk + + ! hemispheric state quantities + real (kind=dbl_kind) :: & + umaxn, hmaxn, shmaxn, arean, snwmxn, extentn, shmaxnt, & + umaxs, hmaxs, shmaxs, areas, snwmxs, extents, shmaxst, & + etotn, mtotn, micen, msnwn, pmaxn, ketotn, & + etots, mtots, mices, msnws, pmaxs, ketots, & + urmsn, albtotn, arean_alb, mpndn, ptotn, spondn, & + urmss, albtots, areas_alb, mpnds, ptots, sponds + + ! 
hemispheric flux quantities + real (kind=dbl_kind) :: & + rnn, snn, frzn, hnetn, fhocnn, fhatmn, fhfrzn, & + rns, sns, frzs, hnets, fhocns, fhatms, fhfrzs, & + sfsaltn, sfreshn, evpn, fluxn , delmxn, delmin, & + sfsalts, sfreshs, evps, fluxs , delmxs, delmis, & + delein, werrn, herrn, msltn, delmsltn, serrn, & + deleis, werrs, herrs, mslts, delmslts, serrs + + ! aerosol diagnostics + real (kind=dbl_kind), dimension(max_aero) :: & + faeran, faeron, aerrn, & + faeras, faeros, aerrs, & + aeromx1n, aeromx1s, & + aerototn, aerotots + + ! fields at diagnostic points + real (kind=dbl_kind), dimension(npnt) :: & + paice, pTair, pQa, pfsnow, pfrain, pfsw, pflw, & + pTsfc, pevap, pfswabs, pflwout, pflat, pfsens, & + pfsurf, pfcondtop, psst, psss, pTf, hiavg, hsavg, hbravg, & + pfhocn, psalt, & + pmeltt, pmeltb, pmeltl, psnoice, pdsnow, pfrazil, pcongel + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, work2 + + !----------------------------------------------------------------- + ! state of the ice + !----------------------------------------------------------------- + ! hemispheric quantities + + ! total ice area + arean = global_sum(aice, distrb_info, field_loc_center, tarean) + areas = global_sum(aice, distrb_info, field_loc_center, tareas) + arean = arean * m2_to_km2 + areas = areas * m2_to_km2 + + ! ice extent (= area of grid cells with aice > aice_extmin) + work1(:,:,:) = c0 + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (aice(i,j,iblk) >= aice_extmin) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + extentn = global_sum(work1, distrb_info, field_loc_center, & + tarean) + extents = global_sum(work1, distrb_info, field_loc_center, & + tareas) + extentn = extentn * m2_to_km2 + extents = extents * m2_to_km2 + + ! total ice volume + shmaxn = global_sum(vice, distrb_info, field_loc_center, tarean) + shmaxs = global_sum(vice, distrb_info, field_loc_center, tareas) + + ! total snow volume + snwmxn = global_sum(vsno, distrb_info, field_loc_center, tarean) + snwmxs = global_sum(vsno, distrb_info, field_loc_center, tareas) + + ! total pond volume + ptotn = c0 + ptots = c0 + if (tr_pond_topo) then + !$OMP PARALLEL DO PRIVATE(iblk,i,j,n) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + do n = 1, ncat + work1(i,j,iblk) = work1(i,j,iblk) & + + aicen(i,j,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_hpnd,n,iblk) + enddo + enddo + enddo + enddo + !$OMP END PARALLEL DO + ptotn = global_sum(work1, distrb_info, field_loc_center, tarean) + ptots = global_sum(work1, distrb_info, field_loc_center, tareas) + endif + + ! total ice-snow kinetic energy + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = p5 & + * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) & + * (uvel(i,j,iblk)**2 + vvel(i,j,iblk)**2) + enddo + enddo + enddo + !$OMP END PARALLEL DO + ketotn = global_sum(work1, distrb_info, field_loc_center, tarean) + ketots = global_sum(work1, distrb_info, field_loc_center, tareas) + + ! rms ice speed + urmsn = c2*ketotn/(rhoi*shmaxn + rhos*snwmxn + puny) + if (urmsn > puny) then + urmsn = sqrt(urmsn) + else + urmsn = c0 + endif + + urmss = c2*ketots/(rhoi*shmaxs + rhos*snwmxs + puny) + if (urmss > puny) then + urmss = sqrt(urmss) + else + urmss = c0 + endif + + ! average ice albedo + ! 
mask out cells where sun is below horizon (for delta-Eddington) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = alvdr(i,j,iblk)*awtvdr & + + alidr(i,j,iblk)*awtidr & + + alvdf(i,j,iblk)*awtvdf & + + alidf(i,j,iblk)*awtidf + if (coszen(i,j,iblk) > puny) then + work2(i,j,iblk) = tarean(i,j,iblk) + else + work2(i,j,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + + arean_alb = global_sum(aice, distrb_info, field_loc_center, work2) + + albtotn = global_sum_prod(aice, work1, distrb_info, & + field_loc_center, work2) + + if (arean_alb > c0) then + albtotn = albtotn / arean_alb + else + albtotn = c0 + endif + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (coszen(i,j,iblk) > puny) then + work2(i,j,iblk) = tareas(i,j,iblk) + else + work2(i,j,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + + areas_alb = global_sum(aice, distrb_info, field_loc_center, work2) + + albtots = global_sum_prod(aice, work1, distrb_info, & + field_loc_center, work2) + + if (areas_alb > c0) then + albtots = albtots / areas_alb + else + albtots = c0 + endif + + ! maximum ice volume (= mean thickness including open water) + hmaxn = global_maxval(vice, distrb_info, lmask_n) + hmaxs = global_maxval(vice, distrb_info, lmask_s) + + ! maximum ice speed + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 & + + vvel(i,j,iblk)**2) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + umaxn = global_maxval(work1, distrb_info, lmask_n) + umaxs = global_maxval(work1, distrb_info, lmask_s) + + ! Write warning message if ice speed is too big + ! (Ice speeds of ~1 m/s or more usually indicate instability) + + if (check_umax) then + if (umaxn > umax_stab) then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (abs(work1(i,j,iblk) - umaxn) < puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Warning, large ice speed' + write(nu_diag,*) 'my_task, iblk, i, j, umaxn:', & + my_task, iblk, i, j, umaxn + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + elseif (umaxs > umax_stab) then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (abs(work1(i,j,iblk) - umaxs) < puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Warning, large ice speed' + write(nu_diag,*) 'my_task, iblk, i, j, umaxs:', & + my_task, iblk, i, j, umaxs + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif ! umax + endif ! check_umax + + ! maximum ice strength + + pmaxn = global_maxval(strength, distrb_info, lmask_n) + pmaxs = global_maxval(strength, distrb_info, lmask_s) + + pmaxn = pmaxn / c1000 ! convert to kN/m + pmaxs = pmaxs / c1000 + + if (print_global) then + + ! total ice/snow internal energy + call total_energy (work1) + + etotn = global_sum(work1, distrb_info, & + field_loc_center, tarean) + etots = global_sum(work1, distrb_info, & + field_loc_center, tareas) + + !----------------------------------------------------------------- + ! various fluxes + !----------------------------------------------------------------- + ! evap, fsens, and flwout need to be multiplied by aice because + ! regrettably they have been divided by aice for the coupler + !----------------------------------------------------------------- + + ! 
evaporation + + evpn = global_sum_prod(evap, aice, distrb_info, & + field_loc_center, tarean) + evps = global_sum_prod(evap, aice, distrb_info, & + field_loc_center, tareas) + evpn = evpn*dt + evps = evps*dt + + ! total brine tracer + shmaxnt = c0 + shmaxst = c0 + if (tr_brine) then + shmaxnt = global_sum(vice(:,:,:)*trcr(:,:,nt_fbri,:), distrb_info, & + field_loc_center, tarean) + shmaxst = global_sum(vice(:,:,:)*trcr(:,:,nt_fbri,:), distrb_info, & + field_loc_center, tareas) + endif + + ! salt flux + sfsaltn = global_sum(fsalt_ai, distrb_info, & + field_loc_center, tarean) + sfsalts = global_sum(fsalt_ai, distrb_info, & + field_loc_center, tareas) + sfsaltn = sfsaltn*dt + sfsalts = sfsalts*dt + + ! fresh water flux + sfreshn = global_sum(fresh_ai, distrb_info, & + field_loc_center, tarean) + sfreshs = global_sum(fresh_ai, distrb_info, & + field_loc_center, tareas) + sfreshn = sfreshn*dt + sfreshs = sfreshs*dt + + ! pond water flux + spondn = c0 + sponds = c0 + if (tr_pond_topo) then + spondn = global_sum(fpond, distrb_info, & + field_loc_center, tarean) + sponds = global_sum(fpond, distrb_info, & + field_loc_center, tareas) + spondn = spondn*dt + sponds = sponds*dt + endif + + ! ocean heat + ! Note: fswthru not included because it does not heat ice + fhocnn = global_sum(fhocn_ai, distrb_info, & + field_loc_center, tarean) + fhocns = global_sum(fhocn_ai, distrb_info, & + field_loc_center, tareas) + + ! latent heat + ! You may be wondering, where is the latent heat flux? + ! It is not included here because it cancels with + ! the evaporative flux times the enthalpy of the + ! ice/snow that evaporated. + + ! atmo heat flux + ! Note: flwout includes the reflected longwave down, needed by the + ! atmosphere as an upwards radiative boundary condition. + ! Also note: fswabs includes solar radiation absorbed in ocean, + ! which must be subtracted here. + + if (calc_Tsfc) then + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = & + (fswabs(i,j,iblk) - fswthru(i,j,iblk) & + + fsens (i,j,iblk) + flwout (i,j,iblk)) & + * aice (i,j,iblk) & + + flw (i,j,iblk) * aice_init (i,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + else ! fsurf is computed by atmosphere model + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = & + (fsurf(i,j,iblk) - flat(i,j,iblk)) & + * aice(i,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + endif ! calc_Tsfc + + fhatmn = global_sum(work1, distrb_info, & + field_loc_center, tarean) + fhatms = global_sum(work1, distrb_info, & + field_loc_center, tareas) + + ! freezing potential + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = max(c0,frzmlt_init(i,j,iblk)) + enddo + enddo + enddo + !$OMP END PARALLEL DO + fhfrzn = global_sum(work1, distrb_info, & + field_loc_center, tarean) + fhfrzs = global_sum(work1, distrb_info, & + field_loc_center, tareas) + + ! rain + rnn = global_sum_prod(frain, aice_init, distrb_info, & + field_loc_center, tarean) + rns = global_sum_prod(frain, aice_init, distrb_info, & + field_loc_center, tareas) + rnn = rnn*dt + rns = rns*dt + + ! snow + snn = global_sum_prod(fsnow, aice_init, distrb_info, & + field_loc_center, tarean) + sns = global_sum_prod(fsnow, aice_init, distrb_info, & + field_loc_center, tareas) + snn = snn*dt + sns = sns*dt + + ! frazil ice growth !! should not be multiplied by aice + ! 
m/step->kg/m^2/s + work1(:,:,:) = frazil(:,:,:)*rhoi/dt + frzn = global_sum(work1, distrb_info, & + field_loc_center, tarean) + frzs = global_sum(work1, distrb_info, & + field_loc_center, tareas) + frzn = frzn*dt + frzs = frzs*dt + + ! ice, snow, pond mass + micen = rhoi*shmaxn + msnwn = rhos*snwmxn + mices = rhoi*shmaxs + msnws = rhos*snwmxs + mpndn = rhofresh*ptotn + mpnds = rhofresh*ptots + + ! total ice, snow and pond mass + mtotn = micen + msnwn + mpndn + mtots = mices + msnws + mpnds + + ! mass change since beginning of time step + delmin = mtotn - totmn + delmis = mtots - totms + + ! ice mass change including frazil ice formation + delmxn = micen - totmin + delmxs = mices - totmis + if (.not. update_ocn_f) then + ! ice mass change excluding frazil ice formation + delmxn = delmxn - frzn + delmxs = delmxs - frzs + endif + + ! total water flux + fluxn = c0 + fluxs = c0 + if( arean > c0) then + ! water associated with frazil ice included in fresh + fluxn = rnn + snn + evpn - sfreshn + if (.not. update_ocn_f) then + fluxn = fluxn + frzn + endif + endif + if( areas > c0) then + ! water associated with frazil ice included in fresh + fluxs = rns + sns + evps - sfreshs + if (.not. update_ocn_f) then + fluxs = fluxs + frzs + endif + endif + + werrn = (fluxn-delmin)/(mtotn + c1) + werrs = (fluxs-delmis)/(mtots + c1) + + ! energy change + delein = etotn - toten + deleis = etots - totes + + fhatmn = fhatmn + ( - snn * Lfresh + evpn * Lvap ) / dt + fhatms = fhatms + ( - sns * Lfresh + evps * Lvap ) / dt + + hnetn = (fhatmn - fhocnn - fhfrzn) * dt + hnets = (fhatms - fhocns - fhfrzs) * dt + + herrn = (hnetn - delein) / (etotn - c1) + herrs = (hnets - deleis) / (etots - c1) + + ! salt mass + msltn = micen*ice_ref_salinity*p001 + mslts = mices*ice_ref_salinity*p001 + + ! change in salt mass + delmsltn = delmxn*ice_ref_salinity*p001 + delmslts = delmxs*ice_ref_salinity*p001 + + ! salt error + serrn = (sfsaltn + delmsltn) / (msltn + c1) + serrs = (sfsalts + delmslts) / (mslts + c1) + + ! aerosols + if (tr_aero) then + do n = 1, n_aero + faeran(n) = global_sum_prod(faero_atm(:,:,n,:), aice_init, & + distrb_info, field_loc_center, tarean) + faeras(n) = global_sum_prod(faero_atm(:,:,n,:), aice_init, & + distrb_info, field_loc_center, tareas) + faeran(n) = faeran(n)*dt + faeras(n) = faeras(n)*dt + faeron(n) = global_sum_prod(faero_ocn(:,:,n,:), aice, & + distrb_info, field_loc_center, tarean) + faeros(n) = global_sum_prod(faero_ocn(:,:,n,:), aice, & + distrb_info, field_loc_center, tareas) + faeron(n) = faeron(n)*dt + faeros(n) = faeros(n)*dt + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = & + trcr(i,j,nt_aero +4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+1+4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+2+4*(n-1),iblk)*vice(i,j,iblk) & + + trcr(i,j,nt_aero+3+4*(n-1),iblk)*vice(i,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + aerototn(n) = global_sum(work1, distrb_info, field_loc_center, tarean) + aerotots(n) = global_sum(work1, distrb_info, field_loc_center, tareas) + aeromx1n(n) = global_maxval(work1, distrb_info, lmask_n) + aeromx1s(n) = global_maxval(work1, distrb_info, lmask_s) + + aerrn(n) = (totaeron(n)-aerototn(n)+faeran(n)-faeron(n)) & + / (aerototn(n) + c1) + aerrs(n) = (totaeros(n)-aerotots(n)+faeras(n)-faeros(n)) & + / (aerotots(n) + c1) + enddo ! n_aero + endif ! tr_aero + + endif ! print_global + + if (print_points) then + + !----------------------------------------------------------------- + ! 
state of the ice and associated fluxes for 2 defined points + ! NOTE these are computed for the last timestep only (not avg) + !----------------------------------------------------------------- + + call total_energy (work1) + call total_salt (work2) + + do n = 1, npnt + if (my_task == pmloc(n)) then + i = piloc(n) + j = pjloc(n) + iblk = pbloc(n) + + pTair(n) = Tair(i,j,iblk) - Tffresh ! air temperature + pQa(n) = Qa(i,j,iblk) ! specific humidity + pfsnow(n) = fsnow(i,j,iblk)*dt/rhos ! snowfall + pfrain(n) = frain(i,j,iblk)*dt/rhow ! rainfall + pfsw(n) = fsw(i,j,iblk) ! shortwave radiation + pflw(n) = flw(i,j,iblk) ! longwave radiation + paice(n) = aice(i,j,iblk) ! ice area + + hiavg(n) = c0 ! avg snow/ice thickness + hsavg(n) = c0 + hbravg(n) = c0 ! avg brine thickness + if (paice(n) /= c0) then + hiavg(n) = vice(i,j,iblk)/paice(n) + hsavg(n) = vsno(i,j,iblk)/paice(n) + if (tr_brine) hbravg(n) = trcr(i,j,nt_fbri,iblk)* hiavg(n) + endif + psalt(n) = work2(i,j,iblk) + pTsfc(n) = trcr(i,j,nt_Tsfc,iblk) ! ice/snow sfc temperature + pevap(n) = evap(i,j,iblk)*dt/rhoi ! sublimation/condensation + pfswabs(n) = fswabs(i,j,iblk) ! absorbed solar flux + pflwout(n) = flwout(i,j,iblk) ! outward longwave flux + pflat(n) = flat(i,j,iblk) ! latent heat flux + pfsens(n) = fsens(i,j,iblk) ! sensible heat flux + pfsurf(n) = fsurf(i,j,iblk) ! total sfc heat flux + pfcondtop(n) = fcondtop(i,j,iblk) ! top sfc cond flux + pmeltt(n) = meltt(i,j,iblk) ! top melt + pmeltb(n) = meltb(i,j,iblk) ! bottom melt + pmeltl(n) = meltl(i,j,iblk) ! lateral melt + psnoice(n) = snoice(i,j,iblk) ! snow ice + pdsnow(n) = dsnow(i,j,iblk) ! snow change + pfrazil(n) = frazil(i,j,iblk) ! frazil ice + pcongel(n) = congel(i,j,iblk) ! congelation ice + pdhi(n) = vice(i,j,iblk) - pdhi(n) ! ice thickness change + pdhs(n) = vsno(i,j,iblk) - pdhs(n) ! snow thickness change + pde(n) =-(work1(i,j,iblk)- pde(n))/dt ! ice/snow energy change + psst(n) = sst(i,j,iblk) ! sea surface temperature + psss(n) = sss(i,j,iblk) ! sea surface salinity + pTf(n) = Tf(i,j,iblk) ! freezing temperature + pfhocn(n) = -fhocn(i,j,iblk) ! ocean heat used by ice + + endif ! 
my_task = pmloc + + call broadcast_scalar(pTair (n), pmloc(n)) + call broadcast_scalar(pQa (n), pmloc(n)) + call broadcast_scalar(pfsnow (n), pmloc(n)) + call broadcast_scalar(pfrain (n), pmloc(n)) + call broadcast_scalar(pfsw (n), pmloc(n)) + call broadcast_scalar(pflw (n), pmloc(n)) + call broadcast_scalar(paice (n), pmloc(n)) + call broadcast_scalar(hsavg (n), pmloc(n)) + call broadcast_scalar(hiavg (n), pmloc(n)) + call broadcast_scalar(psalt (n), pmloc(n)) + call broadcast_scalar(hbravg (n), pmloc(n)) + call broadcast_scalar(pTsfc (n), pmloc(n)) + call broadcast_scalar(pevap (n), pmloc(n)) + call broadcast_scalar(pfswabs (n), pmloc(n)) + call broadcast_scalar(pflwout (n), pmloc(n)) + call broadcast_scalar(pflat (n), pmloc(n)) + call broadcast_scalar(pfsens (n), pmloc(n)) + call broadcast_scalar(pfsurf (n), pmloc(n)) + call broadcast_scalar(pfcondtop(n), pmloc(n)) + call broadcast_scalar(pmeltt (n), pmloc(n)) + call broadcast_scalar(pmeltb (n), pmloc(n)) + call broadcast_scalar(pmeltl (n), pmloc(n)) + call broadcast_scalar(psnoice (n), pmloc(n)) + call broadcast_scalar(pdsnow (n), pmloc(n)) + call broadcast_scalar(pfrazil (n), pmloc(n)) + call broadcast_scalar(pcongel (n), pmloc(n)) + call broadcast_scalar(pdhi (n), pmloc(n)) + call broadcast_scalar(pdhs (n), pmloc(n)) + call broadcast_scalar(pde (n), pmloc(n)) + call broadcast_scalar(psst (n), pmloc(n)) + call broadcast_scalar(psss (n), pmloc(n)) + call broadcast_scalar(pTf (n), pmloc(n)) + call broadcast_scalar(pfhocn (n), pmloc(n)) + + enddo ! npnt + endif ! print_points + + !----------------------------------------------------------------- + ! start spewing + !----------------------------------------------------------------- + + if (my_task == master_task) then + + write(nu_diag,899) 'Arctic','Antarctic' + + write(nu_diag,901) 'total ice area (km^2) = ',arean, areas + write(nu_diag,901) 'total ice extent(km^2) = ',extentn,extents + write(nu_diag,901) 'total ice volume (m^3) = ',shmaxn, shmaxs + write(nu_diag,901) 'total snw volume (m^3) = ',snwmxn, snwmxs + write(nu_diag,901) 'tot kinetic energy (J) = ',ketotn, ketots + write(nu_diag,900) 'rms ice speed (m/s) = ',urmsn, urmss + write(nu_diag,900) 'average albedo = ',albtotn,albtots + write(nu_diag,900) 'max ice volume (m) = ',hmaxn, hmaxs + write(nu_diag,900) 'max ice speed (m/s) = ',umaxn, umaxs + write(nu_diag,900) 'max strength (kN/m) = ',pmaxn, pmaxs + + if (print_global) then ! global diags for conservations checks + +#ifdef CCSMCOUPLED + if (prescribed_ice) then + write (nu_diag,*) '----------------------------' + write (nu_diag,*) 'This is the prescribed ice option.' + write (nu_diag,*) 'Heat and water will not be conserved.' 
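+ ! (With prescribed ice the ice state is reset from data each step
+ ! rather than evolved from the computed fluxes, so the water and
+ ! heat budget residuals reported below are not expected to close.)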
+ write (nu_diag,*) '----------------------------' + endif +#endif + + write(nu_diag,*) '----------------------------' + write(nu_diag,901) 'arwt rain h2o kg in dt = ',rnn,rns + write(nu_diag,901) 'arwt snow h2o kg in dt = ',snn,sns + write(nu_diag,901) 'arwt evap h2o kg in dt = ',evpn,evps + write(nu_diag,901) 'arwt frzl h2o kg in dt = ',frzn,frzs + if (tr_pond_topo) & + write(nu_diag,901) 'arwt fpnd h2o kg in dt = ',spondn,sponds + write(nu_diag,901) 'arwt frsh h2o kg in dt = ',sfreshn,sfreshs + + write(nu_diag,901) 'arwt ice mass (kg) = ',micen,mices + write(nu_diag,901) 'arwt snw mass (kg) = ',msnwn,msnws + if (tr_pond_topo) & + write(nu_diag,901) 'arwt pnd mass (kg) = ',mpndn,mpnds + + write(nu_diag,901) 'arwt tot mass (kg) = ',mtotn,mtots + write(nu_diag,901) 'arwt tot mass chng(kg) = ',delmin,delmis + write(nu_diag,901) 'arwt water flux = ',fluxn,fluxs + if (update_ocn_f) then + write (nu_diag,*) '(=rain+snow+evap-fresh) ' + else + write (nu_diag,*) '(=rain+snow+evap+frzl-fresh) ' + endif + write(nu_diag,901) 'water flux error = ',werrn,werrs + + write(nu_diag,*) '----------------------------' + write(nu_diag,901) 'arwt atm heat flux (W) = ',fhatmn,fhatms + write(nu_diag,901) 'arwt ocn heat flux (W) = ',fhocnn,fhocns + write(nu_diag,901) 'arwt frzl heat flux(W) = ',fhfrzn,fhfrzs + write(nu_diag,901) 'arwt tot energy (J) = ',etotn,etots + write(nu_diag,901) 'arwt net heat (J) = ',hnetn,hnets + write(nu_diag,901) 'arwt tot energy chng(J)= ',delein,deleis + write(nu_diag,901) 'arwt heat error = ',herrn,herrs + + write(nu_diag,*) '----------------------------' + write(nu_diag,901) 'total brine tr (m^3) = ',shmaxnt, shmaxst + write(nu_diag,901) 'arwt salt mass (kg) = ',msltn,mslts + write(nu_diag,901) 'arwt salt mass chng(kg)= ',delmsltn, & + delmslts + write(nu_diag,901) 'arwt salt flx in dt(kg)= ',sfsaltn, & + sfsalts + write(nu_diag,901) 'arwt salt flx error = ',serrn,serrs + + write(nu_diag,*) '----------------------------' + if (tr_aero) then + do n = 1, n_aero + write(nu_diag,*) ' aerosol ',n + write(nu_diag,901) 'faero_atm (kg/m2) = ', faeran(n), faeras(n) + write(nu_diag,901) 'faero_ocn (kg/m2) = ', faeron(n), faeros(n) + write(nu_diag,901) 'total aero (kg/m2) = ', aerototn(n), aerotots(n) + write(nu_diag,901) 'aero error = ', aerrn(n), aerrs(n) + write(nu_diag,901) 'maximum aero (kg/m2) = ', aeromx1n(n),aeromx1s(n) + enddo + write(nu_diag,*) '----------------------------' + endif ! tr_aero + + endif ! print_global + + call flush_fileunit(nu_diag) + + !----------------------------------------------------------------- + ! 
diagnostics for Arctic and Antarctic points + !----------------------------------------------------------------- + + if (print_points) then + + write(nu_diag,*) ' ' + write(nu_diag,902) ' Lat, Long ',plat(1),plon(1), & + plat(2),plon(2) + write(nu_diag,903) ' my_task, iblk, i, j ', & + pmloc(1),pbloc(1),piloc(1),pjloc(1), & + pmloc(2),pbloc(2),piloc(2),pjloc(2) + write(nu_diag,*) '----------atm----------' + write(nu_diag,900) 'air temperature (C) = ',pTair(1),pTair(2) + write(nu_diag,900) 'specific humidity = ',pQa(1),pQa(2) + write(nu_diag,900) 'snowfall (m) = ',pfsnow(1), & + pfsnow(2) + write(nu_diag,900) 'rainfall (m) = ',pfrain(1), & + pfrain(2) + if (.not.calc_Tsfc) then + write(nu_diag,900) 'total surface heat flux= ',pfsurf(1),pfsurf(2) + write(nu_diag,900) 'top sfc conductive flux= ',pfcondtop(1), & + pfcondtop(2) + write(nu_diag,900) 'latent heat flx = ',pflat(1),pflat(2) + else + write(nu_diag,900) 'shortwave radiation sum= ',pfsw(1),pfsw(2) + write(nu_diag,900) 'longwave radiation = ',pflw(1),pflw(2) + endif + write(nu_diag,*) '----------ice----------' + write(nu_diag,900) 'area fraction = ',paice(1),paice(2) + write(nu_diag,900) 'avg ice thickness (m) = ',hiavg(1),hiavg(2) + write(nu_diag,900) 'avg snow depth (m) = ',hsavg(1),hsavg(2) + write(nu_diag,900) 'avg salinity (ppt) = ',psalt(1),psalt(2) + write(nu_diag,900) 'avg brine thickness (m)= ',hbravg(1),hbravg(2) + + if (calc_Tsfc) then + write(nu_diag,900) 'surface temperature(C) = ',pTsfc(1),pTsfc(2) + write(nu_diag,900) 'absorbed shortwave flx = ',pfswabs(1), & + pfswabs(2) + write(nu_diag,900) 'outward longwave flx = ',pflwout(1), & + pflwout(2) + write(nu_diag,900) 'sensible heat flx = ',pfsens(1), & + pfsens(2) + write(nu_diag,900) 'latent heat flx = ',pflat(1),pflat(2) + endif + write(nu_diag,900) 'subl/cond (m ice) = ',pevap(1),pevap(2) + write(nu_diag,900) 'top melt (m) = ',pmeltt(1) & + ,pmeltt(2) + write(nu_diag,900) 'bottom melt (m) = ',pmeltb(1) & + ,pmeltb(2) + write(nu_diag,900) 'lateral melt (m) = ',pmeltl(1) & + ,pmeltl(2) + write(nu_diag,900) 'new ice (m) = ',pfrazil(1), & + pfrazil(2) + write(nu_diag,900) 'congelation (m) = ',pcongel(1), & + pcongel(2) + write(nu_diag,900) 'snow-ice (m) = ',psnoice(1), & + psnoice(2) + write(nu_diag,900) 'snow change (m) = ',pdsnow(1), & + pdsnow(2) + write(nu_diag,900) 'effective dhi (m) = ',pdhi(1),pdhi(2) + write(nu_diag,900) 'effective dhs (m) = ',pdhs(1),pdhs(2) + write(nu_diag,900) 'intnl enrgy chng(W/m^2)= ',pde (1),pde (2) + write(nu_diag,*) '----------ocn----------' + write(nu_diag,900) 'sst (C) = ',psst(1),psst(2) + write(nu_diag,900) 'sss (ppt) = ',psss(1),psss(2) + write(nu_diag,900) 'freezing temp (C) = ',pTf(1),pTf(2) + write(nu_diag,900) 'heat used (W/m^2) = ',pfhocn(1), & + pfhocn(2) + + endif ! print_points + endif ! my_task = master_task + + 799 format (27x,a24) + 800 format (a25,2x,f24.17) + 801 format (a25,2x,1pe24.17) + 899 format (27x,a24,2x,a24) + 900 format (a25,2x,f24.17,2x,f24.17) + 901 format (a25,2x,1pe24.17,2x,1pe24.17) + 902 format (a25,10x,f6.1,1x,f6.1,9x,f6.1,1x,f6.1) + 903 format (a25,5x,i4,1x,i4,1x,i4,1x,i4,7x,i4,1x,i4,1x,i4,1x,i4) + + end subroutine runtime_diags + +!======================================================================= + +! Computes global combined ice and snow mass sum +! +! author: Elizabeth C. 
Hunke, LANL + + subroutine init_mass_diags + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: field_loc_center, rhofresh, rhoi, rhos + use ice_domain, only: distrb_info, nblocks + use ice_domain_size, only: n_aero, ncat, max_blocks + use ice_global_reductions, only: global_sum + use ice_grid, only: tareas, tarean + use ice_state, only: aicen, vice, vsno, trcrn, trcr, & + tr_aero, nt_aero, tr_pond_topo, nt_apnd, nt_hpnd + + integer (kind=int_kind) :: n, i, j, iblk + + real (kind=dbl_kind) :: & + shmaxn, snwmxn, shmaxs, snwmxs, totpn, totps + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + ! total ice volume + shmaxn = global_sum(vice, distrb_info, field_loc_center, tarean) + shmaxs = global_sum(vice, distrb_info, field_loc_center, tareas) + + ! total snow volume + snwmxn = global_sum(vsno, distrb_info, field_loc_center, tarean) + snwmxs = global_sum(vsno, distrb_info, field_loc_center, tareas) + + ! north/south ice mass + totmin = rhoi*shmaxn + totmis = rhoi*shmaxs + + ! north/south ice+snow mass + totmn = totmin + rhos*snwmxn + totms = totmis + rhos*snwmxs + + ! north/south ice+snow energy + call total_energy (work1) + toten = global_sum(work1, distrb_info, field_loc_center, tarean) + totes = global_sum(work1, distrb_info, field_loc_center, tareas) + + if (print_points) then + do n = 1, npnt + if (my_task == pmloc(n)) then + i = piloc(n) + j = pjloc(n) + iblk = pbloc(n) + + pdhi(n) = vice(i,j,iblk) + pdhs(n) = vsno(i,j,iblk) + pde(n) = work1(i,j,iblk) + endif + enddo ! npnt + endif ! print_points + + if (tr_aero) then + do n=1,n_aero + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = trcr(i,j,nt_aero +4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+1+4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+2+4*(n-1),iblk)*vice(i,j,iblk) & + + trcr(i,j,nt_aero+3+4*(n-1),iblk)*vice(i,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + totaeron(n)= global_sum(work1, distrb_info, field_loc_center, tarean) + totaeros(n)= global_sum(work1, distrb_info, field_loc_center, tareas) + enddo + endif + + if (tr_pond_topo) then + totpn = c0 + totps = c0 + !$OMP PARALLEL DO PRIVATE(iblk,i,j,n) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + do n = 1, ncat + work1(i,j,iblk) = work1(i,j,iblk) & + + aicen(i,j,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_hpnd,n,iblk) + enddo + enddo + enddo + enddo + !$OMP END PARALLEL DO + totpn = global_sum(work1, distrb_info, field_loc_center, tarean) + totps = global_sum(work1, distrb_info, field_loc_center, tareas) + + ! north/south ice+snow+pond mass + totmn = totmn + totpn*rhofresh + totms = totms + totps*rhofresh + endif + + end subroutine init_mass_diags + +!======================================================================= + +! Computes total energy of ice and snow in a grid cell. +! +! authors: E. C. Hunke, LANL + + subroutine total_energy (work) + + use ice_blocks, only: nx_block, ny_block + use ice_domain, only: nblocks + use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks + use ice_grid, only: tmask + use ice_state, only: vicen, vsnon, trcrn, nt_qice, nt_qsno + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), & + intent(out) :: & + work ! total energy + + ! local variables + + integer (kind=int_kind) :: & + icells ! number of ocean/ice cells + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, & ! 
compressed indices in i/j directions + indxj + + integer (kind=int_kind) :: & + i, j, k, n, iblk, ij + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,k,ij,icells,indxi,indxj) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + icells = 0 + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j,iblk)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! tmask + enddo + enddo + + work(:,:,iblk) = c0 + + !----------------------------------------------------------------- + ! Aggregate + !----------------------------------------------------------------- + + do n = 1, ncat + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + work(i,j,iblk) = work(i,j,iblk) & + + trcrn(i,j,nt_qice+k-1,n,iblk) & + * vicen(i,j,n,iblk) / real(nilyr,kind=dbl_kind) + enddo ! ij + enddo ! k + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + work(i,j,iblk) = work(i,j,iblk) & + + trcrn(i,j,nt_qsno+k-1,n,iblk) & + * vsnon(i,j,n,iblk) / real(nslyr,kind=dbl_kind) + enddo ! ij + enddo ! k + enddo ! n + + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine total_energy + +!======================================================================= + +! Computes bulk salinity of ice and snow in a grid cell. +! author: E. C. Hunke, LANL + + subroutine total_salt (work) + + use ice_blocks, only: nx_block, ny_block + use ice_domain, only: nblocks + use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks + use ice_grid, only: tmask + use ice_state, only: vicen, trcrn, nt_sice + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), & + intent(out) :: & + work ! total salt + + ! local variables + + integer (kind=int_kind) :: & + icells ! number of ocean/ice cells + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, & ! compressed indices in i/j directions + indxj + + integer (kind=int_kind) :: & + i, j, k, n, iblk, ij + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,k,ij,icells,indxi,indxj) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + icells = 0 + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j,iblk)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! tmask + enddo + enddo + + work(:,:,iblk) = c0 + + !----------------------------------------------------------------- + ! Aggregate + !----------------------------------------------------------------- + + do n = 1, ncat + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + work(i,j,iblk) = work(i,j,iblk) & + + trcrn(i,j,nt_sice+k-1,n,iblk) & + * vicen(i,j,n,iblk) / real(nilyr,kind=dbl_kind) + enddo ! ij + enddo ! k + enddo ! n + + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine total_salt + +!======================================================================= + +! Find tasks for diagnostic points. +! +! authors: Elizabeth C. Hunke and William H. 
Lipscomb, LANL + + subroutine init_diags + + use ice_grid, only: hm, TLAT, TLON + use ice_blocks, only: block, get_block + use ice_constants, only: c180, c360, p5, rad_to_deg, puny + use ice_domain, only: blocks_ice, distrb_info, nblocks + use ice_global_reductions, only: global_minval, global_maxval + + real (kind=dbl_kind) :: & + latdis , & ! latitude distance + londis , & ! longitude distance + totdis , & ! total distance + mindis , & ! minimum distance from desired location + mindis_g ! global minimum distance from desired location + + integer (kind=int_kind) :: & + n , & ! index for point search + i,j , & ! grid indices + iblk , & ! block index + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + type (block) :: & + this_block ! block information for current block + +!tcraig, do this all the time now for print_points_state usage +! if (print_points) then + + if (my_task==master_task) then + write(nu_diag,*) ' ' + write(nu_diag,*) ' Find indices of diagnostic points ' + endif + + piloc(:) = 0 + pjloc(:) = 0 + pbloc(:) = 0 + pmloc(:) = -999 + plat(:) = -999._dbl_kind + plon(:) = -999._dbl_kind + + ! find minimum distance to diagnostic points on this processor + do n = 1, npnt + if (lonpnt(n) > c180) lonpnt(n) = lonpnt(n) - c360 + + iindx = 0 + jindx = 0 + bindx = 0 + mindis = 540.0_dbl_kind ! 360. + 180. + + if (abs(latpnt(n)) < c360 .and. abs(lonpnt(n)) < c360) then + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,latdis,londis,totdis) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + if (hm(i,j,iblk) > p5) then + latdis = abs(latpnt(n)-TLAT(i,j,iblk)*rad_to_deg) + londis = abs(lonpnt(n)-TLON(i,j,iblk)*rad_to_deg) & + * cos(TLAT(i,j,iblk)) + totdis = sqrt(latdis**2 + londis**2) + if (totdis < mindis) then + mindis = totdis + jindx = j + iindx = i + bindx = iblk + endif ! totdis < mindis + endif ! hm > p5 + enddo ! i + enddo ! j + enddo ! iblk + !$OMP END PARALLEL DO + + endif + + ! find global minimum distance to diagnostic points + mindis_g = global_minval(mindis, distrb_info) + + ! save indices of minimum-distance grid cell + if (mindis <= 180.0 .and. abs(mindis_g - mindis) < puny) then + piloc(n) = iindx + pjloc(n) = jindx + pbloc(n) = bindx + pmloc(n) = my_task + plat(n) = TLAT(iindx,jindx,bindx)*rad_to_deg + plon(n) = TLON(iindx,jindx,bindx)*rad_to_deg + endif + + ! communicate to all processors + piloc(n) = global_maxval(piloc(n), distrb_info) + pjloc(n) = global_maxval(pjloc(n), distrb_info) + pbloc(n) = global_maxval(pbloc(n), distrb_info) + pmloc(n) = global_maxval(pmloc(n), distrb_info) + plat(n) = global_maxval(plat(n), distrb_info) + plon(n) = global_maxval(plon(n), distrb_info) + + ! write to log file + if (my_task==master_task) then + write(nu_diag,*) ' ' + write(nu_diag,100) n,latpnt(n),lonpnt(n),plat(n),plon(n), & + piloc(n), pjloc(n), pbloc(n), pmloc(n) + endif + 100 format(' found point',i4/ & + ' lat lon TLAT TLON i j block task'/ & + 4(f6.1,1x),1x,4(i4,2x) ) + + enddo ! npnt +! endif ! print_points + + end subroutine init_diags + +!======================================================================= + +! This routine is useful for debugging. +! Calls to it should be inserted in the form (after thermo, for example) +! do iblk = 1, nblocks +! do j=jlo,jhi +! do i=ilo,ihi +! plabel = 'post thermo' +! if (istep1 >= check_step .and. iblk==iblkp .and i==ip & +! .and. j==jp .and. my_task == mtask) & +! 
call print_state(plabel,i,j,iblk) +! enddo +! enddo +! enddo +! +! 'use ice_diagnostics' may need to be inserted also +! author: Elizabeth C. Hunke, LANL + + subroutine print_state(plabel,i,j,iblk) + + use ice_blocks, only: block, get_block + use ice_constants, only: puny, rhoi, rhos, Lfresh, cp_ice + use ice_domain, only: blocks_ice + use ice_domain_size, only: ncat, nilyr, nslyr + use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, & + trcrn, nt_Tsfc, nt_qice, nt_qsno + use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, & + fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, & + frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltx, strtlty + + character (len=20), intent(in) :: plabel + + integer (kind=int_kind), intent(in) :: & + i, j , & ! horizontal indices + iblk ! block index + + ! local variables + + real (kind=dbl_kind) :: & + eidebug, esdebug, & + qi, qs, Tsnow + + integer (kind=int_kind) :: n, k + + type (block) :: & + this_block ! block information for current block + + this_block = get_block(blocks_ice(iblk),iblk) + + write(nu_diag,*) plabel + write(nu_diag,*) 'istep1, my_task, i, j, iblk:', & + istep1, my_task, i, j, iblk + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(i), & + this_block%j_glob(j) + write(nu_diag,*) ' ' + write(nu_diag,*) 'aice0', aice0(i,j,iblk) + do n = 1, ncat + write(nu_diag,*) ' ' + write(nu_diag,*) 'n =',n + write(nu_diag,*) 'aicen', aicen(i,j,n,iblk) + write(nu_diag,*) 'vicen', vicen(i,j,n,iblk) + write(nu_diag,*) 'vsnon', vsnon(i,j,n,iblk) + if (aicen(i,j,n,iblk) > puny) then + write(nu_diag,*) 'hin', vicen(i,j,n,iblk)/aicen(i,j,n,iblk) + write(nu_diag,*) 'hsn', vsnon(i,j,n,iblk)/aicen(i,j,n,iblk) + endif + write(nu_diag,*) 'Tsfcn',trcrn(i,j,nt_Tsfc,n,iblk) + write(nu_diag,*) ' ' + enddo ! 
n + + eidebug = c0 + do n = 1,ncat + do k = 1,nilyr + qi = trcrn(i,j,nt_qice+k-1,n,iblk) + write(nu_diag,*) 'qice, cat ',n,' layer ',k, qi + eidebug = eidebug + qi + if (aicen(i,j,n,iblk) > puny) then + write(nu_diag,*) 'qi/rhoi', qi/rhoi + endif + enddo + write(nu_diag,*) ' ' + enddo + write(nu_diag,*) 'qice(i,j)',eidebug + write(nu_diag,*) ' ' + + esdebug = c0 + do n = 1,ncat + if (vsnon(i,j,n,iblk) > puny) then + do k = 1,nslyr + qs = trcrn(i,j,nt_qsno+k-1,n,iblk) + write(nu_diag,*) 'qsnow, cat ',n,' layer ',k, qs + esdebug = esdebug + qs + Tsnow = (Lfresh + qs/rhos) / cp_ice + write(nu_diag,*) 'qs/rhos', qs/rhos + write(nu_diag,*) 'Tsnow', Tsnow + enddo + write(nu_diag,*) ' ' + endif + enddo + write(nu_diag,*) 'qsnow(i,j)',esdebug + write(nu_diag,*) ' ' + + write(nu_diag,*) 'uvel(i,j)',uvel(i,j,iblk) + write(nu_diag,*) 'vvel(i,j)',vvel(i,j,iblk) + + write(nu_diag,*) ' ' + write(nu_diag,*) 'atm states and fluxes' + write(nu_diag,*) ' uatm = ',uatm (i,j,iblk) + write(nu_diag,*) ' vatm = ',vatm (i,j,iblk) + write(nu_diag,*) ' potT = ',potT (i,j,iblk) + write(nu_diag,*) ' Tair = ',Tair (i,j,iblk) + write(nu_diag,*) ' Qa = ',Qa (i,j,iblk) + write(nu_diag,*) ' rhoa = ',rhoa (i,j,iblk) + write(nu_diag,*) ' swvdr = ',swvdr(i,j,iblk) + write(nu_diag,*) ' swvdf = ',swvdf(i,j,iblk) + write(nu_diag,*) ' swidr = ',swidr(i,j,iblk) + write(nu_diag,*) ' swidf = ',swidf(i,j,iblk) + write(nu_diag,*) ' flw = ',flw (i,j,iblk) + write(nu_diag,*) ' frain = ',frain(i,j,iblk) + write(nu_diag,*) ' fsnow = ',fsnow(i,j,iblk) + write(nu_diag,*) ' ' + write(nu_diag,*) 'ocn states and fluxes' + write(nu_diag,*) ' frzmlt = ',frzmlt (i,j,iblk) + write(nu_diag,*) ' sst = ',sst (i,j,iblk) + write(nu_diag,*) ' sss = ',sss (i,j,iblk) + write(nu_diag,*) ' Tf = ',Tf (i,j,iblk) + write(nu_diag,*) ' uocn = ',uocn (i,j,iblk) + write(nu_diag,*) ' vocn = ',vocn (i,j,iblk) + write(nu_diag,*) ' strtltx = ',strtltx(i,j,iblk) + write(nu_diag,*) ' strtlty = ',strtlty(i,j,iblk) + write(nu_diag,*) ' ' + write(nu_diag,*) 'srf states and fluxes' + write(nu_diag,*) ' Tref = ',Tref (i,j,iblk) + write(nu_diag,*) ' Qref = ',Qref (i,j,iblk) + write(nu_diag,*) ' Uref = ',Uref (i,j,iblk) + write(nu_diag,*) ' fsens = ',fsens (i,j,iblk) + write(nu_diag,*) ' flat = ',flat (i,j,iblk) + write(nu_diag,*) ' evap = ',evap (i,j,iblk) + write(nu_diag,*) ' flwout = ',flwout(i,j,iblk) + write(nu_diag,*) ' ' + + end subroutine print_state + +!======================================================================= +!======================================================================= + +! This routine is useful for debugging. +! Calls can be inserted anywhere and it will print info on print_points points +! call print_points_state(plabel) +! +! 'use ice_diagnostics' may need to be inserted also + + subroutine print_points_state(plabel,ilabel) + + use ice_blocks, only: block, get_block + use ice_constants, only: puny, rhoi, rhos, Lfresh, cp_ice + use ice_domain, only: blocks_ice + use ice_domain_size, only: ncat, nilyr, nslyr + use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, & + trcrn, nt_Tsfc, nt_qice, nt_qsno + use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, & + fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, & + frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltx, strtlty + + character (len=*), intent(in),optional :: plabel + integer , intent(in),optional :: ilabel + + ! 
local variables + + real (kind=dbl_kind) :: & + eidebug, esdebug, & + qi, qs, Tsnow + + integer (kind=int_kind) :: m, n, k, i, j, iblk + character(len=256) :: llabel + + type (block) :: & + this_block ! block information for current block + + ! ---------------------- + + do m = 1, npnt + if (my_task == pmloc(m)) then + i = piloc(m) + j = pjloc(m) + iblk = pbloc(m) + this_block = get_block(blocks_ice(iblk),iblk) + + if (present(ilabel)) then + write(llabel,'(i6,a1,i3,a1)') ilabel,':',m,':' + else + write(llabel,'(i3,a1)') m,':' + endif + if (present(plabel)) then + write(llabel,'(a)') 'pps:'//trim(plabel)//':'//trim(llabel) + else + write(llabel,'(a)') 'pps:'//trim(llabel) + endif + + write(nu_diag,*) trim(llabel),'istep1, my_task, i, j, iblk=', & + istep1, my_task, i, j, iblk + write(nu_diag,*) trim(llabel),'Global i and j=', & + this_block%i_glob(i), & + this_block%j_glob(j) + write(nu_diag,*) trim(llabel),'aice0=', aice0(i,j,iblk) + + do n = 1, ncat + write(nu_diag,*) trim(llabel),'aicen=', n,aicen(i,j,n,iblk) + write(nu_diag,*) trim(llabel),'vicen=', n,vicen(i,j,n,iblk) + write(nu_diag,*) trim(llabel),'vsnon=', n,vsnon(i,j,n,iblk) + if (aicen(i,j,n,iblk) > puny) then + write(nu_diag,*) trim(llabel),'hin=', n,vicen(i,j,n,iblk)/aicen(i,j,n,iblk) + write(nu_diag,*) trim(llabel),'hsn=', n,vsnon(i,j,n,iblk)/aicen(i,j,n,iblk) + endif + write(nu_diag,*) trim(llabel),'Tsfcn=',n,trcrn(i,j,nt_Tsfc,n,iblk) + enddo + + eidebug = c0 + do n = 1,ncat + do k = 1,nilyr + qi = trcrn(i,j,nt_qice+k-1,n,iblk) + write(nu_diag,*) trim(llabel),'qice= ',n,k, qi + eidebug = eidebug + qi + enddo + enddo + write(nu_diag,*) trim(llabel),'qice=',eidebug + + esdebug = c0 + do n = 1,ncat + if (vsnon(i,j,n,iblk) > puny) then + do k = 1,nslyr + qs = trcrn(i,j,nt_qsno+k-1,n,iblk) + write(nu_diag,*) trim(llabel),'qsnow=',n,k, qs + esdebug = esdebug + qs + enddo + endif + enddo + write(nu_diag,*) trim(llabel),'qsnow=',esdebug + + write(nu_diag,*) trim(llabel),'uvel=',uvel(i,j,iblk) + write(nu_diag,*) trim(llabel),'vvel=',vvel(i,j,iblk) + + ! write(nu_diag,*) ' ' + ! write(nu_diag,*) 'atm states and fluxes' + ! write(nu_diag,*) ' uatm = ',uatm (i,j,iblk) + ! write(nu_diag,*) ' vatm = ',vatm (i,j,iblk) + ! write(nu_diag,*) ' potT = ',potT (i,j,iblk) + ! write(nu_diag,*) ' Tair = ',Tair (i,j,iblk) + ! write(nu_diag,*) ' Qa = ',Qa (i,j,iblk) + ! write(nu_diag,*) ' rhoa = ',rhoa (i,j,iblk) + ! write(nu_diag,*) ' swvdr = ',swvdr(i,j,iblk) + ! write(nu_diag,*) ' swvdf = ',swvdf(i,j,iblk) + ! write(nu_diag,*) ' swidr = ',swidr(i,j,iblk) + ! write(nu_diag,*) ' swidf = ',swidf(i,j,iblk) + ! write(nu_diag,*) ' flw = ',flw (i,j,iblk) + ! write(nu_diag,*) ' frain = ',frain(i,j,iblk) + ! write(nu_diag,*) ' fsnow = ',fsnow(i,j,iblk) + ! write(nu_diag,*) ' ' + ! write(nu_diag,*) 'ocn states and fluxes' + ! write(nu_diag,*) ' frzmlt = ',frzmlt (i,j,iblk) + ! write(nu_diag,*) ' sst = ',sst (i,j,iblk) + ! write(nu_diag,*) ' sss = ',sss (i,j,iblk) + ! write(nu_diag,*) ' Tf = ',Tf (i,j,iblk) + ! write(nu_diag,*) ' uocn = ',uocn (i,j,iblk) + ! write(nu_diag,*) ' vocn = ',vocn (i,j,iblk) + ! write(nu_diag,*) ' strtltx = ',strtltx(i,j,iblk) + ! write(nu_diag,*) ' strtlty = ',strtlty(i,j,iblk) + ! write(nu_diag,*) ' ' + ! write(nu_diag,*) 'srf states and fluxes' + ! write(nu_diag,*) ' Tref = ',Tref (i,j,iblk) + ! write(nu_diag,*) ' Qref = ',Qref (i,j,iblk) + ! write(nu_diag,*) ' Uref = ',Uref (i,j,iblk) + ! write(nu_diag,*) ' fsens = ',fsens (i,j,iblk) + ! write(nu_diag,*) ' flat = ',flat (i,j,iblk) + ! 
write(nu_diag,*) ' evap = ',evap (i,j,iblk) + ! write(nu_diag,*) ' flwout = ',flwout(i,j,iblk) + ! write(nu_diag,*) ' ' + + endif ! my_task + enddo ! npnt + + end subroutine print_points_state + +!======================================================================= + + end module ice_diagnostics + +!======================================================================= diff --git a/source/ice_distribution.F90 b/source/ice_distribution.F90 new file mode 100755 index 00000000..1f46e40a --- /dev/null +++ b/source/ice_distribution.F90 @@ -0,0 +1,1964 @@ +! SVN:$Id: ice_distribution.F90 825 2014-08-29 15:37:09Z eclare $ +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| + + module ice_distribution + +! This module provides data types and routines for distributing +! blocks across processors. +! +! author: Phil Jones, LANL +! Oct. 2004: Adapted from POP by William H. Lipscomb, LANL +! Jan. 2008: Elizabeth Hunke updated to new POP infrastructure + + use ice_kinds_mod + use ice_domain_size, only: max_blocks + use ice_communicate, only: my_task, master_task, create_communicator + use ice_blocks, only: nblocks_x, nblocks_y, nblocks_tot + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_diag + + implicit none + private + save + + type, public :: distrb ! distribution data type + integer (int_kind) :: & + nprocs ,&! number of processors in this dist + communicator ,&! communicator to use in this dist + numLocalBlocks ! number of blocks distributed to this + ! local processor + + integer (int_kind), dimension(:), pointer :: & + blockLocation ,&! processor location for all blocks + blockLocalID ,&! local block id for all blocks + blockGlobalID ! global block id for each local block + + integer (int_kind), dimension(:), pointer :: blockCnt + integer (int_kind), dimension(:,:), pointer :: blockIndex + + end type + + public :: create_distribution, & + ice_distributionGet, & + ice_distributionGetBlockLoc, & + ice_distributionGetBlockID, & + create_local_block_ids + + character (char_len), public :: & + processor_shape ! 'square-pop' (approx) POP default config + ! 'square-ice' like square-pop but better for ice + ! 'slenderX1' (NPX x 1) + ! 'slenderX2' (NPX x 2) + +!ars599: 04042016: should we keep or not? Refer to fn: create_distrb_cart +!ars599: 26032014: will call from cpl_interface +! from function create_distrb_cart +! so change to public + integer (int_kind), public :: & + nprocsX, &! num of procs in x for global domain + nprocsY ! num of procs in y for global domain + + +!*********************************************************************** + + contains + +!*********************************************************************** + + function create_distribution(dist_type, nprocs, work_per_block) + +! This routine determines the distribution of blocks across processors +! by calling the appropriate subroutine based on the distribution type +! requested. Six distributions are currently supported: a 2-d Cartesian +! distribution (cartesian), a load-balanced rake distribution based on +! an input amount of work per block (rake), round-robin and sector-robin +! distributions (roundrobin, sectrobin), a sector-Cartesian distribution +! (sectcart), and a space-filling-curve distribution (spacecurve). + + character (*), intent(in) :: & + dist_type ! method for distributing blocks: + ! cartesian, rake, roundrobin, + ! sectrobin, sectcart, or spacecurve + + integer (int_kind), intent(in) :: & + nprocs ! number of processors in this distribution + + integer (int_kind), dimension(:), intent(in) :: & + work_per_block ! amount of work per block + + type (distrb) :: & + create_distribution ! resulting structure describing + !
distribution of blocks +!---------------------------------------------------------------------- +! +! select the appropriate distribution type +! +!---------------------------------------------------------------------- + + select case (trim(dist_type)) + + case('cartesian') + + create_distribution = create_distrb_cart(nprocs, work_per_block) + + case('rake') + + create_distribution = create_distrb_rake(nprocs, work_per_block) + + case('roundrobin') + + create_distribution = create_distrb_roundrobin(nprocs, work_per_block) + + case('sectrobin') + + create_distribution = create_distrb_sectrobin(nprocs, work_per_block) + + case('sectcart') + + create_distribution = create_distrb_sectcart(nprocs, work_per_block) + + case('spacecurve') + + create_distribution = create_distrb_spacecurve(nprocs, & + work_per_block) + + case default + + call abort_ice('ice distribution: unknown distribution type') + + end select + +!----------------------------------------------------------------------- + + end function create_distribution + +!*********************************************************************** + + subroutine create_local_block_ids(block_ids, distribution) + +! This routine determines which blocks in an input distribution are +! located on the local processor and creates an array of block ids +! for all local blocks. + + type (distrb), intent(in) :: & + distribution ! input distribution for which local + ! blocks required + + integer (int_kind), dimension(:), pointer :: & + block_ids ! array of block ids for every block + ! that resides on the local processor +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + n, bcount ! dummy counters + + logical (log_kind) :: dbug + +!----------------------------------------------------------------------- +! +! first determine number of local blocks to allocate array +! +!----------------------------------------------------------------------- + + bcount = 0 + do n=1,size(distribution%blockLocation) + if (distribution%blockLocation(n) == my_task+1) bcount = bcount + 1 + end do + + + if (bcount > 0) allocate(block_ids(bcount)) + +!----------------------------------------------------------------------- +! +! now fill array with proper block ids +! +!----------------------------------------------------------------------- + +! dbug = .true. + dbug = .false. + if (bcount > 0) then + do n=1,size(distribution%blockLocation) + if (distribution%blockLocation(n) == my_task+1) then + block_ids(distribution%blockLocalID(n)) = n + + if (dbug) then + write(nu_diag,*) 'block id, proc, local_block: ', & + block_ids(distribution%blockLocalID(n)), & + distribution%blockLocation(n), & + distribution%blockLocalID(n) + endif + endif + end do + endif + + end subroutine create_local_block_ids + +!*********************************************************************** + + subroutine proc_decomposition(nprocs, nprocs_x, nprocs_y) + +! This subroutine attempts to find an optimal (nearly square) +! 2d processor decomposition for a given number of processors. + + integer (int_kind), intent(in) :: & + nprocs ! total number or processors + + integer (int_kind), intent(out) :: & + nprocs_x, nprocs_y ! number of procs in each dimension + +!---------------------------------------------------------------------- +! +! local variables +! 
+!---------------------------------------------------------------------- + + integer (int_kind) :: & + iguess, jguess ! guesses for nproc_x,y + + real (real_kind) :: & + square ! square root of nprocs + +!---------------------------------------------------------------------- +! +! start with an initial guess +! +!---------------------------------------------------------------------- + + square = sqrt(real(nprocs,kind=real_kind)) + nprocs_x = 0 + nprocs_y = 0 + + if (processor_shape == 'square-pop') then ! make as square as possible + iguess = nint(square) + jguess = nprocs/iguess + elseif (processor_shape == 'square-ice') then ! better for bipolar ice + jguess = nint(square) + iguess = nprocs/jguess + elseif (processor_shape == 'slenderX1') then ! 1 proc in y direction + jguess = 1 + iguess = nprocs/jguess + else ! 2 processors in y direction + jguess = min(2, nprocs) + iguess = nprocs/jguess + endif + +!---------------------------------------------------------------------- +! +! try various decompositions to find the best +! +!---------------------------------------------------------------------- + + proc_loop: do + if (processor_shape == 'square-pop') then + jguess = nprocs/iguess + else + iguess = nprocs/jguess + endif + + if (iguess*jguess == nprocs) then ! valid decomp + + !*** if the blocks can be evenly distributed, it is a + !*** good decomposition + if (mod(nblocks_x,iguess) == 0 .and. & + mod(nblocks_y,jguess) == 0) then + nprocs_x = iguess + nprocs_y = jguess + exit proc_loop + + !*** if the blocks can be evenly distributed in a + !*** transposed direction, it is a good decomposition + else if (mod(nblocks_x,jguess) == 0 .and. & + mod(nblocks_y,iguess) == 0) then + nprocs_x = jguess + nprocs_y = iguess + exit proc_loop + + !*** A valid decomposition, but keep searching for + !*** a better one + else + if (nprocs_x == 0) then + nprocs_x = iguess + nprocs_y = jguess + endif + if (processor_shape == 'square-pop') then + iguess = iguess - 1 + if (iguess == 0) then + exit proc_loop + else + cycle proc_loop + endif + else + jguess = jguess - 1 + if (jguess == 0) then + exit proc_loop + else + cycle proc_loop + endif + endif + endif + + else ! invalid decomp - keep trying + + if (processor_shape == 'square-pop') then + iguess = iguess - 1 + if (iguess == 0) then + exit proc_loop + else + cycle proc_loop + endif + else + jguess = jguess - 1 + if (jguess == 0) then + exit proc_loop + else + cycle proc_loop + endif + endif + endif + + end do proc_loop + + if (nprocs_x == 0) then + call abort_ice('ice: Unable to find 2d processor config') + endif + + if (my_task == master_task) then + write(nu_diag,'(a23,i4,a3,i4)') ' Processors (X x Y) = ', & + nprocs_x,' x ',nprocs_y + endif + +!---------------------------------------------------------------------- + + end subroutine proc_decomposition + +!********************************************************************** + + subroutine ice_distributionDestroy(distribution) + +! This routine destroys a defined distribution by deallocating +! all memory associated with the distribution. + + type (distrb), intent(inout) :: & + distribution ! distribution to destroy + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: istat ! status flag for deallocate + +!---------------------------------------------------------------------- +! +! reset scalars +! 
+!---------------------------------------------------------------------- + + distribution%nprocs = 0 + distribution%communicator = 0 + distribution%numLocalBlocks = 0 + +!---------------------------------------------------------------------- +! +! deallocate arrays +! +!---------------------------------------------------------------------- + + deallocate(distribution%blockLocation, stat=istat) + deallocate(distribution%blockLocalID , stat=istat) + deallocate(distribution%blockGlobalID, stat=istat) + +!----------------------------------------------------------------------- + + end subroutine ice_distributionDestroy + +!*********************************************************************** + + subroutine ice_distributionGet(distribution,& + nprocs, communicator, numLocalBlocks, & + blockLocation, blockLocalID, blockGlobalID) + +! This routine extracts information from a distribution. + + type (distrb), intent(in) :: & + distribution ! input distribution for which information + ! is requested + + integer (int_kind), intent(out), optional :: & + nprocs ,&! number of processors in this dist + communicator ,&! communicator to use in this dist + numLocalBlocks ! number of blocks distributed to this + ! local processor + + integer (int_kind), dimension(:), pointer, optional :: & + blockLocation ,&! processor location for all blocks + blockLocalID ,&! local block id for all blocks + blockGlobalID ! global block id for each local block + +!----------------------------------------------------------------------- +! +! depending on which optional arguments are present, extract the +! requested info +! +!----------------------------------------------------------------------- + + if (present(nprocs)) nprocs = distribution%nprocs + if (present(communicator)) communicator = distribution%communicator + if (present(numLocalBlocks)) numLocalBlocks = distribution%numLocalBlocks + + if (present(blockLocation)) then + if (associated(distribution%blockLocation)) then + blockLocation => distribution%blockLocation + else + call abort_ice( & + 'ice_distributionGet: blockLocation not allocated') + return + endif + endif + + if (present(blockLocalID)) then + if (associated(distribution%blockLocalID)) then + blockLocalID => distribution%blockLocalID + else + call abort_ice( & + 'ice_distributionGet: blockLocalID not allocated') + return + endif + endif + + if (present(blockGlobalID)) then + if (associated(distribution%blockGlobalID)) then + blockGlobalID => distribution%blockGlobalID + else + call abort_ice( & + 'ice_distributionGet: blockGlobalID not allocated') + return + endif + endif + +!----------------------------------------------------------------------- + + end subroutine ice_distributionGet + +!*********************************************************************** + + subroutine ice_distributionGetBlockLoc(distribution, blockID, & + processor, localID) + +! Given a distribution of blocks and a global block ID, return +! the processor and local index for the block. A zero for both +! is returned in the case that the block has been eliminated from +! the distribution (i.e. has no active points). + + type (distrb), intent(in) :: & + distribution ! input distribution for which information + ! is requested + + integer (int_kind), intent(in) :: & + blockID ! global block id for which location requested + + integer (int_kind), intent(out) :: & + processor, &! processor on which block resides + localID !
local index for this block on this proc + +!----------------------------------------------------------------------- +! +! check for valid blockID +! +!----------------------------------------------------------------------- + + if (blockID < 0 .or. blockID > nblocks_tot) then + call abort_ice( & + 'ice_distributionGetBlockLoc: invalid block id') + return + endif + +!----------------------------------------------------------------------- +! +! extract the location from the distribution data structure +! +!----------------------------------------------------------------------- + + processor = distribution%blockLocation(blockID) + localID = distribution%blockLocalID (blockID) + +!----------------------------------------------------------------------- + + end subroutine ice_distributionGetBlockLoc + +!*********************************************************************** + + subroutine ice_distributionGetBlockID(distribution, localID, & + blockID) + +! Given a distribution of blocks and a local block index, return +! the global block id for the block. + + type (distrb), intent(in) :: & + distribution ! input distribution for which information + ! is requested + + integer (int_kind), intent(in) :: & + localID ! local index for this block on this proc + + integer (int_kind), intent(out) :: & + blockID ! global block id for this local block + +!----------------------------------------------------------------------- +! +! check for valid localID +! +!----------------------------------------------------------------------- + + if (localID < 0 .or. localID > distribution%numLocalBlocks) then + call abort_ice( & + 'ice_distributionGetBlockID: invalid local id') + return + endif + +!----------------------------------------------------------------------- +! +! extract the global ID from the distribution data structure +! +!----------------------------------------------------------------------- + + blockID = distribution%blockGlobalID (localID) + +!----------------------------------------------------------------------- + + end subroutine ice_distributionGetBlockID + +!*********************************************************************** + + function create_distrb_cart(nprocs, workPerBlock) result(newDistrb) + +! This function creates a distribution of blocks across processors +! using a 2-d Cartesian distribution. + + integer (int_kind), intent(in) :: & + nprocs ! number of processors in this distribution + + integer (int_kind), dimension(:), intent(in) :: & + workPerBlock ! amount of work per block + + type (distrb) :: & + newDistrb ! resulting structure describing Cartesian + ! distribution of blocks + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + i, j, &! dummy loop indices + istat, &! status flag for allocation + iblock, jblock, &! + is, ie, js, je, &! start, end block indices for each proc + processor, &! processor position in cartesian decomp + globalID, &! global block ID + localID, &! block location on this processor +! nprocsX, &! num of procs in x for global domain +! nprocsY, &! num of procs in y for global domain + numBlocksXPerProc, &! num of blocks per processor in x + numBlocksYPerProc ! num of blocks per processor in y + +!---------------------------------------------------------------------- +! +! create communicator for this distribution +! 
+!---------------------------------------------------------------------- + + call create_communicator(newDistrb%communicator, nprocs) + +!---------------------------------------------------------------------- +! +! try to find best processor arrangement +! +!---------------------------------------------------------------------- + + newDistrb%nprocs = nprocs + + call proc_decomposition(nprocs, nprocsX, nprocsY) + + +!---------------------------------------------------------------------- +! +! allocate space for decomposition +! +!---------------------------------------------------------------------- + + allocate (newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + +!---------------------------------------------------------------------- +! +! distribute blocks linearly across processors in each direction +! +!---------------------------------------------------------------------- + + numBlocksXPerProc = (nblocks_x-1)/nprocsX + 1 + numBlocksYPerProc = (nblocks_y-1)/nprocsY + 1 + + do j=1,nprocsY + do i=1,nprocsX + processor = (j-1)*nprocsX + i ! number the processors + ! left to right, bot to top + + is = (i-1)*numBlocksXPerProc + 1 ! starting block in i + ie = i *numBlocksXPerProc ! ending block in i + if (ie > nblocks_x) ie = nblocks_x + js = (j-1)*numBlocksYPerProc + 1 ! starting block in j + je = j *numBlocksYPerProc ! ending block in j + if (je > nblocks_y) je = nblocks_y + + localID = 0 ! initialize counter for local index + do jblock = js,je + do iblock = is,ie + globalID = (jblock - 1)*nblocks_x + iblock + if (workPerBlock(globalID) /= 0) then + localID = localID + 1 + newDistrb%blockLocation(globalID) = processor + newDistrb%blockLocalID (globalID) = localID + else ! no work - eliminate block from distribution + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + endif + end do + end do + + ! if this is the local processor, set number of local blocks + if (my_task == processor - 1) then + newDistrb%numLocalBlocks = localID + endif + + end do + end do + +!---------------------------------------------------------------------- +! +! now store the local info +! +!---------------------------------------------------------------------- + + if (newDistrb%numLocalBlocks > 0) then + allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & + stat=istat) + + do j=1,nprocsY + do i=1,nprocsX + processor = (j-1)*nprocsX + i + + if (processor == my_task + 1) then + is = (i-1)*numBlocksXPerProc + 1 ! starting block in i + ie = i *numBlocksXPerProc ! ending block in i + if (ie > nblocks_x) ie = nblocks_x + js = (j-1)*numBlocksYPerProc + 1 ! starting block in j + je = j *numBlocksYPerProc ! ending block in j + if (je > nblocks_y) je = nblocks_y + + localID = 0 ! initialize counter for local index + do jblock = js,je + do iblock = is,ie + globalID = (jblock - 1)*nblocks_x + iblock + if (workPerBlock(globalID) /= 0) then + localID = localID + 1 + newDistrb%blockGlobalID (localID) = globalID + endif + end do + end do + + endif + + end do + end do + + endif + +!---------------------------------------------------------------------- + + end function create_distrb_cart + +!********************************************************************** + + function create_distrb_rake(nprocs, workPerBlock) result(newDistrb) + +! This function distributes blocks across processors in a +! load-balanced manner based on the amount of work per block. +! A rake algorithm is used in which the blocks are first distributed +! 
in a Cartesian distribution and then a rake is applied in each +! Cartesian direction. + + integer (int_kind), intent(in) :: & + nprocs ! number of processors in this distribution + + integer (int_kind), dimension(:), intent(in) :: & + workPerBlock ! amount of work per block + + type (distrb) :: & + newDistrb ! resulting structure describing + ! load-balanced distribution of blocks + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop indices + pid ,&! dummy for processor id + istat ,&! status flag for allocates + localBlock ,&! local block position on processor + numOcnBlocks ,&! number of ocean blocks + maxWork ,&! max amount of work in any block + nprocsX ,&! num of procs in x for global domain + nprocsY ! num of procs in y for global domain + + integer (int_kind), dimension(:), allocatable :: & + priority ,&! priority for moving blocks + workTmp ,&! work per row or column for rake algrthm + procTmp ! temp processor id for rake algrthm + + type (distrb) :: dist ! temp hold distribution + +!---------------------------------------------------------------------- +! +! first set up as Cartesian distribution +! +!---------------------------------------------------------------------- + + dist = create_distrb_cart(nprocs, workPerBlock) + +!---------------------------------------------------------------------- +! +! if the number of blocks is close to the number of processors, +! only do a 1-d rake on the entire distribution +! +!---------------------------------------------------------------------- + + numOcnBlocks = count(workPerBlock /= 0) + maxWork = maxval(workPerBlock) + + if (numOcnBlocks <= 2*nprocs) then + + allocate(priority(nblocks_tot), stat=istat) + + !*** initialize priority array + + do j=1,nblocks_y + do i=1,nblocks_x + n=(j-1)*nblocks_x + i + if (workPerBlock(n) > 0) then + priority(n) = maxWork + n - workPerBlock(n) + else + priority(n) = 0 + endif + end do + end do + + allocate(workTmp(nblocks_tot), procTmp(nblocks_tot), stat=istat) + + workTmp(:) = 0 + do i=1,nprocs + procTmp(i) = i + do n=1,nblocks_tot + if (dist%blockLocation(n) == i) then + workTmp(i) = workTmp(i) + workPerBlock(n) + endif + end do + end do + + call ice_distributionRake (workTmp, procTmp, workPerBlock, & + priority, dist) + + deallocate(workTmp, procTmp, stat=istat) + +!---------------------------------------------------------------------- +! +! otherwise re-distribute blocks using a rake in each direction +! +!---------------------------------------------------------------------- + + else + + call proc_decomposition(dist%nprocs, nprocsX, nprocsY) + +!---------------------------------------------------------------------- +! +! load-balance using a rake algorithm in the x-direction first +! 
+!---------------------------------------------------------------------- + + allocate(priority(nblocks_tot), stat=istat) + + !*** set highest priority such that eastern-most blocks + !*** and blocks with the least amount of work are + !*** moved first + + do j=1,nblocks_y + do i=1,nblocks_x + n=(j-1)*nblocks_x + i + if (workPerBlock(n) > 0) then + priority(n) = (maxWork + 1)*(nblocks_x + i) - & + workPerBlock(n) + else + priority(n) = 0 + endif + end do + end do + + allocate(workTmp(nprocsX), procTmp(nprocsX), stat=istat) + + do j=1,nprocsY + + workTmp(:) = 0 + do i=1,nprocsX + pid = (j-1)*nprocsX + i + procTmp(i) = pid + do n=1,nblocks_tot + if (dist%blockLocation(n) == pid) then + workTmp(i) = workTmp(i) + workPerBlock(n) + endif + end do + end do + + call ice_distributionRake (workTmp, procTmp, workPerBlock, & + priority, dist) + end do + + deallocate(workTmp, procTmp, stat=istat) + +!---------------------------------------------------------------------- +! +! use a rake algorithm in the y-direction now +! +!---------------------------------------------------------------------- + + !*** set highest priority for northern-most blocks + + do j=1,nblocks_y + do i=1,nblocks_x + n=(j-1)*nblocks_x + i + if (workPerBlock(n) > 0) then + priority(n) = (maxWork + 1)*(nblocks_y + j) - & + workPerBlock(n) + else + priority(n) = 0 + endif + end do + end do + + allocate(workTmp(nprocsY), procTmp(nprocsY), stat=istat) + + do i=1,nprocsX + + workTmp(:) = 0 + do j=1,nprocsY + pid = (j-1)*nprocsX + i + procTmp(j) = pid + do n=1,nblocks_tot + if (dist%blockLocation(n) == pid) then + workTmp(j) = workTmp(j) + workPerBlock(n) + endif + end do + end do + + call ice_distributionRake (workTmp, procTmp, workPerBlock, & + priority, dist) + + end do + + deallocate(workTmp, procTmp, priority, stat=istat) + + endif ! 1d or 2d rake + +!---------------------------------------------------------------------- +! +! create new distribution with info extracted from the temporary +! distribution +! +!---------------------------------------------------------------------- + + newDistrb%nprocs = nprocs + newDistrb%communicator = dist%communicator + + allocate(newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID(nblocks_tot), stat=istat) + + allocate(procTmp(nprocs), stat=istat) + + procTmp = 0 + do n=1,nblocks_tot + pid = dist%blockLocation(n) ! 
processor id + newDistrb%blockLocation(n) = pid + + if (pid > 0) then + procTmp(pid) = procTmp(pid) + 1 + newDistrb%blockLocalID (n) = procTmp(pid) + else + newDistrb%blockLocalID (n) = 0 + endif + end do + + newDistrb%numLocalBlocks = procTmp(my_task+1) + + if (minval(procTmp) < 1) then + call abort_ice( & + 'create_distrb_rake: processors left with no blocks') + return + endif + + deallocate(procTmp, stat=istat) + + if (istat > 0) then + call abort_ice( & + 'create_distrb_rake: error allocating last procTmp') + return + endif + + allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & + stat=istat) + + if (istat > 0) then + call abort_ice( & + 'create_distrb_rake: error allocating blockGlobalID') + return + endif + + localBlock = 0 + do n=1,nblocks_tot + if (newDistrb%blockLocation(n) == my_task+1) then + localBlock = localBlock + 1 + newDistrb%blockGlobalID(localBlock) = n + endif + end do + +!---------------------------------------------------------------------- + + call ice_distributionDestroy(dist) + +!---------------------------------------------------------------------- + + end function create_distrb_rake + +!*********************************************************************** + + function create_distrb_roundrobin(nprocs, workPerBlock) result(newDistrb) + +! This function creates a distribution of blocks across processors +! using a simple roundrobin algorithm. Mean for prescribed ice or +! standalone CAM mode. + + integer (int_kind), intent(in) :: & + nprocs ! number of processors in this distribution + + integer (int_kind), dimension(:), intent(in) :: & + workPerBlock ! amount of work per block + + type (distrb) :: & + newDistrb ! resulting structure describing Cartesian + ! distribution of blocks + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + i, j, &! dummy loop indices + istat, &! status flag for allocation + processor, &! processor position in cartesian decomp + globalID, &! global block ID + localID ! block location on this processor + + integer (int_kind), dimension(:), allocatable :: & + proc_tmp ! temp processor id + +!---------------------------------------------------------------------- +! +! create communicator for this distribution +! +!---------------------------------------------------------------------- + + call create_communicator(newDistrb%communicator, nprocs) + +!---------------------------------------------------------------------- +! +! try to find best processor arrangement +! +!---------------------------------------------------------------------- + + newDistrb%nprocs = nprocs + +!---------------------------------------------------------------------- +! +! allocate space for decomposition +! +!---------------------------------------------------------------------- + + allocate (newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + + allocate (newDistrb%blockCnt(nprocs)) + +!---------------------------------------------------------------------- +! +! distribute blocks across processors, one block per proc until used +! 
+!---------------------------------------------------------------------- + + allocate(proc_tmp(nprocs)) + processor = 0 + globalID = 0 + proc_tmp = 0 + + allocate(newDistrb%blockIndex(nprocs,max_blocks)) + newDistrb%blockIndex(:,:) = 0 + + do j=1,nblocks_y + do i=1,nblocks_x + + globalID = globalID + 1 + + if (workPerBlock(globalID) /= 0) then + processor = mod(processor,nprocs) + 1 + proc_tmp(processor) = proc_tmp(processor) + 1 + localID = proc_tmp(processor) + if (localID > max_blocks) then + call abort_ice('create_distrb_roundrobin: max_blocks too small') + return + endif + newDistrb%blockLocation(globalID) = processor + newDistrb%blockLocalID (globalID) = localID + newDistrb%blockIndex(processor,localID) = globalID + else ! no work - eliminate block from distribution + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + endif + + end do + end do + + newDistrb%numLocalBlocks = proc_tmp(my_task+1) + newDistrb%blockCnt(:) = proc_tmp(:) + deallocate(proc_tmp) + +! write(nu_diag,*) 'my_task,newDistrb%numLocalBlocks',& +! my_task,newDistrb%numLocalBlocks + +!---------------------------------------------------------------------- +! +! now store the local info +! +!---------------------------------------------------------------------- + + globalID = 0 + + if (newDistrb%numLocalBlocks > 0) then + allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & + stat=istat) + + processor = my_task + 1 + do localID = 1,newDistrb%numLocalBlocks + newDistrb%blockGlobalID (localID) = newDistrb%blockIndex(processor,& + localID) + enddo + endif + +!---------------------------------------------------------------------- + + end function create_distrb_roundrobin + +!*********************************************************************** + + function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) + +! This function creates a distribution of blocks across processors +! using a simple sectrobin algorithm. Mean for prescribed ice or +! standalone CAM mode. + + integer (int_kind), intent(in) :: & + nprocs ! number of processors in this distribution + + integer (int_kind), dimension(:), intent(in) :: & + workPerBlock ! amount of work per block + + type (distrb) :: & + newDistrb ! resulting structure describing Cartesian + ! distribution of blocks + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + i, j, &! dummy loop indices + istat, &! status flag for allocation + mblocks, &! estimate of max blocks per pe + processor, &! processor position in cartesian decomp + globalID, &! global block ID + localID ! block location on this processor + + integer (int_kind), dimension(:), allocatable :: & + proc_tmp ! temp processor id + + logical (log_kind), dimension(:), allocatable :: & + bfree ! map of assigned blocks + + integer (int_kind) :: cnt, blktogether, i2 + integer (int_kind) :: totblocks, nchunks + logical (log_kind) :: keepgoing + +!---------------------------------------------------------------------- +! +! create communicator for this distribution +! +!---------------------------------------------------------------------- + + call create_communicator(newDistrb%communicator, nprocs) + +!---------------------------------------------------------------------- +! +! try to find best processor arrangement +! 
+!---------------------------------------------------------------------- + + newDistrb%nprocs = nprocs + +!---------------------------------------------------------------------- +! +! allocate space for decomposition +! +!---------------------------------------------------------------------- + + allocate (newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + + allocate (newDistrb%blockCnt(nprocs)) + +!---------------------------------------------------------------------- +! +! distribute groups of blocks across processors, one per proc until used +! +!---------------------------------------------------------------------- + + allocate(proc_tmp(nprocs)) + processor = 0 + globalID = 0 + proc_tmp = 0 + + allocate(newDistrb%blockIndex(nprocs,max_blocks)) + newDistrb%blockIndex(:,:) = 0 + + allocate(bfree(nblocks_x*nblocks_y)) + bfree=.true. + + totblocks = 0 + do j=1,nblocks_y + do i=1,nblocks_x + globalID = (j-1)*nblocks_x + i + if (workPerBlock(globalID) /= 0) then + totblocks=totblocks+1 + else ! no work - eliminate block from distribution + bfree(globalID) = .false. + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + endif + enddo + enddo + + mblocks = totblocks/nprocs + if (mod(totblocks,nprocs) > 0) mblocks=mblocks+1 + + blktogether = max(1,nint(float(totblocks)/float(6*nprocs))) + +! write(nu_diag,*) 'ice_distrb_sectrobin totblocks = ',totblocks,nblocks_y*nblocks_x + + !------------------------------ + ! southern group of blocks + ! weave back and forth in i vs j + ! go south to north, low - high pes + !------------------------------ + + processor=1 + cnt = 0 + keepgoing = .true. + do j=1,nblocks_y + do i=1,nblocks_x + if (mod(j,2) == 0) then + i2 = nblocks_x - i + 1 + else + i2 = i + endif + globalID = (j-1)*nblocks_x + i2 + if (cnt >= blktogether) then + processor = mod(processor,nprocs) + 1 + cnt = 0 + if (processor == 1) keepgoing = .false. + endif +! write(nu_diag,'(a,6i7,l2)') 'tcx ',i,j,globalID,cnt,blktogether,processor,keepgoing + + if (keepgoing) then + if (bfree(globalID)) then + if (workPerBlock(globalID) /= 0) then + proc_tmp(processor) = proc_tmp(processor) + 1 + localID = proc_tmp(processor) + if (localID > max_blocks) then + call abort_ice('create_distrb_sectrobin: max_blocks too small') + return + endif + newDistrb%blockLocation(globalID) = processor + newDistrb%blockLocalID (globalID) = localID + newDistrb%blockIndex(processor,localID) = globalID + cnt = cnt + 1 + totblocks = totblocks-1 + bfree(globalID) = .false. + + else ! no work - eliminate block from distribution + bfree(globalID) = .false. + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + endif + endif ! bfree + endif + end do + end do + +! write(nu_diag,*) 'ice_distrb_sectrobin totblocks left after southern = ',totblocks + + !------------------------------ + ! northern group of blocks + ! weave back and forth in i vs j + ! go north to south, high - low pes + !------------------------------ + + processor=nprocs + cnt = 0 + keepgoing = .true. + do j=nblocks_y,1,-1 + do i=1,nblocks_x + if (mod(j,2) == 1) then + i2 = nblocks_x - i + 1 + else + i2 = i + endif + globalID = (j-1)*nblocks_x + i2 + if (cnt >= blktogether) then + processor = mod(processor+nprocs-2,nprocs) + 1 + cnt = 0 + if (processor == nprocs) keepgoing = .false. 
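! [Editorial sketch] The southern and northern passes here walk the block grid
! in a serpentine ("weave back and forth") order: on alternating rows the i
! index is reversed so consecutively visited blocks stay spatially adjacent.
! Standalone illustration on a tiny hypothetical grid; not part of the patch.
      program serpentine_sketch
        implicit none
        integer, parameter :: nblocks_x = 4, nblocks_y = 3
        integer :: i, j, i2, globalID

        do j = 1, nblocks_y
           do i = 1, nblocks_x
              if (mod(j,2) == 0) then
                 i2 = nblocks_x - i + 1     ! even rows: sweep right to left
              else
                 i2 = i                     ! odd rows:  sweep left to right
              end if
              globalID = (j-1)*nblocks_x + i2
              write(*,'(a,i3,a,i3,a,i3)') 'visit j=', j, ' i2=', i2, ' block', globalID
           end do
        end do
      end program serpentine_sketch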
+ endif + + if (keepgoing) then + if (bfree(globalID)) then + if (workPerBlock(globalID) /= 0) then + proc_tmp(processor) = proc_tmp(processor) + 1 + localID = proc_tmp(processor) + if (localID > max_blocks) then + call abort_ice('create_distrb_sectrobin: max_blocks too small') + return + endif + newDistrb%blockLocation(globalID) = processor + newDistrb%blockLocalID (globalID) = localID + newDistrb%blockIndex(processor,localID) = globalID + cnt = cnt + 1 + totblocks = totblocks - 1 + bfree(globalID) = .false. + + else ! no work - eliminate block from distribution + bfree(globalID) = .false. + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + endif + endif ! bfree + endif + end do + end do + +! write(nu_diag,*) 'ice_distrb_sectrobin totblocks left after northern = ',totblocks + + !------------------------------ + ! central group of blocks + ! weave back and forth in i vs j + ! go north to south, low - high / low - high pes + !------------------------------ + + nchunks = 2*nprocs + blktogether = max(1,nint(float(totblocks)/float(nchunks))) + processor=1 + cnt = 0 + do j=nblocks_y,1,-1 + do i=1,nblocks_x + if (mod(j,2) == 1) then + i2 = nblocks_x - i + 1 + else + i2 = i + endif + globalID = (j-1)*nblocks_x + i2 + if (totblocks > 0) then + do while (proc_tmp(processor) >= mblocks .or. cnt >= blktogether) + nchunks = nchunks - 1 + if (nchunks == 0) then + blktogether = 1 + else + blktogether = max(1,nint(float(totblocks)/float(nchunks))) + endif + cnt = 0 + processor = mod(processor,nprocs) + 1 + enddo + endif + +! write(nu_diag,*) 'ice_distrb_sectrobin central ',i,j,totblocks,cnt,nchunks,blktogether,processor + + if (bfree(globalID)) then + if (workPerBlock(globalID) /= 0) then + proc_tmp(processor) = proc_tmp(processor) + 1 + localID = proc_tmp(processor) + if (localID > max_blocks) then + call abort_ice('create_distrb_sectrobin: max_blocks too small') + return + endif + newDistrb%blockLocation(globalID) = processor + newDistrb%blockLocalID (globalID) = localID + newDistrb%blockIndex(processor,localID) = globalID + cnt = cnt + 1 + totblocks = totblocks-1 + bfree(globalID) = .false. + + else ! no work - eliminate block from distribution + bfree(globalID) = .false. + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + endif + endif ! bfree + end do + end do + + newDistrb%numLocalBlocks = proc_tmp(my_task+1) + newDistrb%blockCnt(:) = proc_tmp(:) + deallocate(proc_tmp) + deallocate(bfree) + +!---------------------------------------------------------------------- +! +! now store the local info +! +!---------------------------------------------------------------------- + + globalID = 0 + + if (newDistrb%numLocalBlocks > 0) then + allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & + stat=istat) + + processor = my_task + 1 + do localID = 1,newDistrb%numLocalBlocks + newDistrb%blockGlobalID (localID) = newDistrb%blockIndex(processor,& + localID) + enddo + endif + +!---------------------------------------------------------------------- + + end function create_distrb_sectrobin + +!*********************************************************************** + + function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) + +! This function creates a distribution of blocks across processors +! using a simple sectcart algorithm. Mean for prescribed ice or +! standalone CAM mode. + + integer (int_kind), intent(in) :: & + nprocs ! 
number of processors in this distribution + + integer (int_kind), dimension(:), intent(in) :: & + workPerBlock ! amount of work per block + + type (distrb) :: & + newDistrb ! resulting structure describing Cartesian + ! distribution of blocks + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + i, j, i2, j2, &! dummy loop indices + istat, &! status flag for allocation + processor, &! processor position in cartesian decomp + globalID, &! global block ID + localID, &! block location on this processor + blktogether, &! number of blocks together + cnt ! counter + + integer (int_kind), dimension(:), allocatable :: & + proc_tmp ! temp processor id + + integer (int_kind) :: n + +!---------------------------------------------------------------------- +! +! create communicator for this distribution +! +!---------------------------------------------------------------------- + + call create_communicator(newDistrb%communicator, nprocs) + +!---------------------------------------------------------------------- +! +! try to find best processor arrangement +! +!---------------------------------------------------------------------- + + newDistrb%nprocs = nprocs + +!---------------------------------------------------------------------- +! +! allocate space for decomposition +! +!---------------------------------------------------------------------- + + allocate (newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + + allocate (newDistrb%blockCnt(nprocs)) +!---------------------------------------------------------------------- +! +! distribute blocks linearly across processors in quadrants +! +!---------------------------------------------------------------------- + + allocate(proc_tmp(nprocs)) + proc_tmp = 0 + + allocate(newDistrb%blockIndex(nprocs,max_blocks)) + newDistrb%blockIndex(:,:) = 0 + + blktogether = max(1,nint(float(nblocks_x*nblocks_y)/float(4*nprocs))) + + ! --- two phases, reset processor and cnt for each phase + ! --- phase 1 is south to north, east to west on the left half of the domain + ! --- phase 2 is north to south, east to west on the right half of the domain + + if (mod(nblocks_x,2) /= 0) then + call abort_ice( & + 'create_distrb_sectcart: nblocks_x not divisible by 2') + return + endif + + do n=1,2 + processor = 1 + cnt = 0 + do j2=1,nblocks_y + do i2=1,nblocks_x/2 + + if (n == 1) then + i = i2 + j = j2 + else + i = nblocks_x/2 + i2 + j = nblocks_y - j2 + 1 + endif + + globalID = (j-1)*nblocks_x + i + if (cnt >= blktogether) then + processor = mod(processor,nprocs) + 1 + cnt = 0 + endif + cnt = cnt + 1 + + if (workPerBlock(globalID) /= 0) then + proc_tmp(processor) = proc_tmp(processor) + 1 + localID = proc_tmp(processor) + if (localID > max_blocks) then + call abort_ice('create_distrb_sectcart: max_blocks too small') + return + endif + newDistrb%blockLocation(globalID) = processor + newDistrb%blockLocalID (globalID) = localID + newDistrb%blockIndex(processor,localID) = globalID + else ! no work - eliminate block from distribution + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + endif + + end do + end do + end do + + newDistrb%numLocalBlocks = proc_tmp(my_task+1) + newDistrb%blockCnt(:) = proc_tmp(:) + deallocate(proc_tmp) + +! write(nu_diag,*) 'my_task,newDistrb%numLocalBlocks',& +! 
my_task,newDistrb%numLocalBlocks + +!---------------------------------------------------------------------- +! +! now store the local info +! +!---------------------------------------------------------------------- + + globalID = 0 + + if (newDistrb%numLocalBlocks > 0) then + allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & + stat=istat) + + processor = my_task + 1 + do localID = 1,newDistrb%numLocalBlocks + newDistrb%blockGlobalID (localID) = newDistrb%blockIndex(processor,& + localID) + enddo + endif + +!---------------------------------------------------------------------- + + end function create_distrb_sectcart + +!********************************************************************** + + function create_distrb_spacecurve(nprocs,work_per_block) + +! This function distributes blocks across processors in a +! load-balanced manner using space-filling curves +! added by J. Dennis 3/10/06 + + use ice_spacecurve + + integer (int_kind), intent(in) :: & + nprocs ! number of processors in this distribution + + integer (int_kind), dimension(:), intent(in) :: & + work_per_block ! amount of work per block + + type (distrb) :: & + create_distrb_spacecurve ! resulting structure describing + ! load-balanced distribution of blocks + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop indices + pid ,&! dummy for processor id + localID ! local block position on processor + + integer (int_kind), dimension(:),allocatable :: & + idxT_i,idxT_j ! Temporary indices for SFC + + integer (int_kind), dimension(:,:),allocatable :: & + Mesh ,&! !arrays to hold Space-filling curve + Mesh2 ,&! + Mesh3 ! + + integer (int_kind) :: & + nblocksL,nblocks, &! Number of blocks local and total + ii,extra,tmp1, &! loop tempories used for + s1,ig ! partitioning curve + + logical, parameter :: Debug = .FALSE. + + type (factor_t) :: xdim,ydim + + integer (int_kind) :: it,jj,i2,j2 + integer (int_kind) :: curveSize,sb_x,sb_y,itmp,numfac + integer (int_kind) :: subNum, sfcNum + logical :: foundx + + integer (int_kind), dimension(:), allocatable :: & + proc_tmp ! temp processor id for rake algrthm + + type (distrb) :: dist ! temp hold distribution + + !------------------------------------------------------ + ! Space filling curves only work if: + ! + ! nblocks_x = nblocks_y + ! nblocks_x = 2^m 3^n 5^p where m,n,p are integers + !------------------------------------------------------ + + if((.not. IsFactorable(nblocks_y)) .or. (.not. IsFactorable(nblocks_x))) then + create_distrb_spacecurve = create_distrb_cart(nprocs, work_per_block) + return + endif + + !----------------------------------------------- + ! Factor the numbers of blocks in each dimension + !----------------------------------------------- + + xdim = Factor(nblocks_x) + ydim = Factor(nblocks_y) + numfac = xdim%numfact + + !--------------------------------------------- + ! Match the common factors to create SFC curve + !--------------------------------------------- + + curveSize=1 + do it=1,numfac + call MatchFactor(xdim,ydim,itmp,foundX) + curveSize = itmp*curveSize + enddo + + !-------------------------------------- + ! determine the size of the sub-blocks + ! 
within the space-filling curve + !-------------------------------------- + + sb_x = ProdFactor(xdim) + sb_y = ProdFactor(ydim) + + call create_communicator(dist%communicator, nprocs) + + dist%nprocs = nprocs + + !---------------------------------------------------------------------- + ! + ! allocate space for decomposition + ! + !---------------------------------------------------------------------- + + allocate (dist%blockLocation(nblocks_tot), & + dist%blockLocalID (nblocks_tot)) + + dist%blockLocation=0 + dist%blockLocalID =0 + + !---------------------------------------------------------------------- + ! Create the array to hold the SFC and indices into it + !---------------------------------------------------------------------- + + allocate(Mesh(curveSize,curveSize)) + allocate(Mesh2(nblocks_x,nblocks_y)) + allocate(Mesh3(nblocks_x,nblocks_y)) + allocate(idxT_i(nblocks_tot),idxT_j(nblocks_tot)) + + Mesh = 0 + Mesh2 = 0 + Mesh3 = 0 + + !---------------------------------------------------------------------- + ! Generate the space-filling curve + !---------------------------------------------------------------------- + + call GenSpaceCurve(Mesh) + Mesh = Mesh + 1 ! make it 1-based indexing + if(Debug) then + if(my_task ==0) call PrintCurve(Mesh) + endif + + !----------------------------------------------- + ! Reindex the SFC to address internal sub-blocks + !----------------------------------------------- + + do j=1,curveSize + do i=1,curveSize + sfcNum = (Mesh(i,j) - 1)*(sb_x*sb_y) + 1 + do jj=1,sb_y + do ii=1,sb_x + subNum = (jj-1)*sb_x + (ii-1) + i2 = (i-1)*sb_x + ii + j2 = (j-1)*sb_y + jj + Mesh2(i2,j2) = sfcNum + subNum + enddo + enddo + enddo + enddo + + !------------------------------------------------ + ! create a linear array of i,j coordinates of SFC + !------------------------------------------------ + + idxT_i=0;idxT_j=0 + do j=1,nblocks_y + do i=1,nblocks_x + n = (j-1)*nblocks_x + i + ig = Mesh2(i,j) + if(work_per_block(n) /= 0) then + idxT_i(ig)=i;idxT_j(ig)=j + endif + enddo + enddo + + !----------------------------- + ! Compress out the land blocks + !----------------------------- + + ii=0 + do i=1,nblocks_tot + if(IdxT_i(i) .gt. 0) then + ii=ii+1 + Mesh3(idxT_i(i),idxT_j(i)) = ii + endif + enddo + nblocks=ii + if(Debug) then + if(my_task==0) call PrintCurve(Mesh3) + endif + + !---------------------------------------------------- + ! Compute the partitioning of the space-filling curve + !---------------------------------------------------- + + nblocksL = nblocks/nprocs + ! every cpu gets nblocksL blocks, but the first 'extra' get nblocksL+1 + extra = mod(nblocks,nprocs) + s1 = extra*(nblocksL+1) + ! split curve into two curves: + ! 1 ... s1 s2 ... nblocks + ! + ! s1 = extra*(nblocksL+1) (count be 0) + ! s2 = s1+1 + ! + ! First region gets nblocksL+1 blocks per partition + ! Second region gets nblocksL blocks per partition + if(Debug) print *,'nprocs,extra,nblocks,nblocksL,s1: ', & + nprocs,extra,nblocks,nblocksL,s1 + + !----------------------------------------------------------- + ! Use the SFC to partition the blocks across processors + !----------------------------------------------------------- + + do j=1,nblocks_y + do i=1,nblocks_x + n = (j-1)*nblocks_x + i + ii = Mesh3(i,j) + if(ii>0) then + if(ii<=s1) then + ! ------------------------------------ + ! If on the first region of curve + ! all processes get nblocksL+1 blocks + ! ------------------------------------ + ii=ii-1 + tmp1 = ii/(nblocksL+1) + dist%blockLocation(n) = tmp1+1 + else + ! 
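! [Editorial sketch] Worked example of the curve partitioning used here: with
! nblocks active blocks on nprocs processors, the first
! extra = mod(nblocks,nprocs) processors each own nblocksL+1 consecutive curve
! positions and the remainder own nblocksL.  Hypothetical sizes only; not part
! of the patch.
      program sfc_partition_sketch
        implicit none
        integer :: nblocks, nprocs, nblocksL, extra, s1, ii, owner

        nblocks  = 11
        nprocs   = 4
        nblocksL = nblocks/nprocs            ! 2
        extra    = mod(nblocks, nprocs)      ! 3 processors get one extra block
        s1       = extra*(nblocksL+1)        ! 9 positions in the first region
        do ii = 1, nblocks
           if (ii <= s1) then
              owner = (ii-1)/(nblocksL+1) + 1
           else
              owner = extra + (ii-s1-1)/nblocksL + 1
           end if
           write(*,'(a,i3,a,i3)') 'curve position', ii, ' -> processor', owner
        end do
      end program sfc_partition_sketch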
------------------------------------ + ! If on the second region of curve + ! all processes get nblocksL blocks + ! ------------------------------------ + ii=ii-s1-1 + tmp1 = ii/nblocksL + dist%blockLocation(n) = extra + tmp1 + 1 + endif + endif + enddo + enddo + + !---------------------------------------------------------------------- + ! Reset the dist data structure + !---------------------------------------------------------------------- + + allocate(proc_tmp(nprocs)) + proc_tmp = 0 + + do n=1,nblocks_tot + pid = dist%blockLocation(n) + !!!dist%blockLocation(n) = pid + + if(pid>0) then + proc_tmp(pid) = proc_tmp(pid) + 1 + dist%blockLocalID(n) = proc_tmp(pid) + else + dist%blockLocalID(n) = 0 + endif + enddo + + dist%numLocalBlocks = proc_tmp(my_task+1) + + if (dist%numLocalBlocks > 0) then + allocate (dist%blockGlobalID(dist%numLocalBlocks)) + dist%blockGlobalID = 0 + endif + localID = 0 + do n=1,nblocks_tot + if (dist%blockLocation(n) == my_task+1) then + localID = localID + 1 + dist%blockGlobalID(localID) = n + endif + enddo + + if(Debug) then + if(my_task==0) print *,'dist%blockLocation:= ',dist%blockLocation + print *,'IAM: ',my_task,' SpaceCurve: Number of blocks {total,local} :=', & + nblocks_tot,nblocks,proc_tmp(my_task+1) + endif + !--------------------------------- + ! Deallocate temporary arrays + !--------------------------------- + deallocate(proc_tmp) + deallocate(Mesh,Mesh2,Mesh3) + deallocate(idxT_i,idxT_j) + + create_distrb_spacecurve = dist ! return the result + +!---------------------------------------------------------------------- + + end function create_distrb_spacecurve + +!********************************************************************** + + subroutine ice_distributionRake (procWork, procID, blockWork, & + priority, distribution) + +! This subroutine performs a rake algorithm to distribute the work +! along a vector of processors. In the rake algorithm, a work +! threshold is first set. Then, moving from left to right, work +! above that threshold is raked to the next processor in line. +! The process continues until the end of the vector is reached +! and then the threshold is reduced by one for a second rake pass. +! In this implementation, a priority for moving blocks is defined +! such that the rake algorithm chooses the highest priority +! block to be moved to the next processor. This can be used +! for example to always choose the eastern-most block or to +! ensure a block does not stray too far from its neighbors. + + integer (int_kind), intent(in), dimension(:) :: & + blockWork ,&! amount of work per block + procID ! global processor number + + integer (int_kind), intent(inout), dimension(:) :: & + procWork ,&! amount of work per processor + priority ! priority for moving a given block + + type (distrb), intent(inout) :: & + distribution ! distribution to change + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + i, n, &! dummy loop indices + np1, &! n+1 corrected for cyclical wrap + iproc, inext, &! processor ids for current and next + nprocs, numBlocks, &! number of blocks, processors + lastPriority, &! priority for most recent block + minPriority, &! minimum priority + lastLoc, &! location for most recent block + meanWork, maxWork, &! mean,max work per processor + diffWork, residual, &! work differences and residual work + numTransfers ! 
counter for number of block transfers + +!---------------------------------------------------------------------- +! +! initialization +! +!---------------------------------------------------------------------- + + nprocs = size(procWork) + numBlocks = size(blockWork) + + !*** compute mean,max work per processor + + meanWork = sum(procWork)/nprocs + 1 + maxWork = maxval(procWork) + residual = mod(meanWork,nprocs) + + minPriority = 1000000 + do n=1,nprocs + iproc = procID(n) + do i=1,numBlocks + if (distribution%blockLocation(i) == iproc) then + minPriority = min(minPriority,priority(i)) + endif + end do + end do + +!---------------------------------------------------------------------- +! +! do two sets of transfers +! +!---------------------------------------------------------------------- + + transferLoop: do + +!---------------------------------------------------------------------- +! +! do rake across the processors +! +!---------------------------------------------------------------------- + + numTransfers = 0 + do n=1,nprocs + if (n < nprocs) then + np1 = n+1 + else + np1 = 1 + endif + iproc = procID(n) + inext = procID(np1) + + if (procWork(n) > meanWork) then !*** pass work to next + + diffWork = procWork(n) - meanWork + + rake1: do while (diffWork > 1) + + !*** attempt to find a block with the required + !*** amount of work and with the highest priority + !*** for moving (eg boundary blocks first) + + lastPriority = 0 + lastLoc = 0 + + do i=1,numBlocks + if (distribution%blockLocation(i) == iproc) then + if (priority(i) > lastPriority ) then + lastPriority = priority(i) + lastLoc = i + endif + endif + end do + if (lastLoc == 0) exit rake1 ! could not shift work + + numTransfers = numTransfers + 1 + distribution%blockLocation(lastLoc) = inext + if (np1 == 1) priority(lastLoc) = minPriority + diffWork = diffWork - blockWork(lastLoc) + + procWork(n ) = procWork(n )-blockWork(lastLoc) + procWork(np1) = procWork(np1)+blockWork(lastLoc) + end do rake1 + endif + + end do + +!---------------------------------------------------------------------- +! +! increment meanWork by one and repeat +! +!---------------------------------------------------------------------- + + meanWork = meanWork + 1 + if (numTransfers == 0 .or. meanWork > maxWork) exit transferLoop + + end do transferLoop + +!---------------------------------------------------------------------- + +end subroutine ice_distributionRake + +!*********************************************************************** + +end module ice_distribution + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/source/ice_domain.F90 b/source/ice_domain.F90 new file mode 100755 index 00000000..b9f84f97 --- /dev/null +++ b/source/ice_domain.F90 @@ -0,0 +1,506 @@ +! SVN:$Id: ice_domain.F90 820 2014-08-26 19:08:29Z eclare $ +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| + + module ice_domain + +! This module contains the model domain and routines for initializing +! the domain. It also initializes the decompositions and +! distributions across processors/threads by calling relevant +! routines in the block, distribution modules. +! +! author: Phil Jones, LANL +! Oct. 2004: Adapted from POP by William H. Lipscomb, LANL +! Feb. 2007: E. Hunke removed NE and SW boundary options (they were buggy +! and not used anyhow). 
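! [Editorial sketch] The rake pass in ice_distributionRake (end of the
! ice_distribution module above) can be pictured on a plain work vector:
! sweeping left to right, any work above the mean threshold is pushed onto the
! next processor, smoothing the load.  This simplified, priority-free sketch
! moves raw work units rather than whole blocks; hypothetical numbers, and not
! part of the ice_domain module that follows.
      program rake_pass_sketch
        implicit none
        integer, parameter :: nprocs = 5
        integer :: procWork(nprocs), meanWork, excess, n, np1

        procWork = (/ 9, 1, 6, 2, 2 /)
        meanWork = sum(procWork)/nprocs + 1        ! threshold = 5
        do n = 1, nprocs
           np1 = n + 1
           if (n == nprocs) np1 = 1                ! cyclic wrap, as in the code
           excess = procWork(n) - meanWork
           if (excess > 0) then                    ! rake the excess rightwards
              procWork(n)   = procWork(n)   - excess
              procWork(np1) = procWork(np1) + excess
           end if
        end do
        print *, 'after one rake pass: ', procWork ! 5 5 5 3 2
      end program rake_pass_sketch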
+ + use ice_kinds_mod + use ice_constants, only: puny, shlat, nhlat, rad_to_deg + use ice_communicate, only: my_task, master_task, get_num_procs + use ice_broadcast, only: broadcast_scalar + use ice_blocks, only: block, get_block, create_blocks, nghost, & + nblocks_x, nblocks_y, nblocks_tot, nx_block, ny_block + use ice_distribution, only: distrb + use ice_boundary, only: ice_halo + + implicit none + private + save + + public :: init_domain_blocks ,& + init_domain_distribution + + integer (int_kind), public :: & + nblocks ! actual number of blocks on this processor + + integer (int_kind), dimension(:), pointer, public :: & + blocks_ice ! block ids for local blocks + + type (distrb), public :: & + distrb_info ! block distribution info + + type (ice_halo), public :: & + halo_info ! ghost cell update info + + character (char_len), public :: & + ew_boundary_type, &! type of domain bndy in each logical + ns_boundary_type ! direction (ew is i, ns is j) + + logical (kind=log_kind), public :: & + maskhalo_dyn , & ! if true, use masked halo updates for dynamics + maskhalo_remap , & ! if true, use masked halo updates for transport + maskhalo_bound ! if true, use masked halo updates for bound_state + +!----------------------------------------------------------------------- +! +! module private variables - for the most part these appear as +! module variables to facilitate sharing info between init_domain1 +! and init_domain2. +! +!----------------------------------------------------------------------- + + character (char_len) :: & + distribution_type, &! method to use for distributing blocks + ! 'cartesian' + ! 'rake' + distribution_wght ! method for weighting work per block + ! 'block' = POP default configuration + ! 'latitude' = no. ocean points * |lat| + + integer (int_kind) :: & + nprocs ! num of processors + +!*********************************************************************** + + contains + +!*********************************************************************** + + subroutine init_domain_blocks + +! This routine reads in domain information and calls the routine +! to set up the block decomposition. + + use ice_distribution, only: processor_shape + use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks, & + nx_global, ny_global + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, nml_filename, nu_diag, & + get_fileunit, release_fileunit + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind) :: & + nml_error ! namelist read error flag + +!---------------------------------------------------------------------- +! +! input namelists +! +!---------------------------------------------------------------------- + + namelist /domain_nml/ nprocs, & + processor_shape, & + distribution_type, & + distribution_wght, & + ew_boundary_type, & + ns_boundary_type, & + maskhalo_dyn, & + maskhalo_remap, & + maskhalo_bound + +!---------------------------------------------------------------------- +! +! read domain information from namelist input +! +!---------------------------------------------------------------------- + + nprocs = -1 + processor_shape = 'slenderX2' + distribution_type = 'cartesian' + distribution_wght = 'latitude' + ew_boundary_type = 'cyclic' + ns_boundary_type = 'open' + maskhalo_dyn = .false. ! if true, use masked halos for dynamics + maskhalo_remap = .false. ! if true, use masked halos for transport + maskhalo_bound = .false. ! 
if true, use masked halos for bound_state + + call get_fileunit(nu_nml) + if (my_task == master_task) then + open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + do while (nml_error > 0) + read(nu_nml, nml=domain_nml,iostat=nml_error) + end do + if (nml_error == 0) close(nu_nml) + endif + call release_fileunit(nu_nml) + + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + call abort_ice('ice: error reading domain_nml') + endif + + call broadcast_scalar(nprocs, master_task) + call broadcast_scalar(processor_shape, master_task) + call broadcast_scalar(distribution_type, master_task) + call broadcast_scalar(distribution_wght, master_task) + call broadcast_scalar(ew_boundary_type, master_task) + call broadcast_scalar(ns_boundary_type, master_task) + call broadcast_scalar(maskhalo_dyn, master_task) + call broadcast_scalar(maskhalo_remap, master_task) + call broadcast_scalar(maskhalo_bound, master_task) + +!---------------------------------------------------------------------- +! +! perform some basic checks on domain +! +!---------------------------------------------------------------------- + + if (nx_global < 1 .or. ny_global < 1 .or. ncat < 1) then + !*** + !*** domain size zero or negative + !*** + call abort_ice('ice: Invalid domain: size < 1') ! no domain + else if (nprocs /= get_num_procs()) then + !*** + !*** input nprocs does not match system (eg MPI) request + !*** +#if (defined CCSMCOUPLED) + nprocs = get_num_procs() +#else + call abort_ice('ice: Input nprocs not same as system request') +#endif + else if (nghost < 1) then + !*** + !*** must have at least 1 layer of ghost cells + !*** + call abort_ice('ice: Not enough ghost cells allocated') + endif + +!---------------------------------------------------------------------- +! +! compute block decomposition and details +! +!---------------------------------------------------------------------- + + call create_blocks(nx_global, ny_global, trim(ew_boundary_type), & + trim(ns_boundary_type)) + +!---------------------------------------------------------------------- +! +! Now we need grid info before proceeding further +! Print some domain information +! +!---------------------------------------------------------------------- + + if (my_task == master_task) then + write(nu_diag,'(/,a18,/)')'Domain Information' + write(nu_diag,'(a26,i6)') ' Horizontal domain: nx = ',nx_global + write(nu_diag,'(a26,i6)') ' ny = ',ny_global + write(nu_diag,'(a26,i6)') ' No. of categories: nc = ',ncat + write(nu_diag,'(a26,i6)') ' No. of ice layers: ni = ',nilyr + write(nu_diag,'(a26,i6)') ' No. 
of snow layers:ns = ',nslyr + write(nu_diag,'(a26,i6)') ' Processors: total = ',nprocs + write(nu_diag,'(a25,a10)') ' Processor shape: ', & + trim(processor_shape) + write(nu_diag,'(a25,a10)') ' Distribution type: ', & + trim(distribution_type) + write(nu_diag,'(a25,a10)') ' Distribution weight: ', & + trim(distribution_wght) + write(nu_diag,'(a25,a10)') ' ew_boundary_type: ', & + trim(ew_boundary_type) + write(nu_diag,'(a25,a10)') ' ns_boundary_type: ', & + trim(ns_boundary_type) + write(nu_diag,'(a26,l6)') ' maskhalo_dyn = ', & + maskhalo_dyn + write(nu_diag,'(a26,l6)') ' maskhalo_remap = ', & + maskhalo_remap + write(nu_diag,'(a26,l6)') ' maskhalo_bound = ', & + maskhalo_bound + write(nu_diag,'(a26,i6)') ' max_blocks = ', max_blocks + write(nu_diag,'(a26,i6,/)')' Number of ghost cells: ', nghost + endif + +!---------------------------------------------------------------------- + + end subroutine init_domain_blocks + +!*********************************************************************** + + subroutine init_domain_distribution(KMTG,ULATG) + +! This routine calls appropriate setup routines to distribute blocks +! across processors and defines arrays with block ids for any local +! blocks. Information about ghost cell update routines is also +! initialized here through calls to the appropriate boundary routines. + + use ice_boundary, only: ice_HaloCreate + use ice_distribution, only: create_distribution, create_local_block_ids + use ice_domain_size, only: max_blocks, nx_global, ny_global + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_diag + + real (dbl_kind), dimension(nx_global,ny_global), intent(in) :: & + KMTG ,&! global topography + ULATG ! global latitude field (radians) + +!---------------------------------------------------------------------- +! +! local variables +! +!---------------------------------------------------------------------- + + integer (int_kind), dimension (nx_global, ny_global) :: & + flat ! latitude-dependent scaling factor + + character (char_len) :: outstring + + integer (int_kind), parameter :: & + max_work_unit=10 ! quantize the work into values from 1,max + + integer (int_kind) :: & + i,j,n ,&! dummy loop indices + ig,jg ,&! global indices + work_unit ,&! size of quantized work unit + tblocks_tmp ,&! total number of blocks + nblocks_tmp ,&! temporary value of nblocks + nblocks_max ! max blocks on proc + + integer (int_kind), dimension(:), allocatable :: & + nocn ,&! number of ocean points per block + work_per_block ! number of work units per block + + type (block) :: & + this_block ! block information for current block + +!---------------------------------------------------------------------- +! +! check that there are at least nghost+1 rows or columns of land cells +! for closed boundary conditions (otherwise grid lengths are zero in +! cells neighboring ocean points). +! +!---------------------------------------------------------------------- + + if (trim(ns_boundary_type) == 'closed') then + allocate(nocn(nblocks_tot)) + nocn = 0 + do n=1,nblocks_tot + this_block = get_block(n,n) + if (this_block%jblock == nblocks_y) then ! north edge + do j = this_block%jhi-1, this_block%jhi + if (this_block%j_glob(j) > 0) then + do i = 1, nx_block + if (this_block%i_glob(i) > 0) then + ig = this_block%i_glob(i) + jg = this_block%j_glob(j) + if (KMTG(ig,jg) > puny) nocn(n) = nocn(n) + 1 + endif + enddo + endif + enddo + endif + if (this_block%jblock == 1) then ! 
south edge + do j = this_block%jlo, this_block%jlo+1 + if (this_block%j_glob(j) > 0) then + do i = 1, nx_block + if (this_block%i_glob(i) > 0) then + ig = this_block%i_glob(i) + jg = this_block%j_glob(j) + if (KMTG(ig,jg) > puny) nocn(n) = nocn(n) + 1 + endif + enddo + endif + enddo + endif + if (nocn(n) > 0) then + print*, 'ice: Not enough land cells along ns edge' + call abort_ice('ice: Not enough land cells along ns edge') + endif + enddo + deallocate(nocn) + endif + if (trim(ew_boundary_type) == 'closed') then + allocate(nocn(nblocks_tot)) + nocn = 0 + do n=1,nblocks_tot + this_block = get_block(n,n) + if (this_block%iblock == nblocks_x) then ! east edge + do j = 1, ny_block + if (this_block%j_glob(j) > 0) then + do i = this_block%ihi-1, this_block%ihi + if (this_block%i_glob(i) > 0) then + ig = this_block%i_glob(i) + jg = this_block%j_glob(j) + if (KMTG(ig,jg) > puny) nocn(n) = nocn(n) + 1 + endif + enddo + endif + enddo + endif + if (this_block%iblock == 1) then ! west edge + do j = 1, ny_block + if (this_block%j_glob(j) > 0) then + do i = this_block%ilo, this_block%ilo+1 + if (this_block%i_glob(i) > 0) then + ig = this_block%i_glob(i) + jg = this_block%j_glob(j) + if (KMTG(ig,jg) > puny) nocn(n) = nocn(n) + 1 + endif + enddo + endif + enddo + endif + if (nocn(n) > 0) then + print*, 'ice: Not enough land cells along ew edge' + call abort_ice('ice: Not enough land cells along ew edge') + endif + enddo + deallocate(nocn) + endif + +!---------------------------------------------------------------------- +! +! estimate the amount of work per processor using the topography +! and latitude +! +!---------------------------------------------------------------------- + + if (distribution_wght == 'latitude') then + flat = NINT(abs(ULATG*rad_to_deg), int_kind) ! linear function + else + flat = 1 + endif + + allocate(nocn(nblocks_tot)) + + nocn = 0 + do n=1,nblocks_tot + this_block = get_block(n,n) + do j=this_block%jlo,this_block%jhi + if (this_block%j_glob(j) > 0) then + do i=this_block%ilo,this_block%ihi + if (this_block%i_glob(i) > 0) then + ig = this_block%i_glob(i) + jg = this_block%j_glob(j) + if (KMTG(ig,jg) > puny .and. & + (ULATG(ig,jg) < shlat/rad_to_deg .or. & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & + nocn(n) = nocn(n) + flat(ig,jg) + endif + end do + endif + end do + + !*** with array syntax, we actually do work on non-ocean + !*** points, so where the block is not completely land, + !*** reset nocn to be the full size of the block + + ! use processor_shape = 'square-pop' and distribution_wght = 'block' + ! to make CICE and POP decompositions/distributions identical. + + +#ifdef CICE_IN_NEMO + ! Keep all blocks even the ones only containing land points + if (distribution_wght == 'block') nocn(n) = nx_block*ny_block +#else + if (distribution_wght == 'block' .and. & ! POP style + nocn(n) > 0) nocn(n) = nx_block*ny_block +#endif + end do + + work_unit = maxval(nocn)/max_work_unit + 1 + + !*** find number of work units per block + + allocate(work_per_block(nblocks_tot)) + + where (nocn > 0) + work_per_block = nocn/work_unit + 1 + elsewhere + work_per_block = 0 + end where + deallocate(nocn) + +!---------------------------------------------------------------------- +! +! determine the distribution of blocks across processors +! 
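! [Editorial sketch] The latitude-weighted ocean-point counts computed above
! are quantized before being handed to create_distribution: the largest
! per-block count sets the work unit, and every active block is charged at
! least one unit.  Standalone illustration with hypothetical counts
! (max_work_unit = 10 as above); not part of the patch.
      program work_quantize_sketch
        implicit none
        integer, parameter :: max_work_unit = 10, nblocks_tot = 5
        integer :: nocn(nblocks_tot), work_per_block(nblocks_tot), work_unit

        nocn = (/ 0, 12, 95, 40, 7 /)                ! ocean points (0 = land block)
        work_unit = maxval(nocn)/max_work_unit + 1   ! 95/10 + 1 = 10
        where (nocn > 0)
           work_per_block = nocn/work_unit + 1       ! active blocks: 1 or more units
        elsewhere
           work_per_block = 0                        ! land blocks drop out
        end where
        print *, 'work_unit      =', work_unit
        print *, 'work_per_block =', work_per_block  ! 0 2 10 5 1
      end program work_quantize_sketch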
+!---------------------------------------------------------------------- + + distrb_info = create_distribution(distribution_type, & + nprocs, work_per_block) + + deallocate(work_per_block) + +!---------------------------------------------------------------------- +! +! allocate and determine block id for any local blocks +! +!---------------------------------------------------------------------- + + call create_local_block_ids(blocks_ice, distrb_info) + + if (associated(blocks_ice)) then + nblocks = size(blocks_ice) + else + nblocks = 0 + endif + nblocks_max = 0 + tblocks_tmp = 0 + do n=0,distrb_info%nprocs - 1 + nblocks_tmp = nblocks + call broadcast_scalar(nblocks_tmp, n) + nblocks_max = max(nblocks_max,nblocks_tmp) + tblocks_tmp = tblocks_tmp + nblocks_tmp + end do + + if (my_task == master_task) then + write(nu_diag,*) & + 'ice: total number of blocks is', tblocks_tmp + endif + + if (nblocks_max > max_blocks) then + write(outstring,*) & + 'ice: no. blocks exceed max: increase max to', nblocks_max + call abort_ice(trim(outstring)) + else if (nblocks_max < max_blocks) then + write(outstring,*) & + 'ice: no. blocks too large: decrease max to', nblocks_max + if (my_task == master_task) then + write(nu_diag,*) ' ********WARNING***********' + write(nu_diag,*) trim(outstring) + write(nu_diag,*) ' **************************' + write(nu_diag,*) ' ' + endif + endif + +!---------------------------------------------------------------------- +! +! Set up ghost cell updates for each distribution. +! Boundary types are cyclic, closed, tripole or tripoleT. +! +!---------------------------------------------------------------------- + + ! update ghost cells on all four boundaries + halo_info = ice_HaloCreate(distrb_info, & + trim(ns_boundary_type), & + trim(ew_boundary_type), & + nx_global) + +!---------------------------------------------------------------------- + + end subroutine init_domain_distribution + +!*********************************************************************** + + end module ice_domain + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/source/ice_domain_size.F90 b/source/ice_domain_size.F90 new file mode 100755 index 00000000..b3d47a38 --- /dev/null +++ b/source/ice_domain_size.F90 @@ -0,0 +1,76 @@ +! SVN:$Id: ice_domain_size.F90 700 2013-08-15 19:17:39Z eclare $ +!======================================================================= + +! Defines the global domain size and number of categories and layers. +! Code originally based on domain_size.F in POP +! +! author Elizabeth C. Hunke, LANL +! 2004: Block structure and snow parameters added by William Lipscomb +! Renamed (used to be ice_model_size) +! 2006: Converted to free source form (F90) by Elizabeth Hunke +! Removed hardwired sizes (NX...can now be set in compile scripts) + + module ice_domain_size + + use ice_kinds_mod + +!======================================================================= + + implicit none + private + save + + integer (kind=int_kind), parameter, public :: & + nx_global = NXGLOB , & ! i-axis size + ny_global = NYGLOB , & ! j-axis size + ncat = NICECAT , & ! number of categories +!#ifdef AusCOM +! nilyr = N_ILYR , & ! number of ice layers per category +!#else + nilyr = NICELYR , & ! number of ice layers per category +!#endif + nslyr = NSNWLYR , & ! number of snow layers per category + + max_aero = 6 , & ! maximum number of aerosols + n_aero = NTRAERO , & ! number of aerosols in use + + nblyr = NBGCLYR , & ! 
number of bio/brine layers per category + max_nbtrcr= 9 , & ! maximum number of biology tracers +! nltrcr = max_nbtrcr*TRBRI, & ! maximum layer bgc tracers (for zbgc) + + max_ntrcr = 1 & ! 1 = surface temperature + + nilyr & ! ice salinity + + nilyr & ! ice enthalpy + + nslyr & ! snow enthalpy + !!!!! optional tracers: + + TRAGE & ! age + + TRFY & ! first-year area + + TRLVL*2 & ! level/deformed ice + + TRPND*3 & ! ponds + + n_aero*4 & ! number of aerosols * 4 aero layers + + TRBRI & ! brine height + + TRBGCS , & ! skeletal layer BGC +! + TRBGCZ*nltrcr*nblyr ! for zbgc (off if TRBRI=0) + max_nstrm = 5 ! max number of history output streams + + integer (kind=int_kind), parameter, public :: & + block_size_x = BLCKX , & ! size of block in first horiz dimension + block_size_y = BLCKY ! size of block in second horiz dimension + + !*** The model will inform the user of the correct + !*** values for the parameter below. A value higher than + !*** necessary will not cause the code to fail, but will + !*** allocate more memory than is necessary. A value that + !*** is too low will cause the code to exit. + !*** A good initial guess is found using + !*** max_blocks = (nx_global/block_size_x)*(ny_global/block_size_y)/ + !*** num_procs + + integer (kind=int_kind), parameter, public :: & + max_blocks = MXBLCKS ! max number of blocks per processor + +!======================================================================= + + end module ice_domain_size + +!======================================================================= diff --git a/source/ice_dyn_eap.F90 b/source/ice_dyn_eap.F90 new file mode 100755 index 00000000..6d3cb028 --- /dev/null +++ b/source/ice_dyn_eap.F90 @@ -0,0 +1,2008 @@ +! SVN:$Id: ice_dyn_eap.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= +! +! Elastic-anisotropic sea ice dynamics model +! Computes ice velocity and deformation +! +! See: +! +! Wilchinsky, A.V. and D.L. Feltham (2006). Modelling the rheology of +! sea ice as a collection of diamond-shaped floes. +! Journal of Non-Newtonian Fluid Mechanics, 138(1), 22-32. +! +! Tsamados, M., D.L. Feltham, and A.V. Wilchinsky (2012). Impact on new +! anisotropic rheology on simulations of Arctic sea ice. JGR, in press. +! +! authors: Michel Tsamados, CPOM +! David Schroeder, CPOM + + module ice_dyn_eap + + use ice_kinds_mod + use ice_blocks, only: nx_block, ny_block + use ice_domain_size, only: max_blocks + + implicit none + private + public :: eap, init_eap, write_restart_eap, read_restart_eap + save + + ! Look-up table needed for calculating structure tensor + integer (int_kind), parameter :: & + nx_yield = 41, & + ny_yield = 41, & + na_yield = 21 + + real (kind=dbl_kind), dimension (nx_yield,ny_yield,na_yield) :: & + s11r, s12r, s22r, s11s, s12s, s22s + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + a11_1, a11_2, a11_3, a11_4, & ! components of + a12_1, a12_2, a12_3, a12_4 ! structure tensor + + ! history + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), public :: & + e11 , & ! components of strain rate tensor (1/s) + e12 , & + e22 , & + yieldstress11, & ! components of yield stress tensor (kg/s^2) + yieldstress12, & + yieldstress22, & + s11 , & ! components of stress tensor (kg/s^2) + s12 , & + s22 , & + a11 , & ! components of structure tensor () + a12 + +!======================================================================= + + contains + +!======================================================================= +! +! 
Elastic-anisotropic-plastic dynamics driver +! based on subroutine evp + + subroutine eap (dt) + +#ifdef CICE_IN_NEMO +! Wind stress is set during this routine from the values supplied +! via NEMO (unless calc_strair is true). These values are supplied +! rotated on u grid and multiplied by aice. strairxT = 0 in this +! case so operations in evp_prep1 are pointless but carried out to +! minimise code changes. +#endif + + use ice_atmo, only: Cdn_ocn + use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, & + ice_HaloDestroy + use ice_blocks, only: block, get_block + use ice_constants, only: field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_vector, c0, p5 + use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_dyn + use ice_dyn_shared, only: fcor_blk, ndte, dtei, a_min, m_min, & + cosw, sinw, denom1, uvel_init, vvel_init, arlx1i, & + evp_prep1, evp_prep2, stepu, evp_finish + use ice_flux, only: rdg_conv, rdg_shear, prs_sig, strairxT, strairyT, & + strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & + strtltx, strtlty, strocnx, strocny, strintx, strinty, & + strocnxT, strocnyT, strax, stray, & + stressp_1, stressp_2, stressp_3, stressp_4, & + stressm_1, stressm_2, stressm_3, stressm_4, & + stress12_1, stress12_2, stress12_3, stress12_4 + use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & + tarear, uarear, tinyarea, to_ugrid, t2ugrid_vector, u2tgrid_vector + use ice_mechred, only: ice_strength + use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + aice_init, aice0, aicen, vicen, strength +! use ice_timers, only: timer_dynamics, timer_bound, & +! ice_timer_start, ice_timer_stop, & +! timer_tmp1, timer_tmp2, timer_tmp3 + use ice_timers, only: timer_dynamics, timer_bound, & + ice_timer_start, ice_timer_stop +#ifdef CICE_IN_NEMO + use ice_atmo, only: calc_strair +#endif + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + ksub , & ! subcycle step + iblk , & ! block index + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + i, j + + integer (kind=int_kind), dimension(max_blocks) :: & + icellt , & ! no. of cells where icetmask = 1 + icellu ! no. of cells where iceumask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & + indxti , & ! compressed index in i-direction + indxtj , & ! compressed index in j-direction + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + tmass , & ! total mass of ice and snow (kg/m^2) + waterx , & ! for ocean stress calculation, x (m/s) + watery , & ! for ocean stress calculation, y (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + forcey , & ! work array: combined atm stress and ocn tilt, y + aiu , & ! ice fraction on u-grid + umass , & ! total mass of ice and snow (u grid) + umassdti ! mass of U-cell/dte (kg/m^2 s) + + real (kind=dbl_kind), allocatable :: fld2(:,:,:,:) + + real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & + strtmp ! stress combinations for momentum equation + + integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & + icetmask ! ice extent mask (T-cell) + + type (ice_halo) :: & + halo_info_mask ! ghost cell update info for masked halo + + type (block) :: & + this_block ! block information for current block + + call ice_timer_start(timer_dynamics) ! 
dynamics + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + allocate(fld2(nx_block,ny_block,2,max_blocks)) + + ! This call is needed only if dt changes during runtime. +! call set_evp_parameters (dt) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + rdg_conv (i,j,iblk) = c0 + rdg_shear(i,j,iblk) = c0 + divu (i,j,iblk) = c0 + shear(i,j,iblk) = c0 + prs_sig(i,j,iblk) = c0 + e11(i,j,iblk) = c0 + e12(i,j,iblk) = c0 + e22(i,j,iblk) = c0 + s11(i,j,iblk) = c0 + s12(i,j,iblk) = c0 + s22(i,j,iblk) = c0 + yieldstress11(i,j,iblk) = c0 + yieldstress12(i,j,iblk) = c0 + yieldstress22(i,j,iblk) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! preparation for dynamics + !----------------------------------------------------------------- + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call evp_prep1 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + aice (:,:,iblk), vice (:,:,iblk), & + vsno (:,:,iblk), tmask (:,:,iblk), & + strairxT(:,:,iblk), strairyT(:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + tmass (:,:,iblk), icetmask(:,:,iblk)) + + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (icetmask, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + !----------------------------------------------------------------- + ! convert fields from T to U grid + !----------------------------------------------------------------- + + call to_ugrid(tmass,umass) + call to_ugrid(aice_init, aiu) + +#ifdef CICE_IN_NEMO + !---------------------------------------------------------------- + ! Set wind stress to values supplied via NEMO + ! This wind stress is rotated on u grid and multiplied by aice + !---------------------------------------------------------------- + if (.not. calc_strair) then + strairx(:,:,:) = strax(:,:,:) + strairy(:,:,:) = stray(:,:,:) + else +#endif + call t2ugrid_vector(strairx) + call t2ugrid_vector(strairy) +#ifdef CICE_IN_NEMO + endif +#endif + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! 
more preparation for dynamics + !----------------------------------------------------------------- + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call evp_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt(iblk), icellu(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), umass (:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umask (:,:,iblk), & + uocn (:,:,iblk), vocn (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvel_init (:,:,iblk), vvel_init (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk)) + + !----------------------------------------------------------------- + ! Initialize structure tensor + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + if (icetmask(i,j,iblk)==0) then + ! structure tensor + a11_1(i,j,iblk) = p5 + a11_2(i,j,iblk) = p5 + a11_3(i,j,iblk) = p5 + a11_4(i,j,iblk) = p5 + a12_1(i,j,iblk) = c0 + a12_2(i,j,iblk) = c0 + a12_3(i,j,iblk) = c0 + a12_4(i,j,iblk) = c0 + endif ! icetmask + enddo ! i + enddo ! j + + !----------------------------------------------------------------- + ! ice strength + ! New strength used in Ukita Moritz rheology + !----------------------------------------------------------------- + + call ice_strength (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt(iblk), & + indxti (:,iblk), & + indxtj (:,iblk), & + aice (:,:, iblk), & + vice (:,:, iblk), & + aice0 (:,:, iblk), & + aicen (:,:,:,iblk), & + vicen (:,:,:,iblk), & + strength(:,:, iblk)) + + ! load velocity into array for boundary updates + fld2(:,:,1,iblk) = uvel(:,:,iblk) + fld2(:,:,2,iblk) = vvel(:,:,iblk) + + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (strength, halo_info, & + field_loc_center, field_type_scalar) + ! velocities may have changed in evp_prep2 + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + + ! unload + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + uvel(:,:,iblk) = fld2(:,:,1,iblk) + vvel(:,:,iblk) = fld2(:,:,2,iblk) + enddo + !$OMP END PARALLEL DO + + if (maskhalo_dyn) & + call ice_HaloMask(halo_info_mask, halo_info, icetmask) + call ice_timer_stop(timer_bound) + + do ksub = 1,ndte ! subcycling + + !----------------------------------------------------------------- + ! stress tensor equation, total surface stress + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,strtmp) + do iblk = 1, nblocks + +! call ice_timer_start(timer_tmp1) ! 
dynamics + call stress_eap (nx_block, ny_block, & + ksub, ndte, & + icellt(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + arlx1i, denom1, & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxt (:,:,iblk), dyt (:,:,iblk), & + dxhy (:,:,iblk), dyhx (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + tarear (:,:,iblk), strength (:,:,iblk), & + a11 (:,:,iblk), a12 (:,:,iblk), & + a11_1 (:,:,iblk), a11_2 (:,:,iblk), & + a11_3 (:,:,iblk), a11_4 (:,:,iblk), & + a12_1 (:,:,iblk), a12_2 (:,:,iblk), & + a12_3 (:,:,iblk), a12_4 (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + e11 (:,:,iblk), e12 (:,:,iblk), & + e22 (:,:,iblk), & + s11 (:,:,iblk), s12 (:,:,iblk), & + s22 (:,:,iblk), & + yieldstress11 (:,:,iblk), & + yieldstress12 (:,:,iblk), & + yieldstress22 (:,:,iblk), & + prs_sig (:,:,iblk), & + rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & + strtmp (:,:,:)) +! call ice_timer_stop(timer_tmp1) ! dynamics + + !----------------------------------------------------------------- + ! momentum equation + !----------------------------------------------------------------- + + call stepu (nx_block, ny_block, & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), strtmp (:,:,:), & + uocn (:,:,iblk), vocn (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + umassdti (:,:,iblk), fm (:,:,iblk), & + uarear (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + uvel_init(:,:,iblk), vvel_init(:,:,iblk),& + uvel (:,:,iblk), vvel (:,:,iblk)) + + ! load velocity into array for boundary updates + fld2(:,:,1,iblk) = uvel(:,:,iblk) + fld2(:,:,2,iblk) = vvel(:,:,iblk) + + !----------------------------------------------------------------- + ! evolution of structure tensor A + !----------------------------------------------------------------- + +! call ice_timer_start(timer_tmp3) ! dynamics + if (mod(ksub,10) == 1) then ! only called every 10th timestep + call stepa (nx_block, ny_block, & + dtei, icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + a11 (:,:,iblk), a12 (:,:,iblk), & + a11_1 (:,:,iblk), a11_2 (:,:,iblk), & + a11_3 (:,:,iblk), a11_4 (:,:,iblk), & + a12_1 (:,:,iblk), a12_2 (:,:,iblk), & + a12_3 (:,:,iblk), a12_4 (:,:,iblk), & + stressp_1(:,:,iblk), stressp_2(:,:,iblk), & + stressp_3(:,:,iblk), stressp_4(:,:,iblk), & + stressm_1(:,:,iblk), stressm_2(:,:,iblk), & + stressm_3(:,:,iblk), stressm_4(:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk)) + endif +! call ice_timer_stop(timer_tmp3) ! dynamics + enddo + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + if (maskhalo_dyn) then + call ice_HaloUpdate (fld2, halo_info_mask, & + field_loc_NEcorner, field_type_vector) + else + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + endif + + ! unload + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + uvel(:,:,iblk) = fld2(:,:,1,iblk) + vvel(:,:,iblk) = fld2(:,:,2,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bound) + + enddo ! 
subcycling + + deallocate(fld2) + if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) + + !----------------------------------------------------------------- + ! ice-ocean stress + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call evp_finish & + (nx_block, ny_block, & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + uocn (:,:,iblk), vocn (:,:,iblk), & + aiu (:,:,iblk), fm (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + + enddo + !$OMP END PARALLEL DO + + call u2tgrid_vector(strocnxT) ! shift + call u2tgrid_vector(strocnyT) + + call ice_timer_stop(timer_dynamics) ! dynamics + + end subroutine eap + +!======================================================================= + +! Initialize parameters and variables needed for the eap dynamics +! (based on init_evp) + + subroutine init_eap (dt) + + use ice_blocks, only: nx_block, ny_block + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c1, c2, c12, p5, pi, pih, piq + use ice_domain, only: nblocks + use ice_dyn_shared, only: init_evp + use ice_exit, only: abort_ice + use ice_restart_shared, only: runtype + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i, j, k, & + iblk ! block index + + real (kind=dbl_kind), parameter :: & + eps6 = 1.0e-6_dbl_kind, & + phi = pi/c12 ! diamond shaped floe smaller angle (default phi = 30 deg) + + integer (kind=int_kind) :: & + ix, iy, ip, iz, n, ia + + integer (kind=int_kind), parameter :: & + nz = 100 + + real (kind=dbl_kind) :: & + ainit, xinit, yinit, pinit, zinit, & + da, dx, dy, dp, dz, a1 + + call init_evp (dt) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + e11(i,j,iblk) = c0 + e12(i,j,iblk) = c0 + e22(i,j,iblk) = c0 + s11(i,j,iblk) = c0 + s12(i,j,iblk) = c0 + s22(i,j,iblk) = c0 + yieldstress11(i,j,iblk) = c0 + yieldstress12(i,j,iblk) = c0 + yieldstress22(i,j,iblk) = c0 + a11_1 (i,j,iblk) = p5 + a11_2 (i,j,iblk) = p5 + a11_3 (i,j,iblk) = p5 + a11_4 (i,j,iblk) = p5 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 + enddo ! i + enddo ! j + enddo ! iblk + !$OMP END PARALLEL DO + + !----------------------------------------------------------------- + ! 
create lookup table for eap dynamics (see Appendix A1) + !----------------------------------------------------------------- + + da = p5/real(na_yield-1,kind=dbl_kind) + ainit = p5 - da + dx = pi/real(nx_yield-1,kind=dbl_kind) + xinit = pi + piq - dx + dz = pi/real(nz,kind=dbl_kind) + zinit = -pih + dy = pi/real(ny_yield-1,kind=dbl_kind) + yinit = -dy + + do ia=1,na_yield + do ix=1,nx_yield + do iy=1,ny_yield + s11r(ix,iy,ia) = c0 + s12r(ix,iy,ia) = c0 + s22r(ix,iy,ia) = c0 + s11s(ix,iy,ia) = c0 + s12s(ix,iy,ia) = c0 + s22s(ix,iy,ia) = c0 + if (ia <= na_yield-1) then + do iz=1,nz + s11r(ix,iy,ia) = s11r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s11kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s12r(ix,iy,ia) = s12r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s12kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s22r(ix,iy,ia) = s22r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s22kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s11s(ix,iy,ia) = s11s(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s11ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s12s(ix,iy,ia) = s12s(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s12ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s22s(ix,iy,ia) = s22s(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s22ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + enddo + if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 + if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 + if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 + if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 + if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 + if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 + else + s11r(ix,iy,ia) = p5*s11kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s12r(ix,iy,ia) = p5*s12kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s22r(ix,iy,ia) = p5*s22kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s11s(ix,iy,ia) = p5*s11ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s12s(ix,iy,ia) = p5*s12ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s22s(ix,iy,ia) = p5*s22ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 + if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 + if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 + if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 + if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 + if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 + endif + enddo + enddo + enddo + + end subroutine init_eap + +!======================================================================= +! Function : w1 (see Gaussian function psi in Tsamados et al 2013) + + FUNCTION w1(a) + double precision, intent(in) :: a + + real (kind=dbl_kind) :: w1 + + w1 = - 223.87569446_dbl_kind & + + 2361.2198663_dbl_kind*a & + - 10606.56079975_dbl_kind*a*a & + + 26315.50025642_dbl_kind*a*a*a & + - 38948.30444297_dbl_kind*a*a*a*a & + + 34397.72407466_dbl_kind*a*a*a*a*a & + - 16789.98003081_dbl_kind*a*a*a*a*a*a & + + 3495.82839237_dbl_kind*a*a*a*a*a*a*a + + end FUNCTION w1 + +!======================================================================= +! 
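! w1 above and w2 below are seventh-order polynomial fits in the largest
! structure-tensor eigenvalue a (evaluated here for a between 0.5 and 1);
! together they define the orientation weight w1(a)*exp(-w2(a)*z*z) used in
! the init_eap integrals above (the Gaussian psi of Tsamados et al. 2013).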
Function : w2 (see Gaussian function psi in Tsamados et al 2013) + + FUNCTION w2(a) + double precision, intent(in) :: a + + real (kind=dbl_kind) :: w2 + + w2 = - 6670.68911883_dbl_kind & + + 70222.33061536_dbl_kind*a & + - 314871.71525448_dbl_kind*a*a & + + 779570.02793492_dbl_kind*a*a*a & + - 1151098.82436864_dbl_kind*a*a*a*a & + + 1013896.59464498_dbl_kind*a*a*a*a*a & + - 493379.44906738_dbl_kind*a*a*a*a*a*a & + + 102356.551518_dbl_kind*a*a*a*a*a*a*a + + end FUNCTION w2 + +!======================================================================= +! Function : s11kr + + FUNCTION s11kr(x,y,z,phi) + + use ice_constants , only: p5, pi, pih, c0, c1, puny + + real (kind=dbl_kind), intent(in) :: & + x,y,z,phi + + real (kind=dbl_kind) :: & + s11kr, p + + real (kind=dbl_kind) :: & + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & + t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 + + p = phi + + n1t2i11 = cos(z+pih-p) * cos(z+p) + n1t2i12 = cos(z+pih-p) * sin(z+p) + n1t2i21 = sin(z+pih-p) * cos(z+p) + n1t2i22 = sin(z+pih-p) * sin(z+p) + n2t1i11 = cos(z-pih+p) * cos(z-p) + n2t1i12 = cos(z-pih+p) * sin(z-p) + n2t1i21 = sin(z-pih+p) * cos(z-p) + n2t1i22 = sin(z-pih+p) * sin(z-p) + t1t2i11 = cos(z-p) * cos(z+p) + t1t2i12 = cos(z-p) * sin(z+p) + t1t2i21 = sin(z-p) * cos(z+p) + t1t2i22 = sin(z-p) * sin(z+p) + t2t1i11 = cos(z+p) * cos(z-p) + t2t1i12 = cos(z+p) * sin(z-p) + t2t1i21 = sin(z+p) * cos(z-p) + t2t1i22 = sin(z+p) * sin(z-p) +! In expression of tensor d, with this formulatin d(x)=-d(x+pi) +! Solution, when diagonalizing always check sgn(a11-a22) if > then keep x else x=x-pi/2 + d11 = cos(y)*cos(y)*(cos(x)+sin(x)*tan(y)*tan(y)) + d12 = cos(y)*cos(y)*tan(y)*(-cos(x)+sin(x)) + d22 = cos(y)*cos(y)*(sin(x)+cos(x)*tan(y)*tan(y)) + IIn1t2 = n1t2i11 * d11 + (n1t2i12 + n1t2i21) * d12 + n1t2i22 * d22 + IIn2t1 = n2t1i11 * d11 + (n2t1i12 + n2t1i21) * d12 + n2t1i22 * d22 + IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 + + if (-IIn1t2>=puny) then + Hen1t2 = c1 + else + Hen1t2 = c0 + endif + + if (-IIn2t1>=puny) then + Hen2t1 = c1 + else + Hen2t1 = c0 + endif + + s11kr = (- Hen1t2 * n1t2i11 - Hen2t1 * n2t1i11) + + end FUNCTION s11kr + +!======================================================================= +! 
Function : s12kr + + FUNCTION s12kr(x,y,z,phi) + + use ice_constants , only: p5, pi, pih, c0, c1, puny + real (kind=dbl_kind), intent(in) :: & + x,y,z,phi + + real (kind=dbl_kind) :: & + s12kr, s12r0, s21r0, p + + real (kind=dbl_kind) :: & + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & + t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 + + p = phi + + n1t2i11 = cos(z+pih-p) * cos(z+p) + n1t2i12 = cos(z+pih-p) * sin(z+p) + n1t2i21 = sin(z+pih-p) * cos(z+p) + n1t2i22 = sin(z+pih-p) * sin(z+p) + n2t1i11 = cos(z-pih+p) * cos(z-p) + n2t1i12 = cos(z-pih+p) * sin(z-p) + n2t1i21 = sin(z-pih+p) * cos(z-p) + n2t1i22 = sin(z-pih+p) * sin(z-p) + t1t2i11 = cos(z-p) * cos(z+p) + t1t2i12 = cos(z-p) * sin(z+p) + t1t2i21 = sin(z-p) * cos(z+p) + t1t2i22 = sin(z-p) * sin(z+p) + t2t1i11 = cos(z+p) * cos(z-p) + t2t1i12 = cos(z+p) * sin(z-p) + t2t1i21 = sin(z+p) * cos(z-p) + t2t1i22 = sin(z+p) * sin(z-p) + d11 = cos(y)*cos(y)*(cos(x)+sin(x)*tan(y)*tan(y)) + d12 = cos(y)*cos(y)*tan(y)*(-cos(x)+sin(x)) + d22 = cos(y)*cos(y)*(sin(x)+cos(x)*tan(y)*tan(y)) + IIn1t2 = n1t2i11 * d11 + (n1t2i12 + n1t2i21) * d12 + n1t2i22 * d22 + IIn2t1 = n2t1i11 * d11 + (n2t1i12 + n2t1i21) * d12 + n2t1i22 * d22 + IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 + + if (-IIn1t2>=puny) then + Hen1t2 = c1 + else + Hen1t2 = c0 + endif + + if (-IIn2t1>=puny) then + Hen2t1 = c1 + else + Hen2t1 = c0 + endif + + s12r0 = (- Hen1t2 * n1t2i12 - Hen2t1 * n2t1i12) + s21r0 = (- Hen1t2 * n1t2i21 - Hen2t1 * n2t1i21) + s12kr=p5*(s12r0+s21r0) + + end FUNCTION s12kr + +!======================================================================= +! Function : s22r + + FUNCTION s22kr(x,y,z,phi) + + use ice_constants , only: p5, pi, pih, c0, c1, puny + + real (kind=dbl_kind), intent(in) :: & + x,y,z,phi + + real (kind=dbl_kind) :: & + s22kr, p + + real (kind=dbl_kind) :: & + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & + t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 + + p = phi + + n1t2i11 = cos(z+pih-p) * cos(z+p) + n1t2i12 = cos(z+pih-p) * sin(z+p) + n1t2i21 = sin(z+pih-p) * cos(z+p) + n1t2i22 = sin(z+pih-p) * sin(z+p) + n2t1i11 = cos(z-pih+p) * cos(z-p) + n2t1i12 = cos(z-pih+p) * sin(z-p) + n2t1i21 = sin(z-pih+p) * cos(z-p) + n2t1i22 = sin(z-pih+p) * sin(z-p) + t1t2i11 = cos(z-p) * cos(z+p) + t1t2i12 = cos(z-p) * sin(z+p) + t1t2i21 = sin(z-p) * cos(z+p) + t1t2i22 = sin(z-p) * sin(z+p) + t2t1i11 = cos(z+p) * cos(z-p) + t2t1i12 = cos(z+p) * sin(z-p) + t2t1i21 = sin(z+p) * cos(z-p) + t2t1i22 = sin(z+p) * sin(z-p) + d11 = cos(y)*cos(y)*(cos(x)+sin(x)*tan(y)*tan(y)) + d12 = cos(y)*cos(y)*tan(y)*(-cos(x)+sin(x)) + d22 = cos(y)*cos(y)*(sin(x)+cos(x)*tan(y)*tan(y)) + IIn1t2 = n1t2i11 * d11 + (n1t2i12 + n1t2i21) * d12 + n1t2i22 * d22 + IIn2t1 = n2t1i11 * d11 + (n2t1i12 + n2t1i21) * d12 + n2t1i22 * d22 + IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 + + if (-IIn1t2>=puny) then + Hen1t2 = c1 + else + Hen1t2 = c0 + endif + + if (-IIn2t1>=puny) then + Hen2t1 = c1 + else + Hen2t1 = c0 + endif + + s22kr = (- Hen1t2 * n1t2i22 - Hen2t1 * n2t1i22) + + end FUNCTION s22kr + +!======================================================================= +! 
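! The six kernel functions share one construction: the dyads n1t2, n2t1,
! t1t2, t2t1 built from the orientation z and the floe angle phi, the
! strain-rate direction tensor d11/d12/d22 built from the table angles x and
! y, and the 0/1 factors Hen1t2/Hen2t1 obtained by thresholding -IIn1t2 and
! -IIn2t1 against puny.  The s*kr versions above feed the ridging tables
! (s11r etc.); the s*ks versions below, beginning with s11ks, feed the
! sliding tables and carry an extra sign(IIt1t2) factor.  They differ only
! in which tensor component (11, symmetrized 12, or 22) is returned.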
Function : s11ks + + FUNCTION s11ks(x,y,z,phi) + + use ice_constants , only: p5, pi, pih, c0, c1, puny + + real (kind=dbl_kind), intent(in):: & + x,y,z,phi + + real (kind=dbl_kind) :: & + s11ks, p + + real (kind=dbl_kind) :: & + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & + t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 + + p = phi + + n1t2i11 = cos(z+pih-p) * cos(z+p) + n1t2i12 = cos(z+pih-p) * sin(z+p) + n1t2i21 = sin(z+pih-p) * cos(z+p) + n1t2i22 = sin(z+pih-p) * sin(z+p) + n2t1i11 = cos(z-pih+p) * cos(z-p) + n2t1i12 = cos(z-pih+p) * sin(z-p) + n2t1i21 = sin(z-pih+p) * cos(z-p) + n2t1i22 = sin(z-pih+p) * sin(z-p) + t1t2i11 = cos(z-p) * cos(z+p) + t1t2i12 = cos(z-p) * sin(z+p) + t1t2i21 = sin(z-p) * cos(z+p) + t1t2i22 = sin(z-p) * sin(z+p) + t2t1i11 = cos(z+p) * cos(z-p) + t2t1i12 = cos(z+p) * sin(z-p) + t2t1i21 = sin(z+p) * cos(z-p) + t2t1i22 = sin(z+p) * sin(z-p) + d11 = cos(y)*cos(y)*(cos(x)+sin(x)*tan(y)*tan(y)) + d12 = cos(y)*cos(y)*tan(y)*(-cos(x)+sin(x)) + d22 = cos(y)*cos(y)*(sin(x)+cos(x)*tan(y)*tan(y)) + IIn1t2 = n1t2i11 * d11 + (n1t2i12 + n1t2i21) * d12 + n1t2i22 * d22 + IIn2t1 = n2t1i11 * d11 + (n2t1i12 + n2t1i21) * d12 + n2t1i22 * d22 + IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 + + if (-IIn1t2>=puny) then + Hen1t2 = c1 + else + Hen1t2 = c0 + endif + + if (-IIn2t1>=puny) then + Hen2t1 = c1 + else + Hen2t1 = c0 + endif + + s11ks = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i11 + Hen2t1 * t2t1i11) + + end FUNCTION s11ks + +!======================================================================= +! Function : s12ks + + FUNCTION s12ks(x,y,z,phi) + + use ice_constants , only: p5, pi, pih, c0, c1, puny + + real (kind=dbl_kind), intent(in) :: & + x,y,z,phi + + real (kind=dbl_kind) :: & + s12ks,s12s0,s21s0,p + + real (kind=dbl_kind) :: & + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & + t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 + + p =phi + + n1t2i11 = cos(z+pih-p) * cos(z+p) + n1t2i12 = cos(z+pih-p) * sin(z+p) + n1t2i21 = sin(z+pih-p) * cos(z+p) + n1t2i22 = sin(z+pih-p) * sin(z+p) + n2t1i11 = cos(z-pih+p) * cos(z-p) + n2t1i12 = cos(z-pih+p) * sin(z-p) + n2t1i21 = sin(z-pih+p) * cos(z-p) + n2t1i22 = sin(z-pih+p) * sin(z-p) + t1t2i11 = cos(z-p) * cos(z+p) + t1t2i12 = cos(z-p) * sin(z+p) + t1t2i21 = sin(z-p) * cos(z+p) + t1t2i22 = sin(z-p) * sin(z+p) + t2t1i11 = cos(z+p) * cos(z-p) + t2t1i12 = cos(z+p) * sin(z-p) + t2t1i21 = sin(z+p) * cos(z-p) + t2t1i22 = sin(z+p) * sin(z-p) + d11 = cos(y)*cos(y)*(cos(x)+sin(x)*tan(y)*tan(y)) + d12 = cos(y)*cos(y)*tan(y)*(-cos(x)+sin(x)) + d22 = cos(y)*cos(y)*(sin(x)+cos(x)*tan(y)*tan(y)) + IIn1t2 = n1t2i11 * d11 + (n1t2i12 + n1t2i21) * d12 + n1t2i22 * d22 + IIn2t1 = n2t1i11 * d11 + (n2t1i12 + n2t1i21) * d12 + n2t1i22 * d22 + IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 + + if (-IIn1t2>=puny) then + Hen1t2 = c1 + else + Hen1t2 = c0 + endif + + if (-IIn2t1>=puny) then + Hen2t1 = c1 + else + Hen2t1 = c0 + endif + + s12s0 = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i12 + Hen2t1 * t2t1i12) + s21s0 = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i21 + Hen2t1 * t2t1i21) + s12ks=p5*(s12s0+s21s0) + + end FUNCTION s12ks + +!======================================================================= +! 
Function : s22ks + + FUNCTION s22ks(x,y,z,phi) + + use ice_constants , only: p5, pi, pih, c0, c1, puny + + real (kind=dbl_kind), intent(in) :: & + x,y,z,phi + + real (kind=dbl_kind) :: & + s22ks,p + + real (kind=dbl_kind) :: & + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & + t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 + + p = phi + + n1t2i11 = cos(z+pih-p) * cos(z+p) + n1t2i12 = cos(z+pih-p) * sin(z+p) + n1t2i21 = sin(z+pih-p) * cos(z+p) + n1t2i22 = sin(z+pih-p) * sin(z+p) + n2t1i11 = cos(z-pih+p) * cos(z-p) + n2t1i12 = cos(z-pih+p) * sin(z-p) + n2t1i21 = sin(z-pih+p) * cos(z-p) + n2t1i22 = sin(z-pih+p) * sin(z-p) + t1t2i11 = cos(z-p) * cos(z+p) + t1t2i12 = cos(z-p) * sin(z+p) + t1t2i21 = sin(z-p) * cos(z+p) + t1t2i22 = sin(z-p) * sin(z+p) + t2t1i11 = cos(z+p) * cos(z-p) + t2t1i12 = cos(z+p) * sin(z-p) + t2t1i21 = sin(z+p) * cos(z-p) + t2t1i22 = sin(z+p) * sin(z-p) + d11 = cos(y)*cos(y)*(cos(x)+sin(x)*tan(y)*tan(y)) + d12 = cos(y)*cos(y)*tan(y)*(-cos(x)+sin(x)) + d22 = cos(y)*cos(y)*(sin(x)+cos(x)*tan(y)*tan(y)) + IIn1t2 = n1t2i11 * d11 + (n1t2i12 + n1t2i21) * d12 + n1t2i22 * d22 + IIn2t1 = n2t1i11 * d11 + (n2t1i12 + n2t1i21) * d12 + n2t1i22 * d22 + IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 + + if (-IIn1t2>=puny) then + Hen1t2 = c1 + else + Hen1t2 = c0 + endif + + if (-IIn2t1>=puny) then + Hen2t1 = c1 + else + Hen2t1 = c0 + endif + + s22ks = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i22 + Hen2t1 * t2t1i22) + + end FUNCTION s22ks + + +!======================================================================= + +! Computes the rates of strain and internal stress components for +! each of the four corners on each T-grid cell. +! Computes stress terms for the momentum equation +! (based on subroutine stress) + + subroutine stress_eap (nx_block, ny_block, & + ksub, ndte, & + icellt, & + indxti, indxtj, & + arlx1i, denom1, & + uvel, vvel, & + dxt, dyt, & + dxhy, dyhx, & + cxp, cyp, & + cxm, cym, & + tarear, strength, & + a11, a12, & + a11_1, a11_2, a11_3, a11_4, & + a12_1, a12_2, a12_3, a12_4, & + stressp_1, stressp_2, & + stressp_3, stressp_4, & + stressm_1, stressm_2, & + stressm_3, stressm_4, & + stress12_1, stress12_2, & + stress12_3, stress12_4, & + shear, divu, & + e11, e12, & + e22, & + s11, s12, & + s22, & + yieldstress11, & + yieldstress12, & + yieldstress22, & + prs_sig, & + rdg_conv, rdg_shear, & + strtmp) + + use ice_constants, only: c0, p027, p055, p111, p166, & + p2, p222, p25, p333, p5, puny + +!echmod tmp +! use ice_timers, only: & +! ice_timer_start, ice_timer_stop, & +! timer_tmp1, timer_tmp2, timer_tmp3 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ksub , & ! subcycling step + ndte , & ! number of subcycles + icellt ! no. of cells where icetmask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction + + real (kind=dbl_kind), intent(in) :: & + arlx1i , & ! dte/2T (original) or 1/alpha1 (revised) + denom1 ! constant for stress equation + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + strength , & ! ice strength (N/m) + uvel , & ! x-component of velocity (m/s) + vvel , & ! y-component of velocity (m/s) + dxt , & ! width of T-cell through the middle (m) + dyt , & ! height of T-cell through the middle (m) + dxhy , & ! 0.5*(HTE - HTE) + dyhx , & ! 
0.5*(HTN - HTN) + cyp , & ! 1.5*HTE - 0.5*HTE + cxp , & ! 1.5*HTN - 0.5*HTN + cym , & ! 0.5*HTE - 1.5*HTE + cxm , & ! 0.5*HTN - 1.5*HTN + tarear ! 1/tarea + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + stressp_1, stressp_2, stressp_3, stressp_4, & ! sigma11+sigma22 + stressm_1, stressm_2, stressm_3, stressm_4, & ! sigma11-sigma22 + stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + a11, a12, a11_1, a11_2, a11_3, a11_4, & ! structure tensor + a12_1, a12_2, a12_3, a12_4 ! structure tensor + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + prs_sig , & ! replacement pressure, for stress calc + shear , & ! strain rate II component (1/s) + divu , & ! strain rate I component, velocity divergence (1/s) + e11 , & ! components of strain rate tensor (1/s) + e12 , & ! + e22 , & ! + s11 , & ! components of stress tensor (kg/s^2) + s12 , & ! + s22 , & ! + yieldstress11, & ! components of yield stress tensor (kg/s^2) + yieldstress12, & + yieldstress22, & + rdg_conv , & ! convergence term for ridging (1/s) + rdg_shear ! shear term for ridging (1/s) + + real (kind=dbl_kind), dimension(nx_block,ny_block,8), & + intent(out) :: & + strtmp ! stress combinations + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + stressptmp_1, stressptmp_2, stressptmp_3, stressptmp_4, & ! sigma11+sigma22 + stressmtmp_1, stressmtmp_2, stressmtmp_3, stressmtmp_4, & ! sigma11-sigma22 + stress12tmp_1,stress12tmp_2,stress12tmp_3,stress12tmp_4 ! sigma12 + + real (kind=dbl_kind) :: & + divune, divunw, divuse, divusw , & ! divergence + tensionne, tensionnw, tensionse, tensionsw, & ! tension + shearne, shearnw, shearse, shearsw , & ! shearing + ssigpn, ssigps, ssigpe, ssigpw , & + ssigmn, ssigms, ssigme, ssigmw , & + ssig12n, ssig12s, ssig12e, ssig12w , & + ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & + csigpne, csigpnw, csigpse, csigpsw , & + csigmne, csigmnw, csigmse, csigmsw , & + csig12ne, csig12nw, csig12se, csig12sw , & + str12ew, str12we, str12ns, str12sn , & + strp_tmp, strm_tmp + + real (kind=dbl_kind) :: & + alpharne, alpharnw, alpharsw, alpharse, & + alphasne, alphasnw, alphassw, alphasse + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + strtmp(:,:,:) = c0 + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icellt + i = indxti(ij) + j = indxtj(ij) + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + ! divergence = e_11 + e_22 + divune = cyp(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & + + cxp(i,j)*vvel(i ,j ) - dxt(i,j)*vvel(i ,j-1) + divunw = cym(i,j)*uvel(i-1,j ) + dyt(i,j)*uvel(i ,j ) & + + cxp(i,j)*vvel(i-1,j ) - dxt(i,j)*vvel(i-1,j-1) + divusw = cym(i,j)*uvel(i-1,j-1) + dyt(i,j)*uvel(i ,j-1) & + + cxm(i,j)*vvel(i-1,j-1) + dxt(i,j)*vvel(i-1,j ) + divuse = cyp(i,j)*uvel(i ,j-1) - dyt(i,j)*uvel(i-1,j-1) & + + cxm(i,j)*vvel(i ,j-1) + dxt(i,j)*vvel(i ,j ) + + ! 
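! divune/nw/sw/se above, and the tension and shear combinations next, are
! area-weighted corner values of D = e_11 + e_22, T = e_11 - e_22 and a
! shearing measure built from e_12.  The individual components are recovered
! later as e_11 = (D + T)/2 and e_22 = (D - T)/2, which is where the p5
! factors in the e11/e22 diagnostics (and in dtemp11/dtemp22 of
! update_stress_rdg) come from; multiplying by tarear removes the area
! weighting noted above.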
tension strain rate = e_11 - e_22 + tensionne = -cym(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & + + cxm(i,j)*vvel(i ,j ) + dxt(i,j)*vvel(i ,j-1) + tensionnw = -cyp(i,j)*uvel(i-1,j ) + dyt(i,j)*uvel(i ,j ) & + + cxm(i,j)*vvel(i-1,j ) + dxt(i,j)*vvel(i-1,j-1) + tensionsw = -cyp(i,j)*uvel(i-1,j-1) + dyt(i,j)*uvel(i ,j-1) & + + cxp(i,j)*vvel(i-1,j-1) - dxt(i,j)*vvel(i-1,j ) + tensionse = -cym(i,j)*uvel(i ,j-1) - dyt(i,j)*uvel(i-1,j-1) & + + cxp(i,j)*vvel(i ,j-1) - dxt(i,j)*vvel(i ,j ) + + ! shearing strain rate = e_12 + shearne = -cym(i,j)*vvel(i ,j ) - dyt(i,j)*vvel(i-1,j ) & + - cxm(i,j)*uvel(i ,j ) - dxt(i,j)*uvel(i ,j-1) + shearnw = -cyp(i,j)*vvel(i-1,j ) + dyt(i,j)*vvel(i ,j ) & + - cxm(i,j)*uvel(i-1,j ) - dxt(i,j)*uvel(i-1,j-1) + shearsw = -cyp(i,j)*vvel(i-1,j-1) + dyt(i,j)*vvel(i ,j-1) & + - cxp(i,j)*uvel(i-1,j-1) + dxt(i,j)*uvel(i-1,j ) + shearse = -cym(i,j)*vvel(i ,j-1) - dyt(i,j)*vvel(i-1,j-1) & + - cxp(i,j)*uvel(i ,j-1) + dxt(i,j)*uvel(i ,j ) + + !----------------------------------------------------------------- + ! Stress updated depending on strain rate and structure tensor + !----------------------------------------------------------------- +! call ice_timer_start(timer_tmp2) ! dynamics + + ! ne + call update_stress_rdg (ksub, ndte, divune, tensionne, & + shearne, a11_1(i,j), a12_1(i,j), & + stressptmp_1, stressmtmp_1, & + stress12tmp_1, strength(i,j), & + alpharne, alphasne) + ! nw + call update_stress_rdg (ksub, ndte, divunw, tensionnw, & + shearnw, a11_2(i,j), a12_2(i,j), & + stressptmp_2, stressmtmp_2, & + stress12tmp_2, strength(i,j), & + alpharnw, alphasnw) + ! sw + call update_stress_rdg (ksub, ndte, divusw, tensionsw, & + shearsw, a11_3(i,j), a12_3(i,j), & + stressptmp_3, stressmtmp_3, & + stress12tmp_3, strength(i,j), & + alpharsw, alphassw) + ! se + call update_stress_rdg (ksub, ndte, divuse, tensionse, & + shearse, a11_4(i,j), a12_4(i,j), & + stressptmp_4, stressmtmp_4, & + stress12tmp_4, strength(i,j), & + alpharse, alphasse) + +! call ice_timer_stop(timer_tmp2) ! dynamics + !----------------------------------------------------------------- + ! on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then + + ! diagnostic only + ! shear = sqrt(tension**2 + shearing**2) + shear(i,j) = p25*tarear(i,j)*sqrt( & + (tensionne + tensionnw + tensionse + tensionsw)**2 & + + (shearne + shearnw + shearse + shearsw)**2) + + divu(i,j) = p25*(divune + divunw + divuse + divusw) * tarear(i,j) + rdg_conv(i,j) = -min(p25*(alpharne + alpharnw & + + alpharsw + alpharse),c0) * tarear(i,j) + !rdg_shear=0 for computing closing_net in ridge_prep + !rdg_shear(i,j) = p25*(alphasne + alphasnw & + ! + alphassw + alphasse) * tarear(i,j) + endif + + e11(i,j) = p5*p25*(divune + divunw + divuse + divusw + & + tensionne + tensionnw + tensionse + tensionsw) * tarear(i,j) + + e12(i,j) = p5*p25*(shearne + shearnw + shearse + shearsw) * tarear(i,j) + + e22(i,j) = p5*p25*(divune + divunw + divuse + divusw - & + tensionne - tensionnw - tensionse - tensionsw) * tarear(i,j) + + prs_sig(i,j) = strength(i,j) + + !----------------------------------------------------------------- + ! elastic relaxation, see Eq. 
A12-A14 + !----------------------------------------------------------------- + + stressp_1(i,j) = (stressp_1(i,j) + stressptmp_1*arlx1i) & + * denom1 + stressp_2(i,j) = (stressp_2(i,j) + stressptmp_2*arlx1i) & + * denom1 + stressp_3(i,j) = (stressp_3(i,j) + stressptmp_3*arlx1i) & + * denom1 + stressp_4(i,j) = (stressp_4(i,j) + stressptmp_4*arlx1i) & + * denom1 + + stressm_1(i,j) = (stressm_1(i,j) + stressmtmp_1*arlx1i) & + * denom1 + stressm_2(i,j) = (stressm_2(i,j) + stressmtmp_2*arlx1i) & + * denom1 + stressm_3(i,j) = (stressm_3(i,j) + stressmtmp_3*arlx1i) & + * denom1 + stressm_4(i,j) = (stressm_4(i,j) + stressmtmp_4*arlx1i) & + * denom1 + + stress12_1(i,j) = (stress12_1(i,j) + stress12tmp_1*arlx1i) & + * denom1 + stress12_2(i,j) = (stress12_2(i,j) + stress12tmp_2*arlx1i) & + * denom1 + stress12_3(i,j) = (stress12_3(i,j) + stress12tmp_3*arlx1i) & + * denom1 + stress12_4(i,j) = (stress12_4(i,j) + stress12tmp_4*arlx1i) & + * denom1 + + s11(i,j) = p5 * p25 * (stressp_1(i,j) + stressp_2(i,j) & + + stressp_3(i,j) + stressp_4(i,j) & + + stressm_1(i,j) + stressm_2(i,j) & + + stressm_3(i,j) + stressm_4(i,j)) + s22(i,j) = p5 * p25 * (stressp_1(i,j) + stressp_2(i,j) & + + stressp_3(i,j) + stressp_4(i,j) & + - stressm_1(i,j) - stressm_2(i,j) & + - stressm_3(i,j) - stressm_4(i,j)) + s12(i,j) = p25 * (stress12_1(i,j) + stress12_2(i,j) & + + stress12_3(i,j) + stress12_4(i,j)) + + yieldstress11(i,j) = p5 * p25 * (stressptmp_1 + stressptmp_2 & + + stressptmp_3 + stressptmp_4 & + + stressmtmp_1 + stressmtmp_2 & + + stressmtmp_3 + stressmtmp_4) + yieldstress22(i,j) = p5 * p25 * (stressptmp_1 + stressptmp_2 & + + stressptmp_3 + stressptmp_4 & + - stressmtmp_1 - stressmtmp_2 & + - stressmtmp_3 - stressmtmp_4) + yieldstress12(i,j) = p25 * (stress12tmp_1 + stress12tmp_2 & + + stress12tmp_3 + stress12tmp_4) + + !----------------------------------------------------------------- + ! Eliminate underflows. + ! The following code is commented out because it is relatively + ! expensive and most compilers include a flag that accomplishes + ! the same thing more efficiently. This code is cheaper than + ! handling underflows if the compiler lacks a flag; uncomment + ! it in that case. The compiler flag is often described with the + ! phrase "flush to zero". + !----------------------------------------------------------------- + +! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) +! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) +! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) +! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) + +! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) +! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) +! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) +! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) + +! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) +! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) +! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) +! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) + + !----------------------------------------------------------------- + ! combinations of the stresses for the momentum equation ! 
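! A minimal standalone sketch of the elastic relaxation applied above: each
! stress component is nudged toward its instantaneous anisotropic target
! (stressptmp etc.) on every subcycle.  The sketch assumes the usual
! definition denom1 = 1/(1 + arlx1i) from ice_dyn_shared (not shown in this
! file), in which case the fixed point of the recursion is exactly the
! target value.

      program relax_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        real (kind=dbl_kind) :: arlx1i, denom1, s, starget
        integer :: ksub
        arlx1i  = 0.05_dbl_kind                ! stands in for dte/2T or 1/alpha1
        denom1  = 1.0_dbl_kind/(1.0_dbl_kind + arlx1i)  ! assumed form of denom1
        starget = 2.0_dbl_kind                 ! instantaneous target (stressptmp)
        s       = 0.0_dbl_kind
        do ksub = 1, 500                       ! plays the role of the ndte subcycles
          s = (s + starget*arlx1i)*denom1      ! same form as the stress updates above
        enddo
        print *, 's relaxes toward the target:', s, starget
      end program relax_sketch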
kg/s^2 + !----------------------------------------------------------------- + + ssigpn = stressp_1(i,j) + stressp_2(i,j) + ssigps = stressp_3(i,j) + stressp_4(i,j) + ssigpe = stressp_1(i,j) + stressp_4(i,j) + ssigpw = stressp_2(i,j) + stressp_3(i,j) + ssigp1 =(stressp_1(i,j) + stressp_3(i,j))*p055 + ssigp2 =(stressp_2(i,j) + stressp_4(i,j))*p055 + + ssigmn = stressm_1(i,j) + stressm_2(i,j) + ssigms = stressm_3(i,j) + stressm_4(i,j) + ssigme = stressm_1(i,j) + stressm_4(i,j) + ssigmw = stressm_2(i,j) + stressm_3(i,j) + ssigm1 =(stressm_1(i,j) + stressm_3(i,j))*p055 + ssigm2 =(stressm_2(i,j) + stressm_4(i,j))*p055 + + ssig12n = stress12_1(i,j) + stress12_2(i,j) + ssig12s = stress12_3(i,j) + stress12_4(i,j) + ssig12e = stress12_1(i,j) + stress12_4(i,j) + ssig12w = stress12_2(i,j) + stress12_3(i,j) + ssig121 =(stress12_1(i,j) + stress12_3(i,j))*p111 + ssig122 =(stress12_2(i,j) + stress12_4(i,j))*p111 + + csigpne = p111*stressp_1(i,j) + ssigp2 + p027*stressp_3(i,j) + csigpnw = p111*stressp_2(i,j) + ssigp1 + p027*stressp_4(i,j) + csigpsw = p111*stressp_3(i,j) + ssigp2 + p027*stressp_1(i,j) + csigpse = p111*stressp_4(i,j) + ssigp1 + p027*stressp_2(i,j) + + csigmne = p111*stressm_1(i,j) + ssigm2 + p027*stressm_3(i,j) + csigmnw = p111*stressm_2(i,j) + ssigm1 + p027*stressm_4(i,j) + csigmsw = p111*stressm_3(i,j) + ssigm2 + p027*stressm_1(i,j) + csigmse = p111*stressm_4(i,j) + ssigm1 + p027*stressm_2(i,j) + + csig12ne = p222*stress12_1(i,j) + ssig122 & + + p055*stress12_3(i,j) + csig12nw = p222*stress12_2(i,j) + ssig121 & + + p055*stress12_4(i,j) + csig12sw = p222*stress12_3(i,j) + ssig122 & + + p055*stress12_1(i,j) + csig12se = p222*stress12_4(i,j) + ssig121 & + + p055*stress12_2(i,j) + + str12ew = p5*dxt(i,j)*(p333*ssig12e + p166*ssig12w) + str12we = p5*dxt(i,j)*(p333*ssig12w + p166*ssig12e) + str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) + str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) + + !----------------------------------------------------------------- + ! for dF/dx (u momentum) + !----------------------------------------------------------------- + strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) + strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) + + ! northeast (i,j) + strtmp(i,j,1) = -strp_tmp - strm_tmp - str12ew & + + dxhy(i,j)*(-csigpne + csigmne) + dyhx(i,j)*csig12ne + + ! northwest (i+1,j) + strtmp(i,j,2) = strp_tmp + strm_tmp - str12we & + + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw + + strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) + strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) + + ! southeast (i,j+1) + strtmp(i,j,3) = -strp_tmp - strm_tmp + str12ew & + + dxhy(i,j)*(-csigpse + csigmse) + dyhx(i,j)*csig12se + + ! southwest (i+1,j+1) + strtmp(i,j,4) = strp_tmp + strm_tmp + str12we & + + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw + + !----------------------------------------------------------------- + ! for dF/dy (v momentum) + !----------------------------------------------------------------- + strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) + strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) + + ! northeast (i,j) + strtmp(i,j,5) = -strp_tmp + strm_tmp - str12ns & + - dyhx(i,j)*(csigpne + csigmne) + dxhy(i,j)*csig12ne + + ! southeast (i,j+1) + strtmp(i,j,6) = strp_tmp - strm_tmp - str12sn & + - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se + + strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) + strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) + + ! 
northwest (i+1,j) + strtmp(i,j,7) = -strp_tmp + strm_tmp + str12ns & + - dyhx(i,j)*(csigpnw + csigmnw) + dxhy(i,j)*csig12nw + + ! southwest (i+1,j+1) + strtmp(i,j,8) = strp_tmp - strm_tmp + str12sn & + - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw + + enddo ! ij + + end subroutine stress_eap + +!======================================================================= + +! Updates the stress depending on values of strain rate and structure +! tensor and for ksub=ndte it computes closing and sliding rate + + subroutine update_stress_rdg (ksub, ndte, divu, tension, & + shear, a11, a12, & + stressp, stressm, & + stress12, strength, & + alphar, alphas) + + use ice_constants, only: c0, p025, p05, p1, p5, c1, c2, c12, puny, & + pi, pih, pi2, piq + + integer (kind=int_kind), intent(in) :: & + ksub, & + ndte + + real (kind=dbl_kind), intent(in) :: & + a11, a12, & + divu, tension, shear, & + strength + + real (kind=dbl_kind), intent(out) :: & + stressp, stressm, stress12, & + alphar, alphas + + ! local variables + + integer (kind=int_kind) :: & + kx ,ky, ka + + real (kind=dbl_kind) :: & + stemp11r, stemp12r, stemp22r, & + stemp11s, stemp12s, stemp22s, & + a22, Q11, Q12, Qd11, Qd12, & + Q11Q11, Q11Q12, Q12Q12, & + dtemp11, dtemp12, dtemp22, & + fxinvdx, fyinvdy, fainvda, & + mfxinvdx, mfyinvdy, mfainvda, & + fff, ffm, fmm, mmm, mmf, mff, fmf, mfm, & + rotstemp11r, rotstemp12r, rotstemp22r, & + rotstemp11s, rotstemp12s, rotstemp22s, & + sig11, sig12, sig22, & + sgprm11, sgprm12, sgprm22, & + invstressconviso, & + gamma, alpha, x, y, dx, dy, da, & + invdx, invdy, invda, invsin, & + invleng, dtemp1, dtemp2, atempprime, a + + real (kind=dbl_kind), parameter :: & + kfriction = 0.45_dbl_kind + +! Factor to maintain the same stress as in EVP (see Section 3) +! Can be set to 1 otherwise + invstressconviso = c1/(c1+kfriction*kfriction) + + invsin = c1/sin(pi2/c12) * invstressconviso + +! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates + +! 1) structure tensor + + a22 = c1-a11 + +! gamma: angle between general coordiantes and principal axis + gamma = p5*atan2((c2*a12),(a11 - a22)) + +! rotational tensor from general coordinates into principal axis + Q11 = cos(gamma) + Q12 = sin(gamma) + + Q11Q11 = Q11*Q11 + Q11Q12 = Q11*Q12 + Q12Q12 = Q12*Q12 + +! rotation Q*atemp*Q^T + atempprime = Q11Q11*a11 + c2*Q11Q12*a12 + Q12Q12*a22 + +! make first principal value the largest + atempprime = max(atempprime, c1 - atempprime) + +! 2) strain rate + + dtemp11 = p5*(divu + tension) + dtemp12 = shear*p5 + dtemp22 = p5*(divu - tension) + +! alpha: angle between general coordiantes and principal axis + alpha = p5*atan2((c2*dtemp12),(dtemp11 - dtemp22)) + +! y: angle between major principal axis of strain rate and structure tensor +! to make sure y between 0 and pi/2 + if (alpha > gamma) alpha = alpha - pi + if (alpha < gamma-pi) alpha = alpha + pi + y = gamma - alpha +!echmod require 0 <= y < (ny_yield-1)*dy = pi/2 +! y = mod(y+pi2, pih) +!echmod require 0 <= y < (ny_yield-1)*dy = pi +! y = mod(y+pi2, pi) +! alpha = gamma - y + +! rotate tensor (anticlockwise) from general coordinates into principal axis + Qd11 = cos(alpha) + Qd12 = sin(alpha) + + dtemp1 = Qd11*(Qd11*dtemp11 + c2*Qd12*dtemp12) + Qd12*Qd12*dtemp22 + dtemp2 = Qd12*(Qd12*dtemp11 - c2*Qd11*dtemp12) + Qd11*Qd11*dtemp22 + x = c0 + +! 
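! Standalone sketch (independent of this module) of the 2x2 principal-axis
! rotation used above for both the structure tensor and the strain rate:
! gamma = 0.5*atan2(2*a12, a11-a22) diagonalizes a symmetric tensor, the
! rotated off-diagonal vanishes, and the rotated 11-component equals the
! larger eigenvalue (a11+a22)/2 + sqrt(((a11-a22)/2)**2 + a12**2), which is
! what atempprime stores.

      program principal_axis_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        real (kind=dbl_kind) :: a11, a12, a22, gam, q11, q12
        real (kind=dbl_kind) :: arot11, arot12, eig_max
        a11 = 0.7_dbl_kind                       ! sample tensor with unit trace
        a22 = 1.0_dbl_kind - a11
        a12 = 0.2_dbl_kind
        gam = 0.5_dbl_kind*atan2(2.0_dbl_kind*a12, a11 - a22)
        q11 = cos(gam)
        q12 = sin(gam)
        arot11 = q11*q11*a11 + 2.0_dbl_kind*q11*q12*a12 + q12*q12*a22
        arot12 = q11*q12*(a22 - a11) + (q11*q11 - q12*q12)*a12
        eig_max = 0.5_dbl_kind*(a11 + a22) + sqrt((0.5_dbl_kind*(a11 - a22))**2 + a12*a12)
        print *, 'rotated 11-component  ', arot11   ! matches eig_max
        print *, 'closed-form eigenvalue', eig_max
        print *, 'rotated off-diagonal  ', arot12   ! ~ 0
      end program principal_axis_sketch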
In cos and sin values + if ((ABS(dtemp1) > puny).or.(ABS(dtemp2) > puny)) then + invleng = c1/sqrt(dtemp1*dtemp1 + dtemp2*dtemp2) + dtemp1 = dtemp1*invleng + dtemp2 = dtemp2*invleng + x = atan2(dtemp2,dtemp1) + endif + +!echmod to ensure the angle lies between pi/4 and 9 pi/4 + if (x < piq) x = x + pi2 +!echmod require 0 <= x < (nx_yield-1)*dx = 2 pi +! x = mod(x+pi2, pi2) + +! Now calculate updated stress tensor + dx = pi/real(nx_yield-1,kind=dbl_kind) + dy = pi/real(ny_yield-1,kind=dbl_kind) + da = p5/real(na_yield-1,kind=dbl_kind) + invdx = c1/dx + invdy = c1/dy + invda = c1/da + + kx = int((x-piq-pi)*invdx) + 1 + ky = int(y*invdy) + 1 + ka = int((atempprime-p5)*invda) + 1 + +! Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) + stemp11r = s11r(kx,ky,ka) + stemp12r = s12r(kx,ky,ka) + stemp22r = s22r(kx,ky,ka) + + stemp11s = s11s(kx,ky,ka) + stemp12s = s12s(kx,ky,ka) + stemp22s = s22s(kx,ky,ka) + +! Calculate mean ice stress over a collection of floes (Equation 3) + + stressp = strength*(stemp11r + kfriction*stemp11s & + + stemp22r + kfriction*stemp22s) * invsin + stress12 = strength*(stemp12r + kfriction*stemp12s) * invsin + stressm = strength*(stemp11r + kfriction*stemp11s & + - stemp22r - kfriction*stemp22s) * invsin + +! Back - rotation of the stress from principal axes into general coordinates + +! Update stress + sig11 = p5*(stressp + stressm) + sig12 = stress12 + sig22 = p5*(stressp - stressm) + + sgprm11 = Q11Q11*sig11 + Q12Q12*sig22 - c2*Q11Q12 *sig12 + sgprm12 = Q11Q12*sig11 - Q11Q12*sig22 + (Q11Q11 - Q12Q12)*sig12 + sgprm22 = Q12Q12*sig11 + Q11Q11*sig22 + c2*Q11Q12 *sig12 + + stressp = sgprm11 + sgprm22 + stress12 = sgprm12 + stressm = sgprm11 - sgprm22 + +! Compute ridging and sliding functions in general coordinates (Equation 11) + if (ksub == ndte) then + rotstemp11r = Q11Q11*stemp11r - c2*Q11Q12* stemp12r & + + Q12Q12*stemp22r + rotstemp12r = Q11Q11*stemp12r + Q11Q12*(stemp11r-stemp22r) & + - Q12Q12*stemp12r + rotstemp22r = Q12Q12*stemp11r + c2*Q11Q12* stemp12r & + + Q11Q11*stemp22r + + rotstemp11s = Q11Q11*stemp11s - c2*Q11Q12* stemp12s & + + Q12Q12*stemp22s + rotstemp12s = Q11Q11*stemp12s + Q11Q12*(stemp11s-stemp22s) & + - Q12Q12*stemp12s + rotstemp22s = Q12Q12*stemp11s + c2*Q11Q12* stemp12s & + + Q11Q11*stemp22s + + alphar = rotstemp11r*dtemp11 + c2*rotstemp12r*dtemp12 & + + rotstemp22r*dtemp22 + alphas = rotstemp11s*dtemp11 + c2*rotstemp12s*dtemp12 & + + rotstemp22s*dtemp22 + endif + + end subroutine update_stress_rdg + +!======================================================================= + +! Solves evolution equation for structure tensor (A19, A20) + + subroutine stepa (nx_block, ny_block, & + dtei, icellt, & + indxti, indxtj, & + a11, a12, & + a11_1, a11_2, a11_3, a11_4, & + a12_1, a12_2, a12_3, a12_4, & + stressp_1, stressp_2, & + stressp_3, stressp_4, & + stressm_1, stressm_2, & + stressm_3, stressm_4, & + stress12_1, stress12_2, & + stress12_3, stress12_4) + + use ice_constants, only: p001, p2, p25, p5, c1 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellt ! no. of cells where icetmask = 1 + + real (kind=dbl_kind), intent(in) :: & + dtei ! 1/dte, where dte is subcycling timestep (1/s) + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + ! 
ice stress tensor (kg/s^2) in each corner of T cell + stressp_1, stressp_2, stressp_3, stressp_4, & ! sigma11+sigma22 + stressm_1, stressm_2, stressm_3, stressm_4, & ! sigma11-sigma22 + stress12_1, stress12_2, stress12_3, stress12_4 ! sigma12 + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + ! structure tensor () in each corner of T cell + a11, a12, a11_1, a11_2, a11_3, a11_4, & ! components of + a12_1, a12_2, a12_3, a12_4 ! structure tensor () + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + mresult11, mresult12, & + dteikth, p5kth + + real (kind=dbl_kind), parameter :: & + kth = p2*p001 + + dteikth = c1 / (dtei + kth) + p5kth = p5 * kth + + do ij = 1, icellt + i = indxti(ij) + j = indxtj(ij) + +! ne + call calc_ffrac(1, stressp_1(i,j), stressm_1(i,j), & + stress12_1(i,j), & + a11_1(i,j), & + mresult11) + + call calc_ffrac(2, stressp_1(i,j), stressm_1(i,j), & + stress12_1(i,j), & + a12_1(i,j), & + mresult12) + + a11_1(i,j) = (a11_1(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit + a12_1(i,j) = (a12_1(i,j)*dtei - mresult12) * dteikth ! implicit + + +! nw + call calc_ffrac(1, stressp_2(i,j), stressm_2(i,j), & + stress12_2(i,j), & + a11_2(i,j), & + mresult11) + + call calc_ffrac(2, stressp_2(i,j), stressm_2(i,j), & + stress12_2(i,j), & + a12_2(i,j), & + mresult12) + + a11_2(i,j) = (a11_2(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit + a12_2(i,j) = (a12_2(i,j)*dtei - mresult12) * dteikth ! implicit + +! sw + call calc_ffrac(1, stressp_3(i,j), stressm_3(i,j), & + stress12_3(i,j), & + a11_3(i,j), & + mresult11) + + call calc_ffrac(2, stressp_3(i,j), stressm_3(i,j), & + stress12_3(i,j), & + a12_3(i,j), & + mresult12) + + a11_3(i,j) = (a11_3(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit + a12_3(i,j) = (a12_3(i,j)*dtei - mresult12) * dteikth ! implicit + +! se + call calc_ffrac(1, stressp_4(i,j), stressm_4(i,j), & + stress12_4(i,j), & + a11_4(i,j), & + mresult11) + + call calc_ffrac(2, stressp_4(i,j), stressm_4(i,j), & + stress12_4(i,j), & + a12_4(i,j), & + mresult12) + + a11_4(i,j) = (a11_4(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit + a12_4(i,j) = (a12_4(i,j)*dtei - mresult12) * dteikth ! implicit + +! average structure tensor + + a11(i,j) = p25*(a11_1(i,j) + a11_2(i,j) + a11_3(i,j) + a11_4(i,j)) + a12(i,j) = p25*(a12_1(i,j) + a12_2(i,j) + a12_3(i,j) + a12_4(i,j)) + + enddo ! ij + + end subroutine stepa + +!======================================================================= + +! computes term in evolution equation for structure tensor which determines +! the ice floe re-orientation due to fracture +! Eq. 7: Ffrac = -kf(A-S) or = 0 depending on sigma_1 and sigma_2 + + subroutine calc_ffrac (blockno, stressp, stressm, & + stress12, & + a1x, & + mresult) + + use ice_constants, only: c0, p001, p1, p5, c2, c3 + + integer(kind=int_kind), intent(in) :: & + blockno + + real (kind=dbl_kind), intent(in) :: & + stressp, stressm, stress12, a1x + + real (kind=dbl_kind), intent(out) :: & + mresult + + ! local variables + + real (kind=dbl_kind) :: & + sigma11, sigma12, sigma22, & + gamma, sigma_1, sigma_2, & + Q11, Q12, Q11Q11, Q11Q12, Q12Q12 + + real (kind=dbl_kind), parameter :: & + kfrac = p001, & + threshold = c3*p1 + + sigma11 = p5*(stressp+stressm) + sigma12 = stress12 + sigma22 = p5*(stressp-stressm) + + gamma = p5*atan2((c2*sigma12),(sigma11-sigma22)) + +! 
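! Note on the structure-tensor updates in stepa above: multiplying through
! by (dtei + kth) shows that, e.g.,
!   a11_1_new = (a11_1_old*dtei + p5*kth - mresult11) / (dtei + kth)
! is equivalent to
!   (a11_1_new - a11_1_old)*dtei = -kth*(a11_1_new - p5) - mresult11,
! i.e. a backward-Euler step for relaxation toward the isotropic value 1/2
! (toward 0 for the a12 components), with the fracture term from calc_ffrac
! treated explicitly.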
rotate tensor to get into sigma principal axis + + Q11 = cos(gamma) + Q12 = sin(gamma) + + Q11Q11 = Q11*Q11 + Q11Q12 = Q11*Q12 + Q12Q12 = Q12*Q12 + + sigma_1 = Q11Q11*sigma11 + c2*Q11Q12*sigma12 & + + Q12Q12*sigma22 ! S(1,1) + sigma_2 = Q12Q12*sigma11 - c2*Q11Q12*sigma12 & + + Q11Q11*sigma22 ! S(2,2) + +! Pure divergence + if ((sigma_1 >= c0).and.(sigma_2 >= c0)) then + mresult = c0 + +! Unconfined compression: cracking of blocks not along the axial splitting direction +! which leads to the loss of their shape, so we again model it through diffusion + elseif ((sigma_1 >= c0).and.(sigma_2 < c0)) then + if (blockno == 1) mresult = kfrac * (a1x - Q12Q12) + if (blockno == 2) mresult = kfrac * (a1x + Q11Q12) + +! Shear faulting + elseif (sigma_2 == c0) then + mresult = c0 + elseif ((sigma_1 <= c0).and.(sigma_1/sigma_2 <= threshold)) then + if (blockno == 1) mresult = kfrac * (a1x - Q12Q12) + if (blockno == 2) mresult = kfrac * (a1x + Q11Q12) + +! Horizontal spalling + else + mresult = c0 + endif + + end subroutine calc_ffrac + +!======================================================================= +!---! these subroutines write/read Fortran unformatted data files .. +!======================================================================= + +! Dumps all values needed for a restart + + subroutine write_restart_eap () + + use ice_fileunits, only: nu_diag, nu_dump_eap + use ice_restart, only: write_restart_field + + ! local variables + + integer (kind=int_kind) :: & + iyear, imonth, iday ! year, month, day + + character(len=char_len_long) :: filename + + logical (kind=log_kind) :: diag + + diag = .true. + + !----------------------------------------------------------------- + ! structure tensor + !----------------------------------------------------------------- + + call write_restart_field(nu_dump_eap,0,a11_1,'ruf8','a11_1',1,diag) + call write_restart_field(nu_dump_eap,0,a11_3,'ruf8','a11_3',1,diag) + call write_restart_field(nu_dump_eap,0,a11_2,'ruf8','a11_2',1,diag) + call write_restart_field(nu_dump_eap,0,a11_4,'ruf8','a11_4',1,diag) + + call write_restart_field(nu_dump_eap,0,a12_1,'ruf8','a12_1',1,diag) + call write_restart_field(nu_dump_eap,0,a12_3,'ruf8','a12_3',1,diag) + call write_restart_field(nu_dump_eap,0,a12_2,'ruf8','a12_2',1,diag) + call write_restart_field(nu_dump_eap,0,a12_4,'ruf8','a12_4',1,diag) + + end subroutine write_restart_eap + +!======================================================================= + +! Reads all values needed for elastic anisotropic plastic dynamics restart + + subroutine read_restart_eap() + + use ice_blocks, only: nghost + use ice_boundary, only: ice_HaloUpdate_stress + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, & + field_loc_center, field_type_scalar + use ice_domain, only: nblocks, halo_info + use ice_fileunits, only: nu_diag, nu_restart_eap + use ice_grid, only: grid_type + use ice_restart, only: read_restart_field + use ice_restart_shared, only: restart_format + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk + + logical (kind=log_kind) :: & + diag + + diag = .true. + + !----------------------------------------------------------------- + ! Structure tensor must be read and scattered in pairs in order + ! to properly match corner values across a tripole grid cut. 
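! The pairs are (a11_1, a11_3), (a11_2, a11_4), (a12_1, a12_3) and
! (a12_2, a12_4): the reads below follow that order, and on tripole grids
! with restart_format 'pio' each pair is passed to ice_HaloUpdate_stress in
! both orders, matching corner values that evidently swap partners across
! the fold (NE with SW, NW with SE).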
+ !----------------------------------------------------------------- + if (my_task == master_task) write(nu_diag,*) & + 'structure tensor restart data' + + call read_restart_field(nu_restart_eap,0,a11_1,'ruf8', & + 'a11_1',1,diag,field_loc_center,field_type_scalar) ! a11_1 + call read_restart_field(nu_restart_eap,0,a11_3,'ruf8', & + 'a11_3',1,diag,field_loc_center,field_type_scalar) ! a11_3 + call read_restart_field(nu_restart_eap,0,a11_2,'ruf8', & + 'a11_2',1,diag,field_loc_center,field_type_scalar) ! a11_2 + call read_restart_field(nu_restart_eap,0,a11_4,'ruf8', & + 'a11_4',1,diag,field_loc_center,field_type_scalar) ! a11_4 + + call read_restart_field(nu_restart_eap,0,a12_1,'ruf8', & + 'a12_1',1,diag,field_loc_center,field_type_scalar) ! a12_1 + call read_restart_field(nu_restart_eap,0,a12_3,'ruf8', & + 'a12_3',1,diag,field_loc_center,field_type_scalar) ! a12_3 + call read_restart_field(nu_restart_eap,0,a12_2,'ruf8', & + 'a12_2',1,diag,field_loc_center,field_type_scalar) ! a12_2 + call read_restart_field(nu_restart_eap,0,a12_4,'ruf8', & + 'a12_4',1,diag,field_loc_center,field_type_scalar) ! a12_4 + + if (trim(grid_type) == 'tripole' .and. trim(restart_format) == 'pio') then + + call ice_HaloUpdate_stress(a11_1, a11_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_3, a11_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_2, a11_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_4, a11_2, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_1, a12_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_3, a12_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_2, a12_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_4, a12_2, halo_info, & + field_loc_center, field_type_scalar) + + endif + + !----------------------------------------------------------------- + ! Ensure unused values in west and south ghost cells are 0 + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, nghost + do i = 1, nx_block + a11_1 (i,j,iblk) = c0 + a11_2 (i,j,iblk) = c0 + a11_3 (i,j,iblk) = c0 + a11_4 (i,j,iblk) = c0 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 + enddo + enddo + do j = 1, ny_block + do i = 1, nghost + a11_1 (i,j,iblk) = c0 + a11_2 (i,j,iblk) = c0 + a11_3 (i,j,iblk) = c0 + a11_4 (i,j,iblk) = c0 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO + + end subroutine read_restart_eap + +!======================================================================= + + end module ice_dyn_eap + +!======================================================================= diff --git a/source/ice_dyn_evp.F90 b/source/ice_dyn_evp.F90 new file mode 100755 index 00000000..916a074c --- /dev/null +++ b/source/ice_dyn_evp.F90 @@ -0,0 +1,848 @@ +! SVN:$Id: ice_dyn_evp.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= +! +! Elastic-viscous-plastic sea ice dynamics model +! Computes ice velocity and deformation +! +! See: +! +! Hunke, E. C., and J. K. Dukowicz (1997). An elastic-viscous-plastic model +! for sea ice dynamics. {\em J. Phys. Oceanogr.}, {\bf 27}, 1849--1867. +! +! Hunke, E. C. 
(2001). Viscous-Plastic Sea Ice Dynamics with the EVP Model: +! Linearization Issues. {\em Journal of Computational Physics}, {\bf 170}, +! 18--38. +! +! Hunke, E. C., and J. K. Dukowicz (2002). The Elastic-Viscous-Plastic +! Sea Ice Dynamics Model in General Orthogonal Curvilinear Coordinates +! on a Sphere---Incorporation of Metric Terms. {\em Monthly Weather Review}, +! {\bf 130}, 1848--1865. +! +! Hunke, E. C., and J. K. Dukowicz (2003). The sea ice momentum +! equation in the free drift regime. Los Alamos Tech. Rep. LA-UR-03-2219. +! +! Bouillon, S., T. Fichefet, V. Legat and G. Madec (submitted 2013). The +! revised elastic-viscous-plastic method. Ocean Modelling. +! +! author: Elizabeth C. Hunke, LANL +! +! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb (LANL) +! 2004: Block structure added by William Lipscomb +! 2005: Removed boundary calls for stress arrays (WHL) +! 2006: Streamlined for efficiency by Elizabeth Hunke +! Converted to free source form (F90) + + module ice_dyn_evp + + use ice_kinds_mod + use ice_dyn_shared ! everything + +#ifdef AusCOM + use cpl_parameters !, only : use_ocnslope + use cpl_arrays_setup, only : sicemass +#endif + + implicit none + private + public :: evp + save + +!======================================================================= + + contains + +!======================================================================= + +! Elastic-viscous-plastic dynamics driver +! +#ifdef CICE_IN_NEMO +! Wind stress is set during this routine from the values supplied +! via NEMO (unless calc_strair is true). These values are supplied +! rotated on u grid and multiplied by aice. strairxT = 0 in this +! case so operations in evp_prep1 are pointless but carried out to +! minimise code changes. +#endif +! +! author: Elizabeth C. Hunke, LANL + + subroutine evp (dt) + + use ice_atmo, only: Cdn_ocn + use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, & + ice_HaloDestroy, ice_HaloUpdate_stress + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_constants, only: field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_vector, c0 + use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_dyn + use ice_domain_size, only: max_blocks + use ice_flux, only: rdg_conv, rdg_shear, prs_sig, strairxT, strairyT, & + strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & + strtltx, strtlty, strocnx, strocny, strintx, strinty, & + strocnxT, strocnyT, strax, stray, & + stressp_1, stressp_2, stressp_3, stressp_4, & + stressm_1, stressm_2, stressm_3, stressm_4, & + stress12_1, stress12_2, stress12_3, stress12_4 + use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & + tarear, uarear, tinyarea, to_ugrid, t2ugrid_vector, u2tgrid_vector, & + grid_type + use ice_mechred, only: ice_strength + use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + aice_init, aice0, aicen, vicen, strength + use ice_timers, only: timer_dynamics, timer_bound, & + ice_timer_start, ice_timer_stop +!ars599: 22042015: not quite sure about this part do we need +#ifdef ACCESS +!#ifdef CICE_IN_NEMO + use ice_atmo, only: calc_strair +#endif + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + ksub , & ! subcycle step + iblk , & ! block index + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + i, j + + integer (kind=int_kind), dimension(max_blocks) :: & + icellt , & ! no. of cells where icetmask = 1 + icellu ! no. 
of cells where iceumask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & + indxti , & ! compressed index in i-direction + indxtj , & ! compressed index in j-direction + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + tmass , & ! total mass of ice and snow (kg/m^2) + waterx , & ! for ocean stress calculation, x (m/s) + watery , & ! for ocean stress calculation, y (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + forcey , & ! work array: combined atm stress and ocn tilt, y + aiu , & ! ice fraction on u-grid + umass , & ! total mass of ice and snow (u grid) + umassdti ! mass of U-cell/dte (kg/m^2 s) + + real (kind=dbl_kind), allocatable :: fld2(:,:,:,:) + + real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & + strtmp ! stress combinations for momentum equation + + integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & + icetmask, & ! ice extent mask (T-cell) + halomask ! generic halo mask + + type (ice_halo) :: & + halo_info_mask ! ghost cell update info for masked halo + + type (block) :: & + this_block ! block information for current block + + call ice_timer_start(timer_dynamics) ! dynamics + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + allocate(fld2(nx_block,ny_block,2,max_blocks)) + + ! This call is needed only if dt changes during runtime. +! call set_evp_parameters (dt) + + !----------------------------------------------------------------- + ! boundary updates + ! commented out because the ghost cells are freshly + ! updated after cleanup_itd + !----------------------------------------------------------------- + +! call ice_timer_start(timer_bound) +! call ice_HaloUpdate (aice, halo_info, & +! field_loc_center, field_type_scalar) +! call ice_HaloUpdate (vice, halo_info, & +! field_loc_center, field_type_scalar) +! call ice_HaloUpdate (vsno, halo_info, & +! field_loc_center, field_type_scalar) +! call ice_timer_stop(timer_bound) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + do j = 1, ny_block + do i = 1, nx_block + rdg_conv (i,j,iblk) = c0 + rdg_shear(i,j,iblk) = c0 + divu (i,j,iblk) = c0 + shear(i,j,iblk) = c0 + prs_sig(i,j,iblk) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! preparation for dynamics + !----------------------------------------------------------------- + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call evp_prep1 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + aice (:,:,iblk), vice (:,:,iblk), & + vsno (:,:,iblk), tmask (:,:,iblk), & + strairxT(:,:,iblk), strairyT(:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + tmass (:,:,iblk), icetmask(:,:,iblk)) + + enddo ! iblk + !$OMP END PARALLEL DO + +#ifdef AusCOM + sicemass(:,:,:) = tmass(:,:,:) +#endif + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (icetmask, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + !----------------------------------------------------------------- + ! 
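! (tmass -> umass and aice_init -> aiu: the momentum equation is solved at
! U-points, so the combined ice/snow mass and the ice fraction are needed
! there, feeding umassdti and the stress terms in evp_prep2 and stepu below;
! the wind stress is likewise shifted with t2ugrid_vector when it is
! computed on the T grid.)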
convert fields from T to U grid + !----------------------------------------------------------------- + + call to_ugrid(tmass,umass) + call to_ugrid(aice_init, aiu) +#ifdef ACCESS +!#ifdef CICE_IN_NEMO + !---------------------------------------------------------------- + ! Set wind stress to values supplied via NEMO + ! This wind stress is rotated on u grid and multiplied by aice + !---------------------------------------------------------------- + if (.not. calc_strair) then + strairx(:,:,:) = strax(:,:,:) + strairy(:,:,:) = stray(:,:,:) + else +!#endif +!ars599: 22042015: not quite sure about this part do we need +! the second part call t2ugrid_vector if not calling ACCESS?? +!#ifdef ACCESS + !This wind stress is on T grid and multiplied by aice (in get_sbc_ice) + !in T or U grid? (strax is from file) + ! calc_strair in ACCESS ice_in is also F +! strairx = strax +! strairy = stray +#endif + call t2ugrid_vector(strairx) + call t2ugrid_vector(strairy) +#ifdef ACCESS +!#ifdef CICE_IN_NEMO + endif +#endif + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! more preparation for dynamics + !----------------------------------------------------------------- + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call evp_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt(iblk), icellu(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), umass (:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umask (:,:,iblk), & + uocn (:,:,iblk), vocn (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvel_init (:,:,iblk), vvel_init (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk)) + + !----------------------------------------------------------------- + ! ice strength + !----------------------------------------------------------------- + + call ice_strength (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt(iblk), & + indxti (:,iblk), & + indxtj (:,iblk), & + aice (:,:, iblk), & + vice (:,:, iblk), & + aice0 (:,:, iblk), & + aicen (:,:,:,iblk), & + vicen (:,:,:,iblk), & + strength(:,:, iblk) ) + + ! load velocity into array for boundary updates + fld2(:,:,1,iblk) = uvel(:,:,iblk) + fld2(:,:,2,iblk) = vvel(:,:,iblk) + + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (strength, halo_info, & + field_loc_center, field_type_scalar) + ! velocities may have changed in evp_prep2 + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + + ! 
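! (uvel and vvel travel through the halo update together in fld2 so that a
! single vector-field update covers both components; they are copied back
! out of fld2 below and again after each subcycle.)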
unload + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + uvel(:,:,iblk) = fld2(:,:,1,iblk) + vvel(:,:,iblk) = fld2(:,:,2,iblk) + enddo + !$OMP END PARALLEL DO + + if (maskhalo_dyn) & + call ice_HaloMask(halo_info_mask, halo_info, icetmask) + call ice_timer_stop(timer_bound) + + do ksub = 1,ndte ! subcycling + + !----------------------------------------------------------------- + ! stress tensor equation, total surface stress + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,strtmp) + do iblk = 1, nblocks + +! if (trim(yield_curve) == 'ellipse') then + call stress (nx_block, ny_block, & + ksub, icellt(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxt (:,:,iblk), dyt (:,:,iblk), & + dxhy (:,:,iblk), dyhx (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + tarear (:,:,iblk), tinyarea (:,:,iblk), & + strength (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + prs_sig (:,:,iblk), & + rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & + strtmp (:,:,:) ) +! endif ! yield_curve + + !----------------------------------------------------------------- + ! momentum equation + !----------------------------------------------------------------- + + call stepu (nx_block, ny_block, & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), strtmp (:,:,:), & + uocn (:,:,iblk), vocn (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + umassdti (:,:,iblk), fm (:,:,iblk), & + uarear (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + uvel_init(:,:,iblk), vvel_init(:,:,iblk),& + uvel (:,:,iblk), vvel (:,:,iblk)) + + ! load velocity into array for boundary updates + fld2(:,:,1,iblk) = uvel(:,:,iblk) + fld2(:,:,2,iblk) = vvel(:,:,iblk) + enddo + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + if (maskhalo_dyn) then + call ice_HaloUpdate (fld2, halo_info_mask, & + field_loc_NEcorner, field_type_vector) + else + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + endif + + ! unload + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + uvel(:,:,iblk) = fld2(:,:,1,iblk) + vvel(:,:,iblk) = fld2(:,:,2,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bound) + + enddo ! subcycling + + deallocate(fld2) + if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) + + ! Force symmetry across the tripole seam + if (trim(grid_type) == 'tripole') then + if (maskhalo_dyn) then + !------------------------------------------------------- + ! set halomask to zero because ice_HaloMask always keeps + ! 
local copies AND tripole zipper communication + !------------------------------------------------------- + halomask = 0 + call ice_HaloMask(halo_info_mask, halo_info, halomask) + + call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info_mask, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info_mask, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info_mask, & + field_loc_center, field_type_scalar) + + call ice_HaloDestroy(halo_info_mask) + else + call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & + field_loc_center, field_type_scalar) + endif ! maskhalo + endif ! tripole + + !----------------------------------------------------------------- + ! ice-ocean stress + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call evp_finish & + (nx_block, ny_block, & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + uocn (:,:,iblk), vocn (:,:,iblk), & + aiu (:,:,iblk), fm (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + + enddo + !$OMP END PARALLEL DO + + call u2tgrid_vector(strocnxT) ! 
shift + call u2tgrid_vector(strocnyT) + + call ice_timer_stop(timer_dynamics) ! dynamics + + end subroutine evp + +!======================================================================= + +! Computes the rates of strain and internal stress components for +! each of the four corners on each T-grid cell. +! Computes stress terms for the momentum equation +! +! author: Elizabeth C. Hunke, LANL + + subroutine stress (nx_block, ny_block, & + ksub, icellt, & + indxti, indxtj, & + uvel, vvel, & + dxt, dyt, & + dxhy, dyhx, & + cxp, cyp, & + cxm, cym, & + tarear, tinyarea, & + strength, & + stressp_1, stressp_2, & + stressp_3, stressp_4, & + stressm_1, stressm_2, & + stressm_3, stressm_4, & + stress12_1, stress12_2, & + stress12_3, stress12_4, & + shear, divu, & + prs_sig, & + rdg_conv, rdg_shear, & + str ) + + use ice_constants, only: c0, c4, p027, p055, p111, p166, & + p2, p222, p25, p333, p5, puny + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ksub , & ! subcycling step + icellt ! no. of cells where icetmask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + strength , & ! ice strength (N/m) + uvel , & ! x-component of velocity (m/s) + vvel , & ! y-component of velocity (m/s) + dxt , & ! width of T-cell through the middle (m) + dyt , & ! height of T-cell through the middle (m) + dxhy , & ! 0.5*(HTE - HTE) + dyhx , & ! 0.5*(HTN - HTN) + cyp , & ! 1.5*HTE - 0.5*HTE + cxp , & ! 1.5*HTN - 0.5*HTN + cym , & ! 0.5*HTE - 1.5*HTE + cxm , & ! 0.5*HTN - 1.5*HTN + tarear , & ! 1/tarea + tinyarea ! puny*tarea + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 + stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 + stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + prs_sig , & ! replacement pressure, for stress calc + shear , & ! strain rate II component (1/s) + divu , & ! strain rate I component, velocity divergence (1/s) + rdg_conv , & ! convergence term for ridging (1/s) + rdg_shear ! shear term for ridging (1/s) + + real (kind=dbl_kind), dimension(nx_block,ny_block,8), & + intent(out) :: & + str ! stress combinations + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + divune, divunw, divuse, divusw , & ! divergence + tensionne, tensionnw, tensionse, tensionsw, & ! tension + shearne, shearnw, shearse, shearsw , & ! shearing + Deltane, Deltanw, Deltase, Deltasw , & ! Delt + c0ne, c0nw, c0se, c0sw , & ! useful combinations + c1ne, c1nw, c1se, c1sw , & + ssigpn, ssigps, ssigpe, ssigpw , & + ssigmn, ssigms, ssigme, ssigmw , & + ssig12n, ssig12s, ssig12e, ssig12w , & + ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & + csigpne, csigpnw, csigpse, csigpsw , & + csigmne, csigmnw, csigmse, csigmsw , & + csig12ne, csig12nw, csig12se, csig12sw , & + str12ew, str12we, str12ns, str12sn , & + strp_tmp, strm_tmp, tmp + + !----------------------------------------------------------------- + ! 
Initialize + !----------------------------------------------------------------- + + str(:,:,:) = c0 + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icellt + i = indxti(ij) + j = indxtj(ij) + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + ! divergence = e_11 + e_22 + divune = cyp(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & + + cxp(i,j)*vvel(i ,j ) - dxt(i,j)*vvel(i ,j-1) + divunw = cym(i,j)*uvel(i-1,j ) + dyt(i,j)*uvel(i ,j ) & + + cxp(i,j)*vvel(i-1,j ) - dxt(i,j)*vvel(i-1,j-1) + divusw = cym(i,j)*uvel(i-1,j-1) + dyt(i,j)*uvel(i ,j-1) & + + cxm(i,j)*vvel(i-1,j-1) + dxt(i,j)*vvel(i-1,j ) + divuse = cyp(i,j)*uvel(i ,j-1) - dyt(i,j)*uvel(i-1,j-1) & + + cxm(i,j)*vvel(i ,j-1) + dxt(i,j)*vvel(i ,j ) + + ! tension strain rate = e_11 - e_22 + tensionne = -cym(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & + + cxm(i,j)*vvel(i ,j ) + dxt(i,j)*vvel(i ,j-1) + tensionnw = -cyp(i,j)*uvel(i-1,j ) + dyt(i,j)*uvel(i ,j ) & + + cxm(i,j)*vvel(i-1,j ) + dxt(i,j)*vvel(i-1,j-1) + tensionsw = -cyp(i,j)*uvel(i-1,j-1) + dyt(i,j)*uvel(i ,j-1) & + + cxp(i,j)*vvel(i-1,j-1) - dxt(i,j)*vvel(i-1,j ) + tensionse = -cym(i,j)*uvel(i ,j-1) - dyt(i,j)*uvel(i-1,j-1) & + + cxp(i,j)*vvel(i ,j-1) - dxt(i,j)*vvel(i ,j ) + + ! shearing strain rate = e_12 + shearne = -cym(i,j)*vvel(i ,j ) - dyt(i,j)*vvel(i-1,j ) & + - cxm(i,j)*uvel(i ,j ) - dxt(i,j)*uvel(i ,j-1) + shearnw = -cyp(i,j)*vvel(i-1,j ) + dyt(i,j)*vvel(i ,j ) & + - cxm(i,j)*uvel(i-1,j ) - dxt(i,j)*uvel(i-1,j-1) + shearsw = -cyp(i,j)*vvel(i-1,j-1) + dyt(i,j)*vvel(i ,j-1) & + - cxp(i,j)*uvel(i-1,j-1) + dxt(i,j)*uvel(i-1,j ) + shearse = -cym(i,j)*vvel(i ,j-1) - dyt(i,j)*vvel(i-1,j-1) & + - cxp(i,j)*uvel(i ,j-1) + dxt(i,j)*uvel(i ,j ) + + ! Delta (in the denominator of zeta, eta) + Deltane = sqrt(divune**2 + ecci*(tensionne**2 + shearne**2)) + Deltanw = sqrt(divunw**2 + ecci*(tensionnw**2 + shearnw**2)) + Deltase = sqrt(divuse**2 + ecci*(tensionse**2 + shearse**2)) + Deltasw = sqrt(divusw**2 + ecci*(tensionsw**2 + shearsw**2)) + + !----------------------------------------------------------------- + ! on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then + divu(i,j) = p25*(divune + divunw + divuse + divusw) * tarear(i,j) + tmp = p25*(Deltane + Deltanw + Deltase + Deltasw) * tarear(i,j) + rdg_conv(i,j) = -min(divu(i,j),c0) + rdg_shear(i,j) = p5*(tmp-abs(divu(i,j))) + + ! diagnostic only + ! shear = sqrt(tension**2 + shearing**2) + shear(i,j) = p25*tarear(i,j)*sqrt( & + (tensionne + tensionnw + tensionse + tensionsw)**2 & + + (shearne + shearnw + shearse + shearsw)**2) + + endif + + !----------------------------------------------------------------- + ! replacement pressure/Delta ! kg/s + ! save replacement pressure for principal stress calculation + !----------------------------------------------------------------- + c0ne = strength(i,j)/max(Deltane,tinyarea(i,j)) + c0nw = strength(i,j)/max(Deltanw,tinyarea(i,j)) + c0sw = strength(i,j)/max(Deltasw,tinyarea(i,j)) + c0se = strength(i,j)/max(Deltase,tinyarea(i,j)) + prs_sig(i,j) = c0ne*Deltane ! northeast + + c1ne = c0ne*arlx1i + c1nw = c0nw*arlx1i + c1sw = c0sw*arlx1i + c1se = c0se*arlx1i + + c0ne = c1ne*ecci + c0nw = c1nw*ecci + c0sw = c1sw*ecci + c0se = c1se*ecci + + !----------------------------------------------------------------- + ! 
the stresses ! kg/s^2 + ! (1) northeast, (2) northwest, (3) southwest, (4) southeast + !----------------------------------------------------------------- + + stressp_1(i,j) = (stressp_1(i,j) + c1ne*(divune - Deltane)) & + * denom1 + stressp_2(i,j) = (stressp_2(i,j) + c1nw*(divunw - Deltanw)) & + * denom1 + stressp_3(i,j) = (stressp_3(i,j) + c1sw*(divusw - Deltasw)) & + * denom1 + stressp_4(i,j) = (stressp_4(i,j) + c1se*(divuse - Deltase)) & + * denom1 + + stressm_1(i,j) = (stressm_1(i,j) + c0ne*tensionne) * denom1 + stressm_2(i,j) = (stressm_2(i,j) + c0nw*tensionnw) * denom1 + stressm_3(i,j) = (stressm_3(i,j) + c0sw*tensionsw) * denom1 + stressm_4(i,j) = (stressm_4(i,j) + c0se*tensionse) * denom1 + + stress12_1(i,j) = (stress12_1(i,j) + c0ne*shearne*p5) * denom1 + stress12_2(i,j) = (stress12_2(i,j) + c0nw*shearnw*p5) * denom1 + stress12_3(i,j) = (stress12_3(i,j) + c0sw*shearsw*p5) * denom1 + stress12_4(i,j) = (stress12_4(i,j) + c0se*shearse*p5) * denom1 + + !----------------------------------------------------------------- + ! Eliminate underflows. + ! The following code is commented out because it is relatively + ! expensive and most compilers include a flag that accomplishes + ! the same thing more efficiently. This code is cheaper than + ! handling underflows if the compiler lacks a flag; uncomment + ! it in that case. The compiler flag is often described with the + ! phrase "flush to zero". + !----------------------------------------------------------------- + +! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) +! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) +! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) +! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) + +! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) +! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) +! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) +! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) + +! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) +! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) +! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) +! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) + + !----------------------------------------------------------------- + ! combinations of the stresses for the momentum equation ! 
kg/s^2 + !----------------------------------------------------------------- + + ssigpn = stressp_1(i,j) + stressp_2(i,j) + ssigps = stressp_3(i,j) + stressp_4(i,j) + ssigpe = stressp_1(i,j) + stressp_4(i,j) + ssigpw = stressp_2(i,j) + stressp_3(i,j) + ssigp1 =(stressp_1(i,j) + stressp_3(i,j))*p055 + ssigp2 =(stressp_2(i,j) + stressp_4(i,j))*p055 + + ssigmn = stressm_1(i,j) + stressm_2(i,j) + ssigms = stressm_3(i,j) + stressm_4(i,j) + ssigme = stressm_1(i,j) + stressm_4(i,j) + ssigmw = stressm_2(i,j) + stressm_3(i,j) + ssigm1 =(stressm_1(i,j) + stressm_3(i,j))*p055 + ssigm2 =(stressm_2(i,j) + stressm_4(i,j))*p055 + + ssig12n = stress12_1(i,j) + stress12_2(i,j) + ssig12s = stress12_3(i,j) + stress12_4(i,j) + ssig12e = stress12_1(i,j) + stress12_4(i,j) + ssig12w = stress12_2(i,j) + stress12_3(i,j) + ssig121 =(stress12_1(i,j) + stress12_3(i,j))*p111 + ssig122 =(stress12_2(i,j) + stress12_4(i,j))*p111 + + csigpne = p111*stressp_1(i,j) + ssigp2 + p027*stressp_3(i,j) + csigpnw = p111*stressp_2(i,j) + ssigp1 + p027*stressp_4(i,j) + csigpsw = p111*stressp_3(i,j) + ssigp2 + p027*stressp_1(i,j) + csigpse = p111*stressp_4(i,j) + ssigp1 + p027*stressp_2(i,j) + + csigmne = p111*stressm_1(i,j) + ssigm2 + p027*stressm_3(i,j) + csigmnw = p111*stressm_2(i,j) + ssigm1 + p027*stressm_4(i,j) + csigmsw = p111*stressm_3(i,j) + ssigm2 + p027*stressm_1(i,j) + csigmse = p111*stressm_4(i,j) + ssigm1 + p027*stressm_2(i,j) + + csig12ne = p222*stress12_1(i,j) + ssig122 & + + p055*stress12_3(i,j) + csig12nw = p222*stress12_2(i,j) + ssig121 & + + p055*stress12_4(i,j) + csig12sw = p222*stress12_3(i,j) + ssig122 & + + p055*stress12_1(i,j) + csig12se = p222*stress12_4(i,j) + ssig121 & + + p055*stress12_2(i,j) + + str12ew = p5*dxt(i,j)*(p333*ssig12e + p166*ssig12w) + str12we = p5*dxt(i,j)*(p333*ssig12w + p166*ssig12e) + str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) + str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) + + !----------------------------------------------------------------- + ! for dF/dx (u momentum) + !----------------------------------------------------------------- + strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) + strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) + + ! northeast (i,j) + str(i,j,1) = -strp_tmp - strm_tmp - str12ew & + + dxhy(i,j)*(-csigpne + csigmne) + dyhx(i,j)*csig12ne + + ! northwest (i+1,j) + str(i,j,2) = strp_tmp + strm_tmp - str12we & + + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw + + strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) + strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) + + ! southeast (i,j+1) + str(i,j,3) = -strp_tmp - strm_tmp + str12ew & + + dxhy(i,j)*(-csigpse + csigmse) + dyhx(i,j)*csig12se + + ! southwest (i+1,j+1) + str(i,j,4) = strp_tmp + strm_tmp + str12we & + + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw + + !----------------------------------------------------------------- + ! for dF/dy (v momentum) + !----------------------------------------------------------------- + strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) + strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) + + ! northeast (i,j) + str(i,j,5) = -strp_tmp + strm_tmp - str12ns & + - dyhx(i,j)*(csigpne + csigmne) + dxhy(i,j)*csig12ne + + ! southeast (i,j+1) + str(i,j,6) = strp_tmp - strm_tmp - str12sn & + - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se + + strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) + strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) + + ! 
northwest (i+1,j) + str(i,j,7) = -strp_tmp + strm_tmp + str12ns & + - dyhx(i,j)*(csigpnw + csigmnw) + dxhy(i,j)*csig12nw + + ! southwest (i+1,j+1) + str(i,j,8) = strp_tmp - strm_tmp + str12sn & + - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw + + enddo ! ij + + end subroutine stress + +!======================================================================= + + end module ice_dyn_evp + +!======================================================================= diff --git a/source/ice_dyn_shared.F90 b/source/ice_dyn_shared.F90 new file mode 100755 index 00000000..58429cd6 --- /dev/null +++ b/source/ice_dyn_shared.F90 @@ -0,0 +1,914 @@ +! SVN:$Id: ice_dyn_shared.F90 700 2013-08-15 19:17:39Z eclare $ +!======================================================================= + +! Elastic-viscous-plastic sea ice dynamics model code shared with other +! approaches +! +! author: Elizabeth C. Hunke, LANL +! +! 2013: Split from ice_dyn_evp.F90 by Elizabeth Hunke + + module ice_dyn_shared + + use ice_kinds_mod + use ice_constants, only: c0, c1, p01, p001, dragio, rhow + use ice_blocks, only: nx_block, ny_block + use ice_domain_size, only: max_blocks + +#ifdef AusCOM + use cpl_parameters !, only : use_ocnslope + use cpl_arrays_setup, only : sicemass +#endif + + implicit none + private + public :: init_evp, set_evp_parameters, stepu, principal_stress, & + evp_prep1, evp_prep2, evp_finish + save + + ! namelist parameters + + integer (kind=int_kind), public :: & + kdyn , & ! type of dynamics ( 1 = evp, 2 = eap ) + ndte ! number of subcycles: ndte=dt/dte + + logical (kind=log_kind), public :: & + revised_evp ! if true, use revised evp procedure + + ! other EVP parameters + + character (len=char_len), public :: & + yield_curve ! 'ellipse' ('teardrop' needs further testing) + ! + real (kind=dbl_kind), parameter, public :: & +!ars599: 24092014 (CODE: petteri) +!#if !defined(AusCOM) && !defined(ACCICE) + ! tuning parameters, set in namelist +#ifndef AusCOM + dragw = dragio * rhow, & + ! drag coefficient for water on ice *rhow (kg/m^3) +#endif + eyc = 0.36_dbl_kind, & + ! coefficient for calculating the parameter E +!ars599: 24032014 +! not sure if that is right!! +!#if !defined(AusCOM) && !defined(ACCICE) +#ifndef AusCOM + cosw = c1 , & ! cos(ocean turning angle) ! turning angle = 0 + sinw = c0 , & ! sin(ocean turning angle) ! turning angle = 0 +#endif + a_min = p001, & ! minimum ice area + m_min = p01 ! minimum ice mass (kg/m^2) + + real (kind=dbl_kind), public :: & +!ars599: 24092014 (CODE: petteri) +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + ! auscom1 has these in namelist +! dragio , & ! ice-ocn drag coefficient + cosw , & ! cos(ocean turning angle) ! + sinw , & ! sin(ocean turning angle) ! + dragw , & ! drag coefficient for water on ice *rhow (kg/m^3) +#endif + revp , & ! 0 for classic EVP, 1 for revised EVP + ecci , & ! 1/e^2 + dtei , & ! 1/dte, where dte is subcycling timestep (1/s) + dte2T , & ! dte/2T + denom1 ! constants for stress equation + + real (kind=dbl_kind), public :: & ! Bouillon et al relaxation constants + arlx1i , & ! alpha1 for stressp + brlx ! beta for momentum + + real (kind=dbl_kind), allocatable, public :: & + fcor_blk(:,:,:) ! Coriolis parameter (1/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + uvel_init, & ! x-component of velocity (m/s), beginning of timestep + vvel_init ! 
y-component of velocity (m/s), beginning of timestep + +!======================================================================= + + contains + +!======================================================================= + +! Initialize parameters and variables needed for the evp dynamics +! author: Elizabeth C. Hunke, LANL + + subroutine init_evp (dt) + + use ice_blocks, only: nx_block, ny_block + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c2, omega + use ice_domain, only: nblocks + use ice_domain_size, only: max_blocks + use ice_flux, only: rdg_conv, rdg_shear, iceumask, fm, & + stressp_1, stressp_2, stressp_3, stressp_4, & + stressm_1, stressm_2, stressm_3, stressm_4, & + stress12_1, stress12_2, stress12_3, stress12_4 + use ice_state, only: uvel, vvel, divu, shear + use ice_grid, only: ULAT, ULON + use ice_fileunits, only: nu_diag + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i, j, & + iblk ! block index + + call set_evp_parameters (dt) + + if (my_task == master_task) then + write(nu_diag,*) 'dt = ',dt + write(nu_diag,*) 'dte = ',dt/real(ndte,kind=dbl_kind) + write(nu_diag,*) 'tdamp =', eyc*dt + endif + + allocate(fcor_blk(nx_block,ny_block,max_blocks)) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + + ! velocity + uvel(i,j,iblk) = c0 ! m/s + vvel(i,j,iblk) = c0 ! m/s + + ! strain rates + divu (i,j,iblk) = c0 + shear(i,j,iblk) = c0 + rdg_conv (i,j,iblk) = c0 + rdg_shear(i,j,iblk) = c0 + + ! Coriolis parameter +!! fcor_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s + fcor_blk(i,j,iblk) = c2*omega*sin(ULAT(i,j,iblk)) ! 1/s + + ! stress tensor, kg/s^2 + stressp_1 (i,j,iblk) = c0 + stressp_2 (i,j,iblk) = c0 + stressp_3 (i,j,iblk) = c0 + stressp_4 (i,j,iblk) = c0 + stressm_1 (i,j,iblk) = c0 + stressm_2 (i,j,iblk) = c0 + stressm_3 (i,j,iblk) = c0 + stressm_4 (i,j,iblk) = c0 + stress12_1(i,j,iblk) = c0 + stress12_2(i,j,iblk) = c0 + stress12_3(i,j,iblk) = c0 + stress12_4(i,j,iblk) = c0 + + ! ice extent mask on velocity points + iceumask(i,j,iblk) = .false. + + enddo ! i + enddo ! j + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine init_evp + +!======================================================================= + +! Set parameters needed for the evp dynamics. +! Note: This subroutine is currently called only during initialization. +! If the dynamics time step can vary during runtime, it should +! be called whenever the time step changes. +! +! author: Elizabeth C. Hunke, LANL + + subroutine set_evp_parameters (dt) + + use ice_communicate, only: my_task, master_task + use ice_constants, only: p25, c1, c2, c4, p5 + use ice_domain, only: distrb_info + use ice_global_reductions, only: global_minval, global_maxval + use ice_grid, only: dxt, dyt, tmask, tarea + use ice_fileunits, only: nu_diag + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + real (kind=dbl_kind) :: & + Se , & ! stability parameter for revised EVP + xi , & ! stability parameter for revised EVP + gamma , & ! stability parameter for revised EVP + xmin, ymin , & ! minimum grid length for ocean points, m + dte , & ! subcycling timestep for EVP dynamics, s + ecc , & ! (ratio of major to minor ellipse axes)^2 + tdamp2 ! 2*(wave damping time scale T) + + ! elastic time step + dte = dt/real(ndte,kind=dbl_kind) ! s + dtei = c1/dte ! 1/s + + ! major/minor axis length ratio, squared + ecc = c4 + ecci = p25 ! 1/ecc + + ! 
constants for stress equation + tdamp2 = c2*eyc*dt ! s + dte2T = dte/tdamp2 ! ellipse (unitless) + + ! grid min/max + xmin = global_minval(dxt, distrb_info, tmask) + ymin = global_minval(dyt, distrb_info, tmask) + xmin = min(xmin,ymin) ! min(dxt, dyt) + + ! revised evp parameters + Se = 0.86_dbl_kind ! Se > 0.5 + xi = 5.5e-3_dbl_kind ! Sv/Sc < 1 + gamma = p25 * 1.e11_dbl_kind * dt ! rough estimate (P/m~10^5/10^3) + + if (revised_evp) then ! Bouillon et al, Ocean Mod 2013 + revp = c1 + arlx1i = c2*xi/Se ! 1/alpha1 + brlx = c2*Se*xi*gamma/xmin**2 ! beta + +! classic evp parameters (but modified equations) +! arlx1i = dte2T +! brlx = dt*dtei + + else ! Hunke, JCP 2013 with modified stress eq + revp = c0 + arlx1i = dte2T + brlx = dt*dtei + +! revised evp parameters +! arlx1i = c2*xi/Se ! 1/alpha1 +! brlx = c2*Se*xi*gamma/xmin**2 ! beta + + endif + if (my_task == master_task) then + write (nu_diag,*) 'arlx, brlx', c1/arlx1i, brlx + write (nu_diag,*) 'Se, Sv, xi', & + sqrt(brlx/(arlx1i*gamma))*xmin, & + p5*brlx/gamma*xmin**2, & + p5*xmin*sqrt(brlx*arlx1i/gamma) + endif + + denom1 = c1/(c1+arlx1i) + + end subroutine set_evp_parameters + +!======================================================================= + +! Computes quantities needed in the stress tensor (sigma) +! and momentum (u) equations, but which do not change during +! the thermodynamics/transport time step: +! ice mass and ice extent masks +! +! author: Elizabeth C. Hunke, LANL + + subroutine evp_prep1 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + aice, vice, & + vsno, tmask, & + strairxT, strairyT, & + strairx, strairy, & + tmass, icetmask) + + use ice_constants, only: c0, rhoi, rhos + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aice , & ! concentration of ice + vice , & ! volume per unit area of ice (m) + vsno , & ! volume per unit area of snow (m) + strairxT, & ! stress on ice by air, x-direction + strairyT ! stress on ice by air, y-direction + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + strairx , & ! stress on ice by air, x-direction + strairy , & ! stress on ice by air, y-direction + tmass ! total mass of ice and snow (kg/m^2) + + integer (kind=int_kind), dimension (nx_block,ny_block), & + intent(out) :: & + icetmask ! ice extent mask (T-cell) + + ! local variables + + integer (kind=int_kind) :: & + i, j + + logical (kind=log_kind), dimension(nx_block,ny_block) :: & + tmphm ! temporary mask + + do j = 1, ny_block + do i = 1, nx_block + + !----------------------------------------------------------------- + ! total mass of ice and snow, centered in T-cell + ! NOTE: vice and vsno must be up to date in all grid cells, + ! including ghost cells + !----------------------------------------------------------------- + if (tmask(i,j)) then + tmass(i,j) = (rhoi*vice(i,j) + rhos*vsno(i,j)) ! kg/m^2 + else + tmass(i,j) = c0 + endif + + !----------------------------------------------------------------- + ! ice extent mask (T-cells) + !----------------------------------------------------------------- + tmphm(i,j) = tmask(i,j) .and. (aice (i,j) > a_min) & + .and. (tmass(i,j) > m_min) + + !----------------------------------------------------------------- + ! 
prep to convert to U grid + !----------------------------------------------------------------- + ! these quantities include the factor of aice needed for + ! correct treatment of free drift + strairx(i,j) = strairxT(i,j) + strairy(i,j) = strairyT(i,j) + + !----------------------------------------------------------------- + ! augmented mask (land + open ocean) + !----------------------------------------------------------------- + icetmask (i,j) = 0 + + enddo + enddo + + do j = jlo, jhi + do i = ilo, ihi + + ! extend ice extent mask (T-cells) to points around pack + if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. & + tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. & + tmphm(i-1,j-1) .or. tmphm(i,j-1) .or. tmphm(i+1,j-1) ) then + icetmask(i,j) = 1 + endif + + if (.not.tmask(i,j)) icetmask(i,j) = 0 + + enddo + enddo + + end subroutine evp_prep1 + +!======================================================================= +! Computes quantities needed in the stress tensor (sigma) +! and momentum (u) equations, but which do not change during +! the thermodynamics/transport time step: +! --wind stress shift to U grid, +! --ice mass and ice extent masks, +! initializes ice velocity for new points to ocean sfc current +! +! author: Elizabeth C. Hunke, LANL + + subroutine evp_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt, icellu, & + indxti, indxtj, & + indxui, indxuj, & + aiu, umass, & + umassdti, fcor, & + umask, & + uocn, vocn, & + strairx, strairy, & + ss_tltx, ss_tlty, & + icetmask, iceumask, & + fm, dt, & + strtltx, strtlty, & + strocnx, strocny, & + strintx, strinty, & + waterx, watery, & + forcex, forcey, & + stressp_1, stressp_2, & + stressp_3, stressp_4, & + stressm_1, stressm_2, & + stressm_3, stressm_4, & + stress12_1, stress12_2, & + stress12_3, stress12_4, & + uvel_init, vvel_init, & + uvel, vvel) + + use ice_constants, only: c0, c1, gravit + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + integer (kind=int_kind), intent(out) :: & + icellt , & ! no. of cells where icetmask = 1 + icellu ! no. of cells where iceumask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(out) :: & + indxti , & ! compressed index in i-direction + indxtj , & ! compressed index in j-direction + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + umask ! land/boundary mask, thickness (U-cell) + + integer (kind=int_kind), dimension (nx_block,ny_block), & + intent(in) :: & + icetmask ! ice extent mask (T-cell) + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + iceumask ! ice extent mask (U-cell) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aiu , & ! ice fraction on u-grid + umass , & ! total mass of ice and snow (u grid) + fcor , & ! Coriolis parameter (1/s) + strairx , & ! stress on ice by air, x-direction + strairy , & ! stress on ice by air, y-direction + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + ss_tltx , & ! sea surface slope, x-direction (m/m) + ss_tlty ! sea surface slope, y-direction + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + uvel_init,& ! x-component of velocity (m/s), beginning of time step + vvel_init,& ! 
y-component of velocity (m/s), beginning of time step + umassdti, & ! mass of U-cell/dt (kg/m^2 s) + waterx , & ! for ocean stress calculation, x (m/s) + watery , & ! for ocean stress calculation, y (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + forcey ! work array: combined atm stress and ocn tilt, y + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + fm , & ! Coriolis param. * mass in U-cell (kg/s) + stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 + stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 + stress12_1,stress12_2,stress12_3,stress12_4, & ! sigma12 + uvel , & ! x-component of velocity (m/s) + vvel , & ! y-component of velocity (m/s) + strtltx , & ! stress due to sea surface slope, x-direction + strtlty , & ! stress due to sea surface slope, y-direction + strocnx , & ! ice-ocean stress, x-direction + strocny , & ! ice-ocean stress, y-direction + strintx , & ! divergence of internal ice stress, x (N/m^2) + strinty ! divergence of internal ice stress, y (N/m^2) + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + logical (kind=log_kind), dimension(nx_block,ny_block) :: & + iceumask_old ! old-time iceumask + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + waterx (i,j) = c0 + watery (i,j) = c0 + forcex (i,j) = c0 + forcey (i,j) = c0 + umassdti (i,j) = c0 + + if (revp==1) then ! revised evp + stressp_1 (i,j) = c0 + stressp_2 (i,j) = c0 + stressp_3 (i,j) = c0 + stressp_4 (i,j) = c0 + stressm_1 (i,j) = c0 + stressm_2 (i,j) = c0 + stressm_3 (i,j) = c0 + stressm_4 (i,j) = c0 + stress12_1(i,j) = c0 + stress12_2(i,j) = c0 + stress12_3(i,j) = c0 + stress12_4(i,j) = c0 + else if (icetmask(i,j)==0) then ! classic evp + stressp_1 (i,j) = c0 + stressp_2 (i,j) = c0 + stressp_3 (i,j) = c0 + stressp_4 (i,j) = c0 + stressm_1 (i,j) = c0 + stressm_2 (i,j) = c0 + stressm_3 (i,j) = c0 + stressm_4 (i,j) = c0 + stress12_1(i,j) = c0 + stress12_2(i,j) = c0 + stress12_3(i,j) = c0 + stress12_4(i,j) = c0 + endif ! revp + enddo ! i + enddo ! j + + !----------------------------------------------------------------- + ! Identify cells where icetmask = 1 + ! Note: The icellt mask includes north and east ghost cells + ! where stresses are needed. + !----------------------------------------------------------------- + + icellt = 0 + do j = jlo, jhi+1 + do i = ilo, ihi+1 + if (icetmask(i,j) == 1) then + icellt = icellt + 1 + indxti(icellt) = i + indxtj(icellt) = j + endif + enddo + enddo + + !----------------------------------------------------------------- + ! Define iceumask + ! Identify cells where iceumask is true + ! Initialize velocity where needed + !----------------------------------------------------------------- + + icellu = 0 + do j = jlo, jhi + do i = ilo, ihi + + ! ice extent mask (U-cells) + iceumask_old(i,j) = iceumask(i,j) ! save + iceumask(i,j) = (umask(i,j)) .and. (aiu (i,j) > a_min) & + .and. (umass(i,j) > m_min) + + if (iceumask(i,j)) then + icellu = icellu + 1 + indxui(icellu) = i + indxuj(icellu) = j + + ! initialize velocity for new ice points to ocean sfc current + if (.not. iceumask_old(i,j)) then + uvel(i,j) = uocn(i,j) + vvel(i,j) = vocn(i,j) + endif + else + ! 
set velocity and stresses to zero for masked-out points + uvel(i,j) = c0 + vvel(i,j) = c0 + strintx(i,j) = c0 + strinty(i,j) = c0 + strocnx(i,j) = c0 + strocny(i,j) = c0 + endif + + uvel_init(i,j) = uvel(i,j) + vvel_init(i,j) = vvel(i,j) + enddo + enddo + + !----------------------------------------------------------------- + ! Define variables for momentum equation + !----------------------------------------------------------------- + + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + + umassdti(i,j) = umass(i,j)/dt ! kg/m^2 s + + fm(i,j) = fcor(i,j)*umass(i,j) ! Coriolis * mass + + ! for ocean stress + waterx(i,j) = uocn(i,j)*cosw - vocn(i,j)*sinw*sign(c1,fm(i,j)) + watery(i,j) = vocn(i,j)*cosw + uocn(i,j)*sinw*sign(c1,fm(i,j)) + + ! combine tilt with wind stress +#ifndef coupled + ! calculate tilt from geostrophic currents if needed + strtltx(i,j) = -fm(i,j)*vocn(i,j) + strtlty(i,j) = fm(i,j)*uocn(i,j) +#else + strtltx(i,j) = -gravit*umass(i,j)*ss_tltx(i,j) + strtlty(i,j) = -gravit*umass(i,j)*ss_tlty(i,j) +#endif +#ifdef AusCOM + if (.not. use_ocnslope) then !03/06/08: the ocn sfc slope naugty? + strtltx(i,j) = -fm(i,j)*vocn(i,j) + strtlty(i,j) = fm(i,j)*uocn(i,j) + endif +#endif + forcex(i,j) = strairx(i,j) + strtltx(i,j) + forcey(i,j) = strairy(i,j) + strtlty(i,j) + enddo + + end subroutine evp_prep2 + +!======================================================================= + +! Calculation of the surface stresses +! Integration of the momentum equation to find velocity (u,v) +! +! author: Elizabeth C. Hunke, LANL + + subroutine stepu (nx_block, ny_block, & + icellu, Cw, & + indxui, indxuj, & + aiu, str, & + uocn, vocn, & + waterx, watery, & + forcex, forcey, & + umassdti, fm, & + uarear, & + strocnx, strocny, & + strintx, strinty, & + uvel_init, vvel_init,& + uvel, vvel) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellu ! total count when iceumask is true + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvel_init,& ! x-component of velocity (m/s), beginning of timestep + vvel_init,& ! y-component of velocity (m/s), beginning of timestep + aiu , & ! ice fraction on u-grid + waterx , & ! for ocean stress calculation, x (m/s) + watery , & ! for ocean stress calculation, y (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + forcey , & ! work array: combined atm stress and ocn tilt, y + umassdti, & ! mass of U-cell/dt (kg/m^2 s) + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + fm , & ! Coriolis param. * mass in U-cell (kg/s) + uarear ! 1/uarea + + real (kind=dbl_kind), dimension(nx_block,ny_block,8), & + intent(in) :: & + str + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + uvel , & ! x-component of velocity (m/s) + vvel ! y-component of velocity (m/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + strocnx , & ! ice-ocean stress, x-direction + strocny , & ! ice-ocean stress, y-direction + strintx , & ! divergence of internal ice stress, x (N/m^2) + strinty ! divergence of internal ice stress, y (N/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + Cw ! ocean-ice neutral drag coefficient + + ! 
local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + uold, vold , & ! old-time uvel, vvel + vrel , & ! relative ice-ocean velocity + cca,ccb,ab2,cc1,cc2,& ! intermediate variables + taux, tauy ! part of ocean stress term + +#ifdef AusCOM + real :: vel_max = 5.0 !m/s. Dave: set velocity limit to uvel and vvel. +#endif + + !----------------------------------------------------------------- + ! integrate the momentum equation + !----------------------------------------------------------------- +!ars599: 24092014 (CODE: petteri) +#ifdef AusCOM + dragw = dragio * rhow +! ! drag coefficient for water on ice *rhow (kg/m^3) +#endif + + do ij =1, icellu + i = indxui(ij) + j = indxuj(ij) + + uold = uvel(i,j) + vold = vvel(i,j) + + ! (magnitude of relative ocean current)*rhow*drag*aice + vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + (vocn(i,j) - vold)**2) ! m/s + ! ice/ocean stress + taux = vrel*waterx(i,j) ! NOTE this is not the entire + tauy = vrel*watery(i,j) ! ocn stress term + + ! revp = 0 for classic evp, 1 for revised evp + cca = (brlx + revp)*umassdti(i,j) + vrel * cosw ! kg/m^2 s +! ars599: 04032014 (sign_code) +! cice4.1 add AusCOM for +! rotating to opposite direction in the Southern Hemisphere +! New code add sign so no need + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s + + ab2 = cca**2 + ccb**2 + + ! divergence of the internal stress tensor + strintx(i,j) = uarear(i,j)* & + (str(i,j,1) + str(i+1,j,2) + str(i,j+1,3) + str(i+1,j+1,4)) + strinty(i,j) = uarear(i,j)* & + (str(i,j,5) + str(i,j+1,6) + str(i+1,j,7) + str(i+1,j+1,8)) + + ! finally, the velocity components + cc1 = strintx(i,j) + forcex(i,j) + taux & + + umassdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) + cc2 = strinty(i,j) + forcey(i,j) + tauy & + + umassdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) + + uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s + vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 + +#ifdef AusCOM +!20160624 -- Siobhan and Dave's idea to set ice velocity limit to avoid +!transport remap "departure point error": + !if (abs(uvel(i,j) >= vel_max) uvel(i,j) = sign(c1, uvel(i,j)) + !* vel_max + !if (abs(vvel(i,j) >= vel_max) vvel(i,j) = sign(c1, vvel(i,j)) + !* vel_max + uvel(i,j) = sign(min(abs(uvel(i,j)),vel_max),uvel(i,j)) + vvel(i,j) = sign(min(abs(vvel(i,j)),vel_max),vvel(i,j)) +#endif + + !----------------------------------------------------------------- + ! ocean-ice stress for coupling + ! here, strocn includes the factor of aice + !----------------------------------------------------------------- + strocnx(i,j) = taux + strocny(i,j) = tauy + + enddo ! ij + + end subroutine stepu + +!======================================================================= + +! Calculation of the ice-ocean stress. +! ...the sign will be reversed later... +! +! author: Elizabeth C. Hunke, LANL + + subroutine evp_finish (nx_block, ny_block, & + icellu, Cw, & + indxui, indxuj, & + uvel, vvel, & + uocn, vocn, & + aiu, fm, & + strintx, strinty, & + strairx, strairy, & + strocnx, strocny, & + strocnxT, strocnyT) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellu ! total count when iceumask is true + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvel , & ! x-component of velocity (m/s) + vvel , & ! y-component of velocity (m/s) + uocn , & ! 
ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + aiu , & ! ice fraction on u-grid + fm , & ! Coriolis param. * mass in U-cell (kg/s) + strintx , & ! divergence of internal ice stress, x (N/m^2) + strinty , & ! divergence of internal ice stress, y (N/m^2) + strairx , & ! stress on ice by air, x-direction + strairy ! stress on ice by air, y-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + strocnx , & ! ice-ocean stress, x-direction + strocny , & ! ice-ocean stress, y-direction + strocnxT, & ! ice-ocean stress, x-direction + strocnyT ! ice-ocean stress, y-direction + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: vrel + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + Cw ! ocean-ice neutral drag coefficient + + do j = 1, ny_block + do i = 1, nx_block + strocnxT(i,j) = c0 + strocnyT(i,j) = c0 + enddo + enddo + + ! ocean-ice stress for coupling + do ij =1, icellu + i = indxui(ij) + j = indxuj(ij) + + vrel = rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & + (vocn(i,j) - vvel(i,j))**2) ! m/s + +! strocnx(i,j) = strocnx(i,j) & +! - vrel*(uvel(i,j)*cosw - vvel(i,j)*sinw) * aiu(i,j) +! strocny(i,j) = strocny(i,j) & +! - vrel*(vvel(i,j)*cosw + uvel(i,j)*sinw) * aiu(i,j) + + ! update strocnx to most recent iterate and complete the term + vrel = vrel * aiu(i,j) + strocnx(i,j) = vrel*((uocn(i,j) - uvel(i,j))*cosw & + - (vocn(i,j) - vvel(i,j))*sinw*sign(c1,fm(i,j))) + strocny(i,j) = vrel*((vocn(i,j) - vvel(i,j))*cosw & + + (uocn(i,j) - uvel(i,j))*sinw*sign(c1,fm(i,j))) + + ! Hibler/Bryan stress + ! the sign is reversed later, therefore negative here +! strocnx(i,j) = -(strairx(i,j) + strintx(i,j)) +! strocny(i,j) = -(strairy(i,j) + strinty(i,j)) + + ! Prepare to convert to T grid + ! divide by aice for coupling + strocnxT(i,j) = strocnx(i,j) / aiu(i,j) + strocnyT(i,j) = strocny(i,j) / aiu(i,j) + enddo + + end subroutine evp_finish + +!======================================================================= + +! Computes principal stresses for comparison with the theoretical +! yield curve; northeast values +! +! author: Elizabeth C. Hunke, LANL + + subroutine principal_stress(nx_block, ny_block, & + stressp_1, stressm_1, & + stress12_1, prs_sig, & + sig1, sig2) + + use ice_constants, only: spval_dbl, puny, p5, c4 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + stressp_1 , & ! sigma11 + sigma22 + stressm_1 , & ! sigma11 - sigma22 + stress12_1, & ! sigma12 + prs_sig ! replacement pressure, for stress calc + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & + sig1 , & ! principal stress component + sig2 ! principal stress component + + ! 
local variables + + integer (kind=int_kind) :: i, j + + do j = 1, ny_block + do i = 1, nx_block + if (prs_sig(i,j) > puny) then + sig1(i,j) = (p5*(stressp_1(i,j) & + + sqrt(stressm_1(i,j)**2+c4*stress12_1(i,j)**2))) & + / prs_sig(i,j) + sig2(i,j) = (p5*(stressp_1(i,j) & + - sqrt(stressm_1(i,j)**2+c4*stress12_1(i,j)**2))) & + / prs_sig(i,j) + else + sig1(i,j) = spval_dbl + sig2(i,j) = spval_dbl + endif + enddo + enddo + + end subroutine principal_stress + +!======================================================================= + + end module ice_dyn_shared + +!======================================================================= diff --git a/source/ice_fileunits.F90 b/source/ice_fileunits.F90 new file mode 100755 index 00000000..ad0004a7 --- /dev/null +++ b/source/ice_fileunits.F90 @@ -0,0 +1,299 @@ +! SVN:$Id: ice_fileunits.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= +! +! This module contains an I/O unit manager for tracking, assigning +! and reserving I/O unit numbers. +! +! There are three reserved I/O units set as parameters in this +! module. The default units for standard input (stdin), standard +! output (stdout) and standard error (stderr). These are currently +! set as units 5,6,6, respectively as that is the most commonly +! used among vendors. However, the user may change these if those +! default units are conflicting with other models or if the +! vendor is using different values. +! +! The maximum number of I/O units per node is currently set by +! the parameter ice\_IOMaxUnit. +! +! author: Elizabeth C. Hunke, LANL +! 2006: ECH converted to free source form (F90) +! 2007: ECH added dynamic file units, modified from POP_IOUnitsMod.F90 + + module ice_fileunits + + use ice_kinds_mod +#ifdef CCSMCOUPLED + use shr_file_mod, only : shr_file_getunit, shr_file_freeunit +#endif + + implicit none + private + public :: init_fileunits, get_fileunit, flush_fileunit, & + release_fileunit, release_all_fileunits + save + + character (len=char_len), public :: & + diag_type ! 'stdout' or 'file' + + logical (log_kind), public :: & + bfbflag ! logical for bit-for-bit computations + + integer (kind=int_kind), public :: & + nu_grid , & ! grid file + nu_kmt , & ! land mask file + nu_nml , & ! namelist input file + nu_forcing , & ! forcing data file + nu_dump , & ! dump file for restarting + nu_restart , & ! restart input file + nu_dump_age , & ! dump file for restarting ice age tracer + nu_restart_age, & ! restart input file for ice age tracer + nu_dump_FY , & ! dump file for restarting first-year area tracer + nu_restart_FY , & ! restart input file for first-year area tracer + nu_dump_lvl , & ! dump file for restarting level ice tracers + nu_restart_lvl, & ! restart input file for level ice tracers + nu_dump_pond , & ! dump file for restarting melt pond tracer + nu_restart_pond,& ! restart input file for melt pond tracer + nu_dump_aero , & ! dump file for restarting aerosol tracer + nu_restart_aero,& ! restart input file for aerosol tracer + nu_dump_bgc , & ! dump file for restarting bgc + nu_restart_bgc, & ! restart input file for bgc + nu_dump_hbrine, & ! dump file for restarting hbrine + nu_restart_hbrine, & ! restart input file for hbrine + nu_dump_eap , & ! dump file for restarting eap dynamics + nu_restart_eap, & ! restart input file for eap dynamics + nu_rst_pointer, & ! pointer to latest restart file + nu_history , & ! binary history output file + nu_hdr , & ! header file for binary history output + nu_diag ! 
diagnostics output file + +#ifndef AusCOM + character (32), parameter, public :: & + nml_filename = 'ice_in' ! namelist input file name +#else +!ars599: 25032014 change to public + character (32), parameter, public :: & +! character (32), parameter :: & + nml_filename = 'cice_in.nml' ! namelist input file name +#endif + + integer (kind=int_kind), parameter, public :: & + ice_stdin = 5, & ! reserved unit for standard input + ice_stdout = 6, & ! reserved unit for standard output + ice_stderr = 6 ! reserved unit for standard error + + integer (kind=int_kind), parameter :: & + ice_IOUnitsMinUnit = NUMIN, & ! do not use unit numbers below + ice_IOUnitsMaxUnit = NUMAX ! or above + + logical (kind=log_kind), dimension(ice_IOUnitsMaxUnit) :: & + ice_IOUnitsInUse ! flag=.true. if unit currently open + + ! instance control + integer (kind=int_kind), public :: inst_index + character(len=16) , public :: inst_name + character(len=16) , public :: inst_suffix + +!======================================================================= + + contains + +!======================================================================= + +! This routine grabs needed unit numbers. +! nu_diag is set to 6 (stdout) but may be reset later by the namelist. +! nu_nml is obtained separately. + + subroutine init_fileunits + +#ifndef AusCOM + nu_diag = ice_stdout ! default +#else + nu_diag = 111 + open(nu_diag,file='ice_diag_out',form='formatted') ! status='new') +#endif + + ice_IOUnitsInUse = .false. + ice_IOUnitsInUse(ice_stdin) = .true. ! reserve unit 5 + ice_IOUnitsInUse(ice_stdout) = .true. ! reserve unit 6 + ice_IOUnitsInUse(ice_stderr) = .true. + + call get_fileunit(nu_grid) + call get_fileunit(nu_kmt) + call get_fileunit(nu_forcing) + call get_fileunit(nu_dump) + call get_fileunit(nu_restart) + call get_fileunit(nu_dump_age) + call get_fileunit(nu_restart_age) + call get_fileunit(nu_dump_FY) + call get_fileunit(nu_restart_FY) + call get_fileunit(nu_dump_lvl) + call get_fileunit(nu_restart_lvl) + call get_fileunit(nu_dump_pond) + call get_fileunit(nu_restart_pond) + call get_fileunit(nu_dump_aero) + call get_fileunit(nu_restart_aero) + call get_fileunit(nu_dump_bgc) + call get_fileunit(nu_restart_bgc) + call get_fileunit(nu_dump_hbrine) + call get_fileunit(nu_restart_hbrine) + call get_fileunit(nu_dump_eap) + call get_fileunit(nu_restart_eap) + call get_fileunit(nu_rst_pointer) + call get_fileunit(nu_history) + call get_fileunit(nu_hdr) + + end subroutine init_fileunits + +!======================================================================= + +! This routine returns the next available I/O unit and marks it as +! in use to prevent any later use. +! Note that {\em all} processors must call this routine even if only +! the master task is doing the I/O. This is necessary insure that +! the units remain synchronized for other parallel I/O functions. + + subroutine get_fileunit(iunit) + + integer (kind=int_kind), intent(out) :: & + iunit ! next free I/O unit + + ! local variables + +#ifndef CCSMCOUPLED + integer (kind=int_kind) :: n ! dummy loop index + logical (kind=log_kind) :: alreadyInUse +#endif + +#ifdef CCSMCOUPLED + iunit = shr_file_getUnit() +#else + + srch_units: do n=ice_IOUnitsMinUnit, ice_IOUnitsMaxUnit + if (.not. ice_IOUnitsInUse(n)) then ! I found one, I found one + + !*** make sure not in use by library or calling routines + INQUIRE (unit=n,OPENED=alreadyInUse) + + if (.not. alreadyInUse) then + iunit = n ! return the free unit number + ice_IOUnitsInUse(iunit) = .true. ! 
mark iunit as being in use + exit srch_units + else + !*** if inquire shows this unit in use, mark it as + !*** in use to prevent further queries + ice_IOUnitsInUse(n) = .true. + endif + endif + end do srch_units + + if (iunit > ice_IOUnitsMaxUnit) stop 'ice_IOUnitsGet: No free units' + +#endif + + end subroutine get_fileunit + +!======================================================================= + +! This routine releases unit numbers at the end of a run. + + subroutine release_all_fileunits + + call release_fileunit(nu_grid) + call release_fileunit(nu_kmt) + call release_fileunit(nu_forcing) + call release_fileunit(nu_dump) + call release_fileunit(nu_restart) + call release_fileunit(nu_dump_age) + call release_fileunit(nu_restart_age) + call release_fileunit(nu_dump_FY) + call release_fileunit(nu_restart_FY) + call release_fileunit(nu_dump_lvl) + call release_fileunit(nu_restart_lvl) + call release_fileunit(nu_dump_pond) + call release_fileunit(nu_restart_pond) + call release_fileunit(nu_dump_aero) + call release_fileunit(nu_restart_aero) + call release_fileunit(nu_dump_bgc) + call release_fileunit(nu_restart_bgc) + call release_fileunit(nu_dump_hbrine) + call release_fileunit(nu_restart_hbrine) + call release_fileunit(nu_dump_eap) + call release_fileunit(nu_restart_eap) + call release_fileunit(nu_rst_pointer) + call release_fileunit(nu_history) + call release_fileunit(nu_hdr) + if (nu_diag /= ice_stdout) call release_fileunit(nu_diag) + + end subroutine release_all_fileunits + +!======================================================================= + +! This routine releases an I/O unit (marks it as available). +! Note that {\em all} processors must call this routine even if only +! the master task is doing the I/O. This is necessary insure that +! the units remain synchronized for other parallel I/O functions. + + subroutine release_fileunit(iunit) + + integer (kind=int_kind), intent(in) :: & + iunit ! I/O unit to be released + +#ifdef CCSMCOUPLED + call shr_file_freeUnit(iunit) +#else +! check for proper unit number + if (iunit < 1 .or. iunit > ice_IOUnitsMaxUnit) then + write (*,*) 'XXX Warning -- bad unit: iunit = ', iunit + !stop 'release_fileunit: bad unit' + endif + +! mark the unit as not in use + ice_IOUnitsInUse(iunit) = .false. ! that was easy... +#endif + + end subroutine release_fileunit + +!======================================================================= + + +! This routine enables a user to flush the output from an IO unit +! (typically stdout) to force output when the system is buffering +! such output. Because this system function is system dependent, +! we only support this wrapper and users are welcome to insert the +! code relevant to their local machine. In the case where the CCSM +! libraries are available, the shared routine for sys flush can be +! used (and is provided here under a preprocessor option). + + subroutine flush_fileunit(iunit) + +#ifdef CCSMCOUPLED + use shr_sys_mod, only : shr_sys_flush +#endif + + integer (kind=int_kind), intent(in) :: & + iunit ! I/O unit to be flushed + +!----------------------------------------------------------------------- +! +! insert your system code here +! 
+!----------------------------------------------------------------------- + +#ifdef CCSMCOUPLED + call shr_sys_flush(iunit) +#else +#if (defined IRIX64 || defined CRAY || defined OSF1 || defined SUNOS || defined LINUX || defined NEC_SX || defined UNICOSMP) + call flush(iunit) +#endif +#if (defined AIX) + call flush_(iunit) +#endif +#endif + + end subroutine flush_fileunit + +!======================================================================= + + end module ice_fileunits + +!======================================================================= diff --git a/source/ice_firstyear.F90 b/source/ice_firstyear.F90 new file mode 100755 index 00000000..5dc78c08 --- /dev/null +++ b/source/ice_firstyear.F90 @@ -0,0 +1,173 @@ +! SVN:$Id: ice_firstyear.F90 746 2013-09-28 22:47:56Z eclare $ +!======================================================================= +! +! First year concentration tracer for sea ice +! +! see +! Armour, K. C., C. M. Bitz, L. Thompson and E. C. Hunke (2011). Controls +! on Arctic sea ice from first-year and multi-year ice survivability. +! J. Climate, 24, 2378-2390. doi: 10.1175/2010JCLI3823.1. +! +! authors C. Bitz, University of Washington, modified from ice_age module +! +! 2012: E. Hunke adopted from CESM into CICE, changed name from ice_FY.F90 +! + module ice_firstyear + + use ice_kinds_mod + use ice_constants + + implicit none + + private + public :: init_FY, update_FYarea, write_restart_FY, read_restart_FY + + logical (kind=log_kind), public :: & + restart_FY ! if .true., read FY tracer restart file + +!======================================================================= + + contains + +!======================================================================= + +! Initialize ice FY tracer (call prior to reading restart data) + + subroutine init_FY(nx_block, ny_block, ncat, firstyear) + + integer(kind=int_kind), intent(in) :: & + nx_block , & + ny_block , & + ncat + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(out) :: firstyear + + firstyear(:,:,:) = c0 + + end subroutine init_FY + +!======================================================================= + +! Zero ice FY tracer on fixed day of year. Zeroing FY ice tracer promotes +! ice to MY ice. Unfortunately some frazil ice may grow before the +! zeroing date and thus get promoted to MY ice too soon. +! Bummer. + + subroutine update_FYarea (nx_block, ny_block, & + dt, icells, & + indxi, indxj, & + nhmask, shmask, & + yday, FYarea) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with ice present + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + real (kind=dbl_kind), intent(in) :: & + dt , & ! time step + yday ! day of the year + + logical (kind=log_kind), dimension(nx_block,ny_block), & + intent(in) :: & + nhmask, shmask + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + FYarea + + ! local variables + + integer (kind=int_kind) :: i, j, ij + + if ((yday >= 259._dbl_kind) .and. & + (yday < 259._dbl_kind+dt/secday)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (nhmask(i,j)) FYarea(i,j) = c0; + enddo + endif + + if ((yday >= 75._dbl_kind) .and. 
& + (yday < 75._dbl_kind+dt/secday)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (shmask(i,j)) FYarea(i,j) = c0; + enddo + endif + + end subroutine update_FYarea + +!======================================================================= + +! Dumps all values needed for restarting +! author Elizabeth C. Hunke, LANL + + subroutine write_restart_FY() + + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_diag, nu_dump_FY + use ice_flux, only: frz_onset + use ice_state, only: trcrn, nt_FY + use ice_restart, only: write_restart_field + + ! local variables + + logical (kind=log_kind) :: diag + + diag = .true. + + !----------------------------------------------------------------- + + call write_restart_field(nu_dump_FY,0,trcrn(:,:,nt_FY,:,:),'ruf8', & + 'FY',ncat,diag) + call write_restart_field(nu_dump_FY,0,frz_onset,'ruf8', & + 'frz_onset',1,diag) + + end subroutine write_restart_FY + +!======================================================================= + +! Reads all values needed for an ice FY restart +! author Elizabeth C. Hunke, LANL + + subroutine read_restart_FY() + + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_diag, nu_restart_FY + use ice_flux, only: frz_onset + use ice_state, only: trcrn, nt_FY + use ice_restart, only: read_restart_field + + ! local variables + + logical (kind=log_kind) :: & + diag + + diag = .true. + + if (my_task == master_task) write(nu_diag,*) 'min/max first-year ice area' + + call read_restart_field(nu_restart_FY,0,trcrn(:,:,nt_FY,:,:),'ruf8', & + 'FY',ncat,diag,field_loc_center,field_type_scalar) + + if (my_task == master_task) write(nu_diag,*) 'min/max frz_onset' + + call read_restart_field(nu_restart_FY,0,frz_onset,'ruf8', & + 'frz_onset',1,diag,field_loc_center,field_type_scalar) + + end subroutine read_restart_FY + +!======================================================================= + + end module ice_firstyear + +!======================================================================= diff --git a/source/ice_flux.F90 b/source/ice_flux.F90 new file mode 100755 index 00000000..d4dd93f7 --- /dev/null +++ b/source/ice_flux.F90 @@ -0,0 +1,1004 @@ +! SVN:$Id: ice_flux.F90 936 2015-03-17 15:46:44Z eclare $ +!======================================================================= + +! Flux variable declarations; these include fields sent from the coupler +! ("in"), sent to the coupler ("out"), written to diagnostic history files +! ("diagnostic"), and used internally ("internal"). +! +! author Elizabeth C. Hunke, LANL +! +! 2004: Block structure added by William Lipscomb +! Swapped, revised, and added some subroutines +! 2006: Converted to free source form (F90) by Elizabeth Hunke + + module ice_flux + + use ice_kinds_mod + use ice_fileunits, only: nu_diag + use ice_blocks, only: nx_block, ny_block + use ice_domain_size, only: max_blocks, ncat, max_aero, max_nstrm, nilyr + use ice_constants, only: c0, c1, c5, c10, c20, c180, dragio, & + depressT, stefan_boltzmann, Tffresh, emissivity + + implicit none + private + public :: init_coupler_flux, init_history_therm, init_history_dyn, & + init_flux_ocn, init_flux_atm, scale_fluxes, merge_fluxes!, & +!ars599: 06042016: something wrong with set_sfcflux that could not be +! resolved, so it is commented out of the public list here + ! set_sfcflux + save + + !----------------------------------------------------------------- + !
Dynamics component + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + + ! in from atmos (if .not.calc_strair) + strax , & ! wind stress components (N/m^2) + stray , & ! + + ! in from ocean + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + ss_tltx , & ! sea surface slope, x-direction (m/m) + ss_tlty , & ! sea surface slope, y-direction + + ! out to atmosphere + strairxT, & ! stress on ice by air, x-direction + strairyT, & ! stress on ice by air, y-direction + + ! out to ocean T-cell (kg/m s^2) + ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling + strocnxT, & ! ice-ocean stress, x-direction + strocnyT ! ice-ocean stress, y-direction + + ! diagnostic + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + sig1 , & ! principal stress component + sig2 , & ! principal stress component + strairx , & ! stress on ice by air, x-direction + strairy , & ! stress on ice by air, y-direction + strocnx , & ! ice-ocean stress, x-direction + strocny , & ! ice-ocean stress, y-direction + strtltx , & ! stress due to sea surface slope, x-direction + strtlty , & ! stress due to sea surface slope, y-direction + strintx , & ! divergence of internal ice stress, x (N/m^2) + strinty , & ! divergence of internal ice stress, y (N/m^2) + daidtd , & ! ice area tendency due to transport (1/s) + dvidtd , & ! ice volume tendency due to transport (m/s) + dvsdtd , & ! snow volume tendency due to transport (m/s) + dagedtd , & ! ice age tendency due to transport (s/s) + dardg1dt, & ! rate of area loss by ridging ice (1/s) + dardg2dt, & ! rate of area gain by new ridges (1/s) + dvirdgdt, & ! rate of ice volume ridged (m/s) + opening ! rate of opening due to divergence/shear (1/s) + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public :: & + ! ridging diagnostics in categories + dardg1ndt, & ! rate of area loss by ridging ice (1/s) + dardg2ndt, & ! rate of area gain by new ridges (1/s) + dvirdgndt, & ! rate of ice volume ridged (m/s) + aparticn, & ! participation function + krdgn, & ! mean ridge thickness/thickness of ridging ice + ardgn, & ! fractional area of ridged ice + vrdgn, & ! volume of ridged ice + araftn, & ! rafting ice area + vraftn, & ! rafting ice volume + aredistn, & ! redistribution function: fraction of new ridge area + vredistn ! redistribution function: fraction of new ridge volume + + ! restart + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + ! ice stress tensor in each corner of T cell (kg/s^2) + stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 + stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 + stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 + + logical (kind=log_kind), & + dimension (nx_block,ny_block,max_blocks), public :: & + iceumask ! ice extent mask (U-cell) + + ! internal + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + prs_sig , & ! replacement pressure, for stress calc + fm ! Coriolis param. * mass in U-cell (kg/s) + + !----------------------------------------------------------------- + ! Thermodynamic component + !----------------------------------------------------------------- + + ! in from atmosphere (if calc_Tsfc) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + zlvl , & ! atm level height (m) + uatm , & ! 
wind velocity components (m/s) + vatm , & + wind , & ! wind speed (m/s) + potT , & ! air potential temperature (K) + Tair , & ! air temperature (K) + Qa , & ! specific humidity (kg/kg) + rhoa , & ! air density (kg/m^3) + swvdr , & ! sw down, visible, direct (W/m^2) + swvdf , & ! sw down, visible, diffuse (W/m^2) + swidr , & ! sw down, near IR, direct (W/m^2) + swidf , & ! sw down, near IR, diffuse (W/m^2) + flw ! incoming longwave radiation (W/m^2) + + ! in from atmosphere (if .not. Tsfc_calc) + ! required for coupling to HadGEM3 + ! NOTE: when in CICE_IN_NEMO mode, these are gridbox mean fields, + ! not per ice area. When in standalone mode, these are per ice area. + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public :: & + fsurfn_f , & ! net flux to top surface, excluding fcondtop + fcondtopn_f, & ! downward cond flux at top surface (W m-2) + fsensn_f , & ! sensible heat flux (W m-2) + flatn_f ! latent heat flux (W m-2) + + ! in from atmosphere + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + frain , & ! rainfall rate (kg/m^2 s) + fsnow ! snowfall rate (kg/m^2 s) + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,max_aero,max_blocks), public :: & + faero_atm ! aerosol deposition rate (kg/m^2 s) + + ! in from ocean + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + sss , & ! sea surface salinity (ppt) + sst , & ! sea surface temperature (C) + frzmlt , & ! freezing/melting potential (W/m^2) + frzmlt_init, & ! frzmlt used in current time step (W/m^2) + Tf , & ! freezing temperature (C) + qdp , & ! deep ocean heat flux (W/m^2), negative upward + hmix , & ! mixed layer depth (m) + daice_da ! data assimilation concentration increment rate + ! (concentration s-1)(only used in hadgem drivers) + + ! out to atmosphere (if calc_Tsfc) + ! note Tsfc is in ice_state.F + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + fsens , & ! sensible heat flux (W/m^2) + flat , & ! latent heat flux (W/m^2) + fswabs , & ! shortwave flux absorbed in ice and ocean (W/m^2) + fswint_ai, & ! SW absorbed in ice interior below surface (W/m^2) + flwout , & ! outgoing longwave radiation (W/m^2) + Tref , & ! 2m atm reference temperature (K) + Qref , & ! 2m atm reference spec humidity (kg/kg) + Uref , & ! 10m atm reference wind speed (m/s) + evap ! evaporative water flux (kg/m^2/s) + + ! albedos aggregated over categories (if calc_Tsfc) + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), public :: & + alvdr , & ! visible, direct (fraction) + alidr , & ! near-ir, direct (fraction) + alvdf , & ! visible, diffuse (fraction) + alidf , & ! near-ir, diffuse (fraction) + ! grid-box-mean versions + alvdr_ai, & ! visible, direct (fraction) + alidr_ai, & ! near-ir, direct (fraction) + alvdf_ai, & ! visible, diffuse (fraction) + alidf_ai, & ! near-ir, diffuse (fraction) + ! components for history + albice , & ! bare ice albedo + albsno , & ! snow albedo + albpnd , & ! melt pond albedo + apeff_ai ! effective pond area used for radiation calculation + + real (kind=dbl_kind), & + dimension(nx_block,ny_block,max_blocks,max_nstrm), public :: & + albcnt ! counter for zenith angle + + ! out to ocean + ! (Note CICE_IN_NEMO does not use these for coupling. + ! It uses fresh_ai,fsalt_ai,fhocn_ai and fswthru_ai) + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + fpond , & ! fresh water flux to ponds (kg/m^2/s) + fresh , & ! fresh water flux to ocean (kg/m^2/s) + fsalt , & ! 
salt flux to ocean (kg/m^2/s) + fhocn , & ! net heat flux to ocean (W/m^2) + fswthru ! shortwave penetrating to ocean (W/m^2) + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,max_aero,max_blocks), public :: & + faero_ocn ! aerosol flux to ocean (kg/m^2/s) + + ! internal + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,max_blocks), public :: & + fswfac , & ! for history + scale_factor! scaling factor for shortwave components + + logical (kind=log_kind), public :: & + update_ocn_f, & ! if true, update fresh water and salt fluxes + l_mpond_fresh ! if true, include freshwater feedback from meltponds + ! when running in ice-ocean or coupled configuration + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat,max_blocks), public :: & + meltsn , & ! snow melt in category n (m) + melttn , & ! top melt in category n (m) + meltbn , & ! bottom melt in category n (m) + congeln , & ! congelation ice formation in category n (m) + snoicen ! snow-ice formation in category n (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat,max_blocks), public :: & + keffn_top , & ! effective thermal conductivity of the top ice layer + Tn_top ! on categories (W/m^2/K) + + ! for biogeochemistry + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat,max_blocks), public :: & + hin_old , & ! old ice thickness + dsnown ! change in snow thickness in category n (m) + + !----------------------------------------------------------------- + ! quantities passed from ocean mixed layer to atmosphere + ! (for running with CAM) + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + strairx_ocn , & ! stress on ocean by air, x-direction + strairy_ocn , & ! stress on ocean by air, y-direction + fsens_ocn , & ! sensible heat flux (W/m^2) + flat_ocn , & ! latent heat flux (W/m^2) + flwout_ocn , & ! outgoing longwave radiation (W/m^2) + evap_ocn , & ! evaporative water flux (kg/m^2/s) + alvdr_ocn , & ! visible, direct (fraction) + alidr_ocn , & ! near-ir, direct (fraction) + alvdf_ocn , & ! visible, diffuse (fraction) + alidf_ocn , & ! near-ir, diffuse (fraction) + Tref_ocn , & ! 2m atm reference temperature (K) + Qref_ocn ! 2m atm reference spec humidity (kg/kg) + + !----------------------------------------------------------------- + ! diagnostic + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + fsurf , & ! net surface heat flux (excluding fcondtop)(W/m^2) + fcondtop,&! top surface conductive flux (W/m^2) + congel, & ! basal ice growth (m/step-->cm/day) + frazil, & ! frazil ice growth (m/step-->cm/day) + snoice, & ! snow-ice formation (m/step-->cm/day) + meltt , & ! top ice melt (m/step-->cm/day) + melts , & ! snow melt (m/step-->cm/day) + meltb , & ! basal ice melt (m/step-->cm/day) + meltl , & ! lateral ice melt (m/step-->cm/day) + dsnow, & ! change in snow thickness (m/step-->cm/day) + daidtt, & ! ice area tendency thermo. (s^-1) + dvidtt, & ! ice volume tendency thermo. (m/s) + dvsdtt, & ! snow volume tendency thermo. (m/s) + dagedtt,& ! ice age tendency thermo. (s/s) + mlt_onset, &! day of year that sfc melting begins + frz_onset ! day of year that freezing begins (congel or frazil) + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public :: & + fsurfn, & ! category fsurf + fcondtopn,& ! category fcondtop + fsensn, & ! category sensible heat flux + flatn ! 
category latent heat flux + + ! As above but these remain grid box mean values i.e. they are not + ! divided by aice at end of ice_dynamics. These are used in + ! CICE_IN_NEMO for coupling and also for generating + ! ice diagnostics and history files as these are more accurate. + ! (The others suffer from problem of incorrect values at grid boxes + ! that change from an ice free state to an icy state.) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + fresh_ai, & ! fresh water flux to ocean (kg/m^2/s) + fsalt_ai, & ! salt flux to ocean (kg/m^2/s) + fhocn_ai, & ! net heat flux to ocean (W/m^2) + fswthru_ai ! shortwave penetrating to ocean (W/m^2) + + ! Used with data assimilation in hadgem drivers + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + fresh_da, & ! fresh water flux to ocean due to data assim (kg/m^2/s) + fsalt_da ! salt flux to ocean due to data assimilation(kg/m^2/s) + + !----------------------------------------------------------------- + ! internal + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + rside , & ! fraction of ice that melts laterally + fsw , & ! incoming shortwave radiation (W/m^2) + coszen , & ! cosine solar zenith angle, < 0 for sun below horizon + rdg_conv, & ! convergence term for ridging (1/s) + rdg_shear ! shear term for ridging (1/s) + + real (kind=dbl_kind), dimension(nx_block,ny_block,nilyr+1,max_blocks), public :: & + salinz ,& ! initial salinity profile (ppt) + Tmltz ! initial melting temperature (^oC) + +!======================================================================= + + contains + +!======================================================================= + +! Initialize all fluxes exchanged with flux coupler +! and some data-derived fields +! +! author Elizabeth C. Hunke, LANL + + subroutine init_coupler_flux + use ice_constants, only: p001,vonkar,zref,iceruf + use ice_therm_shared, only: ktherm + use ice_zbgc_shared, only: flux_bio + use ice_atmo, only: Cdn_atm + + integer (kind=int_kind) :: n + + logical (kind=log_kind), parameter :: & + l_winter = .true. , & ! winter/summer default switch + l_spring = .false. ! spring example + + real (kind=dbl_kind) :: fcondtopn_d(6), fsurfn_d(6) + + data fcondtopn_d / -50.0_dbl_kind,-17.0_dbl_kind,-12.0_dbl_kind, & + -9.0_dbl_kind, -7.0_dbl_kind, -3.0_dbl_kind / + data fsurfn_d / 0.20_dbl_kind, 0.15_dbl_kind, 0.10_dbl_kind, & + 0.05_dbl_kind, 0.01_dbl_kind, 0.01_dbl_kind / + + !----------------------------------------------------------------- + ! fluxes received from atmosphere + !----------------------------------------------------------------- + zlvl (:,:,:) = c10 ! atm level height (m) + rhoa (:,:,:) = 1.3_dbl_kind ! air density (kg/m^3) + uatm (:,:,:) = c5 ! wind velocity (m/s) + vatm (:,:,:) = c5 + strax (:,:,:) = 0.05_dbl_kind + stray (:,:,:) = 0.05_dbl_kind + fsnow (:,:,:) = c0 ! snowfall rate (kg/m2/s) + ! fsnow must be 0 for exact restarts + if (l_spring) then + !typical spring values + potT (:,:,:) = 263.15_dbl_kind ! air potential temp (K) + Tair (:,:,:) = 263.15_dbl_kind ! air temperature (K) + Qa (:,:,:) = 0.001_dbl_kind ! specific humidity (kg/kg) + swvdr (:,:,:) = 25._dbl_kind ! shortwave radiation (W/m^2) + swvdf (:,:,:) = 25._dbl_kind ! shortwave radiation (W/m^2) + swidr (:,:,:) = 25._dbl_kind ! shortwave radiation (W/m^2) + swidf (:,:,:) = 25._dbl_kind ! shortwave radiation (W/m^2) + flw (:,:,:) = 230.0_dbl_kind ! 
incoming longwave rad (W/m^2) + do n = 1, ncat ! surface heat flux (W/m^2) + fsurfn_f(:,:,n,:) = fsurfn_d(n) + enddo + fcondtopn_f(:,:,:,:) = 0.0_dbl_kind ! conductive heat flux (W/m^2) + flatn_f(:,:,:,:) = -1.0_dbl_kind ! latent heat flux (W/m^2) + fsensn_f(:,:,:,:) = c0 ! sensible heat flux (W/m^2) + elseif (l_winter) then + !typical winter values + potT (:,:,:) = 253.0_dbl_kind ! air potential temp (K) + Tair (:,:,:) = 253.0_dbl_kind ! air temperature (K) + Qa (:,:,:) = 0.0006_dbl_kind ! specific humidity (kg/kg) + swvdr (:,:,:) = c0 ! shortwave radiation (W/m^2) + swvdf (:,:,:) = c0 ! shortwave radiation (W/m^2) + swidr (:,:,:) = c0 ! shortwave radiation (W/m^2) + swidf (:,:,:) = c0 ! shortwave radiation (W/m^2) + flw (:,:,:) = c180 ! incoming longwave rad (W/m^2) + frain (:,:,:) = c0 ! rainfall rate (kg/m2/s) + do n = 1, ncat ! conductive heat flux (W/m^2) + fcondtopn_f(:,:,n,:) = fcondtopn_d(n) + enddo + fsurfn_f = fcondtopn_f ! surface heat flux (W/m^2) + flatn_f(:,:,:,:) = c0 ! latent heat flux (kg/m2/s) + fsensn_f(:,:,:,:) = c0 ! sensible heat flux (W/m^2) + else + !typical summer values + potT (:,:,:) = 273.0_dbl_kind ! air potential temp (K) + Tair (:,:,:) = 273.0_dbl_kind ! air temperature (K) + Qa (:,:,:) = 0.0035_dbl_kind ! specific humidity (kg/kg) + swvdr (:,:,:) = 50._dbl_kind ! shortwave radiation (W/m^2) + swvdf (:,:,:) = 50._dbl_kind ! shortwave radiation (W/m^2) + swidr (:,:,:) = 50._dbl_kind ! shortwave radiation (W/m^2) + swidf (:,:,:) = 50._dbl_kind ! shortwave radiation (W/m^2) + flw (:,:,:) = 280.0_dbl_kind ! incoming longwave rad (W/m^2) + frain (:,:,:) = c0 ! rainfall rate (kg/m2/s) + do n = 1, ncat ! surface heat flux (W/m^2) + fsurfn_f(:,:,n,:) = fsurfn_d(n) + enddo + fcondtopn_f(:,:,:,:) = 0.0_dbl_kind ! conductive heat flux (W/m^2) + flatn_f(:,:,:,:) = -2.0_dbl_kind ! latent heat flux (W/m^2) + fsensn_f(:,:,:,:) = c0 ! sensible heat flux (W/m^2) + endif ! l_winter + + faero_atm (:,:,:,:) = c0 ! aerosol deposition rate (kg/m2/s) + + !----------------------------------------------------------------- + ! fluxes received from ocean + !----------------------------------------------------------------- + + ss_tltx(:,:,:)= c0 ! sea surface tilt (m/m) + ss_tlty(:,:,:)= c0 + uocn (:,:,:) = c0 ! surface ocean currents (m/s) + vocn (:,:,:) = c0 + frzmlt(:,:,:) = c0 ! freezing/melting potential (W/m^2) + sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) +!ars599: 04042016: should we change to AusCOM or ACCESS? +#ifndef CICE_IN_NEMO + if (ktherm == 2) then ! freezing temp (C) + ! liquidus_temperature_mush(sss) + Tf (:,:,:) = sss(:,:,:) / (-18.48_dbl_kind & + + ((18.48_dbl_kind*p001)*sss(:,:,:))) + else + Tf (:,:,:) = -depressT*sss(:,:,:) + endif + sst (:,:,:) = Tf(:,:,:) ! sea surface temp (C) +#endif + qdp (:,:,:) = c0 ! deep ocean heat flux (W/m^2) + hmix (:,:,:) = c20 ! ocean mixed layer depth + daice_da(:,:,:) = c0 ! data assimilation increment rate + + !----------------------------------------------------------------- + ! fluxes sent to atmosphere + !----------------------------------------------------------------- + + strairxT(:,:,:) = c0 ! wind stress, T grid + strairyT(:,:,:) = c0 + + fsens (:,:,:) = c0 + flat (:,:,:) = c0 + fswabs (:,:,:) = c0 + flwout (:,:,:) = -stefan_boltzmann*Tffresh**4 + ! 
in case atm model diagnoses Tsfc from flwout + evap (:,:,:) = c0 + Tref (:,:,:) = c0 + Qref (:,:,:) = c0 + Uref (:,:,:) = c0 + alvdr (:,:,:) = c0 + alidr (:,:,:) = c0 + alvdf (:,:,:) = c0 + alidf (:,:,:) = c0 + keffn_top(:,:,:,:) = c0 + Tn_top (:,:,:,:) = c0 + + !----------------------------------------------------------------- + ! fluxes sent to ocean + !----------------------------------------------------------------- + + strocnxT(:,:,:) = c0 ! ice-ocean stress, x-direction (T-cell) + strocnyT(:,:,:) = c0 ! ice-ocean stress, y-direction (T-cell) + fresh (:,:,:) = c0 + fsalt (:,:,:) = c0 + fhocn (:,:,:) = c0 + fswthru (:,:,:) = c0 + fresh_da(:,:,:) = c0 ! data assimilation + fsalt_da(:,:,:) = c0 + flux_bio (:,:,:,:) = c0 ! bgc + + !----------------------------------------------------------------- + ! derived or computed fields + !----------------------------------------------------------------- + + fsw (:,:,:) = c0 ! shortwave radiation (W/m^2) + scale_factor(:,:,:) = c1 ! shortwave scaling factor + wind (:,:,:) = sqrt(uatm(:,:,:)**2 & + + vatm(:,:,:)**2) ! wind speed, (m/s) + Cdn_atm(:,:,:) = (vonkar/log(zref/iceruf)) & + * (vonkar/log(zref/iceruf)) ! atmo drag for RASM + + end subroutine init_coupler_flux + +!======================================================================= + +! Initialize some fluxes sent to coupler for use by the atm model +! +! author: Elizabeth C. Hunke, LANL + + subroutine init_flux_atm + + !----------------------------------------------------------------- + ! initialize albedo and fluxes + !----------------------------------------------------------------- + + strairxT(:,:,:) = c0 ! wind stress, T grid + strairyT(:,:,:) = c0 + ! for rectangular grid tests without thermo + ! strairxT(:,:,:) = 0.15_dbl_kind + ! strairyT(:,:,:) = 0.15_dbl_kind + + fsens (:,:,:) = c0 + flat (:,:,:) = c0 + fswabs (:,:,:) = c0 + flwout (:,:,:) = c0 + evap (:,:,:) = c0 + Tref (:,:,:) = c0 + Qref (:,:,:) = c0 + Uref (:,:,:) = c0 + + end subroutine init_flux_atm + +!======================================================================= +! Initialize some fluxes sent to coupler for use by the ocean model +! +! NOTE: These fluxes should be initialized immediately after the +! call to the coupler. The atmospheric fluxes can be initialized +! at the beginning of the following time step because they are +! not modified by any subroutines between the call to_coupler +! and the end of the time step. +! +! author: Elizabeth C. Hunke, LANL + + subroutine init_flux_ocn + + use ice_zbgc_shared, only: flux_bio + + !----------------------------------------------------------------- + ! fluxes sent + !----------------------------------------------------------------- + + fresh (:,:,:) = c0 + fsalt (:,:,:) = c0 + fhocn (:,:,:) = c0 + fswthru (:,:,:) = c0 + faero_ocn(:,:,:,:) = c0 + + flux_bio (:,:,:,:) = c0 ! bgc + + end subroutine init_flux_ocn + +!======================================================================= + +! Initialize thermodynamic fields written to history files. +! +! authors: William H. Lipscomb, LANL +! Elizabeth C. 
Hunke, LANL + + subroutine init_history_therm + use ice_atmo, only: hfreebd, hdraft, hridge, distrdg, hkeel, & + dkeel, lfloe, dfloe, Cdn_atm, Cdn_atm_rdg, & + Cdn_atm_floe, Cdn_atm_pond, Cdn_atm_skin, & + Cdn_atm_ratio, Cdn_ocn, Cdn_ocn_keel, & + Cdn_ocn_floe, Cdn_ocn_skin, formdrag + use ice_state, only: aice, vice, vsno, trcr, tr_iage, nt_iage + use ice_constants, only: vonkar,zref,iceruf + + fsurf (:,:,:) = c0 + fcondtop(:,:,:)= c0 + congel (:,:,:) = c0 + frazil (:,:,:) = c0 + snoice (:,:,:) = c0 + dsnow (:,:,:) = c0 + meltt (:,:,:) = c0 + melts (:,:,:) = c0 + meltb (:,:,:) = c0 + meltl (:,:,:) = c0 + daidtt (:,:,:) = aice(:,:,:) ! temporary initial area + dvidtt (:,:,:) = vice(:,:,:) ! temporary initial volume + dvsdtt (:,:,:) = vsno(:,:,:) ! temporary initial volume + if (tr_iage) then + dagedtt(:,:,:) = trcr(:,:,nt_iage,:) ! temporary initial age + else + dagedtt(:,:,:) = c0 + endif + fsurfn (:,:,:,:) = c0 + fcondtopn (:,:,:,:) = c0 + flatn (:,:,:,:) = c0 + fsensn (:,:,:,:) = c0 + fpond (:,:,:) = c0 + fresh_ai (:,:,:) = c0 + fsalt_ai (:,:,:) = c0 + fhocn_ai (:,:,:) = c0 + fswthru_ai(:,:,:) = c0 + albice (:,:,:) = c0 + albsno (:,:,:) = c0 + albpnd (:,:,:) = c0 + + ! drag coefficients are computed prior to the atmo_boundary call, + ! during the thermodynamics section + Cdn_ocn(:,:,:) = dragio + Cdn_atm(:,:,:) = (vonkar/log(zref/iceruf)) & + * (vonkar/log(zref/iceruf)) ! atmo drag for RASM + + if (formdrag) then + Cdn_atm_rdg (:,:,:) = c0 + Cdn_atm_ratio(:,:,:)= c0 + Cdn_atm_floe(:,:,:) = c0 + Cdn_atm_pond(:,:,:) = c0 + Cdn_atm_skin(:,:,:) = c0 + Cdn_ocn_skin(:,:,:) = c0 + Cdn_ocn_keel(:,:,:) = c0 + Cdn_ocn_floe(:,:,:) = c0 + hfreebd (:,:,:) = c0 + hdraft (:,:,:) = c0 + hridge (:,:,:) = c0 + distrdg (:,:,:) = c0 + hkeel (:,:,:) = c0 + dkeel (:,:,:) = c0 + lfloe (:,:,:) = c0 + dfloe (:,:,:) = c0 + endif + + end subroutine init_history_therm + +!======================================================================= + +! Initialize dynamic fields written to history files. +! +! authors: William H. Lipscomb, LANL +! Elizabeth C. Hunke, LANL + + subroutine init_history_dyn + + use ice_state, only: aice, vice, vsno, trcr, tr_iage, nt_iage + + sig1 (:,:,:) = c0 + sig2 (:,:,:) = c0 + strocnx (:,:,:) = c0 + strocny (:,:,:) = c0 + strairx (:,:,:) = c0 + strairy (:,:,:) = c0 + strtltx (:,:,:) = c0 + strtlty (:,:,:) = c0 + strintx (:,:,:) = c0 + strinty (:,:,:) = c0 + dardg1dt(:,:,:) = c0 + dardg2dt(:,:,:) = c0 + dvirdgdt(:,:,:) = c0 + opening (:,:,:) = c0 + daidtd (:,:,:) = aice(:,:,:) ! temporary initial area + dvidtd (:,:,:) = vice(:,:,:) ! temporary initial volume + dvsdtd (:,:,:) = vsno(:,:,:) ! temporary initial volume + if (tr_iage) & + dagedtd (:,:,:) = trcr(:,:,nt_iage,:) ! temporary initial age + fm (:,:,:) = c0 + prs_sig (:,:,:) = c0 + ardgn (:,:,:,:) = c0 + vrdgn (:,:,:,:) = c0 + krdgn (:,:,:,:) = c1 + aparticn(:,:,:,:) = c0 + aredistn(:,:,:,:) = c0 + vredistn(:,:,:,:) = c0 + dardg1ndt(:,:,:,:) = c0 + dardg2ndt(:,:,:,:) = c0 + dvirdgndt(:,:,:,:) = c0 + + end subroutine init_history_dyn + +!======================================================================= + +! Aggregate flux information from all ice thickness categories +! +! author: Elizabeth C. Hunke and William H. 
Lipscomb, LANL + + subroutine merge_fluxes (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen, & + flw, coszn, & + strairxn, strairyn, & + Cdn_atm_ratio_n, & + fsurfn, fcondtopn, & + fsensn, flatn, & + fswabsn, flwoutn, & + evapn, & + Trefn, Qrefn, & + freshn, fsaltn, & + fhocnn, fswthrun, & + strairxT, strairyT, & + Cdn_atm_ratio, & + fsurf, fcondtop, & + fsens, flat, & + fswabs, flwout, & + evap, & + Tref, Qref, & + fresh, fsalt, & + fhocn, fswthru, & + melttn, meltsn, meltbn, congeln, snoicen, & + meltt, melts, & + meltb, & + congel, snoice, & + Uref, Urefn ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + ! single category fluxes + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in):: & + aicen , & ! concentration of ice + flw , & ! downward longwave flux (W/m**2) + coszn , & ! cosine of solar zenith angle + strairxn, & ! air/ice zonal strss, (N/m**2) + strairyn, & ! air/ice merdnl strss, (N/m**2) + Cdn_atm_ratio_n, & ! ratio of total drag over neutral drag (atm) + fsurfn , & ! net heat flux to top surface (W/m**2) + fcondtopn,& ! downward cond flux at top sfc (W/m**2) + fsensn , & ! sensible heat flx (W/m**2) + flatn , & ! latent heat flx (W/m**2) + fswabsn , & ! shortwave absorbed heat flx (W/m**2) + flwoutn , & ! upwd lw emitted heat flx (W/m**2) + evapn , & ! evaporation (kg/m2/s) + Trefn , & ! air tmp reference level (K) + Qrefn , & ! air sp hum reference level (kg/kg) + freshn , & ! fresh water flux to ocean (kg/m2/s) + fsaltn , & ! salt flux to ocean (kg/m2/s) + fhocnn , & ! actual ocn/ice heat flx (W/m**2) + fswthrun, & ! sw radiation through ice bot (W/m**2) + melttn , & ! top ice melt (m) + meltbn , & ! bottom ice melt (m) + meltsn , & ! snow melt (m) + congeln , & ! congelation ice growth (m) + snoicen ! snow-ice growth (m) + + real (kind=dbl_kind), dimension(nx_block,ny_block), optional, intent(in):: & + Urefn ! air speed reference level (m/s) + + ! cumulative fluxes + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout):: & + strairxT, & ! air/ice zonal strss, (N/m**2) + strairyT, & ! air/ice merdnl strss, (N/m**2) + Cdn_atm_ratio, & ! ratio of total drag over neutral drag (atm) + fsurf , & ! net heat flux to top surface (W/m**2) + fcondtop, & ! downward cond flux at top sfc (W/m**2) + fsens , & ! sensible heat flx (W/m**2) + flat , & ! latent heat flx (W/m**2) + fswabs , & ! shortwave absorbed heat flx (W/m**2) + flwout , & ! upwd lw emitted heat flx (W/m**2) + evap , & ! evaporation (kg/m2/s) + Tref , & ! air tmp reference level (K) + Qref , & ! air sp hum reference level (kg/kg) + fresh , & ! fresh water flux to ocean (kg/m2/s) + fsalt , & ! salt flux to ocean (kg/m2/s) + fhocn , & ! actual ocn/ice heat flx (W/m**2) + fswthru , & ! sw radiation through ice bot (W/m**2) + meltt , & ! top ice melt (m) + meltb , & ! bottom ice melt (m) + melts , & ! snow melt (m) + congel , & ! congelation ice growth (m) + snoice ! snow-ice growth (m) + + real (kind=dbl_kind), dimension(nx_block,ny_block), optional, & + intent(inout):: & + Uref ! air speed reference level (m/s) + + integer (kind=int_kind) :: & + ij, i, j ! horizontal indices + + !----------------------------------------------------------------- + ! Merge fluxes + ! NOTE: The albedo is aggregated only in cells where ice exists + ! 
and (for the delta-Eddington scheme) where the sun is above + ! the horizon. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! atmo fluxes + + strairxT (i,j) = strairxT(i,j) + strairxn(i,j)*aicen(i,j) + strairyT (i,j) = strairyT(i,j) + strairyn(i,j)*aicen(i,j) + Cdn_atm_ratio (i,j) = Cdn_atm_ratio (i,j) + & + Cdn_atm_ratio_n (i,j)*aicen(i,j) + fsurf (i,j) = fsurf (i,j) + fsurfn (i,j)*aicen(i,j) + fcondtop (i,j) = fcondtop(i,j) + fcondtopn(i,j)*aicen(i,j) + fsens (i,j) = fsens (i,j) + fsensn (i,j)*aicen(i,j) + flat (i,j) = flat (i,j) + flatn (i,j)*aicen(i,j) + fswabs (i,j) = fswabs (i,j) + fswabsn (i,j)*aicen(i,j) + flwout (i,j) = flwout (i,j) & + + (flwoutn(i,j) - (c1-emissivity)*flw(i,j))*aicen(i,j) + evap (i,j) = evap (i,j) + evapn (i,j)*aicen(i,j) + Tref (i,j) = Tref (i,j) + Trefn (i,j)*aicen(i,j) + Qref (i,j) = Qref (i,j) + Qrefn (i,j)*aicen(i,j) + if (present(Urefn) .and. present(Uref)) then + Uref (i,j) = Uref (i,j) + Urefn (i,j)*aicen(i,j) + endif + + ! ocean fluxes + + fresh (i,j) = fresh (i,j) + freshn (i,j)*aicen(i,j) + fsalt (i,j) = fsalt (i,j) + fsaltn (i,j)*aicen(i,j) + fhocn (i,j) = fhocn (i,j) + fhocnn (i,j)*aicen(i,j) + fswthru (i,j) = fswthru (i,j) + fswthrun(i,j)*aicen(i,j) + + ! ice/snow thickness + + meltt (i,j) = meltt (i,j) + melttn (i,j)*aicen(i,j) + meltb (i,j) = meltb (i,j) + meltbn (i,j)*aicen(i,j) + melts (i,j) = melts (i,j) + meltsn (i,j)*aicen(i,j) + congel (i,j) = congel (i,j) + congeln (i,j)*aicen(i,j) + snoice (i,j) = snoice (i,j) + snoicen (i,j)*aicen(i,j) + + enddo ! ij + + end subroutine merge_fluxes + +!======================================================================= + +! Divide ice fluxes by ice area before sending them to the +! coupler, since the coupler multiplies by ice area. +! +! authors: C.M.Bitz, William H. Lipscomb + + subroutine scale_fluxes (nx_block, ny_block, & + tmask, nbtrcr, & + aice, Tf, & + Tair, Qa, & + strairxT, strairyT, & + fsens, flat, & + fswabs, flwout, & + evap, & + Tref, Qref, & + fresh, fsalt, & + fhocn, fswthru, & + faero_ocn, & + alvdr, alidr, & + alvdf, alidf, & + flux_bio, & + fsurf, fcondtop, & + Uref, wind ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + nbtrcr ! number of biology tracers + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in):: & + aice , & ! fractional ice area + Tf , & ! freezing temperature (C) + Tair , & ! surface air temperature (K) + Qa ! sfc air specific humidity (kg/kg) + + real (kind=dbl_kind), dimension(nx_block,ny_block), optional, & + intent(in):: & + wind ! wind speed (m/s) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout):: & + strairxT, & ! air/ice zonal stress (N/m**2) + strairyT, & ! air/ice merdnl stress (N/m**2) + fsens , & ! sensible heat flx (W/m**2) + flat , & ! latent heat flx (W/m**2) + fswabs , & ! shortwave absorbed heat flx (W/m**2) + flwout , & ! upwd lw emitted heat flx (W/m**2) + evap , & ! evaporation (kg/m2/s) + Tref , & ! air tmp reference level (K) + Qref , & ! air sp hum reference level (kg/kg) + fresh , & ! fresh water flux to ocean (kg/m2/s) + fsalt , & ! salt flux to ocean (kg/m2/s) + fhocn , & ! actual ocn/ice heat flx (W/m**2) + fswthru , & ! sw radiation through ice bot (W/m**2) + alvdr , & ! 
visible, direct (fraction) + alidr , & ! near-ir, direct (fraction) + alvdf , & ! visible, diffuse (fraction) + alidf ! near-ir, diffuse (fraction) + + real (kind=dbl_kind), dimension(nx_block,ny_block), optional, & + intent(inout):: & + Uref ! air speed reference level (m/s) + + real (kind=dbl_kind), dimension(nx_block,ny_block,nbtrcr), & + intent(inout):: & + flux_bio ! tracer flux to ocean from biology (mmol/m2/s) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_aero), & + intent(inout):: & + faero_ocn ! aersol flux to ocean (kg/m2/s) + + ! For hadgem drivers. Assumes either both fields are passed or neither + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout), optional :: & + fsurf , & ! surface heat flux (W/m**2) + fcondtop ! top surface conductive flux (W/m**2) + + ! local variables + + real (kind=dbl_kind) :: ar ! 1/aice + + integer (kind=int_kind) :: & + i, j ! horizontal indices + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j) .and. aice(i,j) > c0) then + ar = c1 / aice(i,j) + strairxT(i,j) = strairxT(i,j) * ar + strairyT(i,j) = strairyT(i,j) * ar + fsens (i,j) = fsens (i,j) * ar + flat (i,j) = flat (i,j) * ar + fswabs (i,j) = fswabs (i,j) * ar + flwout (i,j) = flwout (i,j) * ar + evap (i,j) = evap (i,j) * ar + Tref (i,j) = Tref (i,j) * ar + Qref (i,j) = Qref (i,j) * ar + if (present(Uref)) then + Uref (i,j) = Uref (i,j) * ar + endif + fresh (i,j) = fresh (i,j) * ar + fsalt (i,j) = fsalt (i,j) * ar + fhocn (i,j) = fhocn (i,j) * ar + fswthru (i,j) = fswthru (i,j) * ar + alvdr (i,j) = alvdr (i,j) * ar + alidr (i,j) = alidr (i,j) * ar + alvdf (i,j) = alvdf (i,j) * ar + alidf (i,j) = alidf (i,j) * ar + flux_bio (i,j,:) = flux_bio (i,j,:) * ar + faero_ocn(i,j,:) = faero_ocn(i,j,:) * ar + else ! zero out fluxes + strairxT(i,j) = c0 + strairyT(i,j) = c0 + fsens (i,j) = c0 + flat (i,j) = c0 + fswabs (i,j) = c0 + flwout (i,j) = -stefan_boltzmann *(Tf(i,j) + Tffresh)**4 + ! to make upward longwave over ocean reasonable for history file + evap (i,j) = c0 + Tref (i,j) = Tair(i,j) + Qref (i,j) = Qa (i,j) + if (present(Uref) .and. present(wind)) then + Uref (i,j) = wind(i,j) + endif + fresh (i,j) = c0 + fsalt (i,j) = c0 + fhocn (i,j) = c0 + fswthru (i,j) = c0 + alvdr (i,j) = c0 ! zero out albedo where ice is absent + alidr (i,j) = c0 + alvdf (i,j) = c0 + alidf (i,j) = c0 + flux_bio (i,j,:) = c0 + faero_ocn(i,j,:) = c0 + endif ! tmask and aice > 0 + enddo ! i + enddo ! j + + ! Scale fluxes for history output + if (present(fsurf) .and. present(fcondtop) ) then + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j) .and. aice(i,j) > c0) then + ar = c1 / aice(i,j) + fsurf (i,j) = fsurf (i,j) * ar + fcondtop(i,j) = fcondtop(i,j) * ar + else ! zero out fluxes + fsurf (i,j) = c0 + fcondtop(i,j) = c0 + endif ! tmask and aice > 0 + enddo ! i + enddo ! j + + endif ! present(fsurf & fcondtop) + + end subroutine scale_fluxes + +!======================================================================= + + end module ice_flux + +!======================================================================= diff --git a/source/ice_forcing.F90 b/source/ice_forcing.F90 new file mode 100755 index 00000000..4c7567e4 --- /dev/null +++ b/source/ice_forcing.F90 @@ -0,0 +1,3992 @@ +! SVN:$Id: ice_forcing.F90 825 2014-08-29 15:37:09Z eclare $ +!======================================================================= +! +! 
Reads and interpolates forcing data for atmosphere and ocean quantities. +! +! authors: Elizabeth C. Hunke and William H. Lipscomb, LANL +! +! 2004 WHL: Block structure added +! 2005 WHL: ECMWF option added +! 2006 ECH: LY option added +! 2006 WHL: Module name changed from ice_flux_in +! 2006 ECH: Fixed bugs, rearranged routines, edited comments, etc. +! Added NCAR ocean forcing file +! Converted to free source form (F90) +! 2007: netcdf version of read_data added by Alison McLaren, Met Office +! + module ice_forcing + + use ice_kinds_mod + use ice_blocks, only: nx_block, ny_block + use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global + use ice_communicate, only: my_task, master_task + use ice_calendar, only: istep, istep1, time, time_forc, year_init, & + sec, mday, month, nyr, yday, daycal, dayyr, & + daymo, days_per_year + use ice_fileunits, only: nu_diag, nu_forcing + use ice_atmo, only: calc_strair + use ice_exit, only: abort_ice + use ice_read_write, only: ice_open, ice_read, & + ice_open_nc, ice_read_nc, ice_close_nc + use ice_therm_shared, only: ktherm +#ifdef AusCOM + use cpl_parameters !, only : use_lwflxd +#endif + + implicit none + private + public :: init_forcing_atmo, init_forcing_ocn, & + get_forcing_atmo, get_forcing_ocn, & + read_clim_data, read_clim_data_nc, & +#ifdef AusCOM +!ars599: 27032014: add in get_forcing_atmo_ready + get_forcing_atmo_ready, & +#endif + interpolate_data, interp_coeff_monthly + save + + integer (kind=int_kind), public :: & + ycycle , & ! number of years in forcing cycle + fyear_init , & ! first year of data in forcing cycle + fyear , & ! current year in forcing cycle + fyear_final ! last year in cycle + + character (char_len_long) :: & ! input data file names + height_file, & + uwind_file, & + vwind_file, & + wind_file, & + strax_file, & + stray_file, & + potT_file, & + tair_file, & + humid_file, & + rhoa_file, & + fsw_file, & + flw_file, & + rain_file, & + sst_file, & + sss_file, & + pslv_file, & + sublim_file, & + snow_file + + character (char_len_long), dimension(ncat) :: & ! input data file names + topmelt_file, & + botmelt_file + + real (kind=dbl_kind) :: & + c1intp, c2intp , & ! interpolation coefficients + ftime ! forcing time (for restart) + + integer (kind=int_kind) :: & + oldrecnum = 0 ! old record number (save between steps) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & + cldf ! cloud fraction + + real (kind=dbl_kind), dimension(nx_block,ny_block,2,max_blocks) :: & + fsw_data, & ! field values at 2 temporal data points + cldf_data, & + fsnow_data, & + Tair_data, & + uatm_data, & + vatm_data, & + wind_data, & + strax_data, & + stray_data, & + Qa_data, & + rhoa_data, & + potT_data, & + zlvl_data, & + flw_data, & + sst_data, & + sss_data, & + uocn_data, & + vocn_data, & + sublim_data, & + frain_data + + real (kind=dbl_kind), & + dimension(nx_block,ny_block,2,max_blocks,ncat) :: & + topmelt_data, & + botmelt_data + + character(char_len), public :: & + atm_data_format, & ! 'bin'=binary or 'nc'=netcdf + ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf + atm_data_type, & ! 'default', 'monthly', 'ncar', + ! 'LYq' or 'hadgem' or 'oned' + sss_data_type, & ! 'default', 'clim', 'ncar', 'oned' + sst_data_type, & ! 'default', 'clim', 'ncar', 'oned', + ! 'hadgem_sst' or 'hadgem_sst_uvocn' + precip_units ! 'mm_per_month', 'mm_per_sec', 'mks' + + character(char_len_long), public :: & + atm_data_dir , & ! top directory for atmospheric data + ocn_data_dir , & ! top directory for ocean data + oceanmixed_file ! 
file name for ocean forcing data + + integer (kind=int_kind), parameter :: & + nfld = 8 ! number of fields to search for in forcing file + + ! as in the dummy atm (latm) + real (kind=dbl_kind), parameter, public :: & + frcvdr = 0.28_dbl_kind, & ! frac of incoming sw in vis direct band + frcvdf = 0.24_dbl_kind, & ! frac of incoming sw in vis diffuse band + frcidr = 0.31_dbl_kind, & ! frac of incoming sw in near IR direct band + frcidf = 0.17_dbl_kind ! frac of incoming sw in near IR diffuse band + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,max_blocks,nfld,12) :: & + ocn_frc_m ! ocn data for 12 months + + logical (kind=log_kind), public :: & + restore_sst ! restore sst if true + + integer (kind=int_kind), public :: & + trestore ! restoring time scale (days) + + real (kind=dbl_kind), public :: & + trest ! restoring time scale (sec) + + logical (kind=log_kind), public :: & + dbug ! prints debugging output if true + +!======================================================================= + + contains + +!======================================================================= + + subroutine init_forcing_atmo + +! Determine the current and final year of the forcing cycle based on +! namelist input; initialize the atmospheric forcing data filenames. + + fyear = fyear_init + mod(nyr-1,ycycle) ! current year + fyear_final = fyear_init + ycycle - 1 ! last year in forcing cycle + + if (trim(atm_data_type) /= 'default' .and. & + my_task == master_task) then + write (nu_diag,*) ' Initial forcing data year = ',fyear_init + write (nu_diag,*) ' Final forcing data year = ',fyear_final + endif + + !------------------------------------------------------------------- + ! Get filenames for input forcing data + !------------------------------------------------------------------- + + ! default forcing values from init_flux_atm +#ifndef AusCOM + if (trim(atm_data_type) == 'ncar') then + call NCAR_files(fyear) +! elseif (trim(atm_data_type) == 'ecmwf') then +! call ecmwf_files(fyear) + elseif (trim(atm_data_type) == 'LYq') then + call LY_files(fyear) + elseif (trim(atm_data_type) == 'hadgem') then + call hadgem_files(fyear) + elseif (trim(atm_data_type) == 'monthly') then + call monthly_files(fyear) + elseif (trim(atm_data_type) == 'oned') then + call oned_files(fyear) + endif +#endif + + end subroutine init_forcing_atmo + +!======================================================================= + + subroutine init_forcing_ocn(dt) + +! Set sea surface salinity and freezing temperature to annual mean value +! using a 12-month climatology. +! Read sst data for current month, and adjust sst based on freezing +! temperature. No interpolation in time. + +! Note: SST is subsequently prognosed if CICE is run with a mixed layer +! ocean (oceanmixed_ice = T), and can be restored to data +! (restore_sst = T). SSS is not prognosed by CICE. + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c12, c1000, secday, depressT, & + field_loc_center, field_type_scalar + use ice_domain, only: nblocks + use ice_domain_size, only: max_blocks + use ice_flux, only: sss, sst, Tf + use ice_zbgc_shared, only: restore_bgc +#ifdef ncdf + use netcdf +#endif + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk , & ! horizontal indices + k , & ! month index + fid , & ! file id for netCDF file + nbits + + logical (kind=log_kind) :: diag + + character (char_len) :: & + fieldname ! 
field name in netcdf file + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + nbits = 64 ! double precision data + + if (restore_sst .or. restore_bgc) then + if (trestore == 0) then + trest = dt ! use data instantaneously + else + trest = real(trestore,kind=dbl_kind) * secday ! seconds + endif + endif + + !------------------------------------------------------------------- + ! Sea surface salinity (SSS) + ! initialize to annual climatology created from monthly data + !------------------------------------------------------------------- + + if (trim(sss_data_type) == 'clim') then + + sss_file = trim(ocn_data_dir)//'sss.mm.100x116.da' ! gx3 only + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'SSS climatology computed from:' + write (nu_diag,*) trim(sss_file) + endif + + if (my_task == master_task) & + call ice_open (nu_forcing, sss_file, nbits) + + sss(:,:,:) = c0 + + do k = 1,12 ! loop over 12 months + call ice_read (nu_forcing, k, work1, 'rda8', dbug, & + field_loc_center, field_type_scalar) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sss(i,j,iblk) = sss(i,j,iblk) + work1(i,j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + enddo ! k + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sss(i,j,iblk) = sss(i,j,iblk) / c12 ! annual average + sss(i,j,iblk) = max(sss(i,j,iblk),c0) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call ocn_freezing_temperature + + if (my_task == master_task) close(nu_forcing) + + endif ! sss_data_type + + !------------------------------------------------------------------- + ! Sea surface temperature (SST) + ! initialize to data for current month + !------------------------------------------------------------------- + + if (trim(sst_data_type) == 'clim') then + + if (nx_global == 320) then ! gx1 + sst_file = trim(ocn_data_dir)//'sst_clim_hurrell.dat' + else ! gx3 + sst_file = trim(ocn_data_dir)//'sst.mm.100x116.da' + endif + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'Initial SST file:', trim(sst_file) + endif + + if (my_task == master_task) & + call ice_open (nu_forcing, sst_file, nbits) + + call ice_read (nu_forcing, month, sst, 'rda8', dbug, & + field_loc_center, field_type_scalar) + + if (my_task == master_task) close(nu_forcing) + + ! Make sure sst is not less than freezing temperature Tf + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sst(i,j,iblk) = max(sst(i,j,iblk),Tf(i,j,iblk)) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + endif ! init_sst_data + + + if (trim(sst_data_type) == 'hadgem_sst' .or. & + trim(sst_data_type) == 'hadgem_sst_uvocn') then + + diag = .true. ! write diagnostic information + + sst_file = trim (ocn_data_dir)//'MONTHLY/sst.1997.nc' + + if (my_task == master_task) then + + write (nu_diag,*) ' ' + write (nu_diag,*) 'Initial SST file:', trim(sst_file) + + call ice_open_nc(sst_file,fid) + + endif + + fieldname='sst' + call ice_read_nc(fid,month,fieldname,sst,diag) + + if (my_task == master_task) call ice_close_nc(fid) + + ! Make sure sst is not less than freezing temperature Tf + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sst(i,j,iblk) = max(sst(i,j,iblk),Tf(i,j,iblk)) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + endif ! sst_data_type + + if (trim(sst_data_type) == 'ncar' .or. 
& + trim(sss_data_type) == 'ncar') then +! call ocn_data_ncar_init + call ocn_data_ncar_init_3D + endif + + end subroutine init_forcing_ocn + +!======================================================================= + + subroutine ocn_freezing_temperature + + ! Compute ocean freezing temperature Tf based on tfrz_option + ! 'minus1p8' Tf = -1.8 C (default) + ! 'linear_salt' Tf = -depressT * sss + ! 'mushy' Tf conforms with mushy layer thermo (ktherm=2) + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: depressT, c1000 + use ice_domain, only: nblocks + use ice_flux, only: sss, Tf + use ice_ocean, only: tfrz_option + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk ! horizontal indices + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (trim(tfrz_option) == 'mushy') then + Tf(i,j,iblk) = sss(i,j,iblk) / (-18.48_dbl_kind & + + ((18.48_dbl_kind/c1000) * sss(i,j,iblk))) + elseif (trim(tfrz_option) == 'linear_salt') then + Tf(i,j,iblk) = -depressT * sss(i,j,iblk) ! deg C + else + Tf(i,j,iblk) = -1.8_dbl_kind + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + + end subroutine ocn_freezing_temperature + +!======================================================================= + + subroutine get_forcing_atmo + +! Get atmospheric forcing data and interpolate as necessary + + use ice_blocks, only: block, get_block + use ice_constants, only: field_loc_center, field_type_scalar + use ice_boundary, only: ice_HaloUpdate + use ice_domain, only: nblocks, blocks_ice, halo_info + use ice_flux, only: Tair, fsw, flw, frain, fsnow, Qa, rhoa, & + uatm, vatm, strax, stray, zlvl, wind, swvdr, swvdf, swidr, swidf, & + potT, sst + use ice_state, only: aice, trcr, nt_Tsfc + use ice_grid, only: ANGLET, hm + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + integer (kind=int_kind) :: & + iblk, & ! block index + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + type (block) :: & + this_block ! block information for current block + + fyear = fyear_init + mod(nyr-1,ycycle) ! current year + if (trim(atm_data_type) /= 'default' .and. istep <= 1 & + .and. my_task == master_task) then + write (nu_diag,*) ' Current forcing data year = ',fyear + endif + + ftime = time ! forcing time + time_forc = ftime ! for restarting + + !------------------------------------------------------------------- + ! Read and interpolate atmospheric data + !------------------------------------------------------------------- + + if (trim(atm_data_type) == 'ncar') then + call ncar_data + elseif (trim(atm_data_type) == 'LYq') then + call LY_data + elseif (trim(atm_data_type) == 'hadgem') then + call hadgem_data + elseif (trim(atm_data_type) == 'monthly') then + call monthly_data + elseif (trim(atm_data_type) == 'oned') then + call oned_data + else ! default values set in init_flux + return + endif + + !------------------------------------------------------------------- + ! 
Convert forcing data to fields needed by ice model + !------------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call prepare_forcing (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + hm (:,:,iblk), & + Tair (:,:,iblk), & + fsw (:,:,iblk), & + cldf (:,:,iblk), & + flw (:,:,iblk), & + frain (:,:,iblk), & + fsnow (:,:,iblk), & + Qa (:,:,iblk), & + rhoa (:,:,iblk), & + uatm (:,:,iblk), & + vatm (:,:,iblk), & + strax (:,:,iblk), & + stray (:,:,iblk), & + zlvl (:,:,iblk), & + wind (:,:,iblk), & + swvdr (:,:,iblk), & + swvdf (:,:,iblk), & + swidr (:,:,iblk), & + swidf (:,:,iblk), & + potT (:,:,iblk), & + ANGLET(:,:,iblk), & + trcr (:,:,nt_Tsfc,iblk), & + sst (:,:,iblk), & + aice (:,:,iblk) ) + + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (swvdr, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (swvdf, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (swidr, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (swidf, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + end subroutine get_forcing_atmo + +#ifdef AusCOM +!======================================================================= +!BOP +! +! !IROUTINE: get_forcing_atmo_ready +! +! !INTERFACE: +! + subroutine get_forcing_atmo_ready +! +! !DESCRIPTION: +! +! Get atmospheric forcing data required by cice +! +! !REVISION HISTORY: +! +! authors: adapted by Siobhan for AusCOM. +! +! !USES: +! + use ice_domain + use ice_blocks + use ice_flux + use ice_state + use ice_grid, only: ANGLET, hm + + integer (kind=int_kind) :: & + iblk, & ! block index + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + type (block) :: & + this_block ! block information for current block + +! +!EOP +! + + !------------------------------------------------------------------- + ! prepare atmospheric data + !------------------------------------------------------------------- + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !------------------------------------------------------------------- + ! Convert forcing data to fields needed by ice model + !------------------------------------------------------------------- + + call prepare_forcing_from_oasis (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + hm (:,:,iblk), & + Tair (:,:,iblk), & + fsw (:,:,iblk), & + flw (:,:,iblk), & + frain (:,:,iblk), & + fsnow (:,:,iblk), & + Qa (:,:,iblk), & + rhoa (:,:,iblk), & + uatm (:,:,iblk), & + vatm (:,:,iblk), & + strax (:,:,iblk), & + stray (:,:,iblk), & + zlvl (:,:,iblk), & + wind (:,:,iblk), & + swvdr (:,:,iblk), & + swvdf (:,:,iblk), & + swidr (:,:,iblk), & + swidf (:,:,iblk), & + potT (:,:,iblk), & + ANGLET(:,:,iblk), & + trcr (:,:,nt_Tsfc,iblk), & + sst (:,:,iblk), & + aice (:,:,iblk) ) + + enddo ! iblk + + end subroutine get_forcing_atmo_ready + +!======================================================================= +! +!BOP +! +! !IROUTINE: prepare_forcing_from_oasis - finish manipulating forcing +! +! !INTERFACE: +! 
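+! This routine finishes preparing the forcing fields received through +! OASIS: it clips fsw, fsnow, frain and Qa to non-negative values, sets +! zlvl and potT, splits the downward shortwave into visible/near-IR and +! direct/diffuse bands using the fixed fractions frcvdr, frcvdf, frcidr +! and frcidf (0.28 + 0.24 + 0.31 + 0.17 = 1), applies the land mask hm to +! the incoming longwave, and, when rotate_winds is set, rotates the wind +! (or stress) vectors from geographic orientation to the model grid +! using ANGLET. +!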
+ subroutine prepare_forcing_from_oasis (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + hm, & + Tair, & + fsw, flw, & + frain, fsnow, & + Qa, rhoa, & + uatm, vatm, & + strax, stray, & + zlvl, wind, & + swvdr, swvdf, & + swidr, swidf, & + potT, ANGLET, & + Tsfc, sst, & + aice) +! +! !DESCRIPTION: +! +! !REVISION HISTORY: +! +! authors: S. Ofarell (adapted/revised from prepare_forcing) +! +! !USES: +! +! !INPUT/OUTPUT PARAMETERS: +! + +!ars599: 26032014 (CODE: const) +! since auscom will call oasis related subroutine +! which will use some constants from ice_constants +! no need to add #ifdef AusCOM cuz such subroutines +! are in #ifdef AusCOM already. + + use ice_constants, only: c0, c10 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + Tair , & ! air temperature (K) + ANGLET , & ! ANGLE converted to T-cells + Tsfc , & ! ice skin temperature + sst , & ! sea surface temperature + aice , & ! ice area fraction + hm ! land mask + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + fsw , & ! incoming shortwave radiation (W/m^2) + frain , & ! rainfall rate (kg/m^2 s) + fsnow , & ! snowfall rate (kg/m^2 s) + Qa , & ! specific humidity (kg/kg) + rhoa , & ! air density (kg/m^3) + uatm , & ! wind velocity components (m/s) + vatm , & + strax , & ! wind stress components (N/m^2) + stray , & + zlvl , & ! atm level height (m) + wind , & ! wind speed (m/s) + flw , & ! incoming longwave radiation (W/m^2) + swvdr , & ! sw down, visible, direct (W/m^2) + swvdf , & ! sw down, visible, diffuse (W/m^2) + swidr , & ! sw down, near IR, direct (W/m^2) + swidf , & ! sw down, near IR, diffuse (W/m^2) + potT ! air potential temperature (K) + + ! as in the dummy atm (latm) + real (kind=dbl_kind), parameter :: & + frcvdr = 0.28_dbl_kind, & ! frac of incoming sw in vis direct band + frcvdf = 0.24_dbl_kind, & ! frac of incoming sw in vis diffuse band + frcidr = 0.31_dbl_kind, & ! frac of incoming sw in near IR direct band + frcidf = 0.17_dbl_kind ! frac of incoming sw in near IR diffuse band +! +!EOP +! + integer (kind=int_kind) :: & + i, j, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: workx, worky, & + fcc, sstk, rtea, ptem, qlwm, & + flwd + + do j = jlo, jhi + do i = ilo, ihi + + !----------------------------------------------------------------- + ! make sure interpolated values are physically realistic + !----------------------------------------------------------------- + fsw (i,j) = max(fsw(i,j),c0) + fsnow(i,j) = max(fsnow(i,j),c0) + frain(i,j) = max(frain(i,j),c0) + Qa (i,j) = max(Qa(i,j),c0) + + enddo ! i + enddo ! j + + !----------------------------------------------------------------- + ! calculations specific to datasets ( no need! ...omitted... ) + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! Compute other fields needed by model + !----------------------------------------------------------------- + + do j = jlo, jhi + do i = ilo, ihi + + zlvl(i,j) = c10 + potT(i,j) = Tair(i,j) + + ! divide shortwave into spectral bands + swvdr(i,j) = fsw(i,j)*frcvdr ! visible direct + swvdf(i,j) = fsw(i,j)*frcvdf ! visible diffuse + swidr(i,j) = fsw(i,j)*frcidr ! near IR direct + swidf(i,j) = fsw(i,j)*frcidf ! near IR diffuse + + !----------------------------------------------------------------- + ! longwave, Rosati and Miyakoda, JPO 18, p. 
1607 (1988) - sort of + !----------------------------------------------------------------- + flwd=flw(i,j) ! read in throug oasis + flw(i,j) = flwd * hm(i,j) ! land mask + + if (calc_strair) then + + wind(i,j) = sqrt(uatm(i,j)**2 + vatm(i,j)**2) + + !----------------------------------------------------------------- + ! Rotate zonal/meridional vectors to local coordinates. + ! Velocity comes in on T grid, but is oriented geographically --- + ! need to rotate to pop-grid FIRST using ANGLET + ! then interpolate to the U-cell centers (otherwise we + ! interpolate across the pole). + ! Use ANGLET which is on the T grid ! + ! Atmo variables are needed in T cell centers in subroutine + ! atmo_boundary_layer, and are interpolated to the U grid later as + ! necessary. + !----------------------------------------------------------------- + +!bi003: 20090109--set 'rotate_wind' option in case oasis sends U, V as scalars +! NOTE different namcouple is used for oasis this case! +!----------------------------------------------------------------------------- + if (rotate_winds) then + workx = uatm(i,j) ! wind velocity, m/s + worky = vatm(i,j) + uatm (i,j) = workx*cos(ANGLET(i,j)) & ! convert to POP grid + + worky*sin(ANGLET(i,j)) ! note uatm, vatm, wind + vatm (i,j) = worky*cos(ANGLET(i,j)) & ! are on the T-grid here + - workx*sin(ANGLET(i,j)) + endif + else ! strax, stray, wind are read from files + if (rotate_winds) then + workx = strax(i,j) ! wind stress + worky = stray(i,j) + strax(i,j) = workx*cos(ANGLET(i,j)) & ! convert to POP grid + + worky*sin(ANGLET(i,j)) ! note strax, stray, wind + stray(i,j) = worky*cos(ANGLET(i,j)) & ! are on the T-grid here + - workx*sin(ANGLET(i,j)) + endif + endif ! calc_strair + + enddo ! i + enddo ! j + + return + end subroutine prepare_forcing_from_oasis + +#endif + +!======================================================================= + + subroutine get_forcing_ocn (dt) + +! Read and interpolate annual climatologies of SSS and SST. +! Restore model SST to data if desired. +! Interpolate ocean fields to U grid if necessary. + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + if (trim(sst_data_type) == 'clim' .or. & + trim(sss_data_type) == 'clim') then + call ocn_data_clim(dt) + elseif (trim(sst_data_type) == 'ncar' .or. & + trim(sss_data_type) == 'ncar') then + call ocn_data_ncar(dt) + elseif (trim(sst_data_type) == 'hadgem_sst' .or. & + trim(sst_data_type) == 'hadgem_sst_uvocn') then + call ocn_data_hadgem(dt) + elseif (trim(sst_data_type) == 'oned' .or. & + trim(sss_data_type) == 'oned') then + call ocn_data_oned(dt) + endif + + end subroutine get_forcing_ocn + +!======================================================================= + + subroutine read_data (flag, recd, yr, ixm, ixx, ixp, & + maxrec, data_file, field_data, & + field_loc, field_type) + +! If data is at the beginning of a one-year record, get data from +! the previous year. +! If data is at the end of a one-year record, get data from the +! following year. +! If no earlier data exists (beginning of fyear_init), then +! (1) For monthly data, get data from the end of fyear_final. +! (2) For more frequent data, let the ixm value equal the +! first value of the year. +! If no later data exists (end of fyear_final), then +! (1) For monthly data, get data from the beginning of fyear_init. +! (2) For more frequent data, let the ixp value +! equal the last value of the year. +! In other words, we assume persistence when daily or 6-hourly +! 
data is missing, and we assume periodicity when monthly data +! is missing. + + use ice_diagnostics, only: check_step + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_readwrite + + logical (kind=log_kind), intent(in) :: flag + + integer (kind=int_kind), intent(in) :: & + recd , & ! baseline record number + yr , & ! year of forcing data + ixm, ixx, ixp , & ! record numbers of 3 data values + ! relative to recd + maxrec ! maximum record value + + real (kind=dbl_kind), dimension(nx_block,ny_block,2,max_blocks), & + intent(out) :: & + field_data ! 2 values needed for interpolation + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + ! local variables + + character (char_len_long) :: & + data_file ! data file to be read + + integer (kind=int_kind) :: & + nbits , & ! = 32 for single precision, 64 for double + nrec , & ! record number to read + n2, n4 , & ! like ixm and ixp, but + ! adjusted at beginning and end of data + arg ! value of time argument in field_data + + call ice_timer_start(timer_readwrite) ! reading/writing + + nbits = 64 ! double precision data + + if (istep1 > check_step) dbug = .true. !! debugging + + if (my_task==master_task .and. (dbug)) then + write(nu_diag,*) ' ', trim(data_file) + endif + + if (flag) then + + !----------------------------------------------------------------- + ! Initialize record counters + ! (n2, n4 will change only at the very beginning or end of + ! a forcing cycle.) + !----------------------------------------------------------------- + n2 = ixm + n4 = ixp + arg = 0 + + !----------------------------------------------------------------- + ! read data + !----------------------------------------------------------------- + + if (ixm /= -99) then + ! currently in first half of data interval + if (ixx <= 1) then + if (yr > fyear_init) then ! get data from previous year + call file_year (data_file, yr-1) + else ! yr = fyear_init, no prior data exists + if (maxrec > 12) then ! extrapolate from first record + if (ixx == 1) n2 = ixx + else ! go to end of fyear_final + call file_year (data_file, fyear_final) + endif + endif ! yr > fyear_init + endif ! ixx <= 1 + + call ice_open (nu_forcing, data_file, nbits) + + arg = 1 + nrec = recd + n2 + call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & + 'rda8', dbug, field_loc, field_type) + + if (ixx==1 .and. my_task == master_task) close(nu_forcing) + endif ! ixm ne -99 + + ! always read ixx data from data file for current year + call file_year (data_file, yr) + call ice_open (nu_forcing, data_file, nbits) + + arg = arg + 1 + nrec = recd + ixx + call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & + 'rda8', dbug, field_loc, field_type) + + if (ixp /= -99) then + ! currently in latter half of data interval + if (ixx==maxrec) then + if (yr < fyear_final) then ! get data from following year + if (my_task == master_task) close(nu_forcing) + call file_year (data_file, yr+1) + call ice_open (nu_forcing, data_file, nbits) + else ! yr = fyear_final, no more data exists + if (maxrec > 12) then ! extrapolate from ixx + n4 = ixx + else ! go to beginning of fyear_init + if (my_task == master_task) close(nu_forcing) + call file_year (data_file, fyear_init) + + call ice_open (nu_forcing, data_file, nbits) + + endif + endif ! yr < fyear_final + endif ! ixx = maxrec + + arg = arg + 1 + nrec = recd + n4 + call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & + 'rda8', dbug, field_loc, field_type) + endif ! 
ixp /= -99 + + if (my_task == master_task) close(nu_forcing) + + endif ! flag + + call ice_timer_stop(timer_readwrite) ! reading/writing + + end subroutine read_data + +!======================================================================= + + subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & + maxrec, data_file, fieldname, field_data, & + field_loc, field_type) + +! If data is at the beginning of a one-year record, get data from +! the previous year. +! If data is at the end of a one-year record, get data from the +! following year. +! If no earlier data exists (beginning of fyear_init), then +! (1) For monthly data, get data from the end of fyear_final. +! (2) For more frequent data, let the ixm value equal the +! first value of the year. +! If no later data exists (end of fyear_final), then +! (1) For monthly data, get data from the beginning of fyear_init. +! (2) For more frequent data, let the ixp value +! equal the last value of the year. +! In other words, we assume persistence when daily or 6-hourly +! data is missing, and we assume periodicity when monthly data +! is missing. +! +! Adapted by Alison McLaren, Met Office from read_data + + use ice_constants, only: c0 + use ice_diagnostics, only: check_step + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_readwrite + + logical (kind=log_kind), intent(in) :: flag + + integer (kind=int_kind), intent(in) :: & + recd , & ! baseline record number + yr , & ! year of forcing data + ixm, ixx, ixp , & ! record numbers of 3 data values + ! relative to recd + maxrec ! maximum record value + + character (char_len_long) :: & + data_file ! data file to be read + + character (char_len), intent(in) :: & + fieldname ! field name in netCDF file + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + real (kind=dbl_kind), dimension(nx_block,ny_block,2,max_blocks), & + intent(out) :: & + field_data ! 2 values needed for interpolation + + ! local variables + +#ifdef ncdf + integer (kind=int_kind) :: & + nrec , & ! record number to read + n2, n4 , & ! like ixm and ixp, but + ! adjusted at beginning and end of data + arg , & ! value of time argument in field_data + fid ! file id for netCDF routines + + call ice_timer_start(timer_readwrite) ! reading/writing + + if (istep1 > check_step) dbug = .true. !! debugging + + if (my_task==master_task .and. (dbug)) then + write(nu_diag,*) ' ', trim(data_file) + endif + + if (flag) then + + !----------------------------------------------------------------- + ! Initialize record counters + ! (n2, n4 will change only at the very beginning or end of + ! a forcing cycle.) + !----------------------------------------------------------------- + n2 = ixm + n4 = ixp + arg = 0 + + !----------------------------------------------------------------- + ! read data + !----------------------------------------------------------------- + + if (ixm /= -99) then + ! currently in first half of data interval + if (ixx <= 1) then + if (yr > fyear_init) then ! get data from previous year + call file_year (data_file, yr-1) + else ! yr = fyear_init, no prior data exists + if (maxrec > 12) then ! extrapolate from first record + if (ixx == 1) n2 = ixx + else ! go to end of fyear_final + call file_year (data_file, fyear_final) + endif + endif ! yr > fyear_init + endif ! 
ixx <= 1 + + call ice_open_nc (data_file, fid) + + arg = 1 + nrec = recd + n2 + + call ice_read_nc & + (fid, nrec, fieldname, field_data(:,:,arg,:), dbug, & + field_loc, field_type) + + if (ixx==1) call ice_close_nc(fid) + endif ! ixm ne -99 + + ! always read ixx data from data file for current year + call file_year (data_file, yr) + call ice_open_nc (data_file, fid) + + arg = arg + 1 + nrec = recd + ixx + + call ice_read_nc & + (fid, nrec, fieldname, field_data(:,:,arg,:), dbug, & + field_loc, field_type) + + if (ixp /= -99) then + ! currently in latter half of data interval + if (ixx==maxrec) then + if (yr < fyear_final) then ! get data from following year + call ice_close_nc(fid) + call file_year (data_file, yr+1) + call ice_open_nc (data_file, fid) + else ! yr = fyear_final, no more data exists + if (maxrec > 12) then ! extrapolate from ixx + n4 = ixx + else ! go to beginning of fyear_init + call ice_close_nc(fid) + call file_year (data_file, fyear_init) + call ice_open_nc (data_file, fid) + + endif + endif ! yr < fyear_final + endif ! ixx = maxrec + + arg = arg + 1 + nrec = recd + n4 + + call ice_read_nc & + (fid, nrec, fieldname, field_data(:,:,arg,:), dbug, & + field_loc, field_type) + endif ! ixp /= -99 + + call ice_close_nc(fid) + + endif ! flag + + call ice_timer_stop(timer_readwrite) ! reading/writing + +#else + field_data = c0 ! to satisfy intent(out) attribute +#endif + end subroutine read_data_nc + +!======================================================================= + + subroutine read_clim_data (readflag, recd, ixm, ixx, ixp, & + data_file, field_data, & + field_loc, field_type) + +! Read data needed for interpolation, as in read_data. +! Assume a one-year cycle of climatological data, so that there is +! no need to get data from other years or to extrapolate data beyond +! the forcing time period. + + use ice_diagnostics, only: check_step + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_readwrite + + logical (kind=log_kind),intent(in) :: readflag + + integer (kind=int_kind), intent(in) :: & + recd , & ! baseline record number + ixm,ixx,ixp ! record numbers of 3 data values + ! relative to recd + + character (char_len_long), intent(in) :: data_file + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + real (kind=dbl_kind), dimension(nx_block,ny_block,2,max_blocks), & + intent(out) :: & + field_data ! 2 values needed for interpolation + + ! local variables + + integer (kind=int_kind) :: & + nbits , & ! = 32 for single precision, 64 for double + nrec , & ! record number to read + arg ! value of time argument in field_data + + call ice_timer_start(timer_readwrite) ! reading/writing + + nbits = 64 ! double precision data + + if (istep1 > check_step) dbug = .true. !! debugging + + if (my_task==master_task .and. (dbug)) & + write(nu_diag,*) ' ', trim(data_file) + + if (readflag) then + + !----------------------------------------------------------------- + ! 
read data + !----------------------------------------------------------------- + + call ice_open (nu_forcing, data_file, nbits) + + arg = 0 + if (ixm /= -99) then + arg = 1 + nrec = recd + ixm + call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & + 'rda8', dbug, field_loc, field_type) + endif + + arg = arg + 1 + nrec = recd + ixx + call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & + 'rda8', dbug, field_loc, field_type) + + if (ixp /= -99) then + arg = arg + 1 + nrec = recd + ixp + call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & + 'rda8', dbug, field_loc, field_type) + endif + + if (my_task == master_task) close (nu_forcing) + endif ! readflag + + call ice_timer_stop(timer_readwrite) ! reading/writing + + end subroutine read_clim_data + +!======================================================================= + + subroutine read_clim_data_nc (readflag, recd, ixm, ixx, ixp, & + data_file, fieldname, field_data, & + field_loc, field_type) + +! Read data needed for interpolation, as in read_data. +! Assume a one-year cycle of climatological data, so that there is +! no need to get data from other years or to extrapolate data beyond +! the forcing time period. + + use ice_diagnostics, only: check_step + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_readwrite + + logical (kind=log_kind),intent(in) :: readflag + + integer (kind=int_kind), intent(in) :: & + recd , & ! baseline record number + ixm,ixx,ixp ! record numbers of 3 data values + ! relative to recd + + character (char_len_long), intent(in) :: data_file + + character (char_len), intent(in) :: & + fieldname ! field name in netCDF file + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + real (kind=dbl_kind), dimension(nx_block,ny_block,2,max_blocks), & + intent(out) :: & + field_data ! 2 values needed for interpolation + + ! local variables + + integer (kind=int_kind) :: & + nbits , & ! = 32 for single precision, 64 for double + nrec , & ! record number to read + arg , & ! value of time argument in field_data + fid ! file id for netCDF routines + + call ice_timer_start(timer_readwrite) ! reading/writing + + nbits = 64 ! double precision data + + if (istep1 > check_step) dbug = .true. !! debugging + + if (my_task==master_task .and. (dbug)) & + write(nu_diag,*) ' ', trim(data_file) + + if (readflag) then + + !----------------------------------------------------------------- + ! read data + !----------------------------------------------------------------- + + call ice_open_nc (data_file, fid) + + arg = 0 + if (ixm /= -99) then + arg = 1 + nrec = recd + ixm + call ice_read_nc & + (fid, nrec, fieldname, field_data(:,:,arg,:), & + dbug, field_loc, field_type) + endif + + arg = arg + 1 + nrec = recd + ixx + call ice_read_nc & + (fid, nrec, fieldname, field_data(:,:,arg,:), & + dbug, field_loc, field_type) + + if (ixp /= -99) then + arg = arg + 1 + nrec = recd + ixp + call ice_read_nc & + (fid, nrec, fieldname, field_data(:,:,arg,:), & + dbug, field_loc, field_type) + endif + + if (my_task == master_task) call ice_close_nc (fid) + endif ! readflag + + call ice_timer_stop(timer_readwrite) ! reading/writing + + end subroutine read_clim_data_nc + +!======================================================================= + + subroutine interp_coeff_monthly (recslot) + +! Compute coefficients for interpolating monthly data to current time step. 
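!-------------------------------------------------------------------
! Illustrative sketch (commented out), not module code: the readers
! above fill field_data(:,:,1:2,:) with the two records bracketing
! the current time, and interp_coeff_monthly / interp_coeff below
! reduce to the same two-point linear weighting in time,
! c1intp = (t2-tt)/(t2-t1) and c2intp = 1 - c1intp, which
! interpolate_data then applies. A standalone check with arbitrary
! demo times and values:
!-------------------------------------------------------------------
!      program interp_weight_sketch
!        implicit none
!        double precision :: t1, t2, tt, c1intp, c2intp, f1, f2
!        t1 = 0.0d0          ! time of earlier record (s)
!        t2 = 86400.0d0      ! time of later record (s)
!        tt = 21600.0d0      ! current model time, 6 h into the interval
!        c1intp = (t2 - tt) / (t2 - t1)   ! weight of the earlier record
!        c2intp = 1.0d0 - c1intp          ! weight of the later record
!        f1 = 10.0d0                      ! field value at t1
!        f2 = 14.0d0                      ! field value at t2
!        write(*,*) 'weights      =', c1intp, c2intp        ! 0.75  0.25
!        write(*,*) 'interpolated =', c1intp*f1 + c2intp*f2 ! 11.0
!      end program interp_weight_sketch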
+ + use ice_constants, only: c1, secday + + integer (kind=int_kind), intent(in) :: & + recslot ! slot (1 or 2) for current record + + ! local variables + + real (kind=dbl_kind) :: & + tt , & ! seconds elapsed in current year + t1, t2 ! seconds elapsed at month midpoint + + real (kind=dbl_kind) :: & + daymid(0:13) ! month mid-points + + daymid(1:13) = 14._dbl_kind ! time frame ends 0 sec into day 15 + daymid(0) = 14._dbl_kind - daymo(12) ! Dec 15, 0 sec + + ! make time cyclic + tt = mod(ftime/secday,dayyr) + + ! Find neighboring times + + if (recslot==2) then ! first half of month + t2 = daycal(month) + daymid(month) ! midpoint, current month + if (month == 1) then + t1 = daymid(0) ! Dec 15 (0 sec) + else + t1 = daycal(month-1) + daymid(month-1) ! midpoint, previous month + endif + else ! second half of month + t1 = daycal(month) + daymid(month) ! midpoint, current month + t2 = daycal(month+1) + daymid(month+1)! day 15 of next month (0 sec) + endif + + ! Compute coefficients + c1intp = (t2 - tt) / (t2 - t1) + c2intp = c1 - c1intp + + end subroutine interp_coeff_monthly + +!======================================================================= + + subroutine interp_coeff (recnum, recslot, secint, dataloc) + +! Compute coefficients for interpolating data to current time step. +! Works for any data interval that divides evenly into a +! year (daily, 6-hourly, etc.) +! Use interp_coef_monthly for monthly data. + + use ice_constants, only: c1, p5, secday + + integer (kind=int_kind), intent(in) :: & + recnum , & ! record number for current data value + recslot , & ! spline slot for current record + dataloc ! = 1 for data located in middle of time interval + ! = 2 for date located at end of time interval + + real (kind=dbl_kind), intent(in) :: & + secint ! seconds in data interval + + ! local variables + + real (kind=dbl_kind) :: & + secyr ! seconds in a year + + real (kind=dbl_kind) :: & + tt , & ! seconds elapsed in current year + t1, t2 , & ! seconds elapsed at data points + rcnum ! recnum => dbl_kind + + secyr = dayyr * secday ! seconds in a year + tt = mod(ftime,secyr) + + ! Find neighboring times + rcnum = real(recnum,kind=dbl_kind) + if (recslot==2) then ! current record goes in slot 2 + if (dataloc==1) then ! data located at middle of interval + t2 = (rcnum-p5)*secint + else ! data located at end of interval + t2 = rcnum*secint + endif + t1 = t2 - secint ! - 1 interval + else ! recslot = 1 + if (dataloc==1) then ! data located at middle of interval + t1 = (rcnum-p5)*secint + else + t1 = rcnum*secint ! data located at end of interval + endif + t2 = t1 + secint ! + 1 interval + endif + + ! Compute coefficients + c1intp = abs((t2 - tt) / (t2 - t1)) + c2intp = c1 - c1intp + + end subroutine interp_coeff + +!======================================================================= + + subroutine interpolate_data (field_data, field) + +! Linear interpolation + +! author: Elizabeth C. Hunke, LANL + + use ice_domain, only: nblocks + + real (kind=dbl_kind), dimension(nx_block,ny_block,2,max_blocks), & + intent(in) :: & + field_data ! 2 values used for interpolation + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(out) :: & + field ! interpolated field + + ! 
local variables + + integer (kind=int_kind) :: i,j, iblk + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + field(i,j,iblk) = c1intp * field_data(i,j,1,iblk) & + + c2intp * field_data(i,j,2,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + end subroutine interpolate_data + +!======================================================================= + + subroutine file_year (data_file, yr) + +! Construct the correct name of the atmospheric data file +! to be read, given the year and assuming the naming convention +! that filenames end with 'yyyy.dat' or 'yyyy.r' or 'yyyy.nc'. + + character (char_len_long), intent(inout) :: data_file + + integer (kind=int_kind), intent(in) :: yr + + character (char_len_long) :: tmpname + + integer (kind=int_kind) :: i + + if (trim(atm_data_type) == 'hadgem') then ! netcdf + i = index(data_file,'.nc') - 5 + tmpname = data_file + write(data_file,'(a,i4.4,a)') tmpname(1:i), yr, '.nc' + else ! LANL/NCAR naming convention + i = index(data_file,'.dat') - 5 + tmpname = data_file + write(data_file,'(a,i4.4,a)') tmpname(1:i), yr, '.dat' + endif + + end subroutine file_year + +!======================================================================= + + subroutine prepare_forcing (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + hm, & + Tair, fsw, & + cldf, flw, & + frain, fsnow, & + Qa, rhoa, & + uatm, vatm, & + strax, stray, & + zlvl, wind, & + swvdr, swvdf, & + swidr, swidf, & + potT, ANGLET, & + Tsfc, sst, & + aice) + + use ice_constants, only: c0, c1, c10, c12, c4, & + secday, Tffresh, stefan_boltzmann, & + emissivity, qqqocn, TTTocn + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + Tair , & ! air temperature (K) + ANGLET , & ! ANGLE converted to T-cells + Tsfc , & ! ice skin temperature + sst , & ! sea surface temperature + aice , & ! ice area fraction + hm ! land mask + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + fsw , & ! incoming shortwave radiation (W/m^2) + cldf , & ! cloud fraction + frain , & ! rainfall rate (kg/m^2 s) + fsnow , & ! snowfall rate (kg/m^2 s) + Qa , & ! specific humidity (kg/kg) + rhoa , & ! air density (kg/m^3) + uatm , & ! wind velocity components (m/s) + vatm , & + strax , & ! wind stress components (N/m^2) + stray , & + zlvl , & ! atm level height (m) + wind , & ! wind speed (m/s) + flw , & ! incoming longwave radiation (W/m^2) + swvdr , & ! sw down, visible, direct (W/m^2) + swvdf , & ! sw down, visible, diffuse (W/m^2) + swidr , & ! sw down, near IR, direct (W/m^2) + swidf , & ! sw down, near IR, diffuse (W/m^2) + potT ! air potential temperature (K) + + ! local variables + + integer (kind=int_kind) :: & + i, j + + real (kind=dbl_kind) :: workx, worky, & + precip_factor, zlvl0 + + do j = jlo, jhi + do i = ilo, ihi + + !----------------------------------------------------------------- + ! make sure interpolated values are physically realistic + !----------------------------------------------------------------- + cldf (i,j) = max(min(cldf(i,j),c1),c0) + fsw (i,j) = max(fsw(i,j),c0) + fsnow(i,j) = max(fsnow(i,j),c0) + rhoa (i,j) = max(rhoa(i,j),c0) + Qa (i,j) = max(Qa(i,j),c0) + + enddo ! i + enddo ! j + + !----------------------------------------------------------------- + ! 
calculations specific to datasets + !----------------------------------------------------------------- + + if (trim(atm_data_type) == 'ncar') then + + ! precip is in mm/month + + zlvl0 = c10 + + do j = jlo, jhi + do i = ilo, ihi + ! correct known biases in NCAR data (as in CCSM latm) + Qa (i,j) = Qa (i,j) * 0.94_dbl_kind + fsw(i,j) = fsw(i,j) * 0.92_dbl_kind + + ! downward longwave as in Parkinson and Washington (1979) + call longwave_parkinson_washington(Tair(i,j), cldf(i,j), & + flw(i,j)) + enddo + enddo + + elseif (trim(atm_data_type) == 'LYq') then + + ! precip is in mm/s + + zlvl0 = c10 + + do j = jlo, jhi + do i = ilo, ihi + ! longwave based on Rosati and Miyakoda, JPO 18, p. 1607 (1988) + call longwave_rosati_miyakoda(cldf(i,j), Tsfc(i,j), & + aice(i,j), sst(i,j), & + Qa(i,j), Tair(i,j), & + hm(i,j), flw(i,j)) + enddo + enddo + + elseif (trim(atm_data_type) == 'oned') then ! rectangular grid + + ! precip is in kg/m^2/s + + zlvl0 = c10 + + do j = jlo, jhi + do i = ilo, ihi + + !----------------------------------------------------------------- + ! compute downward longwave as in Parkinson and Washington (1979) + !----------------------------------------------------------------- + + ! downward longwave as in Parkinson and Washington (1979) + call longwave_parkinson_washington(Tair(i,j), cldf(i,j), & + flw(i,j)) + + ! longwave based on Rosati and Miyakoda, JPO 18, p. 1607 (1988) +! call longwave_rosati_miyakoda(cldf(i,j), Tsfc(i,j), & +! aice(i,j), sst(i,j), & +! Qa(i,j), Tair(i,j), & +! hm(i,j), flw(i,j)) + enddo + enddo + + endif ! atm_data_type + + !----------------------------------------------------------------- + ! Compute other fields needed by model + !----------------------------------------------------------------- + + ! convert precipitation units to kg/m^2 s + if (trim(precip_units) == 'mm_per_month') then + precip_factor = c12/(secday*days_per_year) + elseif (trim(precip_units) == 'mm_per_day') then + precip_factor = c1/secday + elseif (trim(precip_units) == 'mm_per_sec' .or. & + trim(precip_units) == 'mks') then + precip_factor = c1 ! mm/sec = kg/m^2 s + endif + + do j = jlo, jhi + do i = ilo, ihi + + zlvl(i,j) = zlvl0 + potT(i,j) = Tair(i,j) + + ! divide shortwave into spectral bands + swvdr(i,j) = fsw(i,j)*frcvdr ! visible direct + swvdf(i,j) = fsw(i,j)*frcvdf ! visible diffuse + swidr(i,j) = fsw(i,j)*frcidr ! near IR direct + swidf(i,j) = fsw(i,j)*frcidf ! near IR diffuse + + ! convert precipitation units to kg/m^2 s + fsnow(i,j) = fsnow(i,j) * precip_factor + enddo ! i + enddo ! j + + ! determine whether precip is rain or snow + ! HadGEM forcing provides separate snowfall and rainfall rather + ! than total precipitation + if (trim(atm_data_type) /= 'hadgem') then + + do j = jlo, jhi + do i = ilo, ihi + frain(i,j) = c0 + if (Tair(i,j) >= Tffresh) then + frain(i,j) = fsnow(i,j) + fsnow(i,j) = c0 + endif + enddo ! i + enddo ! j + + endif + + if (calc_strair) then + + do j = jlo, jhi + do i = ilo, ihi + + wind(i,j) = sqrt(uatm(i,j)**2 + vatm(i,j)**2) + + !----------------------------------------------------------------- + ! Rotate zonal/meridional vectors to local coordinates. + ! Velocity comes in on T grid, but is oriented geographically --- + ! need to rotate to pop-grid FIRST using ANGLET + ! then interpolate to the U-cell centers (otherwise we + ! interpolate across the pole). + ! Use ANGLET which is on the T grid ! + ! Atmo variables are needed in T cell centers in subroutine + ! atmo_boundary_layer, and are interpolated to the U grid later as + ! necessary. 
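!-------------------------------------------------------------------
! Illustrative sketch (commented out), not module code: the rotation
! described in the comment above maps geographically oriented wind
! (or stress) components onto the model grid using the local T-cell
! angle ANGLET, exactly as in the workx/worky lines that follow. A
! standalone check with arbitrary demo values, confirming that the
! rotation leaves the wind speed unchanged:
!-------------------------------------------------------------------
!      program rotate_wind_sketch
!        implicit none
!        double precision, parameter :: pi = 3.14159265358979d0
!        double precision :: uatm, vatm, ANGLET, workx, worky
!        uatm   = 5.0d0        ! eastward component (m/s)
!        vatm   = 3.0d0        ! northward component (m/s)
!        ANGLET = pi/6.0d0     ! grid rotation angle at this T point (rad)
!        workx = uatm
!        worky = vatm
!        uatm = workx*cos(ANGLET) + worky*sin(ANGLET)  ! grid x-component
!        vatm = worky*cos(ANGLET) - workx*sin(ANGLET)  ! grid y-component
!        write(*,*) 'speed before:', sqrt(workx**2 + worky**2)
!        write(*,*) 'speed after :', sqrt(uatm**2  + vatm**2)
!      end program rotate_wind_sketch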
+ !----------------------------------------------------------------- + workx = uatm(i,j) ! wind velocity, m/s + worky = vatm(i,j) + uatm (i,j) = workx*cos(ANGLET(i,j)) & ! convert to POP grid + + worky*sin(ANGLET(i,j)) ! note uatm, vatm, wind + vatm (i,j) = worky*cos(ANGLET(i,j)) & ! are on the T-grid here + - workx*sin(ANGLET(i,j)) + + enddo ! i + enddo ! j + + else ! strax, stray, wind are read from files + + do j = jlo, jhi + do i = ilo, ihi + + workx = strax(i,j) ! wind stress + worky = stray(i,j) + strax(i,j) = workx*cos(ANGLET(i,j)) & ! convert to POP grid + + worky*sin(ANGLET(i,j)) ! note strax, stray, wind + stray(i,j) = worky*cos(ANGLET(i,j)) & ! are on the T-grid here + - workx*sin(ANGLET(i,j)) + + enddo ! i + enddo ! j + + endif ! calc_strair + + end subroutine prepare_forcing + +!======================================================================= + + subroutine longwave_parkinson_washington(Tair, cldf, flw) + + use ice_constants, only: c1, Tffresh, stefan_boltzmann + + ! compute downward longwave as in Parkinson and Washington (1979) + ! (for now) + ! Parkinson, C. L. and W. M. Washington (1979), + ! Large-scale numerical-model of sea ice, + ! JGR, 84, 311-337, doi:10.1029/JC084iC01p00311 + + real(kind=dbl_kind), intent(in) :: & + Tair , & ! air temperature (K) + cldf ! cloud fraction + + real(kind=dbl_kind), intent(out) :: & + flw ! incoming longwave radiation (W/m^2) + + flw = stefan_boltzmann*Tair**4 & + * (c1 - 0.261_dbl_kind & + * exp(-7.77e-4_dbl_kind*(Tffresh - Tair)**2)) & + * (c1 + 0.275_dbl_kind*cldf) + + end subroutine longwave_parkinson_washington + +!======================================================================= + + subroutine longwave_rosati_miyakoda(cldf, Tsfc, & + aice, sst, & + Qa, Tair, & + hm, flw) + + use ice_constants, only: c1, c4, c1000, & + Tffresh, stefan_boltzmann, emissivity + + ! based on + ! Rosati, A. and K. Miyakoda (1988), + ! A general-circulation model for upper ocean simulation, + ! J. Physical Oceanography, 18, 1601-1626, + ! doi:10.1175/1520-0485(1988)018<1601:AGCMFU>2.0.CO;2 + + real(kind=dbl_kind), intent(in) :: & + cldf , & ! cloud fraction + Tsfc , & ! ice skin temperature + aice , & ! ice area fraction + sst , & ! sea surface temperature + Qa , & ! specific humidity (kg/kg) + Tair , & ! air temperature (K) + hm ! land mask + + real(kind=dbl_kind), intent(out) :: & + flw ! incoming longwave radiation (W/m^2) + + real(kind=dbl_kind) :: & + fcc , & ! cloudiness modification + sstk , & ! ice/ocean surface temperature (K) + rtea , & ! square root of the vapour pressure + ptem , & ! potential air temperature (K) + qlwm + + fcc = c1 - 0.8_dbl_kind * cldf + sstk = (Tsfc * aice & + + sst * (c1 - aice)) + Tffresh + rtea = sqrt(c1000*Qa / & + (0.622_dbl_kind+0.378_dbl_kind*Qa)) + ptem = Tair ! get this from stability? + qlwm = ptem * ptem * ptem & + * ( ptem*(0.39_dbl_kind-0.05_dbl_kind*rtea)*fcc & + + c4*(sstk-ptem) ) + flw = emissivity*stefan_boltzmann * ( sstk**4 - qlwm ) + flw = flw * hm ! land mask + + end subroutine longwave_rosati_miyakoda + +!======================================================================= +! NCAR atmospheric forcing +!======================================================================= + + subroutine ncar_files (yr) + +! Construct filenames based on the LANL naming conventions for NCAR data. +! Edit for other directory structures or filenames. +! Note: The year number in these filenames does not matter, because +! subroutine file_year will insert the correct year. 
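!-------------------------------------------------------------------
! Illustrative sketch (commented out), not module code: a worked
! single-point evaluation of the Parkinson and Washington (1979)
! downwelling longwave used in longwave_parkinson_washington above,
! with nominal constants (ice_constants supplies the model values of
! stefan_boltzmann and Tffresh) and arbitrary demo inputs:
!-------------------------------------------------------------------
!      program longwave_pw_sketch
!        implicit none
!        double precision, parameter :: stefan_boltzmann = 5.67d-8  ! W/m^2/K^4 (nominal)
!        double precision, parameter :: Tffresh = 273.15d0          ! freezing temp of fresh water (K)
!        double precision :: Tair, cldf, flw
!        Tair = 263.15d0    ! air temperature (K)
!        cldf = 0.5d0       ! cloud fraction
!        flw = stefan_boltzmann*Tair**4                              &
!            * (1.0d0 - 0.261d0*exp(-7.77d-4*(Tffresh - Tair)**2))   &
!            * (1.0d0 + 0.275d0*cldf)
!        write(*,*) 'downwelling longwave (W/m^2) =', flw   ! about 235 W/m^2
!      end program longwave_pw_sketch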
+ + integer (kind=int_kind), intent(in) :: & + yr ! current forcing year + + fsw_file = & + trim(atm_data_dir)//'ISCCPM/MONTHLY/RADFLX/swdn.1996.dat' + call file_year(fsw_file,yr) + + flw_file = & + trim(atm_data_dir)//'ISCCPM/MONTHLY/RADFLX/cldf.1996.dat' + call file_year(flw_file,yr) + + rain_file = & + trim(atm_data_dir)//'MXA/MONTHLY/PRECIP/prec.1996.dat' + call file_year(rain_file,yr) + + uwind_file = & + trim(atm_data_dir)//'NCEP/4XDAILY/STATES/u_10.1996.dat' + call file_year(uwind_file,yr) + + vwind_file = & + trim(atm_data_dir)//'NCEP/4XDAILY/STATES/v_10.1996.dat' + call file_year(vwind_file,yr) + + tair_file = & + trim(atm_data_dir)//'NCEP/4XDAILY/STATES/t_10.1996.dat' + call file_year(tair_file,yr) + + humid_file = & + trim(atm_data_dir)//'NCEP/4XDAILY/STATES/q_10.1996.dat' + call file_year(humid_file,yr) + + rhoa_file = & + trim(atm_data_dir)//'NCEP/4XDAILY/STATES/dn10.1996.dat' + call file_year(rhoa_file,yr) + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'Forcing data year =', fyear + write (nu_diag,*) 'Atmospheric data files:' + write (nu_diag,*) trim(fsw_file) + write (nu_diag,*) trim(flw_file) + write (nu_diag,*) trim(rain_file) + write (nu_diag,*) trim(uwind_file) + write (nu_diag,*) trim(vwind_file) + write (nu_diag,*) trim(tair_file) + write (nu_diag,*) trim(humid_file) + write (nu_diag,*) trim(rhoa_file) + endif ! master_task + + end subroutine ncar_files + +!======================================================================= + + subroutine ncar_data + + use ice_constants, only: c4, p5, secday, & + field_loc_center, field_type_scalar, field_type_vector + use ice_flux, only: fsw, fsnow, Tair, uatm, vatm, rhoa, Qa + + integer (kind=int_kind) :: & + ixm,ixx,ixp , & ! record numbers for neighboring months + recnum , & ! record number + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + dataloc , & ! = 1 for data located in middle of time interval + ! = 2 for date located at end of time interval + midmonth ! middle day of month + + real (kind=dbl_kind) :: & + sec6hr ! number of seconds in 6 hours + + logical (kind=log_kind) :: readm, read6 + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + midmonth = 15 ! data is given on 15th of every month +! midmonth = fix(p5 * real(daymo(month))) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = -99 ! other two points will be used + if (mday < midmonth) ixp = -99 + + ! Determine whether interpolation will use values 1:2 or 2:3 + ! recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + ! Read 2 monthly values + readm = .false. + if (istep==1 .or. (mday==midmonth .and. sec==0)) readm = .true. 
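!-------------------------------------------------------------------
! Illustrative sketch (commented out), not module code: the monthly
! bookkeeping just above (repeated in LY_data, hadgem_data and
! monthly_data) picks the records that bracket the current date,
! wrapping around the year, and flags the unneeded neighbour with
! -99 so read_data skips it. A standalone check for 5 January:
!-------------------------------------------------------------------
!      program month_record_sketch
!        implicit none
!        integer, parameter :: maxrec = 12       ! monthly data: 12 records
!        integer :: month, mday, midmonth, ixm, ixx, ixp, recslot
!        month    = 1                            ! January
!        mday     = 5                            ! day of month
!        midmonth = 15                           ! data assumed valid on the 15th
!        ixx = month                             ! current record
!        ixm = mod(month+maxrec-2,maxrec) + 1    ! previous month, wrapping 1 -> 12
!        ixp = mod(month,         maxrec) + 1    ! next month, wrapping 12 -> 1
!        if (mday >= midmonth) ixm = -99         ! second half: ixm not needed
!        if (mday <  midmonth) ixp = -99         ! first half: ixp not needed
!        recslot = 1                             ! latter half of month
!        if (mday < midmonth) recslot = 2        ! first half of month
!        write(*,*) 'ixm, ixx, ixp, recslot =', ixm, ixx, ixp, recslot  ! 12 1 -99 2
!      end program month_record_sketch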
+ + if (trim(atm_data_format) == 'bin') then + call read_data (readm, 0, fyear, ixm, month, ixp, & + maxrec, fsw_file, fsw_data, & + field_loc_center, field_type_scalar) + call read_data (readm, 0, fyear, ixm, month, ixp, & + maxrec, flw_file, cldf_data, & + field_loc_center, field_type_scalar) + call read_data (readm, 0, fyear, ixm, month, ixp, & + maxrec, rain_file, fsnow_data, & + field_loc_center, field_type_scalar) + else + call abort_ice ('nonbinary atm_data_format unavailable') +! The routine exists, for example: +! call read_data_nc (readm, 0, fyear, ixm, month, ixp, & +! maxrec, fsw_file, 'fsw', fsw_data, & +! field_loc_center, field_type_scalar) +! call read_data_nc (readm, 0, fyear, ixm, month, ixp, & +! maxrec, flw_file, 'cldf',cldf_data, & +! field_loc_center, field_type_scalar) +! call read_data_nc (readm, 0, fyear, ixm, month, ixp, & +! maxrec, rain_file,'prec',fsnow_data, & +! field_loc_center, field_type_scalar) + endif + + ! Interpolate to current time step + call interpolate_data (fsw_data, fsw) + call interpolate_data (cldf_data, cldf) + call interpolate_data (fsnow_data, fsnow) + + !------------------------------------------------------------------- + ! 6-hourly data + ! + ! Assume that the 6-hourly value is located at the end of the + ! 6-hour period. This is the convention for NCEP reanalysis data. + ! E.g. record 1 gives conditions at 6 am GMT on 1 January. + !------------------------------------------------------------------- + + dataloc = 2 ! data located at end of interval + sec6hr = secday/c4 ! seconds in 6 hours + maxrec = 1460 ! 365*4 + + ! current record number + recnum = 4*int(yday) - 3 + int(real(sec,kind=dbl_kind)/sec6hr) + + ! Compute record numbers for surrounding data + + ixm = mod(recnum+maxrec-2,maxrec) + 1 + ixx = mod(recnum-1, maxrec) + 1 +! ixp = mod(recnum, maxrec) + 1 + + ! Compute interpolation coefficients + ! If data is located at the end of the time interval, then the + ! data value for the current record always goes in slot 2. + + recslot = 2 + ixp = -99 + call interp_coeff (recnum, recslot, sec6hr, dataloc) + + ! Read + read6 = .false. + if (istep==1 .or. oldrecnum /= recnum) read6 = .true. + + if (trim(atm_data_format) == 'bin') then + call read_data (read6, 0, fyear, ixm, ixx, ixp, & + maxrec, tair_file, Tair_data, & + field_loc_center, field_type_scalar) + call read_data (read6, 0, fyear, ixm, ixx, ixp, & + maxrec, uwind_file, uatm_data, & + field_loc_center, field_type_vector) + call read_data (read6, 0, fyear, ixm, ixx, ixp, & + maxrec, vwind_file, vatm_data, & + field_loc_center, field_type_vector) + call read_data (read6, 0, fyear, ixm, ixx, ixp, & + maxrec, rhoa_file, rhoa_data, & + field_loc_center, field_type_scalar) + call read_data (read6, 0, fyear, ixm, ixx, ixp, & + maxrec, humid_file, Qa_data, & + field_loc_center, field_type_scalar) + else + call abort_ice ('nonbinary atm_data_format unavailable') + endif + + ! Interpolate + call interpolate_data (Tair_data, Tair) + call interpolate_data (uatm_data, uatm) + call interpolate_data (vatm_data, vatm) + call interpolate_data (rhoa_data, rhoa) + call interpolate_data (Qa_data, Qa) + + ! Save record number for next time step + oldrecnum = recnum + + end subroutine ncar_data + +!======================================================================= +! Large and Yeager forcing (AOMIP style) +!======================================================================= + + subroutine LY_files (yr) + +! Construct filenames based on the LANL naming conventions for CORE +! 
(Large and Yeager) data. +! Edit for other directory structures or filenames. +! Note: The year number in these filenames does not matter, because +! subroutine file_year will insert the correct year. + +! author: Elizabeth C. Hunke, LANL + + integer (kind=int_kind), intent(in) :: & + yr ! current forcing year + + flw_file = & + trim(atm_data_dir)//'MONTHLY/cldf.omip.dat' + + rain_file = & + trim(atm_data_dir)//'MONTHLY/prec.nmyr.dat' + + uwind_file = & + trim(atm_data_dir)//'4XDAILY/u_10.1996.dat' + call file_year(uwind_file,yr) + + vwind_file = & + trim(atm_data_dir)//'4XDAILY/v_10.1996.dat' + call file_year(vwind_file,yr) + + tair_file = & + trim(atm_data_dir)//'4XDAILY/t_10.1996.dat' + call file_year(tair_file,yr) + + humid_file = & + trim(atm_data_dir)//'4XDAILY/q_10.1996.dat' + call file_year(humid_file,yr) + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'Forcing data year = ', fyear + write (nu_diag,*) 'Atmospheric data files:' + write (nu_diag,*) trim(flw_file) + write (nu_diag,*) trim(rain_file) + write (nu_diag,*) trim(uwind_file) + write (nu_diag,*) trim(vwind_file) + write (nu_diag,*) trim(tair_file) + write (nu_diag,*) trim(humid_file) + endif ! master_task + + end subroutine LY_files + +!======================================================================= +! +! read Large and Yeager atmospheric data +! note: also uses AOMIP protocol, in part + + subroutine LY_data + + use ice_blocks, only: block, get_block + use ice_constants, only: c4, p1, p5, secday, Tffresh, & + field_loc_center, field_type_scalar, field_type_vector + use ice_global_reductions, only: global_minval, global_maxval + use ice_domain, only: nblocks, distrb_info, blocks_ice + use ice_flux, only: fsnow, Tair, uatm, vatm, Qa, fsw + use ice_grid, only: hm, tlon, tlat, tmask, umask + use ice_state, only: aice + + integer (kind=int_kind) :: & + i, j , & + ixm,ixx,ixp , & ! record numbers for neighboring months + recnum , & ! record number + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth , & ! middle day of month + dataloc , & ! = 1 for data located in middle of time interval + ! = 2 for date located at end of time interval + iblk , & ! block index + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + sec6hr , & ! number of seconds in 6 hours + vmin, vmax + + logical (kind=log_kind) :: readm, read6 + + type (block) :: & + this_block ! block information for current block + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + midmonth = 15 ! data is given on 15th of every month +! midmonth = fix(p5 * real(daymo(month))) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = -99 ! other two points will be used + if (mday < midmonth) ixp = -99 + + ! Determine whether interpolation will use values 1:2 or 2:3 + ! recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + ! 
Read 2 monthly values + readm = .false. + if (istep==1 .or. (mday==midmonth .and. sec==0)) readm = .true. + + call read_clim_data (readm, 0, ixm, month, ixp, & + flw_file, cldf_data, field_loc_center, field_type_scalar) + call read_clim_data (readm, 0, ixm, month, ixp, & + rain_file, fsnow_data, field_loc_center, field_type_scalar) + + call interpolate_data (cldf_data, cldf) + call interpolate_data (fsnow_data, fsnow) ! units mm/s = kg/m^2/s + + !------------------------------------------------------------------- + ! 6-hourly data + ! + ! Assume that the 6-hourly value is located at the end of the + ! 6-hour period. This is the convention for NCEP reanalysis data. + ! E.g. record 1 gives conditions at 6 am GMT on 1 January. + !------------------------------------------------------------------- + + dataloc = 2 ! data located at end of interval + sec6hr = secday/c4 ! seconds in 6 hours + maxrec = 1460 ! 365*4 + + ! current record number + recnum = 4*int(yday) - 3 + int(real(sec,kind=dbl_kind)/sec6hr) + + ! Compute record numbers for surrounding data (2 on each side) + + ixm = mod(recnum+maxrec-2,maxrec) + 1 + ixx = mod(recnum-1, maxrec) + 1 +! ixp = mod(recnum, maxrec) + 1 + + ! Compute interpolation coefficients + ! If data is located at the end of the time interval, then the + ! data value for the current record goes in slot 2 + + recslot = 2 + ixp = -99 + call interp_coeff (recnum, recslot, sec6hr, dataloc) + + ! Read + read6 = .false. + if (istep==1 .or. oldrecnum .ne. recnum) read6 = .true. + + if (trim(atm_data_format) == 'bin') then + call read_data (read6, 0, fyear, ixm, ixx, ixp, maxrec, & + tair_file, Tair_data, & + field_loc_center, field_type_scalar) + call read_data (read6, 0, fyear, ixm, ixx, ixp, maxrec, & + uwind_file, uatm_data, & + field_loc_center, field_type_vector) + call read_data (read6, 0, fyear, ixm, ixx, ixp, maxrec, & + vwind_file, vatm_data, & + field_loc_center, field_type_vector) + call read_data (read6, 0, fyear, ixm, ixx, ixp, maxrec, & + humid_file, Qa_data, & + field_loc_center, field_type_scalar) + else + call abort_ice ('nonbinary atm_data_format unavailable') + endif + + ! Interpolate + call interpolate_data (Tair_data, Tair) + call interpolate_data (uatm_data, uatm) + call interpolate_data (vatm_data, vatm) + call interpolate_data (Qa_data, Qa) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + ! limit summer Tair values where ice is present + do j = 1, ny_block + do i = 1, nx_block + if (aice(i,j,iblk) > p1) Tair(i,j,iblk) = min(Tair(i,j,iblk), Tffresh+p1) + enddo + enddo + + call Qa_fixLY(nx_block, ny_block, & + Tair (:,:,iblk), & + Qa (:,:,iblk)) + + do j = 1, ny_block + do i = 1, nx_block + Qa (i,j,iblk) = Qa (i,j,iblk) * hm(i,j,iblk) + Tair(i,j,iblk) = Tair(i,j,iblk) * hm(i,j,iblk) + uatm(i,j,iblk) = uatm(i,j,iblk) * hm(i,j,iblk) + vatm(i,j,iblk) = vatm(i,j,iblk) * hm(i,j,iblk) + enddo + enddo + + ! AOMIP + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call compute_shortwave(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + TLON (:,:,iblk), & + TLAT (:,:,iblk), & + hm (:,:,iblk), & + Qa (:,:,iblk), & + cldf (:,:,iblk), & + fsw (:,:,iblk)) + + enddo ! iblk + !$OMP END PARALLEL DO + + ! 
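!-------------------------------------------------------------------
! Illustrative sketch (commented out), not module code: the 6-hourly
! record number used above assumes each value is valid at the end of
! its 6-hour interval (record 1 = 06:00 GMT on 1 January), so the
! index advances at every 6-hour boundary. A standalone check for
! the first few intervals of 1 January:
!-------------------------------------------------------------------
!      program sixhourly_recnum_sketch
!        implicit none
!        double precision, parameter :: secday = 86400.0d0
!        double precision :: sec6hr
!        integer :: yday, sec, recnum
!        sec6hr = secday/4.0d0                   ! seconds in 6 hours
!        yday = 1                                ! 1 January
!        do sec = 0, 43200, 21600                ! 00:00, 06:00, 12:00 GMT
!           recnum = 4*yday - 3 + int(sec/sec6hr)
!           write(*,*) 'sec =', sec, '  recnum =', recnum   ! 1, 2, 3
!        enddo
!      end program sixhourly_recnum_sketch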
Save record number + oldrecnum = recnum + + if (dbug) then + if (my_task == master_task) write (nu_diag,*) 'LY_bulk_data' + vmin = global_minval(fsw,distrb_info,tmask) + + vmax = global_maxval(fsw,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'fsw',vmin,vmax + vmin = global_minval(cldf,distrb_info,tmask) + vmax = global_maxval(cldf,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'cldf',vmin,vmax + vmin =global_minval(fsnow,distrb_info,tmask) + vmax =global_maxval(fsnow,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'fsnow',vmin,vmax + vmin = global_minval(Tair,distrb_info,tmask) + vmax = global_maxval(Tair,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'Tair',vmin,vmax + vmin = global_minval(uatm,distrb_info,umask) + vmax = global_maxval(uatm,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'uatm',vmin,vmax + vmin = global_minval(vatm,distrb_info,umask) + vmax = global_maxval(vatm,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'vatm',vmin,vmax + vmin = global_minval(Qa,distrb_info,tmask) + vmax = global_maxval(Qa,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'Qa',vmin,vmax + + endif ! dbug + + end subroutine LY_data + +!======================================================================= +! +! AOMIP shortwave forcing +! standard calculation using solar declination angle +! then shortwave is reduced using a function of cloud fraction + + subroutine compute_shortwave(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + TLON, TLAT, hm, Qa, cldf, fsw) + +!---!------------------------------------------------------------------- +!---!------------------------------------------------------------------- + + use ice_constants, only: c0, c1, c12, c2, c180, c365, & + c3600, p1, p5, p6, pi, secday + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + TLON, TLAT , & ! longitude, latitude + Qa , & ! specific humidity + cldf , & ! cloud fraction + hm ! land mask + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + fsw ! shortwave + + real (kind=dbl_kind) :: & + hour_angle, & + solar_time, & + declin , & + cosZ , & + e, d , & + sw0 , & + deg2rad + + integer (kind=int_kind) :: & + i, j + + do j=jlo,jhi + do i=ilo,ihi + deg2rad = pi/c180 + solar_time = mod(real(sec,kind=dbl_kind),secday)/c3600 & + + c12*sin(p5*TLON(i,j)) + hour_angle = (c12 - solar_time)*pi/c12 + declin = 23.44_dbl_kind*cos((172._dbl_kind-yday) & + * c2*pi/c365)*deg2rad ! use dayyr instead of c365??? + cosZ = sin(TLAT(i,j))*sin(declin) & + + cos(TLAT(i,j))*cos(declin)*cos(hour_angle) + cosZ = max(cosZ,c0) + e = 1.e5*Qa(i,j)/(0.622_dbl_kind + 0.378_dbl_kind*Qa(i,j)) + d = (cosZ+2.7_dbl_kind)*e*1.e-5_dbl_kind+1.085_dbl_kind*cosZ+p1 + sw0 = 1353._dbl_kind*cosZ**2/d + sw0 = max(sw0,c0) + + ! total downward shortwave for cice + Fsw(i,j) = sw0*(c1-p6*cldf(i,j)**3) + Fsw(i,j) = Fsw(i,j)*hm(i,j) + enddo + enddo + + end subroutine compute_shortwave + +!======================================================================= +! +! prevents humidity from being super-saturated + + subroutine Qa_fixLY(nx_block, ny_block, Tair, Qa) + + use ice_constants, only: c1, c10, c2, Tffresh, puny + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! 
block dimensions + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + Tair ! air temperature + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + Qa ! specific humidity + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + worka + + worka = Tair - Tffresh + worka = c2 + (0.7859_dbl_kind + 0.03477_dbl_kind*worka) & + /(c1 + 0.00412_dbl_kind*worka) & ! 2+ converts ea mb -> Pa + + 0.00422_dbl_kind*worka ! for ice + ! vapor pressure + worka = (c10**worka) ! saturated + worka = max(worka,puny) ! puny over land to prevent division by zero + ! specific humidity + worka = 0.622_dbl_kind*worka/(1.e5_dbl_kind-0.378_dbl_kind*worka) + + Qa = min(Qa, worka) + + end subroutine Qa_fixLY + +!======================================================================= +! HadGEM or HadGAM atmospheric forcing +!======================================================================= + + subroutine hadgem_files (yr) + +! Construct filenames based on selected model options +! +! Note: The year number in these filenames does not matter, because +! subroutine file_year will insert the correct year. +! +! author: Alison McLaren, Met Office + + use ice_therm_shared, only: calc_Tsfc + use ice_ocean, only: oceanmixed_ice + + integer (kind=int_kind), intent(in) :: & + yr ! current forcing year + + integer (kind=int_kind) :: & + n ! thickness category index + + ! ----------------------------------------------------------- + ! Rainfall and snowfall + ! ----------------------------------------------------------- + + snow_file = & + trim(atm_data_dir)//'MONTHLY/snowfall.1996.nc' + call file_year(snow_file,yr) + + rain_file = & + trim(atm_data_dir)//'MONTHLY/rainfall.1996.nc' + call file_year(rain_file,yr) + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'Atmospheric data files:' + write (nu_diag,*) trim(rain_file) + write (nu_diag,*) trim(snow_file) + endif + + if (calc_strair) then + + ! -------------------------------------------------------- + ! Wind velocity + ! -------------------------------------------------------- + + uwind_file = & + trim(atm_data_dir)//'MONTHLY/u_10.1996.nc' + call file_year(uwind_file,yr) + + vwind_file = & + trim(atm_data_dir)//'MONTHLY/v_10.1996.nc' + call file_year(vwind_file,yr) + + if (my_task == master_task) then + write (nu_diag,*) trim(uwind_file) + write (nu_diag,*) trim(vwind_file) + endif + + else + + ! -------------------------------------------------------- + ! Wind stress + ! -------------------------------------------------------- + + strax_file = & + trim(atm_data_dir)//'MONTHLY/taux.1996.nc' + call file_year(strax_file,yr) + + stray_file = & + trim(atm_data_dir)//'MONTHLY/tauy.1996.nc' + call file_year(stray_file,yr) + + if (my_task == master_task) then + write (nu_diag,*) trim(strax_file) + write (nu_diag,*) trim(stray_file) + endif + + if (calc_Tsfc .or. oceanmixed_ice) then + + ! -------------------------------------------------- + ! Wind speed + ! -------------------------------------------------- + + wind_file = & + trim(atm_data_dir)//'MONTHLY/wind_10.1996.nc' + call file_year(wind_file,yr) + + if (my_task == master_task) then + write (nu_diag,*) trim(wind_file) + endif + + endif ! calc_Tsfc or oceanmixed_ice + + endif ! calc_strair + + ! -------------------------------------------------------------- + ! Atmosphere properties. Even if these fields are not + ! being used to force the ice (i.e. calc_Tsfc=.false.), they + ! are still needed to generate forcing for mixed layer model or + ! 
to calculate wind stress + ! -------------------------------------------------------------- + + if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then + + fsw_file = & + trim(atm_data_dir)//'MONTHLY/SW_incoming.1996.nc' + call file_year(fsw_file,yr) + + flw_file = & + trim(atm_data_dir)//'MONTHLY/LW_incoming.1996.nc' + call file_year(flw_file,yr) + + tair_file = & + trim(atm_data_dir)//'MONTHLY/t_10.1996.nc' + call file_year(tair_file,yr) + + humid_file = & + trim(atm_data_dir)//'MONTHLY/q_10.1996.nc' + call file_year(humid_file,yr) + + rhoa_file = & + trim(atm_data_dir)//'MONTHLY/rho_10.1996.nc' + call file_year(rhoa_file,yr) + + if (my_task == master_task) then + write (nu_diag,*) trim(fsw_file) + write (nu_diag,*) trim(flw_file) + write (nu_diag,*) trim(tair_file) + write (nu_diag,*) trim(humid_file) + write (nu_diag,*) trim(rhoa_file) + endif ! master_task + + endif ! calc_Tsfc or oceanmixed_ice or calc_strair + + if (.not. calc_Tsfc) then + + ! ------------------------------------------------------ + ! Sublimation, topmelt and botmelt + ! ------------------------------------------------------ + + do n = 1, ncat + + ! 'topmelt' = fsurf - fcondtop. + write(topmelt_file(n), '(a,i1,a)') & + trim(atm_data_dir)//'MONTHLY/topmeltn',n,'.1996.nc' + call file_year(topmelt_file(n),yr) + + ! 'botmelt' = fcondtop. + write(botmelt_file(n), '(a,i1,a)') & + trim(atm_data_dir)//'MONTHLY/botmeltn',n,'.1996.nc' + call file_year(botmelt_file(n),yr) + + enddo + + ! 'sublim' = - flat / Lsub. + sublim_file = & + trim(atm_data_dir)//'MONTHLY/sublim.1996.nc' + call file_year(sublim_file,yr) + + if (my_task == master_task) then + do n = 1, ncat + write (nu_diag,*) trim(topmelt_file(n)) + write (nu_diag,*) trim(botmelt_file(n)) + enddo + write (nu_diag,*) trim(sublim_file) + + endif + + endif ! .not. calc_Tsfc + + end subroutine hadgem_files + +!======================================================================= + +! read HadGEM or HadGAM atmospheric data + + subroutine hadgem_data + +! authors: Alison McLaren, Met Office + + use ice_constants, only: p5, Lsub, & + field_loc_center, field_type_scalar, field_type_vector + use ice_domain, only: nblocks + use ice_flux, only: fsnow, frain, uatm, vatm, strax, stray, wind, & + fsw, flw, Tair, rhoa, Qa, fcondtopn_f, fsurfn_f, flatn_f + use ice_state, only: aice,aicen + use ice_ocean, only: oceanmixed_ice + use ice_therm_shared, only: calc_Tsfc + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + n , & ! thickness category index + iblk , & ! block index + ixm,ixp , & ! record numbers for neighboring months + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth ! middle day of month + + logical (kind=log_kind) :: readm + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & + topmelt, & ! temporary fields + botmelt, & + sublim + + character (char_len) :: & + fieldname ! field name in netcdf file + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + midmonth = 15 ! data is given on 15th of every month +! midmonth = fix(p5 * real(daymo(month))) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = -99 ! other two points will be used + if (mday < midmonth) ixp = -99 + + ! 
Determine whether interpolation will use values 1:2 or 2:3 + ! recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + ! Read 2 monthly values + readm = .false. + if (istep==1 .or. (mday==midmonth .and. sec==0)) readm = .true. + + ! ----------------------------------------------------------- + ! Rainfall and snowfall + ! ----------------------------------------------------------- + + fieldname='rainfall' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, rain_file, fieldname, frain_data, & + field_loc_center, field_type_scalar) + fieldname='snowfall' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, snow_file, fieldname, fsnow_data, & + field_loc_center, field_type_scalar) + + ! Interpolate to current time step + call interpolate_data (fsnow_data, fsnow) + call interpolate_data (frain_data, frain) + + if (calc_strair) then + + ! -------------------------------------------------------- + ! Wind velocity + ! -------------------------------------------------------- + + fieldname='u_10' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, uwind_file, fieldname, uatm_data, & + field_loc_center, field_type_vector) + fieldname='v_10' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, vwind_file, fieldname, vatm_data, & + field_loc_center, field_type_vector) + + ! Interpolate to current time step + call interpolate_data (uatm_data, uatm) + call interpolate_data (vatm_data, vatm) + + else + + ! -------------------------------------------------------- + ! Wind stress + ! -------------------------------------------------------- + + fieldname='taux' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, strax_file, fieldname, strax_data, & + field_loc_center, field_type_vector) + fieldname='tauy' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, stray_file, fieldname, stray_data, & + field_loc_center, field_type_vector) + + ! Interpolate to current time step + call interpolate_data (strax_data, strax) + call interpolate_data (stray_data, stray) + + if (calc_Tsfc .or. oceanmixed_ice) then + + ! -------------------------------------------------- + ! Wind speed + ! -------------------------------------------------- + + fieldname='wind_10' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, wind_file, fieldname, wind_data, & + field_loc_center, field_type_scalar) + + ! Interpolate to current time step + call interpolate_data (wind_data, wind) + + endif ! calc_Tsfc or oceanmixed_ice + + endif ! calc_strair + + ! ----------------------------------------------------------- + ! SW incoming, LW incoming, air temperature, density and + ! humidity at 10m. + ! + ! Even if these fields are not being used to force the ice + ! (i.e. calc_Tsfc=.false.), they are still needed to generate + ! forcing for mixed layer model or to calculate wind stress + ! ----------------------------------------------------------- + + if (calc_Tsfc .or. oceanmixed_ice .or. 
calc_strair) then + + fieldname='SW_incoming' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, fsw_file, fieldname, fsw_data, & + field_loc_center, field_type_scalar) + fieldname='LW_incoming' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, flw_file, fieldname, flw_data, & + field_loc_center, field_type_scalar) + fieldname='t_10' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, tair_file, fieldname, Tair_data, & + field_loc_center, field_type_scalar) + fieldname='rho_10' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, rhoa_file, fieldname, rhoa_data, & + field_loc_center, field_type_scalar) + fieldname='q_10' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, humid_file, fieldname, Qa_data, & + field_loc_center, field_type_scalar) + + ! Interpolate onto current timestep + + call interpolate_data (fsw_data, fsw) + call interpolate_data (flw_data, flw) + call interpolate_data (Tair_data, Tair) + call interpolate_data (rhoa_data, rhoa) + call interpolate_data (Qa_data, Qa) + + endif ! calc_Tsfc or oceanmixed_ice or calc_strair + + if (.not. calc_Tsfc) then + + ! ------------------------------------------------------ + ! Sublimation, topmelt and botmelt + ! ------------------------------------------------------ + + fieldname='sublim' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, sublim_file, fieldname, sublim_data, & + field_loc_center, field_type_scalar) + + ! Interpolate to current time step + call interpolate_data (sublim_data, sublim) + + do n = 1, ncat + write(fieldname, '(a,i1)') 'topmeltn',n + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, topmelt_file(n), fieldname, topmelt_data(:,:,:,:,n), & + field_loc_center, field_type_scalar) + + write(fieldname, '(a,i1)') 'botmeltn',n + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, botmelt_file(n), fieldname, botmelt_data(:,:,:,:,n), & + field_loc_center, field_type_scalar) + + call interpolate_data (topmelt_data(:,:,:,:,n), topmelt) + call interpolate_data (botmelt_data(:,:,:,:,n), botmelt) + + !-------------------------------------------------------- + ! Convert from UM variables to CICE variables + ! topmelt = fsurf - fcondtop + ! botmelt = fcondtop (as zero layer) + ! + ! Convert UM sublimation data into CICE LH flux + ! (sublim = - flatn / Lsub) and have same value for all + ! categories + !-------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + fcondtopn_f(i,j,n,iblk) = botmelt(i,j,iblk) + fsurfn_f(i,j,n,iblk) = topmelt(i,j,iblk) & + + botmelt(i,j,iblk) + flatn_f(i,j,n,iblk) = - sublim(i,j,iblk)*Lsub + enddo + enddo + enddo + !$OMP END PARALLEL DO + + enddo ! ncat + + endif ! .not. calc_Tsfc + + end subroutine hadgem_data + +!======================================================================= +! monthly forcing +!======================================================================= + + subroutine monthly_files (yr) + +! Construct filenames based on the LANL naming conventions for NCAR data. +! Edit for other directory structures or filenames. +! Note: The year number in these filenames does not matter, because +! subroutine file_year will insert the correct year. + +! author: Elizabeth C. Hunke, LANL + + integer (kind=int_kind), intent(in) :: & + yr ! 
current forcing year + + flw_file = & + trim(atm_data_dir)//'MONTHLY/cldf.omip.dat' + + rain_file = & + trim(atm_data_dir)//'MONTHLY/prec.nmyr.dat' + + tair_file = & + trim(atm_data_dir)//'MONTHLY/t_10.1996.dat' + call file_year(tair_file,yr) + + humid_file = & + trim(atm_data_dir)//'MONTHLY/q_10.1996.dat' + call file_year(humid_file,yr) + + ! stress/speed is used instead of wind components + strax_file = & + trim(atm_data_dir)//'MONTHLY/strx.1996.dat' + call file_year(strax_file,yr) + + stray_file = & + trim(atm_data_dir)//'MONTHLY/stry.1996.dat' + call file_year(stray_file,yr) + + wind_file = & + trim(atm_data_dir)//'MONTHLY/wind.1996.dat' + call file_year(wind_file,yr) + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'Forcing data year = ', fyear + write (nu_diag,*) 'Atmospheric data files:' + write (nu_diag,*) trim(flw_file) + write (nu_diag,*) trim(rain_file) + write (nu_diag,*) trim(tair_file) + write (nu_diag,*) trim(humid_file) + write (nu_diag,*) trim(uwind_file) + write (nu_diag,*) trim(vwind_file) + endif ! master_task + + end subroutine monthly_files + +!======================================================================= +! read monthly atmospheric data + + subroutine monthly_data + + use ice_blocks, only: block, get_block + use ice_constants, only: p5, & + field_loc_center, field_type_scalar, field_type_vector + use ice_global_reductions, only: global_minval, global_maxval + use ice_domain, only: nblocks, distrb_info, blocks_ice + use ice_flux, only: fsnow, Tair, Qa, wind, strax, stray, fsw + use ice_grid, only: hm, tlon, tlat, tmask, umask + + integer (kind=int_kind) :: & + i, j , & + ixm,ixp , & ! record numbers for neighboring months + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth , & ! middle day of month + iblk , & ! block index + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + vmin, vmax + + logical (kind=log_kind) :: readm + + type (block) :: & + this_block ! block information for current block + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + midmonth = 15 ! data is given on 15th of every month +! midmonth = fix(p5 * real(daymo(month))) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = -99 ! other two points will be used + if (mday < midmonth) ixp = -99 + + ! Determine whether interpolation will use values 1:2 or 2:3 + ! recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + ! Read 2 monthly values + readm = .false. + if (istep==1 .or. (mday==midmonth .and. sec==0)) readm = .true. 
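+
+      ! Illustrative worked example of the record selection above (a sketch
+      ! only, assuming maxrec = 12 and midmonth = 15 as set in this routine):
+      !   month = 1, mday = 20  ->  ixm = -99, ixp = 2, recslot = 1
+      !     the two records used are January and February, with the current
+      !     (January) record in the first slot.
+      !   month = 1, mday = 10  ->  ixm = 12, ixp = -99, recslot = 2
+      !     the two records used are December and January, with the current
+      !     (January) record in the second slot.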
+ + call read_clim_data (readm, 0, ixm, month, ixp, & + flw_file, cldf_data, & + field_loc_center, field_type_scalar) + call read_clim_data (readm, 0, ixm, month, ixp, & + rain_file, fsnow_data, & + field_loc_center, field_type_scalar) + call read_clim_data (readm, 0, ixm, month, ixp, & + tair_file, Tair_data, & + field_loc_center, field_type_scalar) + call read_clim_data (readm, 0, ixm, month, ixp, & + humid_file, Qa_data, & + field_loc_center, field_type_scalar) + call read_clim_data (readm, 0, ixm, month, ixp, & + wind_file, wind_data, & + field_loc_center, field_type_scalar) + call read_clim_data (readm, 0, ixm, month, ixp, & + strax_file, strax_data, & + field_loc_center, field_type_vector) + call read_clim_data (readm, 0, ixm, month, ixp, & + stray_file, stray_data, & + field_loc_center, field_type_vector) + + call interpolate_data (cldf_data, cldf) + call interpolate_data (fsnow_data, fsnow) ! units mm/s = kg/m^2/s + call interpolate_data (Tair_data, Tair) + call interpolate_data (Qa_data, Qa) + call interpolate_data (wind_data, wind) + call interpolate_data (strax_data, strax) + call interpolate_data (stray_data, stray) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + call Qa_fixLY(nx_block, ny_block, & + Tair (:,:,iblk), & + Qa (:,:,iblk)) + + do j = 1, ny_block + do i = 1, nx_block + Qa (i,j,iblk) = Qa (i,j,iblk) * hm(i,j,iblk) + Tair (i,j,iblk) = Tair (i,j,iblk) * hm(i,j,iblk) + wind (i,j,iblk) = wind (i,j,iblk) * hm(i,j,iblk) + strax(i,j,iblk) = strax(i,j,iblk) * hm(i,j,iblk) + stray(i,j,iblk) = stray(i,j,iblk) * hm(i,j,iblk) + enddo + enddo + + ! AOMIP + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call compute_shortwave(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + TLON (:,:,iblk), & + TLAT (:,:,iblk), & + hm (:,:,iblk), & + Qa (:,:,iblk), & + cldf (:,:,iblk), & + fsw (:,:,iblk)) + + enddo ! iblk + !$OMP END PARALLEL DO + + if (dbug) then + if (my_task == master_task) write (nu_diag,*) 'LY_bulk_data' + vmin = global_minval(fsw,distrb_info,tmask) + vmax = global_maxval(fsw,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'fsw',vmin,vmax + vmin = global_minval(cldf,distrb_info,tmask) + vmax = global_maxval(cldf,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'cldf',vmin,vmax + vmin =global_minval(fsnow,distrb_info,tmask) + vmax =global_maxval(fsnow,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'fsnow',vmin,vmax + vmin = global_minval(Tair,distrb_info,tmask) + vmax = global_maxval(Tair,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'Tair',vmin,vmax + vmin = global_minval(wind,distrb_info,umask) + vmax = global_maxval(wind,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'wind',vmin,vmax + vmin = global_minval(strax,distrb_info,umask) + vmax = global_maxval(strax,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'strax',vmin,vmax + vmin = global_minval(stray,distrb_info,umask) + vmax = global_maxval(stray,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'stray',vmin,vmax + vmin = global_minval(Qa,distrb_info,tmask) + vmax = global_maxval(Qa,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'Qa',vmin,vmax + + endif ! dbug + + end subroutine monthly_data + +!======================================================================= +! 
Oned atmospheric data +!======================================================================= + + subroutine oned_data + + use ice_blocks, only: block, get_block + use ice_constants, only: p001, p01, p25, c0, c1 + use ice_domain, only: nblocks, blocks_ice + use ice_flux, only: uatm, vatm, Tair, fsw, fsnow, Qa, rhoa, frain + +#ifdef ncdf + use netcdf + + ! local parameters + + character (char_len_long) :: & + met_file, & ! netcdf filename + fieldname ! field name in netcdf file + + integer (kind=int_kind) :: & + fid ! file id for netCDF file + + real (kind=dbl_kind):: & + work ! temporary variable + + logical (kind=log_kind) :: diag + + integer (kind=int_kind) :: & + status ! status flag + + integer (kind=int_kind) :: & + iblk, & ! block index + ilo,jlo ! beginning of physical domain + + type (block) :: & + this_block ! block information for current block + + real (kind=dbl_kind) :: & ! used to determine specific humidity + Temp , & ! air temperature (K) + rh , & ! relative humidity (%) + Psat , & ! saturation vapour pressure (hPa) + ws ! saturation mixing ratio + + real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa + ps1 = 0.58002206e4_dbl_kind, & ! (K) + ps2 = 1.3914993_dbl_kind, & ! + ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) + ps4 = 0.41764768e-4_dbl_kind, & ! (K^-2) + ps5 = 0.14452093e-7_dbl_kind, & ! (K^-3) + ps6 = 6.5459673_dbl_kind, & ! + ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio + Pair = 1020._dbl_kind ! Sea level pressure (hPa) + + diag = .false. ! write diagnostic information + + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + jlo = this_block%jlo + + if (trim(atm_data_format) == 'nc') then ! read nc file + + ! hourly data beginning Jan 1, 1989, 01:00 + ! HARDWIRED for dt = 1 hour! + met_file = uwind_file + call ice_open_nc(met_file,fid) + + fieldname='Uatm' + call ice_read_nc(fid,istep1,fieldname,work,diag) + uatm(:,:,:) = work + + fieldname='Vatm' + call ice_read_nc(fid,istep1,fieldname,work,diag) + vatm(:,:,:) = work + + fieldname='Tair' + call ice_read_nc(fid,istep1,fieldname,work,diag) + Temp = work + Tair(:,:,:) = Temp + + if (my_task == master_task) status = nf90_close(fid) + + ! hourly solar data beginning Jan 1, 1989, 01:00 + met_file = fsw_file + call ice_open_nc(met_file,fid) + + fieldname='fsw' + call ice_read_nc(fid,istep1,fieldname,work,diag) + fsw(:,:,:) = work + + if (my_task == master_task) status = nf90_close(fid) + + ! hourly interpolated monthly data beginning Jan 1, 1989, 01:00 + met_file = humid_file + call ice_open_nc(met_file,fid) + + fieldname='rh' + call ice_read_nc(fid,istep1,fieldname,work,diag) + rh = work + + fieldname='fsnow' + call ice_read_nc(fid,istep1,fieldname,work,diag) + fsnow(:,:,:) = work + + if (my_task == master_task) status = nf90_close(fid) + + !------------------------------------------------------------------- + ! Find specific humidity using Hyland-Wexler formulation + ! Hyland, R.W. and A. Wexler, Formulations for the Thermodynamic + ! Properties of the saturated phases of H20 from 173.15K to 473.15K, + ! ASHRAE Trans, 89(2A), 500-519, 1983 + !------------------------------------------------------------------- + + Psat = exp(-ps1/Temp + ps2 - ps3*Temp + ps4*Temp**2 - ps5 * Temp**3 & + + ps6 * log(Temp))*p01 ! saturation vapour pressure + ws = ws1 * Psat/(Pair - Psat) ! saturation mixing ratio + Qa(:,:,:) = rh * ws * p01/(c1 + rh * ws * p01) * p001 + ! specific humidity (kg/kg) + endif ! atm_data_format + + ! 
flw calculated in prepare_forcing + rhoa (:,:,:) = 1.3_dbl_kind ! air density (kg/m^3) + cldf (:,:,:) = p25 ! cloud fraction + frain(:,:,:) = c0 ! this is available in hourlymet_rh file + + enddo ! nblocks + +#endif + + end subroutine oned_data + +!======================================================================= + + subroutine oned_files(yr) + + integer (kind=int_kind), intent(in) :: & + yr ! current forcing year + + fsw_file = & + trim(atm_data_dir)//'hourlysolar_brw1989_5yr.nc' + + rain_file = & + trim(atm_data_dir)//'hourlymet_rh_5yr.nc' + + uwind_file = & + trim(atm_data_dir)//'hourlymet_brw1989_5yr.nc' + + vwind_file = & + trim(atm_data_dir)//'hourlymet_brw1989_5yr.nc' + + tair_file = & + trim(atm_data_dir)//'hourlymet_brw1989_5yr.nc' + + humid_file = & + trim(atm_data_dir)//'hourlymet_rh_5yr.nc' + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'Atmospheric data files:' + write (nu_diag,*) trim(fsw_file) + write (nu_diag,*) trim(rain_file) + write (nu_diag,*) trim(uwind_file) + write (nu_diag,*) trim(vwind_file) + write (nu_diag,*) trim(tair_file) + write (nu_diag,*) trim(humid_file) + endif ! master_task + + end subroutine oned_files + +!======================================================================= +! Climatological ocean forcing +!======================================================================= + + subroutine ocn_data_clim (dt) + +! Interpolate monthly sss, sst data to timestep. +! Restore prognostic sst to data. +! Interpolate fields from U grid to T grid if necessary. + +! author: Elizabeth C. Hunke and William H. Lipscomb, LANL + + use ice_constants, only: c0, p5, c1000, depressT, & + field_loc_center, field_type_scalar + use ice_domain, only: nblocks + use ice_flux, only: Tf, sss, sst, uocn, vocn, ss_tltx, ss_tlty + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk , & ! horizontal indices + ixm,ixp , & ! record numbers for neighboring months + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth ! middle day of month + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & + sstdat ! data value toward which SST is restored + + logical (kind=log_kind) :: readm + + if (my_task == master_task .and. istep == 1) then + if (trim(sss_data_type)=='clim') then + write (nu_diag,*) ' ' + write (nu_diag,*) 'SSS data interpolated to timestep:' + write (nu_diag,*) trim(sss_file) + endif + if (trim(sst_data_type)=='clim') then + write (nu_diag,*) ' ' + write (nu_diag,*) 'SST data interpolated to timestep:' + write (nu_diag,*) trim(sst_file) + if (restore_sst) write (nu_diag,*) & + 'SST restoring timescale (days) =', trestore + endif + endif ! my_task, istep + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + if (trim(sss_data_type)=='clim' .or. & + trim(sst_data_type)=='clim') then + + midmonth = 15 ! data is given on 15th of every month +!!! midmonth = fix(p5 * real(daymo(month))) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = -99 ! other two points will be used + if (mday < midmonth) ixp = -99 + + ! Determine whether interpolation will use values 1:2 or 2:3 + ! 
recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + readm = .false. + if (istep==1 .or. (mday==midmonth .and. sec==0)) readm = .true. + + endif ! sss/sst_data_type + + !------------------------------------------------------------------- + ! Read two monthly SSS values and interpolate. + ! Note: SSS is restored instantaneously to data. + !------------------------------------------------------------------- + + if (trim(sss_data_type)=='clim') then + call read_clim_data (readm, 0, ixm, month, ixp, & + sss_file, sss_data, & + field_loc_center, field_type_scalar) + call interpolate_data (sss_data, sss) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sss(i,j,iblk) = max(sss(i,j,iblk), c0) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call ocn_freezing_temperature + endif + + !------------------------------------------------------------------- + ! Read two monthly SST values and interpolate. + ! Restore toward interpolated value. + !------------------------------------------------------------------- + + if (trim(sst_data_type)=='clim') then + call read_clim_data (readm, 0, ixm, month, ixp, & + sst_file, sst_data, & + field_loc_center, field_type_scalar) + call interpolate_data (sst_data, sstdat) + + if (restore_sst) then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sst(i,j,iblk) = sst(i,j,iblk) & + + (sstdat(i,j,iblk)-sst(i,j,iblk))*dt/trest + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + endif + + end subroutine ocn_data_clim + +!======================================================================= +! NCAR CCSM M-configuration (AIO) ocean forcing +!======================================================================= + + subroutine ocn_data_ncar_init + +! Reads NCAR pop ocean forcing data set 'pop_frc_gx1v3_010815.nc' +! +! List of ocean forcing fields: Note that order is important! +! (order is determined by field list in vname). +! +! For ocean mixed layer-----------------------------units +! +! 1 sst------temperature---------------------------(C) +! 2 sss------salinity------------------------------(ppt) +! 3 hbl------depth---------------------------------(m) +! 4 u--------surface u current---------------------(m/s) +! 5 v--------surface v current---------------------(m/s) +! 6 dhdx-----surface tilt x direction--------------(m/m) +! 7 dhdy-----surface tilt y direction--------------(m/m) +! 8 qdp------ocean sub-mixed layer heat flux-------(W/m2) +! +! Fields 4, 5, 6, 7 are on the U-grid; 1, 2, 3, and 8 are +! on the T-grid. + +! authors: Bruce Briegleb, NCAR +! Elizabeth Hunke, LANL + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_vector + use ice_domain_size, only: max_blocks +#ifdef ncdf + use netcdf +#endif + + integer (kind=int_kind) :: & + n , & ! field index + m , & ! month index + nrec, & ! record number for direct access + nbits + + character(char_len) :: & + vname(nfld) ! variable names to search for in file + data vname / & + 'T', 'S', 'hblt', 'U', 'V', & + 'dhdx', 'dhdy', 'qdp' / + + integer (kind=int_kind) :: & + fid , & ! file id + dimid ! 
dimension id + + integer (kind=int_kind) :: & + status , & ! status flag + nlat , & ! number of longitudes of data + nlon ! number of latitudes of data + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + if (my_task == master_task) then + + write (nu_diag,*) 'WARNING: evp_prep calculates surface tilt' + write (nu_diag,*) 'WARNING: stress from geostrophic currents,' + write (nu_diag,*) 'WARNING: not data from ocean forcing file.' + write (nu_diag,*) 'WARNING: Alter ice_dyn_evp.F if desired.' + + if (restore_sst) write (nu_diag,*) & + 'SST restoring timescale = ',trestore,' days' + + sst_file = trim(ocn_data_dir)//oceanmixed_file ! not just sst + + !--------------------------------------------------------------- + ! Read in ocean forcing data from an existing file + !--------------------------------------------------------------- + write (nu_diag,*) 'ocean mixed layer forcing data file = ', & + trim(sst_file) + + endif ! master_task + + if (trim(ocn_data_format) == 'nc') then +#ifdef ncdf + if (my_task == master_task) then + call ice_open_nc(sst_file, fid) + +! status = nf90_inq_dimid(fid,'nlon',dimid) + status = nf90_inq_dimid(fid,'ni',dimid) + status = nf90_inquire_dimension(fid,dimid,len=nlon) + +! status = nf90_inq_dimid(fid,'nlat',dimid) + status = nf90_inq_dimid(fid,'nj',dimid) + status = nf90_inquire_dimension(fid,dimid,len=nlat) + + if( nlon .ne. nx_global ) then + call abort_ice ('ice: ocn frc file nlon ne nx_global') + endif + if( nlat .ne. ny_global ) then + call abort_ice ('ice: ocn frc file nlat ne ny_global') + endif + + endif ! master_task + + ! Read in ocean forcing data for all 12 months + do n=1,nfld + do m=1,12 + + ! Note: netCDF does single to double conversion if necessary + if (n >= 4 .and. n <= 7) then + call ice_read_nc(fid, m, vname(n), work1, dbug, & + field_loc_NEcorner, field_type_vector) + else + call ice_read_nc(fid, m, vname(n), work1, dbug, & + field_loc_center, field_type_scalar) + endif + ocn_frc_m(:,:,:,n,m) = work1(:,:,:) + + enddo ! month loop + enddo ! field loop + + if (my_task == master_task) status = nf90_close(fid) +#endif + + else ! binary format + + nbits = 64 + call ice_open (nu_forcing, sst_file, nbits) + + nrec = 0 + do n=1,nfld + do m=1,12 + nrec = nrec + 1 + if (n >= 4 .and. n <= 7) then + call ice_read (nu_forcing, nrec, work1, 'rda8', dbug, & + field_loc_NEcorner, field_type_vector) + else + call ice_read (nu_forcing, nrec, work1, 'rda8', dbug, & + field_loc_center, field_type_scalar) + endif + ocn_frc_m(:,:,:,n,m) = work1(:,:,:) + enddo ! month loop + enddo ! field loop + close (nu_forcing) + + endif + +!echmod - currents cause Fram outflow to be too large + ocn_frc_m(:,:,:,4,:) = c0 + ocn_frc_m(:,:,:,5,:) = c0 +!echmod + + end subroutine ocn_data_ncar_init + +!======================================================================= + + subroutine ocn_data_ncar_init_3D + +! Reads NCAR pop ocean forcing data set 'oceanmixed_ice_depth.nc' +! +! List of ocean forcing fields: Note that order is important! +! (order is determined by field list in vname). +! +! For ocean mixed layer-----------------------------units +! +! 1 sst------temperature---------------------------(C) +! 2 sss------salinity------------------------------(ppt) +! 3 hbl------depth---------------------------------(m) +! 4 u--------surface u current---------------------(m/s) +! 5 v--------surface v current---------------------(m/s) +! 6 dhdx-----surface tilt x direction--------------(m/m) +! 7 dhdy-----surface tilt y direction--------------(m/m) +! 
8 qdp------ocean sub-mixed layer heat flux-------(W/m2) +! +! All fields are on the T-grid. +! +! authors: Bruce Briegleb, NCAR +! Elizabeth Hunke, LANL + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, & + field_loc_center, field_type_scalar + use ice_domain_size, only: max_blocks + use ice_grid, only: to_ugrid, ANGLET + use ice_read_write, only: ice_read_nc_uv +#ifdef ncdf + use netcdf +#endif + + integer (kind=int_kind) :: & + n , & ! field index + m , & ! month index + nzlev ! z level of currents + + character(char_len) :: & + vname(nfld) ! variable names to search for in file + data vname / & + 'T', 'S', 'hblt', 'U', 'V', & + 'dhdx', 'dhdy', 'qdp' / + + integer (kind=int_kind) :: & + fid , & ! file id + dimid ! dimension id + + integer (kind=int_kind) :: & + status , & ! status flag + nlat , & ! number of longitudes of data + nlon ! number of latitudes of data + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, work2 + + if (my_task == master_task) then + + write (nu_diag,*) 'WARNING: evp_prep calculates surface tilt' + write (nu_diag,*) 'WARNING: stress from geostrophic currents,' + write (nu_diag,*) 'WARNING: not data from ocean forcing file.' + write (nu_diag,*) 'WARNING: Alter ice_dyn_evp.F if desired.' + + if (restore_sst) write (nu_diag,*) & + 'SST restoring timescale = ',trestore,' days' + + sst_file = trim(ocn_data_dir)//oceanmixed_file ! not just sst + + !--------------------------------------------------------------- + ! Read in ocean forcing data from an existing file + !--------------------------------------------------------------- + write (nu_diag,*) 'ocean mixed layer forcing data file = ', & + trim(sst_file) + write (nu_diag,*) + + endif ! master_task + + if (trim(ocn_data_format) == 'nc') then +#ifdef ncdf + if (my_task == master_task) then + call ice_open_nc(sst_file, fid) + +! status = nf90_inq_dimid(fid,'nlon',dimid) + status = nf90_inq_dimid(fid,'ni',dimid) + status = nf90_inquire_dimension(fid,dimid,len=nlon) + +! status = nf90_inq_dimid(fid,'nlat',dimid) + status = nf90_inq_dimid(fid,'nj',dimid) + status = nf90_inquire_dimension(fid,dimid,len=nlat) + + if( nlon .ne. nx_global ) then + call abort_ice ('ice: ocn frc file nlon ne nx_global') + endif + if( nlat .ne. ny_global ) then + call abort_ice ('ice: ocn frc file nlat ne ny_global') + endif + + endif ! master_task + + ! Read in ocean forcing data for all 12 months + do n=1,nfld + do m=1,12 + + ! Note: netCDF does single to double conversion if necessary + if (n == 4 .or. n == 5) then ! 3D currents + nzlev = 1 ! surface currents + call ice_read_nc_uv(fid, m, nzlev, vname(n), work1, dbug, & + field_loc_center, field_type_scalar) + else + call ice_read_nc(fid, m, vname(n), work1, dbug, & + field_loc_center, field_type_scalar) + endif + + ! the land mask used in ocean_mixed_depth.nc does not + ! match our gx1v3 mask (hm) + where (work1(:,:,:) < -900.) work1(:,:,:) = c0 + + ocn_frc_m(:,:,:,n,m) = work1(:,:,:) + + enddo ! month loop + enddo ! field loop + + if (my_task == master_task) status = nf90_close(fid) + + ! 
Rotate vector quantities and shift to U-grid + do n=4,6,2 + do m=1,12 + + work1(:,:,:) = ocn_frc_m(:,:,:,n ,m) + work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m) + ocn_frc_m(:,:,:,n ,m) = work1(:,:,:)*cos(ANGLET(:,:,:)) & + + work2(:,:,:)*sin(ANGLET(:,:,:)) + ocn_frc_m(:,:,:,n+1,m) = work2(:,:,:)*cos(ANGLET(:,:,:)) & + - work1(:,:,:)*sin(ANGLET(:,:,:)) + + work1(:,:,:) = ocn_frc_m(:,:,:,n ,m) + work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m) + call to_ugrid(work1,ocn_frc_m(:,:,:,n ,m)) + call to_ugrid(work2,ocn_frc_m(:,:,:,n+1,m)) + + enddo ! month loop + enddo ! field loop + +#endif + + else ! binary format + + call abort_ice ('new ocean forcing is netcdf only') + + endif + + end subroutine ocn_data_ncar_init_3D + +!======================================================================= + + subroutine ocn_data_ncar(dt) + +! Interpolate monthly ocean data to timestep. +! Restore sst if desired. sst is updated with surface fluxes in ice_ocean.F. + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c1, p5, depressT + use ice_global_reductions, only: global_minval, global_maxval + use ice_domain, only: nblocks, distrb_info + use ice_domain_size, only: max_blocks + use ice_flux, only: sss, sst, Tf, uocn, vocn, ss_tltx, ss_tlty, & + qdp, hmix + use ice_restart_shared, only: restart + use ice_grid, only: hm, tmask, umask + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind) :: & + i, j, n, iblk , & + ixm,ixp , & ! record numbers for neighboring months + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth ! middle day of month + + real (kind=dbl_kind) :: & + vmin, vmax + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + midmonth = 15 ! data is given on 15th of every month +! midmonth = fix(p5 * real(daymo(month),kind=dbl_kind)) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = -99 ! other two points will be used + if (mday < midmonth) ixp = -99 + + ! Determine whether interpolation will use values 1:2 or 2:3 + ! recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + do n = nfld, 1, -1 + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + ! use sst_data arrays as temporary work space until n=1 + if (ixm /= -99) then ! first half of month + sst_data(:,:,1,iblk) = ocn_frc_m(:,:,iblk,n,ixm) + sst_data(:,:,2,iblk) = ocn_frc_m(:,:,iblk,n,month) + else ! second half of month + sst_data(:,:,1,iblk) = ocn_frc_m(:,:,iblk,n,month) + sst_data(:,:,2,iblk) = ocn_frc_m(:,:,iblk,n,ixp) + endif + enddo + !$OMP END PARALLEL DO + + call interpolate_data (sst_data,work1) + ! 
masking by hm is necessary due to NaNs in the data file + do j = 1, ny_block + do i = 1, nx_block + if (n == 2) sss (i,j,:) = c0 + if (n == 3) hmix (i,j,:) = c0 + if (n == 4) uocn (i,j,:) = c0 + if (n == 5) vocn (i,j,:) = c0 + if (n == 6) ss_tltx(i,j,:) = c0 + if (n == 7) ss_tlty(i,j,:) = c0 + if (n == 8) qdp (i,j,:) = c0 + do iblk = 1, nblocks + if (hm(i,j,iblk) == c1) then + if (n == 2) sss (i,j,iblk) = work1(i,j,iblk) + if (n == 3) hmix (i,j,iblk) = work1(i,j,iblk) + if (n == 4) uocn (i,j,iblk) = work1(i,j,iblk) + if (n == 5) vocn (i,j,iblk) = work1(i,j,iblk) + if (n == 6) ss_tltx(i,j,iblk) = work1(i,j,iblk) + if (n == 7) ss_tlty(i,j,iblk) = work1(i,j,iblk) + if (n == 8) qdp (i,j,iblk) = work1(i,j,iblk) + endif + enddo + enddo + enddo + enddo + + do j = 1, ny_block + do i = 1, nx_block + sss (i,j,:) = max (sss(i,j,:), c0) + hmix(i,j,:) = max(hmix(i,j,:), c0) + enddo + enddo + + call ocn_freezing_temperature + + if (restore_sst) then + do j = 1, ny_block + do i = 1, nx_block + sst(i,j,:) = sst(i,j,:) + (work1(i,j,:)-sst(i,j,:))*dt/trest + enddo + enddo +! else sst is only updated in ice_ocean.F + endif + + ! initialize sst properly on first step + if (istep1 <= 1 .and. .not. (restart)) then + call interpolate_data (sst_data,sst) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (hm(i,j,iblk) == c1) then + sst(i,j,iblk) = max (sst(i,j,iblk), Tf(i,j,iblk)) + else + sst(i,j,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + + if (dbug) then + if (my_task == master_task) & + write (nu_diag,*) 'ocn_data_ncar' + vmin = global_minval(Tf,distrb_info,tmask) + vmax = global_maxval(Tf,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'Tf',vmin,vmax + vmin = global_minval(sst,distrb_info,tmask) + vmax = global_maxval(sst,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'sst',vmin,vmax + vmin = global_minval(sss,distrb_info,tmask) + vmax = global_maxval(sss,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'sss',vmin,vmax + vmin = global_minval(hmix,distrb_info,tmask) + vmax = global_maxval(hmix,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'hmix',vmin,vmax + vmin = global_minval(uocn,distrb_info,umask) + vmax = global_maxval(uocn,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'uocn',vmin,vmax + vmin = global_minval(vocn,distrb_info,umask) + vmax = global_maxval(vocn,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'vocn',vmin,vmax + vmin = global_minval(ss_tltx,distrb_info,umask) + vmax = global_maxval(ss_tltx,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'ss_tltx',vmin,vmax + vmin = global_minval(ss_tlty,distrb_info,umask) + vmax = global_maxval(ss_tlty,distrb_info,umask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'ss_tlty',vmin,vmax + vmin = global_minval(qdp,distrb_info,tmask) + vmax = global_maxval(qdp,distrb_info,tmask) + if (my_task.eq.master_task) & + write (nu_diag,*) 'qdp',vmin,vmax + endif + + end subroutine ocn_data_ncar + +!======================================================================= +! ocean data for oned configuration +! 
Current (released) values are the same as the defaults (ice_flux.F90) + + subroutine ocn_data_oned(dt) + + use ice_constants, only: c0, c20, p001, depressT + use ice_flux, only: sss, sst, Tf, uocn, vocn, ss_tltx, ss_tlty, & + qdp, hmix, frzmlt + !use ice_therm_mushy, only: liquidus_temperature_mush + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) + + call ocn_freezing_temperature + + sst (:,:,:) = Tf(:,:,:) ! sea surface temp (C) + uocn (:,:,:) = c0 ! surface ocean currents (m/s) + vocn (:,:,:) = c0 + ss_tltx(:,:,:) = c0 ! sea surface tilt (m/m) + ss_tlty(:,:,:) = c0 + frzmlt (:,:,:) = c0 ! freezing/melting potential (W/m^2) + qdp (:,:,:) = c0 ! deep ocean heat flux (W/m^2) + hmix (:,:,:) = c20 ! ocean mixed layer depth + + end subroutine ocn_data_oned + +!======================================================================= + + subroutine ocn_data_hadgem(dt) + +! Reads in HadGEM ocean forcing data as required from netCDF files +! Current options (selected by sst_data_type) +! hadgem_sst: read in sst only +! hadgem_sst_uvocn: read in sst plus uocn and vocn + +! authors: Ann Keen, Met Office + + use ice_constants, only: p5, cm_to_m, & + field_loc_center, field_type_scalar, field_type_vector + use ice_domain, only: nblocks + use ice_flux, only: sst, uocn, vocn + use ice_grid, only: t2ugrid_vector, ANGLET + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + iblk , & ! block index + ixm,ixp , & ! record numbers for neighboring months + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth ! middle day of month + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & + sstdat ! data value toward which SST is restored + + real (kind=dbl_kind) :: workx, worky + + logical (kind=log_kind) :: readm + + character (char_len) :: & + fieldname ! field name in netcdf file + + character (char_len_long) :: & + filename ! name of netCDF file + + !------------------------------------------------------------------- + ! monthly data + ! + ! Assume that monthly data values are located in the middle of the + ! month. + !------------------------------------------------------------------- + + midmonth = 15 ! data is given on 15th of every month +! midmonth = fix(p5 * real(daymo(month))) ! exact middle + + ! Compute record numbers for surrounding months + maxrec = 12 + ixm = mod(month+maxrec-2,maxrec) + 1 + ixp = mod(month, maxrec) + 1 + if (mday >= midmonth) ixm = -99 ! other two points will be used + if (mday < midmonth) ixp = -99 + + ! Determine whether interpolation will use values 1:2 or 2:3 + ! recslot = 2 means we use values 1:2, with the current value (2) + ! in the second slot + ! recslot = 1 means we use values 2:3, with the current value (2) + ! in the first slot + recslot = 1 ! latter half of month + if (mday < midmonth) recslot = 2 ! first half of month + + ! Find interpolation coefficients + call interp_coeff_monthly (recslot) + + ! Read 2 monthly values + readm = .false. + if (istep==1 .or. (mday==midmonth .and. sec==0)) readm = .true. + + if (my_task == master_task .and. 
istep == 1) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'SST data interpolated to timestep:' + write (nu_diag,*) trim(ocn_data_dir)//'MONTHLY/sst.1997.nc' + if (restore_sst) write (nu_diag,*) & + 'SST restoring timescale (days) =', trestore + if (trim(sst_data_type)=='hadgem_sst_uvocn') then + write (nu_diag,*) ' ' + write (nu_diag,*) 'uocn and vocn interpolated to timestep:' + write (nu_diag,*) trim(ocn_data_dir)//'MONTHLY/uocn.1997.nc' + write (nu_diag,*) trim(ocn_data_dir)//'MONTHLY/vocn.1997.nc' + endif + endif ! my_task, istep + + ! ----------------------------------------------------------- + ! SST + ! ----------------------------------------------------------- + sst_file = trim(ocn_data_dir)//'MONTHLY/sst.1997.nc' + fieldname='sst' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, sst_file, fieldname, sst_data, & + field_loc_center, field_type_scalar) + + ! Interpolate to current time step + call interpolate_data (sst_data, sstdat) + + ! Restore SSTs if required + if (restore_sst) then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sst(i,j,iblk) = sst(i,j,iblk) & + + (sstdat(i,j,iblk)-sst(i,j,iblk))*dt/trest + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + + ! ----------------------------------------------------------- + ! Ocean currents + ! -------------- + ! Values read in are on T grid and oriented geographically, hence + ! vectors need to be rotated to model grid and then interpolated + ! to U grid. + ! Also need to be converted from cm s-1 (UM) to m s-1 (CICE) + ! ----------------------------------------------------------- + + if (trim(sst_data_type)=='hadgem_sst_uvocn') then + + filename = trim(ocn_data_dir)//'MONTHLY/uocn.1997.nc' + fieldname='uocn' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, filename, fieldname, uocn_data, & + field_loc_center, field_type_vector) + + ! Interpolate to current time step + call interpolate_data (uocn_data, uocn) + + filename = trim(ocn_data_dir)//'MONTHLY/vocn.1997.nc' + fieldname='vocn' + call read_data_nc (readm, 0, fyear, ixm, month, ixp, & + maxrec, filename, fieldname, vocn_data, & + field_loc_center, field_type_vector) + + ! Interpolate to current time step + call interpolate_data (vocn_data, vocn) + + !----------------------------------------------------------------- + ! Rotate zonal/meridional vectors to local coordinates, + ! and change units + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + + workx = uocn(i,j,iblk) + worky = vocn(i,j,iblk) + uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & + + worky*sin(ANGLET(i,j,iblk)) + vocn(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & + - workx*sin(ANGLET(i,j,iblk)) + + uocn(i,j,iblk) = uocn(i,j,iblk) * cm_to_m + vocn(i,j,iblk) = vocn(i,j,iblk) * cm_to_m + + enddo ! i + enddo ! j + enddo ! nblocks + !$OMP END PARALLEL DO + + !----------------------------------------------------------------- + ! Interpolate to U grid + !----------------------------------------------------------------- + + call t2ugrid_vector(uocn) + call t2ugrid_vector(vocn) + + endif ! 
sst_data_type = hadgem_sst_uvocn + + end subroutine ocn_data_hadgem + +!======================================================================= + + end module ice_forcing + +!======================================================================= diff --git a/source/ice_grid.F90 b/source/ice_grid.F90 new file mode 100755 index 00000000..eb29a683 --- /dev/null +++ b/source/ice_grid.F90 @@ -0,0 +1,2220 @@ +! SVN:$Id: ice_grid.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= + +! Spatial grids, masks, and boundary conditions +! +! authors: Elizabeth C. Hunke and William H. Lipscomb, LANL +! Tony Craig, NCAR +! +! 2004: Block structure added by William Lipscomb +! init_grid split into two parts as in POP 2.0 +! Boundary update routines replaced by POP versions +! 2006: Converted to free source form (F90) by Elizabeth Hunke +! 2007: Option to read from netcdf files (A. Keen, Met Office) +! Grid reading routines reworked by E. Hunke for boundary values + + module ice_grid + + use ice_kinds_mod + use ice_boundary, only: ice_HaloUpdate, ice_HaloExtrapolate + use ice_communicate, only: my_task, master_task + use ice_blocks, only: block, get_block, nx_block, ny_block, nghost + use ice_domain_size, only: nx_global, ny_global, max_blocks + use ice_domain, only: blocks_ice, nblocks, halo_info, distrb_info, & + ew_boundary_type, ns_boundary_type, init_domain_distribution + use ice_fileunits, only: nu_diag, nu_grid, nu_kmt + use ice_gather_scatter, only: gather_global, scatter_global + use ice_read_write, only: ice_read, ice_read_nc, ice_read_global, & + ice_read_global_nc, ice_open, ice_open_nc, ice_close_nc + use ice_timers, only: timer_bound, ice_timer_start, ice_timer_stop +#ifdef AusCOM + use cpl_parameters, only : use_umask +#endif + + implicit none + private + public :: init_grid1, init_grid2, & + t2ugrid_vector, u2tgrid_vector, & + to_ugrid, to_tgrid + + character (len=char_len_long), public :: & + grid_format , & ! file format ('bin'=binary or 'nc'=netcdf) + gridcpl_file , & ! input file for POP coupling grid info + grid_file , & ! input file for POP grid info + kmt_file , & ! input file for POP grid info + grid_type ! current options are rectangular (default), + ! displaced_pole, tripole, regional + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public, save :: & + dxt , & ! width of T-cell through the middle (m) + dyt , & ! height of T-cell through the middle (m) + dxu , & ! width of U-cell through the middle (m) + dyu , & ! height of U-cell through the middle (m) + HTE , & ! length of eastern edge of T-cell (m) + HTN , & ! length of northern edge of T-cell (m) + tarea , & ! area of T-cell (m^2) + uarea , & ! area of U-cell (m^2) + tarear , & ! 1/tarea + uarear , & ! 1/uarea + tinyarea,& ! puny*tarea + tarean , & ! area of NH T-cells + tareas , & ! area of SH T-cells + ULON , & ! longitude of velocity pts (radians) + ULAT , & ! latitude of velocity pts (radians) + TLON , & ! longitude of temp pts (radians) + TLAT , & ! latitude of temp pts (radians) + ANGLE , & ! for conversions between POP grid and lat/lon + ANGLET , & ! ANGLE converted to T-cells + ocn_gridcell_frac ! only relevant for lat-lon grids + ! gridcell value of [1 - (land fraction)] (T-cell) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public, save :: & + cyp , & ! 1.5*HTE - 0.5*HTE + cxp , & ! 1.5*HTN - 0.5*HTN + cym , & ! 0.5*HTE - 1.5*HTE + cxm , & ! 0.5*HTN - 1.5*HTN + dxhy , & ! 0.5*(HTE - HTE) + dyhx ! 
0.5*(HTN - HTN) + + ! Corners of grid boxes for history output + real (kind=dbl_kind), dimension (4,nx_block,ny_block,max_blocks), public, save :: & + lont_bounds, & ! longitude of gridbox corners for T point + latt_bounds, & ! latitude of gridbox corners for T point + lonu_bounds, & ! longitude of gridbox corners for U point + latu_bounds ! latitude of gridbox corners for U point + + ! geometric quantities used for remapping transport + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public, save :: & + xav , & ! mean T-cell value of x + yav , & ! mean T-cell value of y + xxav , & ! mean T-cell value of xx +! xyav , & ! mean T-cell value of xy +! yyav , & ! mean T-cell value of yy + yyav ! mean T-cell value of yy +! xxxav, & ! mean T-cell value of xxx +! xxyav, & ! mean T-cell value of xxy +! xyyav, & ! mean T-cell value of xyy +! yyyav ! mean T-cell value of yyy + + real (kind=dbl_kind), & + dimension (2,2,nx_block,ny_block,max_blocks), public, save :: & + mne, & ! matrices used for coordinate transformations in remapping + mnw, & ! ne = northeast corner, nw = northwest, etc. + mse, & + msw + + ! masks + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public, save :: & + hm , & ! land/boundary mask, thickness (T-cell) + bm , & ! task/block id + uvm ! land/boundary mask, velocity (U-cell) + + logical (kind=log_kind), & + dimension (nx_block,ny_block,max_blocks), public, save :: & + tmask , & ! land/boundary mask, thickness (T-cell) + umask , & ! land/boundary mask, velocity (U-cell) + lmask_n, & ! northern hemisphere mask + lmask_s ! southern hemisphere mask + + ! grid dimensions for rectangular grid + real (kind=dbl_kind), parameter :: & + dxrect = 30.e5_dbl_kind ,&! uniform HTN (cm) + dyrect = 30.e5_dbl_kind ! uniform HTE (cm) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public, save :: & + rndex_global ! global index for local subdomain (dbl) + +!======================================================================= + + contains + +!======================================================================= + +! Distribute blocks across processors. The distribution is optimized +! based on latitude and topography, contained in the ULAT and KMT arrays. +! +! authors: William Lipscomb and Phil Jones, LANL + + subroutine init_grid1 + + use ice_blocks, only: nx_block, ny_block + use ice_broadcast, only: broadcast_array + use ice_constants, only: c1, rad_to_deg, puny + use ice_domain_size, only: max_blocks + + integer (kind=int_kind) :: & + fid_grid, & ! file id for netCDF grid file + fid_kmt ! file id for netCDF kmt file + + character (char_len) :: & + fieldname ! field name in netCDF file + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1, work_g2 + + !----------------------------------------------------------------- + ! Get global ULAT and KMT arrays used for block decomposition. + !----------------------------------------------------------------- + + allocate(work_g1(nx_global,ny_global)) + allocate(work_g2(nx_global,ny_global)) + + if (trim(grid_type) == 'displaced_pole' .or. & + trim(grid_type) == 'tripole' .or. & + trim(grid_type) == 'regional' ) then + + if (trim(grid_format) == 'nc') then + + call ice_open_nc(grid_file,fid_grid) + call ice_open_nc(kmt_file,fid_kmt) + + fieldname='ulat' + call ice_read_global_nc(fid_grid,1,fieldname,work_g1,.true.) + fieldname='kmt' + call ice_read_global_nc(fid_kmt,1,fieldname,work_g2,.true.) 
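+
+      ! A minimal sketch in comments (assuming the POP convention that KMT
+      ! holds the number of ocean levels at each T point): the KMT field read
+      ! above is used, together with ULAT, by init_domain_distribution (called
+      ! just below) to weight the block distribution, and popgrid/popgrid_nc
+      ! later collapse it to the 0/1 land mask hm, e.g.
+      !   hm(i,j,iblk) = work1(i,j,iblk)               ! work1 holds KMT
+      !   if (hm(i,j,iblk) >= c1) hm(i,j,iblk) = c1    ! i.e. hm = min(KMT,1)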
+ + if (my_task == master_task) then + call ice_close_nc(fid_grid) + call ice_close_nc(fid_kmt) + endif + + else + + call ice_open(nu_grid,grid_file,64) ! ULAT + call ice_open(nu_kmt, kmt_file, 32) ! KMT + + call ice_read_global(nu_grid,1,work_g1,'rda8',.true.) ! ULAT + call ice_read_global(nu_kmt, 1,work_g2,'ida4',.true.) ! KMT + + if (my_task == master_task) then + close (nu_grid) + close (nu_kmt) + endif + + endif + + else ! rectangular grid + + work_g1(:,:) = 75._dbl_kind/rad_to_deg ! arbitrary polar latitude + work_g2(:,:) = c1 + + endif + + call broadcast_array(work_g1, master_task) ! ULAT + call broadcast_array(work_g2, master_task) ! KMT + + !----------------------------------------------------------------- + ! distribute blocks among processors + !----------------------------------------------------------------- + + call init_domain_distribution(work_g2, work_g1) ! KMT, ULAT + + deallocate(work_g1) + deallocate(work_g2) + + !----------------------------------------------------------------- + ! write additional domain information + !----------------------------------------------------------------- + + if (my_task == master_task) then + write(nu_diag,'(a26,i6)') ' Block size: nx_block = ',nx_block + write(nu_diag,'(a26,i6)') ' ny_block = ',ny_block + endif + + end subroutine init_grid1 + +!======================================================================= + +! Horizontal grid initialization: +! +! U{LAT,LONG} = true {latitude,longitude} of U points +! HT{N,E} = cell widths on {N,E} sides of T cell +! ANGLE = angle between local x direction and true east +! hm = land mask (c1 for ocean points, c0 for land points) +! D{X,Y}{T,U} = {x,y} spacing centered at {T,U} points +! T-grid and ghost cell values +! Various grid quantities needed for dynamics and transport +! +! author: Elizabeth C. Hunke, LANL + + subroutine init_grid2 + + use ice_blocks, only: get_block, block, nx_block, ny_block + use ice_constants, only: c0, c1, c2, pi, pi2, puny, p5, p25, c1p5, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_vector, field_type_angle + use ice_domain_size, only: max_blocks + use ice_exit, only: abort_ice + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + angle_0, angle_w, angle_s, angle_sw + + logical (kind=log_kind), dimension(nx_block,ny_block,max_blocks):: & + out_of_range + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + type (block) :: & + this_block ! block information for current block + + !----------------------------------------------------------------- + ! lat, lon, cell widths, angle, land mask + !----------------------------------------------------------------- + + if (trim(grid_type) == 'displaced_pole' .or. & + trim(grid_type) == 'tripole' .or. & + trim(grid_type) == 'regional' ) then + if (trim(grid_format) == 'nc') then + call popgrid_nc ! read POP grid lengths from nc file + else + call popgrid ! read POP grid lengths directly + endif + elseif (trim(grid_type) == 'latlon') then + call latlongrid ! lat lon grid for sequential CCSM (CAM mode) + return + elseif (trim(grid_type) == 'cpom_grid') then + call cpomgrid ! cpom model orca1 type grid + else + call rectgrid ! regular rectangular grid + endif + + !----------------------------------------------------------------- + ! 
T-grid cell and U-grid cell quantities + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi +#ifndef AusCOM + tarea(i,j,iblk) = dxt(i,j,iblk)*dyt(i,j,iblk) + uarea(i,j,iblk) = dxu(i,j,iblk)*dyu(i,j,iblk) +#endif + if (tarea(i,j,iblk) > c0) then + tarear(i,j,iblk) = c1/tarea(i,j,iblk) + else + tarear(i,j,iblk) = c0 ! possible on boundaries + endif + if (uarea(i,j,iblk) > c0) then + uarear(i,j,iblk) = c1/uarea(i,j,iblk) + else + uarear(i,j,iblk) = c0 ! possible on boundaries + endif + tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) + + dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) + dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) + enddo + enddo + + do j = jlo, jhi+1 + do i = ilo, ihi+1 + cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) + cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) + ! match order of operations in cyp, cxp for tripole grids + cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) + cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) + enddo + enddo + + enddo ! iblk + !$OMP END PARALLEL DO + + !----------------------------------------------------------------- + ! Ghost cell updates + ! On the tripole grid, one must be careful with updates of + ! quantities that involve a difference of cell lengths. + ! For example, dyhx and dxhy are cell-centered vector components. + ! Also note that on the tripole grid, cxp and cxm would swap places, + ! as would cyp and cym. These quantities are computed only + ! in north and east ghost cells (above), not south and west. + !----------------------------------------------------------------- + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (tarea, halo_info, & + field_loc_center, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (uarea, halo_info, & + field_loc_NEcorner, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (tarear, halo_info, & + field_loc_center, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (uarear, halo_info, & + field_loc_NEcorner, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (tinyarea, halo_info, & + field_loc_center, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (dxhy, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + call ice_HaloUpdate (dyhx, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + call ice_timer_stop(timer_bound) + + !----------------------------------------------------------------- + ! Calculate ANGLET to be compatible with POP ocean model + ! First, ensure that -pi <= ANGLE <= pi + !----------------------------------------------------------------- + + out_of_range = .false. + where (ANGLE < -pi .or. ANGLE > pi) out_of_range = .true. + if (count(out_of_range) > 0) then + call abort_ice ('ice: init_grid: ANGLE out of expected range') + endif + +#ifndef AusCOM + !----------------------------------------------------------------- + ! 
Compute ANGLE on T-grid + !----------------------------------------------------------------- + ANGLET = c0 + + if (trim(grid_type) == 'cpom_grid') then + ANGLET(:,:,:) = ANGLE(:,:,:) + else + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP angle_0,angle_w,angle_s,angle_sw) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + angle_0 = ANGLE(i ,j ,iblk) ! w----0 + angle_w = ANGLE(i-1,j ,iblk) ! | | + angle_s = ANGLE(i, j-1,iblk) ! | | + angle_sw = ANGLE(i-1,j-1,iblk) ! sw---s + + if ( angle_0 < c0 ) then + if ( abs(angle_w - angle_0) > pi) & + angle_w = angle_w - pi2 + if ( abs(angle_s - angle_0) > pi) & + angle_s = angle_s - pi2 + if ( abs(angle_sw - angle_0) > pi) & + angle_sw = angle_sw - pi2 + endif + + ANGLET(i,j,iblk) = angle_0 * p25 + angle_w * p25 & + + angle_s * p25 + angle_sw* p25 + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif ! cpom_grid + + if (trim(grid_type) == 'regional') then + ! for W boundary extrapolate from interior + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + i = ilo + if (this_block%i_glob(i) == 1) then + do j = jlo, jhi + ANGLET(i,j,iblk) = c2*ANGLET(i+1,j,iblk)-ANGLET(i+2,j,iblk) + enddo + endif + enddo + !$OMP END PARALLEL DO + endif ! regional + +!ars599: 21042015 Not so sure should add endif here or after regional endif? +! 22042015 modify based on after compiling +#endif + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (ANGLET, halo_info, & + field_loc_center, field_type_angle, & + fillValue=c1) + call ice_timer_stop(timer_bound) + +!#ifndef AusCOM + call makemask ! velocity mask, hemisphere masks +!#endif + + call Tlatlon ! get lat, lon on the T grid + + !---------------------------------------------------------------- + ! Corner coordinates for CF compliant history files + !---------------------------------------------------------------- + + call gridbox_corners + + !----------------------------------------------------------------- + ! Compute global index (used for unpacking messages from coupler) + !----------------------------------------------------------------- + + if (my_task==master_task) then + allocate(work_g1(nx_global,ny_global)) + do j=1,ny_global + do i=1,nx_global + work_g1(i,j) = real((j-1)*nx_global + i,kind=dbl_kind) + enddo + enddo + else + allocate(work_g1(1,1)) ! to save memory + endif + + call scatter_global(rndex_global, work_g1, & + master_task, distrb_info, & + field_loc_center, field_type_scalar) + + deallocate(work_g1) + + end subroutine init_grid2 + +!======================================================================= + +! POP displaced pole grid and land mask (or tripole). +! Grid record number, field and units are: \\ +! (1) ULAT (radians) \\ +! (2) ULON (radians) \\ +! (3) HTN (cm) \\ +! (4) HTE (cm) \\ +! (5) HUS (cm) \\ +! (6) HUW (cm) \\ +! (7) ANGLE (radians) +! +! Land mask record number and field is (1) KMT. +! +! author: Elizabeth C. Hunke, LANL + + subroutine popgrid + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c1, pi, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_angle + use ice_domain_size, only: max_blocks + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! 
beginning and end of physical domain + + logical (kind=log_kind) :: diag + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + type (block) :: & + this_block ! block information for current block + + call ice_open(nu_grid,grid_file,64) + call ice_open(nu_kmt,kmt_file,32) + + diag = .true. ! write diagnostic info + + !----------------------------------------------------------------- + ! topography + !----------------------------------------------------------------- + + call ice_read(nu_kmt,1,work1,'ida4',diag, & + field_loc=field_loc_center, & + field_type=field_type_scalar) + + hm(:,:,:) = c0 + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + hm(i,j,iblk) = work1(i,j,iblk) + if (hm(i,j,iblk) >= c1) hm(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + + !----------------------------------------------------------------- + ! lat, lon, angle + !----------------------------------------------------------------- + + allocate(work_g1(nx_global,ny_global)) + + call ice_read_global(nu_grid,1,work_g1,'rda8',.true.) ! ULAT + call gridbox_verts(work_g1,latt_bounds) + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + + call ice_read_global(nu_grid,2,work_g1,'rda8',.true.) ! ULON + call gridbox_verts(work_g1,lont_bounds) + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + call ice_read_global(nu_grid,7,work_g1,'rda8',.true.) ! ANGLE + call scatter_global(ANGLE, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_angle) + + !----------------------------------------------------------------- + ! cell dimensions + ! calculate derived quantities from global arrays to preserve + ! information on boundaries + !----------------------------------------------------------------- + + call ice_read_global(nu_grid,3,work_g1,'rda8',.true.) ! HTN + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + + call ice_read_global(nu_grid,4,work_g1,'rda8',.true.) ! HTE + call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + + deallocate(work_g1) + + if (my_task == master_task) then + close (nu_grid) + close (nu_kmt) + endif + + end subroutine popgrid + +!======================================================================= + +! POP displaced pole grid and land mask. +! Grid record number, field and units are: \\ +! (1) ULAT (radians) \\ +! (2) ULON (radians) \\ +! (3) HTN (cm) \\ +! (4) HTE (cm) \\ +! (5) HUS (cm) \\ +! (6) HUW (cm) \\ +! (7) ANGLE (radians) +! +! Land mask record number and field is (1) KMT. +! +! author: Elizabeth C. Hunke, LANL +! Revised for netcdf input: Ann Keen, Met Office, May 2007 + + subroutine popgrid_nc + +#ifdef ncdf +#ifdef AusCOM +! we also let cice read in the following fields to avoid possible mismatch +! between cice and mom4. +! ( 8) TLAT +! ( 9) TLON +! (10) TAREA +! (11) UAREA +! (12) ANGLET +! the following fields are also available in grid.nc but not read in 'cos they +! are not critical +! LONT_BONDS +! LATT_BONDS +! LONU_BONDS +! LATU_BONDS +#endif +! 
Land mask record number and field is (1) KMT. +#ifdef AusCOM +! Land mask field KMU is also read in from kmt.nc (optional)! +#endif + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c1, pi, pi2, rad_to_deg, puny, p5, p25, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_angle + use ice_domain_size, only: max_blocks + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + fid_grid, & ! file id for netCDF grid file + fid_kmt ! file id for netCDF kmt file + + logical (kind=log_kind) :: diag + + character (char_len) :: & + fieldname ! field name in netCDF file + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + type (block) :: & + this_block ! block information for current block + + call ice_open_nc(grid_file,fid_grid) + call ice_open_nc(kmt_file,fid_kmt) + + diag = .true. ! write diagnostic info + + !----------------------------------------------------------------- + ! topography + !----------------------------------------------------------------- + + fieldname='kmt' + call ice_read_nc(fid_kmt,1,fieldname,work1,diag, & + field_loc=field_loc_center, & + field_type=field_type_scalar) + + hm(:,:,:) = c0 + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + hm(i,j,iblk) = work1(i,j,iblk) + if (hm(i,j,iblk) >= c1) hm(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + +#ifdef AusCOM + if ( use_umask ) then + fieldname='kmu' + call ice_read_nc(fid_kmt,1,fieldname,work1,diag, & + field_loc=field_loc_center, & + field_type=field_type_scalar) + + uvm(:,:,:) = c0 + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + uvm(i,j,iblk) = work1(i,j,iblk) + if (uvm(i,j,iblk) >= c1) uvm(i,j,iblk) = c1 + enddo + enddo + enddo + endif +#endif + + !----------------------------------------------------------------- + ! lat, lon, angle + !----------------------------------------------------------------- + + allocate(work_g1(nx_global,ny_global)) + + fieldname='ulat' + call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! ULAT +#ifdef AusCOM + !Note 'accurate' vertices 'latt_bounds' etc are also available in + ! grid.nc file. but since they are only used for history output, + ! not important, so let cice itself work out the estimates... +#endif + call gridbox_verts(work_g1,latt_bounds) + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + + fieldname='ulon' + call ice_read_global_nc(fid_grid,2,fieldname,work_g1,diag) ! ULON + call gridbox_verts(work_g1,lont_bounds) + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + fieldname='angle' + call ice_read_global_nc(fid_grid,7,fieldname,work_g1,diag) ! ANGLE + call scatter_global(ANGLE, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_angle) + + ! 
fix ANGLE: roundoff error due to single precision + where (ANGLE > pi) ANGLE = pi + where (ANGLE < -pi) ANGLE = -pi + +#ifdef AusCOM + fieldname='tlat' + call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! TLAT + !call gridbox_verts(work_g1,latu_bounds) + !note 'latu_bounds' etc are calculated in routine 'gridbox_corners' + call scatter_global(TLAT, work_g1, master_task, distrb_info, & + field_loc_center, field_type_scalar) + call ice_HaloExtrapolate(TLAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + + fieldname='tlon' + call ice_read_global_nc(fid_grid,2,fieldname,work_g1,diag) ! TLON + !call gridbox_verts(work_g1,lont_bounds) + !....................................... + call scatter_global(TLON, work_g1, master_task, distrb_info, & + field_loc_center, field_type_scalar) + call ice_HaloExtrapolate(TLON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + fieldname='angleT' + call ice_read_global_nc(fid_grid,7,fieldname,work_g1,diag) ! ANGLET + call scatter_global(ANGLET, work_g1, master_task, distrb_info, & + field_loc_center, field_type_scalar) + + ! fix ANGLET: roundoff error due to single precision + where (ANGLET > pi) ANGLET = pi + where (ANGLET < -pi) ANGLET = -pi + + fieldname='tarea' + call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! TAREA + call scatter_global(TAREA, work_g1, master_task, distrb_info, & + field_loc_center, field_type_scalar) + !call ice_HaloExtrapolate(TAREA, distrb_info, & + ! ew_boundary_type, ns_boundary_type) + ! ... to be done in init_grid2 after callint this routine ... + + fieldname='uarea' + call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! TAREA + call scatter_global(UAREA, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + !call ice_HaloExtrapolate(UAREA, distrb_info, & + ! ew_boundary_type, ns_boundary_type) + ! ... to be done in init_grid2 after calling this routine ... +#endif + + !----------------------------------------------------------------- + ! cell dimensions + ! calculate derived quantities from global arrays to preserve + ! information on boundaries + !----------------------------------------------------------------- + + fieldname='htn' + call ice_read_global_nc(fid_grid,3,fieldname,work_g1,diag) ! HTN + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + + fieldname='hte' + call ice_read_global_nc(fid_grid,4,fieldname,work_g1,diag) ! HTE + call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + + deallocate(work_g1) + + if (my_task == master_task) then + call ice_close_nc(fid_grid) + call ice_close_nc(fid_kmt) + endif + +#endif + end subroutine popgrid_nc + +!======================================================================= + +! Read in kmt file that matches CAM lat-lon grid and has single column +! functionality +! author: Mariana Vertenstein +! 2007: Elizabeth Hunke upgraded to netcdf90 and cice ncdf calls + + subroutine latlongrid + +#ifdef ncdf +! use ice_boundary + use ice_domain_size +#ifdef CCSMCOUPLED + use ice_scam, only : scmlat, scmlon, single_column +#endif + use ice_constants, only: c0, c1, pi, pi2, rad_to_deg, puny, p5, p25, & + field_loc_center, field_type_scalar, radius + use ice_exit, only: abort_ice + use netcdf + + integer (kind=int_kind) :: & + i, j, iblk + + integer (kind=int_kind) :: & + ni, nj, ncid, dimid, varid, ier + + character (len=char_len) :: & + subname='latlongrid' ! subroutine name + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: & + ilo,ihi,jlo,jhi ! 
beginning and end of physical domain + + real (kind=dbl_kind) :: & + closelat, & ! Single-column latitude value + closelon, & ! Single-column longitude value + closelatidx, & ! Single-column latitude index to retrieve + closelonidx ! Single-column longitude index to retrieve + + integer (kind=int_kind) :: & + start(2), & ! Start index to read in + count(2) ! Number of points to read in + + integer (kind=int_kind) :: & + start3(3), & ! Start index to read in + count3(3) ! Number of points to read in + + integer (kind=int_kind) :: & + status ! status flag + + real (kind=dbl_kind), allocatable :: & + lats(:),lons(:),pos_lons(:), glob_grid(:,:) ! temporaries + + real (kind=dbl_kind) :: & + pos_scmlon,& ! temporary + scamdata ! temporary + + !----------------------------------------------------------------- + ! - kmt file is actually clm fractional land file + ! - Determine consistency of dimensions + ! - Read in lon/lat centers in degrees from kmt file + ! - Read in ocean from "kmt" file (1 for ocean, 0 for land) + !----------------------------------------------------------------- +#ifdef CCSMCOUPLED + + ! Determine dimension of domain file and check for consistency + + if (my_task == master_task) then + call ice_open_nc(kmt_file, ncid) + + status = nf90_inq_dimid (ncid, 'ni', dimid) + status = nf90_inquire_dimension(ncid, dimid, len=ni) + status = nf90_inq_dimid (ncid, 'nj', dimid) + status = nf90_inquire_dimension(ncid, dimid, len=nj) + end if + + ! Determine start/count to read in for either single column or global lat-lon grid + ! If single_column, then assume that only master_task is used since there is only one task + + if (single_column) then + ! Check for consistency + if (my_task == master_task) then + if ((nx_global /= 1).or. (ny_global /= 1)) then + write(nu_diag,*) 'Because you have selected the column model flag' + write(nu_diag,*) 'Please set nx_global=ny_global=1 in file' + write(nu_diag,*) 'ice_domain_size.F and recompile' + call abort_ice ('latlongrid: check nx_global, ny_global') + endif + end if + + ! Read in domain file for single column + allocate(lats(nj)) + allocate(lons(ni)) + allocate(pos_lons(ni)) + allocate(glob_grid(ni,nj)) + + start3=(/1,1,1/) + count3=(/ni,nj,1/) + status = nf90_inq_varid(ncid, 'xc' , varid) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid xc') + status = nf90_get_var(ncid, varid, glob_grid, start3, count3) + if (status /= nf90_noerr) call abort_ice (subname//' get_var xc') + do i = 1,ni + lons(i) = glob_grid(i,1) + end do + + status = nf90_inq_varid(ncid, 'yc' , varid) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid yc') + status = nf90_get_var(ncid, varid, glob_grid, start3, count3) + if (status /= nf90_noerr) call abort_ice (subname//' get_var yc') + do j = 1,nj + lats(j) = glob_grid(1,j) + end do + + ! convert lons array and scmlon to 0,360 and find index of value closest to 0 + ! 
and obtain single-column longitude/latitude indices to retrieve + + pos_lons(:)= mod(lons(:) + 360._dbl_kind,360._dbl_kind) + pos_scmlon = mod(scmlon + 360._dbl_kind,360._dbl_kind) + start(1) = (MINLOC(abs(pos_lons-pos_scmlon),dim=1)) + start(2) = (MINLOC(abs(lats -scmlat ),dim=1)) + + deallocate(lats) + deallocate(lons) + deallocate(pos_lons) + deallocate(glob_grid) + + status = nf90_inq_varid(ncid, 'xc' , varid) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid xc') + status = nf90_get_var(ncid, varid, scamdata, start) + if (status /= nf90_noerr) call abort_ice (subname//' get_var xc') + TLON = scamdata + status = nf90_inq_varid(ncid, 'yc' , varid) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid yc') + status = nf90_get_var(ncid, varid, scamdata, start) + if (status /= nf90_noerr) call abort_ice (subname//' get_var yc') + TLAT = scamdata + status = nf90_inq_varid(ncid, 'area' , varid) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid area') + status = nf90_get_var(ncid, varid, scamdata, start) + if (status /= nf90_noerr) call abort_ice (subname//' get_var are') + tarea = scamdata + status = nf90_inq_varid(ncid, 'mask' , varid) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid mask') + status = nf90_get_var(ncid, varid, scamdata, start) + if (status /= nf90_noerr) call abort_ice (subname//' get_var mask') + hm = scamdata + status = nf90_inq_varid(ncid, 'frac' , varid) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid frac') + status = nf90_get_var(ncid, varid, scamdata, start) + if (status /= nf90_noerr) call abort_ice (subname//' get_var frac') + ocn_gridcell_frac = scamdata + else + ! Check for consistency + if (my_task == master_task) then + if (nx_global /= ni .and. ny_global /= nj) then + call abort_ice ('latlongrid: ni,nj not equal to nx_global,ny_global') + end if + end if + + ! Read in domain file for global lat-lon grid + call ice_read_nc(ncid, 1, 'xc' , TLON , diag=.true.) + call ice_read_nc(ncid, 1, 'yc' , TLAT , diag=.true.) + call ice_read_nc(ncid, 1, 'area', tarea , diag=.true., & + field_loc=field_loc_center,field_type=field_type_scalar) + call ice_read_nc(ncid, 1, 'mask', hm , diag=.true.) + call ice_read_nc(ncid, 1, 'frac', ocn_gridcell_frac, diag=.true.) + end if + + if (my_task == master_task) then + call ice_close_nc(ncid) + end if + + !$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,i,j) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + ! Convert from degrees to radians + TLON(i,j,iblk) = pi*TLON(i,j,iblk)/180._dbl_kind + + ! Convert from degrees to radians + TLAT(i,j,iblk) = pi*TLAT(i,j,iblk)/180._dbl_kind + + ! Convert from radians^2 to m^2 + ! (area in domain file is in radians^2 and tarea is in m^2) + tarea(i,j,iblk) = tarea(i,j,iblk) * (radius*radius) + end do + end do + end do + !$OMP END PARALLEL DO + + !----------------------------------------------------------------- + ! Calculate various geometric 2d arrays + ! The U grid (velocity) is not used when run with sequential CAM + ! because we only use thermodynamic sea ice. However, ULAT is used + ! in the default initialization of CICE so we calculate it here as + ! a "dummy" so that CICE will initialize with ice. If a no ice + ! initialization is OK (or desired) this can be commented out and + ! ULAT will remain 0 as specified above. ULAT is located at the + ! 
NE corner of the grid cell, TLAT at the center, so here ULAT is + ! hacked by adding half the latitudinal spacing (in radians) to + ! TLAT. + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,i,j) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + + if (ny_global == 1) then + uarea(i,j,iblk) = tarea(i,j, iblk) + else + uarea(i,j,iblk) = p25* & + (tarea(i,j, iblk) + tarea(i+1,j, iblk) & + + tarea(i,j+1,iblk) + tarea(i+1,j+1,iblk)) + endif + tarear(i,j,iblk) = c1/tarea(i,j,iblk) + uarear(i,j,iblk) = c1/uarea(i,j,iblk) + tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) + + if (single_column) then + ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/nj) + else + if (ny_global == 1) then + ULAT (i,j,iblk) = TLAT(i,j,iblk) + else + ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/ny_global) + endif + endif + ULON (i,j,iblk) = c0 + ANGLE (i,j,iblk) = c0 + + ANGLET(i,j,iblk) = c0 + HTN (i,j,iblk) = 1.e36_dbl_kind + HTE (i,j,iblk) = 1.e36_dbl_kind + dxt (i,j,iblk) = 1.e36_dbl_kind + dyt (i,j,iblk) = 1.e36_dbl_kind + dxu (i,j,iblk) = 1.e36_dbl_kind + dyu (i,j,iblk) = 1.e36_dbl_kind + dxhy (i,j,iblk) = 1.e36_dbl_kind + dyhx (i,j,iblk) = 1.e36_dbl_kind + cyp (i,j,iblk) = 1.e36_dbl_kind + cxp (i,j,iblk) = 1.e36_dbl_kind + cym (i,j,iblk) = 1.e36_dbl_kind + cxm (i,j,iblk) = 1.e36_dbl_kind + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call makemask +#endif +#endif + + end subroutine latlongrid + +!======================================================================= + +! Regular rectangular grid and mask +! +! author: Elizabeth C. Hunke, LANL + + subroutine rectgrid + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c1, rad_to_deg, c2, radius, cm_to_m, & + field_loc_center, field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + use ice_exit, only: abort_ice + + integer (kind=int_kind) :: & + i, j, iblk, & + imid, jmid + + real (kind=dbl_kind) :: length + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + !----------------------------------------------------------------- + ! Calculate various geometric 2d arrays + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + ANGLE(i,j,iblk) = c0 ! "square with the world" + enddo + enddo + enddo + !$OMP END PARALLEL DO + + allocate(work_g1(nx_global,ny_global)) + + ! Weddell Sea + ! lower left corner of grid is 55W, 75S + + ! Barrow AK + ! lower left corner of grid is 156.5W, 71.35N + + if (my_task == master_task) then + work_g1 = c0 + length = dxrect*cm_to_m/radius*rad_to_deg + +! work_g1(1,:) = -55._dbl_kind ! Weddell Sea + work_g1(1,:) = -156.5_dbl_kind ! Barrow AK + + do j = 1, ny_global + do i = 2, nx_global + work_g1(i,j) = work_g1(i-1,j) + length ! ULON + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + if (my_task == master_task) then + work_g1 = c0 + length = dyrect*cm_to_m/radius*rad_to_deg + +! work_g1(:,1) = -75._dbl_kind ! Weddell Sea + work_g1(:,1) = 71.35_dbl_kind ! 
Barrow AK + + do i = 1, nx_global + do j = 2, ny_global + work_g1(i,j) = work_g1(i,j-1) + length ! ULAT + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dxrect ! HTN + enddo + enddo + endif + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dyrect ! HTE + enddo + enddo + endif + call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + + !----------------------------------------------------------------- + ! Construct T-cell land mask + ! Keyed on ew_boundary_type; ns_boundary_type should be 'open'. + !----------------------------------------------------------------- + + if (my_task == master_task) then + work_g1(:,:) = c0 ! initialize hm as land + + if (trim(ew_boundary_type) == 'cyclic') then + + do j = 3,ny_global-2 ! closed top and bottom + do i = 1,nx_global ! open sides + work_g1(i,j) = c1 ! NOTE nx_global > 5 + enddo + enddo + + elseif (trim(ew_boundary_type) == 'open') then + + ! land in the upper left and lower right corners, + ! otherwise open boundaries + imid = aint(real(nx_global)/c2,kind=int_kind) + jmid = aint(real(ny_global)/c2,kind=int_kind) + + do j = 3,ny_global-2 + do i = 3,nx_global-2 + work_g1(i,j) = c1 ! open central domain + enddo + enddo + + if (nx_global > 5 .and. ny_global > 5) then + + do j = 1, jmid+2 + do i = 1, imid+2 + work_g1(i,j) = c1 ! open lower left corner + enddo + enddo + + do j = max(jmid-2,1), ny_global + do i = max(imid-2,1), nx_global + work_g1(i,j) = c1 ! open upper right corner + enddo + enddo + + endif + + elseif (trim(ew_boundary_type) == 'closed') then + + call abort_ice('closed boundaries not available') + + endif + endif + + call scatter_global(hm, work_g1, master_task, distrb_info, & + field_loc_center, field_type_scalar) + + deallocate(work_g1) + + end subroutine rectgrid + +!======================================================================= + +! CPOM displaced pole grid and land mask. \\ +! Grid record number, field and units are: \\ +! (1) ULAT (degrees) \\ +! (2) ULON (degrees) \\ +! (3) HTN (m) \\ +! (4) HTE (m) \\ +! (7) ANGLE (radians) \\ +! +! Land mask record number and field is (1) KMT. +! +! author: Adrian K. Turner, CPOM, UCL, 09/08/06 + + subroutine cpomgrid + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c1, rad_to_deg, m_to_cm, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + logical (kind=log_kind) :: diag + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + type (block) :: & + this_block ! block information for current block + + call ice_open(nu_grid,grid_file,64) + call ice_open(nu_kmt,kmt_file,32) + + diag = .true. ! write diagnostic info + + ! 
topography + call ice_read(nu_kmt,1,work1,'ida4',diag) + + hm(:,:,:) = c0 + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + hm(i,j,iblk) = work1(i,j,iblk) + if (hm(i,j,iblk) >= c1) hm(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + + allocate(work_g1(nx_global,ny_global)) + + ! lat, lon, cell dimensions, angles + call ice_read_global(nu_grid,1,work_g1, 'rda8',diag) + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + + call ice_read_global(nu_grid,2,work_g1, 'rda8',diag) + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + + call ice_read_global(nu_grid,3,work_g1, 'rda8',diag) + work_g1 = work_g1 * m_to_cm + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + + call ice_read_global(nu_grid,4,work_g1, 'rda8',diag) + work_g1 = work_g1 * m_to_cm + call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + + call ice_read_global(nu_grid,7,work_g1,'rda8',diag) + call scatter_global(ANGLE, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + + ! fix units + ULAT = ULAT / rad_to_deg + ULON = ULON / rad_to_deg + + deallocate(work_g1) + + if (my_task == master_task) then + close (nu_grid) + close (nu_kmt) + endif + + write(nu_diag,*) "min/max HTN: ", minval(HTN), maxval(HTN) + write(nu_diag,*) "min/max HTE: ", minval(HTE), maxval(HTE) + + end subroutine cpomgrid + +!======================================================================= + +! Calculate dxu and dxt from HTN on the global grid, to preserve +! ghost cell and/or land values that might otherwise be lost. Scatter +! dxu, dxt and HTN to all processors. +! +! author: Elizabeth C. Hunke, LANL + + subroutine primary_grid_lengths_HTN(work_g) + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: p5, c2, cm_to_m, & + field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_type_scalar + use ice_domain_size, only: max_blocks + + real (kind=dbl_kind), dimension(:,:) :: work_g ! global array holding HTN + + ! local variables + + integer (kind=int_kind) :: & + i, j, & + ip1 ! i+1 + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g2 + + if (my_task == master_task) then + allocate(work_g2(nx_global,ny_global)) + else + allocate(work_g2(1,1)) + endif + + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g(i,j) = work_g(i,j) * cm_to_m ! HTN + enddo + enddo + do j = 1, ny_global + do i = 1, nx_global + ! assume cyclic; noncyclic will be handled during scatter + ip1 = i+1 + if (i == nx_global) ip1 = 1 + work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxu + enddo + enddo + endif + call scatter_global(HTN, work_g, master_task, distrb_info, & + field_loc_Nface, field_type_scalar) + call scatter_global(dxu, work_g2, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + + if (my_task == master_task) then + do j = 2, ny_global + do i = 1, nx_global + work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j-1)) ! dxt + enddo + enddo + ! extrapolate to obtain dxt along j=1 + do i = 1, nx_global + work_g2(i,1) = c2*work_g(i,2) - work_g(i,3) ! 
dxt + enddo + endif + call scatter_global(dxt, work_g2, master_task, distrb_info, & + field_loc_center, field_type_scalar) + + deallocate(work_g2) + + end subroutine primary_grid_lengths_HTN + +!======================================================================= +! Calculate dyu and dyt from HTE on the global grid, to preserve +! ghost cell and/or land values that might otherwise be lost. Scatter +! dyu, dyt and HTE to all processors. +! +! author: Elizabeth C. Hunke, LANL + + subroutine primary_grid_lengths_HTE(work_g) + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: p5, c2, cm_to_m, & + field_loc_center, field_loc_NEcorner, & + field_loc_Eface, field_type_scalar + use ice_domain_size, only: max_blocks + + real (kind=dbl_kind), dimension(:,:) :: work_g ! global array holding HTE + + ! local variables + + integer (kind=int_kind) :: & + i, j, & + im1 ! i-1 + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g2 + + if (my_task == master_task) then + allocate(work_g2(nx_global,ny_global)) + else + allocate(work_g2(1,1)) + endif + + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g(i,j) = work_g(i,j) * cm_to_m ! HTE + enddo + enddo + do j = 1, ny_global-1 + do i = 1, nx_global + work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j+1)) ! dyu + enddo + enddo + ! extrapolate to obtain dyu along j=ny_global + ! for CESM: use NYGLOB to prevent a compile time out of bounds + ! error when ny_global=1 as in the se dycore; this code is not + ! exersized in prescribed mode. +#if (NYGLOB>2) + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g(i,ny_global-1) & + - work_g(i,ny_global-2) ! dyu + enddo +#endif + endif + call scatter_global(HTE, work_g, master_task, distrb_info, & + field_loc_Eface, field_type_scalar) + call scatter_global(dyu, work_g2, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + ! assume cyclic; noncyclic will be handled during scatter + im1 = i-1 + if (i == 1) im1 = nx_global + work_g2(i,j) = p5*(work_g(i,j) + work_g(im1,j)) ! dyt + enddo + enddo + endif + call scatter_global(dyt, work_g2, master_task, distrb_info, & + field_loc_center, field_type_scalar) + + deallocate(work_g2) + + end subroutine primary_grid_lengths_HTE + +!======================================================================= + +! Sets the boundary values for the T cell land mask (hm) and +! makes the logical land masks for T and U cells (tmask, umask). +! Also creates hemisphere masks (mask-n northern, mask-s southern) +! +! author: Elizabeth C. Hunke, LANL + + subroutine makemask + + use ice_constants, only: c0, puny, p5, & + field_loc_center, field_loc_NEcorner, field_type_scalar + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + type (block) :: & + this_block ! block information for current block + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (hm, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + !----------------------------------------------------------------- + ! construct T-cell and U-cell masks + !----------------------------------------------------------------- + + bm = c0 +#ifdef AusCOM + if ( .not. 
use_umask ) then +#endif + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + uvm(i,j,iblk) = min (hm(i,j, iblk), hm(i+1,j, iblk), & + hm(i,j+1,iblk), hm(i+1,j+1,iblk)) + bm(i,j,iblk) = my_task + iblk/100.0_dbl_kind + enddo + enddo + enddo +#ifdef AusCOM + endif +#endif + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uvm, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloUpdate (bm, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + tmask(i,j,iblk) = .false. + umask(i,j,iblk) = .false. + if ( hm(i,j,iblk) > p5) tmask(i,j,iblk) = .true. + if (uvm(i,j,iblk) > p5) umask(i,j,iblk) = .true. + enddo + enddo + + !----------------------------------------------------------------- + ! create hemisphere masks + !----------------------------------------------------------------- + + lmask_n(:,:,iblk) = .false. + lmask_s(:,:,iblk) = .false. + + tarean(:,:,iblk) = c0 + tareas(:,:,iblk) = c0 + + do j = 1, ny_block + do i = 1, nx_block + + if (ULAT(i,j,iblk) >= -puny) lmask_n(i,j,iblk) = .true. ! N. Hem. + if (ULAT(i,j,iblk) < -puny) lmask_s(i,j,iblk) = .true. ! S. Hem. + + ! N hemisphere area mask (m^2) + if (lmask_n(i,j,iblk)) tarean(i,j,iblk) = tarea(i,j,iblk) & + * hm(i,j,iblk) + + ! S hemisphere area mask (m^2) + if (lmask_s(i,j,iblk)) tareas(i,j,iblk) = tarea(i,j,iblk) & + * hm(i,j,iblk) + + enddo + enddo + + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine makemask + +!======================================================================= + +! Initializes latitude and longitude on T grid +! +! author: Elizabeth C. Hunke, LANL; code originally based on POP grid +! generation routine + + subroutine Tlatlon + + use ice_constants, only: c0, c1, rad_to_deg, c2, c4, & + field_loc_center, field_type_scalar + use ice_global_reductions, only: global_minval, global_maxval + save + + integer (kind=int_kind) :: & + i, j, iblk , & ! horizontal indices + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + z1,x1,y1,z2,x2,y2,z3,x3,y3,z4,x4,y4,tx,ty,tz,da + + type (block) :: & + this_block ! block information for current block + +#ifndef AusCOM + TLAT(:,:,:) = c0 + TLON(:,:,:) = c0 + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & + !$OMP tx,ty,tz,da) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + + z1 = cos(ULAT(i-1,j-1,iblk)) + x1 = cos(ULON(i-1,j-1,iblk))*z1 + y1 = sin(ULON(i-1,j-1,iblk))*z1 + z1 = sin(ULAT(i-1,j-1,iblk)) + + z2 = cos(ULAT(i,j-1,iblk)) + x2 = cos(ULON(i,j-1,iblk))*z2 + y2 = sin(ULON(i,j-1,iblk))*z2 + z2 = sin(ULAT(i,j-1,iblk)) + + z3 = cos(ULAT(i-1,j,iblk)) + x3 = cos(ULON(i-1,j,iblk))*z3 + y3 = sin(ULON(i-1,j,iblk))*z3 + z3 = sin(ULAT(i-1,j,iblk)) + + z4 = cos(ULAT(i,j,iblk)) + x4 = cos(ULON(i,j,iblk))*z4 + y4 = sin(ULON(i,j,iblk))*z4 + z4 = sin(ULAT(i,j,iblk)) + + tx = (x1+x2+x3+x4)/c4 + ty = (y1+y2+y3+y4)/c4 + tz = (z1+z2+z3+z4)/c4 + da = sqrt(tx**2+ty**2+tz**2) + + tz = tz/da + + ! 
TLON in radians East + TLON(i,j,iblk) = c0 + if (tx /= c0 .or. ty /= c0) TLON(i,j,iblk) = atan2(ty,tx) + + ! TLAT in radians North + TLAT(i,j,iblk) = asin(tz) + + enddo ! i + enddo ! j + enddo ! iblk + !$OMP END PARALLEL DO + + if (trim(grid_type) == 'regional') then + ! for W boundary extrapolate from interior + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + i = ilo + if (this_block%i_glob(i) == 1) then + do j = jlo, jhi + TLON(i,j,iblk) = c2*TLON(i+1,j,iblk) - & + TLON(i+2,j,iblk) + TLAT(i,j,iblk) = c2*TLAT(i+1,j,iblk) - & + TLAT(i+2,j,iblk) + enddo + endif + enddo + !$OMP END PARALLEL DO + endif ! regional + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (TLON, halo_info, & + field_loc_center, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (TLAT, halo_info, & + field_loc_center, field_type_scalar, & + fillValue=c1) + call ice_HaloExtrapolate(TLON, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(TLAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_timer_stop(timer_bound) +#else + !AusCOM does NOT calculate TLON, TLAT. Instead, they are read in + ! from grid.nc (see routine popgrid_nc) +#endif + + x1 = global_minval(TLON, distrb_info, tmask) + x2 = global_maxval(TLON, distrb_info, tmask) + x3 = global_minval(TLAT, distrb_info, tmask) + x4 = global_maxval(TLAT, distrb_info, tmask) + + y1 = global_minval(ULON, distrb_info, umask) + y2 = global_maxval(ULON, distrb_info, umask) + y3 = global_minval(ULAT, distrb_info, umask) + y4 = global_maxval(ULAT, distrb_info, umask) + + if (my_task==master_task) then + write(nu_diag,*) ' ' + if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then + write(nu_diag,*) 'min/max ULON:', y1*rad_to_deg, y2*rad_to_deg + write(nu_diag,*) 'min/max ULAT:', y3*rad_to_deg, y4*rad_to_deg + endif + write(nu_diag,*) 'min/max TLON:', x1*rad_to_deg, x2*rad_to_deg + write(nu_diag,*) 'min/max TLAT:', x3*rad_to_deg, x4*rad_to_deg + endif ! my_task + + end subroutine Tlatlon + +!======================================================================= + +! Transfer vector component from T-cell centers to U-cell centers. +! +! author: Elizabeth C. Hunke, LANL + + subroutine t2ugrid_vector (work) + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: field_loc_center, field_type_vector + use ice_domain_size, only: max_blocks + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(inout) :: & + work + + ! local variables + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + work1(:,:,:) = work(:,:,:) + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (work1, halo_info, & + field_loc_center, field_type_vector) + call ice_timer_stop(timer_bound) + + call to_ugrid(work1,work) + + end subroutine t2ugrid_vector + +!======================================================================= + +! Shifts quantities from the T-cell midpoint (work1) to the U-cell +! midpoint (work2) +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: Elizabeth C. 
Hunke, LANL + + subroutine to_ugrid(work1,work2) + + use ice_constants, only: c0, p25 + + real (kind=dbl_kind), intent(in) :: & + work1(nx_block,ny_block,max_blocks) + + real (kind=dbl_kind), intent(out) :: & + work2(nx_block,ny_block,max_blocks) + + type (block) :: & + this_block ! block information for current block + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + work2(:,:,:) = c0 + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i, j, iblk)*tarea(i, j, iblk) & + + work1(i+1,j, iblk)*tarea(i+1,j, iblk) & + + work1(i, j+1,iblk)*tarea(i, j+1,iblk) & + + work1(i+1,j+1,iblk)*tarea(i+1,j+1,iblk)) & + / uarea(i, j, iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + end subroutine to_ugrid + +!======================================================================= + +! Transfer from U-cell centers to T-cell centers. Writes work into +! another array that has ghost cells +! NOTE: Input array is dimensioned only over physical cells. +! +! author: Elizabeth C. Hunke, LANL + + subroutine u2tgrid_vector (work) + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: field_loc_NEcorner, field_type_vector + use ice_domain_size, only: max_blocks + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), & + intent(inout) :: & + work + + ! local variables + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + work1(:,:,:) = work(:,:,:) + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (work1, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_timer_stop(timer_bound) + + call to_tgrid(work1,work) + + end subroutine u2tgrid_vector + +!======================================================================= + +! Shifts quantities from the U-cell midpoint (work1) to the T-cell +! midpoint (work2) +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: Elizabeth C. Hunke, LANL + + subroutine to_tgrid(work1, work2) + + use ice_constants, only: p25 + + real (kind=dbl_kind) :: work1(nx_block,ny_block,max_blocks), & + work2(nx_block,ny_block,max_blocks) + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + type (block) :: & + this_block ! block information for current block + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i, j ,iblk) * uarea(i, j, iblk) & + + work1(i-1,j ,iblk) * uarea(i-1,j, iblk) & + + work1(i, j-1,iblk) * uarea(i, j-1,iblk) & + + work1(i-1,j-1,iblk) * uarea(i-1,j-1,iblk)) & + / tarea(i, j, iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + end subroutine to_tgrid + +!======================================================================= +! The following code is used for obtaining the coordinates of the grid +! vertices for CF-compliant netCDF history output. Approximate! +!======================================================================= + +! 
These fields are only used for netcdf history output, and the +! ghost cell values are not needed. +! NOTE: Extrapolations were used: these fields are approximate! +! +! authors: A. McLaren, Met Office +! E. Hunke, LANL + + subroutine gridbox_corners + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, rad_to_deg, c2, c360, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + + integer (kind=int_kind) :: & + i,j,iblk,icorner,& ! index counters + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g2 + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + type (block) :: & + this_block ! block information for current block + + !------------------------------------------------------------- + ! Get coordinates of grid boxes for each block as follows: + ! (1) SW corner, (2) SE corner, (3) NE corner, (4) NW corner + !------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + + latu_bounds(1,i,j,iblk)=TLAT(i ,j ,iblk)*rad_to_deg + latu_bounds(2,i,j,iblk)=TLAT(i+1,j ,iblk)*rad_to_deg + latu_bounds(3,i,j,iblk)=TLAT(i+1,j+1,iblk)*rad_to_deg + latu_bounds(4,i,j,iblk)=TLAT(i ,j+1,iblk)*rad_to_deg + + lonu_bounds(1,i,j,iblk)=TLON(i ,j ,iblk)*rad_to_deg + lonu_bounds(2,i,j,iblk)=TLON(i+1,j ,iblk)*rad_to_deg + lonu_bounds(3,i,j,iblk)=TLON(i+1,j+1,iblk)*rad_to_deg + lonu_bounds(4,i,j,iblk)=TLON(i ,j+1,iblk)*rad_to_deg + + enddo + enddo + enddo + !$OMP END PARALLEL DO + + !---------------------------------------------------------------- + ! 
extrapolate on global grid to get edge values + !---------------------------------------------------------------- + + if (my_task == master_task) then + allocate(work_g2(nx_global,ny_global)) + else + allocate(work_g2(1,1)) + endif + + work1(:,:,:) = latu_bounds(2,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + latu_bounds(2,:,:,:) = work1(:,:,:) + + work1(:,:,:) = latu_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + latu_bounds(3,:,:,:) = work1(:,:,:) + + work1(:,:,:) = latu_bounds(4,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + latu_bounds(4,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lonu_bounds(2,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonu_bounds(2,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lonu_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonu_bounds(3,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lonu_bounds(4,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonu_bounds(4,:,:,:) = work1(:,:,:) + + deallocate(work_g2) + + !---------------------------------------------------------------- + ! Convert longitude to Degrees East >0 for history output + !---------------------------------------------------------------- + + allocate(work_g2(nx_block,ny_block)) ! 
not used as global here + !OMP fails in this loop + do iblk = 1, nblocks + do icorner = 1, 4 + work_g2(:,:) = lont_bounds(icorner,:,:,iblk) + c360 + where (work_g2 > c360) work_g2 = work_g2 - c360 + where (work_g2 < c0 ) work_g2 = work_g2 + c360 + lont_bounds(icorner,:,:,iblk) = work_g2(:,:) + work_g2(:,:) = lonu_bounds(icorner,:,:,iblk) + c360 + where (work_g2 > c360) work_g2 = work_g2 - c360 + where (work_g2 < c0 ) work_g2 = work_g2 + c360 + lonu_bounds(icorner,:,:,iblk) = work_g2(:,:) + enddo + enddo + deallocate(work_g2) + + end subroutine gridbox_corners + +!======================================================================= + +! NOTE: Boundary conditions for fields on NW, SW, SE corners +! have not been implemented; using NE corner location for all. +! Extrapolations are also used: these fields are approximate! +! +! authors: A. McLaren, Met Office +! E. Hunke, LANL + + subroutine gridbox_verts(work_g,vbounds) + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, rad_to_deg, c2, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + + real (kind=dbl_kind), dimension(:,:), intent(in) :: work_g + + real (kind=dbl_kind), & + dimension(4,nx_block,ny_block,max_blocks), & + intent(out) :: vbounds + + integer (kind=int_kind) :: & + i,j ! index counters + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g2 + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + if (my_task == master_task) then + allocate(work_g2(nx_global,ny_global)) + else + allocate(work_g2(1,1)) + endif + + !------------------------------------------------------------- + ! Get coordinates of grid boxes for each block as follows: + ! (1) SW corner, (2) SE corner, (3) NE corner, (4) NW corner + !------------------------------------------------------------- + + work_g2(:,:) = c0 + if (my_task == master_task) then + do j = 2, ny_global + do i = 2, nx_global + work_g2(i,j) = work_g(i-1,j-1) * rad_to_deg + enddo + enddo + ! extrapolate + do j = 1, ny_global + work_g2(1,j) = c2*work_g2(2,j) - work_g2(3,j) + enddo + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) - work_g2(i,3) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + vbounds(1,:,:,:) = work1(:,:,:) + + work_g2(:,:) = c0 + if (my_task == master_task) then + do j = 2, ny_global + do i = 1, nx_global + work_g2(i,j) = work_g(i,j-1) * rad_to_deg + enddo + enddo + ! extrapolate + do i = 1, nx_global + work_g2(i,1) = (c2*work_g2(i,2) - work_g2(i,3)) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + vbounds(2,:,:,:) = work1(:,:,:) + + work_g2(:,:) = c0 + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g2(i,j) = work_g(i,j) * rad_to_deg + enddo + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + vbounds(3,:,:,:) = work1(:,:,:) + + work_g2(:,:) = c0 + if (my_task == master_task) then + do j = 1, ny_global + do i = 2, nx_global + work_g2(i,j) = work_g(i-1,j ) * rad_to_deg + enddo + enddo + ! 
extrapolate + do j = 1, ny_global + work_g2(1,j) = c2*work_g2(2,j) - work_g2(3,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + vbounds(4,:,:,:) = work1(:,:,:) + + deallocate (work_g2) + + end subroutine gridbox_verts + +!======================================================================= + + end module ice_grid + +!======================================================================= diff --git a/source/ice_history.F90 b/source/ice_history.F90 new file mode 100755 index 00000000..cd712630 --- /dev/null +++ b/source/ice_history.F90 @@ -0,0 +1,2088 @@ +! SVN:$Id: ice_history.F90 820 2014-08-26 19:08:29Z eclare $ +!======================================================================= + +! Driver for core history output +! +! The following variables are currently hard-wired as snapshots +! (instantaneous rather than time-averages): +! divu, shear, sig1, sig2, trsig, mlt_onset, frz_onset, hisnap, aisnap +! +! Options for histfreq: '1','h','d','m','y','x', where x means that +! output stream will not be used (recommended for efficiency). +! histfreq_n can be any nonnegative integer, where 0 means that the +! corresponding histfreq frequency will not be used. +! The flags (f_) can be set to '1','h','d','m','y' or 'x', where +! n means the field will not be written. To output the same field at +! more than one frequency, for instance monthy and daily, set +! f_ = 'md'. +! +! authors Tony Craig and Bruce Briegleb, NCAR +! Elizabeth C. Hunke and William H. Lipscomb, LANL +! C. M. Bitz, UW +! +! 2004 WHL: Block structure added +! 2006 ECH: Accepted some CCSM code into mainstream CICE +! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. +! Added histfreq_n and histfreq='h' options, removed histfreq='w' +! Converted to free source form (F90) +! Added option for binary output instead of netCDF +! 2009 D Bailey and ECH: Generalized for multiple frequency output +! 2010 Alison McLaren and ECH: Added 3D capability + + module ice_history + + use ice_kinds_mod +#ifdef AusCOM + use cpl_parameters, only: caltype +#endif + + implicit none + private + public :: init_hist, accum_hist + save + +!======================================================================= + + contains + +!======================================================================= + +! Initialize history files +! +! authors Tony Craig, NCAR +! Elizabeth C. Hunke, LANL +! C.M. Bitz, UW +! Bruce P. Briegleb, NCAR +! William H. Lipscomb, LANL + + subroutine init_hist (dt) + + use ice_atmo, only: formdrag + use ice_blocks, only: nx_block, ny_block + use ice_broadcast, only: broadcast_scalar, broadcast_array + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c1, c2, c100, mps_to_cmpdy, rhofresh, & + Tffresh, kg_to_g, secday + use ice_calendar, only: yday, days_per_year, histfreq, & + histfreq_n, nstreams + use ice_domain_size, only: max_blocks, max_nstrm + use ice_dyn_shared, only: kdyn + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, nml_filename, nu_diag, & + get_fileunit, release_fileunit + use ice_flux, only: mlt_onset, frz_onset, albcnt + use ice_history_shared ! 
everything + use ice_history_mechred, only: init_hist_mechred_2D, init_hist_mechred_3Dc + use ice_history_pond, only: init_hist_pond_2D, init_hist_pond_3Dc + use ice_history_bgc, only: init_hist_bgc_2D, init_hist_bgc_3Dc, & + init_hist_bgc_4Db + use ice_history_drag, only: init_hist_drag_2D + use ice_restart_shared, only: restart + use ice_state, only: tr_iage, tr_FY, tr_lvl, tr_pond, tr_aero, tr_brine + use ice_therm_shared, only: calc_Tsfc, heat_capacity + use ice_zbgc_shared, only: skl_bgc + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: n, ns, ns1, ns2 + integer (kind=int_kind), dimension(max_nstrm) :: & + ntmp + integer (kind=int_kind) :: nml_error ! namelist i/o error flag + + !----------------------------------------------------------------- + ! read namelist + !----------------------------------------------------------------- + + call get_fileunit(nu_nml) + if (my_task == master_task) then + open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + do while (nml_error > 0) + read(nu_nml, nml=icefields_nml,iostat=nml_error) + end do + if (nml_error == 0) close(nu_nml) + endif + call release_fileunit(nu_nml) + + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + close (nu_nml) + call abort_ice('ice: error reading icefields_nml') + endif + + ! histfreq options ('1','h','d','m','y') + nstreams = 0 + do ns = 1, max_nstrm + if (histfreq(ns) == '1' .or. histfreq(ns) == 'h' .or. & + histfreq(ns) == 'd' .or. histfreq(ns) == 'm' .or. & + histfreq(ns) == 'y') then + nstreams = nstreams + 1 + if (ns >= 2) then + if (histfreq(ns-1) == 'x') then + call abort_ice('ice: histfreq all non x must be at start of array') + endif + endif + else if (histfreq(ns) /= 'x') then + call abort_ice('ice: histfreq contains illegal element') + endif + enddo + if (nstreams == 0) write (nu_diag,*) 'WARNING: No history output' + do ns1 = 1, nstreams + do ns2 = 1, nstreams + if (histfreq(ns1) == histfreq(ns2) .and. ns1/=ns2 & + .and. my_task == master_task) then + call abort_ice('ice: histfreq elements must be unique') + endif + enddo + enddo + + if (.not. tr_iage) then + f_iage = 'x' + f_dagedtt = 'x' + f_dagedtd = 'x' + endif + if (.not. tr_FY) f_FY = 'x' + + if (kdyn /= 2) then + f_a11 = 'x' + f_a12 = 'x' + f_e11 = 'x' + f_e12 = 'x' + f_e22 = 'x' + f_s11 = 'x' + f_s12 = 'x' + f_s22 = 'x' + f_yieldstress11 = 'x' + f_yieldstress12 = 'x' + f_yieldstress22 = 'x' + endif + + ! these must be output at the same frequency because of + ! cos(zenith angle) averaging + if (f_albice(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_albice = f_albsni + if (f_albsno(1:1) /= 'x') f_albsno = f_albice + if (f_albpnd(1:1) /= 'x') f_albpnd = f_albice + if (f_coszen(1:1) /= 'x' .and. f_albice(1:1) /= 'x') f_coszen = f_albice + if (f_coszen(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_coszen = f_albsni + + ! to prevent array-out-of-bounds when aggregating + if (f_fmeltt_ai(1:1) /= 'x') f_fmelttn_ai = f_fmeltt_ai + + ! AEW: These are only calculated under certain circumstances + ! (if using multilayers with UM-style coupling) + if (calc_Tsfc .or. .not. heat_capacity) then + f_Tn_top = 'x' + f_keffn_top = 'x' + endif + +#ifndef ncdf + f_bounds = .false. +#endif + + ! write dimensions for 3D or 4D history variables + ! note: list of variables checked here is incomplete + if (f_aicen(1:1) /= 'x' .or. f_vicen(1:1) /= 'x' .or. & + f_Tinz (1:1) /= 'x' .or. 
f_Sinz (1:1) /= 'x') f_NCAT = .true. + if (f_Tinz (1:1) /= 'x' .or. f_Sinz (1:1) /= 'x') f_VGRDi = .true. + if (f_Tsnz (1:1) /= 'x') f_VGRDs = .true. + + call broadcast_scalar (f_tmask, master_task) + call broadcast_scalar (f_blkmask, master_task) + call broadcast_scalar (f_tarea, master_task) + call broadcast_scalar (f_uarea, master_task) + call broadcast_scalar (f_dxt, master_task) + call broadcast_scalar (f_dyt, master_task) + call broadcast_scalar (f_dxu, master_task) + call broadcast_scalar (f_dyu, master_task) + call broadcast_scalar (f_HTN, master_task) + call broadcast_scalar (f_HTE, master_task) + call broadcast_scalar (f_ANGLE, master_task) + call broadcast_scalar (f_ANGLET, master_task) + call broadcast_scalar (f_bounds, master_task) + call broadcast_scalar (f_NCAT, master_task) + call broadcast_scalar (f_VGRDi, master_task) + call broadcast_scalar (f_VGRDs, master_task) + call broadcast_scalar (f_VGRDb, master_task) + +! call broadcast_scalar (f_example, master_task) + call broadcast_scalar (f_hi, master_task) + call broadcast_scalar (f_hs, master_task) + call broadcast_scalar (f_Tsfc, master_task) + call broadcast_scalar (f_aice, master_task) + call broadcast_scalar (f_uvel, master_task) + call broadcast_scalar (f_vvel, master_task) + call broadcast_scalar (f_uatm, master_task) + call broadcast_scalar (f_vatm, master_task) + call broadcast_scalar (f_sice, master_task) + call broadcast_scalar (f_fswdn, master_task) + call broadcast_scalar (f_flwdn, master_task) + call broadcast_scalar (f_snow, master_task) + call broadcast_scalar (f_snow_ai, master_task) + call broadcast_scalar (f_rain, master_task) + call broadcast_scalar (f_rain_ai, master_task) + call broadcast_scalar (f_sst, master_task) + call broadcast_scalar (f_sss, master_task) + call broadcast_scalar (f_uocn, master_task) + call broadcast_scalar (f_vocn, master_task) + call broadcast_scalar (f_frzmlt, master_task) + call broadcast_scalar (f_fswfac, master_task) + call broadcast_scalar (f_fswint_ai, master_task) + call broadcast_scalar (f_fswabs, master_task) + call broadcast_scalar (f_fswabs_ai, master_task) + call broadcast_scalar (f_albsni, master_task) + call broadcast_scalar (f_alvdr, master_task) + call broadcast_scalar (f_alidr, master_task) + call broadcast_scalar (f_alvdf, master_task) + call broadcast_scalar (f_alidf, master_task) + call broadcast_scalar (f_albice, master_task) + call broadcast_scalar (f_albsno, master_task) + call broadcast_scalar (f_albpnd, master_task) + call broadcast_scalar (f_coszen, master_task) + call broadcast_scalar (f_flat, master_task) + call broadcast_scalar (f_flat_ai, master_task) + call broadcast_scalar (f_fsens, master_task) + call broadcast_scalar (f_fsens_ai, master_task) + call broadcast_scalar (f_flwup, master_task) + call broadcast_scalar (f_flwup_ai, master_task) + call broadcast_scalar (f_evap, master_task) + call broadcast_scalar (f_evap_ai, master_task) + call broadcast_scalar (f_Tair, master_task) + call broadcast_scalar (f_Tref, master_task) + call broadcast_scalar (f_Qref, master_task) + call broadcast_scalar (f_congel, master_task) + call broadcast_scalar (f_frazil, master_task) + call broadcast_scalar (f_snoice, master_task) + call broadcast_scalar (f_dsnow, master_task) + call broadcast_scalar (f_meltt, master_task) + call broadcast_scalar (f_melts, master_task) + call broadcast_scalar (f_meltb, master_task) + call broadcast_scalar (f_meltl, master_task) + call broadcast_scalar (f_fresh, master_task) + call broadcast_scalar (f_fresh_ai, master_task) + call 
broadcast_scalar (f_fsalt, master_task) + call broadcast_scalar (f_fsalt_ai, master_task) + call broadcast_scalar (f_fhocn, master_task) + call broadcast_scalar (f_fhocn_ai, master_task) + call broadcast_scalar (f_fswthru, master_task) + call broadcast_scalar (f_fswthru_ai, master_task) + call broadcast_scalar (f_strairx, master_task) + call broadcast_scalar (f_strairy, master_task) + call broadcast_scalar (f_strtltx, master_task) + call broadcast_scalar (f_strtlty, master_task) + call broadcast_scalar (f_strcorx, master_task) + call broadcast_scalar (f_strcory, master_task) + call broadcast_scalar (f_strocnx, master_task) + call broadcast_scalar (f_strocny, master_task) + call broadcast_scalar (f_strintx, master_task) + call broadcast_scalar (f_strinty, master_task) + call broadcast_scalar (f_strength, master_task) + call broadcast_scalar (f_divu, master_task) + call broadcast_scalar (f_shear, master_task) + call broadcast_scalar (f_sig1, master_task) + call broadcast_scalar (f_sig2, master_task) + call broadcast_scalar (f_dvidtt, master_task) + call broadcast_scalar (f_dvsdtt, master_task) + call broadcast_scalar (f_dvidtd, master_task) + call broadcast_scalar (f_dvsdtd, master_task) + call broadcast_scalar (f_daidtt, master_task) + call broadcast_scalar (f_daidtd, master_task) + call broadcast_scalar (f_dagedtt, master_task) + call broadcast_scalar (f_dagedtd, master_task) + call broadcast_scalar (f_mlt_onset, master_task) + call broadcast_scalar (f_frz_onset, master_task) + call broadcast_scalar (f_aisnap, master_task) + call broadcast_scalar (f_hisnap, master_task) + call broadcast_scalar (f_aicen, master_task) + call broadcast_scalar (f_vicen, master_task) + call broadcast_scalar (f_vsnon, master_task) + call broadcast_scalar (f_trsig, master_task) + call broadcast_scalar (f_icepresent, master_task) + call broadcast_scalar (f_fsurf_ai, master_task) + call broadcast_scalar (f_fcondtop_ai, master_task) + call broadcast_scalar (f_fmeltt_ai, master_task) + call broadcast_scalar (f_fsurfn_ai, master_task) + call broadcast_scalar (f_fcondtopn_ai, master_task) + call broadcast_scalar (f_fmelttn_ai, master_task) + call broadcast_scalar (f_flatn_ai, master_task) + call broadcast_scalar (f_fsensn_ai, master_task) + +! call broadcast_scalar (f_field3dz, master_task) + call broadcast_scalar (f_Tn_top, master_task) + call broadcast_scalar (f_keffn_top, master_task) + call broadcast_scalar (f_Tinz, master_task) + call broadcast_scalar (f_Sinz, master_task) + call broadcast_scalar (f_Tsnz, master_task) + + call broadcast_scalar (f_iage, master_task) + call broadcast_scalar (f_FY, master_task) + + call broadcast_scalar (f_a11, master_task) + call broadcast_scalar (f_a12, master_task) + call broadcast_scalar (f_e11, master_task) + call broadcast_scalar (f_e12, master_task) + call broadcast_scalar (f_e22, master_task) + call broadcast_scalar (f_s11, master_task) + call broadcast_scalar (f_s12, master_task) + call broadcast_scalar (f_s22, master_task) + call broadcast_scalar (f_yieldstress11, master_task) + call broadcast_scalar (f_yieldstress12, master_task) + call broadcast_scalar (f_yieldstress22, master_task) + + ! 2D variables + do ns1 = 1, nstreams + if (histfreq(ns1) /= 'x') then + +!!!!! begin example +! call define_hist_field(n_example,"example","m",tstr2D, tcstr, & +! "example: mean ice thickness", & +! "ice volume per unit grid cell area", c1, c0, & +! ns1, f_example) +!!!!! 
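+      ! The commented call above illustrates the registration pattern used for
+      ! every field below. The arguments to define_hist_field are, in order:
+      ! the field index (n_<name>), the short name and units written to file,
+      ! the dimension and coordinate strings (tstr2D/tcstr for T-grid fields,
+      ! ustr2D/ucstr for U-grid fields), a description string, a comment string,
+      ! the multiplicative conversion factor (cona) and additive offset (conb)
+      ! applied when the accumulated field is written, the stream index ns1,
+      ! and the namelist frequency flag f_<name>.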
end example + + call define_hist_field(n_hi,"hi","m",tstr2D, tcstr, & + "grid cell mean ice thickness", & + "ice volume per unit grid cell area", c1, c0, & + ns1, f_hi) + + call define_hist_field(n_hs,"hs","m",tstr2D, tcstr, & + "grid cell mean snow thickness", & + "snow volume per unit grid cell area", c1, c0, & + ns1, f_hs) + + call define_hist_field(n_Tsfc,"Tsfc","C",tstr2D, tcstr, & + "snow/ice surface temperature", & + "averaged with Tf if no ice is present", c1, c0, & + ns1, f_Tsfc) + + call define_hist_field(n_aice,"aice","1",tstr2D, tcstr, & + "ice area (aggregate)", & + "none", c1, c0, & + ns1, f_aice) + + call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & + "ice velocity (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_uvel) + + call define_hist_field(n_vvel,"vvel","m/s",ustr2D, ucstr, & + "ice velocity (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_vvel) + + call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, & + "atm velocity (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_uatm) + + call define_hist_field(n_vatm,"vatm","m/s",ustr2D, ucstr, & + "atm velocity (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_vatm) + + call define_hist_field(n_sice,"sice","ppt",tstr2D, tcstr, & + "bulk ice salinity", & + "none", c1, c0, & + ns1, f_sice) + + call define_hist_field(n_fswdn,"fswdn","W/m^2",tstr2D, tcstr, & + "down solar flux", & + "positive downward", c1, c0, & + ns1, f_fswdn) + + call define_hist_field(n_flwdn,"flwdn","W/m^2",tstr2D, tcstr, & + "down longwave flux", & + "positive downward", c1, c0, & + ns1, f_flwdn) + + call define_hist_field(n_snow,"snow","cm/day",tstr2D, tcstr, & + "snowfall rate (cpl)", & + "none", mps_to_cmpdy/rhofresh, c0, & + ns1, f_snow) + + call define_hist_field(n_snow_ai,"snow_ai","cm/day",tstr2D, tcstr, & + "snowfall rate", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_snow_ai) + + call define_hist_field(n_rain,"rain","cm/day",tstr2D, tcstr, & + "rainfall rate (cpl)", & + "none", mps_to_cmpdy/rhofresh, c0, & + ns1, f_rain) + + call define_hist_field(n_rain_ai,"rain_ai","cm/day",tstr2D, tcstr, & + "rainfall rate", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_rain_ai) + + call define_hist_field(n_sst,"sst","C",tstr2D, tcstr, & + "sea surface temperature", & + "none", c1, c0, & + ns1, f_sst) + + call define_hist_field(n_sss,"sss","ppt",tstr2D, tcstr, & + "sea surface salinity", & + "none", c1, c0, & + ns1, f_sss) + + call define_hist_field(n_uocn,"uocn","m/s",ustr2D, ucstr, & + "ocean current (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_uocn) + + call define_hist_field(n_vocn,"vocn","m/s",ustr2D, ucstr, & + "ocean current (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_vocn) + + call define_hist_field(n_frzmlt,"frzmlt","W/m^2",tstr2D, tcstr, & + "freeze/melt potential", & + "if >0, new ice forms; if <0, ice melts", c1, c0, & + ns1, f_frzmlt) + + call define_hist_field(n_fswfac,"fswfac","1",tstr2D, tcstr, & + "shortwave scaling factor", & + "ratio of netsw new:old", c1, c0, & + ns1, f_fswfac) + + call define_hist_field(n_fswint_ai,"fswint_ai","W/m^2",tstr2D, tcstr, & + "shortwave absorbed in ice interior", & + "does not include surface", c1, c0, & + ns1, f_fswint_ai) + + call define_hist_field(n_fswabs,"fswabs","W/m^2",tstr2D, tcstr, & + "snow/ice/ocn absorbed solar flux (cpl)", & + "positive downward", c1, c0, & + ns1, f_fswabs) + + call define_hist_field(n_fswabs_ai,"fswabs_ai","W/m^2",tstr2D, 
tcstr, & + "snow/ice/ocn absorbed solar flux", & + "weighted by ice area", c1, c0, & + ns1, f_fswabs_ai) + +! call define_hist_field(n_albsni,"albsni","%",tstr2D, tcstr, & +! "snow/ice broad band albedo", & +! "scaled (divided) by aice", c100, c0, & +! ns1, f_albsni) + + call define_hist_field(n_albsni,"albsni","%",tstr2D, tcstr, & + "snow/ice broad band albedo", & + "averaged for coszen>0, weighted by aice", c100, c0, & + ns1, f_albsni) + + call define_hist_field(n_alvdr,"alvdr","%",tstr2D, tcstr, & + "visible direct albedo", & + "scaled (divided) by aice", c100, c0, & + ns1, f_alvdr) + + call define_hist_field(n_alidr,"alidr","%",tstr2D, tcstr, & + "near IR direct albedo", & + "scaled (divided) by aice", c100, c0, & + ns1, f_alidr) + + call define_hist_field(n_alvdf,"alvdf","%",tstr2D, tcstr, & + "visible diffuse albedo", & + "scaled (divided) by aice", c100, c0, & + ns1, f_alvdf) + + call define_hist_field(n_alidf,"alidf","%",tstr2D, tcstr, & + "near IR diffuse albedo", & + "scaled (divided) by aice", c100, c0, & + ns1, f_alidf) + + call define_hist_field(n_albice,"albice","%",tstr2D, tcstr, & + "bare ice albedo", & + "averaged for coszen>0, weighted by aice", c100, c0, & + ns1, f_albice) + + call define_hist_field(n_albsno,"albsno","%",tstr2D, tcstr, & + "snow albedo", & + "averaged for coszen>0, weighted by aice", c100, c0, & + ns1, f_albsno) + + call define_hist_field(n_albpnd,"albpnd","%",tstr2D, tcstr, & + "melt pond albedo", & + "averaged for coszen>0, weighted by aice", c100, c0, & + ns1, f_albpnd) + + call define_hist_field(n_coszen,"coszen","radian",tstr2D, tcstr, & + "cosine of the zenith angle", & + "negative below horizon", c1, c0, & + ns1, f_coszen) + + call define_hist_field(n_flat,"flat","W/m^2",tstr2D, tcstr, & + "latent heat flux (cpl)", & + "positive downward", c1, c0, & + ns1, f_flat) + + call define_hist_field(n_flat_ai,"flat_ai","W/m^2",tstr2D, tcstr, & + "latent heat flux", & + "weighted by ice area", c1, c0, & + ns1, f_flat_ai) + + call define_hist_field(n_fsens,"fsens","W/m^2",tstr2D, tcstr, & + "sensible heat flux (cpl)", & + "positive downward", c1, c0, & + ns1, f_fsens) + + call define_hist_field(n_fsens_ai,"fsens_ai","W/m^2",tstr2D, tcstr, & + "sensible heat flux", & + "weighted by ice area", c1, c0, & + ns1, f_fsens_ai) + + call define_hist_field(n_flwup,"flwup","W/m^2",tstr2D, tcstr, & + "upward longwave flux (cpl)", & + "positive downward", c1, c0, & + ns1, f_flwup) + + call define_hist_field(n_flwup_ai,"flwup_ai","W/m^2",tstr2D, tcstr, & + "upward longwave flux", & + "weighted by ice area", c1, c0, & + ns1, f_flwup_ai) + + call define_hist_field(n_evap,"evap","cm/day",tstr2D, tcstr, & + "evaporative water flux (cpl)", & + "none", mps_to_cmpdy/rhofresh, c0, & + ns1, f_evap) + + call define_hist_field(n_evap_ai,"evap_ai","cm/day",tstr2D, tcstr, & + "evaporative water flux", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_evap_ai) + + call define_hist_field(n_Tair,"Tair","C",tstr2D, tcstr, & + "air temperature", & + "none", c1, -Tffresh, & + ns1, f_Tair) + + call define_hist_field(n_Tref,"Tref","C",tstr2D, tcstr, & + "2m reference temperature", & + "none", c1, -Tffresh, & + ns1, f_Tref) + + call define_hist_field(n_Qref,"Qref","g/kg",tstr2D, tcstr, & + "2m reference specific humidity", & + "none", kg_to_g, c0, & + ns1, f_Qref) + + call define_hist_field(n_congel,"congel","cm/day",tstr2D, tcstr, & + "congelation ice growth", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_congel) + + call define_hist_field(n_frazil,"frazil","cm/day",tstr2D, 
tcstr, & + "frazil ice growth", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_frazil) + + call define_hist_field(n_snoice,"snoice","cm/day",tstr2D, tcstr, & + "snow-ice formation", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_snoice) + + call define_hist_field(n_dsnow,"dsnow","cm/day",tstr2D, tcstr, & + "snow formation", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_dsnow) + + call define_hist_field(n_meltt,"meltt","cm/day",tstr2D, tcstr, & + "top ice melt", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_meltt) + + call define_hist_field(n_melts,"melts","cm/day",tstr2D, tcstr, & + "top snow melt", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_melts) + + call define_hist_field(n_meltb,"meltb","cm/day",tstr2D, tcstr, & + "basal ice melt", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_meltb) + + call define_hist_field(n_meltl,"meltl","cm/day",tstr2D, tcstr, & + "lateral ice melt", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_meltl) + + call define_hist_field(n_fresh,"fresh","cm/day",tstr2D, tcstr, & + "freshwtr flx ice to ocn (cpl)", & + "if positive, ocean gains fresh water", & + mps_to_cmpdy/rhofresh, c0, & + ns1, f_fresh) + + call define_hist_field(n_fresh_ai,"fresh_ai","cm/day",tstr2D, tcstr, & + "freshwtr flx ice to ocn", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_fresh_ai) + + call define_hist_field(n_fsalt,"fsalt","kg/m^2/s",tstr2D, tcstr, & + "salt flux ice to ocn (cpl)", & + "if positive, ocean gains salt", c1, c0, & + ns1, f_fsalt) + + call define_hist_field(n_fsalt_ai,"fsalt_ai","kg/m^2/s",tstr2D, tcstr, & + "salt flux ice to ocean", & + "weighted by ice area", c1, c0, & + ns1, f_fsalt_ai) + + call define_hist_field(n_fhocn,"fhocn","W/m^2",tstr2D, tcstr, & + "heat flux ice to ocn (cpl)", & + "if positive, ocean gains heat", c1, c0, & + ns1, f_fhocn) + + call define_hist_field(n_fhocn_ai,"fhocn_ai","W/m^2",tstr2D, tcstr, & + "heat flux ice to ocean", & + "weighted by ice area", c1, c0, & + ns1, f_fhocn_ai) + + call define_hist_field(n_fswthru,"fswthru","W/m^2",tstr2D, tcstr, & + "SW thru ice to ocean (cpl)", & + "if positive, ocean gains heat", c1, c0, & + ns1, f_fswthru) + + call define_hist_field(n_fswthru_ai,"fswthru_ai","W/m^2",tstr2D, tcstr,& + "SW flux thru ice to ocean", & + "weighted by ice area", c1, c0, & + ns1, f_fswthru_ai) + + call define_hist_field(n_strairx,"strairx","N/m^2",ustr2D, ucstr, & + "atm/ice stress (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_strairx) + + call define_hist_field(n_strairy,"strairy","N/m^2",ustr2D, ucstr, & + "atm/ice stress (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_strairy) + + call define_hist_field(n_strtltx,"strtltx","N/m^2",ustr2D, ucstr, & + "sea sfc tilt stress (x)", & + "none", c1, c0, & + ns1, f_strtltx) + + call define_hist_field(n_strtlty,"strtlty","N/m^2",ustr2D, ucstr, & + "sea sfc tilt stress (y)", & + "none", c1, c0, & + ns1, f_strtlty) + + call define_hist_field(n_strcorx,"strcorx","N/m^2",ustr2D, ucstr, & + "coriolis stress (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_strcorx) + + call define_hist_field(n_strcory,"strcory","N/m^2",ustr2D, ucstr, & + "coriolis stress (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_strcory) + + call define_hist_field(n_strocnx,"strocnx","N/m^2",ustr2D, ucstr, & + "ocean/ice stress (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_strocnx) + + call define_hist_field(n_strocny,"strocny","N/m^2",ustr2D, ucstr, & + "ocean/ice stress (y)", & + "positive is y direction on U grid", c1, c0, & + 
ns1, f_strocny) + + call define_hist_field(n_strintx,"strintx","N/m^2",ustr2D, ucstr, & + "internal ice stress (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_strintx) + + call define_hist_field(n_strinty,"strinty","N/m^2",ustr2D, ucstr, & + "internal ice stress (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_strinty) + + call define_hist_field(n_strength,"strength","N/m",tstr2D, tcstr, & + "compressive ice strength", & + "none", c1, c0, & + ns1, f_strength) + + call define_hist_field(n_divu,"divu","%/day",tstr2D, tcstr, & + "strain rate (divergence)", & + "none", secday*c100, c0, & + ns1, f_divu) + + call define_hist_field(n_shear,"shear","%/day",tstr2D, tcstr, & + "strain rate (shear)", & + "none", secday*c100, c0, & + ns1, f_shear) + + call define_hist_field(n_sig1,"sig1","1",ustr2D, ucstr, & + "norm. principal stress 1", & + "sig1 is instantaneous", c1, c0, & + ns1, f_sig1) + + call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, & + "norm. principal stress 2", & + "sig2 is instantaneous", c1, c0, & + ns1, f_sig2) + + call define_hist_field(n_dvidtt,"dvidtt","cm/day",tstr2D, tcstr, & + "ice volume tendency thermo", & + "none", mps_to_cmpdy, c0, & + ns1, f_dvidtt) + + call define_hist_field(n_dvsdtt,"dvsdtt","cm/day",tstr2D, tcstr, & + "snow volume tendency thermo", & + "none", mps_to_cmpdy, c0, & + ns1, f_dvsdtt) + + call define_hist_field(n_dvidtd,"dvidtd","cm/day",tstr2D, tcstr, & + "ice volume tendency dynamics", & + "none", mps_to_cmpdy, c0, & + ns1, f_dvidtd) + + call define_hist_field(n_dvsdtd,"dvsdtd","cm/day",tstr2D, tcstr, & + "snow volume tendency dynamics", & + "none", mps_to_cmpdy, c0, & + ns1, f_dvsdtd) + + call define_hist_field(n_daidtt,"daidtt","%/day",tstr2D, tcstr, & + "area tendency thermo", & + "none", secday*c100, c0, & + ns1, f_daidtt) + + call define_hist_field(n_daidtd,"daidtd","%/day",tstr2D, tcstr, & + "area tendency dynamics", & + "none", secday*c100, c0, & + ns1, f_daidtd) + + call define_hist_field(n_dagedtt,"dagedtt","day/day",tstr2D, tcstr, & + "age tendency thermo", & + "excludes time step increment", c1, c0, & + ns1, f_dagedtt) + + call define_hist_field(n_dagedtd,"dagedtd","day/day",tstr2D, tcstr, & + "age tendency dynamics", & + "excludes time step increment", c1, c0, & + ns1, f_dagedtd) + + call define_hist_field(n_mlt_onset,"mlt_onset","day of year", & + tstr2D, tcstr,"melt onset date", & + "midyear restart gives erroneous dates", c1, c0, & + ns1, f_mlt_onset) + + call define_hist_field(n_frz_onset,"frz_onset","day of year", & + tstr2D, tcstr,"freeze onset date", & + "midyear restart gives erroneous dates", c1, c0, & + ns1, f_frz_onset) + + call define_hist_field(n_hisnap,"hisnap","m",tstr2D, tcstr, & + "ice volume snapshot", & + "none", c1, c0, & + ns1, f_hisnap) + + call define_hist_field(n_aisnap,"aisnap","1",tstr2D, tcstr, & + "ice area snapshot", & + "none", c1, c0, & + ns1, f_aisnap) + + call define_hist_field(n_trsig,"trsig","N/m^2",tstr2D, tcstr, & + "internal stress tensor trace", & + "ice strength approximation", c1, c0, & + ns1, f_trsig) + + call define_hist_field(n_icepresent,"ice_present","1",tstr2D, tcstr, & + "fraction of time-avg interval that ice is present", & + "ice extent flag", c1, c0, & + ns1, f_icepresent) + + call define_hist_field(n_fsurf_ai,"fsurf_ai","W/m^2",tstr2D, tcstr, & + "net surface heat flux", & + "positive downward, excludes conductive flux, weighted by ice area", & + c1, c0, & + ns1, f_fsurf_ai) + + call define_hist_field(n_fcondtop_ai,"fcondtop_ai","W/m^2", & + tstr2D, 
tcstr,"top surface conductive heat flux", & + "positive downward, weighted by ice area", c1, c0, & + ns1, f_fcondtop_ai) + + call define_hist_field(n_fmeltt_ai,"fmeltt_ai","W/m^2",tstr2D, tcstr, & + "net surface heat flux causing melt", & + "always >= 0, weighted by ice area", c1, c0, & + ns1, f_fmeltt_ai) + + call define_hist_field(n_a11,"a11"," ",tstr2D, tcstr, & + "a11: component a11 of the structure tensor", & + "none", c1, c0, & + ns1, f_a11) + + call define_hist_field(n_a12,"a12"," ",tstr2D, tcstr, & + "a12: component a12 of the structure tensor", & + "none", c1, c0, & + ns1, f_a12) + + call define_hist_field(n_e11,"e11","1/s",tstr2D, tcstr, & + "e11: component e11 of the strain rate tensor", & + "none", c1, c0, & + ns1, f_e11) + + call define_hist_field(n_e12,"e12","1/s",tstr2D, tcstr, & + "e12: component e12 of the strain rate tensor", & + "none", c1, c0, & + ns1, f_e12) + + call define_hist_field(n_e22,"e22","1/s",tstr2D, tcstr, & + "e22: component e22 of the strain rate tensor", & + "none", c1, c0, & + ns1, f_e22) + + call define_hist_field(n_s11,"s11","kg/s^2",tstr2D, tcstr, & + "s11: component s11 of the stress tensor", & + "none", c1, c0, & + ns1, f_s11) + + call define_hist_field(n_s12,"s12","kg/s^2",tstr2D, tcstr, & + "s12: component s12 of the stress tensor", & + "none", c1, c0, & + ns1, f_s12) + + call define_hist_field(n_s22,"s22","kg/s^2",tstr2D, tcstr, & + "s22: component s12 of the stress tensor", & + "none", c1, c0, & + ns1, f_s22) + + call define_hist_field(n_yieldstress11,"yieldstress11","kg/s^2",tstr2D, tcstr, & + "yieldstress11: component 11 of the yieldstress tensor", & + "none", c1, c0, & + ns1, f_yieldstress11) + + call define_hist_field(n_yieldstress12,"yieldstress12","kg/s^2",tstr2D, tcstr, & + "yieldstress12: component 12 of the yieldstress tensor", & + "none", c1, c0, & + ns1, f_yieldstress12) + + call define_hist_field(n_yieldstress22,"yieldstress22","kg/s^2",tstr2D, tcstr, & + "yieldstress22: component 12 of the yieldstress tensor", & + "none", c1, c0, & + ns1, f_yieldstress22) + + ! Tracers + + ! Ice Age + call define_hist_field(n_iage,"iage","years",tstr2D, tcstr, & + "sea ice age", & + "none", c1/(secday*days_per_year), c0, & + ns1, f_iage) + + ! First Year Ice Area + call define_hist_field(n_FY,"FYarea"," ",tstr2D, tcstr, & + "first-year ice area", & + "weighted by ice area", c1, c0, & + ns1, f_FY) + + endif ! if (histfreq(ns1) /= 'x') then + enddo ! ns1 + + ! other 2D history variables + + ! mechanical redistribution + call init_hist_mechred_2D + + ! melt ponds + if (tr_pond) call init_hist_pond_2D + + ! biogeochemistry + if (tr_aero .or. tr_brine .or. skl_bgc) call init_hist_bgc_2D + + if (formdrag) call init_hist_drag_2D + + !----------------------------------------------------------------- + ! 
3D (category) variables looped separately for ordering + !----------------------------------------------------------------- + do ns1 = 1, nstreams + if (histfreq(ns1) /= 'x') then + + call define_hist_field(n_aicen,"aicen","1",tstr3Dc, tcstr, & + "ice area, categories","none", c1, c0, & + ns1, f_aicen) + + call define_hist_field(n_vicen,"vicen","m",tstr3Dc, tcstr, & + "ice volume, categories","none", c1, c0, & + ns1, f_vicen) + + call define_hist_field(n_vsnon,"vsnon","m",tstr3Dc, tcstr, & + "snow depth on ice, categories","volume per unit area of snow", c1, c0, & + ns1, f_vsnon) + + call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & + "net surface heat flux, categories","weighted by ice area", c1, c0, & + ns1, f_fsurfn_ai) + + call define_hist_field(n_fcondtopn_ai,"fcondtopn_ai","W/m^2",tstr3Dc, tcstr, & + "top sfc conductive heat flux, cat","weighted by ice area", c1, c0, & + ns1, f_fcondtopn_ai) + + call define_hist_field(n_fmelttn_ai,"fmelttn_ai","W/m^2",tstr3Dc, tcstr, & + "net sfc heat flux causing melt, cat","weighted by ice area", c1, c0, & + ns1, f_fmelttn_ai) + + call define_hist_field(n_flatn_ai,"flatn_ai","W/m^2",tstr3Dc, tcstr, & + "latent heat flux, category","weighted by ice area", c1, c0, & + ns1, f_flatn_ai) + + call define_hist_field(n_fsensn_ai,"fsensn_ai","W/m^2",tstr3Dc, tcstr, & + "sensible heat flux, category","weighted by ice area", c1, c0, & + ns1, f_fsensn_ai) + + call define_hist_field(n_Tn_top,"Tn_top","K",tstr3Dc, tcstr, & + "temperature of the top layer (snow or ice), categories","multilayer scheme", c1, c0, & + ns1, f_Tn_top) + + call define_hist_field(n_keffn_top,"keffn_top","W/m^2/K",tstr3Dc, tcstr, & + "effective thermal conductivity of the top ice layer, categories", & + "multilayer scheme", c1, c0, & + ns1, f_keffn_top) + + endif ! if (histfreq(ns1) /= 'x') then + enddo ! ns1 + + ! other 3D (category) history variables + + ! mechanical redistribution + call init_hist_mechred_3Dc + + ! melt ponds + if (tr_pond) call init_hist_pond_3Dc + + ! biogeochemistry + if (tr_brine) call init_hist_bgc_3Dc + + !----------------------------------------------------------------- + ! 3D (vertical) variables must be looped separately + !----------------------------------------------------------------- + +! do ns1 = 1, nstreams +! if (histfreq(ns1) /= 'x') then + +! call define_hist_field(n_field3dz,"field3dz","1",tstr3Dz, tcstr, & +! "example 3dz field", & +! "vertical profile", c1, c0, & +! ns1, f_field3dz) + +! endif ! if (histfreq(ns1) /= 'x') then +! enddo ! ns1 + + !----------------------------------------------------------------- + ! 4D (categories, vertical) variables must be looped separately + !----------------------------------------------------------------- + + do ns1 = 1, nstreams + if (histfreq(ns1) /= 'x') then + + call define_hist_field(n_Tinz,"Tinz","C",tstr4Di, tcstr, & + "ice internal temperatures on CICE grid", & + "vertical profile", c1, c0, & + ns1, f_Tinz) + + call define_hist_field(n_Sinz,"Sinz","ppt",tstr4Di, tcstr, & + "ice internal bulk salinity", & + "vertical profile", c1, c0, & + ns1, f_Sinz) + + endif ! if (histfreq(ns1) /= 'x') then + enddo ! ns1 + + do ns1 = 1, nstreams + if (histfreq(ns1) /= 'x') then + + call define_hist_field(n_Tsnz,"Tsnz","C",tstr4Ds, tcstr, & + "snow internal temperatures", & + "vertical profile", c1, c0, & + ns1, f_Tsnz) + + endif ! 
if (histfreq(ns1) /= 'x') then + enddo + + if (f_Tinz (1:1) /= 'x') then + if (allocated(Tinz4d)) deallocate(Tinz4d) + allocate(Tinz4d(nx_block,ny_block,nzilyr,ncat_hist)) + endif + if (f_Sinz (1:1) /= 'x') then + if (allocated(Sinz4d)) deallocate(Sinz4d) + allocate(Sinz4d(nx_block,ny_block,nzilyr,ncat_hist)) + endif + if (f_Tsnz (1:1) /= 'x') then + if (allocated(Tsnz4d)) deallocate(Tsnz4d) + allocate(Tsnz4d(nx_block,ny_block,nzslyr,ncat_hist)) + endif + if (f_Sinz (1:1) /= 'x') then + if (allocated(Sinz4d)) deallocate(Sinz4d) + allocate(Sinz4d(nx_block,ny_block,nzilyr,ncat_hist)) + endif + + ! other 4D history variables + + ! biogeochemistry + if (tr_brine) call init_hist_bgc_4Db + + !----------------------------------------------------------------- + ! fill igrd array with namelist values + !----------------------------------------------------------------- + + igrd=.true. + + igrd(n_tmask ) = f_tmask + igrd(n_blkmask ) = f_blkmask + igrd(n_tarea ) = f_tarea + igrd(n_uarea ) = f_uarea + igrd(n_dxt ) = f_dxt + igrd(n_dyt ) = f_dyt + igrd(n_dxu ) = f_dxu + igrd(n_dyu ) = f_dyu + igrd(n_HTN ) = f_HTN + igrd(n_HTE ) = f_HTE + igrd(n_ANGLE ) = f_ANGLE + igrd(n_ANGLET ) = f_ANGLET + + igrdz=.true. + igrdz(n_NCAT ) = f_NCAT + igrdz(n_VGRDi ) = f_VGRDi + igrdz(n_VGRDs ) = f_VGRDs + igrdz(n_VGRDb ) = f_VGRDb + + !----------------------------------------------------------------- + ! diagnostic output + !----------------------------------------------------------------- + + ntmp(:) = 0 + if (my_task == master_task) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'The following variables will be ', & + 'written to the history tape: ' + write(nu_diag,101) 'description','units','variable','frequency','x' + do n=1,num_avail_hist_fields_tot + if (avail_hist_fields(n)%vhistfreq_n /= 0) & + write(nu_diag,100) avail_hist_fields(n)%vdesc, & + avail_hist_fields(n)%vunit, avail_hist_fields(n)%vname, & + avail_hist_fields(n)%vhistfreq,avail_hist_fields(n)%vhistfreq_n + do ns = 1, nstreams + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + ntmp(ns)=ntmp(ns)+1 + enddo + enddo ! num_avail_hist_fields_tot + write(nu_diag,*) ' ' + endif + 100 format (1x,a40,2x,a16,2x,a12,1x,a1,2x,i6) + 101 format (2x,a19,10x,a16,9x,a12,2x,a,3x,a1) + + call broadcast_array(ntmp, master_task) + do ns = 1, nstreams + if (ntmp(ns)==0) histfreq_n(ns) = 0 + enddo + + !----------------------------------------------------------------- + ! 
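+      ! The buffers allocated below hold the accumulated output fields, one
+      ! slot per registered field: a2D is (nx_block,ny_block,nfields,max_blocks),
+      ! the category arrays a3Dc/a4Di/a4Ds/a4Db add ncat_hist and, for the 4D
+      ! arrays, a vertical dimension (nzilyr, nzslyr or nzblyr), while a3Dz and
+      ! a3Db add only a vertical dimension. Within accum_hist a field's slot in
+      ! its buffer is its index in the combined list minus the cumulative count
+      ! of the preceding buffers (for example n_aicen-n2D for a3Dc fields).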
initialize the history arrays + !----------------------------------------------------------------- + + if (allocated(a2D)) deallocate(a2D) + if (num_avail_hist_fields_2D > 0) & + allocate(a2D(nx_block,ny_block,num_avail_hist_fields_2D,max_blocks)) + + if (allocated(a3Dc)) deallocate(a3Dc) + if (num_avail_hist_fields_3Dc > 0) & + allocate(a3Dc(nx_block,ny_block,ncat_hist,num_avail_hist_fields_3Dc,max_blocks)) + + nzlyr = max(nzilyr, nzslyr) + if (allocated(a3Dz)) deallocate(a3Dz) + if (num_avail_hist_fields_3Dz > 0) & + allocate(a3Dz(nx_block,ny_block,nzlyr,num_avail_hist_fields_3Dz,max_blocks)) + + nzlyrb = nzblyr + if (allocated(a3Db)) deallocate(a3Db) + if (num_avail_hist_fields_3Db > 0) & + allocate(a3Db(nx_block,ny_block,nzlyrb,num_avail_hist_fields_3Db,max_blocks)) + + if (allocated(a4Di)) deallocate(a4Di) + if (num_avail_hist_fields_4Di > 0) & + allocate(a4Di(nx_block,ny_block,nzilyr,ncat_hist,num_avail_hist_fields_4Di,max_blocks)) + if (allocated(a4Ds)) deallocate(a4Ds) + if (num_avail_hist_fields_4Ds > 0) & + allocate(a4Ds(nx_block,ny_block,nzslyr,ncat_hist,num_avail_hist_fields_4Ds,max_blocks)) + if (allocated(a4Db)) deallocate(a4Db) + if (num_avail_hist_fields_4Db > 0) & + allocate(a4Db(nx_block,ny_block,nzblyr,ncat_hist,num_avail_hist_fields_4Db,max_blocks)) + + if (allocated(a2D)) a2D (:,:,:,:) = c0 + if (allocated(a3Dc)) a3Dc(:,:,:,:,:) = c0 + if (allocated(a3Dz)) a3Dz(:,:,:,:,:) = c0 + if (allocated(a3Db)) a3Db(:,:,:,:,:) = c0 + if (allocated(a4Di)) a4Di(:,:,:,:,:,:) = c0 + if (allocated(a4Ds)) a4Ds(:,:,:,:,:,:) = c0 + if (allocated(a4Db)) a4Db(:,:,:,:,:,:) = c0 + avgct(:) = c0 + albcnt(:,:,:,:) = c0 + + if (restart .and. yday >= c2) then +! restarting midyear gives erroneous onset dates + mlt_onset = 999._dbl_kind + frz_onset = 999._dbl_kind + else + mlt_onset = c0 + frz_onset = c0 + endif + + end subroutine init_hist + +!======================================================================= + +! accumulate average ice quantities or snapshots +! +! author: Elizabeth C. Hunke, LANL + + subroutine accum_hist (dt) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_constants, only: c0, c1, p25, puny, secday, depressT, & + awtvdr, awtidr, awtvdf, awtidf, Lfresh, rhos, cp_ice, spval + use ice_domain, only: blocks_ice, nblocks + use ice_grid, only: tmask, lmask_n, lmask_s +#ifdef AusCOM + use ice_grid, only: umask +!ars599: 27032014 +! was in subroutine ice_write_hist of ice_history_write +! but seems to be here rather than subroutine ice_write_hist +! new code set in ice_constants +! use ice_shortwave, only: awtvdr, awtidr, awtvdf, awtidf +! use ice_constants, only: awtvdr, awtidr, awtvdf, awtidf +! 
however since already used at line #1145 so mark out +#endif + use ice_calendar, only: new_year, write_history, & + write_ic, time, histfreq, nstreams, month, & + new_month + use ice_dyn_eap, only: a11, a12, e11, e12, e22, s11, s12, s22, & + yieldstress11, yieldstress12, yieldstress22 + use ice_dyn_shared, only: kdyn, principal_stress + use ice_flux, only: fsw, flw, fsnow, frain, sst, sss, uocn, vocn, & + frzmlt_init, fswfac, fswabs, fswthru, alvdr, alvdf, alidr, alidf, & + albice, albsno, albpnd, coszen, flat, fsens, flwout, evap, & + Tair, Tref, Qref, congel, frazil, snoice, dsnow, & + melts, meltb, meltt, meltl, fresh, fsalt, fresh_ai, fsalt_ai, & + fhocn, fhocn_ai, uatm, vatm, & + fswthru_ai, strairx, strairy, strtltx, strtlty, strintx, strinty, & + strocnx, strocny, fm, daidtt, dvidtt, dvsdtt, daidtd, dvidtd, dvsdtd, fsurf, & + fcondtop, fsurfn, fcondtopn, flatn, fsensn, albcnt, prs_sig, & + stressp_1, stressm_1, stress12_1, & + stressp_2, stressm_2, stress12_2, & + stressp_3, stressm_3, stress12_3, & + stressp_4, stressm_4, stress12_4, sig1, sig2, & + mlt_onset, frz_onset, dagedtt, dagedtd, fswint_ai, Tn_top, & + keffn_top + use ice_atmo, only: formdrag + use ice_history_shared ! almost everything + use ice_history_write, only: ice_write_hist + use ice_history_bgc, only: accum_hist_bgc + use ice_history_mechred, only: accum_hist_mechred + use ice_history_pond, only: accum_hist_pond + use ice_history_drag, only: accum_hist_drag + use ice_state ! almost everything + use ice_therm_shared, only: calculate_Tin_from_qin, Tmlt, ktherm + use ice_therm_mushy, only: temperature_mush, temperature_snow + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_readwrite + use ice_zbgc_shared, only: skl_bgc + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i,j,k,ic,n,ns,nn, & + iblk , & ! block index + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nstrm ! nstreams (1 if writing initial condition) + + real (kind=dbl_kind) :: & + ravgct , & ! 1/avgct + ravgctz ! 1/avgct + + real (kind=dbl_kind) :: & + qn , & ! temporary variable for enthalpy + Tmlts ! temporary variable for melting temperature + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + worka, workb + + type (block) :: & + this_block ! block information for current block + + !--------------------------------------------------------------- + ! increment step counter + !--------------------------------------------------------------- + + n2D = num_avail_hist_fields_2D + n3Dccum = n2D + num_avail_hist_fields_3Dc + n3Dzcum = n3Dccum + num_avail_hist_fields_3Dz + n3Dbcum = n3Dzcum + num_avail_hist_fields_3Db + n4Dicum = n3Dbcum + num_avail_hist_fields_4Di + n4Dscum = n4Dicum + num_avail_hist_fields_4Ds + n4Dbcum = n4Dscum + num_avail_hist_fields_4Db ! should equal num_avail_hist_fields_tot + + do ns = 1,nstreams + if (.not. hist_avg .or. histfreq(ns) == '1') then ! 
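+      ! When averaging is switched off, or for a stream written every timestep
+      ! (histfreq = '1'), the buffers belonging to that stream are zeroed at the
+      ! start of each step so that only the current values are written and
+      ! avgct stays at 1; otherwise avgct counts the steps accumulated since the
+      ! last write and time_beg records the start of the averaging interval.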
write snapshots + do n = 1,n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a2D(:,:,n,:) = c0 + enddo + do n = n2D + 1, n3Dccum + nn = n - n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a3Dc(:,:,:,nn,:) = c0 + enddo + do n = n3Dccum + 1, n3Dzcum + nn = n - n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a3Dz(:,:,:,nn,:) = c0 + enddo + do n = n3Dzcum + 1, n3Dbcum + nn = n - n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a3Db(:,:,:,nn,:) = c0 + enddo + do n = n3Dbcum + 1, n4Dicum + nn = n - n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a4Di(:,:,:,:,nn,:) = c0 + enddo + do n = n4Dicum + 1, n4Dscum + nn = n - n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a4Ds(:,:,:,:,nn,:) = c0 + enddo + do n = n4Dscum + 1, n4Dbcum + nn = n - n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a4Db(:,:,:,:,nn,:) = c0 + enddo + avgct(ns) = c1 + else ! write averages over time histfreq + avgct(ns) = avgct(ns) + c1 +! if (avgct(ns) == c1) time_beg(ns) = (time-dt)/int(secday) + if (avgct(ns) == c1) then + time_beg(ns) = (time-dt)/int(secday) + time_beg(ns) = real(time_beg(ns),kind=real_kind) + endif + endif + enddo + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP k,n,qn,ns,worka,workb) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + workb(:,:) = aice_init(:,:,iblk) + +! if (f_example(1:1) /= 'x') & +! call accum_hist_field(n_example,iblk, vice(:,:,iblk), a2D) + if (f_hi (1:1) /= 'x') & + call accum_hist_field(n_hi, iblk, vice(:,:,iblk), a2D) + if (f_hs (1:1) /= 'x') & + call accum_hist_field(n_hs, iblk, vsno(:,:,iblk), a2D) + if (f_Tsfc (1:1) /= 'x') & + call accum_hist_field(n_Tsfc, iblk, trcr(:,:,nt_Tsfc,iblk), a2D) + if (f_aice (1:1) /= 'x') & + call accum_hist_field(n_aice, iblk, aice(:,:,iblk), a2D) + if (f_uvel (1:1) /= 'x') & + call accum_hist_field(n_uvel, iblk, uvel(:,:,iblk), a2D) + if (f_vvel (1:1) /= 'x') & + call accum_hist_field(n_vvel, iblk, vvel(:,:,iblk), a2D) + if (f_uatm (1:1) /= 'x') & + call accum_hist_field(n_uatm, iblk, uatm(:,:,iblk), a2D) + if (f_vatm (1:1) /= 'x') & + call accum_hist_field(n_vatm, iblk, vatm(:,:,iblk), a2D) + + if (f_sice (1:1) /= 'x') then + do j = jlo, jhi + do i = ilo, ihi + worka(i,j) = c0 + do k = 1, nzilyr + worka(i,j) = worka(i,j) + trcr(i,j,nt_sice+k-1,iblk) + enddo + worka(i,j) = worka(i,j) / nzilyr + enddo + enddo + call accum_hist_field(n_sice, iblk, worka(:,:), a2D) + endif + + if (f_fswdn (1:1) /= 'x') & + call accum_hist_field(n_fswdn, iblk, fsw(:,:,iblk), a2D) + if (f_flwdn (1:1) /= 'x') & + call accum_hist_field(n_flwdn, iblk, flw(:,:,iblk), a2D) + if (f_snow (1:1) /= 'x') & + call accum_hist_field(n_snow, iblk, fsnow(:,:,iblk), a2D) + if (f_snow_ai(1:1) /= 'x') & + call accum_hist_field(n_snow_ai,iblk, fsnow(:,:,iblk)*workb(:,:), a2D) + if (f_rain (1:1) /= 'x') & + call accum_hist_field(n_rain, iblk, frain(:,:,iblk), a2D) + if (f_rain_ai(1:1) /= 'x') & + call accum_hist_field(n_rain_ai,iblk, frain(:,:,iblk)*workb(:,:), a2D) + + if (f_sst (1:1) /= 'x') & + call accum_hist_field(n_sst, iblk, sst(:,:,iblk), a2D) + if (f_sss (1:1) /= 'x') & + call accum_hist_field(n_sss, iblk, sss(:,:,iblk), a2D) + if (f_uocn (1:1) /= 'x') & + call 
accum_hist_field(n_uocn, iblk, uocn(:,:,iblk), a2D) + if (f_vocn (1:1) /= 'x') & + call accum_hist_field(n_vocn, iblk, vocn(:,:,iblk), a2D) + if (f_frzmlt (1:1) /= 'x') & + call accum_hist_field(n_frzmlt, iblk, frzmlt_init(:,:,iblk), a2D) + + if (f_fswfac (1:1) /= 'x') & + call accum_hist_field(n_fswfac, iblk, fswfac(:,:,iblk), a2D) + if (f_fswabs (1:1) /= 'x') & + call accum_hist_field(n_fswabs, iblk, fswabs(:,:,iblk), a2D) + + if (f_fswint_ai (1:1) /= 'x') & + call accum_hist_field(n_fswint_ai, iblk, fswint_ai(:,:,iblk), a2D) + + if (f_fswabs_ai(1:1)/= 'x') & + call accum_hist_field(n_fswabs_ai, iblk, fswabs(:,:,iblk)*workb(:,:), a2D) + + if (f_albsni (1:1) /= 'x') & + call accum_hist_field(n_albsni, iblk, & + (awtvdr*alvdr(:,:,iblk) & + + awtidr*alidr(:,:,iblk) & + + awtvdf*alvdf(:,:,iblk) & + + awtidf*alidf(:,:,iblk))*aice(:,:,iblk), a2D) +! awtvdr*alvdr(:,:,iblk) & +! + awtidr*alidr(:,:,iblk) & +! + awtvdf*alvdf(:,:,iblk) & +! + awtidf*alidf(:,:,iblk), a2D) + if (f_alvdr (1:1) /= 'x') & + call accum_hist_field(n_alvdr, iblk, alvdr(:,:,iblk), a2D) + if (f_alidr (1:1) /= 'x') & + call accum_hist_field(n_alidr, iblk, alidr(:,:,iblk), a2D) + if (f_alvdf (1:1) /= 'x') & + call accum_hist_field(n_alvdf, iblk, alvdf(:,:,iblk), a2D) + if (f_alidf (1:1) /= 'x') & + call accum_hist_field(n_alidf, iblk, alidf(:,:,iblk), a2D) + + if (f_albice (1:1) /= 'x') & + call accum_hist_field(n_albice, iblk, albice(:,:,iblk), a2D) + if (f_albsno (1:1) /= 'x') & + call accum_hist_field(n_albsno, iblk, albsno(:,:,iblk), a2D) + if (f_albpnd (1:1) /= 'x') & + call accum_hist_field(n_albpnd, iblk, albpnd(:,:,iblk), a2D) + if (f_coszen (1:1) /= 'x') & + call accum_hist_field(n_coszen, iblk, coszen(:,:,iblk), a2D) + + if (f_flat (1:1) /= 'x') & + call accum_hist_field(n_flat, iblk, flat(:,:,iblk), a2D) + if (f_flat_ai(1:1) /= 'x') & + call accum_hist_field(n_flat_ai,iblk, flat(:,:,iblk)*workb(:,:), a2D) + if (f_fsens (1:1) /= 'x') & + call accum_hist_field(n_fsens, iblk, fsens(:,:,iblk), a2D) + if (f_fsens_ai(1:1)/= 'x') & + call accum_hist_field(n_fsens_ai,iblk, fsens(:,:,iblk)*workb(:,:), a2D) + if (f_flwup (1:1) /= 'x') & + call accum_hist_field(n_flwup, iblk, flwout(:,:,iblk), a2D) + if (f_flwup_ai(1:1)/= 'x') & + call accum_hist_field(n_flwup_ai,iblk, flwout(:,:,iblk)*workb(:,:), a2D) + if (f_evap (1:1) /= 'x') & + call accum_hist_field(n_evap, iblk, evap(:,:,iblk), a2D) + if (f_evap_ai(1:1) /= 'x') & + call accum_hist_field(n_evap_ai,iblk, evap(:,:,iblk)*workb(:,:), a2D) + + if (f_Tair (1:1) /= 'x') & + call accum_hist_field(n_Tair, iblk, Tair(:,:,iblk), a2D) + if (f_Tref (1:1) /= 'x') & + call accum_hist_field(n_Tref, iblk, Tref(:,:,iblk), a2D) + if (f_Qref (1:1) /= 'x') & + call accum_hist_field(n_Qref, iblk, Qref(:,:,iblk), a2D) + if (f_congel (1:1) /= 'x') & + call accum_hist_field(n_congel, iblk, congel(:,:,iblk), a2D) + if (f_frazil (1:1) /= 'x') & + call accum_hist_field(n_frazil, iblk, frazil(:,:,iblk), a2D) + if (f_snoice (1:1) /= 'x') & + call accum_hist_field(n_snoice, iblk, snoice(:,:,iblk), a2D) + if (f_dsnow (1:1) /= 'x') & + call accum_hist_field(n_dsnow, iblk, dsnow(:,:,iblk), a2D) + if (f_meltt (1:1) /= 'x') & + call accum_hist_field(n_meltt, iblk, meltt(:,:,iblk), a2D) + if (f_melts (1:1) /= 'x') & + call accum_hist_field(n_melts, iblk, melts(:,:,iblk), a2D) + if (f_meltb (1:1) /= 'x') & + call accum_hist_field(n_meltb, iblk, meltb(:,:,iblk), a2D) + if (f_meltl (1:1) /= 'x') & + call accum_hist_field(n_meltl, iblk, meltl(:,:,iblk), a2D) + + if (f_fresh (1:1) /= 'x') & + call 
accum_hist_field(n_fresh, iblk, fresh(:,:,iblk), a2D) + if (f_fresh_ai(1:1)/= 'x') & + call accum_hist_field(n_fresh_ai,iblk, fresh_ai(:,:,iblk), a2D) + if (f_fsalt (1:1) /= 'x') & + call accum_hist_field(n_fsalt, iblk, fsalt(:,:,iblk), a2D) + if (f_fsalt_ai(1:1)/= 'x') & + call accum_hist_field(n_fsalt_ai,iblk, fsalt_ai(:,:,iblk), a2D) + + if (f_fhocn (1:1) /= 'x') & + call accum_hist_field(n_fhocn, iblk, fhocn(:,:,iblk), a2D) + if (f_fhocn_ai(1:1)/= 'x') & + call accum_hist_field(n_fhocn_ai,iblk, fhocn_ai(:,:,iblk), a2D) + if (f_fswthru(1:1) /= 'x') & + call accum_hist_field(n_fswthru, iblk, fswthru(:,:,iblk), a2D) + if (f_fswthru_ai(1:1)/= 'x') & + call accum_hist_field(n_fswthru_ai,iblk, fswthru_ai(:,:,iblk), a2D) + + if (f_strairx(1:1) /= 'x') & + call accum_hist_field(n_strairx, iblk, strairx(:,:,iblk), a2D) + if (f_strairy(1:1) /= 'x') & + call accum_hist_field(n_strairy, iblk, strairy(:,:,iblk), a2D) + if (f_strtltx(1:1) /= 'x') & + call accum_hist_field(n_strtltx, iblk, strtltx(:,:,iblk), a2D) + if (f_strtlty(1:1) /= 'x') & + call accum_hist_field(n_strtlty, iblk, strtlty(:,:,iblk), a2D) + if (f_strcorx(1:1) /= 'x') & + call accum_hist_field(n_strcorx, iblk, fm(:,:,iblk)*vvel(:,:,iblk), a2D) + if (f_strcory(1:1) /= 'x') & + call accum_hist_field(n_strcory, iblk,-fm(:,:,iblk)*uvel(:,:,iblk), a2D) + if (f_strocnx(1:1) /= 'x') & + call accum_hist_field(n_strocnx, iblk, strocnx(:,:,iblk), a2D) + if (f_strocny(1:1) /= 'x') & + call accum_hist_field(n_strocny, iblk, strocny(:,:,iblk), a2D) + if (f_strintx(1:1) /= 'x') & + call accum_hist_field(n_strintx, iblk, strintx(:,:,iblk), a2D) + if (f_strinty(1:1) /= 'x') & + call accum_hist_field(n_strinty, iblk, strinty(:,:,iblk), a2D) + if (f_strength(1:1)/= 'x') & + call accum_hist_field(n_strength,iblk, strength(:,:,iblk), a2D) + +! The following fields (divu, shear, sig1, and sig2) will be smeared +! if averaged over more than a few days. +! Snapshots may be more useful (see below). + +! if (f_divu (1:1) /= 'x') & +! call accum_hist_field(n_divu, iblk, divu(:,:,iblk), a2D) +! if (f_shear (1:1) /= 'x') & +! call accum_hist_field(n_shear, iblk, shear(:,:,iblk), a2D) +! if (f_sig1 (1:1) /= 'x') & +! call accum_hist_field(n_sig1, iblk, sig1(:,:,iblk), a2D) +! if (f_sig2 (1:1) /= 'x') & +! call accum_hist_field(n_sig2, iblk, sig2(:,:,iblk), a2D) +! if (f_trsig (1:1) /= 'x') & +! 
call accum_hist_field(n_trsig, iblk, trsig(:,:,iblk), a2D) + + if (f_dvidtt (1:1) /= 'x') & + call accum_hist_field(n_dvidtt, iblk, dvidtt(:,:,iblk), a2D) + if (f_dvsdtt (1:1) /= 'x') & + call accum_hist_field(n_dvsdtt, iblk, dvsdtt(:,:,iblk), a2D) + if (f_dvidtd (1:1) /= 'x') & + call accum_hist_field(n_dvidtd, iblk, dvidtd(:,:,iblk), a2D) + if (f_dvsdtd (1:1) /= 'x') & + call accum_hist_field(n_dvsdtd, iblk, dvsdtd(:,:,iblk), a2D) + if (f_daidtt (1:1) /= 'x') & + call accum_hist_field(n_daidtt, iblk, daidtt(:,:,iblk), a2D) + if (f_daidtd (1:1) /= 'x') & + call accum_hist_field(n_daidtd, iblk, daidtd(:,:,iblk), a2D) + if (f_dagedtt (1:1) /= 'x') & + call accum_hist_field(n_dagedtt, iblk, dagedtt(:,:,iblk), a2D) + if (f_dagedtd (1:1) /= 'x') & + call accum_hist_field(n_dagedtd, iblk, dagedtd(:,:,iblk), a2D) + + if (f_fsurf_ai(1:1)/= 'x') & + call accum_hist_field(n_fsurf_ai,iblk, fsurf(:,:,iblk)*workb(:,:), a2D) + if (f_fcondtop_ai(1:1)/= 'x') & + call accum_hist_field(n_fcondtop_ai, iblk, & + fcondtop(:,:,iblk)*workb(:,:), a2D) + + if (f_icepresent(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = c1 + enddo + enddo + call accum_hist_field(n_icepresent, iblk, worka(:,:), a2D) + endif + + ! 3D category fields + if (f_aicen (1:1) /= 'x') & + call accum_hist_field(n_aicen-n2D, iblk, ncat_hist, & + aicen(:,:,1:ncat_hist,iblk), a3Dc) + if (f_vicen (1:1) /= 'x') & + call accum_hist_field(n_vicen-n2D, iblk, ncat_hist, & + vicen(:,:,1:ncat_hist,iblk), a3Dc) + if (f_vsnon (1:1) /= 'x') & + call accum_hist_field(n_vsnon-n2D, iblk, ncat_hist, & + vsnon(:,:,1:ncat_hist,iblk), a3Dc) + if (f_Tn_top (1:1) /= 'x') & + call accum_hist_field(n_Tn_top-n2D, iblk, ncat_hist, & + Tn_top(:,:,1:ncat_hist,iblk), a3Dc) + if (f_keffn_top (1:1) /= 'x') & + call accum_hist_field(n_keffn_top-n2D, iblk, ncat_hist, & + keffn_top(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fsurfn_ai (1:1) /= 'x') & + call accum_hist_field(n_fsurfn_ai-n2D, iblk, ncat_hist, & + fsurfn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fcondtopn_ai (1:1) /= 'x') & + call accum_hist_field(n_fcondtopn_ai-n2D, iblk, ncat_hist, & + fcondtopn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_flatn_ai (1:1) /= 'x') & + call accum_hist_field(n_flatn_ai-n2D, iblk, ncat_hist, & + flatn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fsensn_ai (1:1) /= 'x') & + call accum_hist_field(n_fsensn_ai-n2D, iblk, ncat_hist, & + fsensn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + ! Calculate surface heat flux that causes melt (calculated by the + ! atmos in HadGEM3 so needed for checking purposes) + if (f_fmelttn_ai (1:1) /= 'x') & + call accum_hist_field(n_fmelttn_ai-n2D, iblk, ncat_hist, & + max(fsurfn(:,:,1:ncat_hist,iblk) - fcondtopn(:,:,1:ncat_hist,iblk),c0) & + *aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + +! example for 3D field (x,y,z) +! if (f_field3dz (1:1) /= 'x') & +! call accum_hist_field(n_field3dz-n3Dccum, iblk, nzilyr, & +! field3dz(:,:,1:nzilyr,iblk), a3Dz) + + ! 4D category fields + if (f_Tinz (1:1) /= 'x') then + Tinz4d(:,:,:,:) = c0 + if (ktherm == 2) then + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzilyr + Tinz4d(i,j,k,n) = temperature_mush( & + trcrn(i,j,nt_qice+k-1,n,iblk), trcrn(i,j,nt_sice+k-1,n,iblk)) + enddo + enddo + enddo + enddo + else + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzilyr + qn = trcrn(i,j,nt_qice+k-1,n,iblk) +! 
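+               ! In the non-mushy branch (ktherm /= 2) the internal temperature
+               ! is recovered from the layer enthalpy using a melting point set
+               ! by the local bulk salinity, Tmlts = -S*depressT; the line kept
+               ! as a comment below used the prescribed Tmlt(k) profile instead.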
Tinz4d(i,j,k,n) = calculate_Tin_from_qin(qn,Tmlt(k)) + Tmlts = -trcrn(i,j,nt_sice+k-1,n,iblk)*depressT + Tinz4d(i,j,k,n) = calculate_Tin_from_qin(qn,Tmlts) + enddo + enddo + enddo + enddo + endif + call accum_hist_field(n_Tinz-n3Dbcum, iblk, nzilyr, ncat_hist, & + Tinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) + endif + if (f_Sinz (1:1) /= 'x') then + Sinz4d(:,:,:,:) = c0 + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (vicen(i,j,n,iblk) > puny) then + Sinz4d(i,j,1:nzilyr,n) = trcrn(i,j,nt_sice:nt_sice+nzilyr-1,n,iblk) + endif + enddo + enddo + enddo + call accum_hist_field(n_Sinz-n3Dbcum, iblk, nzilyr, ncat_hist, & + Sinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) + endif + + if (f_Tsnz (1:1) /= 'x') then + Tsnz4d(:,:,:,:) = c0 + if (ktherm == 2) then + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzslyr + qn = trcrn(i,j,nt_qsno+k-1,n,iblk) + Tsnz4d(i,j,k,n) = temperature_snow(trcrn(i,j,nt_qsno+k-1,n,iblk)) + enddo + enddo + enddo + enddo + else + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzslyr + qn = trcrn(i,j,nt_qsno+k-1,n,iblk) + Tsnz4d(i,j,k,n) = (Lfresh + qn/rhos)/cp_ice + enddo + enddo + enddo + enddo + endif + call accum_hist_field(n_Tsnz-n4Dicum, iblk, nzslyr, ncat_hist, & + Tsnz4d(:,:,1:nzslyr,1:ncat_hist), a4Ds) + endif + + ! Calculate aggregate surface melt flux by summing category values + if (f_fmeltt_ai(1:1) /= 'x') then + do ns = 1, nstreams + if (n_fmeltt_ai(ns) /= 0) then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + do n=1,ncat_hist + worka(i,j) = worka(i,j) + a3Dc(i,j,n,n_fmelttn_ai(ns)-n2D,iblk) + enddo ! n + endif ! tmask + enddo ! i + enddo ! j + a2D(:,:,n_fmeltt_ai(ns),iblk) = worka(:,:) + endif + enddo + endif + + !--------------------------------------------------------------- + ! accumulate other history output + !--------------------------------------------------------------- + + ! mechanical redistribution + call accum_hist_mechred (iblk) + + ! melt ponds + if (tr_pond) call accum_hist_pond (iblk) + + ! biogeochemistry + if (tr_aero .or. tr_brine .or. skl_bgc) call accum_hist_bgc (iblk) + + ! form drag + if (formdrag) call accum_hist_drag (iblk) + + enddo ! iblk + !$OMP END PARALLEL DO + + !--------------------------------------------------------------- + ! Write output files at prescribed intervals + !--------------------------------------------------------------- + + nstrm = nstreams + if (write_ic) nstrm = 1 + + do ns = 1, nstrm + if (write_history(ns) .or. write_ic) then + + !--------------------------------------------------------------- + ! Mask out land points and convert units + !--------------------------------------------------------------- + + ravgct = c1/avgct(ns) + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP n,nn,ravgctz) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do n = 1, num_avail_hist_fields_2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) then + + do j = jlo, jhi + do i = ilo, ihi +#ifdef AusCOM + if (n_uocn(ns)==n.or.n_vocn(ns)==n) then + if (.not. umask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval + else ! convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + * ravgct + avail_hist_fields(n)%conb + endif + else + if (.not. tmask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval + else ! 
convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + * ravgct + avail_hist_fields(n)%conb + endif + endif +#else + if (.not. tmask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval + else ! convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + * ravgct + avail_hist_fields(n)%conb + endif +#endif + enddo ! i + enddo ! j + + ! back out albedo/zenith angle dependence + if (avail_hist_fields(n)%vname(1:6) == 'albice') then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + ravgctz = c0 + if (albcnt(i,j,iblk,ns) > puny) & + ravgctz = c1/albcnt(i,j,iblk,ns) + if (f_albice (1:1) /= 'x' .and. n_albice(ns) /= 0) & + a2D(i,j,n_albice(ns),iblk) = & + a2D(i,j,n_albice(ns),iblk)*avgct(ns)*ravgctz + if (f_albsno (1:1) /= 'x' .and. n_albsno(ns) /= 0) & + a2D(i,j,n_albsno(ns),iblk) = & + a2D(i,j,n_albsno(ns),iblk)*avgct(ns)*ravgctz + if (f_albpnd (1:1) /= 'x' .and. n_albpnd(ns) /= 0) & + a2D(i,j,n_albpnd(ns),iblk) = & + a2D(i,j,n_albpnd(ns),iblk)*avgct(ns)*ravgctz + endif + enddo ! i + enddo ! j + endif + if (avail_hist_fields(n)%vname(1:6) == 'albsni') then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + ravgctz = c0 + if (albcnt(i,j,iblk,ns) > puny) & + ravgctz = c1/albcnt(i,j,iblk,ns) + if (f_albsni (1:1) /= 'x' .and. n_albsni(ns) /= 0) & + a2D(i,j,n_albsni(ns),iblk) = & + a2D(i,j,n_albsni(ns),iblk)*avgct(ns)*ravgctz + endif + enddo ! i + enddo ! j + endif + + endif + enddo ! n + + do n = 1, num_avail_hist_fields_3Dc + nn = n2D + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a3Dc(i,j,k,n,iblk) = spval + else ! convert units + a3Dc(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Dc(i,j,k,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! k + endif + enddo ! n + + do n = 1, num_avail_hist_fields_3Dz + nn = n3Dccum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzlyr + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a3Dz(i,j,k,n,iblk) = spval + else ! convert units + a3Dz(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Dz(i,j,k,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! k + endif + enddo ! n + do n = 1, num_avail_hist_fields_3Db + nn = n3Dzcum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzblyr + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a3Db(i,j,k,n,iblk) = spval + else ! convert units + a3Db(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Db(i,j,k,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! k + endif + enddo ! n + + do n = 1, num_avail_hist_fields_4Di + nn = n3Dbcum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzilyr + do ic = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a4Di(i,j,k,ic,n,iblk) = spval + else ! convert units + a4Di(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Di(i,j,k,ic,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! ic + enddo ! k + endif + enddo ! 
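+      ! Each accumulated field is converted on output as
+      !    field = cona * (accumulated sum) / avgct + conb
+      ! using the cona/conb pair registered in init_hist; for example Tair is
+      ! accumulated in Kelvin with cona = c1 and conb = -Tffresh, so the value
+      ! written is the time-mean air temperature in degrees Celsius.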
n + + do n = 1, num_avail_hist_fields_4Ds + nn = n4Dicum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzslyr + do ic = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a4Ds(i,j,k,ic,n,iblk) = spval + else ! convert units + a4Ds(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Ds(i,j,k,ic,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! ic + enddo ! k + endif + enddo ! n + do n = 1, num_avail_hist_fields_4Db + nn = n4Dscum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzblyr + do ic = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a4Db(i,j,k,ic,n,iblk) = spval + else ! convert units + a4Db(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Db(i,j,k,ic,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! ic + enddo ! k + endif + enddo ! n + + !--------------------------------------------------------------- + ! snapshots + !--------------------------------------------------------------- + + ! compute sig1 and sig2 + + call principal_stress (nx_block, ny_block, & + stressp_1 (:,:,iblk), & + stressm_1 (:,:,iblk), & + stress12_1(:,:,iblk), & + prs_sig (:,:,iblk), & + sig1 (:,:,iblk), & + sig2 (:,:,iblk)) + + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + if (n_divu (ns) /= 0) a2D(i,j,n_divu(ns), iblk) = spval + if (n_shear (ns) /= 0) a2D(i,j,n_shear(ns), iblk) = spval + if (n_sig1 (ns) /= 0) a2D(i,j,n_sig1(ns), iblk) = spval + if (n_sig2 (ns) /= 0) a2D(i,j,n_sig2(ns), iblk) = spval + if (n_mlt_onset(ns) /= 0) a2D(i,j,n_mlt_onset(ns),iblk) = spval + if (n_frz_onset(ns) /= 0) a2D(i,j,n_frz_onset(ns),iblk) = spval + if (n_hisnap (ns) /= 0) a2D(i,j,n_hisnap(ns), iblk) = spval + if (n_aisnap (ns) /= 0) a2D(i,j,n_aisnap(ns), iblk) = spval + if (n_trsig (ns) /= 0) a2D(i,j,n_trsig(ns), iblk) = spval + if (n_iage (ns) /= 0) a2D(i,j,n_iage(ns), iblk) = spval + if (n_FY (ns) /= 0) a2D(i,j,n_FY(ns), iblk) = spval + if (n_Tn_top (ns) /= 0) a3Dc(i,j,:,n_Tn_top(ns)-n2D,iblk) = spval + if (n_keffn_top (ns) /= 0) a3Dc(i,j,:,n_keffn_top(ns)-n2D,iblk) = spval + + if (n_a11 (ns) /= 0) a2D(i,j,n_a11(ns), iblk) = spval + if (n_a12 (ns) /= 0) a2D(i,j,n_a12(ns), iblk) = spval + if (n_e11 (ns) /= 0) a2D(i,j,n_e11(ns), iblk) = spval + if (n_e12 (ns) /= 0) a2D(i,j,n_e12(ns), iblk) = spval + if (n_e22 (ns) /= 0) a2D(i,j,n_e22(ns), iblk) = spval + if (n_s11 (ns) /= 0) a2D(i,j,n_s11(ns), iblk) = spval + if (n_s12 (ns) /= 0) a2D(i,j,n_s12(ns), iblk) = spval + if (n_s22 (ns) /= 0) a2D(i,j,n_s22(ns), iblk) = spval + if (n_yieldstress11 (ns) /= 0) a2D(i,j,n_yieldstress11(ns),iblk) = spval + if (n_yieldstress12 (ns) /= 0) a2D(i,j,n_yieldstress12(ns),iblk) = spval + if (n_yieldstress22 (ns) /= 0) a2D(i,j,n_yieldstress22(ns),iblk) = spval + else + if (n_divu (ns) /= 0) a2D(i,j,n_divu(ns),iblk) = & + divu (i,j,iblk)*avail_hist_fields(n_divu(ns))%cona + if (n_shear (ns) /= 0) a2D(i,j,n_shear(ns),iblk) = & + shear(i,j,iblk)*avail_hist_fields(n_shear(ns))%cona + if (n_sig1 (ns) /= 0) a2D(i,j,n_sig1(ns),iblk) = & + sig1 (i,j,iblk)*avail_hist_fields(n_sig1(ns))%cona + if (n_sig2 (ns) /= 0) a2D(i,j,n_sig2(ns),iblk) = & + sig2 (i,j,iblk)*avail_hist_fields(n_sig2(ns))%cona + if (n_mlt_onset(ns) /= 0) a2D(i,j,n_mlt_onset(ns),iblk) = & + mlt_onset(i,j,iblk) + if (n_frz_onset(ns) /= 0) a2D(i,j,n_frz_onset(ns),iblk) = & + frz_onset(i,j,iblk) + 
if (n_hisnap (ns) /= 0) a2D(i,j,n_hisnap(ns),iblk) = & + vice(i,j,iblk) + if (n_aisnap (ns) /= 0) a2D(i,j,n_aisnap(ns),iblk) = & + aice(i,j,iblk) + + if (kdyn == 2) then ! for EAP dynamics different time of output + if (n_trsig (ns) /= 0) a2D(i,j,n_trsig(ns),iblk ) = & + prs_sig(i,j,iblk) + else + if (n_trsig (ns) /= 0) a2D(i,j,n_trsig(ns),iblk ) = & + p25*(stressp_1(i,j,iblk) & + + stressp_2(i,j,iblk) & + + stressp_3(i,j,iblk) & + + stressp_4(i,j,iblk)) + endif + + if (n_iage (ns) /= 0) a2D(i,j,n_iage(ns),iblk) = & + trcr(i,j,nt_iage,iblk)*avail_hist_fields(n_iage(ns))%cona + if (n_FY (ns) /= 0) a2D(i,j,n_FY(ns),iblk) = & + trcr(i,j,nt_FY,iblk)*avail_hist_fields(n_FY(ns))%cona + + if (n_Tn_top (ns) /= 0) a3Dc(i,j,:,n_Tn_top(ns)-n2D,iblk) = & + Tn_top(i,j,:,iblk)*avail_hist_fields(n_Tn_top(ns))%cona + if (n_keffn_top(ns) /= 0) a3Dc(i,j,:,n_keffn_top(ns)-n2D,iblk)= & + keffn_top(i,j,:,iblk)*avail_hist_fields(n_keffn_top(ns))%cona + + if (n_a11 (ns) /= 0) a2D(i,j,n_a11(ns),iblk) = & + a11 (i,j,iblk)*avail_hist_fields(n_a11(ns))%cona + if (n_a12 (ns) /= 0) a2D(i,j,n_a12(ns),iblk) = & + a12 (i,j,iblk)*avail_hist_fields(n_a12(ns))%cona + if (n_e11 (ns) /= 0) a2D(i,j,n_e11(ns),iblk) = & + e11 (i,j,iblk)*avail_hist_fields(n_e11(ns))%cona + if (n_e12 (ns) /= 0) a2D(i,j,n_e12(ns),iblk) = & + e12 (i,j,iblk)*avail_hist_fields(n_e12(ns))%cona + if (n_e22 (ns) /= 0) a2D(i,j,n_e22(ns),iblk) = & + e22 (i,j,iblk)*avail_hist_fields(n_e22(ns))%cona + if (n_s11 (ns) /= 0) a2D(i,j,n_s11(ns),iblk) = & + s11 (i,j,iblk)*avail_hist_fields(n_s11(ns))%cona + if (n_s12 (ns) /= 0) a2D(i,j,n_s12(ns),iblk) = & + s12 (i,j,iblk)*avail_hist_fields(n_s12(ns))%cona + if (n_s22 (ns) /= 0) a2D(i,j,n_s22(ns),iblk) = & + s22 (i,j,iblk)*avail_hist_fields(n_s22(ns))%cona + if (n_yieldstress11 (ns) /= 0) a2D(i,j,n_yieldstress11(ns),iblk) = & + yieldstress11 (i,j,iblk)*avail_hist_fields(n_yieldstress11(ns))%cona + if (n_yieldstress12 (ns) /= 0) a2D(i,j,n_yieldstress12(ns),iblk) = & + yieldstress12 (i,j,iblk)*avail_hist_fields(n_yieldstress12(ns))%cona + if (n_yieldstress22 (ns) /= 0) a2D(i,j,n_yieldstress22(ns),iblk) = & + yieldstress22 (i,j,iblk)*avail_hist_fields(n_yieldstress22(ns))%cona + endif + enddo ! i + enddo ! j + + enddo ! iblk + !$OMP END PARALLEL DO + + time_end(ns) = time/int(secday) + time_end(ns) = real(time_end(ns),kind=real_kind) + + !--------------------------------------------------------------- + ! write file + !--------------------------------------------------------------- + + call ice_timer_start(timer_readwrite) ! reading/writing + call ice_write_hist (ns) + call ice_timer_stop(timer_readwrite) ! reading/writing + + !--------------------------------------------------------------- + ! reset to zero + !------------------------------------------------------------ + if (write_ic) then + if (allocated(a2D)) a2D (:,:,:,:) = c0 + if (allocated(a3Dc)) a3Dc(:,:,:,:,:) = c0 + if (allocated(a3Dz)) a3Dz(:,:,:,:,:) = c0 + if (allocated(a3Db)) a3Db(:,:,:,:,:) = c0 + if (allocated(a4Di)) a4Di(:,:,:,:,:,:) = c0 + if (allocated(a4Ds)) a4Ds(:,:,:,:,:,:) = c0 + if (allocated(a4Db)) a4Db(:,:,:,:,:,:) = c0 + avgct(:) = c0 + albcnt(:,:,:,:) = c0 + write_ic = .false. ! write initial condition once at most + else + avgct(ns) = c0 + albcnt(:,:,:,ns) = c0 + endif +! 
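+      ! After the file for stream ns is written, only the fields belonging to
+      ! that stream (vhistfreq == histfreq(ns)) are zeroed in the loops below,
+      ! so fields still being accumulated for other streams are left untouched.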
if (write_history(ns)) albcnt(:,:,:,ns) = c0 + + do n = 1,n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a2D(:,:,n,:) = c0 + enddo + do n = n2D + 1, n3Dccum + nn = n - n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a3Dc(:,:,:,nn,:) = c0 + enddo + do n = n3Dccum + 1, n3Dzcum + nn = n - n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a3Dz(:,:,:,nn,:) = c0 + enddo + do n = n3Dzcum + 1, n3Dbcum + nn = n - n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a3Db(:,:,:,nn,:) = c0 + enddo + do n = n3Dbcum + 1, n4Dicum + nn = n - n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a4Di(:,:,:,:,nn,:) = c0 + enddo + do n = n4Dicum + 1, n4Dscum + nn = n - n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a4Ds(:,:,:,:,nn,:) = c0 + enddo + do n = n4Dscum + 1, n4Dbcum + nn = n - n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a4Db(:,:,:,:,nn,:) = c0 + enddo + + endif ! write_history or write_ic + enddo ! nstreams + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + if (new_year) then + do j=jlo,jhi + do i=ilo,ihi + ! reset NH Jan 1 + if (lmask_n(i,j,iblk)) mlt_onset(i,j,iblk) = c0 + ! reset SH Jan 1 + if (lmask_s(i,j,iblk)) frz_onset(i,j,iblk) = c0 + enddo + enddo + endif ! new_year + + if ( (month .eq. 7) .and. new_month ) then + do j=jlo,jhi + do i=ilo,ihi + ! reset SH Jul 1 + if (lmask_s(i,j,iblk)) mlt_onset(i,j,iblk) = c0 + ! reset NH Jul 1 + if (lmask_n(i,j,iblk)) frz_onset(i,j,iblk) = c0 + enddo + enddo + endif ! 1st of July + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine accum_hist + +!======================================================================= + + end module ice_history + +!======================================================================= diff --git a/source/ice_history.F90_spo b/source/ice_history.F90_spo new file mode 100755 index 00000000..bb1a04c2 --- /dev/null +++ b/source/ice_history.F90_spo @@ -0,0 +1,2092 @@ +! SVN:$Id: ice_history.F90 820 2014-08-26 19:08:29Z eclare $ +!======================================================================= + +! Driver for core history output +! +! The following variables are currently hard-wired as snapshots +! (instantaneous rather than time-averages): +! divu, shear, sig1, sig2, trsig, mlt_onset, frz_onset, hisnap, aisnap +! +! Options for histfreq: '1','h','d','m','y','x', where x means that +! output stream will not be used (recommended for efficiency). +! histfreq_n can be any nonnegative integer, where 0 means that the +! corresponding histfreq frequency will not be used. +! The flags (f_<var>) can be set to '1','h','d','m','y' or 'x', where +! x means the field will not be written. To output the same field at +! more than one frequency, for instance monthly and daily, set +! f_<var> = 'md'. +! +! authors Tony Craig and Bruce Briegleb, NCAR +! Elizabeth C. Hunke and William H. Lipscomb, LANL +! C. M. Bitz, UW +! +! 2004 WHL: Block structure added +! 2006 ECH: Accepted some CCSM code into mainstream CICE +! Added ice_present, aicen, vicen; removed aice1...10, vice1...10. +! Added histfreq_n and histfreq='h' options, removed histfreq='w' +! Converted to free source form (F90) +! Added option for binary output instead of netCDF +! 2009 D Bailey and ECH: Generalized for multiple frequency output +! 
2010 Alison McLaren and ECH: Added 3D capability + + module ice_history + + use ice_kinds_mod +#ifdef AusCOM + use cpl_parameters, only: caltype +#endif + + implicit none + private + public :: init_hist, accum_hist + save + +!======================================================================= + + contains + +!======================================================================= + +! Initialize history files +! +! authors Tony Craig, NCAR +! Elizabeth C. Hunke, LANL +! C.M. Bitz, UW +! Bruce P. Briegleb, NCAR +! William H. Lipscomb, LANL + + subroutine init_hist (dt) + + use ice_atmo, only: formdrag + use ice_blocks, only: nx_block, ny_block + use ice_broadcast, only: broadcast_scalar, broadcast_array + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c1, c2, c100, mps_to_cmpdy, rhofresh, & + Tffresh, kg_to_g, secday + use ice_calendar, only: yday, days_per_year, histfreq, & + histfreq_n, nstreams + use ice_domain_size, only: max_blocks, max_nstrm + use ice_dyn_shared, only: kdyn + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, nml_filename, nu_diag, & + get_fileunit, release_fileunit + use ice_flux, only: mlt_onset, frz_onset, albcnt + use ice_history_shared ! everything + use ice_history_mechred, only: init_hist_mechred_2D, init_hist_mechred_3Dc + use ice_history_pond, only: init_hist_pond_2D, init_hist_pond_3Dc + use ice_history_bgc, only: init_hist_bgc_2D, init_hist_bgc_3Dc, & + init_hist_bgc_4Db + use ice_history_drag, only: init_hist_drag_2D + use ice_restart_shared, only: restart + use ice_state, only: tr_iage, tr_FY, tr_lvl, tr_pond, tr_aero, tr_brine + use ice_therm_shared, only: calc_Tsfc, heat_capacity + use ice_zbgc_shared, only: skl_bgc + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: n, ns, ns1, ns2 + integer (kind=int_kind), dimension(max_nstrm) :: & + ntmp + integer (kind=int_kind) :: nml_error ! namelist i/o error flag + + !----------------------------------------------------------------- + ! read namelist + !----------------------------------------------------------------- + + call get_fileunit(nu_nml) + if (my_task == master_task) then + open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + do while (nml_error > 0) + read(nu_nml, nml=icefields_nml,iostat=nml_error) + end do + if (nml_error == 0) close(nu_nml) + endif + call release_fileunit(nu_nml) + + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + close (nu_nml) + call abort_ice('ice: error reading icefields_nml') + endif + + ! histfreq options ('1','h','d','m','y') + nstreams = 0 + do ns = 1, max_nstrm + if (histfreq(ns) == '1' .or. histfreq(ns) == 'h' .or. & + histfreq(ns) == 'd' .or. histfreq(ns) == 'm' .or. & + histfreq(ns) == 'y') then + nstreams = nstreams + 1 + if (ns >= 2) then + if (histfreq(ns-1) == 'x') then + call abort_ice('ice: histfreq all non x must be at start of array') + endif + endif + else if (histfreq(ns) /= 'x') then + call abort_ice('ice: histfreq contains illegal element') + endif + enddo + if (nstreams == 0) write (nu_diag,*) 'WARNING: No history output' + do ns1 = 1, nstreams + do ns2 = 1, nstreams + if (histfreq(ns1) == histfreq(ns2) .and. ns1/=ns2 & + .and. my_task == master_task) then + call abort_ice('ice: histfreq elements must be unique') + endif + enddo + enddo + + if (.not. tr_iage) then + f_iage = 'x' + f_dagedtt = 'x' + f_dagedtd = 'x' + endif + if (.not. 
tr_FY) f_FY = 'x' + + if (kdyn /= 2) then + f_a11 = 'x' + f_a12 = 'x' + f_e11 = 'x' + f_e12 = 'x' + f_e22 = 'x' + f_s11 = 'x' + f_s12 = 'x' + f_s22 = 'x' + f_yieldstress11 = 'x' + f_yieldstress12 = 'x' + f_yieldstress22 = 'x' + endif + + ! these must be output at the same frequency because of + ! cos(zenith angle) averaging + if (f_albice(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_albice = f_albsni + if (f_albsno(1:1) /= 'x') f_albsno = f_albice + if (f_albpnd(1:1) /= 'x') f_albpnd = f_albice + if (f_coszen(1:1) /= 'x' .and. f_albice(1:1) /= 'x') f_coszen = f_albice + if (f_coszen(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_coszen = f_albsni + + ! to prevent array-out-of-bounds when aggregating + if (f_fmeltt_ai(1:1) /= 'x') f_fmelttn_ai = f_fmeltt_ai + + ! AEW: These are only calculated under certain circumstances + ! (if using multilayers with UM-style coupling) + if (calc_Tsfc .or. .not. heat_capacity) then + f_Tn_top = 'x' + f_keffn_top = 'x' + endif + +#ifndef ncdf + f_bounds = .false. +#endif + + ! write dimensions for 3D or 4D history variables + ! note: list of variables checked here is incomplete + if (f_aicen(1:1) /= 'x' .or. f_vicen(1:1) /= 'x' .or. & + f_Tinz (1:1) /= 'x' .or. f_Sinz (1:1) /= 'x') f_NCAT = .true. + if (f_Tinz (1:1) /= 'x' .or. f_Sinz (1:1) /= 'x') f_VGRDi = .true. + if (f_Tsnz (1:1) /= 'x') f_VGRDs = .true. + + call broadcast_scalar (f_tmask, master_task) + call broadcast_scalar (f_blkmask, master_task) + call broadcast_scalar (f_tarea, master_task) + call broadcast_scalar (f_uarea, master_task) + call broadcast_scalar (f_dxt, master_task) + call broadcast_scalar (f_dyt, master_task) + call broadcast_scalar (f_dxu, master_task) + call broadcast_scalar (f_dyu, master_task) + call broadcast_scalar (f_HTN, master_task) + call broadcast_scalar (f_HTE, master_task) + call broadcast_scalar (f_ANGLE, master_task) + call broadcast_scalar (f_ANGLET, master_task) + call broadcast_scalar (f_bounds, master_task) + call broadcast_scalar (f_NCAT, master_task) + call broadcast_scalar (f_VGRDi, master_task) + call broadcast_scalar (f_VGRDs, master_task) + call broadcast_scalar (f_VGRDb, master_task) + +! 
call broadcast_scalar (f_example, master_task) + call broadcast_scalar (f_hi, master_task) + call broadcast_scalar (f_hs, master_task) + call broadcast_scalar (f_Tsfc, master_task) + call broadcast_scalar (f_aice, master_task) + call broadcast_scalar (f_uvel, master_task) + call broadcast_scalar (f_vvel, master_task) + call broadcast_scalar (f_uatm, master_task) + call broadcast_scalar (f_vatm, master_task) + call broadcast_scalar (f_sice, master_task) + call broadcast_scalar (f_fswdn, master_task) + call broadcast_scalar (f_flwdn, master_task) + call broadcast_scalar (f_snow, master_task) + call broadcast_scalar (f_snow_ai, master_task) + call broadcast_scalar (f_rain, master_task) + call broadcast_scalar (f_rain_ai, master_task) + call broadcast_scalar (f_sst, master_task) + call broadcast_scalar (f_sss, master_task) + call broadcast_scalar (f_uocn, master_task) + call broadcast_scalar (f_vocn, master_task) + call broadcast_scalar (f_frzmlt, master_task) + call broadcast_scalar (f_fswfac, master_task) + call broadcast_scalar (f_fswint_ai, master_task) + call broadcast_scalar (f_fswabs, master_task) + call broadcast_scalar (f_fswabs_ai, master_task) + call broadcast_scalar (f_albsni, master_task) + call broadcast_scalar (f_alvdr, master_task) + call broadcast_scalar (f_alidr, master_task) + call broadcast_scalar (f_alvdf, master_task) + call broadcast_scalar (f_alidf, master_task) + call broadcast_scalar (f_albice, master_task) + call broadcast_scalar (f_albsno, master_task) + call broadcast_scalar (f_albpnd, master_task) + call broadcast_scalar (f_coszen, master_task) + call broadcast_scalar (f_flat, master_task) + call broadcast_scalar (f_flat_ai, master_task) + call broadcast_scalar (f_fsens, master_task) + call broadcast_scalar (f_fsens_ai, master_task) + call broadcast_scalar (f_flwup, master_task) + call broadcast_scalar (f_flwup_ai, master_task) + call broadcast_scalar (f_evap, master_task) + call broadcast_scalar (f_evap_ai, master_task) + call broadcast_scalar (f_Tair, master_task) + call broadcast_scalar (f_Tref, master_task) + call broadcast_scalar (f_Qref, master_task) + call broadcast_scalar (f_congel, master_task) + call broadcast_scalar (f_frazil, master_task) + call broadcast_scalar (f_snoice, master_task) + call broadcast_scalar (f_dsnow, master_task) + call broadcast_scalar (f_meltt, master_task) + call broadcast_scalar (f_melts, master_task) + call broadcast_scalar (f_meltb, master_task) + call broadcast_scalar (f_meltl, master_task) + call broadcast_scalar (f_fresh, master_task) + call broadcast_scalar (f_fresh_ai, master_task) + call broadcast_scalar (f_fsalt, master_task) + call broadcast_scalar (f_fsalt_ai, master_task) + call broadcast_scalar (f_fhocn, master_task) + call broadcast_scalar (f_fhocn_ai, master_task) + call broadcast_scalar (f_fswthru, master_task) + call broadcast_scalar (f_fswthru_ai, master_task) + call broadcast_scalar (f_strairx, master_task) + call broadcast_scalar (f_strairy, master_task) + call broadcast_scalar (f_strtltx, master_task) + call broadcast_scalar (f_strtlty, master_task) + call broadcast_scalar (f_strcorx, master_task) + call broadcast_scalar (f_strcory, master_task) + call broadcast_scalar (f_strocnx, master_task) + call broadcast_scalar (f_strocny, master_task) + call broadcast_scalar (f_strintx, master_task) + call broadcast_scalar (f_strinty, master_task) + call broadcast_scalar (f_strength, master_task) + call broadcast_scalar (f_divu, master_task) + call broadcast_scalar (f_shear, master_task) + call broadcast_scalar 
(f_sig1, master_task) + call broadcast_scalar (f_sig2, master_task) + call broadcast_scalar (f_dvidtt, master_task) + call broadcast_scalar (f_dvsdtt, master_task) + call broadcast_scalar (f_dvidtd, master_task) + call broadcast_scalar (f_dvsdtd, master_task) + call broadcast_scalar (f_daidtt, master_task) + call broadcast_scalar (f_daidtd, master_task) + call broadcast_scalar (f_dagedtt, master_task) + call broadcast_scalar (f_dagedtd, master_task) + call broadcast_scalar (f_mlt_onset, master_task) + call broadcast_scalar (f_frz_onset, master_task) + call broadcast_scalar (f_aisnap, master_task) + call broadcast_scalar (f_hisnap, master_task) + call broadcast_scalar (f_aicen, master_task) + call broadcast_scalar (f_vicen, master_task) + call broadcast_scalar (f_vsnon, master_task) + call broadcast_scalar (f_trsig, master_task) + call broadcast_scalar (f_icepresent, master_task) + call broadcast_scalar (f_fsurf_ai, master_task) + call broadcast_scalar (f_fcondtop_ai, master_task) + call broadcast_scalar (f_fmeltt_ai, master_task) + call broadcast_scalar (f_fsurfn_ai, master_task) + call broadcast_scalar (f_fcondtopn_ai, master_task) + call broadcast_scalar (f_fmelttn_ai, master_task) + call broadcast_scalar (f_flatn_ai, master_task) + call broadcast_scalar (f_fsensn_ai, master_task) + +! call broadcast_scalar (f_field3dz, master_task) + call broadcast_scalar (f_Tn_top, master_task) + call broadcast_scalar (f_keffn_top, master_task) + call broadcast_scalar (f_Tinz, master_task) + call broadcast_scalar (f_Sinz, master_task) + call broadcast_scalar (f_Tsnz, master_task) + + call broadcast_scalar (f_iage, master_task) + call broadcast_scalar (f_FY, master_task) + + call broadcast_scalar (f_a11, master_task) + call broadcast_scalar (f_a12, master_task) + call broadcast_scalar (f_e11, master_task) + call broadcast_scalar (f_e12, master_task) + call broadcast_scalar (f_e22, master_task) + call broadcast_scalar (f_s11, master_task) + call broadcast_scalar (f_s12, master_task) + call broadcast_scalar (f_s22, master_task) + call broadcast_scalar (f_yieldstress11, master_task) + call broadcast_scalar (f_yieldstress12, master_task) + call broadcast_scalar (f_yieldstress22, master_task) + + ! 2D variables + do ns1 = 1, nstreams + if (histfreq(ns1) /= 'x') then + +!!!!! begin example +! call define_hist_field(n_example,"example","m",tstr2D, tcstr, & +! "example: mean ice thickness", & +! "ice volume per unit grid cell area", c1, c0, & +! ns1, f_example) +!!!!! 
end example + + call define_hist_field(n_hi,"hi","m",tstr2D, tcstr, & + "grid cell mean ice thickness", & + "ice volume per unit grid cell area", c1, c0, & + ns1, f_hi) + + call define_hist_field(n_hs,"hs","m",tstr2D, tcstr, & + "grid cell mean snow thickness", & + "snow volume per unit grid cell area", c1, c0, & + ns1, f_hs) + + call define_hist_field(n_Tsfc,"Tsfc","C",tstr2D, tcstr, & + "snow/ice surface temperature", & + "averaged with Tf if no ice is present", c1, c0, & + ns1, f_Tsfc) + + call define_hist_field(n_aice,"aice","1",tstr2D, tcstr, & + "ice area (aggregate)", & + "none", c1, c0, & + ns1, f_aice) + + call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & + "ice velocity (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_uvel) + + call define_hist_field(n_vvel,"vvel","m/s",ustr2D, ucstr, & + "ice velocity (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_vvel) + + call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, & + "atm velocity (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_uatm) + + call define_hist_field(n_vatm,"vatm","m/s",ustr2D, ucstr, & + "atm velocity (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_vatm) + + call define_hist_field(n_sice,"sice","ppt",tstr2D, tcstr, & + "bulk ice salinity", & + "none", c1, c0, & + ns1, f_sice) + + call define_hist_field(n_fswdn,"fswdn","W/m^2",tstr2D, tcstr, & + "down solar flux", & + "positive downward", c1, c0, & + ns1, f_fswdn) + + call define_hist_field(n_flwdn,"flwdn","W/m^2",tstr2D, tcstr, & + "down longwave flux", & + "positive downward", c1, c0, & + ns1, f_flwdn) + + call define_hist_field(n_snow,"snow","cm/day",tstr2D, tcstr, & + "snowfall rate (cpl)", & + "none", mps_to_cmpdy/rhofresh, c0, & + ns1, f_snow) + + call define_hist_field(n_snow_ai,"snow_ai","cm/day",tstr2D, tcstr, & + "snowfall rate", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_snow_ai) + + call define_hist_field(n_rain,"rain","cm/day",tstr2D, tcstr, & + "rainfall rate (cpl)", & + "none", mps_to_cmpdy/rhofresh, c0, & + ns1, f_rain) + + call define_hist_field(n_rain_ai,"rain_ai","cm/day",tstr2D, tcstr, & + "rainfall rate", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_rain_ai) + + call define_hist_field(n_sst,"sst","C",tstr2D, tcstr, & + "sea surface temperature", & + "none", c1, c0, & + ns1, f_sst) + + call define_hist_field(n_sss,"sss","ppt",tstr2D, tcstr, & + "sea surface salinity", & + "none", c1, c0, & + ns1, f_sss) + + call define_hist_field(n_uocn,"uocn","m/s",ustr2D, ucstr, & + "ocean current (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_uocn) + + call define_hist_field(n_vocn,"vocn","m/s",ustr2D, ucstr, & + "ocean current (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_vocn) + + call define_hist_field(n_frzmlt,"frzmlt","W/m^2",tstr2D, tcstr, & + "freeze/melt potential", & + "if >0, new ice forms; if <0, ice melts", c1, c0, & + ns1, f_frzmlt) + + call define_hist_field(n_fswfac,"fswfac","1",tstr2D, tcstr, & + "shortwave scaling factor", & + "ratio of netsw new:old", c1, c0, & + ns1, f_fswfac) + + call define_hist_field(n_fswint_ai,"fswint_ai","W/m^2",tstr2D, tcstr, & + "shortwave absorbed in ice interior", & + "does not include surface", c1, c0, & + ns1, f_fswint_ai) + + call define_hist_field(n_fswabs,"fswabs","W/m^2",tstr2D, tcstr, & + "snow/ice/ocn absorbed solar flux (cpl)", & + "positive downward", c1, c0, & + ns1, f_fswabs) + + call define_hist_field(n_fswabs_ai,"fswabs_ai","W/m^2",tstr2D, 
tcstr, & + "snow/ice/ocn absorbed solar flux", & + "weighted by ice area", c1, c0, & + ns1, f_fswabs_ai) + +! call define_hist_field(n_albsni,"albsni","%",tstr2D, tcstr, & +! "snow/ice broad band albedo", & +! "scaled (divided) by aice", c100, c0, & +! ns1, f_albsni) + + call define_hist_field(n_albsni,"albsni","%",tstr2D, tcstr, & + "snow/ice broad band albedo", & + "averaged for coszen>0, weighted by aice", c100, c0, & + ns1, f_albsni) + + call define_hist_field(n_alvdr,"alvdr","%",tstr2D, tcstr, & + "visible direct albedo", & + "scaled (divided) by aice", c100, c0, & + ns1, f_alvdr) + + call define_hist_field(n_alidr,"alidr","%",tstr2D, tcstr, & + "near IR direct albedo", & + "scaled (divided) by aice", c100, c0, & + ns1, f_alidr) + + call define_hist_field(n_alvdf,"alvdf","%",tstr2D, tcstr, & + "visible diffuse albedo", & + "scaled (divided) by aice", c100, c0, & + ns1, f_alvdf) + + call define_hist_field(n_alidf,"alidf","%",tstr2D, tcstr, & + "near IR diffuse albedo", & + "scaled (divided) by aice", c100, c0, & + ns1, f_alidf) + + call define_hist_field(n_albice,"albice","%",tstr2D, tcstr, & + "bare ice albedo", & + "averaged for coszen>0, weighted by aice", c100, c0, & + ns1, f_albice) + + call define_hist_field(n_albsno,"albsno","%",tstr2D, tcstr, & + "snow albedo", & + "averaged for coszen>0, weighted by aice", c100, c0, & + ns1, f_albsno) + + call define_hist_field(n_albpnd,"albpnd","%",tstr2D, tcstr, & + "melt pond albedo", & + "averaged for coszen>0, weighted by aice", c100, c0, & + ns1, f_albpnd) + + call define_hist_field(n_coszen,"coszen","radian",tstr2D, tcstr, & + "cosine of the zenith angle", & + "negative below horizon", c1, c0, & + ns1, f_coszen) + + call define_hist_field(n_flat,"flat","W/m^2",tstr2D, tcstr, & + "latent heat flux (cpl)", & + "positive downward", c1, c0, & + ns1, f_flat) + + call define_hist_field(n_flat_ai,"flat_ai","W/m^2",tstr2D, tcstr, & + "latent heat flux", & + "weighted by ice area", c1, c0, & + ns1, f_flat_ai) + + call define_hist_field(n_fsens,"fsens","W/m^2",tstr2D, tcstr, & + "sensible heat flux (cpl)", & + "positive downward", c1, c0, & + ns1, f_fsens) + + call define_hist_field(n_fsens_ai,"fsens_ai","W/m^2",tstr2D, tcstr, & + "sensible heat flux", & + "weighted by ice area", c1, c0, & + ns1, f_fsens_ai) + + call define_hist_field(n_flwup,"flwup","W/m^2",tstr2D, tcstr, & + "upward longwave flux (cpl)", & + "positive downward", c1, c0, & + ns1, f_flwup) + + call define_hist_field(n_flwup_ai,"flwup_ai","W/m^2",tstr2D, tcstr, & + "upward longwave flux", & + "weighted by ice area", c1, c0, & + ns1, f_flwup_ai) + + call define_hist_field(n_evap,"evap","cm/day",tstr2D, tcstr, & + "evaporative water flux (cpl)", & + "none", mps_to_cmpdy/rhofresh, c0, & + ns1, f_evap) + + call define_hist_field(n_evap_ai,"evap_ai","cm/day",tstr2D, tcstr, & + "evaporative water flux", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_evap_ai) + + call define_hist_field(n_Tair,"Tair","C",tstr2D, tcstr, & + "air temperature", & + "none", c1, -Tffresh, & + ns1, f_Tair) + + call define_hist_field(n_Tref,"Tref","C",tstr2D, tcstr, & + "2m reference temperature", & + "none", c1, -Tffresh, & + ns1, f_Tref) + + call define_hist_field(n_Qref,"Qref","g/kg",tstr2D, tcstr, & + "2m reference specific humidity", & + "none", kg_to_g, c0, & + ns1, f_Qref) + + call define_hist_field(n_congel,"congel","cm/day",tstr2D, tcstr, & + "congelation ice growth", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_congel) + + call define_hist_field(n_frazil,"frazil","cm/day",tstr2D, 
tcstr, & + "frazil ice growth", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_frazil) + + call define_hist_field(n_snoice,"snoice","cm/day",tstr2D, tcstr, & + "snow-ice formation", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_snoice) + + call define_hist_field(n_dsnow,"dsnow","cm/day",tstr2D, tcstr, & + "snow formation", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_dsnow) + + call define_hist_field(n_meltt,"meltt","cm/day",tstr2D, tcstr, & + "top ice melt", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_meltt) + + call define_hist_field(n_melts,"melts","cm/day",tstr2D, tcstr, & + "top snow melt", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_melts) + + call define_hist_field(n_meltb,"meltb","cm/day",tstr2D, tcstr, & + "basal ice melt", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_meltb) + + call define_hist_field(n_meltl,"meltl","cm/day",tstr2D, tcstr, & + "lateral ice melt", & + "none", mps_to_cmpdy/dt, c0, & + ns1, f_meltl) + + call define_hist_field(n_fresh,"fresh","cm/day",tstr2D, tcstr, & + "freshwtr flx ice to ocn (cpl)", & + "if positive, ocean gains fresh water", & + mps_to_cmpdy/rhofresh, c0, & + ns1, f_fresh) + + call define_hist_field(n_fresh_ai,"fresh_ai","cm/day",tstr2D, tcstr, & + "freshwtr flx ice to ocn", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_fresh_ai) + + call define_hist_field(n_fsalt,"fsalt","kg/m^2/s",tstr2D, tcstr, & + "salt flux ice to ocn (cpl)", & + "if positive, ocean gains salt", c1, c0, & + ns1, f_fsalt) + + call define_hist_field(n_fsalt_ai,"fsalt_ai","kg/m^2/s",tstr2D, tcstr, & + "salt flux ice to ocean", & + "weighted by ice area", c1, c0, & + ns1, f_fsalt_ai) + + call define_hist_field(n_fhocn,"fhocn","W/m^2",tstr2D, tcstr, & + "heat flux ice to ocn (cpl)", & + "if positive, ocean gains heat", c1, c0, & + ns1, f_fhocn) + + call define_hist_field(n_fhocn_ai,"fhocn_ai","W/m^2",tstr2D, tcstr, & + "heat flux ice to ocean", & + "weighted by ice area", c1, c0, & + ns1, f_fhocn_ai) + + call define_hist_field(n_fswthru,"fswthru","W/m^2",tstr2D, tcstr, & + "SW thru ice to ocean (cpl)", & + "if positive, ocean gains heat", c1, c0, & + ns1, f_fswthru) + + call define_hist_field(n_fswthru_ai,"fswthru_ai","W/m^2",tstr2D, tcstr,& + "SW flux thru ice to ocean", & + "weighted by ice area", c1, c0, & + ns1, f_fswthru_ai) + + call define_hist_field(n_strairx,"strairx","N/m^2",ustr2D, ucstr, & + "atm/ice stress (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_strairx) + + call define_hist_field(n_strairy,"strairy","N/m^2",ustr2D, ucstr, & + "atm/ice stress (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_strairy) + + call define_hist_field(n_strtltx,"strtltx","N/m^2",ustr2D, ucstr, & + "sea sfc tilt stress (x)", & + "none", c1, c0, & + ns1, f_strtltx) + + call define_hist_field(n_strtlty,"strtlty","N/m^2",ustr2D, ucstr, & + "sea sfc tilt stress (y)", & + "none", c1, c0, & + ns1, f_strtlty) + + call define_hist_field(n_strcorx,"strcorx","N/m^2",ustr2D, ucstr, & + "coriolis stress (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_strcorx) + + call define_hist_field(n_strcory,"strcory","N/m^2",ustr2D, ucstr, & + "coriolis stress (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_strcory) + + call define_hist_field(n_strocnx,"strocnx","N/m^2",ustr2D, ucstr, & + "ocean/ice stress (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_strocnx) + + call define_hist_field(n_strocny,"strocny","N/m^2",ustr2D, ucstr, & + "ocean/ice stress (y)", & + "positive is y direction on U grid", c1, c0, & + 
ns1, f_strocny) + + call define_hist_field(n_strintx,"strintx","N/m^2",ustr2D, ucstr, & + "internal ice stress (x)", & + "positive is x direction on U grid", c1, c0, & + ns1, f_strintx) + + call define_hist_field(n_strinty,"strinty","N/m^2",ustr2D, ucstr, & + "internal ice stress (y)", & + "positive is y direction on U grid", c1, c0, & + ns1, f_strinty) + + call define_hist_field(n_strength,"strength","N/m",tstr2D, tcstr, & + "compressive ice strength", & + "none", c1, c0, & + ns1, f_strength) + + call define_hist_field(n_divu,"divu","%/day",tstr2D, tcstr, & + "strain rate (divergence)", & + "none", secday*c100, c0, & + ns1, f_divu) + + call define_hist_field(n_shear,"shear","%/day",tstr2D, tcstr, & + "strain rate (shear)", & + "none", secday*c100, c0, & + ns1, f_shear) + + call define_hist_field(n_sig1,"sig1","1",ustr2D, ucstr, & + "norm. principal stress 1", & + "sig1 is instantaneous", c1, c0, & + ns1, f_sig1) + + call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, & + "norm. principal stress 2", & + "sig2 is instantaneous", c1, c0, & + ns1, f_sig2) + + call define_hist_field(n_dvidtt,"dvidtt","cm/day",tstr2D, tcstr, & + "ice volume tendency thermo", & + "none", mps_to_cmpdy, c0, & + ns1, f_dvidtt) + + call define_hist_field(n_dvsdtt,"dvsdtt","cm/day",tstr2D, tcstr, & + "snow volume tendency thermo", & + "none", mps_to_cmpdy, c0, & + ns1, f_dvsdtt) + + call define_hist_field(n_dvidtd,"dvidtd","cm/day",tstr2D, tcstr, & + "ice volume tendency dynamics", & + "none", mps_to_cmpdy, c0, & + ns1, f_dvidtd) + + call define_hist_field(n_dvsdtd,"dvsdtd","cm/day",tstr2D, tcstr, & + "snow volume tendency dynamics", & + "none", mps_to_cmpdy, c0, & + ns1, f_dvsdtd) + + call define_hist_field(n_daidtt,"daidtt","%/day",tstr2D, tcstr, & + "area tendency thermo", & + "none", secday*c100, c0, & + ns1, f_daidtt) + + call define_hist_field(n_daidtd,"daidtd","%/day",tstr2D, tcstr, & + "area tendency dynamics", & + "none", secday*c100, c0, & + ns1, f_daidtd) + + call define_hist_field(n_dagedtt,"dagedtt","day/day",tstr2D, tcstr, & + "age tendency thermo", & + "excludes time step increment", c1, c0, & + ns1, f_dagedtt) + + call define_hist_field(n_dagedtd,"dagedtd","day/day",tstr2D, tcstr, & + "age tendency dynamics", & + "excludes time step increment", c1, c0, & + ns1, f_dagedtd) + + call define_hist_field(n_mlt_onset,"mlt_onset","day of year", & + tstr2D, tcstr,"melt onset date", & + "midyear restart gives erroneous dates", c1, c0, & + ns1, f_mlt_onset) + + call define_hist_field(n_frz_onset,"frz_onset","day of year", & + tstr2D, tcstr,"freeze onset date", & + "midyear restart gives erroneous dates", c1, c0, & + ns1, f_frz_onset) + + call define_hist_field(n_hisnap,"hisnap","m",tstr2D, tcstr, & + "ice volume snapshot", & + "none", c1, c0, & + ns1, f_hisnap) + + call define_hist_field(n_aisnap,"aisnap","1",tstr2D, tcstr, & + "ice area snapshot", & + "none", c1, c0, & + ns1, f_aisnap) + + call define_hist_field(n_trsig,"trsig","N/m^2",tstr2D, tcstr, & + "internal stress tensor trace", & + "ice strength approximation", c1, c0, & + ns1, f_trsig) + + call define_hist_field(n_icepresent,"ice_present","1",tstr2D, tcstr, & + "fraction of time-avg interval that ice is present", & + "ice extent flag", c1, c0, & + ns1, f_icepresent) + + call define_hist_field(n_fsurf_ai,"fsurf_ai","W/m^2",tstr2D, tcstr, & + "net surface heat flux", & + "positive downward, excludes conductive flux, weighted by ice area", & + c1, c0, & + ns1, f_fsurf_ai) + + call define_hist_field(n_fcondtop_ai,"fcondtop_ai","W/m^2", & + tstr2D, 
tcstr,"top surface conductive heat flux", & + "positive downward, weighted by ice area", c1, c0, & + ns1, f_fcondtop_ai) + + call define_hist_field(n_fmeltt_ai,"fmeltt_ai","W/m^2",tstr2D, tcstr, & + "net surface heat flux causing melt", & + "always >= 0, weighted by ice area", c1, c0, & + ns1, f_fmeltt_ai) + + call define_hist_field(n_a11,"a11"," ",tstr2D, tcstr, & + "a11: component a11 of the structure tensor", & + "none", c1, c0, & + ns1, f_a11) + + call define_hist_field(n_a12,"a12"," ",tstr2D, tcstr, & + "a12: component a12 of the structure tensor", & + "none", c1, c0, & + ns1, f_a12) + + call define_hist_field(n_e11,"e11","1/s",tstr2D, tcstr, & + "e11: component e11 of the strain rate tensor", & + "none", c1, c0, & + ns1, f_e11) + + call define_hist_field(n_e12,"e12","1/s",tstr2D, tcstr, & + "e12: component e12 of the strain rate tensor", & + "none", c1, c0, & + ns1, f_e12) + + call define_hist_field(n_e22,"e22","1/s",tstr2D, tcstr, & + "e22: component e22 of the strain rate tensor", & + "none", c1, c0, & + ns1, f_e22) + + call define_hist_field(n_s11,"s11","kg/s^2",tstr2D, tcstr, & + "s11: component s11 of the stress tensor", & + "none", c1, c0, & + ns1, f_s11) + + call define_hist_field(n_s12,"s12","kg/s^2",tstr2D, tcstr, & + "s12: component s12 of the stress tensor", & + "none", c1, c0, & + ns1, f_s12) + + call define_hist_field(n_s22,"s22","kg/s^2",tstr2D, tcstr, & + "s22: component s12 of the stress tensor", & + "none", c1, c0, & + ns1, f_s22) + + call define_hist_field(n_yieldstress11,"yieldstress11","kg/s^2",tstr2D, tcstr, & + "yieldstress11: component 11 of the yieldstress tensor", & + "none", c1, c0, & + ns1, f_yieldstress11) + + call define_hist_field(n_yieldstress12,"yieldstress12","kg/s^2",tstr2D, tcstr, & + "yieldstress12: component 12 of the yieldstress tensor", & + "none", c1, c0, & + ns1, f_yieldstress12) + + call define_hist_field(n_yieldstress22,"yieldstress22","kg/s^2",tstr2D, tcstr, & + "yieldstress22: component 12 of the yieldstress tensor", & + "none", c1, c0, & + ns1, f_yieldstress22) + + ! Tracers + + ! Ice Age + call define_hist_field(n_iage,"iage","years",tstr2D, tcstr, & + "sea ice age", & + "none", c1/(secday*days_per_year), c0, & + ns1, f_iage) + + ! First Year Ice Area + call define_hist_field(n_FY,"FYarea"," ",tstr2D, tcstr, & + "first-year ice area", & + "weighted by ice area", c1, c0, & + ns1, f_FY) + + endif ! if (histfreq(ns1) /= 'x') then + enddo ! ns1 + + ! other 2D history variables + + ! mechanical redistribution + call init_hist_mechred_2D + + ! melt ponds + if (tr_pond) call init_hist_pond_2D + + ! biogeochemistry + if (tr_aero .or. tr_brine .or. skl_bgc) call init_hist_bgc_2D + + if (formdrag) call init_hist_drag_2D + + !----------------------------------------------------------------- + ! 
3D (category) variables looped separately for ordering + !----------------------------------------------------------------- + do ns1 = 1, nstreams + if (histfreq(ns1) /= 'x') then + + call define_hist_field(n_aicen,"aicen","1",tstr3Dc, tcstr, & + "ice area, categories","none", c1, c0, & + ns1, f_aicen) + + call define_hist_field(n_vicen,"vicen","m",tstr3Dc, tcstr, & + "ice volume, categories","none", c1, c0, & + ns1, f_vicen) + + call define_hist_field(n_vsnon,"vsnon","m",tstr3Dc, tcstr, & + "snow depth on ice, categories","volume per unit area of snow", c1, c0, & + ns1, f_vsnon) + + call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & + "net surface heat flux, categories","weighted by ice area", c1, c0, & + ns1, f_fsurfn_ai) + + call define_hist_field(n_fcondtopn_ai,"fcondtopn_ai","W/m^2",tstr3Dc, tcstr, & + "top sfc conductive heat flux, cat","weighted by ice area", c1, c0, & + ns1, f_fcondtopn_ai) + + call define_hist_field(n_fmelttn_ai,"fmelttn_ai","W/m^2",tstr3Dc, tcstr, & + "net sfc heat flux causing melt, cat","weighted by ice area", c1, c0, & + ns1, f_fmelttn_ai) + + call define_hist_field(n_flatn_ai,"flatn_ai","W/m^2",tstr3Dc, tcstr, & + "latent heat flux, category","weighted by ice area", c1, c0, & + ns1, f_flatn_ai) + + call define_hist_field(n_fsensn_ai,"fsensn_ai","W/m^2",tstr3Dc, tcstr, & + "sensible heat flux, category","weighted by ice area", c1, c0, & + ns1, f_fsensn_ai) + + call define_hist_field(n_Tn_top,"Tn_top","K",tstr3Dc, tcstr, & + "temperature of the top layer (snow or ice), categories","multilayer scheme", c1, c0, & + ns1, f_Tn_top) + + call define_hist_field(n_keffn_top,"keffn_top","W/m^2/K",tstr3Dc, tcstr, & + "effective thermal conductivity of the top ice layer, categories", & + "multilayer scheme", c1, c0, & + ns1, f_keffn_top) + + endif ! if (histfreq(ns1) /= 'x') then + enddo ! ns1 + + ! other 3D (category) history variables + + ! mechanical redistribution + call init_hist_mechred_3Dc + + ! melt ponds + if (tr_pond) call init_hist_pond_3Dc + + ! biogeochemistry + if (tr_brine) call init_hist_bgc_3Dc + + !----------------------------------------------------------------- + ! 3D (vertical) variables must be looped separately + !----------------------------------------------------------------- + +! do ns1 = 1, nstreams +! if (histfreq(ns1) /= 'x') then + +! call define_hist_field(n_field3dz,"field3dz","1",tstr3Dz, tcstr, & +! "example 3dz field", & +! "vertical profile", c1, c0, & +! ns1, f_field3dz) + +! endif ! if (histfreq(ns1) /= 'x') then +! enddo ! ns1 + + !----------------------------------------------------------------- + ! 4D (categories, vertical) variables must be looped separately + !----------------------------------------------------------------- + + do ns1 = 1, nstreams + if (histfreq(ns1) /= 'x') then + + call define_hist_field(n_Tinz,"Tinz","C",tstr4Di, tcstr, & + "ice internal temperatures on CICE grid", & + "vertical profile", c1, c0, & + ns1, f_Tinz) + + call define_hist_field(n_Sinz,"Sinz","ppt",tstr4Di, tcstr, & + "ice internal bulk salinity", & + "vertical profile", c1, c0, & + ns1, f_Sinz) + + endif ! if (histfreq(ns1) /= 'x') then + enddo ! ns1 + + do ns1 = 1, nstreams + if (histfreq(ns1) /= 'x') then + + call define_hist_field(n_Tsnz,"Tsnz","C",tstr4Ds, tcstr, & + "snow internal temperatures", & + "vertical profile", c1, c0, & + ns1, f_Tsnz) + + endif ! 
if (histfreq(ns1) /= 'x') then + enddo + + if (f_Tinz (1:1) /= 'x') then + if (allocated(Tinz4d)) deallocate(Tinz4d) + allocate(Tinz4d(nx_block,ny_block,nzilyr,ncat_hist)) + endif + if (f_Sinz (1:1) /= 'x') then + if (allocated(Sinz4d)) deallocate(Sinz4d) + allocate(Sinz4d(nx_block,ny_block,nzilyr,ncat_hist)) + endif + if (f_Tsnz (1:1) /= 'x') then + if (allocated(Tsnz4d)) deallocate(Tsnz4d) + allocate(Tsnz4d(nx_block,ny_block,nzslyr,ncat_hist)) + endif + if (f_Sinz (1:1) /= 'x') then + if (allocated(Sinz4d)) deallocate(Sinz4d) + allocate(Sinz4d(nx_block,ny_block,nzilyr,ncat_hist)) + endif + + ! other 4D history variables + + ! biogeochemistry + if (tr_brine) call init_hist_bgc_4Db + + !----------------------------------------------------------------- + ! fill igrd array with namelist values + !----------------------------------------------------------------- + + igrd=.true. + + igrd(n_tmask ) = f_tmask + igrd(n_blkmask ) = f_blkmask + igrd(n_tarea ) = f_tarea + igrd(n_uarea ) = f_uarea + igrd(n_dxt ) = f_dxt + igrd(n_dyt ) = f_dyt + igrd(n_dxu ) = f_dxu + igrd(n_dyu ) = f_dyu + igrd(n_HTN ) = f_HTN + igrd(n_HTE ) = f_HTE + igrd(n_ANGLE ) = f_ANGLE + igrd(n_ANGLET ) = f_ANGLET + + igrdz=.true. + igrdz(n_NCAT ) = f_NCAT + igrdz(n_VGRDi ) = f_VGRDi + igrdz(n_VGRDs ) = f_VGRDs + igrdz(n_VGRDb ) = f_VGRDb + + !----------------------------------------------------------------- + ! diagnostic output + !----------------------------------------------------------------- + + ntmp(:) = 0 + if (my_task == master_task) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'The following variables will be ', & + 'written to the history tape: ' + write(nu_diag,101) 'description','units','variable','frequency','x' + do n=1,num_avail_hist_fields_tot + if (avail_hist_fields(n)%vhistfreq_n /= 0) & + write(nu_diag,100) avail_hist_fields(n)%vdesc, & + avail_hist_fields(n)%vunit, avail_hist_fields(n)%vname, & + avail_hist_fields(n)%vhistfreq,avail_hist_fields(n)%vhistfreq_n + do ns = 1, nstreams + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + ntmp(ns)=ntmp(ns)+1 + enddo + enddo ! num_avail_hist_fields_tot + write(nu_diag,*) ' ' + endif + 100 format (1x,a40,2x,a16,2x,a12,1x,a1,2x,i6) + 101 format (2x,a19,10x,a16,9x,a12,2x,a,3x,a1) + + call broadcast_array(ntmp, master_task) + do ns = 1, nstreams + if (ntmp(ns)==0) histfreq_n(ns) = 0 + enddo + + !----------------------------------------------------------------- + ! 
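(the buffers a2D, a3Dc, a3Dz, a3Db, a4Di, a4Ds and a4Db allocated below + ! hold the accumulated history fields; each is allocated only when at + ! least one field of that rank is requested, with layout + ! (nx_block, ny_block [,layer] [,category], field index, max_blocks)) + ! + ! 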
initialize the history arrays + !----------------------------------------------------------------- + + if (allocated(a2D)) deallocate(a2D) + if (num_avail_hist_fields_2D > 0) & + allocate(a2D(nx_block,ny_block,num_avail_hist_fields_2D,max_blocks)) + + if (allocated(a3Dc)) deallocate(a3Dc) + if (num_avail_hist_fields_3Dc > 0) & + allocate(a3Dc(nx_block,ny_block,ncat_hist,num_avail_hist_fields_3Dc,max_blocks)) + + nzlyr = max(nzilyr, nzslyr) + if (allocated(a3Dz)) deallocate(a3Dz) + if (num_avail_hist_fields_3Dz > 0) & + allocate(a3Dz(nx_block,ny_block,nzlyr,num_avail_hist_fields_3Dz,max_blocks)) + + nzlyrb = nzblyr + if (allocated(a3Db)) deallocate(a3Db) + if (num_avail_hist_fields_3Db > 0) & + allocate(a3Db(nx_block,ny_block,nzlyrb,num_avail_hist_fields_3Db,max_blocks)) + + if (allocated(a4Di)) deallocate(a4Di) + if (num_avail_hist_fields_4Di > 0) & + allocate(a4Di(nx_block,ny_block,nzilyr,ncat_hist,num_avail_hist_fields_4Di,max_blocks)) + if (allocated(a4Ds)) deallocate(a4Ds) + if (num_avail_hist_fields_4Ds > 0) & + allocate(a4Ds(nx_block,ny_block,nzslyr,ncat_hist,num_avail_hist_fields_4Ds,max_blocks)) + if (allocated(a4Db)) deallocate(a4Db) + if (num_avail_hist_fields_4Db > 0) & + allocate(a4Db(nx_block,ny_block,nzblyr,ncat_hist,num_avail_hist_fields_4Db,max_blocks)) + + if (allocated(a2D)) a2D (:,:,:,:) = c0 + if (allocated(a3Dc)) a3Dc(:,:,:,:,:) = c0 + if (allocated(a3Dz)) a3Dz(:,:,:,:,:) = c0 + if (allocated(a3Db)) a3Db(:,:,:,:,:) = c0 + if (allocated(a4Di)) a4Di(:,:,:,:,:,:) = c0 + if (allocated(a4Ds)) a4Ds(:,:,:,:,:,:) = c0 + if (allocated(a4Db)) a4Db(:,:,:,:,:,:) = c0 + avgct(:) = c0 + albcnt(:,:,:,:) = c0 + + if (restart .and. yday >= c2) then +! restarting midyear gives erroneous onset dates + mlt_onset = 999._dbl_kind + frz_onset = 999._dbl_kind + else + mlt_onset = c0 + frz_onset = c0 + endif + + end subroutine init_hist + +!======================================================================= + +! accumulate average ice quantities or snapshots +! +! author: Elizabeth C. Hunke, LANL + + subroutine accum_hist (dt) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_constants, only: c0, c1, p25, puny, secday, depressT, & + awtvdr, awtidr, awtvdf, awtidf, Lfresh, rhos, cp_ice, spval + use ice_domain, only: blocks_ice, nblocks + use ice_grid, only: tmask, lmask_n, lmask_s +#ifdef AusCOM + use ice_grid, only: umask +!ars599: 27032014 +! was in subroutine ice_write_hist of ice_history_write +! but seems to be here rather than subroutine ice_write_hist +! new code set in ice_constants +! use ice_shortwave, only: awtvdr, awtidr, awtvdf, awtidf +! use ice_constants, only: awtvdr, awtidr, awtvdf, awtidf +! 
however since already used at line #1145 so mark out +#endif + use ice_calendar, only: new_year, write_history, & + write_ic, time, histfreq, nstreams, month, & + new_month + use ice_dyn_eap, only: a11, a12, e11, e12, e22, s11, s12, s22, & + yieldstress11, yieldstress12, yieldstress22 + use ice_dyn_shared, only: kdyn, principal_stress + use ice_flux, only: fsw, flw, fsnow, frain, sst, sss, uocn, vocn, & + frzmlt_init, fswfac, fswabs, fswthru, alvdr, alvdf, alidr, alidf, & + albice, albsno, albpnd, coszen, flat, fsens, flwout, evap, & + Tair, Tref, Qref, congel, frazil, snoice, dsnow, & + melts, meltb, meltt, meltl, fresh, fsalt, fresh_ai, fsalt_ai, & + fhocn, fhocn_ai, uatm, vatm, & + fswthru_ai, strairx, strairy, strtltx, strtlty, strintx, strinty, & + strocnx, strocny, fm, daidtt, dvidtt, dvsdtt, daidtd, dvidtd, dvsdtd, fsurf, & + fcondtop, fsurfn, fcondtopn, flatn, fsensn, albcnt, prs_sig, & + stressp_1, stressm_1, stress12_1, & + stressp_2, stressm_2, stress12_2, & + stressp_3, stressm_3, stress12_3, & + stressp_4, stressm_4, stress12_4, sig1, sig2, & + mlt_onset, frz_onset, dagedtt, dagedtd, fswint_ai, Tn_top, & + keffn_top + use ice_atmo, only: formdrag + use ice_history_shared ! almost everything + use ice_history_write, only: ice_write_hist + use ice_history_bgc, only: accum_hist_bgc + use ice_history_mechred, only: accum_hist_mechred + use ice_history_pond, only: accum_hist_pond + use ice_history_drag, only: accum_hist_drag + use ice_state ! almost everything + use ice_therm_shared, only: calculate_Tin_from_qin, Tmlt, ktherm + use ice_therm_mushy, only: temperature_mush, temperature_snow + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_readwrite + use ice_zbgc_shared, only: skl_bgc + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + i,j,k,ic,n,ns,nn, & + iblk , & ! block index + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nstrm ! nstreams (1 if writing initial condition) + + real (kind=dbl_kind) :: & + ravgct , & ! 1/avgct + ravgctz ! 1/avgct + + real (kind=dbl_kind) :: & + qn , & ! temporary variable for enthalpy + Tmlts ! temporary variable for melting temperature + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + worka, workb + + type (block) :: & + this_block ! block information for current block + + !--------------------------------------------------------------- + ! increment step counter + !--------------------------------------------------------------- + + n2D = num_avail_hist_fields_2D + n3Dccum = n2D + num_avail_hist_fields_3Dc + n3Dzcum = n3Dccum + num_avail_hist_fields_3Dz + n3Dbcum = n3Dzcum + num_avail_hist_fields_3Db + n4Dicum = n3Dbcum + num_avail_hist_fields_4Di + n4Dscum = n4Dicum + num_avail_hist_fields_4Ds + n4Dbcum = n4Dscum + num_avail_hist_fields_4Db ! should equal num_avail_hist_fields_tot + + do ns = 1,nstreams + if (.not. hist_avg .or. histfreq(ns) == '1') then ! 
write snapshots + do n = 1,n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a2D(:,:,n,:) = c0 + enddo + do n = n2D + 1, n3Dccum + nn = n - n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a3Dc(:,:,:,nn,:) = c0 + enddo + do n = n3Dccum + 1, n3Dzcum + nn = n - n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a3Dz(:,:,:,nn,:) = c0 + enddo + do n = n3Dzcum + 1, n3Dbcum + nn = n - n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a3Db(:,:,:,nn,:) = c0 + enddo + do n = n3Dbcum + 1, n4Dicum + nn = n - n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a4Di(:,:,:,:,nn,:) = c0 + enddo + do n = n4Dicum + 1, n4Dscum + nn = n - n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a4Ds(:,:,:,:,nn,:) = c0 + enddo + do n = n4Dscum + 1, n4Dbcum + nn = n - n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & + a4Db(:,:,:,:,nn,:) = c0 + enddo + avgct(ns) = c1 + else ! write averages over time histfreq + avgct(ns) = avgct(ns) + c1 +! if (avgct(ns) == c1) time_beg(ns) = (time-dt)/int(secday) + if (avgct(ns) == c1) then + time_beg(ns) = (time-dt)/int(secday) + time_beg(ns) = real(time_beg(ns),kind=real_kind) + endif + endif + enddo + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP k,n,qn,ns,worka,workb) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + workb(:,:) = aice_init(:,:,iblk) + +! if (f_example(1:1) /= 'x') & +! call accum_hist_field(n_example,iblk, vice(:,:,iblk), a2D) + if (f_hi (1:1) /= 'x') & + call accum_hist_field(n_hi, iblk, vice(:,:,iblk), a2D) + if (f_hs (1:1) /= 'x') & + call accum_hist_field(n_hs, iblk, vsno(:,:,iblk), a2D) + if (f_Tsfc (1:1) /= 'x') & + call accum_hist_field(n_Tsfc, iblk, trcr(:,:,nt_Tsfc,iblk), a2D) + if (f_aice (1:1) /= 'x') & + call accum_hist_field(n_aice, iblk, aice(:,:,iblk), a2D) + if (f_uvel (1:1) /= 'x') & + call accum_hist_field(n_uvel, iblk, uvel(:,:,iblk), a2D) + if (f_vvel (1:1) /= 'x') & + call accum_hist_field(n_vvel, iblk, vvel(:,:,iblk), a2D) + if (f_uatm (1:1) /= 'x') & + call accum_hist_field(n_uatm, iblk, uatm(:,:,iblk), a2D) + if (f_vatm (1:1) /= 'x') & + call accum_hist_field(n_vatm, iblk, vatm(:,:,iblk), a2D) + + if (f_sice (1:1) /= 'x') then + do j = jlo, jhi + do i = ilo, ihi + worka(i,j) = c0 + do k = 1, nzilyr + worka(i,j) = worka(i,j) + trcr(i,j,nt_sice+k-1,iblk) + enddo + worka(i,j) = worka(i,j) / nzilyr + enddo + enddo + call accum_hist_field(n_sice, iblk, worka(:,:), a2D) + endif + + if (f_fswdn (1:1) /= 'x') & + call accum_hist_field(n_fswdn, iblk, fsw(:,:,iblk), a2D) + if (f_flwdn (1:1) /= 'x') & + call accum_hist_field(n_flwdn, iblk, flw(:,:,iblk), a2D) + if (f_snow (1:1) /= 'x') & + call accum_hist_field(n_snow, iblk, fsnow(:,:,iblk), a2D) + if (f_snow_ai(1:1) /= 'x') & + call accum_hist_field(n_snow_ai,iblk, fsnow(:,:,iblk)*workb(:,:), a2D) + if (f_rain (1:1) /= 'x') & + call accum_hist_field(n_rain, iblk, frain(:,:,iblk), a2D) + if (f_rain_ai(1:1) /= 'x') & + call accum_hist_field(n_rain_ai,iblk, frain(:,:,iblk)*workb(:,:), a2D) + + if (f_sst (1:1) /= 'x') & + call accum_hist_field(n_sst, iblk, sst(:,:,iblk), a2D) + if (f_sss (1:1) /= 'x') & + call accum_hist_field(n_sss, iblk, sss(:,:,iblk), a2D) + if (f_uocn (1:1) /= 'x') & + call 
accum_hist_field(n_uocn, iblk, uocn(:,:,iblk), a2D) + if (f_vocn (1:1) /= 'x') & + call accum_hist_field(n_vocn, iblk, vocn(:,:,iblk), a2D) + if (f_frzmlt (1:1) /= 'x') & + call accum_hist_field(n_frzmlt, iblk, frzmlt_init(:,:,iblk), a2D) + + if (f_fswfac (1:1) /= 'x') & + call accum_hist_field(n_fswfac, iblk, fswfac(:,:,iblk), a2D) + if (f_fswabs (1:1) /= 'x') & + call accum_hist_field(n_fswabs, iblk, fswabs(:,:,iblk), a2D) + + if (f_fswint_ai (1:1) /= 'x') & + call accum_hist_field(n_fswint_ai, iblk, fswint_ai(:,:,iblk), a2D) + + if (f_fswabs_ai(1:1)/= 'x') & + call accum_hist_field(n_fswabs_ai, iblk, fswabs(:,:,iblk)*workb(:,:), a2D) + + if (f_albsni (1:1) /= 'x') & + call accum_hist_field(n_albsni, iblk, & + (awtvdr*alvdr(:,:,iblk) & + + awtidr*alidr(:,:,iblk) & + + awtvdf*alvdf(:,:,iblk) & + + awtidf*alidf(:,:,iblk))*aice(:,:,iblk), a2D) +! awtvdr*alvdr(:,:,iblk) & +! + awtidr*alidr(:,:,iblk) & +! + awtvdf*alvdf(:,:,iblk) & +! + awtidf*alidf(:,:,iblk), a2D) + if (f_alvdr (1:1) /= 'x') & + call accum_hist_field(n_alvdr, iblk, alvdr(:,:,iblk), a2D) + if (f_alidr (1:1) /= 'x') & + call accum_hist_field(n_alidr, iblk, alidr(:,:,iblk), a2D) + if (f_alvdf (1:1) /= 'x') & + call accum_hist_field(n_alvdf, iblk, alvdf(:,:,iblk), a2D) + if (f_alidf (1:1) /= 'x') & + call accum_hist_field(n_alidf, iblk, alidf(:,:,iblk), a2D) + + if (f_albice (1:1) /= 'x') & + call accum_hist_field(n_albice, iblk, albice(:,:,iblk), a2D) + if (f_albsno (1:1) /= 'x') & + call accum_hist_field(n_albsno, iblk, albsno(:,:,iblk), a2D) + if (f_albpnd (1:1) /= 'x') & + call accum_hist_field(n_albpnd, iblk, albpnd(:,:,iblk), a2D) + if (f_coszen (1:1) /= 'x') & + call accum_hist_field(n_coszen, iblk, coszen(:,:,iblk), a2D) + + if (f_flat (1:1) /= 'x') & + call accum_hist_field(n_flat, iblk, flat(:,:,iblk), a2D) + if (f_flat_ai(1:1) /= 'x') & + call accum_hist_field(n_flat_ai,iblk, flat(:,:,iblk)*workb(:,:), a2D) + if (f_fsens (1:1) /= 'x') & + call accum_hist_field(n_fsens, iblk, fsens(:,:,iblk), a2D) + if (f_fsens_ai(1:1)/= 'x') & + call accum_hist_field(n_fsens_ai,iblk, fsens(:,:,iblk)*workb(:,:), a2D) + if (f_flwup (1:1) /= 'x') & + call accum_hist_field(n_flwup, iblk, flwout(:,:,iblk), a2D) + if (f_flwup_ai(1:1)/= 'x') & + call accum_hist_field(n_flwup_ai,iblk, flwout(:,:,iblk)*workb(:,:), a2D) + if (f_evap (1:1) /= 'x') & + call accum_hist_field(n_evap, iblk, evap(:,:,iblk), a2D) + if (f_evap_ai(1:1) /= 'x') & + call accum_hist_field(n_evap_ai,iblk, evap(:,:,iblk)*workb(:,:), a2D) + + if (f_Tair (1:1) /= 'x') & + call accum_hist_field(n_Tair, iblk, Tair(:,:,iblk), a2D) + if (f_Tref (1:1) /= 'x') & + call accum_hist_field(n_Tref, iblk, Tref(:,:,iblk), a2D) + if (f_Qref (1:1) /= 'x') & + call accum_hist_field(n_Qref, iblk, Qref(:,:,iblk), a2D) + if (f_congel (1:1) /= 'x') & + call accum_hist_field(n_congel, iblk, congel(:,:,iblk), a2D) + if (f_frazil (1:1) /= 'x') & + call accum_hist_field(n_frazil, iblk, frazil(:,:,iblk), a2D) + if (f_snoice (1:1) /= 'x') & + call accum_hist_field(n_snoice, iblk, snoice(:,:,iblk), a2D) + if (f_dsnow (1:1) /= 'x') & + call accum_hist_field(n_dsnow, iblk, dsnow(:,:,iblk), a2D) + if (f_meltt (1:1) /= 'x') & + call accum_hist_field(n_meltt, iblk, meltt(:,:,iblk), a2D) + if (f_melts (1:1) /= 'x') & + call accum_hist_field(n_melts, iblk, melts(:,:,iblk), a2D) + if (f_meltb (1:1) /= 'x') & + call accum_hist_field(n_meltb, iblk, meltb(:,:,iblk), a2D) + if (f_meltl (1:1) /= 'x') & + call accum_hist_field(n_meltl, iblk, meltl(:,:,iblk), a2D) + + if (f_fresh (1:1) /= 'x') & + call 
accum_hist_field(n_fresh, iblk, fresh(:,:,iblk), a2D) + if (f_fresh_ai(1:1)/= 'x') & + call accum_hist_field(n_fresh_ai,iblk, fresh_ai(:,:,iblk), a2D) + if (f_fsalt (1:1) /= 'x') & + call accum_hist_field(n_fsalt, iblk, fsalt(:,:,iblk), a2D) + if (f_fsalt_ai(1:1)/= 'x') & + call accum_hist_field(n_fsalt_ai,iblk, fsalt_ai(:,:,iblk), a2D) + + if (f_fhocn (1:1) /= 'x') & + call accum_hist_field(n_fhocn, iblk, fhocn(:,:,iblk), a2D) + if (f_fhocn_ai(1:1)/= 'x') & + call accum_hist_field(n_fhocn_ai,iblk, fhocn_ai(:,:,iblk), a2D) + if (f_fswthru(1:1) /= 'x') & + call accum_hist_field(n_fswthru, iblk, fswthru(:,:,iblk), a2D) + if (f_fswthru_ai(1:1)/= 'x') & + call accum_hist_field(n_fswthru_ai,iblk, fswthru_ai(:,:,iblk), a2D) + + if (f_strairx(1:1) /= 'x') & + call accum_hist_field(n_strairx, iblk, strairx(:,:,iblk), a2D) + if (f_strairy(1:1) /= 'x') & + call accum_hist_field(n_strairy, iblk, strairy(:,:,iblk), a2D) + if (f_strtltx(1:1) /= 'x') & + call accum_hist_field(n_strtltx, iblk, strtltx(:,:,iblk), a2D) + if (f_strtlty(1:1) /= 'x') & + call accum_hist_field(n_strtlty, iblk, strtlty(:,:,iblk), a2D) + if (f_strcorx(1:1) /= 'x') & + call accum_hist_field(n_strcorx, iblk, fm(:,:,iblk)*vvel(:,:,iblk), a2D) + if (f_strcory(1:1) /= 'x') & + call accum_hist_field(n_strcory, iblk,-fm(:,:,iblk)*uvel(:,:,iblk), a2D) + if (f_strocnx(1:1) /= 'x') & + call accum_hist_field(n_strocnx, iblk, strocnx(:,:,iblk), a2D) + if (f_strocny(1:1) /= 'x') & + call accum_hist_field(n_strocny, iblk, strocny(:,:,iblk), a2D) + if (f_strintx(1:1) /= 'x') & + call accum_hist_field(n_strintx, iblk, strintx(:,:,iblk), a2D) + if (f_strinty(1:1) /= 'x') & + call accum_hist_field(n_strinty, iblk, strinty(:,:,iblk), a2D) + if (f_strength(1:1)/= 'x') & + call accum_hist_field(n_strength,iblk, strength(:,:,iblk), a2D) + +! The following fields (divu, shear, sig1, and sig2) will be smeared +! if averaged over more than a few days. +! Snapshots may be more useful (see below). + +! if (f_divu (1:1) /= 'x') & +! call accum_hist_field(n_divu, iblk, divu(:,:,iblk), a2D) +! if (f_shear (1:1) /= 'x') & +! call accum_hist_field(n_shear, iblk, shear(:,:,iblk), a2D) +! if (f_sig1 (1:1) /= 'x') & +! call accum_hist_field(n_sig1, iblk, sig1(:,:,iblk), a2D) +! if (f_sig2 (1:1) /= 'x') & +! call accum_hist_field(n_sig2, iblk, sig2(:,:,iblk), a2D) +! if (f_trsig (1:1) /= 'x') & +! 
call accum_hist_field(n_trsig, iblk, trsig(:,:,iblk), a2D) + + if (f_dvidtt (1:1) /= 'x') & + call accum_hist_field(n_dvidtt, iblk, dvidtt(:,:,iblk), a2D) + if (f_dvsdtt (1:1) /= 'x') & + call accum_hist_field(n_dvsdtt, iblk, dvsdtt(:,:,iblk), a2D) + if (f_dvidtd (1:1) /= 'x') & + call accum_hist_field(n_dvidtd, iblk, dvidtd(:,:,iblk), a2D) + if (f_dvsdtd (1:1) /= 'x') & + call accum_hist_field(n_dvsdtd, iblk, dvsdtd(:,:,iblk), a2D) + if (f_daidtt (1:1) /= 'x') & + call accum_hist_field(n_daidtt, iblk, daidtt(:,:,iblk), a2D) + if (f_daidtd (1:1) /= 'x') & + call accum_hist_field(n_daidtd, iblk, daidtd(:,:,iblk), a2D) + if (f_dagedtt (1:1) /= 'x') & + call accum_hist_field(n_dagedtt, iblk, dagedtt(:,:,iblk), a2D) + if (f_dagedtd (1:1) /= 'x') & + call accum_hist_field(n_dagedtd, iblk, dagedtd(:,:,iblk), a2D) + + if (f_fsurf_ai(1:1)/= 'x') & + call accum_hist_field(n_fsurf_ai,iblk, fsurf(:,:,iblk)*workb(:,:), a2D) + if (f_fcondtop_ai(1:1)/= 'x') & + call accum_hist_field(n_fcondtop_ai, iblk, & + fcondtop(:,:,iblk)*workb(:,:), a2D) + + if (f_icepresent(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = c1 + enddo + enddo + call accum_hist_field(n_icepresent, iblk, worka(:,:), a2D) + endif + + ! 3D category fields + if (f_aicen (1:1) /= 'x') & +!BX: call accum_hist_field(n_aicen-n2D, iblk, ncat_hist, & + call accum_hist_field_3D3D(n_aicen-n2D, iblk, ncat_hist, & + aicen(:,:,1:ncat_hist,iblk), a3Dc) + if (f_vicen (1:1) /= 'x') & +!BX: call accum_hist_field(n_vicen-n2D, iblk, ncat_hist, & + call accum_hist_field_3D3D(n_vicen-n2D, iblk, ncat_hist, & + vicen(:,:,1:ncat_hist,iblk), a3Dc) + if (f_vsnon (1:1) /= 'x') & +!BX: call accum_hist_field(n_vsnon-n2D, iblk, ncat_hist, & + call accum_hist_field_3D3D(n_vsnon-n2D, iblk, ncat_hist, & + + vsnon(:,:,1:ncat_hist,iblk), a3Dc) + if (f_Tn_top (1:1) /= 'x') & + call accum_hist_field(n_Tn_top-n2D, iblk, ncat_hist, & + Tn_top(:,:,1:ncat_hist,iblk), a3Dc) + if (f_keffn_top (1:1) /= 'x') & + call accum_hist_field(n_keffn_top-n2D, iblk, ncat_hist, & + keffn_top(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fsurfn_ai (1:1) /= 'x') & + call accum_hist_field(n_fsurfn_ai-n2D, iblk, ncat_hist, & + fsurfn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fcondtopn_ai (1:1) /= 'x') & + call accum_hist_field(n_fcondtopn_ai-n2D, iblk, ncat_hist, & + fcondtopn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_flatn_ai (1:1) /= 'x') & + call accum_hist_field(n_flatn_ai-n2D, iblk, ncat_hist, & + flatn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fsensn_ai (1:1) /= 'x') & + call accum_hist_field(n_fsensn_ai-n2D, iblk, ncat_hist, & + fsensn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + ! Calculate surface heat flux that causes melt (calculated by the + ! atmos in HadGEM3 so needed for checking purposes) + if (f_fmelttn_ai (1:1) /= 'x') & + call accum_hist_field(n_fmelttn_ai-n2D, iblk, ncat_hist, & + max(fsurfn(:,:,1:ncat_hist,iblk) - fcondtopn(:,:,1:ncat_hist,iblk),c0) & + *aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + +! example for 3D field (x,y,z) +! if (f_field3dz (1:1) /= 'x') & +! call accum_hist_field(n_field3dz-n3Dccum, iblk, nzilyr, & +! field3dz(:,:,1:nzilyr,iblk), a3Dz) + + ! 
4D category fields + if (f_Tinz (1:1) /= 'x') then + Tinz4d(:,:,:,:) = c0 + if (ktherm == 2) then + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzilyr + Tinz4d(i,j,k,n) = temperature_mush( & + trcrn(i,j,nt_qice+k-1,n,iblk), trcrn(i,j,nt_sice+k-1,n,iblk)) + enddo + enddo + enddo + enddo + else + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzilyr + qn = trcrn(i,j,nt_qice+k-1,n,iblk) +! Tinz4d(i,j,k,n) = calculate_Tin_from_qin(qn,Tmlt(k)) + Tmlts = -trcrn(i,j,nt_sice+k-1,n,iblk)*depressT + Tinz4d(i,j,k,n) = calculate_Tin_from_qin(qn,Tmlts) + enddo + enddo + enddo + enddo + endif + call accum_hist_field(n_Tinz-n3Dbcum, iblk, nzilyr, ncat_hist, & + Tinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) + endif + if (f_Sinz (1:1) /= 'x') then + Sinz4d(:,:,:,:) = c0 + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (vicen(i,j,n,iblk) > puny) then + Sinz4d(i,j,1:nzilyr,n) = trcrn(i,j,nt_sice:nt_sice+nzilyr-1,n,iblk) + endif + enddo + enddo + enddo + call accum_hist_field(n_Sinz-n3Dbcum, iblk, nzilyr, ncat_hist, & + Sinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) + endif + + if (f_Tsnz (1:1) /= 'x') then + Tsnz4d(:,:,:,:) = c0 + if (ktherm == 2) then + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzslyr + qn = trcrn(i,j,nt_qsno+k-1,n,iblk) + Tsnz4d(i,j,k,n) = temperature_snow(trcrn(i,j,nt_qsno+k-1,n,iblk)) + enddo + enddo + enddo + enddo + else + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzslyr + qn = trcrn(i,j,nt_qsno+k-1,n,iblk) + Tsnz4d(i,j,k,n) = (Lfresh + qn/rhos)/cp_ice + enddo + enddo + enddo + enddo + endif + call accum_hist_field(n_Tsnz-n4Dicum, iblk, nzslyr, ncat_hist, & + Tsnz4d(:,:,1:nzslyr,1:ncat_hist), a4Ds) + endif + + ! Calculate aggregate surface melt flux by summing category values + if (f_fmeltt_ai(1:1) /= 'x') then + do ns = 1, nstreams + if (n_fmeltt_ai(ns) /= 0) then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + do n=1,ncat_hist + worka(i,j) = worka(i,j) + a3Dc(i,j,n,n_fmelttn_ai(ns)-n2D,iblk) + enddo ! n + endif ! tmask + enddo ! i + enddo ! j + a2D(:,:,n_fmeltt_ai(ns),iblk) = worka(:,:) + endif + enddo + endif + + !--------------------------------------------------------------- + ! accumulate other history output + !--------------------------------------------------------------- + + ! mechanical redistribution + call accum_hist_mechred (iblk) + + ! melt ponds + if (tr_pond) call accum_hist_pond (iblk) + + ! biogeochemistry + if (tr_aero .or. tr_brine .or. skl_bgc) call accum_hist_bgc (iblk) + + ! form drag + if (formdrag) call accum_hist_drag (iblk) + + enddo ! iblk + !$OMP END PARALLEL DO + + !--------------------------------------------------------------- + ! Write output files at prescribed intervals + !--------------------------------------------------------------- + + nstrm = nstreams + if (write_ic) nstrm = 1 + + do ns = 1, nstrm + if (write_history(ns) .or. write_ic) then + + !--------------------------------------------------------------- + ! 
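(for each stream that is due to be written, the accumulated sums are + ! converted to time means in physical units as cona*sum/avgct(ns) + conb, + ! land points are set to spval, and the albedo fields are renormalized + ! by the daylight counter albcnt instead of avgct) + ! + ! 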
Mask out land points and convert units + !--------------------------------------------------------------- + + ravgct = c1/avgct(ns) + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP n,nn,ravgctz) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do n = 1, num_avail_hist_fields_2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) then + + do j = jlo, jhi + do i = ilo, ihi +#ifdef AusCOM + if (n_uocn(ns)==n.or.n_vocn(ns)==n) then + if (.not. umask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval + else ! convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + * ravgct + avail_hist_fields(n)%conb + endif + else + if (.not. tmask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval + else ! convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + * ravgct + avail_hist_fields(n)%conb + endif + endif +#else + if (.not. tmask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval + else ! convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + * ravgct + avail_hist_fields(n)%conb + endif +#endif + enddo ! i + enddo ! j + + ! back out albedo/zenith angle dependence + if (avail_hist_fields(n)%vname(1:6) == 'albice') then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + ravgctz = c0 + if (albcnt(i,j,iblk,ns) > puny) & + ravgctz = c1/albcnt(i,j,iblk,ns) + if (f_albice (1:1) /= 'x' .and. n_albice(ns) /= 0) & + a2D(i,j,n_albice(ns),iblk) = & + a2D(i,j,n_albice(ns),iblk)*avgct(ns)*ravgctz + if (f_albsno (1:1) /= 'x' .and. n_albsno(ns) /= 0) & + a2D(i,j,n_albsno(ns),iblk) = & + a2D(i,j,n_albsno(ns),iblk)*avgct(ns)*ravgctz + if (f_albpnd (1:1) /= 'x' .and. n_albpnd(ns) /= 0) & + a2D(i,j,n_albpnd(ns),iblk) = & + a2D(i,j,n_albpnd(ns),iblk)*avgct(ns)*ravgctz + endif + enddo ! i + enddo ! j + endif + if (avail_hist_fields(n)%vname(1:6) == 'albsni') then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + ravgctz = c0 + if (albcnt(i,j,iblk,ns) > puny) & + ravgctz = c1/albcnt(i,j,iblk,ns) + if (f_albsni (1:1) /= 'x' .and. n_albsni(ns) /= 0) & + a2D(i,j,n_albsni(ns),iblk) = & + a2D(i,j,n_albsni(ns),iblk)*avgct(ns)*ravgctz + endif + enddo ! i + enddo ! j + endif + + endif + enddo ! n + + do n = 1, num_avail_hist_fields_3Dc + nn = n2D + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a3Dc(i,j,k,n,iblk) = spval + else ! convert units + a3Dc(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Dc(i,j,k,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! k + endif + enddo ! n + + do n = 1, num_avail_hist_fields_3Dz + nn = n3Dccum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzlyr + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a3Dz(i,j,k,n,iblk) = spval + else ! convert units + a3Dz(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Dz(i,j,k,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! k + endif + enddo ! n + do n = 1, num_avail_hist_fields_3Db + nn = n3Dzcum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzblyr + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! 
mask out land points + a3Db(i,j,k,n,iblk) = spval + else ! convert units + a3Db(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Db(i,j,k,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! k + endif + enddo ! n + + do n = 1, num_avail_hist_fields_4Di + nn = n3Dbcum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzilyr + do ic = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a4Di(i,j,k,ic,n,iblk) = spval + else ! convert units + a4Di(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Di(i,j,k,ic,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! ic + enddo ! k + endif + enddo ! n + + do n = 1, num_avail_hist_fields_4Ds + nn = n4Dicum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzslyr + do ic = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a4Ds(i,j,k,ic,n,iblk) = spval + else ! convert units + a4Ds(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Ds(i,j,k,ic,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! ic + enddo ! k + endif + enddo ! n + do n = 1, num_avail_hist_fields_4Db + nn = n4Dscum + n + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + do k = 1, nzblyr + do ic = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a4Db(i,j,k,ic,n,iblk) = spval + else ! convert units + a4Db(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Db(i,j,k,ic,n,iblk) & + * ravgct + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! ic + enddo ! k + endif + enddo ! n + + !--------------------------------------------------------------- + ! snapshots + !--------------------------------------------------------------- + + ! compute sig1 and sig2 + + call principal_stress (nx_block, ny_block, & + stressp_1 (:,:,iblk), & + stressm_1 (:,:,iblk), & + stress12_1(:,:,iblk), & + prs_sig (:,:,iblk), & + sig1 (:,:,iblk), & + sig2 (:,:,iblk)) + + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! 
mask out land points + if (n_divu (ns) /= 0) a2D(i,j,n_divu(ns), iblk) = spval + if (n_shear (ns) /= 0) a2D(i,j,n_shear(ns), iblk) = spval + if (n_sig1 (ns) /= 0) a2D(i,j,n_sig1(ns), iblk) = spval + if (n_sig2 (ns) /= 0) a2D(i,j,n_sig2(ns), iblk) = spval + if (n_mlt_onset(ns) /= 0) a2D(i,j,n_mlt_onset(ns),iblk) = spval + if (n_frz_onset(ns) /= 0) a2D(i,j,n_frz_onset(ns),iblk) = spval + if (n_hisnap (ns) /= 0) a2D(i,j,n_hisnap(ns), iblk) = spval + if (n_aisnap (ns) /= 0) a2D(i,j,n_aisnap(ns), iblk) = spval + if (n_trsig (ns) /= 0) a2D(i,j,n_trsig(ns), iblk) = spval + if (n_iage (ns) /= 0) a2D(i,j,n_iage(ns), iblk) = spval + if (n_FY (ns) /= 0) a2D(i,j,n_FY(ns), iblk) = spval + if (n_Tn_top (ns) /= 0) a3Dc(i,j,:,n_Tn_top(ns)-n2D,iblk) = spval + if (n_keffn_top (ns) /= 0) a3Dc(i,j,:,n_keffn_top(ns)-n2D,iblk) = spval + + if (n_a11 (ns) /= 0) a2D(i,j,n_a11(ns), iblk) = spval + if (n_a12 (ns) /= 0) a2D(i,j,n_a12(ns), iblk) = spval + if (n_e11 (ns) /= 0) a2D(i,j,n_e11(ns), iblk) = spval + if (n_e12 (ns) /= 0) a2D(i,j,n_e12(ns), iblk) = spval + if (n_e22 (ns) /= 0) a2D(i,j,n_e22(ns), iblk) = spval + if (n_s11 (ns) /= 0) a2D(i,j,n_s11(ns), iblk) = spval + if (n_s12 (ns) /= 0) a2D(i,j,n_s12(ns), iblk) = spval + if (n_s22 (ns) /= 0) a2D(i,j,n_s22(ns), iblk) = spval + if (n_yieldstress11 (ns) /= 0) a2D(i,j,n_yieldstress11(ns),iblk) = spval + if (n_yieldstress12 (ns) /= 0) a2D(i,j,n_yieldstress12(ns),iblk) = spval + if (n_yieldstress22 (ns) /= 0) a2D(i,j,n_yieldstress22(ns),iblk) = spval + else + if (n_divu (ns) /= 0) a2D(i,j,n_divu(ns),iblk) = & + divu (i,j,iblk)*avail_hist_fields(n_divu(ns))%cona + if (n_shear (ns) /= 0) a2D(i,j,n_shear(ns),iblk) = & + shear(i,j,iblk)*avail_hist_fields(n_shear(ns))%cona + if (n_sig1 (ns) /= 0) a2D(i,j,n_sig1(ns),iblk) = & + sig1 (i,j,iblk)*avail_hist_fields(n_sig1(ns))%cona + if (n_sig2 (ns) /= 0) a2D(i,j,n_sig2(ns),iblk) = & + sig2 (i,j,iblk)*avail_hist_fields(n_sig2(ns))%cona + if (n_mlt_onset(ns) /= 0) a2D(i,j,n_mlt_onset(ns),iblk) = & + mlt_onset(i,j,iblk) + if (n_frz_onset(ns) /= 0) a2D(i,j,n_frz_onset(ns),iblk) = & + frz_onset(i,j,iblk) + if (n_hisnap (ns) /= 0) a2D(i,j,n_hisnap(ns),iblk) = & + vice(i,j,iblk) + if (n_aisnap (ns) /= 0) a2D(i,j,n_aisnap(ns),iblk) = & + aice(i,j,iblk) + + if (kdyn == 2) then ! 
for EAP dynamics different time of output + if (n_trsig (ns) /= 0) a2D(i,j,n_trsig(ns),iblk ) = & + prs_sig(i,j,iblk) + else + if (n_trsig (ns) /= 0) a2D(i,j,n_trsig(ns),iblk ) = & + p25*(stressp_1(i,j,iblk) & + + stressp_2(i,j,iblk) & + + stressp_3(i,j,iblk) & + + stressp_4(i,j,iblk)) + endif + + if (n_iage (ns) /= 0) a2D(i,j,n_iage(ns),iblk) = & + trcr(i,j,nt_iage,iblk)*avail_hist_fields(n_iage(ns))%cona + if (n_FY (ns) /= 0) a2D(i,j,n_FY(ns),iblk) = & + trcr(i,j,nt_FY,iblk)*avail_hist_fields(n_FY(ns))%cona + + if (n_Tn_top (ns) /= 0) a3Dc(i,j,:,n_Tn_top(ns)-n2D,iblk) = & + Tn_top(i,j,:,iblk)*avail_hist_fields(n_Tn_top(ns))%cona + if (n_keffn_top(ns) /= 0) a3Dc(i,j,:,n_keffn_top(ns)-n2D,iblk)= & + keffn_top(i,j,:,iblk)*avail_hist_fields(n_keffn_top(ns))%cona + + if (n_a11 (ns) /= 0) a2D(i,j,n_a11(ns),iblk) = & + a11 (i,j,iblk)*avail_hist_fields(n_a11(ns))%cona + if (n_a12 (ns) /= 0) a2D(i,j,n_a12(ns),iblk) = & + a12 (i,j,iblk)*avail_hist_fields(n_a12(ns))%cona + if (n_e11 (ns) /= 0) a2D(i,j,n_e11(ns),iblk) = & + e11 (i,j,iblk)*avail_hist_fields(n_e11(ns))%cona + if (n_e12 (ns) /= 0) a2D(i,j,n_e12(ns),iblk) = & + e12 (i,j,iblk)*avail_hist_fields(n_e12(ns))%cona + if (n_e22 (ns) /= 0) a2D(i,j,n_e22(ns),iblk) = & + e22 (i,j,iblk)*avail_hist_fields(n_e22(ns))%cona + if (n_s11 (ns) /= 0) a2D(i,j,n_s11(ns),iblk) = & + s11 (i,j,iblk)*avail_hist_fields(n_s11(ns))%cona + if (n_s12 (ns) /= 0) a2D(i,j,n_s12(ns),iblk) = & + s12 (i,j,iblk)*avail_hist_fields(n_s12(ns))%cona + if (n_s22 (ns) /= 0) a2D(i,j,n_s22(ns),iblk) = & + s22 (i,j,iblk)*avail_hist_fields(n_s22(ns))%cona + if (n_yieldstress11 (ns) /= 0) a2D(i,j,n_yieldstress11(ns),iblk) = & + yieldstress11 (i,j,iblk)*avail_hist_fields(n_yieldstress11(ns))%cona + if (n_yieldstress12 (ns) /= 0) a2D(i,j,n_yieldstress12(ns),iblk) = & + yieldstress12 (i,j,iblk)*avail_hist_fields(n_yieldstress12(ns))%cona + if (n_yieldstress22 (ns) /= 0) a2D(i,j,n_yieldstress22(ns),iblk) = & + yieldstress22 (i,j,iblk)*avail_hist_fields(n_yieldstress22(ns))%cona + endif + enddo ! i + enddo ! j + + enddo ! iblk + !$OMP END PARALLEL DO + + time_end(ns) = time/int(secday) + time_end(ns) = real(time_end(ns),kind=real_kind) + + !--------------------------------------------------------------- + ! write file + !--------------------------------------------------------------- + + call ice_timer_start(timer_readwrite) ! reading/writing + call ice_write_hist (ns) + call ice_timer_stop(timer_readwrite) ! reading/writing + + !--------------------------------------------------------------- + ! reset to zero + !------------------------------------------------------------ + if (write_ic) then + if (allocated(a2D)) a2D (:,:,:,:) = c0 + if (allocated(a3Dc)) a3Dc(:,:,:,:,:) = c0 + if (allocated(a3Dz)) a3Dz(:,:,:,:,:) = c0 + if (allocated(a3Db)) a3Db(:,:,:,:,:) = c0 + if (allocated(a4Di)) a4Di(:,:,:,:,:,:) = c0 + if (allocated(a4Ds)) a4Ds(:,:,:,:,:,:) = c0 + if (allocated(a4Db)) a4Db(:,:,:,:,:,:) = c0 + avgct(:) = c0 + albcnt(:,:,:,:) = c0 + write_ic = .false. ! write initial condition once at most + else + avgct(ns) = c0 + albcnt(:,:,:,ns) = c0 + endif +! 
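! The per-field reset that follows clears only the accumulation buffers whose
! history frequency matches the stream that has just been written, so sums
! for the other streams keep running.  A toy illustration (field names and
! values are made up):
      program stream_reset_sketch
        implicit none
        character(len=1) :: vhistfreq(2) = (/ 'd', 'm' /)  ! per-field stream
        real             :: asum(2)      = (/ 4.2, 7.5 /)  ! running sums
        character(len=1) :: written      = 'd'             ! stream just written
        integer          :: n
        do n = 1, 2
           if (vhistfreq(n) == written) asum(n) = 0.0
        enddo
        print *, asum      ! daily sum reset, monthly sum untouched
      end program stream_reset_sketch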
if (write_history(ns)) albcnt(:,:,:,ns) = c0 + + do n = 1,n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a2D(:,:,n,:) = c0 + enddo + do n = n2D + 1, n3Dccum + nn = n - n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a3Dc(:,:,:,nn,:) = c0 + enddo + do n = n3Dccum + 1, n3Dzcum + nn = n - n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a3Dz(:,:,:,nn,:) = c0 + enddo + do n = n3Dzcum + 1, n3Dbcum + nn = n - n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a3Db(:,:,:,nn,:) = c0 + enddo + do n = n3Dbcum + 1, n4Dicum + nn = n - n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a4Di(:,:,:,:,nn,:) = c0 + enddo + do n = n4Dicum + 1, n4Dscum + nn = n - n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a4Ds(:,:,:,:,nn,:) = c0 + enddo + do n = n4Dscum + 1, n4Dbcum + nn = n - n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a4Db(:,:,:,:,nn,:) = c0 + enddo + + endif ! write_history or write_ic + enddo ! nstreams + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + if (new_year) then + do j=jlo,jhi + do i=ilo,ihi + ! reset NH Jan 1 + if (lmask_n(i,j,iblk)) mlt_onset(i,j,iblk) = c0 + ! reset SH Jan 1 + if (lmask_s(i,j,iblk)) frz_onset(i,j,iblk) = c0 + enddo + enddo + endif ! new_year + + if ( (month .eq. 7) .and. new_month ) then + do j=jlo,jhi + do i=ilo,ihi + ! reset SH Jul 1 + if (lmask_s(i,j,iblk)) mlt_onset(i,j,iblk) = c0 + ! reset NH Jul 1 + if (lmask_n(i,j,iblk)) frz_onset(i,j,iblk) = c0 + enddo + enddo + endif ! 1st of July + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine accum_hist + +!======================================================================= + + end module ice_history + +!======================================================================= diff --git a/source/ice_history_bgc.F90 b/source/ice_history_bgc.F90 new file mode 100755 index 00000000..4b52f727 --- /dev/null +++ b/source/ice_history_bgc.F90 @@ -0,0 +1,682 @@ +! SVN:$Id: ice_history_bgc.F90 745 2013-09-28 18:22:36Z eclare $ +!======================================================================= +! Biogeochemistry history output +! +! authors Elizabeth C. Hunke and Nicole Jeffery, LANL +! +! 2012 Elizabeth Hunke split code from ice_history.F90 + + module ice_history_bgc + + use ice_kinds_mod + use ice_constants + use ice_domain_size, only: max_nstrm, max_aero, n_aero, nblyr + use ice_zbgc_shared + + implicit none + private + public :: init_hist_bgc_2D, init_hist_bgc_3Dc, & + init_hist_bgc_4Db, accum_hist_bgc + save + + !--------------------------------------------------------------- + ! 
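! The f_* strings declared below follow the convention used in all of the
! history modules: 'x' switches a field off, and any other value is taken as
! the histfreq character(s) of the stream(s) the field should be written to.
! A minimal stand-alone illustration (the flag values here are made up):
      program hist_flag_sketch
        implicit none
        character(len=4) :: f_hbri = 'm', f_bTin = 'x'
        if (f_hbri(1:1) /= 'x') print *, 'hbrine written to stream(s) ', trim(f_hbri)
        if (f_bTin(1:1) /= 'x') print *, 'bTizn written'    ! not reached
      end program hist_flag_sketch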
flags: write to output file if true or histfreq value + !--------------------------------------------------------------- + + character (len=max_nstrm), public :: & + f_faero_atm = 'x', f_faero_ocn = 'x', & + f_aero = 'x', f_aeron = 'x', & + f_fNO = 'x', f_fNO_ai = 'x', & + f_fNH = 'x', f_fNH_ai = 'x', & + f_fN = 'x', f_fN_ai = 'x', & + f_fSil = 'x', f_fSil_ai = 'x', & + f_bgc_N_sk = 'x', f_bgc_C_sk = 'x', & + f_bgc_chl_sk = 'x', f_bgc_Nit_sk = 'x', & + f_bgc_Am_sk = 'x', f_bgc_Sil_sk = 'x', & + f_bgc_DMSPp_sk = 'x', f_bgc_DMSPd_sk = 'x', & + f_bgc_DMS_sk = 'x', f_bgc_Sil_ml = 'x', & + f_bgc_Nit_ml = 'x', f_bgc_Am_ml = 'x', & + f_bgc_DMSP_ml = 'x', f_bgc_DMS_ml = 'x', & + f_bTin = 'x', f_bphi = 'x', & + f_bgc_NO = 'x', & + f_bgc_N = 'x', f_bgc_NH = 'x', & + f_bgc_C = 'x', f_bgc_chl = 'x', & + f_bgc_DMSPp = 'x', f_bgc_DMSPd = 'x', & + f_bgc_DMS = 'x', f_bgc_Sil = 'x', & + f_bgc_S = 'x', & + f_fbri = 'x', f_hbri = 'x', & + f_grownet = 'x', f_PPnet = 'x' + + !--------------------------------------------------------------- + ! namelist variables + !--------------------------------------------------------------- + + namelist / icefields_bgc_nml / & + f_faero_atm , f_faero_ocn , & + f_aero , f_aeron , & + f_fNO , f_fNO_ai , & + f_fNH , f_fNH_ai , & + f_fN , f_fN_ai , & + f_fSil , f_fSil_ai , & + f_bgc_N_sk , f_bgc_C_sk , & + f_bgc_chl_sk , f_bgc_Nit_sk , & + f_bgc_Am_sk , f_bgc_Sil_sk , & + f_bgc_DMSPp_sk, f_bgc_DMSPd_sk, & + f_bgc_DMS_sk , f_bgc_Sil_ml , & + f_bgc_Nit_ml , f_bgc_Am_ml , & + f_bgc_DMSP_ml , f_bgc_DMS_ml , & + f_bTin , f_bphi , & + f_bgc_NO , & + f_bgc_N , f_bgc_NH , & + f_bgc_C , f_bgc_chl , & + f_bgc_DMSPp , f_bgc_DMSPd , & + f_bgc_DMS , f_bgc_Sil , & + f_bgc_S , & + f_fbri , f_hbri , & + f_grownet , f_PPnet + + !--------------------------------------------------------------- + ! field indices + !--------------------------------------------------------------- + + integer(kind=int_kind), dimension(max_aero,max_nstrm) :: & + n_faero_atm , & + n_faero_ocn , & + n_aerosn1 , & + n_aerosn2 , & + n_aeroic1 , & + n_aeroic2 + + integer(kind=int_kind), dimension(max_nstrm) :: & + n_fNO , n_fNO_ai , & + n_fNH , n_fNH_ai , & + n_fN , n_fN_ai , & + n_fSil , n_fSil_ai , & + n_bgc_N_sk , n_bgc_C_sk , & + n_bgc_chl_sk , n_bgc_Nit_sk , & + n_bgc_Am_sk , n_bgc_Sil_sk , & + n_bgc_DMSPp_sk, n_bgc_DMSPd_sk, & + n_bgc_DMS_sk , n_bgc_Sil_ml , & + n_bgc_Nit_ml , n_bgc_Am_ml , & + n_bgc_DMSP_ml , n_bgc_DMS_ml , & + n_bTin , n_bphi , & + n_bgc_NO , & + n_bgc_N , n_bgc_NH , & + n_bgc_C , n_bgc_chl , & + n_bgc_DMSPp , n_bgc_DMSPd , & + n_bgc_DMS , n_bgc_Sil , & + n_bgc_S , & + n_fbri , n_hbri , & + n_grownet , n_PPnet + +!======================================================================= + + contains + +!======================================================================= + + subroutine init_hist_bgc_2D + + use ice_broadcast, only: broadcast_scalar + use ice_calendar, only: nstreams + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c1 + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, nml_filename, & + get_fileunit, release_fileunit + use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_state, only: tr_aero, tr_brine + + integer (kind=int_kind) :: n, ns + integer (kind=int_kind) :: nml_error ! namelist i/o error flag + character (len=3) :: nchar + character (len=16) :: vname_in ! variable name + + !----------------------------------------------------------------- + ! 
read namelist + !----------------------------------------------------------------- + + call get_fileunit(nu_nml) + if (my_task == master_task) then + open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + do while (nml_error > 0) + read(nu_nml, nml=icefields_bgc_nml,iostat=nml_error) + end do + if (nml_error == 0) close(nu_nml) + endif + call release_fileunit(nu_nml) + + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + close (nu_nml) + call abort_ice('ice: error reading icefields_bgc_nml') + endif + + if (.not. tr_aero) then + f_faero_atm = 'x' + f_faero_ocn = 'x' + f_aero = 'x' + f_aeron = 'x' ! NOTE not implemented + endif + + if (.not. tr_brine) then + f_fbri = 'x' + f_hbri = 'x' + endif + if (.not. skl_bgc) then + f_bgc_N_sk = 'x' + f_bgc_C_sk = 'x' + f_bgc_chl_sk = 'x' + f_bgc_Nit_sk = 'x' + f_bgc_Am_sk = 'x' + f_bgc_Sil_sk = 'x' + f_bgc_DMSPp_sk = 'x' + f_bgc_DMSPd_sk = 'x' + f_bgc_DMS_sk = 'x' + f_bgc_Nit_ml = 'x' + f_bgc_Am_ml = 'x' + f_bgc_Sil_ml = 'x' + f_bgc_DMSP_ml = 'x' + f_bgc_DMS_ml = 'x' + + f_fNO = 'x' + f_fNO_ai = 'x' + f_fNH = 'x' + f_fNH_ai = 'x' + f_fN = 'x' + f_fN_ai = 'x' + f_fSil = 'x' + f_fSil_ai = 'x' + f_PPnet = 'x' + endif + if (.not. tr_bgc_C_sk) f_bgc_C_sk = 'x' + if (.not. tr_bgc_chl_sk) f_bgc_chl_sk = 'x' + if (.not. tr_bgc_Nit_sk) then + f_bgc_Nit_sk = 'x' + f_bgc_Nit_ml = 'x' + endif + if (.not. tr_bgc_Am_sk) then + f_bgc_Am_sk = 'x' + f_bgc_Am_ml = 'x' + endif + if (.not. tr_bgc_Sil_sk) then + f_bgc_Sil_sk = 'x' + f_bgc_Sil_ml = 'x' + endif + if (.not. tr_bgc_DMS_sk) then + f_bgc_DMS_sk = 'x' + f_bgc_DMSPp_sk = 'x' + f_bgc_DMSPd_sk = 'x' + f_bgc_DMS_ml = 'x' + f_bgc_DMS_ml = 'x' + endif + + call broadcast_scalar (f_faero_atm, master_task) + call broadcast_scalar (f_faero_ocn, master_task) + call broadcast_scalar (f_aero, master_task) + call broadcast_scalar (f_aeron, master_task) + + call broadcast_scalar (f_fbri, master_task) + call broadcast_scalar (f_hbri, master_task) + + call broadcast_scalar (f_fNO, master_task) + call broadcast_scalar (f_fNO_ai, master_task) + call broadcast_scalar (f_fNH, master_task) + call broadcast_scalar (f_fNH_ai, master_task) + call broadcast_scalar (f_fN, master_task) + call broadcast_scalar (f_fN_ai, master_task) + call broadcast_scalar (f_fSil, master_task) + call broadcast_scalar (f_fSil_ai, master_task) + call broadcast_scalar (f_bgc_N_sk, master_task) + call broadcast_scalar (f_bgc_C_sk, master_task) + call broadcast_scalar (f_bgc_chl_sk, master_task) + call broadcast_scalar (f_bgc_Nit_sk, master_task) + call broadcast_scalar (f_bgc_Am_sk, master_task) + call broadcast_scalar (f_bgc_Sil_sk, master_task) + call broadcast_scalar (f_bgc_DMSPp_sk, master_task) + call broadcast_scalar (f_bgc_DMSPd_sk, master_task) + call broadcast_scalar (f_bgc_DMS_sk, master_task) + call broadcast_scalar (f_bgc_Nit_ml, master_task) + call broadcast_scalar (f_bgc_Am_ml, master_task) + call broadcast_scalar (f_bgc_Sil_ml, master_task) + call broadcast_scalar (f_bgc_DMSP_ml, master_task) + call broadcast_scalar (f_bgc_DMS_ml, master_task) + call broadcast_scalar (f_bTin, master_task) + call broadcast_scalar (f_bphi, master_task) + call broadcast_scalar (f_PPnet, master_task) + call broadcast_scalar (f_grownet, master_task) + + ! 2D variables + do ns = 1, nstreams + + ! 
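! The read loop above uses a small iostat idiom: nml_error is forced to 1 so
! the loop is entered, records are read until the namelist group is found
! (iostat becomes 0) or the file ends (iostat < 0), and -1 marks a file that
! could not be opened.  A stand-alone sketch of the same idiom; the file and
! group names here are placeholders, not the model's:
      program namelist_read_sketch
        implicit none
        integer :: nu, ios
        logical :: f_demo
        namelist /icefields_demo_nml/ f_demo
        f_demo = .false.
        open (newunit=nu, file='demo_in', status='old', iostat=ios)
        if (ios /= 0) then
           ios = -1               ! open failed
        else
           ios = 1                ! force at least one read
        endif
        do while (ios > 0)
           read (nu, nml=icefields_demo_nml, iostat=ios)
        end do
        if (ios == 0) then
           close (nu)
        else
           print *, 'error reading icefields_demo_nml'  ! the model aborts here
        endif
      end program namelist_read_sketch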
Aerosols + if (f_aero(1:1) /= 'x') then + do n=1,n_aero + write(nchar,'(i3.3)') n + write(vname_in,'(a,a)') 'aerosnossl', trim(nchar) + call define_hist_field(n_aerosn1(n,:),vname_in,"kg/kg", & + tstr2D, tcstr,"snow ssl aerosol mass","none", c1, c0, & + ns, f_aero) + write(vname_in,'(a,a)') 'aerosnoint', trim(nchar) + call define_hist_field(n_aerosn2(n,:),vname_in,"kg/kg", & + tstr2D, tcstr,"snow int aerosol mass","none", c1, c0, & + ns, f_aero) + write(vname_in,'(a,a)') 'aeroicessl', trim(nchar) + call define_hist_field(n_aeroic1(n,:),vname_in,"kg/kg", & + tstr2D, tcstr,"ice ssl aerosol mass","none", c1, c0, & + ns, f_aero) + write(vname_in,'(a,a)') 'aeroiceint', trim(nchar) + call define_hist_field(n_aeroic2(n,:),vname_in,"kg/kg", & + tstr2D, tcstr,"ice int aerosol mass","none", c1, c0, & + ns, f_aero) + enddo + endif + + if (f_faero_atm(1:1) /= 'x') then + do n=1,n_aero + write(nchar,'(i3.3)') n + write(vname_in,'(a,a)') 'faero_atm', trim(nchar) + call define_hist_field(n_faero_atm(n,:),vname_in,"kg/m^2 s", & + tstr2D, tcstr,"aerosol deposition rate","none", c1, c0, & + ns, f_faero_atm) + enddo + endif + + if (f_faero_ocn(1:1) /= 'x') then + do n=1,n_aero + write(nchar,'(i3.3)') n + write(vname_in,'(a,a)') 'faero_ocn', trim(nchar) + call define_hist_field(n_faero_ocn(n,:),vname_in,"kg/m^2 s", & + tstr2D, tcstr,"aerosol flux to ocean","none", c1, c0, & + ns, f_faero_ocn) + enddo + endif + + ! skeletal layer tracers + if (f_bgc_N_sk(1:1) /= 'x') & + call define_hist_field(n_bgc_N_sk,"algal_N","mmol/m^2",tstr2D, tcstr, & + "ice bottom algae (nitrogen)", & + "skeletal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_N_sk) + if (f_bgc_C_sk(1:1) /= 'x') & + call define_hist_field(n_bgc_C_sk,"algal_C","mmol/m^2",tstr2D, tcstr, & + "ice bottom algae (carbon)", & + "skeletal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_C_sk) + if (f_bgc_chl_sk(1:1) /= 'x') & + call define_hist_field(n_bgc_chl_sk,"algal_chl","mmol/m^2?",tstr2D, tcstr, & + "ice bottom algae (chlorophyll)", & + "skeletal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_chl_sk) + if (f_bgc_Nit_sk(1:1) /= 'x') & + call define_hist_field(n_bgc_Nit_sk,"skl_Nit","mmol/m^2",tstr2D, tcstr, & + "skeletal nutrient (nitrate)", & + "skeletal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_Nit_sk) + if (f_bgc_Am_sk(1:1) /= 'x') & + call define_hist_field(n_bgc_Am_sk,"skl_Am","mmol/m^2",tstr2D, tcstr, & + "skeletal nutrient (ammonia/um)", & + "skeletal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_Am_sk) + if (f_bgc_Sil_sk(1:1) /= 'x') & + call define_hist_field(n_bgc_Sil_sk,"skl_Sil","mmol/m^2",tstr2D, tcstr, & + "skeletal nutrient (silicate)", & + "skelelal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_Sil_sk) + if (f_bgc_Nit_ml(1:1) /= 'x') & + call define_hist_field(n_bgc_Nit_ml,"ml_Nit","mmol/m^3",tstr2D, tcstr, & + "mixed layer nutrient (nitrate)", & + "upper ocean", c1, c0, & + ns, f_bgc_Nit_ml) + if (f_bgc_Am_ml(1:1) /= 'x') & + call define_hist_field(n_bgc_Am_ml,"ml_Am","mmol/m^3",tstr2D, tcstr, & + "mixed layer nutrient (ammonia/um)", & + "upper ocean", c1, c0, & + ns, f_bgc_Am_ml) + if (f_bgc_Sil_ml(1:1) /= 'x') & + call define_hist_field(n_bgc_Sil_ml,"ml_Sil","mmol/m^3",tstr2D, tcstr, & + "mixed layer nutrient (silicate)", & + "upper ocean", c1, c0, & + ns, f_bgc_Sil_ml) + if (f_bgc_DMSPp_sk(1:1) /= 'x') & + call define_hist_field(n_bgc_DMSPp_sk,"skl_DMSPp","mmol/m^2",tstr2D, tcstr, & + "particulate S in algae (DMSPp)", & + "skeletal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_DMSPp_sk) + if (f_bgc_DMSPd_sk(1:1) /= 'x') & + call 
define_hist_field(n_bgc_DMSPd_sk,"skl_DMSPd","mmol/m^2",tstr2D, tcstr, & + "dissolved skl precursor (DSMPd)", & + "skeletal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_DMSPd_sk) + if (f_bgc_DMS_sk(1:1) /= 'x') & + call define_hist_field(n_bgc_DMS_sk,"skl_DMS","mmol/m^2",tstr2D, tcstr, & + "dissolved skl trace gas (DMS)", & + "skeletal layer: bottom 2-3 cm", c1, c0, & + ns, f_bgc_DMS_sk) + if (f_bgc_DMSP_ml(1:1) /= 'x') & + call define_hist_field(n_bgc_DMSP_ml,"ml_DMSP","mmol/m^3",tstr2D, tcstr, & + "mixed layer precursor (DMSP)", & + "upper ocean", c1, c0, & + ns, f_bgc_DMSP_ml) + if (f_bgc_DMS_ml(1:1) /= 'x') & + call define_hist_field(n_bgc_DMS_ml,"ml_DMS","mmol/m^3",tstr2D, tcstr, & + "mixed layer trace gas (DMS)", & + "upper ocean", c1, c0, & + ns, f_bgc_DMS_ml) + + ! zbgc + if (f_fNO(1:1) /= 'x') & + call define_hist_field(n_fNO,"fNO","mmol/m^2/s",tstr2D, tcstr, & + "nitrate flux ice to ocn (cpl)", & + "if positive, ocean gains nitrate", c1, c0, & + ns, f_fNO) + + if (f_fNO_ai(1:1) /= 'x') & + call define_hist_field(n_fNO_ai,"fNO_ai","mmol/m^2/s",tstr2D, tcstr, & + "nitrate flux ice to ocean", & + "weighted by ice area", c1, c0, & + ns, f_fNO_ai) + + if (f_fNH(1:1) /= 'x') & + call define_hist_field(n_fNH,"fNH","mmol/m^2/s",tstr2D, tcstr, & + "ammonium flux ice to ocn (cpl)", & + "if positive, ocean gains ammonium", c1, c0, & + ns, f_fNH) + + if (f_fNH_ai(1:1) /= 'x') & + call define_hist_field(n_fNH_ai,"fNH_ai","mmol/m^2/s",tstr2D, tcstr, & + "ammonium flux ice to ocean", & + "weighted by ice area", c1, c0, & + ns, f_fNH_ai) + + if (f_fN(1:1) /= 'x') & + call define_hist_field(n_fN,"fN","mmol/m^2/s",tstr2D, tcstr, & + "algal N flux ice to ocn (cpl)", & + "if positive, ocean gains algal N", c1, c0, & + ns, f_fN) + + if (f_fN_ai(1:1) /= 'x') & + call define_hist_field(n_fN_ai,"fN_ai","mmol/m^2/s",tstr2D, tcstr, & + "algal N flux ice to ocean", & + "weighted by ice area", c1, c0, & + ns, f_fN_ai) + + if (f_fSil(1:1) /= 'x') & + call define_hist_field(n_fSil,"fSil","mmol/m^2/s",tstr2D, tcstr, & + "silicate flux ice to ocn (cpl)", & + "if positive, ocean gains silicate", c1, c0, & + ns, f_fSil) + + if (f_fSil_ai(1:1) /= 'x') & + call define_hist_field(n_fSil_ai,"fSil_ai","mmol/m^2/s",tstr2D, tcstr, & + "silicate flux ice to ocean", & + "weighted by ice area", c1, c0, & + ns, f_fSil_ai) + + ! both skl and zbgc + + if (f_PPnet(1:1) /= 'x') & + call define_hist_field(n_PPnet,"PP_net","mg C/d/m^2",tstr2D, tcstr, & + "Net Primary Production", & + "weighted by ice area", secday, c0, & + ns, f_PPnet) + if (f_grownet(1:1) /= 'x') & + call define_hist_field(n_grownet,"grow_net","/d",tstr2D, tcstr, & + "Net specific growth", & + "weighted by ice area", secday, c0, & + ns, f_grownet) + + ! brine + if (f_hbri(1:1) /= 'x') & + call define_hist_field(n_hbri,"hbrine","m",tstr2D, tcstr, & + "Area-averaged brine height", & + "distance from ice bottom to brine surface", c1, c0, & + ns, f_hbri) + + enddo ! nstreams + + end subroutine init_hist_bgc_2D + +!======================================================================= + + subroutine init_hist_bgc_3Dc + + use ice_broadcast, only: broadcast_scalar + use ice_calendar, only: nstreams + use ice_constants, only: c0, c1 + use ice_history_shared, only: tstr3Dc, tcstr, define_hist_field + + integer (kind=int_kind) :: ns + + ! 
3D (category) variables must be looped separately + do ns = 1, nstreams + if (f_fbri(1:1) /= 'x') & + call define_hist_field(n_fbri,"fbrine","1",tstr3Dc, tcstr, & + "brine tracer fraction of ice volume, cat", & + "none", c1, c0, & + ns, f_fbri) + enddo ! ns + + end subroutine init_hist_bgc_3Dc + +!======================================================================= + + subroutine init_hist_bgc_4Db + + use ice_calendar, only: nstreams + use ice_constants, only: c0, c1, c100, secday + use ice_history_shared, only: tstr4Db, tcstr, define_hist_field + + integer (kind=int_kind) :: ns + + ! biology vertical grid + + do ns = 1, nstreams + + if (f_bTin(1:1) /= 'x') & + call define_hist_field(n_bTin,"bTizn","C",tstr4Db, tcstr, & + "ice internal temperatures on bio grid", & + "interpolated to bio grid", c1, c0, & + ns, f_bTin) + + if (f_bphi(1:1) /= 'x') & + call define_hist_field(n_bphi,"bphizn","%",tstr4Db, tcstr, & + "porosity", "brine volume fraction", c100, c0, & + ns, f_bphi) + + enddo !ns + + end subroutine init_hist_bgc_4Db + +!======================================================================= + +! write average ice quantities or snapshots + + subroutine accum_hist_bgc (iblk) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_constants, only: c0, puny + use ice_domain, only: blocks_ice + use ice_domain_size, only: ncat, nblyr + use ice_flux, only: faero_atm, faero_ocn, sss + use ice_history_shared, only: n2D, a2D, a3Dc, n3Dccum, a3Db, & + n4Dscum, a4Db, & + ncat_hist, accum_hist_field, nzblyr + use ice_state, only: trcrn, trcr, aicen, vice, vicen, nt_aero, nt_fbri, & + nt_bgc_N_sk, nt_bgc_C_sk, nt_bgc_chl_sk, nt_bgc_Nit_sk, & + nt_bgc_Am_sk, nt_bgc_Sil_sk, nt_bgc_DMSPp_sk, nt_bgc_DMSPd_sk, & + nt_bgc_DMS_sk, nt_bgc_Nit_ml, nt_bgc_Am_ml, nt_bgc_Sil_ml, & + nt_bgc_DMSP_ml, nt_bgc_DMS_ml + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + i, j, n, & ! loop indices + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension (nx_block,ny_block,nblyr+2,ncat) :: & + workzn + + type (block) :: & + this_block ! block information for current block + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + ! Aerosols + if (f_faero_atm(1:1) /= 'x') then + do n=1,n_aero + call accum_hist_field(n_faero_atm(n,:),iblk, & + faero_atm(:,:,n,iblk), a2D) + enddo + endif + if (f_faero_ocn(1:1) /= 'x') then + do n=1,n_aero + call accum_hist_field(n_faero_ocn(n,:),iblk, & + faero_ocn(:,:,n,iblk), a2D) + enddo + endif + if (f_aero(1:1) /= 'x') then + do n=1,n_aero + call accum_hist_field(n_aerosn1(n,:), iblk, & + trcr(:,:,nt_aero +4*(n-1),iblk)/rhos, a2D) + call accum_hist_field(n_aerosn2(n,:), iblk, & + trcr(:,:,nt_aero+1+4*(n-1),iblk)/rhos, a2D) + call accum_hist_field(n_aeroic1(n,:), iblk, & + trcr(:,:,nt_aero+2+4*(n-1),iblk)/rhoi, a2D) + call accum_hist_field(n_aeroic2(n,:), iblk, & + trcr(:,:,nt_aero+3+4*(n-1),iblk)/rhoi, a2D) + enddo + endif + + ! 
skeletal layer bgc + if (f_bgc_N_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_N_sk, iblk, & + trcr(:,:,nt_bgc_N_sk, iblk), a2D) + + if (f_bgc_C_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_C_sk, iblk, & + trcr(:,:,nt_bgc_C_sk, iblk), a2D) + if (f_bgc_chl_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_chl_sk, iblk, & + trcr(:,:,nt_bgc_chl_sk, iblk), a2D) + if (f_bgc_Nit_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_Nit_sk, iblk, & + trcr(:,:,nt_bgc_Nit_sk, iblk), a2D) + if (f_bgc_Am_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_Am_sk, iblk, & + trcr(:,:,nt_bgc_Am_sk, iblk), a2D) + if (f_bgc_Sil_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_Sil_sk, iblk, & + trcr(:,:,nt_bgc_Sil_sk, iblk), a2D) + if (f_bgc_DMSPp_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_DMSPp_sk,iblk, & + trcr(:,:,nt_bgc_DMSPp_sk,iblk), a2D) + + if (f_bgc_DMSPd_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_DMSPd_sk,iblk, & + trcr(:,:,nt_bgc_DMSPd_sk,iblk), a2D) + if (f_bgc_DMS_sk(1:1)/= 'x') & + call accum_hist_field(n_bgc_DMS_sk, iblk, & + trcr(:,:,nt_bgc_DMS_sk, iblk), a2D) + if (f_bgc_Nit_ml(1:1)/= 'x') & + call accum_hist_field(n_bgc_Nit_ml, iblk, & + ocean_bio(:,:,nlt_bgc_NO, iblk), a2D) + if (f_bgc_Am_ml(1:1)/= 'x') & + call accum_hist_field(n_bgc_Am_ml, iblk, & + ocean_bio(:,:,nlt_bgc_NH, iblk), a2D) + if (f_bgc_Sil_ml(1:1)/= 'x') & + call accum_hist_field(n_bgc_Sil_ml, iblk, & + ocean_bio(:,:,nlt_bgc_Sil, iblk), a2D) + if (f_bgc_DMSP_ml(1:1)/= 'x') & + call accum_hist_field(n_bgc_DMSP_ml, iblk, & + ocean_bio(:,:,nlt_bgc_DMSPp, iblk), a2D) + if (f_bgc_DMS_ml(1:1)/= 'x') & + call accum_hist_field(n_bgc_DMS_ml, iblk, & + ocean_bio(:,:,nlt_bgc_DMS, iblk), a2D) + + ! zbgc + if (f_fNO (1:1) /= 'x') & + call accum_hist_field(n_fNO, iblk, & + flux_bio(:,:,nlt_bgc_NO,iblk), a2D) + if (f_fNO_ai(1:1)/= 'x') & + call accum_hist_field(n_fNO_ai, iblk, & + flux_bio_ai(:,:,nlt_bgc_NO,iblk), a2D) + + if (f_fNH (1:1) /= 'x') & + call accum_hist_field(n_fNH, iblk, & + flux_bio(:,:,nlt_bgc_NH,iblk), a2D) + if (f_fNH_ai(1:1)/= 'x') & + call accum_hist_field(n_fNH_ai, iblk, & + flux_bio_ai(:,:,nlt_bgc_NH,iblk), a2D) + + if (f_fN (1:1) /= 'x') & + call accum_hist_field(n_fN, iblk, & + flux_bio(:,:,nlt_bgc_N,iblk), a2D) + if (f_fN_ai(1:1)/= 'x') & + call accum_hist_field(n_fN_ai, iblk, & + flux_bio_ai(:,:,nlt_bgc_N,iblk), a2D) + + if (f_fSil (1:1) /= 'x') & + call accum_hist_field(n_fSil, iblk, & + flux_bio(:,:,nlt_bgc_Sil,iblk), a2D) + if (f_fSil_ai(1:1)/= 'x') & + call accum_hist_field(n_fSil_ai, iblk, & + flux_bio_ai(:,:,nlt_bgc_Sil,iblk), a2D) + if (f_PPnet (1:1) /= 'x') & + call accum_hist_field(n_PPnet, iblk, & + PP_net(:,:,iblk), a2D) + if (f_grownet (1:1) /= 'x') & + call accum_hist_field(n_grownet, iblk, & + grow_net(:,:,iblk), a2D) + if (f_hbri (1:1) /= 'x') & + call accum_hist_field(n_hbri, iblk, & + hbri(:,:,iblk), a2D) + + ! 
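! Several diagnostics above come in pairs, e.g. fNO vs fNO_ai: the '_ai'
! version is described as "weighted by ice area", i.e. schematically a
! per-ice-area flux turned into a grid-cell mean by multiplying with the ice
! fraction (as done explicitly for fsurf_ai in ice_history.F90 above).
! Illustration only; the numbers below are made up:
      program area_weight_sketch
        implicit none
        real :: fsurf, aice, fsurf_ai
        fsurf = 20.0          ! flux over the ice-covered part of the cell (W/m^2)
        aice  = 0.35          ! ice area fraction of the grid cell
        fsurf_ai = fsurf*aice ! grid-cell mean, cf. the '_ai' history fields
        print *, 'fsurf_ai =', fsurf_ai, ' W/m^2'
      end program area_weight_sketch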
3D category fields + + if (f_fbri (1:1) /= 'x') & + call accum_hist_field(n_fbri-n2D, iblk, ncat_hist, & + trcrn(:,:,nt_fbri,1:ncat_hist,iblk), a3Dc) + + if (f_bTin (1:1) /= 'x') & + call accum_hist_field(n_bTin-n4Dscum, iblk, nzblyr, ncat_hist, & + bTiz(:,:,1:nzblyr,1:ncat_hist,iblk), a4Db) + + if (f_bphi (1:1) /= 'x') then + workzn(:,:,:,:) = c0 + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j,n,iblk) > c0) then + workzn(i,j,1:nzblyr,n) = bphi(i,j,1:nzblyr,n,iblk) + endif + enddo !j + enddo !i + enddo !n + call accum_hist_field(n_bphi-n4Dscum, iblk, nzblyr, ncat_hist, & + workzn(:,:,1:nzblyr,1:ncat_hist), a4Db) + endif + + end subroutine accum_hist_bgc + +!======================================================================= + + end module ice_history_bgc + +!======================================================================= diff --git a/source/ice_history_drag.F90 b/source/ice_history_drag.F90 new file mode 100755 index 00000000..c26a3265 --- /dev/null +++ b/source/ice_history_drag.F90 @@ -0,0 +1,286 @@ +! SVN:$Id: ice_history_drag.F90 936 2015-03-17 15:46:44Z eclare $ +!======================================================================= + +! 2013 module for form drag parameters +! authors Michel Tsamados, David Schroeder, CPOM + + module ice_history_drag + + use ice_kinds_mod + use ice_domain_size, only: max_nstrm + + implicit none + private + public :: accum_hist_drag, init_hist_drag_2D + save + + !--------------------------------------------------------------- + ! flags: write to output file if true or histfreq value + !--------------------------------------------------------------- + + character (len=max_nstrm), public :: & + f_Cdn_atm = 'x', f_Cdn_ocn = 'x' , & + f_drag = 'x' + + !--------------------------------------------------------------- + ! namelist variables + !--------------------------------------------------------------- + + namelist / icefields_drag_nml / & + f_Cdn_atm, f_Cdn_ocn , & + f_drag + + !--------------------------------------------------------------- + ! field indices + !--------------------------------------------------------------- + + integer (kind=int_kind), dimension(max_nstrm) :: & + n_hfreebd , n_hdraft, & + n_hridge , n_distrdg, & + n_hkeel , n_dkeel, & + n_lfloe , n_dfloe, & + n_Cdn_atm , n_Cdn_ocn, & + n_Cdn_atm_skin , n_Cdn_atm_floe, & + n_Cdn_atm_pond , n_Cdn_atm_rdg, & + n_Cdn_ocn_skin , n_Cdn_ocn_floe, & + n_Cdn_ocn_keel , n_Cdn_atm_ratio + +!======================================================================= + + contains + +!======================================================================= + +! Initialize history files +! authors Elizabeth C. Hunke, LANL + + subroutine init_hist_drag_2D + + use ice_broadcast, only: broadcast_scalar + use ice_calendar, only: nstreams + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c1, secday, c100, mps_to_cmpdy + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, nml_filename, & + get_fileunit, release_fileunit + use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_state, only: tr_lvl + + integer (kind=int_kind) :: ns + integer (kind=int_kind) :: nml_error ! namelist i/o error flag + + !----------------------------------------------------------------- + ! 
read namelist + !----------------------------------------------------------------- + + call get_fileunit(nu_nml) + if (my_task == master_task) then + open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + do while (nml_error > 0) + read(nu_nml, nml=icefields_drag_nml,iostat=nml_error) + if (nml_error > 0) read(nu_nml,*) ! for Nagware compiler + end do + if (nml_error == 0) close(nu_nml) + endif + call release_fileunit(nu_nml) + + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + close (nu_nml) + call abort_ice('ice: error reading icefields_drag_nml') + endif + + call broadcast_scalar (f_Cdn_atm, master_task) + call broadcast_scalar (f_Cdn_ocn, master_task) + call broadcast_scalar (f_drag, master_task) + + ! 2D variables + + do ns = 1, nstreams + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_hfreebd,"hfreebd","m",tstr2D, tcstr, & + "hfreebd: freeboard", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_hdraft,"hdraft","m",tstr2D, tcstr, & + "hdraft: draught", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_hridge,"hridge","m",tstr2D, tcstr, & + "hridge: ridge height", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_distrdg,"distrdg","m",tstr2D, tcstr, & + "distrdg: distance between ridges", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_hkeel,"hkeel","m",tstr2D, tcstr, & + "hkeel: keel depth", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_dkeel,"dkeel","m",tstr2D, tcstr, & + "dkeel: distance between keels", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_lfloe,"lfloe","m",tstr2D, tcstr, & + "lfloe: floe length", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_dfloe,"dfloe","m",tstr2D, tcstr, & + "dfloe: distance between floes", & + "none", c1, c0, & + ns, f_drag) + + if (f_Cdn_atm(1:1) /= 'x') & + call define_hist_field(n_Cdn_atm,"Cdn_atm","none",tstr2D, tcstr, & + "Ca: total ice-atm drag coefficient", & + "none", c1, c0, & + ns, f_Cdn_atm) + + if (f_Cdn_ocn(1:1) /= 'x') & + call define_hist_field(n_Cdn_ocn,"Cdn_ocn","none",tstr2D, tcstr, & + "Cdn_ocn: total ice-ocn drag coefficient", & + "none", c1, c0, & + ns, f_Cdn_ocn) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_Cdn_atm_skin,"Cdn_atm_skin","none", & + tstr2D, tcstr, & + "Cdn_atm_skin: neutral skin ice-atm drag coefficient", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_Cdn_atm_floe,"Cdn_atm_floe","none", & + tstr2D, tcstr, & + "Cdn_atm_floe: neutral floe edge ice-atm drag coefficient", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_Cdn_atm_pond,"Cdn_atm_pond","none", & + tstr2D, tcstr, & + "Cdn_atm_pond: neutral pond edge ice-atm drag coefficient", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_Cdn_atm_rdg,"Cdn_atm_rdg","none", & + tstr2D, tcstr, & + "Cdn_atm_rdg: neutral ridge ice-atm drag coefficient", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_Cdn_ocn_skin,"Cdn_ocn_skin","none", & + tstr2D, tcstr, & + "Cdn_ocn_skin: neutral skin ice-ocn drag coefficient", & + "none", c1, c0, & + ns, f_drag) + + if 
(f_drag(1:1) /= 'x') & + call define_hist_field(n_Cdn_ocn_floe,"Cdn_ocn_floe","none", & + tstr2D, tcstr, & + "Cdn_ocn_floe: neutral floe edge ice-ocn drag coefficient", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_Cdn_ocn_keel,"Cdn_ocn_keel","none", & + tstr2D, tcstr, & + "Cdn_ocn_keel: neutral keel ice-ocn drag coefficient", & + "none", c1, c0, & + ns, f_drag) + + if (f_drag(1:1) /= 'x') & + call define_hist_field(n_Cdn_atm_ratio,"Cdn_atm_ratio", & + "none",tstr2D, tcstr, & + "Cdn_atm_ratio: ratio total drag / neutral drag (atm)", & + "none", c1, c0, & + ns, f_drag) + + enddo ! nstreams + + end subroutine init_hist_drag_2D + +!======================================================================= + +! accumulate average ice quantities or snapshots + + subroutine accum_hist_drag (iblk) + + use ice_constants, only: c1 + use ice_history_shared, only: n2D, a2D, a3Dc, ncat_hist, & + accum_hist_field + use ice_atmo, only: hfreebd, hdraft, hridge, distrdg, hkeel, & + dkeel, lfloe, dfloe, Cdn_atm, Cdn_atm_skin, Cdn_atm_floe, & + Cdn_atm_pond, Cdn_atm_rdg, Cdn_atm_ratio, Cdn_ocn_skin, & + Cdn_ocn_keel, Cdn_ocn_floe, Cdn_ocn + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + ! 2D fields + + if (f_Cdn_atm (1:1) /= 'x') & + call accum_hist_field(n_Cdn_atm, iblk, Cdn_atm(:,:,iblk), a2D) + if (f_Cdn_ocn (1:1) /= 'x') & + call accum_hist_field(n_Cdn_ocn, iblk, Cdn_ocn(:,:,iblk), a2D) + if (f_drag (1:1) /= 'x') then + call accum_hist_field(n_hfreebd, iblk, hfreebd(:,:,iblk), a2D) + call accum_hist_field(n_hdraft, iblk, hdraft(:,:,iblk), a2D) + call accum_hist_field(n_hridge, iblk, hridge(:,:,iblk), a2D) + call accum_hist_field(n_distrdg, iblk, distrdg(:,:,iblk), a2D) + call accum_hist_field(n_hkeel, iblk, hkeel(:,:,iblk), a2D) + call accum_hist_field(n_dkeel, iblk, dkeel(:,:,iblk), a2D) + call accum_hist_field(n_lfloe, iblk, lfloe(:,:,iblk), a2D) + call accum_hist_field(n_dfloe, iblk, dfloe(:,:,iblk), a2D) + call accum_hist_field(n_Cdn_atm_rdg, & + iblk, Cdn_atm_rdg(:,:,iblk), a2D) + call accum_hist_field(n_Cdn_atm_floe, & + iblk, Cdn_atm_floe(:,:,iblk), a2D) + call accum_hist_field(n_Cdn_atm_pond, & + iblk, Cdn_atm_pond(:,:,iblk), a2D) + call accum_hist_field(n_Cdn_atm_skin, & + iblk, Cdn_atm_skin(:,:,iblk), a2D) + call accum_hist_field(n_Cdn_atm_ratio, & + iblk, Cdn_atm_ratio(:,:,iblk), a2D) + call accum_hist_field(n_Cdn_ocn_keel, & + iblk, Cdn_ocn_keel(:,:,iblk), a2D) + call accum_hist_field(n_Cdn_ocn_floe, & + iblk, Cdn_ocn_floe(:,:,iblk), a2D) + call accum_hist_field(n_Cdn_ocn_skin, & + iblk, Cdn_ocn_skin(:,:,iblk), a2D) + end if + + end subroutine accum_hist_drag + +!======================================================================= + + end module ice_history_drag + +!======================================================================= diff --git a/source/ice_history_mechred.F90 b/source/ice_history_mechred.F90 new file mode 100755 index 00000000..e63feb7a --- /dev/null +++ b/source/ice_history_mechred.F90 @@ -0,0 +1,377 @@ +! SVN:$Id: ice_history_mechred.F90 700 2013-08-15 19:17:39Z eclare $ +!======================================================================= + +! Mechanical redistribution history output +! +! 
2012 Elizabeth Hunke split code from ice_history.F90 + + module ice_history_mechred + + use ice_kinds_mod + use ice_domain_size, only: max_nstrm + + implicit none + private + public :: accum_hist_mechred, init_hist_mechred_2D, init_hist_mechred_3Dc + save + + !--------------------------------------------------------------- + ! flags: write to output file if true or histfreq value + !--------------------------------------------------------------- + + character (len=max_nstrm), public :: & + f_ardg = 'm', f_vrdg = 'm', & + f_alvl = 'm', f_vlvl = 'm', & + f_dardg1dt = 'm', f_dardg2dt = 'm', & + f_dvirdgdt = 'm', f_opening = 'm', & + f_ardgn = 'x', f_vrdgn = 'x', & + f_dardg1ndt = 'x', f_dardg2ndt = 'x', & + f_dvirdgndt = 'x', & + f_aparticn = 'x', f_krdgn = 'x', & + f_aredistn = 'x', f_vredistn = 'x', & + f_araftn = 'x', f_vraftn = 'x' + + !--------------------------------------------------------------- + ! namelist variables + !--------------------------------------------------------------- + + namelist / icefields_mechred_nml / & + f_ardg, f_vrdg , & + f_alvl, f_vlvl , & + f_dardg1dt, f_dardg2dt , & + f_dvirdgdt, f_opening , & + f_ardgn, f_vrdgn , & + f_dardg1ndt, f_dardg2ndt, & + f_dvirdgndt, & + f_aparticn, f_krdgn , & + f_aredistn, f_vredistn , & + f_araftn, f_vraftn + + !--------------------------------------------------------------- + ! field indices + !--------------------------------------------------------------- + + integer (kind=int_kind), dimension(max_nstrm) :: & + n_ardg , n_vrdg , & + n_alvl , n_vlvl , & + n_dardg1dt , n_dardg2dt , & + n_dvirdgdt , n_opening , & + n_ardgn , n_vrdgn , & + n_dardg1ndt , n_dardg2ndt , & + n_dvirdgndt , & + n_aparticn , n_krdgn , & + n_aredistn , n_vredistn , & + n_araftn , n_vraftn + +!======================================================================= + + contains + +!======================================================================= + +! Initialize history files +! authors Elizabeth C. Hunke, LANL + + subroutine init_hist_mechred_2D + + use ice_broadcast, only: broadcast_scalar + use ice_calendar, only: nstreams + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c1, secday, c100, mps_to_cmpdy + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, nml_filename, & + get_fileunit, release_fileunit + use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_state, only: tr_lvl + + integer (kind=int_kind) :: ns + integer (kind=int_kind) :: nml_error ! namelist i/o error flag + + !----------------------------------------------------------------- + ! read namelist + !----------------------------------------------------------------- + + call get_fileunit(nu_nml) + if (my_task == master_task) then + open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + do while (nml_error > 0) + read(nu_nml, nml=icefields_mechred_nml,iostat=nml_error) + end do + if (nml_error == 0) close(nu_nml) + endif + call release_fileunit(nu_nml) + + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + close (nu_nml) + call abort_ice('ice: error reading icefields_mechred_nml') + endif + + if (.not. tr_lvl) then + f_ardg = 'x' + f_vrdg = 'x' + f_alvl = 'x' + f_vlvl = 'x' + f_ardgn = 'x' + f_vrdgn = 'x' + f_araftn = 'x' + f_vraftn = 'x' + endif + if (f_araftn /= 'x' .or. 
f_vraftn /= 'x') f_ardgn = f_araftn + + call broadcast_scalar (f_ardg, master_task) + call broadcast_scalar (f_vrdg, master_task) + call broadcast_scalar (f_alvl, master_task) + call broadcast_scalar (f_vlvl, master_task) + call broadcast_scalar (f_dardg1dt, master_task) + call broadcast_scalar (f_dardg2dt, master_task) + call broadcast_scalar (f_dvirdgdt, master_task) + call broadcast_scalar (f_opening, master_task) + call broadcast_scalar (f_ardgn, master_task) + call broadcast_scalar (f_vrdgn, master_task) + call broadcast_scalar (f_dardg1ndt, master_task) + call broadcast_scalar (f_dardg2ndt, master_task) + call broadcast_scalar (f_dvirdgndt, master_task) + call broadcast_scalar (f_krdgn, master_task) + call broadcast_scalar (f_aparticn, master_task) + call broadcast_scalar (f_aredistn, master_task) + call broadcast_scalar (f_vredistn, master_task) + call broadcast_scalar (f_araftn, master_task) + call broadcast_scalar (f_vraftn, master_task) + + ! 2D variables + + do ns = 1, nstreams + + if (f_alvl(1:1) /= 'x') & + call define_hist_field(n_alvl,"alvl","1",tstr2D, tcstr, & + "level ice area fraction", & + "none", c1, c0, & + ns, f_alvl) + if (f_vlvl(1:1) /= 'x') & + call define_hist_field(n_vlvl,"vlvl","m",tstr2D, tcstr, & + "level ice volume", & + "grid cell mean level ice thickness", c1, c0, & + ns, f_vlvl) + if (f_ardg(1:1) /= 'x') & + call define_hist_field(n_ardg,"ardg","1",tstr2D, tcstr, & + "ridged ice area fraction", & + "none", c1, c0, & + ns, f_ardg) + if (f_vrdg(1:1) /= 'x') & + call define_hist_field(n_vrdg,"vrdg","m",tstr2D, tcstr, & + "ridged ice volume", & + "grid cell mean level ridged thickness", c1, c0, & + ns, f_vrdg) + + if (f_dardg1dt(1:1) /= 'x') & + call define_hist_field(n_dardg1dt,"dardg1dt","%/day",tstr2D, tcstr, & + "ice area ridging rate", & + "none", secday*c100, c0, & + ns, f_dardg1dt) + + if (f_dardg2dt(1:1) /= 'x') & + call define_hist_field(n_dardg2dt,"dardg2dt","%/day",tstr2D, tcstr, & + "ridge area formation rate", & + "none", secday*c100, c0, & + ns, f_dardg2dt) + + if (f_dvirdgdt(1:1) /= 'x') & + call define_hist_field(n_dvirdgdt,"dvirdgdt","cm/day",tstr2D, tcstr, & + "ice volume ridging rate", & + "none", mps_to_cmpdy, c0, & + ns, f_dvirdgdt) + + if (f_opening(1:1) /= 'x') & + call define_hist_field(n_opening,"opening","%/day",tstr2D, tcstr, & + "lead area opening rate", & + "none", secday*c100, c0, & + ns, f_opening) + + enddo ! nstreams + + end subroutine init_hist_mechred_2D + +!======================================================================= + + subroutine init_hist_mechred_3Dc + + use ice_constants, only: c0, c1, secday, c100, mps_to_cmpdy + use ice_calendar, only: nstreams + use ice_exit, only: abort_ice + use ice_history_shared, only: tstr3Dc, tcstr, define_hist_field + + integer (kind=int_kind) :: ns + + !----------------------------------------------------------------- + ! 
3D (category) variables must be looped separately + !----------------------------------------------------------------- + + do ns = 1, nstreams + + if (f_ardgn(1:1) /= 'x') & + call define_hist_field(n_ardgn,"ardgn","1",tstr3Dc, tcstr, & + "ridged ice area fraction, category", & + "none", c1, c0, & + ns, f_ardgn) + + if (f_vrdgn(1:1) /= 'x') & + call define_hist_field(n_vrdgn,"vrdgn","m",tstr3Dc, tcstr, & + "ridged ice volume, category", & + "grid cell mean ridged ice thickness", c1, c0, & + ns, f_vrdgn) + + if (f_dardg1ndt(1:1) /= 'x') & + call define_hist_field(n_dardg1ndt,"dardg1ndt","%/day",tstr3Dc, tcstr, & + "ice area ridging rate, category", & + "none", secday*c100, c0, & + ns, f_dardg1ndt) + + if (f_dardg2ndt(1:1) /= 'x') & + call define_hist_field(n_dardg2ndt,"dardg2ndt","%/day",tstr3Dc, tcstr, & + "ridge area formation rate, category", & + "none", secday*c100, c0, & + ns, f_dardg2ndt) + + if (f_dvirdgndt(1:1) /= 'x') & + call define_hist_field(n_dvirdgndt,"dvirdgndt","cm/day",tstr3Dc, tcstr, & + "ice volume ridging rate, category", & + "none", mps_to_cmpdy, c0, & + ns, f_dvirdgndt) + + if (f_krdgn(1:1) /= 'x') & + call define_hist_field(n_krdgn,"krdgn","1",tstr3Dc, tcstr, & + "ridging thickness factor, category", & + "mean ridge thickness/thickness of ridging ice", c1, c0, & + ns, f_krdgn) + + if (f_aparticn(1:1) /= 'x') & + call define_hist_field(n_aparticn,"aparticn","1",tstr3Dc, tcstr, & + "ridging ice participation function, category", & + "fraction of new ridge area added to cat", c1, c0, & + ns, f_aparticn) + + if (f_aredistn(1:1) /= 'x') & + call define_hist_field(n_aredistn,"aredistn","1",tstr3Dc, tcstr, & + "ridging ice area redistribution function, category", & + "fraction of new ridge volume added to cat", c1, c0, & + ns, f_aredistn) + + if (f_vredistn(1:1) /= 'x') & + call define_hist_field(n_vredistn,"vredistn","1",tstr3Dc, tcstr, & + "ridging ice volume redistribution function, category", & + "none", c1, c0, & + ns, f_vredistn) + + if (f_araftn(1:1) /= 'x') & + call define_hist_field(n_araftn,"araftn","1",tstr3Dc, tcstr, & + "rafted ice area fraction, category", & + "none", c1, c0, & + ns, f_araftn) + + if (f_vraftn(1:1) /= 'x') & + call define_hist_field(n_vraftn,"vraftn","1",tstr3Dc, tcstr, & + "rafted ice volume, category", & + "none", c1, c0, & + ns, f_vraftn) + + enddo ! ns + + end subroutine init_hist_mechred_3Dc + +!======================================================================= + +! accumulate average ice quantities or snapshots +! author: Elizabeth C. Hunke, LANL + + subroutine accum_hist_mechred (iblk) + + use ice_constants, only: c1 + use ice_history_shared, only: n2D, a2D, a3Dc, ncat_hist, & + accum_hist_field + use ice_state, only: aice, vice, trcr, nt_alvl, nt_vlvl, & + aicen, vicen, trcrn + use ice_flux, only: dardg1dt, dardg2dt, dvirdgdt, dardg1ndt,& + dardg2ndt, dvirdgndt, krdgn, aparticn, aredistn, vredistn, & + araftn, vraftn, opening + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + ! 
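! In the 2D accumulation that follows, ridged ice is obtained from the level
! ice tracers: alvl (vlvl) is the level fraction of the ice area (volume), so
! the ridged part is aice*(1-alvl) and vice*(1-vlvl).  A toy evaluation with
! made-up numbers:
      program level_ridged_sketch
        implicit none
        real :: aice, vice, alvl, vlvl, ardg, vrdg
        aice = 0.80           ! total ice area fraction
        vice = 1.60           ! grid-cell mean ice thickness (m)
        alvl = 0.70           ! level fraction of the ice area (tracer nt_alvl)
        vlvl = 0.60           ! level fraction of the ice volume (tracer nt_vlvl)
        ardg = aice*(1.0 - alvl)
        vrdg = vice*(1.0 - vlvl)
        print *, 'ridged area, volume =', ardg, vrdg
      end program level_ridged_sketch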
2D fields + + if (f_alvl(1:1)/= 'x') & + call accum_hist_field(n_alvl, iblk, & + aice(:,:,iblk) * trcr(:,:,nt_alvl,iblk), a2D) + if (f_vlvl(1:1)/= 'x') & + call accum_hist_field(n_vlvl, iblk, & + vice(:,:,iblk) * trcr(:,:,nt_vlvl,iblk), a2D) + if (f_ardg(1:1)/= 'x') & + call accum_hist_field(n_ardg, iblk, & + aice(:,:,iblk) * (c1 - trcr(:,:,nt_alvl,iblk)), a2D) + if (f_vrdg(1:1)/= 'x') & + call accum_hist_field(n_vrdg, iblk, & + vice(:,:,iblk) * (c1 - trcr(:,:,nt_vlvl,iblk)), a2D) + if (f_dardg1dt(1:1)/= 'x') & + call accum_hist_field(n_dardg1dt,iblk, dardg1dt(:,:,iblk), a2D) + if (f_dardg2dt(1:1)/= 'x') & + call accum_hist_field(n_dardg2dt,iblk, dardg2dt(:,:,iblk), a2D) + if (f_dvirdgdt(1:1)/= 'x') & + call accum_hist_field(n_dvirdgdt,iblk, dvirdgdt(:,:,iblk), a2D) + if (f_opening(1:1) /= 'x') & + call accum_hist_field(n_opening, iblk, opening(:,:,iblk), a2D) + + ! 3D category fields + + if (f_ardgn(1:1)/= 'x') & + call accum_hist_field(n_ardgn-n2D, iblk, ncat_hist, & + aicen(:,:,1:ncat_hist,iblk) & + * (c1 - trcrn(:,:,nt_alvl,1:ncat_hist,iblk)), a3Dc) + if (f_vrdgn(1:1)/= 'x') & + call accum_hist_field(n_vrdgn-n2D, iblk, ncat_hist, & + vicen(:,:,1:ncat_hist,iblk) & + * (c1 - trcrn(:,:,nt_vlvl,1:ncat_hist,iblk)), a3Dc) + if (f_dardg1ndt(1:1)/= 'x') & + call accum_hist_field(n_dardg1ndt-n2D, iblk, ncat_hist, & + dardg1ndt(:,:,1:ncat_hist,iblk), a3Dc) + if (f_dardg2ndt(1:1)/= 'x') & + call accum_hist_field(n_dardg2ndt-n2D, iblk, ncat_hist, & + dardg2ndt(:,:,1:ncat_hist,iblk), a3Dc) + if (f_dvirdgndt(1:1)/= 'x') & + call accum_hist_field(n_dvirdgndt-n2D, iblk, ncat_hist, & + dvirdgndt(:,:,1:ncat_hist,iblk), a3Dc) + if (f_krdgn(1:1)/= 'x') & + call accum_hist_field(n_krdgn-n2D, iblk, ncat_hist, & + krdgn(:,:,1:ncat_hist,iblk), a3Dc) + if (f_aparticn(1:1)/= 'x') & + call accum_hist_field(n_aparticn-n2D, iblk, ncat_hist, & + aparticn(:,:,1:ncat_hist,iblk), a3Dc) + if (f_aredistn(1:1)/= 'x') & + call accum_hist_field(n_aredistn-n2D, iblk, ncat_hist, & + aredistn(:,:,1:ncat_hist,iblk), a3Dc) + if (f_vredistn(1:1)/= 'x') & + call accum_hist_field(n_vredistn-n2D, iblk, ncat_hist, & + vredistn(:,:,1:ncat_hist,iblk), a3Dc) + if (f_araftn(1:1)/= 'x') & + call accum_hist_field(n_araftn-n2D, iblk, ncat_hist, & + araftn(:,:,1:ncat_hist,iblk), a3Dc) + if (f_vraftn(1:1)/= 'x') & + call accum_hist_field(n_vraftn-n2D, iblk, ncat_hist, & + vraftn(:,:,1:ncat_hist,iblk), a3Dc) + + end subroutine accum_hist_mechred + +!======================================================================= + + end module ice_history_mechred + +!======================================================================= diff --git a/source/ice_history_pond.F90 b/source/ice_history_pond.F90 new file mode 100755 index 00000000..263620d0 --- /dev/null +++ b/source/ice_history_pond.F90 @@ -0,0 +1,356 @@ +! SVN:$Id: ice_history_pond.F90 700 2013-08-15 19:17:39Z eclare $ +!======================================================================= + +! Melt pond history output +! +! 2012 Elizabeth Hunke split code from ice_history.F90 + + module ice_history_pond + + use ice_kinds_mod + use ice_domain_size, only: max_nstrm + + implicit none + private + public :: accum_hist_pond, init_hist_pond_2D, init_hist_pond_3Dc + save + + !--------------------------------------------------------------- + ! 
flags: write to output file if true or histfreq value + !--------------------------------------------------------------- + + character (len=max_nstrm), public :: & + f_apondn = 'm', f_apeffn = 'm', & + f_hpondn = 'm', & + f_apond = 'x', f_apond_ai = 'x', & + f_hpond = 'x', f_hpond_ai = 'x', & + f_ipond = 'x', f_ipond_ai = 'x', & + f_apeff = 'x', f_apeff_ai = 'x' + + !--------------------------------------------------------------- + ! namelist variables + !--------------------------------------------------------------- + + namelist / icefields_pond_nml / & + f_apondn, f_apeffn , & + f_hpondn, & + f_apond, f_apond_ai , & + f_hpond, f_hpond_ai , & + f_ipond, f_ipond_ai , & + f_apeff, f_apeff_ai + + !--------------------------------------------------------------- + ! field indices + !--------------------------------------------------------------- + + integer (kind=int_kind), dimension(max_nstrm) :: & + n_apondn , n_apeffn , & + n_hpondn , & + n_apond , n_apond_ai, & + n_hpond , n_hpond_ai, & + n_ipond , n_ipond_ai, & + n_apeff , n_apeff_ai + +!======================================================================= + + contains + +!======================================================================= + + subroutine init_hist_pond_2D + + use ice_broadcast, only: broadcast_scalar + use ice_calendar, only: nstreams + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c1 + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, nml_filename, & + get_fileunit, release_fileunit + use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_state, only: tr_pond + + integer (kind=int_kind) :: ns + integer (kind=int_kind) :: nml_error ! namelist i/o error flag + + !----------------------------------------------------------------- + ! read namelist + !----------------------------------------------------------------- + + call get_fileunit(nu_nml) + if (my_task == master_task) then + open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + do while (nml_error > 0) + read(nu_nml, nml=icefields_pond_nml,iostat=nml_error) + end do + if (nml_error == 0) close(nu_nml) + endif + call release_fileunit(nu_nml) + + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + close (nu_nml) + call abort_ice('ice: error reading icefields_pond_nml') + endif + + if (.not. tr_pond) then + f_apondn = 'x' + f_hpondn = 'x' + f_apeffn = 'x' + f_apond = 'x' + f_hpond = 'x' + f_ipond = 'x' + f_apeff = 'x' + f_apond_ai = 'x' + f_hpond_ai = 'x' + f_ipond_ai = 'x' + f_apeff_ai = 'x' + endif + + call broadcast_scalar (f_apondn, master_task) + call broadcast_scalar (f_hpondn, master_task) + call broadcast_scalar (f_apeffn, master_task) + call broadcast_scalar (f_apond, master_task) + call broadcast_scalar (f_hpond, master_task) + call broadcast_scalar (f_ipond, master_task) + call broadcast_scalar (f_apeff, master_task) + call broadcast_scalar (f_apond_ai, master_task) + call broadcast_scalar (f_hpond_ai, master_task) + call broadcast_scalar (f_ipond_ai, master_task) + call broadcast_scalar (f_apeff_ai, master_task) + + ! 
2D variables + do ns = 1, nstreams + + if (f_apond(1:1) /= 'x') & + call define_hist_field(n_apond,"apond","1",tstr2D, tcstr, & + "melt pond fraction of sea ice", & + "none", c1, c0, & + ns, f_apond) + + if (f_apond_ai(1:1) /= 'x') & + call define_hist_field(n_apond_ai,"apond_ai","1",tstr2D, tcstr, & + "melt pond fraction of grid cell", & + "weighted by ice area", c1, c0, & + ns, f_apond) + + if (f_hpond(1:1) /= 'x') & + call define_hist_field(n_hpond,"hpond","m",tstr2D, tcstr, & + "mean melt pond depth over sea ice", & + "none", c1, c0, & + ns, f_hpond) + + if (f_hpond_ai(1:1) /= 'x') & + call define_hist_field(n_hpond_ai,"hpond_ai","m",tstr2D, tcstr, & + "mean melt pond depth over grid cell", & + "weighted by ice area", c1, c0, & + ns, f_hpond) + + if (f_ipond(1:1) /= 'x') & + call define_hist_field(n_ipond,"ipond","m",tstr2D, tcstr, & + "mean pond ice thickness over sea ice", & + "none", c1, c0, & + ns, f_ipond) + + if (f_ipond_ai(1:1) /= 'x') & + call define_hist_field(n_ipond_ai,"ipond_ai","m",tstr2D, tcstr, & + "mean pond ice thickness over grid cell", & + "weighted by ice area", c1, c0, & + ns, f_ipond_ai) + + if (f_apeff(1:1) /= 'x') & + call define_hist_field(n_apeff,"apeff","1",tstr2D, tcstr, & + "radiation-effective pond area fraction of sea ice", & + "none", c1, c0, & + ns, f_apeff) + + if (f_apeff_ai(1:1) /= 'x') & + call define_hist_field(n_apeff_ai,"apeff_ai","1",tstr2D, tcstr, & + "radiation-effective pond area fraction over grid cell", & + "weighted by ice area", c1, c0, & + ns, f_apeff_ai) + + enddo ! nstreams + + end subroutine init_hist_pond_2D + +!======================================================================= + + subroutine init_hist_pond_3Dc + + use ice_calendar, only: nstreams + use ice_constants, only: c0, c1 + use ice_history_shared, only: tstr3Dc, tcstr, define_hist_field + use ice_state, only: tr_pond + + integer (kind=int_kind) :: ns + + ! 3D (category) variables must be looped separately + do ns = 1, nstreams + + if (f_apondn(1:1) /= 'x') & + call define_hist_field(n_apondn,"apondn","1",tstr3Dc, tcstr, & + "melt pond fraction, category","none", c1, c0, & + ns, f_apondn) + + if (f_hpondn(1:1) /= 'x') & + call define_hist_field(n_hpondn,"hpondn","m",tstr3Dc, tcstr, & + "melt pond depth, category","none", c1, c0, & + ns, f_hpondn) + + if (f_apeffn(1:1) /= 'x') & + call define_hist_field(n_apeffn,"apeffn","1",tstr3Dc, tcstr, & + "effective melt pond fraction, category", & + "none", c1, c0, & + ns, f_apeffn) + + enddo ! ns + + end subroutine init_hist_pond_3Dc + +!======================================================================= + +! accumulate average ice quantities or snapshots + + subroutine accum_hist_pond (iblk) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_constants, only: c0, puny + use ice_domain, only: blocks_ice + use ice_flux, only: apeff_ai + use ice_history_shared, only: n2D, a2D, a3Dc, ncat_hist, & + accum_hist_field + use ice_shortwave, only: apeffn + use ice_state, only: tr_pond_cesm, tr_pond_lvl, tr_pond_topo, & + aice, trcr, trcrn, nt_apnd, nt_hpnd, nt_ipnd, nt_alvl + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + i,j, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + worka + + type (block) :: & + this_block ! block information for current block + + !--------------------------------------------------------------- + ! 
increment field + !--------------------------------------------------------------- + + if (tr_pond_cesm) then + if (f_apond(1:1)/= 'x') & + call accum_hist_field(n_apond, iblk, & + trcr(:,:,nt_apnd,iblk), a2D) + if (f_apond_ai(1:1)/= 'x') & + call accum_hist_field(n_apond_ai, iblk, & + aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk), a2D) + if (f_hpond(1:1)/= 'x') & + call accum_hist_field(n_hpond, iblk, & + trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_hpnd,iblk), a2D) + if (f_hpond_ai(1:1)/= 'x') & + call accum_hist_field(n_hpond_ai, iblk, & + aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_hpnd,iblk), a2D) + + elseif (tr_pond_lvl) then + if (f_apond(1:1)/= 'x') & + call accum_hist_field(n_apond, iblk, & + trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk), a2D) + if (f_apond_ai(1:1)/= 'x') & + call accum_hist_field(n_apond_ai, iblk, & + aice(:,:,iblk) & + * trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk), a2D) + if (f_hpond(1:1)/= 'x') & + call accum_hist_field(n_hpond, iblk, & + trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_hpnd,iblk), a2D) + if (f_hpond_ai(1:1)/= 'x') & + call accum_hist_field(n_hpond_ai, iblk, & + aice(:,:,iblk) & + * trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_hpnd,iblk), a2D) + if (f_ipond(1:1)/= 'x') & + call accum_hist_field(n_ipond, iblk, & + trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_ipnd,iblk), a2D) + if (f_ipond_ai(1:1)/= 'x') & + call accum_hist_field(n_ipond_ai, iblk, & + aice(:,:,iblk) & + * trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_ipnd,iblk), a2D) + + elseif (tr_pond_topo) then + + if (f_apond(1:1)/= 'x') & + call accum_hist_field(n_apond, iblk, & + trcr(:,:,nt_apnd,iblk), a2D) + if (f_apond_ai(1:1)/= 'x') & + call accum_hist_field(n_apond_ai, iblk, & + aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk), a2D) + if (f_hpond(1:1)/= 'x') & + call accum_hist_field(n_hpond, iblk, & + trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_hpnd,iblk), a2D) + if (f_hpond_ai(1:1)/= 'x') & + call accum_hist_field(n_hpond_ai, iblk, & + aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_hpnd,iblk), a2D) + if (f_ipond(1:1)/= 'x') & + call accum_hist_field(n_ipond, iblk, & + trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_ipnd,iblk), a2D) + if (f_ipond_ai(1:1)/= 'x') & + call accum_hist_field(n_ipond_ai, iblk, & + aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk) & + * trcr(:,:,nt_ipnd,iblk), a2D) + endif ! ponds + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + if (f_apeff (1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = apeff_ai(i,j,iblk) & + / aice(i,j,iblk) + enddo + enddo + call accum_hist_field(n_apeff, iblk, worka(:,:), a2D) + endif + if (f_apeff_ai(1:1) /= 'x') & + call accum_hist_field(n_apeff_ai, iblk, apeff_ai(:,:,iblk), a2D) + + ! 
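! The apeff block above converts an ice-area-weighted field into a
! per-unit-ice-area one by dividing by aice wherever ice is present.  A
! minimal standalone sketch of that normalization follows (puny and the
! sample values are illustrative assumptions, not the module constants):
      program apeff_sketch
        implicit none
        integer, parameter :: dbl_kind = selected_real_kind(13)
        real (kind=dbl_kind), parameter :: puny = 1.0e-11_dbl_kind
        real (kind=dbl_kind) :: aice, apeff_ai, apeff
        aice     = 0.5_dbl_kind   ! ice covers half the grid cell
        apeff_ai = 0.2_dbl_kind   ! ponds cover 20% of the whole cell
        apeff    = 0.0_dbl_kind
        if (aice > puny) apeff = apeff_ai / aice
        print *, 'pond fraction of the ice area =', apeff   ! 0.4
      end program apeff_sketch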
3D category fields + if (f_apondn (1:1) /= 'x') & + call accum_hist_field(n_apondn-n2D, iblk, ncat_hist, & + trcrn(:,:,nt_apnd,1:ncat_hist,iblk), a3Dc) + if (f_apeffn (1:1) /= 'x') & + call accum_hist_field(n_apeffn-n2D, iblk, ncat_hist, & + apeffn(:,:,1:ncat_hist,iblk), a3Dc) + if (f_hpondn (1:1) /= 'x') & + call accum_hist_field(n_hpondn-n2D, iblk, ncat_hist, & + trcrn(:,:,nt_apnd,1:ncat_hist,iblk) & + * trcrn(:,:,nt_hpnd,1:ncat_hist,iblk), a3Dc) + + end subroutine accum_hist_pond + +!======================================================================= + + end module ice_history_pond + +!======================================================================= diff --git a/source/ice_history_shared.F90 b/source/ice_history_shared.F90 new file mode 100755 index 00000000..ffefcd5a --- /dev/null +++ b/source/ice_history_shared.F90 @@ -0,0 +1,838 @@ +! SVN:$Id: ice_history_shared.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= +! +! Output files: netCDF or binary data, Fortran unformatted dumps +! +! The following variables are currently hard-wired as snapshots +! (instantaneous rather than time-averages): +! divu, shear, sig1, sig2, trsig, mlt_onset, frz_onset, hisnap, aisnap +! +! Options for histfreq: '1','h','d','m','y','x', where x means that +! output stream will not be used (recommended for efficiency). +! histfreq_n can be any nonnegative integer, where 0 means that the +! corresponding histfreq frequency will not be used. +! The flags (f_) can be set to '1','h','d','m','y' or 'x', where +! n means the field will not be written. To output the same field at +! more than one frequency, for instance monthy and daily, set +! f_ = 'md'. +! +! authors Tony Craig and Bruce Briegleb, NCAR +! Elizabeth C. Hunke and William H. Lipscomb, LANL +! C. M. Bitz, UW +! +! 2012 Elizabeth Hunke split code from ice_history.F90 + + module ice_history_shared + + use ice_kinds_mod + use ice_fileunits, only: nu_diag + use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, max_nstrm + + implicit none + save + + private + public :: define_hist_field, accum_hist_field, icefields_nml, construct_filename + logical (kind=log_kind), public :: & + hist_avg ! if true, write averaged data instead of snapshots + + character (len=char_len), public :: & + history_file , & ! output file for history + incond_file ! output file for snapshot initial conditions + + character (len=char_len_long), public :: & + history_dir , & ! directory name for history file + incond_dir ! directory for snapshot initial conditions + + character (len=char_len_long), public :: & + pointer_file ! input pointer file for restarts + + !--------------------------------------------------------------- + ! Instructions for adding a field: (search for 'example') + ! Here: + ! (1) Add to frequency flags (f_) + ! (2) Add to namelist (here and also in ice_in) + ! (3) Add to index list + ! In init_hist: + ! (4) Add define_hist_field call with vname, vdesc, vunit, + ! and vcomment, vcellmeas, and conversion factor if necessary. + ! (5) Add flag to broadcast list + ! (6) Add accum_hist_field call with appropriate variable + !--------------------------------------------------------------- + + type, public :: ice_hist_field + character (len=16) :: vname ! variable name + character (len=16) :: vunit ! variable units + character (len=25) :: vcoord ! variable coordinates + character (len=16) :: vcellmeas ! variable cell measures + character (len=55) :: vdesc ! 
variable description + character (len=55) :: vcomment ! variable description + real (kind=dbl_kind) :: cona ! multiplicative conversion factor + real (kind=dbl_kind) :: conb ! additive conversion factor + character (len=1) :: vhistfreq ! frequency of history output + integer (kind=int_kind) :: vhistfreq_n ! number of vhistfreq intervals + end type + + integer (kind=int_kind), parameter, public :: & + max_avail_hist_fields = 600 ! Max number of history fields + + integer (kind=int_kind), public :: & + num_avail_hist_fields_tot = 0, & ! Current, total number of defined fields + num_avail_hist_fields_2D = 0, & ! Number of 2D fields + num_avail_hist_fields_3Dz = 0, & ! Number of 3D fields (vertical) + num_avail_hist_fields_3Dc = 0, & ! Number of 3D fields (categories) + num_avail_hist_fields_3Db = 0, & ! Number of 3D fields (vertical biology) + num_avail_hist_fields_4Di = 0, & ! Number of 4D fields (categories,vertical), ice + num_avail_hist_fields_4Ds = 0, & ! Number of 4D fields (categories,vertical), snow + num_avail_hist_fields_4Db = 0 ! Number of 4D fields (categories,vertical), ice-biology + + integer (kind=int_kind), public :: & ! cumulative counts + n2D , & ! num_avail_hist_fields_2D + n3Dccum , & ! n2D + num_avail_hist_fields_3Dc + n3Dzcum , & ! n3Dccum + num_avail_hist_fields_3Dz + n3Dbcum , & ! n3Dzcum + num_avail_hist_fields_3Db + n4Dicum , & ! n3Dbcum + num_avail_hist_fields_4Di + n4Dscum , & ! n4Dicum + num_avail_hist_fields_4Ds + n4Dbcum , & ! n4Dscum + num_avail_hist_fields_4Db + nzlyr , & ! vertical dimension (temp variable) + nzlyrb ! vertical dimension of biology grid (temp variable) + + ! for now, ice and snow have same dimensions in netcdf + ! could set nzilyr = nilyr + nslyr and write Tin+Tsn together into Tinz + integer (kind=int_kind), parameter, public :: & + nzilyr = nilyr, & ! vertical dimension (allows alternative grids) + nzslyr = nslyr, & + nzblyr = nblyr+2 + + type (ice_hist_field), dimension(max_avail_hist_fields), public :: & + avail_hist_fields + + integer (kind=int_kind), parameter, public :: & + nvar = 12 , & ! number of grid fields that can be written + ! excluding grid vertices + nvarz = 4 , & ! number of category/vertical grid fields written + ncat_hist = ncat ! number of ice categories written <= ncat + + real (kind=real_kind), public :: time_beg(max_nstrm), & ! bounds for averaging + time_end(max_nstrm), & + time_bounds(2) + + real (kind=dbl_kind), allocatable, public :: & + a2D (:,:,:,:) , & ! field accumulations/averages, 2D + a3Dz(:,:,:,:,:) , & ! field accumulations/averages, 3D vertical + a3Db(:,:,:,:,:) , & ! field accumulations/averages, 3D vertical biology + a3Dc(:,:,:,:,:) , & ! field accumulations/averages, 3D categories + a4Di(:,:,:,:,:,:), & ! field accumulations/averages, 4D categories,vertical, ice + a4Ds(:,:,:,:,:,:), & ! field accumulations/averages, 4D categories,vertical, snow + a4Db(:,:,:,:,:,:) ! field accumulations/averages, 4D categories,vertical, bio + + real (kind=dbl_kind), allocatable, public :: & + Tinz4d (:,:,:,:) , & ! array for Tin + Tsnz4d (:,:,:,:) , & ! array for Tsn + Sinz4d (:,:,:,:) ! array for Sin + + real (kind=dbl_kind), public :: & + avgct(max_nstrm) ! average sample counter + + logical (kind=log_kind), public :: & + igrd (nvar), & ! true if grid field is written to output file + igrdz(nvarz) ! true if category/vertical grid field is written + + character (len=25), public, parameter :: & + tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities + ucstr = 'area: uarea' , & ! 
vcellmeas for U cell quantities + tstr2D = 'TLON TLAT time' , & ! vcoord for T cell quantities, 2D + ustr2D = 'ULON ULAT time' , & ! vcoord for U cell quantities, 2D + tstr3Dz = 'TLON TLAT VGRD time', & ! vcoord for T cell quantities, 3D + ustr3Dz = 'ULON ULAT VGRD time', & ! vcoord for U cell quantities, 3D + tstr3Dc = 'TLON TLAT NCAT time', & ! vcoord for T cell quantities, 3D + ustr3Dc = 'ULON ULAT NCAT time', & ! vcoord for U cell quantities, 3D + tstr3Db = 'TLON TLAT VGRDb time', & ! vcoord for T cell quantities, 3D + ustr3Db = 'ULON ULAT VGRDb time', & ! vcoord for U cell quantities, 3D + +!ferret + tstr4Di = 'TLON TLAT VGRDi NCAT', & ! vcoord for T cell, 4D, ice + ustr4Di = 'ULON ULAT VGRDi NCAT', & ! vcoord for U cell, 4D, ice + tstr4Ds = 'TLON TLAT VGRDs NCAT', & ! vcoord for T cell, 4D, snow + ustr4Ds = 'ULON ULAT VGRDs NCAT', & ! vcoord for U cell, 4D, snow + tstr4Db = 'TLON TLAT VGRDb NCAT', & ! vcoord for T cell, 4D, bio + ustr4Db = 'ULON ULAT VGRDb NCAT' ! vcoord for U cell, 4D, bio +!ferret +! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time +! ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables. +! tstr4Ds = 'TLON TLAT VGRDs NCAT time', & ! Use 'ferret' lines instead +! ustr4Ds = 'ULON ULAT VGRDs NCAT time', & ! (below also) +! tstr4Db = 'TLON TLAT VGRDb NCAT time', & +! ustr4Db = 'ULON ULAT VGRDb NCAT time' + + !--------------------------------------------------------------- + ! flags: write to output file if true or histfreq value + !--------------------------------------------------------------- + + logical (kind=log_kind), public :: & + f_tmask = .true., f_blkmask = .true., & + f_tarea = .true., f_uarea = .true., & + f_dxt = .true., f_dyt = .true., & + f_dxu = .true., f_dyu = .true., & + f_HTN = .true., f_HTE = .true., & + f_ANGLE = .true., f_ANGLET = .true., & + f_bounds = .true., f_NCAT = .true., & + f_VGRDi = .true., f_VGRDs = .true., & + f_VGRDb = .true. + + character (len=max_nstrm), public :: & +! 
f_example = 'md', & + f_hi = 'm', f_hs = 'm', & + f_Tsfc = 'm', f_aice = 'm', & + f_uvel = 'm', f_vvel = 'm', & + f_uatm = 'm', f_vatm = 'm', & + f_fswdn = 'm', f_flwdn = 'm', & + f_snow = 'm', f_snow_ai = 'm', & + f_rain = 'm', f_rain_ai = 'm', & + f_sst = 'm', f_sss = 'm', & + f_uocn = 'm', f_vocn = 'm', & + f_sice = 'm', f_frzmlt = 'm', & + f_fswfac = 'm', f_fswint_ai = 'x', & + f_fswabs = 'm', f_fswabs_ai = 'm', & + f_albsni = 'm', & + f_alvdr = 'm', f_alidr = 'm', & + f_alvdf = 'm', f_alidf = 'm', & + f_albice = 'm', f_albsno = 'm', & + f_albpnd = 'm', f_coszen = 'm', & + f_flat = 'm', f_flat_ai = 'm', & + f_fsens = 'm', f_fsens_ai = 'm', & + f_flwup = 'm', f_flwup_ai = 'm', & + f_evap = 'm', f_evap_ai = 'm', & + f_Tair = 'm', & + f_Tref = 'm', f_Qref = 'm', & + f_congel = 'm', f_frazil = 'm', & + f_snoice = 'm', f_dsnow = 'm', & + f_meltt = 'm', f_melts = 'm', & + f_meltb = 'm', f_meltl = 'm', & + f_fresh = 'm', f_fresh_ai = 'm', & + f_fsalt = 'm', f_fsalt_ai = 'm', & + f_fhocn = 'm', f_fhocn_ai = 'm', & + f_fswthru = 'm', f_fswthru_ai = 'm', & + f_strairx = 'm', f_strairy = 'm', & + f_strtltx = 'm', f_strtlty = 'm', & + f_strcorx = 'm', f_strcory = 'm', & + f_strocnx = 'm', f_strocny = 'm', & + f_strintx = 'm', f_strinty = 'm', & + f_strength = 'm', & + f_divu = 'm', f_shear = 'm', & + f_sig1 = 'm', f_sig2 = 'm', & + f_dvidtt = 'm', f_dvidtd = 'm', & + f_dvsdtt = 'm', f_dvsdtd = 'm', & + f_daidtt = 'm', f_daidtd = 'm', & + f_dagedtt = 'm', f_dagedtd = 'm', & + f_mlt_onset = 'm', f_frz_onset = 'm', & + f_iage = 'm', f_FY = 'm', & + f_hisnap = 'm', f_aisnap = 'm', & + f_aicen = 'x', f_vicen = 'x', & + f_vsnon = 'x', & + f_trsig = 'm', f_icepresent = 'm', & + f_fsurf_ai = 'm', f_fcondtop_ai= 'm', & + f_fmeltt_ai = 'm', & + f_fsurfn_ai = 'x' ,f_fcondtopn_ai='x', & + f_fmelttn_ai= 'x', f_flatn_ai = 'x', & + f_fsensn_ai = 'x', & +! f_field3dz = 'x', & + f_Tn_top = 'm', f_keffn_top = 'm', & + f_Tinz = 'x', f_Sinz = 'x', & + f_Tsnz = 'x', & + f_a11 = 'x', f_a12 = 'x', & + f_e11 = 'x', f_e12 = 'x', & + f_e22 = 'x', & + f_s11 = 'x', f_s12 = 'x', & + f_s22 = 'x', & + f_yieldstress11 = 'x', & + f_yieldstress12 = 'x', & + f_yieldstress22 = 'x' + + !--------------------------------------------------------------- + ! namelist variables + !--------------------------------------------------------------- + + namelist / icefields_nml / & + f_tmask , f_blkmask , & + f_tarea , f_uarea , & + f_dxt , f_dyt , & + f_dxu , f_dyu , & + f_HTN , f_HTE , & + f_ANGLE , f_ANGLET , & + f_bounds , f_NCAT , & + f_VGRDi , f_VGRDs , & + f_VGRDb , & +! 
f_example , & + f_hi, f_hs , & + f_Tsfc, f_aice , & + f_uvel, f_vvel , & + f_uatm, f_vatm , & + f_fswdn, f_flwdn , & + f_snow, f_snow_ai , & + f_rain, f_rain_ai , & + f_sst, f_sss , & + f_uocn, f_vocn , & + f_sice, f_frzmlt , & + f_fswfac, f_fswint_ai, & + f_fswabs, f_fswabs_ai, & + f_albsni , & + f_alvdr, f_alidr , & + f_alvdf, f_alidf , & + f_albice, f_albsno , & + f_albpnd, f_coszen , & + f_flat, f_flat_ai , & + f_fsens, f_fsens_ai , & + f_flwup, f_flwup_ai , & + f_evap, f_evap_ai , & + f_Tair , & + f_Tref, f_Qref , & + f_congel, f_frazil , & + f_snoice, f_dsnow , & + f_meltt, f_melts , & + f_meltb, f_meltl , & + f_fresh, f_fresh_ai , & + f_fsalt, f_fsalt_ai , & + f_fhocn, f_fhocn_ai , & + f_fswthru, f_fswthru_ai,& + f_strairx, f_strairy , & + f_strtltx, f_strtlty , & + f_strcorx, f_strcory , & + f_strocnx, f_strocny , & + f_strintx, f_strinty , & + f_strength, & + f_divu, f_shear , & + f_sig1, f_sig2 , & + f_dvidtt, f_dvidtd , & + f_dvsdtt, f_dvsdtd , & + f_daidtt, f_daidtd , & + f_dagedtt, f_dagedtd , & + f_mlt_onset, f_frz_onset, & + f_iage, f_FY , & + f_hisnap, f_aisnap , & + f_aicen, f_vicen , & + f_vsnon, & + f_trsig, f_icepresent,& + f_fsurf_ai, f_fcondtop_ai,& + f_fmeltt_ai, & + f_fsurfn_ai,f_fcondtopn_ai,& + f_fmelttn_ai,f_flatn_ai, & + f_fsensn_ai, & +! f_field3dz, & + f_Tn_top, f_keffn_top, & + f_Tinz, f_Sinz, & + f_Tsnz, & + f_a11, f_a12 , & + f_e11, f_e12 , & + f_e22 , & + f_s11, f_s12 , & + f_s22 , & + f_yieldstress11 , & + f_yieldstress12 , & + f_yieldstress22 + + !--------------------------------------------------------------- + ! field indices + !--------------------------------------------------------------- + + integer (kind=int_kind), parameter, public :: & + n_tmask = 1, & + n_blkmask = 2, & + n_tarea = 3, & + n_uarea = 4, & + n_dxt = 5, & + n_dyt = 6, & + n_dxu = 7, & + n_dyu = 8, & + n_HTN = 9, & + n_HTE = 10, & + n_ANGLE = 11, & + n_ANGLET = 12, & + + n_NCAT = 1, & + n_VGRDi = 2, & + n_VGRDs = 3, & + n_VGRDb = 4, & + + n_lont_bnds = 1, & + n_latt_bnds = 2, & + n_lonu_bnds = 3, & + n_latu_bnds = 4 + + integer (kind=int_kind), dimension(max_nstrm), public :: & +! n_example , & + n_hi , n_hs , & + n_Tsfc , n_aice , & + n_uvel , n_vvel , & + n_uatm , n_vatm , & + n_sice , & + n_fswdn , n_flwdn , & + n_snow , n_snow_ai , & + n_rain , n_rain_ai , & + n_sst , n_sss , & + n_uocn , n_vocn , & + n_frzmlt , n_fswfac , & + n_fswint_ai, & + n_fswabs , n_fswabs_ai , & + n_albsni , & + n_alvdr , n_alidr , & + n_alvdf , n_alidf , & + n_albice , n_albsno , & + n_albpnd , n_coszen , & + n_flat , n_flat_ai , & + n_fsens , n_fsens_ai , & + n_flwup , n_flwup_ai , & + n_evap , n_evap_ai , & + n_Tair , & + n_Tref , n_Qref , & + n_congel , n_frazil , & + n_snoice , n_dsnow , & + n_meltt , n_melts , & + n_meltb , n_meltl , & + n_fresh , n_fresh_ai , & + n_fsalt , n_fsalt_ai , & + n_vsnon, & + n_fhocn , n_fhocn_ai , & + n_fswthru , n_fswthru_ai , & + n_strairx , n_strairy , & + n_strtltx , n_strtlty , & + n_strcorx , n_strcory , & + n_strocnx , n_strocny , & + n_strintx , n_strinty , & + n_strength , & + n_divu , n_shear , & + n_sig1 , n_sig2 , & + n_dvidtt , n_dvidtd , & + n_dvsdtt , n_dvsdtd , & + n_daidtt , n_daidtd , & + n_dagedtt , n_dagedtd , & + n_mlt_onset , n_frz_onset , & + n_hisnap , n_aisnap , & + n_trsig , n_icepresent , & + n_iage , n_FY , & + n_fsurf_ai , & + n_fcondtop_ai, n_fmeltt_ai , & + n_aicen , n_vicen , & + n_fsurfn_ai , & + n_fcondtopn_ai, & + n_fmelttn_ai , & + n_flatn_ai , & + n_fsensn_ai , & +! 
n_field3dz , & + n_Tn_top , & + n_keffn_top , & + n_Tinz , n_Sinz , & + n_Tsnz, & + n_a11 , n_a12 , & + n_e11 , n_e12 , & + n_e22 , & + n_s11 , n_s12 , & + n_s22 , & + n_yieldstress11, n_yieldstress12, & + n_yieldstress22 + + interface accum_hist_field ! generic interface + module procedure accum_hist_field_2D, & + accum_hist_field_3D, & + accum_hist_field_4D + end interface + +!======================================================================= + + contains + +!======================================================================= + + subroutine construct_filename(ncfile,suffix,ns) + + use ice_calendar, only: time, sec, nyr, month, daymo, & + mday, write_ic, histfreq, histfreq_n, & + year_init, new_year, new_month, new_day, & + dt + use ice_restart_shared, only: lenstr + + character (char_len_long), intent(inout) :: ncfile + character (len=2), intent(in) :: suffix + integer (kind=int_kind), intent(in) :: ns + + integer (kind=int_kind) :: iyear, imonth, iday, isec + + iyear = nyr + year_init - 1 ! set year_init=1 in ice_in to get iyear=nyr + imonth = month + iday = mday + isec = sec - dt + +#ifdef CCSMCOUPLED + if (write_ic) isec = sec +#endif + ! construct filename + if (write_ic) then + write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & + incond_file(1:lenstr(incond_file)),'.',iyear,'-', & + imonth,'-',iday,'-',isec,'.',suffix + else + + if (hist_avg .and. histfreq(ns) /= '1') then + if (histfreq(ns) == 'h'.or.histfreq(ns) == 'H') then + ! do nothing + elseif (new_year) then + iyear = iyear - 1 + imonth = 12 + iday = daymo(imonth) + elseif (new_month) then + imonth = month - 1 + iday = daymo(imonth) + elseif (new_day) then + iday = iday - 1 + endif + endif + + if (histfreq(ns) == '1') then ! instantaneous, write every dt + write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & + history_file(1:lenstr(history_file)),'_inst.', & + iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + + elseif (hist_avg) then ! write averaged data + + if (histfreq(ns) == 'd'.or.histfreq(ns) == 'D') then ! daily + write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,a)') & + history_file(1:lenstr(history_file)), & + '.',iyear,'-',imonth,'-',iday,'.',suffix + elseif (histfreq(ns) == 'h'.or.histfreq(ns) == 'H') then ! hourly + write(ncfile,'(a,a,i2.2,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & + history_file(1:lenstr(history_file)),'_',histfreq_n(ns),'h.', & + iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + elseif (histfreq(ns) == 'm'.or.histfreq(ns) == 'M') then ! monthly + write(ncfile,'(a,a,i4.4,a,i2.2,a,a)') & + history_file(1:lenstr(history_file)),'.', & + iyear,'-',imonth,'.',suffix + elseif (histfreq(ns) == 'y'.or.histfreq(ns) == 'Y') then ! yearly + write(ncfile,'(a,a,i4.4,a,a)') & + history_file(1:lenstr(history_file)),'.', iyear,'.',suffix + endif + + else ! instantaneous with histfreq > dt + write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & + history_file(1:lenstr(history_file)),'_inst.', & + iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + endif + endif + + end subroutine construct_filename + +!======================================================================= + +! Initializes description of an available field and returns location +! in the available fields array for use in later calls. +! +! 2009 Created by D. 
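! construct_filename above builds history file names from the averaging
! period; e.g. the monthly-average branch produces
! '<history_file>.YYYY-MM.<suffix>'.  A minimal standalone sketch of that
! branch follows (the 'iceh' root and the date are illustrative assumptions,
! not the model defaults):
      program filename_sketch
        implicit none
        character (len=128) :: ncfile
        character (len=16)  :: history_file
        character (len=2)   :: suffix
        integer :: iyear, imonth
        history_file = 'iceh'
        suffix = 'nc'
        iyear  = 1850
        imonth = 1
        ! same format as the monthly branch of construct_filename
        write(ncfile,'(a,a,i4.4,a,i2.2,a,a)') &
             trim(history_file), '.', iyear, '-', imonth, '.', suffix
        print *, trim(ncfile)    ! iceh.1850-01.nc
      end program filename_sketch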
Bailey following POP + + subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & + vdesc, vcomment, cona, conb, & + ns, vhistfreq) + + use ice_calendar, only: histfreq, histfreq_n, nstreams + use ice_domain_size, only: max_nstrm + use ice_exit, only: abort_ice + + integer (int_kind), dimension(max_nstrm), intent(out) :: & + id ! location in avail_fields array for use in + ! later routines + + character (len=*), intent(in) :: & + vname , & ! variable names + vunit , & ! variable units + vcoord , & ! variable coordinates + vcellmeas , & ! variables cell measures + vdesc , & ! variable descriptions + vcomment ! variable comments + + real (kind=dbl_kind), intent(in) :: & + cona , & ! multiplicative conversion factor + conb ! additive conversion factor + + character (len=*), intent(in) :: & + vhistfreq ! history frequency + + integer (kind=int_kind), intent(in) :: & + ns ! history file stream index + + integer (kind=int_kind) :: & + ns1 , & ! variable stream loop index + lenf ! length of namelist string + + character (len=40) :: stmp + + if (histfreq(ns) == 'x') then + call abort_ice("define_hist_fields has histfreq x") + endif + + if (ns == 1) id(:) = 0 + lenf = len(trim(vhistfreq)) + + do ns1 = 1, lenf + if (vhistfreq(ns1:ns1) == histfreq(ns)) then + + num_avail_hist_fields_tot = num_avail_hist_fields_tot + 1 + + if (vcoord(11:14) == 'time') then + num_avail_hist_fields_2D = num_avail_hist_fields_2D + 1 + elseif (vcoord(11:14) == 'NCAT' .and. vcoord(16:19) == 'time') then + num_avail_hist_fields_3Dc = num_avail_hist_fields_3Dc + 1 + elseif (vcoord(11:15) == 'VGRDi' .and. vcoord(17:20) == 'time') then + num_avail_hist_fields_3Dz = num_avail_hist_fields_3Dz + 1 + elseif (vcoord(11:15) == 'VGRDb' .and. vcoord(17:20) == 'time') then + num_avail_hist_fields_3Db = num_avail_hist_fields_3Db + 1 + elseif (vcoord(11:15) == 'VGRDi' .and. vcoord(17:20) == 'NCAT') then + num_avail_hist_fields_4Di = num_avail_hist_fields_4Di + 1 + elseif (vcoord(11:15) == 'VGRDs' .and. vcoord(17:20) == 'NCAT') then + num_avail_hist_fields_4Ds = num_avail_hist_fields_4Ds + 1 + elseif (vcoord(11:15) == 'VGRDb' .and. vcoord(17:20) == 'NCAT') then + num_avail_hist_fields_4Db = num_avail_hist_fields_4Db + 1 + endif + + if (num_avail_hist_fields_tot > max_avail_hist_fields) & + call abort_ice("Need to increase max_avail_hist_fields") + + if (num_avail_hist_fields_tot /= & + num_avail_hist_fields_2D + & + num_avail_hist_fields_3Dc + & + num_avail_hist_fields_3Dz + & + num_avail_hist_fields_3Db + & + num_avail_hist_fields_4Di + & + num_avail_hist_fields_4Ds + & + num_avail_hist_fields_4Db) & + call abort_ice("num_avail_hist_fields error") + + id(ns) = num_avail_hist_fields_tot + + stmp = vname + if (ns > 1) & + write(stmp,'(a,a1,a1)') trim(stmp),'_',vhistfreq(ns1:ns1) + + avail_hist_fields(id(ns))%vname = trim(stmp) + avail_hist_fields(id(ns))%vunit = trim(vunit) + avail_hist_fields(id(ns))%vcoord = trim(vcoord) + avail_hist_fields(id(ns))%vcellmeas = trim(vcellmeas) + avail_hist_fields(id(ns))%vdesc = trim(vdesc) + avail_hist_fields(id(ns))%vcomment = trim(vcomment) + avail_hist_fields(id(ns))%cona = cona + avail_hist_fields(id(ns))%conb = conb + avail_hist_fields(id(ns))%vhistfreq = vhistfreq(ns1:ns1) + avail_hist_fields(id(ns))%vhistfreq_n = histfreq_n(ns) + + endif + enddo + + end subroutine define_hist_field + +!======================================================================= + +! Accumulates a history field +! +! 2009 Created by D. Bailey following POP +! 2010 Generalized dimension of variables by N. 
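! define_hist_field above classifies each field by fixed character positions
! in its vcoord string: 'time' at 11:14 marks a 2D field, 'NCAT' at 11:14
! with 'time' at 16:19 marks a 3D category field, and similarly for the
! VGRD* coordinates.  A minimal standalone sketch of that test (the sample
! string is the tstr3Dc constant defined earlier in this module):
      program vcoord_sketch
        implicit none
        character (len=25) :: vcoord
        vcoord = 'TLON TLAT NCAT time'     ! tstr3Dc
        if (vcoord(11:14) == 'time') then
           print *, trim(vcoord), ' -> 2D field'
        elseif (vcoord(11:14) == 'NCAT' .and. vcoord(16:19) == 'time') then
           print *, trim(vcoord), ' -> 3D (category) field'
        else
           print *, trim(vcoord), ' -> other dimensionality'
        endif
      end program vcoord_sketch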
Jeffery, E. Hunke + + subroutine accum_hist_field_2D(id, iblk, field_accum, field) + + use ice_blocks, only: block, get_block + use ice_calendar, only: nstreams + use ice_domain, only: blocks_ice + use ice_domain_size, only: max_nstrm + use ice_grid, only: tmask +#ifdef AusCOM + use ice_grid, only: umask +#endif + + integer (int_kind), dimension(max_nstrm), intent(in) :: & + id ! location in avail_fields array for use in + ! later routines + + integer (kind=int_kind), intent(in) :: iblk + + real (kind=dbl_kind), intent(in) :: & + field_accum(:,:) + + real (kind=dbl_kind), intent(inout) :: & + field(:,:,:,:) + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: i,j, ilo, ihi, jlo, jhi, ns, idns + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + do ns = 1, nstreams + idns = id(ns) + if (idns > 0) then + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi +#ifdef AusCOM + if (idns==n_uocn(ns).or.idns==n_vocn(ns)) then + if (umask(i,j,iblk)) then + field(i,j,idns, iblk) = field(i,j,idns, iblk) + field_accum(i,j) + endif + else + if (tmask(i,j,iblk)) then + field(i,j,idns, iblk) = field(i,j,idns, iblk) + field_accum(i,j) + endif + endif +#else + if (tmask(i,j,iblk)) then + field(i,j,idns, iblk) = field(i,j,idns, iblk) + field_accum(i,j) + endif +#endif + enddo + enddo + + endif + enddo + + end subroutine accum_hist_field_2D + +!======================================================================= + +! Accumulates a history field +! +! 2009 Created by D. Bailey following POP +! 2010 Generalized dimension of variables by N. Jeffery, E. Hunke + + subroutine accum_hist_field_3D(id, iblk, ndim, field_accum, field) + + use ice_blocks, only: block, get_block + use ice_calendar, only: nstreams + use ice_domain, only: blocks_ice + use ice_domain_size, only: max_nstrm + use ice_grid, only: tmask + + integer (int_kind), dimension(max_nstrm), intent(in) :: & + id ! location in avail_fields array for use in + ! later routines + + integer (kind=int_kind), intent(in) :: iblk + + integer (kind=int_kind), intent(in) :: & + ndim ! third dimension size + + real (kind=dbl_kind), intent(in) :: & + field_accum(:,:,:) + + real (kind=dbl_kind), intent(inout) :: & + field(:,:,:,:,:) + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: i,j,k, ilo, ihi, jlo, jhi, ns, idns + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + do ns = 1, nstreams + idns = id(ns) + if (idns > 0) then + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do k = 1, ndim + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + field(i,j,k,idns,iblk) = field(i,j,k,idns,iblk) + field_accum(i,j,k) + endif + enddo + enddo + enddo + + endif + enddo + + end subroutine accum_hist_field_3D + +!======================================================================= + +! Accumulates a history field +! +! 2009 Created by D. Bailey following POP +! 2010 Generalized dimension of variables by N. Jeffery, E. 
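! A note on indexing in the accumulation routines above: field ids are
! assigned consecutively across all dimensionalities, and the cumulative
! counters (n2D, n3Dccum, ...) record where each group ends.  Callers of the
! 3D-category form therefore pass id - n2D so the id maps onto the category
! slice of a3Dc; with illustrative numbers, if n2D = 150 and a category field
! was registered with id = 153, it accumulates into a3Dc slot 153 - 150 = 3.
! This matches the n_*-n2D arguments used in accum_hist_mechred and
! accum_hist_pond earlier in this patch.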
Hunke + + subroutine accum_hist_field_4D(id, iblk, ndim3, ndim4, field_accum, field) + + use ice_blocks, only: block, get_block + use ice_calendar, only: nstreams + use ice_domain, only: blocks_ice + use ice_domain_size, only: max_nstrm + use ice_grid, only: tmask + + integer (int_kind), dimension(max_nstrm), intent(in) :: & + id ! location in avail_fields array for use in + ! later routines + + integer (kind=int_kind), intent(in) :: iblk + + integer (kind=int_kind), intent(in) :: & + ndim3 , & ! third dimension size + ndim4 ! fourth dimension size + + real (kind=dbl_kind), intent(in) :: & + field_accum(:,:,:,:) + + real (kind=dbl_kind), intent(inout) :: & + field(:,:,:,:,:,:) + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: i,j,k,n,ilo, ihi, jlo, jhi, ns, idns + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + do ns = 1, nstreams + idns = id(ns) + if (idns > 0) then + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do k = 1, ndim4 + do n = 1, ndim3 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + field(i,j,n,k,idns,iblk) = field(i,j,n,k,idns,iblk) + field_accum(i,j,n,k) + endif + enddo + enddo + enddo + enddo + + endif + enddo + + end subroutine accum_hist_field_4D + +!======================================================================= + + end module ice_history_shared + +!======================================================================= diff --git a/source/ice_history_shared.F90_spo b/source/ice_history_shared.F90_spo new file mode 100755 index 00000000..bce69b31 --- /dev/null +++ b/source/ice_history_shared.F90_spo @@ -0,0 +1,899 @@ +! SVN:$Id: ice_history_shared.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= +! +! Output files: netCDF or binary data, Fortran unformatted dumps +! +! The following variables are currently hard-wired as snapshots +! (instantaneous rather than time-averages): +! divu, shear, sig1, sig2, trsig, mlt_onset, frz_onset, hisnap, aisnap +! +! Options for histfreq: '1','h','d','m','y','x', where x means that +! output stream will not be used (recommended for efficiency). +! histfreq_n can be any nonnegative integer, where 0 means that the +! corresponding histfreq frequency will not be used. +! The flags (f_) can be set to '1','h','d','m','y' or 'x', where +! n means the field will not be written. To output the same field at +! more than one frequency, for instance monthy and daily, set +! f_ = 'md'. +! +! authors Tony Craig and Bruce Briegleb, NCAR +! Elizabeth C. Hunke and William H. Lipscomb, LANL +! C. M. Bitz, UW +! +! 2012 Elizabeth Hunke split code from ice_history.F90 + + module ice_history_shared + + use ice_kinds_mod + use ice_fileunits, only: nu_diag + use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, max_nstrm + + implicit none + save + + private + public :: define_hist_field, accum_hist_field, icefields_nml, construct_filename +!BX: + public :: accum_hist_field_3D3D +! + logical (kind=log_kind), public :: & + hist_avg ! if true, write averaged data instead of snapshots + + character (len=char_len), public :: & + history_file , & ! output file for history + incond_file ! output file for snapshot initial conditions + + character (len=char_len_long), public :: & + history_dir , & ! 
directory name for history file + incond_dir ! directory for snapshot initial conditions + + character (len=char_len_long), public :: & + pointer_file ! input pointer file for restarts + + !--------------------------------------------------------------- + ! Instructions for adding a field: (search for 'example') + ! Here: + ! (1) Add to frequency flags (f_) + ! (2) Add to namelist (here and also in ice_in) + ! (3) Add to index list + ! In init_hist: + ! (4) Add define_hist_field call with vname, vdesc, vunit, + ! and vcomment, vcellmeas, and conversion factor if necessary. + ! (5) Add flag to broadcast list + ! (6) Add accum_hist_field call with appropriate variable + !--------------------------------------------------------------- + + type, public :: ice_hist_field + character (len=16) :: vname ! variable name + character (len=16) :: vunit ! variable units + character (len=25) :: vcoord ! variable coordinates + character (len=16) :: vcellmeas ! variable cell measures + character (len=55) :: vdesc ! variable description + character (len=55) :: vcomment ! variable description + real (kind=dbl_kind) :: cona ! multiplicative conversion factor + real (kind=dbl_kind) :: conb ! additive conversion factor + character (len=1) :: vhistfreq ! frequency of history output + integer (kind=int_kind) :: vhistfreq_n ! number of vhistfreq intervals + end type + + integer (kind=int_kind), parameter, public :: & + max_avail_hist_fields = 600 ! Max number of history fields + + integer (kind=int_kind), public :: & + num_avail_hist_fields_tot = 0, & ! Current, total number of defined fields + num_avail_hist_fields_2D = 0, & ! Number of 2D fields + num_avail_hist_fields_3Dz = 0, & ! Number of 3D fields (vertical) + num_avail_hist_fields_3Dc = 0, & ! Number of 3D fields (categories) + num_avail_hist_fields_3Db = 0, & ! Number of 3D fields (vertical biology) + num_avail_hist_fields_4Di = 0, & ! Number of 4D fields (categories,vertical), ice + num_avail_hist_fields_4Ds = 0, & ! Number of 4D fields (categories,vertical), snow + num_avail_hist_fields_4Db = 0 ! Number of 4D fields (categories,vertical), ice-biology + + integer (kind=int_kind), public :: & ! cumulative counts + n2D , & ! num_avail_hist_fields_2D + n3Dccum , & ! n2D + num_avail_hist_fields_3Dc + n3Dzcum , & ! n3Dccum + num_avail_hist_fields_3Dz + n3Dbcum , & ! n3Dzcum + num_avail_hist_fields_3Db + n4Dicum , & ! n3Dbcum + num_avail_hist_fields_4Di + n4Dscum , & ! n4Dicum + num_avail_hist_fields_4Ds + n4Dbcum , & ! n4Dscum + num_avail_hist_fields_4Db + nzlyr , & ! vertical dimension (temp variable) + nzlyrb ! vertical dimension of biology grid (temp variable) + + ! for now, ice and snow have same dimensions in netcdf + ! could set nzilyr = nilyr + nslyr and write Tin+Tsn together into Tinz + integer (kind=int_kind), parameter, public :: & + nzilyr = nilyr, & ! vertical dimension (allows alternative grids) + nzslyr = nslyr, & + nzblyr = nblyr+2 + + type (ice_hist_field), dimension(max_avail_hist_fields), public :: & + avail_hist_fields + + integer (kind=int_kind), parameter, public :: & + nvar = 12 , & ! number of grid fields that can be written + ! excluding grid vertices + nvarz = 4 , & ! number of category/vertical grid fields written + ncat_hist = ncat ! number of ice categories written <= ncat + + real (kind=real_kind), public :: time_beg(max_nstrm), & ! bounds for averaging + time_end(max_nstrm), & + time_bounds(2) + + real (kind=dbl_kind), allocatable, public :: & + a2D (:,:,:,:) , & ! field accumulations/averages, 2D + a3Dz(:,:,:,:,:) , & ! 
field accumulations/averages, 3D vertical + a3Db(:,:,:,:,:) , & ! field accumulations/averages, 3D vertical biology + a3Dc(:,:,:,:,:) , & ! field accumulations/averages, 3D categories + a4Di(:,:,:,:,:,:), & ! field accumulations/averages, 4D categories,vertical, ice + a4Ds(:,:,:,:,:,:), & ! field accumulations/averages, 4D categories,vertical, snow + a4Db(:,:,:,:,:,:) ! field accumulations/averages, 4D categories,vertical, bio + + real (kind=dbl_kind), allocatable, public :: & + Tinz4d (:,:,:,:) , & ! array for Tin + Tsnz4d (:,:,:,:) , & ! array for Tsn + Sinz4d (:,:,:,:) ! array for Sin + + real (kind=dbl_kind), public :: & + avgct(max_nstrm) ! average sample counter + + logical (kind=log_kind), public :: & + igrd (nvar), & ! true if grid field is written to output file + igrdz(nvarz) ! true if category/vertical grid field is written + + character (len=25), public, parameter :: & + tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities + ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities + tstr2D = 'TLON TLAT time' , & ! vcoord for T cell quantities, 2D + ustr2D = 'ULON ULAT time' , & ! vcoord for U cell quantities, 2D + tstr3Dz = 'TLON TLAT VGRD time', & ! vcoord for T cell quantities, 3D + ustr3Dz = 'ULON ULAT VGRD time', & ! vcoord for U cell quantities, 3D + tstr3Dc = 'TLON TLAT NCAT time', & ! vcoord for T cell quantities, 3D + ustr3Dc = 'ULON ULAT NCAT time', & ! vcoord for U cell quantities, 3D + tstr3Db = 'TLON TLAT VGRDb time', & ! vcoord for T cell quantities, 3D + ustr3Db = 'ULON ULAT VGRDb time', & ! vcoord for U cell quantities, 3D + +!ferret + tstr4Di = 'TLON TLAT VGRDi NCAT', & ! vcoord for T cell, 4D, ice + ustr4Di = 'ULON ULAT VGRDi NCAT', & ! vcoord for U cell, 4D, ice + tstr4Ds = 'TLON TLAT VGRDs NCAT', & ! vcoord for T cell, 4D, snow + ustr4Ds = 'ULON ULAT VGRDs NCAT', & ! vcoord for U cell, 4D, snow + tstr4Db = 'TLON TLAT VGRDb NCAT', & ! vcoord for T cell, 4D, bio + ustr4Db = 'ULON ULAT VGRDb NCAT' ! vcoord for U cell, 4D, bio +!ferret +! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time +! ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables. +! tstr4Ds = 'TLON TLAT VGRDs NCAT time', & ! Use 'ferret' lines instead +! ustr4Ds = 'ULON ULAT VGRDs NCAT time', & ! (below also) +! tstr4Db = 'TLON TLAT VGRDb NCAT time', & +! ustr4Db = 'ULON ULAT VGRDb NCAT time' + + !--------------------------------------------------------------- + ! flags: write to output file if true or histfreq value + !--------------------------------------------------------------- + + logical (kind=log_kind), public :: & + f_tmask = .true., f_blkmask = .true., & + f_tarea = .true., f_uarea = .true., & + f_dxt = .true., f_dyt = .true., & + f_dxu = .true., f_dyu = .true., & + f_HTN = .true., f_HTE = .true., & + f_ANGLE = .true., f_ANGLET = .true., & + f_bounds = .true., f_NCAT = .true., & + f_VGRDi = .true., f_VGRDs = .true., & + f_VGRDb = .true. + + character (len=max_nstrm), public :: & +! 
f_example = 'md', & + f_hi = 'm', f_hs = 'm', & + f_Tsfc = 'm', f_aice = 'm', & + f_uvel = 'm', f_vvel = 'm', & + f_uatm = 'm', f_vatm = 'm', & + f_fswdn = 'm', f_flwdn = 'm', & + f_snow = 'm', f_snow_ai = 'm', & + f_rain = 'm', f_rain_ai = 'm', & + f_sst = 'm', f_sss = 'm', & + f_uocn = 'm', f_vocn = 'm', & + f_sice = 'm', f_frzmlt = 'm', & + f_fswfac = 'm', f_fswint_ai = 'x', & + f_fswabs = 'm', f_fswabs_ai = 'm', & + f_albsni = 'm', & + f_alvdr = 'm', f_alidr = 'm', & + f_alvdf = 'm', f_alidf = 'm', & + f_albice = 'm', f_albsno = 'm', & + f_albpnd = 'm', f_coszen = 'm', & + f_flat = 'm', f_flat_ai = 'm', & + f_fsens = 'm', f_fsens_ai = 'm', & + f_flwup = 'm', f_flwup_ai = 'm', & + f_evap = 'm', f_evap_ai = 'm', & + f_Tair = 'm', & + f_Tref = 'm', f_Qref = 'm', & + f_congel = 'm', f_frazil = 'm', & + f_snoice = 'm', f_dsnow = 'm', & + f_meltt = 'm', f_melts = 'm', & + f_meltb = 'm', f_meltl = 'm', & + f_fresh = 'm', f_fresh_ai = 'm', & + f_fsalt = 'm', f_fsalt_ai = 'm', & + f_fhocn = 'm', f_fhocn_ai = 'm', & + f_fswthru = 'm', f_fswthru_ai = 'm', & + f_strairx = 'm', f_strairy = 'm', & + f_strtltx = 'm', f_strtlty = 'm', & + f_strcorx = 'm', f_strcory = 'm', & + f_strocnx = 'm', f_strocny = 'm', & + f_strintx = 'm', f_strinty = 'm', & + f_strength = 'm', & + f_divu = 'm', f_shear = 'm', & + f_sig1 = 'm', f_sig2 = 'm', & + f_dvidtt = 'm', f_dvidtd = 'm', & + f_dvsdtt = 'm', f_dvsdtd = 'm', & + f_daidtt = 'm', f_daidtd = 'm', & + f_dagedtt = 'm', f_dagedtd = 'm', & + f_mlt_onset = 'm', f_frz_onset = 'm', & + f_iage = 'm', f_FY = 'm', & + f_hisnap = 'm', f_aisnap = 'm', & + f_aicen = 'x', f_vicen = 'x', & + f_vsnon = 'x', & + f_trsig = 'm', f_icepresent = 'm', & + f_fsurf_ai = 'm', f_fcondtop_ai= 'm', & + f_fmeltt_ai = 'm', & + f_fsurfn_ai = 'x' ,f_fcondtopn_ai='x', & + f_fmelttn_ai= 'x', f_flatn_ai = 'x', & + f_fsensn_ai = 'x', & +! f_field3dz = 'x', & + f_Tn_top = 'm', f_keffn_top = 'm', & + f_Tinz = 'x', f_Sinz = 'x', & + f_Tsnz = 'x', & + f_a11 = 'x', f_a12 = 'x', & + f_e11 = 'x', f_e12 = 'x', & + f_e22 = 'x', & + f_s11 = 'x', f_s12 = 'x', & + f_s22 = 'x', & + f_yieldstress11 = 'x', & + f_yieldstress12 = 'x', & + f_yieldstress22 = 'x' + + !--------------------------------------------------------------- + ! namelist variables + !--------------------------------------------------------------- + + namelist / icefields_nml / & + f_tmask , f_blkmask , & + f_tarea , f_uarea , & + f_dxt , f_dyt , & + f_dxu , f_dyu , & + f_HTN , f_HTE , & + f_ANGLE , f_ANGLET , & + f_bounds , f_NCAT , & + f_VGRDi , f_VGRDs , & + f_VGRDb , & +! 
f_example , & + f_hi, f_hs , & + f_Tsfc, f_aice , & + f_uvel, f_vvel , & + f_uatm, f_vatm , & + f_fswdn, f_flwdn , & + f_snow, f_snow_ai , & + f_rain, f_rain_ai , & + f_sst, f_sss , & + f_uocn, f_vocn , & + f_sice, f_frzmlt , & + f_fswfac, f_fswint_ai, & + f_fswabs, f_fswabs_ai, & + f_albsni , & + f_alvdr, f_alidr , & + f_alvdf, f_alidf , & + f_albice, f_albsno , & + f_albpnd, f_coszen , & + f_flat, f_flat_ai , & + f_fsens, f_fsens_ai , & + f_flwup, f_flwup_ai , & + f_evap, f_evap_ai , & + f_Tair , & + f_Tref, f_Qref , & + f_congel, f_frazil , & + f_snoice, f_dsnow , & + f_meltt, f_melts , & + f_meltb, f_meltl , & + f_fresh, f_fresh_ai , & + f_fsalt, f_fsalt_ai , & + f_fhocn, f_fhocn_ai , & + f_fswthru, f_fswthru_ai,& + f_strairx, f_strairy , & + f_strtltx, f_strtlty , & + f_strcorx, f_strcory , & + f_strocnx, f_strocny , & + f_strintx, f_strinty , & + f_strength, & + f_divu, f_shear , & + f_sig1, f_sig2 , & + f_dvidtt, f_dvidtd , & + f_dvsdtt, f_dvsdtd , & + f_daidtt, f_daidtd , & + f_dagedtt, f_dagedtd , & + f_mlt_onset, f_frz_onset, & + f_iage, f_FY , & + f_hisnap, f_aisnap , & + f_aicen, f_vicen , & + f_vsnon, & + f_trsig, f_icepresent,& + f_fsurf_ai, f_fcondtop_ai,& + f_fmeltt_ai, & + f_fsurfn_ai,f_fcondtopn_ai,& + f_fmelttn_ai,f_flatn_ai, & + f_fsensn_ai, & +! f_field3dz, & + f_Tn_top, f_keffn_top, & + f_Tinz, f_Sinz, & + f_Tsnz, & + f_a11, f_a12 , & + f_e11, f_e12 , & + f_e22 , & + f_s11, f_s12 , & + f_s22 , & + f_yieldstress11 , & + f_yieldstress12 , & + f_yieldstress22 + + !--------------------------------------------------------------- + ! field indices + !--------------------------------------------------------------- + + integer (kind=int_kind), parameter, public :: & + n_tmask = 1, & + n_blkmask = 2, & + n_tarea = 3, & + n_uarea = 4, & + n_dxt = 5, & + n_dyt = 6, & + n_dxu = 7, & + n_dyu = 8, & + n_HTN = 9, & + n_HTE = 10, & + n_ANGLE = 11, & + n_ANGLET = 12, & + + n_NCAT = 1, & + n_VGRDi = 2, & + n_VGRDs = 3, & + n_VGRDb = 4, & + + n_lont_bnds = 1, & + n_latt_bnds = 2, & + n_lonu_bnds = 3, & + n_latu_bnds = 4 + + integer (kind=int_kind), dimension(max_nstrm), public :: & +! n_example , & + n_hi , n_hs , & + n_Tsfc , n_aice , & + n_uvel , n_vvel , & + n_uatm , n_vatm , & + n_sice , & + n_fswdn , n_flwdn , & + n_snow , n_snow_ai , & + n_rain , n_rain_ai , & + n_sst , n_sss , & + n_uocn , n_vocn , & + n_frzmlt , n_fswfac , & + n_fswint_ai, & + n_fswabs , n_fswabs_ai , & + n_albsni , & + n_alvdr , n_alidr , & + n_alvdf , n_alidf , & + n_albice , n_albsno , & + n_albpnd , n_coszen , & + n_flat , n_flat_ai , & + n_fsens , n_fsens_ai , & + n_flwup , n_flwup_ai , & + n_evap , n_evap_ai , & + n_Tair , & + n_Tref , n_Qref , & + n_congel , n_frazil , & + n_snoice , n_dsnow , & + n_meltt , n_melts , & + n_meltb , n_meltl , & + n_fresh , n_fresh_ai , & + n_fsalt , n_fsalt_ai , & + n_vsnon, & + n_fhocn , n_fhocn_ai , & + n_fswthru , n_fswthru_ai , & + n_strairx , n_strairy , & + n_strtltx , n_strtlty , & + n_strcorx , n_strcory , & + n_strocnx , n_strocny , & + n_strintx , n_strinty , & + n_strength , & + n_divu , n_shear , & + n_sig1 , n_sig2 , & + n_dvidtt , n_dvidtd , & + n_dvsdtt , n_dvsdtd , & + n_daidtt , n_daidtd , & + n_dagedtt , n_dagedtd , & + n_mlt_onset , n_frz_onset , & + n_hisnap , n_aisnap , & + n_trsig , n_icepresent , & + n_iage , n_FY , & + n_fsurf_ai , & + n_fcondtop_ai, n_fmeltt_ai , & + n_aicen , n_vicen , & + n_fsurfn_ai , & + n_fcondtopn_ai, & + n_fmelttn_ai , & + n_flatn_ai , & + n_fsensn_ai , & +! 
n_field3dz , & + n_Tn_top , & + n_keffn_top , & + n_Tinz , n_Sinz , & + n_Tsnz, & + n_a11 , n_a12 , & + n_e11 , n_e12 , & + n_e22 , & + n_s11 , n_s12 , & + n_s22 , & + n_yieldstress11, n_yieldstress12, & + n_yieldstress22 + + interface accum_hist_field ! generic interface + module procedure accum_hist_field_2D, & + accum_hist_field_3D, & + accum_hist_field_4D + end interface + +!======================================================================= + + contains + +!======================================================================= + + subroutine construct_filename(ncfile,suffix,ns) + + use ice_calendar, only: time, sec, nyr, month, daymo, & + mday, write_ic, histfreq, histfreq_n, & + year_init, new_year, new_month, new_day, & + dt + use ice_restart_shared, only: lenstr + + character (char_len_long), intent(inout) :: ncfile + character (len=2), intent(in) :: suffix + integer (kind=int_kind), intent(in) :: ns + + integer (kind=int_kind) :: iyear, imonth, iday, isec + + iyear = nyr + year_init - 1 ! set year_init=1 in ice_in to get iyear=nyr + imonth = month + iday = mday + isec = sec - dt + +#ifdef CCSMCOUPLED + if (write_ic) isec = sec +#endif + ! construct filename + if (write_ic) then + write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & + incond_file(1:lenstr(incond_file)),'.',iyear,'-', & + imonth,'-',iday,'-',isec,'.',suffix + else + + if (hist_avg .and. histfreq(ns) /= '1') then + if (histfreq(ns) == 'h'.or.histfreq(ns) == 'H') then + ! do nothing + elseif (new_year) then + iyear = iyear - 1 + imonth = 12 + iday = daymo(imonth) + elseif (new_month) then + imonth = month - 1 + iday = daymo(imonth) + elseif (new_day) then + iday = iday - 1 + endif + endif + + if (histfreq(ns) == '1') then ! instantaneous, write every dt + write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & + history_file(1:lenstr(history_file)),'_inst.', & + iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + + elseif (hist_avg) then ! write averaged data + + if (histfreq(ns) == 'd'.or.histfreq(ns) == 'D') then ! daily + write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,a)') & + history_file(1:lenstr(history_file)), & + '.',iyear,'-',imonth,'-',iday,'.',suffix + elseif (histfreq(ns) == 'h'.or.histfreq(ns) == 'H') then ! hourly + write(ncfile,'(a,a,i2.2,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & + history_file(1:lenstr(history_file)),'_',histfreq_n(ns),'h.', & + iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + elseif (histfreq(ns) == 'm'.or.histfreq(ns) == 'M') then ! monthly + write(ncfile,'(a,a,i4.4,a,i2.2,a,a)') & + history_file(1:lenstr(history_file)),'.', & + iyear,'-',imonth,'.',suffix + elseif (histfreq(ns) == 'y'.or.histfreq(ns) == 'Y') then ! yearly + write(ncfile,'(a,a,i4.4,a,a)') & + history_file(1:lenstr(history_file)),'.', iyear,'.',suffix + endif + + else ! instantaneous with histfreq > dt + write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & + history_file(1:lenstr(history_file)),'_inst.', & + iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + endif + endif + + end subroutine construct_filename + +!======================================================================= + +! Initializes description of an available field and returns location +! in the available fields array for use in later calls. +! +! 2009 Created by D. 
Bailey following POP + + subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & + vdesc, vcomment, cona, conb, & + ns, vhistfreq) + + use ice_calendar, only: histfreq, histfreq_n, nstreams + use ice_domain_size, only: max_nstrm + use ice_exit, only: abort_ice + + integer (int_kind), dimension(max_nstrm), intent(out) :: & + id ! location in avail_fields array for use in + ! later routines + + character (len=*), intent(in) :: & + vname , & ! variable names + vunit , & ! variable units + vcoord , & ! variable coordinates + vcellmeas , & ! variables cell measures + vdesc , & ! variable descriptions + vcomment ! variable comments + + real (kind=dbl_kind), intent(in) :: & + cona , & ! multiplicative conversion factor + conb ! additive conversion factor + + character (len=*), intent(in) :: & + vhistfreq ! history frequency + + integer (kind=int_kind), intent(in) :: & + ns ! history file stream index + + integer (kind=int_kind) :: & + ns1 , & ! variable stream loop index + lenf ! length of namelist string + + character (len=40) :: stmp + + if (histfreq(ns) == 'x') then + call abort_ice("define_hist_fields has histfreq x") + endif + + if (ns == 1) id(:) = 0 + lenf = len(trim(vhistfreq)) + + do ns1 = 1, lenf + if (vhistfreq(ns1:ns1) == histfreq(ns)) then + + num_avail_hist_fields_tot = num_avail_hist_fields_tot + 1 + + if (vcoord(11:14) == 'time') then + num_avail_hist_fields_2D = num_avail_hist_fields_2D + 1 + elseif (vcoord(11:14) == 'NCAT' .and. vcoord(16:19) == 'time') then + num_avail_hist_fields_3Dc = num_avail_hist_fields_3Dc + 1 + elseif (vcoord(11:15) == 'VGRDi' .and. vcoord(17:20) == 'time') then + num_avail_hist_fields_3Dz = num_avail_hist_fields_3Dz + 1 + elseif (vcoord(11:15) == 'VGRDb' .and. vcoord(17:20) == 'time') then + num_avail_hist_fields_3Db = num_avail_hist_fields_3Db + 1 + elseif (vcoord(11:15) == 'VGRDi' .and. vcoord(17:20) == 'NCAT') then + num_avail_hist_fields_4Di = num_avail_hist_fields_4Di + 1 + elseif (vcoord(11:15) == 'VGRDs' .and. vcoord(17:20) == 'NCAT') then + num_avail_hist_fields_4Ds = num_avail_hist_fields_4Ds + 1 + elseif (vcoord(11:15) == 'VGRDb' .and. vcoord(17:20) == 'NCAT') then + num_avail_hist_fields_4Db = num_avail_hist_fields_4Db + 1 + endif + + if (num_avail_hist_fields_tot > max_avail_hist_fields) & + call abort_ice("Need to increase max_avail_hist_fields") + + if (num_avail_hist_fields_tot /= & + num_avail_hist_fields_2D + & + num_avail_hist_fields_3Dc + & + num_avail_hist_fields_3Dz + & + num_avail_hist_fields_3Db + & + num_avail_hist_fields_4Di + & + num_avail_hist_fields_4Ds + & + num_avail_hist_fields_4Db) & + call abort_ice("num_avail_hist_fields error") + + id(ns) = num_avail_hist_fields_tot + + stmp = vname + if (ns > 1) & + write(stmp,'(a,a1,a1)') trim(stmp),'_',vhistfreq(ns1:ns1) + + avail_hist_fields(id(ns))%vname = trim(stmp) + avail_hist_fields(id(ns))%vunit = trim(vunit) + avail_hist_fields(id(ns))%vcoord = trim(vcoord) + avail_hist_fields(id(ns))%vcellmeas = trim(vcellmeas) + avail_hist_fields(id(ns))%vdesc = trim(vdesc) + avail_hist_fields(id(ns))%vcomment = trim(vcomment) + avail_hist_fields(id(ns))%cona = cona + avail_hist_fields(id(ns))%conb = conb + avail_hist_fields(id(ns))%vhistfreq = vhistfreq(ns1:ns1) + avail_hist_fields(id(ns))%vhistfreq_n = histfreq_n(ns) + + endif + enddo + + end subroutine define_hist_field + +!======================================================================= + +! Accumulates a history field +! +! 2009 Created by D. Bailey following POP +! 2010 Generalized dimension of variables by N. 
Jeffery, E. Hunke + + subroutine accum_hist_field_2D(id, iblk, field_accum, field) + + use ice_blocks, only: block, get_block + use ice_calendar, only: nstreams + use ice_domain, only: blocks_ice + use ice_domain_size, only: max_nstrm + use ice_grid, only: tmask +#ifdef AusCOM + use ice_grid, only: umask +#endif + + integer (int_kind), dimension(max_nstrm), intent(in) :: & + id ! location in avail_fields array for use in + ! later routines + + integer (kind=int_kind), intent(in) :: iblk + + real (kind=dbl_kind), intent(in) :: & + field_accum(:,:) + + real (kind=dbl_kind), intent(inout) :: & + field(:,:,:,:) + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: i,j, ilo, ihi, jlo, jhi, ns, idns + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + do ns = 1, nstreams + idns = id(ns) + if (idns > 0) then + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi +#ifdef AusCOM + if (idns==n_uocn(ns).or.idns==n_vocn(ns)) then + if (umask(i,j,iblk)) then + field(i,j,idns, iblk) = field(i,j,idns, iblk) + field_accum(i,j) + endif + else + if (tmask(i,j,iblk)) then + field(i,j,idns, iblk) = field(i,j,idns, iblk) + field_accum(i,j) + endif + endif +#else + if (tmask(i,j,iblk)) then + field(i,j,idns, iblk) = field(i,j,idns, iblk) + field_accum(i,j) + endif +#endif + enddo + enddo + + endif + enddo + + end subroutine accum_hist_field_2D + +!======================================================================= + +! Accumulates a history field +! +! 2009 Created by D. Bailey following POP +! 2010 Generalized dimension of variables by N. Jeffery, E. Hunke + + subroutine accum_hist_field_3D(id, iblk, ndim, field_accum, field) + + use ice_blocks, only: block, get_block + use ice_calendar, only: nstreams + use ice_domain, only: blocks_ice + use ice_domain_size, only: max_nstrm + use ice_grid, only: tmask + + integer (int_kind), dimension(max_nstrm), intent(in) :: & + id ! location in avail_fields array for use in + ! later routines + + integer (kind=int_kind), intent(in) :: iblk + + integer (kind=int_kind), intent(in) :: & + ndim ! third dimension size + + real (kind=dbl_kind), intent(in) :: & + field_accum(:,:,:) + + real (kind=dbl_kind), intent(inout) :: & + field(:,:,:,:,:) + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: i,j,k, ilo, ihi, jlo, jhi, ns, idns + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + do ns = 1, nstreams + idns = id(ns) + if (idns > 0) then + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do k = 1, ndim + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + field(i,j,k,idns,iblk) = field(i,j,k,idns,iblk) + field_accum(i,j,k) + endif + enddo + enddo + enddo + + endif + enddo + + end subroutine accum_hist_field_3D + +!BX: + subroutine accum_hist_field_3D3D(id, iblk, ndim, field_accum, field) + + use ice_blocks, only: block, get_block + use ice_calendar, only: nstreams + use ice_domain, only: blocks_ice + use ice_domain_size, only: max_nstrm + use ice_grid, only: tmask + + integer (int_kind), dimension(max_nstrm), intent(in) :: & + id ! 
location in avail_fields array for use in + ! later routines + + integer (kind=int_kind), intent(in) :: iblk + + integer (kind=int_kind), intent(in) :: & + ndim ! third dimension size + + real (kind=dbl_kind), intent(in) :: & + field_accum(:,:,:) + + real (kind=dbl_kind), intent(inout) :: & + field(:,:,:,:,:) + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: i,j,k, ilo, ihi, jlo, jhi, ns, idns + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + do ns = 1, nstreams + idns = id(ns) + if (idns > 0) then + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do k = 1, ndim + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + field(i,j,k,idns,iblk) = field(i,j,k,idns,iblk) + field_accum(i,j,k) + endif + enddo + enddo + enddo + + endif + enddo + + end subroutine accum_hist_field_3D3D + +!======================================================================= + +! Accumulates a history field +! +! 2009 Created by D. Bailey following POP +! 2010 Generalized dimension of variables by N. Jeffery, E. Hunke + + subroutine accum_hist_field_4D(id, iblk, ndim3, ndim4, field_accum, field) + + use ice_blocks, only: block, get_block + use ice_calendar, only: nstreams + use ice_domain, only: blocks_ice + use ice_domain_size, only: max_nstrm + use ice_grid, only: tmask + + integer (int_kind), dimension(max_nstrm), intent(in) :: & + id ! location in avail_fields array for use in + ! later routines + + integer (kind=int_kind), intent(in) :: iblk + + integer (kind=int_kind), intent(in) :: & + ndim3 , & ! third dimension size + ndim4 ! fourth dimension size + + real (kind=dbl_kind), intent(in) :: & + field_accum(:,:,:,:) + + real (kind=dbl_kind), intent(inout) :: & + field(:,:,:,:,:,:) + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: i,j,k,n,ilo, ihi, jlo, jhi, ns, idns + + !--------------------------------------------------------------- + ! increment field + !--------------------------------------------------------------- + + do ns = 1, nstreams + idns = id(ns) + if (idns > 0) then + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do k = 1, ndim4 + do n = 1, ndim3 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + field(i,j,n,k,idns,iblk) = field(i,j,n,k,idns,iblk) + field_accum(i,j,n,k) + endif + enddo + enddo + enddo + enddo + + endif + enddo + + end subroutine accum_hist_field_4D + +!======================================================================= + + end module ice_history_shared + +!======================================================================= diff --git a/source/ice_init.F90 b/source/ice_init.F90 new file mode 100755 index 00000000..71f99edd --- /dev/null +++ b/source/ice_init.F90 @@ -0,0 +1,1754 @@ +! SVN:$Id: ice_init.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= + +! parameter and variable initializations +! +! authors Elizabeth C. Hunke and William H. Lipscomb, LANL +! C. M. Bitz, UW +! +! 2004 WHL: Block structure added +! 2006 ECH: Added namelist variables, warnings. +! Replaced old default initial ice conditions with 3.14 version. +! Converted to free source form (F90). 
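For orientation only, a minimal sketch of how the ice_history_shared interfaces defined above (define_hist_field and accum_hist_field_2D) are typically exercised; this is not part of the imported source, and the field name "example", the id array n_example, the work buffer work2d and the accumulation target a2D are illustrative assumptions:

      ! hypothetical registration at initialization: one id slot per output stream.
      ! vcoord relies on fixed character positions, e.g. columns 11:14 = 'time'
      ! selects the 2D counter in define_hist_field above.
      integer (int_kind), dimension(max_nstrm) :: n_example   ! illustrative field id
      real (kind=dbl_kind) :: work2d(nx_block,ny_block)       ! illustrative work array

      do ns = 1, nstreams
         call define_hist_field(n_example, "example", "1", "TLON TLAT time", &
                                "area: tarea", "an illustrative diagnostic",  &
                                "none", c1, c0, ns, "m")
      enddo

      ! hypothetical per-block accumulation each time step; values are added
      ! only where tmask (or umask for ocean-velocity fields) is true.
      call accum_hist_field_2D(n_example, iblk, work2d, a2D)
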
+ + module ice_init + + use ice_kinds_mod + use ice_communicate, only: my_task, master_task + + implicit none + save + + character(len=char_len_long) :: & + ice_ic ! method of ice cover initialization + ! 'default' => latitude and sst dependent + ! 'none' => no ice + ! note: restart = .true. overwrites + +!======================================================================= + + contains + +!======================================================================= + +! Namelist variables, set to default values; may be altered +! at run time +! +! author Elizabeth C. Hunke, LANL + + subroutine input_data + + use ice_age, only: restart_age + use ice_broadcast, only: broadcast_scalar, broadcast_array +!ars599: 24042015 for the namelist variables + use ice_constants, only: c0, c1, puny, dragio, & + awtvdr, awtidr, awtvdf, awtidf, Tocnfrz + use ice_diagnostics, only: diag_file, print_global, print_points, latpnt, lonpnt + use ice_domain_size, only: max_nstrm, nilyr, nslyr, max_ntrcr, ncat, n_aero + use ice_fileunits, only: nu_nml, nu_diag, nml_filename, diag_type, & + ice_stdout, get_fileunit, release_fileunit, bfbflag + use ice_fileunits, only: inst_suffix + use ice_calendar, only: year_init, istep0, histfreq, histfreq_n, & + dumpfreq, dumpfreq_n, diagfreq, nstreams, & + npt, dt, ndtd, days_per_year, use_leap_years, & + write_ic, dump_last + use ice_restart_shared, only: & + restart, restart_ext, restart_dir, restart_file, pointer_file, & + runid, runtype, use_restart_time, restart_format, lcdf64 + use ice_history_shared, only: hist_avg, history_dir, history_file, & + incond_dir, incond_file + use ice_exit, only: abort_ice + use ice_itd, only: kitd, kcatbound + use ice_ocean, only: oceanmixed_ice, tfrz_option + use ice_firstyear, only: restart_FY + use ice_flux, only: update_ocn_f, l_mpond_fresh + use ice_forcing, only: & + ycycle, fyear_init, dbug, & + atm_data_type, atm_data_dir, precip_units, & + atm_data_format, ocn_data_format, & + sss_data_type, sst_data_type, ocn_data_dir, & + oceanmixed_file, restore_sst, trestore + use ice_grid, only: grid_file, gridcpl_file, kmt_file, grid_type, grid_format + use ice_lvl, only: restart_lvl + use ice_mechred, only: kstrength, krdg_partic, krdg_redist, mu_rdg, Cf + use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve + use ice_shortwave, only: albicev, albicei, albsnowv, albsnowi, ahmax, & + shortwave, albedo_type, R_ice, R_pnd, & + R_snw, dT_mlt, rsnw_mlt, kalg +!ars599: 24092014 (CODE: petteri) +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + ! 
AusCOM specific namelist parameters + use ice_dyn_shared, only: cosw, sinw + use ice_shortwave, only: snowpatch, dT_mlt, dalb_mlt + use ice_therm_vertical, only: chio +#endif + use ice_atmo, only: atmbndy, calc_strair, formdrag, highfreq, natmiter + use ice_transport_driver, only: advection + use ice_state, only: tr_iage, tr_FY, tr_lvl, tr_pond, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_aero, & + nt_Tsfc, nt_qice, nt_qsno, nt_sice, nt_iage, nt_FY, & + nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, nt_aero, & + ntrcr + use ice_meltpond_cesm, only: restart_pond_cesm, hs0 + use ice_meltpond_topo, only: hp1, restart_pond_topo + use ice_meltpond_lvl, only: restart_pond_lvl, dpscale, frzpnd, & + rfracmin, rfracmax, pndaspect, hs1 + use ice_aerosol, only: restart_aero + use ice_therm_shared, only: ktherm, calc_Tsfc, conduct, cap_fluxes + use ice_therm_vertical, only: ustar_min, fbot_xfer_type, saltmax + use ice_therm_mushy, only: a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, & + dSdt_slow_mode, phi_c_slow_mode, & + phi_i_mushy + use ice_restoring, only: restore_ice +#ifdef CCSMCOUPLED + use shr_file_mod, only: shr_file_setIO +#endif + + ! local variables + + integer (kind=int_kind) :: & + nml_error, & ! namelist i/o error flag + n ! loop index + + character (len=6) :: chartmp + character (len=32) :: str + + logical :: exists + + real (kind=real_kind) :: rpcesm, rplvl, rptopo + + !----------------------------------------------------------------- + ! Namelist variables. + !----------------------------------------------------------------- + + namelist /setup_nml/ & + days_per_year, use_leap_years, year_init, istep0, & + dt, npt, ndtd, & + runtype, runid, bfbflag, & + ice_ic, restart, restart_dir, restart_file, & + restart_ext, use_restart_time, restart_format, lcdf64, & + pointer_file, dumpfreq, dumpfreq_n, dump_last, & + diagfreq, diag_type, diag_file, & + print_global, print_points, latpnt, lonpnt, & + dbug, histfreq, histfreq_n, hist_avg, & + history_dir, history_file, & + write_ic, incond_dir, incond_file + + namelist /grid_nml/ & + grid_format, grid_type, grid_file, kmt_file, & + kcatbound, gridcpl_file + + namelist /thermo_nml/ & + kitd, ktherm, conduct, & + a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, & +!ars599: 24092014 (CODE: petteri) +#ifdef AusCOM + chio, & +#endif + saltmax, dSdt_slow_mode, phi_c_slow_mode, phi_i_mushy + + namelist /dynamics_nml/ & + kdyn, ndte, revised_evp, yield_curve, & + advection, & +!ars599: 24092014 (CODE: petteri) +#ifdef AusCOM + cosw, sinw, dragio, & +#endif + kstrength, krdg_partic, krdg_redist, mu_rdg, & + Cf + + namelist /shortwave_nml/ & + shortwave, albedo_type, & + albicev, albicei, albsnowv, albsnowi, & +!ars599: 24092014 (CODE: petteri) +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + snowpatch, dalb_mlt, awtvdr, & + awtidr, awtvdf, awtidf, Tocnfrz, & +#endif + ahmax, R_ice, R_pnd, R_snw, & + dT_mlt, rsnw_mlt, kalg + + namelist /ponds_nml/ & + hs0, dpscale, frzpnd, & + rfracmin, rfracmax, pndaspect, hs1, & + hp1 + + namelist /forcing_nml/ & + atmbndy, fyear_init, ycycle, atm_data_format,& + atm_data_type, atm_data_dir, calc_strair, calc_Tsfc, & + precip_units, update_ocn_f, l_mpond_fresh, ustar_min, & + fbot_xfer_type, & + oceanmixed_ice, ocn_data_format, sss_data_type, sst_data_type, & + ocn_data_dir, oceanmixed_file, restore_sst, trestore, & + restore_ice, formdrag, highfreq, natmiter, & + tfrz_option, cap_fluxes + + namelist /tracer_nml/ & + tr_iage, restart_age, & + tr_FY, restart_FY, & + tr_lvl, restart_lvl, & + tr_pond_cesm, 
restart_pond_cesm, & + tr_pond_lvl, restart_pond_lvl, & + tr_pond_topo, restart_pond_topo, & + tr_aero, restart_aero + + !----------------------------------------------------------------- + ! default values + !----------------------------------------------------------------- + + days_per_year = 365 ! number of days in a year + use_leap_years= .false.! if true, use leap years (Feb 29) + year_init = 0 ! initial year + istep0 = 0 ! no. of steps taken in previous integrations, + ! real (dumped) or imagined (to set calendar) +#ifndef CCSMCOUPLED + dt = 3600.0_dbl_kind ! time step, s +#endif + npt = 99999 ! total number of time steps (dt) + diagfreq = 24 ! how often diag output is written + print_points = .false. ! if true, print point data + print_global = .true. ! if true, print global diagnostic data + bfbflag = .false. ! if true, do bit-for-bit computations + diag_type = 'stdout' + diag_file = 'ice_diag.d' + histfreq(1) = '1' ! output frequency option for different streams + histfreq(2) = 'h' ! output frequency option for different streams + histfreq(3) = 'd' ! output frequency option for different streams + histfreq(4) = 'm' ! output frequency option for different streams + histfreq(5) = 'y' ! output frequency option for different streams + histfreq_n(:) = 1 ! output frequency + hist_avg = .true. ! if true, write time-averages (not snapshots) + history_dir = './' ! write to executable dir for default + history_file = 'iceh' ! history file name prefix + write_ic = .false. ! write out initial condition + incond_dir = history_dir ! write to history dir for default + incond_file = 'iceh_ic'! file prefix + dumpfreq='y' ! restart frequency option + dumpfreq_n = 1 ! restart frequency + dump_last = .false. ! write restart on last time step + restart = .false. ! if true, read restart files for initialization + restart_dir = './' ! write to executable dir for default + restart_file = 'iced' ! restart file name prefix + restart_ext = .false. ! if true, read/write ghost cells + use_restart_time = .true. ! if true, use time info written in file + pointer_file = 'ice.restart_file' + restart_format = 'nc' ! file format ('bin'=binary or 'nc'=netcdf or 'pio') + lcdf64 = .false. ! 64 bit offset for netCDF + ice_ic = 'default' ! latitude and sst-dependent + grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) + grid_type = 'rectangular' ! define rectangular grid internally + grid_file = 'unknown_grid_file' + gridcpl_file = 'unknown_gridcpl_file' + kmt_file = 'unknown_kmt_file' + + kitd = 1 ! type of itd conversions (0 = delta, 1 = linear) + kcatbound = 1 ! category boundary formula (0 = old, 1 = new, etc) + kdyn = 1 ! type of dynamics (1 = evp, 2 = eap) + ndtd = 1 ! dynamic time steps per thermodynamic time step + ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte + revised_evp = .false. ! if true, use revised procedure for evp dynamics + yield_curve = 'ellipse' + kstrength = 1 ! 1 = Rothrock 75 strength, 0 = Hibler 79 + krdg_partic = 1 ! 1 = new participation, 0 = Thorndike et al 75 + krdg_redist = 1 ! 1 = new redistribution, 0 = Hibler 80 + mu_rdg = 3 ! e-folding scale of ridged ice, krdg_partic=1 (m^0.5) + Cf = 17.0_dbl_kind ! ratio of ridging work to PE change in ridging + advection = 'remap' ! incremental remapping transport scheme + shortwave = 'default' ! 'default' or 'dEdd' (delta-Eddington) + albedo_type = 'default'! or 'constant' + ktherm = 1 ! 0 = 0-layer, 1 = BL99, 2 = mushy thermo + saltmax = 3.2_dbl_kind ! 
maximum salinity at ice base (Weeks & Ackley 1986) + conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) + calc_Tsfc = .true. ! calculate surface temperature + cap_fluxes = .false. ! Check top conductive flux before sending it to the + ! thermo solver, and send some of the energy straight + ! to the bottom of the ice if it's likely to crash + ! the solver. + ! Only relevant if calc_Tsfc = .false. + update_ocn_f = .false. ! include fresh water and salt fluxes for frazil + ustar_min = 0.005 ! minimum friction velocity for ocean heat flux (m/s) + l_mpond_fresh = .false. ! logical switch for including meltpond freshwater + ! flux feedback to ocean model + fbot_xfer_type = 'constant' ! transfer coefficient type for ocn heat flux + R_ice = 0.00_dbl_kind ! tuning parameter for sea ice + R_pnd = 0.00_dbl_kind ! tuning parameter for ponded sea ice + R_snw = 1.50_dbl_kind ! tuning parameter for snow over sea ice + dT_mlt = 1.5_dbl_kind ! change in temp to give non-melt to melt change + ! in snow grain radius + rsnw_mlt = 1500._dbl_kind ! maximum melting snow grain radius + kalg = 0.60_dbl_kind ! algae absorption coefficient for 0.5 m thick layer + ! 0.5 m path of 75 mg Chl a / m2 + hp1 = 0.01_dbl_kind ! critical pond lid thickness for topo ponds + hs0 = 0.03_dbl_kind ! snow depth for transition to bare sea ice (m) + hs1 = 0.03_dbl_kind ! snow depth for transition to bare pond ice (m) + dpscale = c1 ! alter e-folding time scale for flushing + frzpnd = 'cesm' ! melt pond refreezing parameterization + rfracmin = 0.15_dbl_kind ! minimum retained fraction of meltwater + rfracmax = 0.85_dbl_kind ! maximum retained fraction of meltwater + pndaspect = 0.8_dbl_kind ! ratio of pond depth to area fraction + albicev = 0.78_dbl_kind ! visible ice albedo for h > ahmax + albicei = 0.36_dbl_kind ! near-ir ice albedo for h > ahmax + albsnowv = 0.98_dbl_kind ! cold snow albedo, visible + albsnowi = 0.70_dbl_kind ! cold snow albedo, near IR + ahmax = 0.3_dbl_kind ! thickness above which ice albedo is constant (m) +!ars599: 24092014 (CODE: petteri) +!#if defined(AusCOM) || defined(ACCICE) +! mark out dT_mlt +! 4 Jan 2007 BPB Following are appropriate for complete cloud +! in a summer polar atmosphere with 1.5m bare sea ice surface: +! .636/.364 vis/nir with only 0.5% direct for each band. +#ifdef AusCOM + snowpatch = 0.02_dbl_kind ! parameter for fractional snow area (m) + awtvdr = 0.00318_dbl_kind ! visible, direct ! for history and + awtidr = 0.00182_dbl_kind ! near IR, direct ! diagnostics + awtvdf = 0.63282_dbl_kind ! visible, diffuse + awtidf = 0.36218_dbl_kind ! near IR, diffuse + cosw = c1 ! cos(ocean turning angle) ! turning angle = 0 + sinw = c0 ! sin(ocean turning angle) ! turning angle = 0 +! dT_mlt = c1 ! change in temp to give dalb_mlt +! ! albedo change + dalb_mlt = -0.075_dbl_kind ! albedo change per dT_mlt change + ! in temp for ice + dragio = 0.00536_dbl_kind! ice-ocn drag coefficient + Tocnfrz = -1.8_dbl_kind ! freezing temp of seawater (C), + ! used as Tsfcn for open water + chio = 0.006_dbl_kind ! unitless param for basal heat flx ala McPhee and Maykut +#endif + atmbndy = 'default' ! or 'constant' + + fyear_init = 1900 ! first year of forcing cycle + ycycle = 1 ! number of years in forcing cycle + atm_data_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) + atm_data_type = 'default' + atm_data_dir = ' ' + calc_strair = .true. ! calculate wind stress + formdrag = .false. ! calculate form drag + highfreq = .false. ! calculate high frequency RASM coupling + natmiter = 5 ! 
number of iterations for atm boundary layer calcs + precip_units = 'mks' ! 'mm_per_month' or + ! 'mm_per_sec' = 'mks' = kg/m^2 s + tfrz_option = 'mushy' ! freezing temp formulation + oceanmixed_ice = .false. ! if true, use internal ocean mixed layer + ocn_data_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) + sss_data_type = 'default' + sst_data_type = 'default' + ocn_data_dir = ' ' + oceanmixed_file = 'unknown_oceanmixed_file' ! ocean forcing data + restore_sst = .false. ! restore sst if true + trestore = 90 ! restoring timescale, days (0 instantaneous) + restore_ice = .false. ! restore ice state on grid edges if true + dbug = .false. ! true writes diagnostics for input forcing + + latpnt(1) = 90._dbl_kind ! latitude of diagnostic point 1 (deg) + lonpnt(1) = 0._dbl_kind ! longitude of point 1 (deg) + latpnt(2) = -65._dbl_kind ! latitude of diagnostic point 2 (deg) + lonpnt(2) = -45._dbl_kind ! longitude of point 2 (deg) + +#ifndef CCSMCOUPLED + runid = 'unknown' ! run ID used in CESM and for machine 'bering' + runtype = 'initial' ! run type: 'initial', 'continue' +#endif + + ! extra tracers + tr_iage = .false. ! ice age + restart_age = .false. ! ice age restart + tr_FY = .false. ! ice age + restart_FY = .false. ! ice age restart + tr_lvl = .false. ! level ice + restart_lvl = .false. ! level ice restart + tr_pond_cesm = .false. ! CESM melt ponds + restart_pond_cesm = .false. ! melt ponds restart + tr_pond_lvl = .false. ! level-ice melt ponds + restart_pond_lvl = .false. ! melt ponds restart + tr_pond_topo = .false. ! explicit melt ponds (topographic) + restart_pond_topo = .false. ! melt ponds restart + tr_aero = .false. ! aerosols + restart_aero = .false. ! aerosols restart + + ! mushy layer gravity drainage physics + a_rapid_mode = 0.5e-3_dbl_kind ! channel radius for rapid drainage mode (m) + Rac_rapid_mode = 10.0_dbl_kind ! critical Rayleigh number + aspect_rapid_mode = 1.0_dbl_kind ! aspect ratio (larger is wider) + dSdt_slow_mode = -1.5e-7_dbl_kind ! slow mode drainage strength (m s-1 K-1) + phi_c_slow_mode = 0.05_dbl_kind ! critical liquid fraction porosity cutoff + phi_i_mushy = 0.85_dbl_kind ! liquid fraction of congelation ice + + !----------------------------------------------------------------- + ! 
read from input file + !----------------------------------------------------------------- + +#ifdef CCSMCOUPLED + nml_filename = 'ice_in'//trim(inst_suffix) +#endif + + call get_fileunit(nu_nml) + + if (my_task == master_task) then + open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + + do while (nml_error > 0) + print*,'Reading setup_nml' + read(nu_nml, nml=setup_nml,iostat=nml_error) + if (nml_error /= 0) exit + print*,'Reading grid_nml' + read(nu_nml, nml=grid_nml,iostat=nml_error) + if (nml_error /= 0) exit + print*,'Reading tracer_nml' + read(nu_nml, nml=tracer_nml,iostat=nml_error) + if (nml_error /= 0) exit + print*,'Reading thermo_nml' + read(nu_nml, nml=thermo_nml,iostat=nml_error) + if (nml_error /= 0) exit + print*,'Reading dynamics_nml' + read(nu_nml, nml=dynamics_nml,iostat=nml_error) + if (nml_error /= 0) exit + print*,'Reading shortwave_nml' + read(nu_nml, nml=shortwave_nml,iostat=nml_error) + if (nml_error /= 0) exit + print*,'Reading ponds_nml' + read(nu_nml, nml=ponds_nml,iostat=nml_error) + if (nml_error /= 0) exit + print*,'Reading forcing_nml' + read(nu_nml, nml=forcing_nml,iostat=nml_error) + if (nml_error /= 0) exit + end do + if (nml_error == 0) close(nu_nml) + endif + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + call abort_ice('ice: error reading namelist') + endif + call release_fileunit(nu_nml) + + !----------------------------------------------------------------- + ! set up diagnostics output and resolve conflicts + !----------------------------------------------------------------- + +#ifdef CCSMCOUPLED + ! Note in CCSMCOUPLED mode diag_file is not utilized and + ! runid and runtype are obtained from the driver, not from the namelist + + if (my_task == master_task) then + history_file = trim(runid) // ".cice" // trim(inst_suffix) //".h" + restart_file = trim(runid) // ".cice" // trim(inst_suffix) //".r" + incond_file = trim(runid) // ".cice" // trim(inst_suffix) //".i" + inquire(file='ice_modelio.nml'//trim(inst_suffix),exist=exists) + if (exists) then + call get_fileUnit(nu_diag) + call shr_file_setIO('ice_modelio.nml'//trim(inst_suffix),nu_diag) + end if + else + ! each task gets unique ice log filename when if test is true, for debugging + if (1 == 0) then + call get_fileUnit(nu_diag) + write(str,'(a,i4.4)') "ice.log.task_",my_task + open(nu_diag,file=str) + endif + end if + if (trim(ice_ic) /= 'default' .and. trim(ice_ic) /= 'none') then + restart = .true. + end if +#else + if (trim(diag_type) == 'file') call get_fileunit(nu_diag) +#endif + + if (my_task == master_task) then + if (trim(diag_type) == 'file') then + write(ice_stdout,*) 'Diagnostic output will be in file ',diag_file + open (nu_diag, file=diag_file, status='unknown') + endif + write(nu_diag,*) '--------------------------------' + write(nu_diag,*) ' CICE model diagnostic output ' + write(nu_diag,*) '--------------------------------' + write(nu_diag,*) ' ' + endif + + if (trim(runtype) == 'continue') restart = .true. + if (trim(runtype) /= 'continue' .and. (restart)) then + if (ice_ic == 'none' .or. ice_ic == 'default') then + if (my_task == master_task) then + write(nu_diag,*) & + 'WARNING: runtype, restart, ice_ic are inconsistent:' + write(nu_diag,*) trim(runtype), restart, trim(ice_ic) + write(nu_diag,*) & + 'WARNING: Need ice_ic = .' + write(nu_diag,*) & + 'WARNING: Initializing using ice_ic conditions' + endif + restart = .false. 
+ endif + endif + if (trim(runtype) == 'initial' .and. .not.(restart)) then + if (ice_ic /= 'none' .and. ice_ic /= 'default') then + if (my_task == master_task) then + write(nu_diag,*) & + 'WARNING: runtype, restart, ice_ic are inconsistent:' + write(nu_diag,*) trim(runtype), restart, trim(ice_ic) + write(nu_diag,*) & + 'WARNING: Initializing with NO ICE: ' + write(nu_diag,*) ' ' + endif + ice_ic = 'none' + endif + endif + +#ifndef ncdf + ! netcdf is unavailable + grid_format = 'bin' + atm_data_format = 'bin' + ocn_data_format = 'bin' +#endif + + chartmp = advection(1:6) + if (chartmp /= 'upwind' .and. chartmp /= 'remap ') advection = 'remap' + + if (ncat == 1 .and. kitd == 1) then + if (my_task == master_task) then + write (nu_diag,*) 'Remapping the ITD is not allowed for ncat=1.' + write (nu_diag,*) 'Use kitd = 0 (delta function ITD) with kcatbound = 0' + write (nu_diag,*) 'or for column configurations use kcatbound = -1' + call abort_ice('Error: kitd incompatability: ncat=1 and kitd=1') + endif + endif + + if (ncat /= 1 .and. kcatbound == -1) then + if (my_task == master_task) then + write (nu_diag,*) & + 'WARNING: ITD required for ncat > 1' + write (nu_diag,*) & + 'WARNING: Setting kitd and kcatbound to default values' + endif + kitd = 1 + kcatbound = 0 + endif + + if (kdyn == 2 .and. revised_evp) then + if (my_task == master_task) then + write (nu_diag,*) & + 'WARNING: revised_evp = T with EAP dynamics' + write (nu_diag,*) & + 'WARNING: Setting revised_evp = F' + endif + revised_evp = .false. + endif + + rpcesm = c0 + rplvl = c0 + rptopo = c0 + if (tr_pond_cesm) rpcesm = c1 + if (tr_pond_lvl ) rplvl = c1 + if (tr_pond_topo) rptopo = c1 + + tr_pond = .false. ! explicit melt ponds + if (rpcesm + rplvl + rptopo > puny) tr_pond = .true. + + if (rpcesm + rplvl + rptopo > c1 + puny) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: Must use only one melt pond scheme' + call abort_ice('ice: multiple melt pond schemes') + endif + endif + + if (tr_pond_lvl .and. .not. tr_lvl) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: tr_pond_lvl=T but tr_lvl=F' + write (nu_diag,*) 'WARNING: Setting tr_lvl=T' + endif + tr_lvl = .true. + endif + + if (tr_pond_lvl .and. abs(hs0) > puny) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: tr_pond_lvl=T and hs0/=0' + write (nu_diag,*) 'WARNING: Setting hs0=0' + endif + hs0 = c0 + endif + + if (tr_pond_cesm .and. trim(frzpnd) /= 'cesm') then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: tr_pond_cesm=T' + write (nu_diag,*) 'WARNING: frzpnd, dpscale not used' + endif + frzpnd = 'cesm' + endif + + if (trim(shortwave) /= 'dEdd' .and. tr_pond .and. calc_tsfc) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: Must use dEdd shortwave' + write (nu_diag,*) 'WARNING: with tr_pond and calc_tsfc=T.' + write (nu_diag,*) 'WARNING: Setting shortwave = dEdd' + endif + shortwave = 'dEdd' + endif + + if (tr_aero .and. n_aero==0) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: aerosols activated but' + write (nu_diag,*) 'WARNING: not allocated in tracer array.' + write (nu_diag,*) 'WARNING: Activate in compilation script.' + endif + call abort_ice('ice: aerosol tracer conflict: comp_ice, ice_in') + endif + + if (tr_aero .and. trim(shortwave) /= 'dEdd') then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: aerosols activated but dEdd' + write (nu_diag,*) 'WARNING: shortwave is not.' 
+ write (nu_diag,*) 'WARNING: Setting shortwave = dEdd' + endif + shortwave = 'dEdd' + endif + + rfracmin = min(max(rfracmin,c0),c1) + rfracmax = min(max(rfracmax,c0),c1) + + if (trim(atm_data_type) == 'monthly' .and. calc_strair) & + calc_strair = .false. + + if (ktherm == 2 .and. .not. calc_Tsfc) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: ktherm = 2 and calc_Tsfc = F' + write (nu_diag,*) 'WARNING: Setting calc_Tsfc = T' + endif + calc_Tsfc = .true. + endif + + if (cap_fluxes .and. calc_Tsfc) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: cap_fluxes = T and calc_Tsfc = T' + write (nu_diag,*) 'WARNING: cap_fluxes only valid when using UM-style coupling, i.e. calc_Tsfc=F' + write (nu_diag,*) 'WARNING: Setting cap_fluxes = F' + endif + cap_fluxes = .false. + endif + + if (ktherm == 1 .and. trim(tfrz_option) /= 'linear_salt') then + if (my_task == master_task) then + write (nu_diag,*) & + 'WARNING: ktherm = 1 and tfrz_option = ',trim(tfrz_option) + write (nu_diag,*) & + 'WARNING: For consistency, set tfrz_option = linear_salt' + endif + endif + + if (ktherm == 2 .and. trim(tfrz_option) /= 'mushy') then + if (my_task == master_task) then + write (nu_diag,*) & + 'WARNING: ktherm = 2 and tfrz_option = ',trim(tfrz_option) + write (nu_diag,*) & + 'WARNING: For consistency, set tfrz_option = mushy' + endif + endif + + if (trim(atm_data_type) == 'hadgem' .and. & + trim(precip_units) /= 'mks') then + if (my_task == master_task) & + write (nu_diag,*) & + 'WARNING: HadGEM atmospheric data chosen with wrong precip_units' + write (nu_diag,*) & + 'WARNING: Changing precip_units to mks (i.e. kg/m2 s).' + precip_units='mks' + endif + + if (formdrag) then + if (trim(atmbndy) == 'constant') then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: atmbndy = constant not allowed with formdrag' + write (nu_diag,*) 'WARNING: Setting atmbndy = default' + endif + atmbndy = 'default' + endif + + if (.not. calc_strair) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: formdrag=T but calc_strair=F' + write (nu_diag,*) 'WARNING: Setting calc_strair=T' + endif + calc_strair = .true. + endif + + if (tr_pond_cesm) then + if (my_task == master_task) then + write (nu_diag,*) 'ERROR: formdrag=T but frzpnd=''cesm''' + call abort_ice('ice_init: Formdrag and no hlid') + endif + endif + + if (.not. tr_lvl) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: formdrag=T but tr_lvl=F' + write (nu_diag,*) 'WARNING: Setting tr_lvl=T' + endif + tr_lvl = .true. + endif + endif + + if (trim(fbot_xfer_type) == 'Cdn_ocn' .and. .not. 
formdrag) then + if (my_task == master_task) then + write (nu_diag,*) 'WARNING: formdrag=F but fbot_xfer_type=Cdn_ocn' + write (nu_diag,*) 'WARNING: Setting fbot_xfer_type = constant' + endif + fbot_xfer_type = 'constant' + endif + + + call broadcast_scalar(days_per_year, master_task) + call broadcast_scalar(use_leap_years, master_task) + call broadcast_scalar(year_init, master_task) + call broadcast_scalar(istep0, master_task) + call broadcast_scalar(dt, master_task) + call broadcast_scalar(npt, master_task) + call broadcast_scalar(diagfreq, master_task) + call broadcast_scalar(print_points, master_task) + call broadcast_scalar(print_global, master_task) + call broadcast_scalar(bfbflag, master_task) + call broadcast_scalar(diag_type, master_task) + call broadcast_scalar(diag_file, master_task) + do n = 1, max_nstrm + call broadcast_scalar(histfreq(n), master_task) + enddo + call broadcast_array(histfreq_n, master_task) + call broadcast_scalar(hist_avg, master_task) + call broadcast_scalar(history_dir, master_task) + call broadcast_scalar(history_file, master_task) + call broadcast_scalar(write_ic, master_task) + call broadcast_scalar(incond_dir, master_task) + call broadcast_scalar(incond_file, master_task) + call broadcast_scalar(dumpfreq, master_task) + call broadcast_scalar(dumpfreq_n, master_task) + call broadcast_scalar(dump_last, master_task) + call broadcast_scalar(restart_file, master_task) + call broadcast_scalar(restart, master_task) + call broadcast_scalar(restart_dir, master_task) + call broadcast_scalar(restart_ext, master_task) + call broadcast_scalar(use_restart_time, master_task) + call broadcast_scalar(restart_format, master_task) + call broadcast_scalar(lcdf64, master_task) + call broadcast_scalar(pointer_file, master_task) + call broadcast_scalar(ice_ic, master_task) + call broadcast_scalar(grid_format, master_task) + call broadcast_scalar(grid_type, master_task) + call broadcast_scalar(grid_file, master_task) + call broadcast_scalar(gridcpl_file, master_task) + call broadcast_scalar(kmt_file, master_task) + call broadcast_scalar(kitd, master_task) + call broadcast_scalar(kcatbound, master_task) + call broadcast_scalar(kdyn, master_task) + call broadcast_scalar(ndtd, master_task) + call broadcast_scalar(ndte, master_task) + call broadcast_scalar(revised_evp, master_task) + call broadcast_scalar(yield_curve, master_task) + call broadcast_scalar(kstrength, master_task) + call broadcast_scalar(krdg_partic, master_task) + call broadcast_scalar(krdg_redist, master_task) + call broadcast_scalar(mu_rdg, master_task) + call broadcast_scalar(Cf, master_task) + call broadcast_scalar(advection, master_task) + call broadcast_scalar(shortwave, master_task) + call broadcast_scalar(albedo_type, master_task) + call broadcast_scalar(saltmax, master_task) + call broadcast_scalar(ktherm, master_task) + call broadcast_scalar(conduct, master_task) + call broadcast_scalar(R_ice, master_task) + call broadcast_scalar(R_pnd, master_task) + call broadcast_scalar(R_snw, master_task) + call broadcast_scalar(dT_mlt, master_task) + call broadcast_scalar(rsnw_mlt, master_task) + call broadcast_scalar(kalg, master_task) + call broadcast_scalar(hp1, master_task) + call broadcast_scalar(hs0, master_task) + call broadcast_scalar(hs1, master_task) + call broadcast_scalar(dpscale, master_task) + call broadcast_scalar(frzpnd, master_task) + call broadcast_scalar(rfracmin, master_task) + call broadcast_scalar(rfracmax, master_task) + call broadcast_scalar(pndaspect, master_task) + call 
broadcast_scalar(albicev, master_task) + call broadcast_scalar(albicei, master_task) + call broadcast_scalar(albsnowv, master_task) + call broadcast_scalar(albsnowi, master_task) + call broadcast_scalar(ahmax, master_task) +!ars599: 24032014 (CODE OZ-ICE) +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + call broadcast_scalar(snowpatch, master_task) + call broadcast_scalar(dT_mlt, master_task) + call broadcast_scalar(dalb_mlt, master_task) + call broadcast_scalar(awtvdr, master_task) + call broadcast_scalar(awtvdf, master_task) + call broadcast_scalar(awtidr, master_task) + call broadcast_scalar(awtidf, master_task) + call broadcast_scalar(cosw, master_task) + call broadcast_scalar(sinw, master_task) + call broadcast_scalar(dragio, master_task) + call broadcast_scalar(chio, master_task) + call broadcast_scalar(Tocnfrz, master_task) +#endif + call broadcast_scalar(atmbndy, master_task) + call broadcast_scalar(fyear_init, master_task) + call broadcast_scalar(ycycle, master_task) + call broadcast_scalar(atm_data_format, master_task) + call broadcast_scalar(atm_data_type, master_task) + call broadcast_scalar(atm_data_dir, master_task) + call broadcast_scalar(calc_strair, master_task) + call broadcast_scalar(calc_Tsfc, master_task) + call broadcast_scalar(cap_fluxes, master_task) + call broadcast_scalar(formdrag, master_task) + call broadcast_scalar(highfreq, master_task) + call broadcast_scalar(natmiter, master_task) + call broadcast_scalar(update_ocn_f, master_task) + call broadcast_scalar(l_mpond_fresh, master_task) + call broadcast_scalar(ustar_min, master_task) + call broadcast_scalar(fbot_xfer_type, master_task) + call broadcast_scalar(precip_units, master_task) + call broadcast_scalar(oceanmixed_ice, master_task) + call broadcast_scalar(tfrz_option, master_task) + call broadcast_scalar(ocn_data_format, master_task) + call broadcast_scalar(sss_data_type, master_task) + call broadcast_scalar(sst_data_type, master_task) + call broadcast_scalar(ocn_data_dir, master_task) + call broadcast_scalar(oceanmixed_file, master_task) + call broadcast_scalar(restore_sst, master_task) + call broadcast_scalar(trestore, master_task) + call broadcast_scalar(restore_ice, master_task) + call broadcast_scalar(dbug, master_task) + call broadcast_array (latpnt(1:2), master_task) + call broadcast_array (lonpnt(1:2), master_task) + call broadcast_scalar(runid, master_task) + call broadcast_scalar(runtype, master_task) + + if (dbug) & ! else only master_task writes to file + call broadcast_scalar(nu_diag, master_task) + + ! 
tracers + call broadcast_scalar(tr_iage, master_task) + call broadcast_scalar(restart_age, master_task) + call broadcast_scalar(tr_FY, master_task) + call broadcast_scalar(restart_FY, master_task) + call broadcast_scalar(tr_lvl, master_task) + call broadcast_scalar(restart_lvl, master_task) + call broadcast_scalar(tr_pond_cesm, master_task) + call broadcast_scalar(restart_pond_cesm, master_task) + call broadcast_scalar(tr_pond_lvl, master_task) + call broadcast_scalar(restart_pond_lvl, master_task) + call broadcast_scalar(tr_pond_topo, master_task) + call broadcast_scalar(restart_pond_topo, master_task) + call broadcast_scalar(tr_pond, master_task) + call broadcast_scalar(tr_aero, master_task) + call broadcast_scalar(restart_aero, master_task) + call broadcast_scalar(a_rapid_mode, master_task) + call broadcast_scalar(Rac_rapid_mode, master_task) + call broadcast_scalar(aspect_rapid_mode, master_task) + call broadcast_scalar(dSdt_slow_mode, master_task) + call broadcast_scalar(phi_c_slow_mode, master_task) + call broadcast_scalar(phi_i_mushy, master_task) + +#ifdef CCSMCOUPLED + pointer_file = trim(pointer_file) // trim(inst_suffix) +#endif + + !----------------------------------------------------------------- + ! spew + !----------------------------------------------------------------- + + if (my_task == master_task) then + + write(nu_diag,*) ' Document ice_in namelist parameters:' + write(nu_diag,*) ' ==================================== ' + write(nu_diag,*) ' ' + if (trim(runid) /= 'unknown') & + write(nu_diag,*) ' runid = ', & + trim(runid) + write(nu_diag,1030) ' runtype = ', & + trim(runtype) + write(nu_diag,1020) ' days_per_year = ', days_per_year + write(nu_diag,1010) ' use_leap_years = ', use_leap_years + write(nu_diag,1020) ' year_init = ', year_init + write(nu_diag,1020) ' istep0 = ', istep0 + write(nu_diag,1000) ' dt = ', dt + write(nu_diag,1020) ' npt = ', npt + write(nu_diag,1020) ' diagfreq = ', diagfreq + write(nu_diag,1010) ' print_global = ', print_global + write(nu_diag,1010) ' print_points = ', print_points + write(nu_diag,1010) ' bfbflag = ', bfbflag + write(nu_diag,1050) ' histfreq = ', histfreq(:) + write(nu_diag,1040) ' histfreq_n = ', histfreq_n(:) + write(nu_diag,1010) ' hist_avg = ', hist_avg + if (.not. hist_avg) write (nu_diag,*) 'History data will be snapshots' + write(nu_diag,*) ' history_dir = ', & + trim(history_dir) + write(nu_diag,*) ' history_file = ', & + trim(history_file) + if (write_ic) then + write (nu_diag,*) 'Initial condition will be written in ', & + trim(incond_dir) + endif + write(nu_diag,1030) ' dumpfreq = ', & + trim(dumpfreq) + write(nu_diag,1020) ' dumpfreq_n = ', dumpfreq_n + write(nu_diag,1010) ' dump_last = ', dump_last + write(nu_diag,1010) ' restart = ', restart + write(nu_diag,*) ' restart_dir = ', & + trim(restart_dir) + write(nu_diag,*) ' restart_ext = ', restart_ext + write(nu_diag,*) ' restart_format = ', & + trim(restart_format) + write(nu_diag,*) ' lcdf64 = ', & + lcdf64 + write(nu_diag,*) ' restart_file = ', & + trim(restart_file) + write(nu_diag,*) ' pointer_file = ', & + trim(pointer_file) + write(nu_diag,*) ' use_restart_time = ', use_restart_time + write(nu_diag,*) ' ice_ic = ', & + trim(ice_ic) + write(nu_diag,*) ' grid_type = ', & + trim(grid_type) + if (trim(grid_type) /= 'rectangular' .or. 
& + trim(grid_type) /= 'column') then + write(nu_diag,*) ' grid_file = ', & + trim(grid_file) + write(nu_diag,*) ' gridcpl_file = ', & + trim(gridcpl_file) + write(nu_diag,*) ' kmt_file = ', & + trim(kmt_file) + endif + write(nu_diag,1020) ' kitd = ', kitd + write(nu_diag,1020) ' kcatbound = ', & + kcatbound + write(nu_diag,1020) ' kdyn = ', kdyn + write(nu_diag,1020) ' ndtd = ', ndtd + write(nu_diag,1020) ' ndte = ', ndte + write(nu_diag,1010) ' revised_evp = ', & + revised_evp + if (kdyn == 1) & + write(nu_diag,*) ' yield_curve = ', & + trim(yield_curve) + write(nu_diag,1020) ' kstrength = ', kstrength + write(nu_diag,1020) ' krdg_partic = ', & + krdg_partic + write(nu_diag,1020) ' krdg_redist = ', & + krdg_redist + if (krdg_redist == 1) & + write(nu_diag,1000) ' mu_rdg = ', mu_rdg + if (kstrength == 1) & + write(nu_diag,1000) ' Cf = ', Cf + write(nu_diag,1030) ' advection = ', & + trim(advection) + write(nu_diag,1030) ' shortwave = ', & + trim(shortwave) + + if (trim(shortwave) == 'dEdd') then + write(nu_diag,1000) ' R_ice = ', R_ice + write(nu_diag,1000) ' R_pnd = ', R_pnd + write(nu_diag,1000) ' R_snw = ', R_snw + write(nu_diag,1000) ' dT_mlt = ', dT_mlt + write(nu_diag,1000) ' rsnw_mlt = ', rsnw_mlt + write(nu_diag,1000) ' kalg = ', kalg + write(nu_diag,1000) ' hp1 = ', hp1 + write(nu_diag,1000) ' hs0 = ', hs0 + else + write(nu_diag,1030) ' albedo_type = ', & + trim(albedo_type) + write(nu_diag,1000) ' albicev = ', albicev + write(nu_diag,1000) ' albicei = ', albicei + write(nu_diag,1000) ' albsnowv = ', albsnowv + write(nu_diag,1000) ' albsnowi = ', albsnowi + write(nu_diag,1000) ' ahmax = ', ahmax +!ars599: 24032014 (CODE OZ-ICE) +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + write(nu_diag,1005) ' snowpatch = ', snowpatch + write(nu_diag,1000) ' dT_mlt = ', dT_mlt + write(nu_diag,1000) ' dalb_mlt = ', dalb_mlt + write(nu_diag,1005) ' awtvdr = ', awtvdr + write(nu_diag,1005) ' awtidr = ', awtidr + write(nu_diag,1005) ' awtvdf = ', awtvdf + write(nu_diag,1005) ' awtidf = ', awtidf +#endif + endif + + write(nu_diag,1000) ' rfracmin = ', rfracmin + write(nu_diag,1000) ' rfracmax = ', rfracmax + if (tr_pond_lvl) then + write(nu_diag,1000) ' hs1 = ', hs1 + write(nu_diag,1000) ' dpscale = ', dpscale + write(nu_diag,1030) ' frzpnd = ', trim(frzpnd) + endif + if (tr_pond .and. .not. 
tr_pond_lvl) & + write(nu_diag,1000) ' pndaspect = ', pndaspect + + write(nu_diag,1020) ' ktherm = ', ktherm + write(nu_diag,1005) ' saltmax = ', saltmax + if (ktherm == 1) & + write(nu_diag,1030) ' conduct = ', conduct + if (ktherm == 2) then + write(nu_diag,1005) ' a_rapid_mode = ', a_rapid_mode + write(nu_diag,1005) ' Rac_rapid_mode = ', Rac_rapid_mode + write(nu_diag,1005) ' aspect_rapid_mode = ', aspect_rapid_mode + write(nu_diag,1005) ' dSdt_slow_mode = ', dSdt_slow_mode + write(nu_diag,1005) ' phi_c_slow_mode = ', phi_c_slow_mode + write(nu_diag,1005) ' phi_i_mushy = ', phi_i_mushy + endif + + write(nu_diag,1030) ' atmbndy = ', & + trim(atmbndy) + write(nu_diag,1010) ' formdrag = ', formdrag + write(nu_diag,1010) ' highfreq = ', highfreq + write(nu_diag,1020) ' natmiter = ', natmiter + write(nu_diag,1010) ' calc_strair = ', calc_strair + write(nu_diag,1010) ' calc_Tsfc = ', calc_Tsfc + write(nu_diag,1010) ' cap_fluxes = ', cap_fluxes + + write(nu_diag,1020) ' fyear_init = ', & + fyear_init + write(nu_diag,1020) ' ycycle = ', ycycle + write(nu_diag,*) ' atm_data_type = ', & + trim(atm_data_type) + if (trim(atm_data_type) /= 'default') then + write(nu_diag,*) ' atm_data_dir = ', & + trim(atm_data_dir) + write(nu_diag,*) ' precip_units = ', & + trim(precip_units) + endif + + write(nu_diag,1010) ' update_ocn_f = ', update_ocn_f + write(nu_diag,1010) ' l_mpond_fresh = ', l_mpond_fresh +!ars599: 24032014 (CODE OZ-ICE) +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + write(nu_diag,1005) ' Tocnfrz = ', Tocnfrz + write(nu_diag,1005) ' cosw = ', cosw + write(nu_diag,1005) ' sinw = ', sinw + write(nu_diag,1005) ' dragio = ', dragio + write(nu_diag,1005) ' chio = ', chio +#endif + write(nu_diag,1005) ' ustar_min = ', ustar_min + write(nu_diag, *) ' fbot_xfer_type = ', & + trim(fbot_xfer_type) + write(nu_diag,1010) ' oceanmixed_ice = ', & + oceanmixed_ice + write(nu_diag,*) ' tfrz_option = ', & + trim(tfrz_option) + if (trim(sss_data_type) == 'ncar' .or. & + trim(sst_data_type) == 'ncar') then + write(nu_diag,*) ' oceanmixed_file = ', & + trim(oceanmixed_file) + endif + write(nu_diag,*) ' sss_data_type = ', & + trim(sss_data_type) + write(nu_diag,*) ' sst_data_type = ', & + trim(sst_data_type) + if (trim(sss_data_type) /= 'default' .or. & + trim(sst_data_type) /= 'default') then + write(nu_diag,*) ' ocn_data_dir = ', & + trim(ocn_data_dir) + write(nu_diag,1010) ' restore_sst = ', & + restore_sst + endif + write(nu_diag,1010) ' restore_ice = ', & + restore_ice + if (restore_ice .or. restore_sst) & + write(nu_diag,1020) ' trestore = ', trestore + +#ifdef coupled + if( oceanmixed_ice ) then + write (nu_diag,*) 'WARNING WARNING WARNING WARNING ' + write (nu_diag,*) '*Coupled and oceanmixed flags are *' + write (nu_diag,*) '*BOTH ON. Ocean data received from*' + write (nu_diag,*) '*coupler will be altered by mixed *' + write (nu_diag,*) '*layer routine! *' + write (nu_diag,*) ' ' + endif +#endif + + write (nu_diag,*) ' ' + write (nu_diag,'(a30,2f8.2)') 'Diagnostic point 1: lat, lon =', & + latpnt(1), lonpnt(1) + write (nu_diag,'(a30,2f8.2)') 'Diagnostic point 2: lat, lon =', & + latpnt(2), lonpnt(2) + + ! 
tracers + write(nu_diag,1010) ' tr_iage = ', tr_iage + write(nu_diag,1010) ' restart_age = ', restart_age + write(nu_diag,1010) ' tr_FY = ', tr_FY + write(nu_diag,1010) ' restart_FY = ', restart_FY + write(nu_diag,1010) ' tr_lvl = ', tr_lvl + write(nu_diag,1010) ' restart_lvl = ', restart_lvl + write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm + write(nu_diag,1010) ' restart_pond_cesm = ', restart_pond_cesm + write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl + write(nu_diag,1010) ' restart_pond_lvl = ', restart_pond_lvl + write(nu_diag,1010) ' tr_pond_topo = ', tr_pond_topo + write(nu_diag,1010) ' restart_pond_topo = ', restart_pond_topo + write(nu_diag,1010) ' tr_aero = ', tr_aero + write(nu_diag,1010) ' restart_aero = ', restart_aero + + nt_Tsfc = 1 ! index tracers, starting with Tsfc = 1 + ntrcr = 1 ! count tracers, starting with Tsfc = 1 + + nt_qice = ntrcr + 1 + ntrcr = ntrcr + nilyr ! qice in nilyr layers + + nt_qsno = ntrcr + 1 + ntrcr = ntrcr + nslyr ! qsno in nslyr layers + + nt_sice = ntrcr + 1 + ntrcr = ntrcr + nilyr ! sice in nilyr layers + + nt_iage = 0 + if (tr_iage) then + ntrcr = ntrcr + 1 + nt_iage = ntrcr ! chronological ice age + endif + + nt_FY = 0 + if (tr_FY) then + ntrcr = ntrcr + 1 + nt_FY = ntrcr ! area of first year ice + endif + + nt_alvl = 0 + nt_vlvl = 0 + if (tr_lvl) then + ntrcr = ntrcr + 1 + nt_alvl = ntrcr + ntrcr = ntrcr + 1 + nt_vlvl = ntrcr + endif + + nt_apnd = 0 + nt_hpnd = 0 + nt_ipnd = 0 + if (tr_pond) then ! all explicit melt pond schemes + ntrcr = ntrcr + 1 + nt_apnd = ntrcr + ntrcr = ntrcr + 1 + nt_hpnd = ntrcr + if (tr_pond_lvl) then + ntrcr = ntrcr + 1 ! refrozen pond ice lid thickness + nt_ipnd = ntrcr ! on level-ice ponds (if frzpnd='hlid') + endif + if (tr_pond_topo) then + ntrcr = ntrcr + 1 ! + nt_ipnd = ntrcr ! refrozen pond ice lid thickness + endif + endif + + nt_aero = 0 + if (tr_aero) then + nt_aero = ntrcr + 1 + ntrcr = ntrcr + 4*n_aero ! 4 dEdd layers, n_aero species + endif + + if (ntrcr > max_ntrcr) then + write(nu_diag,*) 'max_ntrcr < number of namelist tracers' + write(nu_diag,*) 'max_ntrcr = ',max_ntrcr,' ntrcr = ',ntrcr + call abort_ice('max_ntrcr < number of namelist tracers') + endif + + write(nu_diag,*) ' ' + write(nu_diag,1020) 'ntrcr = ', ntrcr + write(nu_diag,*) ' ' + write(nu_diag,1020)'nt_sice = ', nt_sice + write(nu_diag,1020)'nt_qice = ', nt_qice + write(nu_diag,1020)'nt_qsno = ', nt_qsno + write(nu_diag,*)' ' + write(nu_diag,1020)'nilyr', nilyr + write(nu_diag,*)' ' + + 1000 format (a30,2x,f9.2) ! a30 to align formatted, unformatted statements + 1005 format (a30,2x,f9.6) ! float + 1010 format (a30,2x,l6) ! logical + 1020 format (a30,2x,i6) ! integer + 1030 format (a30, a8) ! character + 1040 format (a30,2x,6i6) ! integer + 1050 format (a30,2x,6a6) ! character + + write (nu_diag,*) ' ' + if (grid_type /= 'displaced_pole' .and. & + grid_type /= 'tripole' .and. & + grid_type /= 'column' .and. & + grid_type /= 'rectangular' .and. & + grid_type /= 'cpom_grid' .and. & + grid_type /= 'regional' .and. & + grid_type /= 'latlon' ) then + call abort_ice('ice_init: unknown grid_type') + endif + + endif ! 
my_task = master_task + + call broadcast_scalar(ntrcr, master_task) + call broadcast_scalar(nt_Tsfc, master_task) + call broadcast_scalar(nt_sice, master_task) + call broadcast_scalar(nt_qice, master_task) + call broadcast_scalar(nt_qsno, master_task) + call broadcast_scalar(nt_iage, master_task) + call broadcast_scalar(nt_FY, master_task) + call broadcast_scalar(nt_alvl, master_task) + call broadcast_scalar(nt_vlvl, master_task) + call broadcast_scalar(nt_apnd, master_task) + call broadcast_scalar(nt_hpnd, master_task) + call broadcast_scalar(nt_ipnd, master_task) + call broadcast_scalar(nt_aero, master_task) + + if (formdrag) then + if (nt_apnd==0) then + write(nu_diag,*)'ERROR: nt_apnd:',nt_apnd + call abort_ice ('formdrag: nt_apnd=0') + elseif (nt_hpnd==0) then + write(nu_diag,*)'ERROR: nt_hpnd:',nt_hpnd + call abort_ice ('formdrag: nt_hpnd=0') + elseif (nt_ipnd==0) then + write(nu_diag,*)'ERROR: nt_ipnd:',nt_ipnd + call abort_ice ('formdrag: nt_ipnd=0') + elseif (nt_alvl==0) then + write(nu_diag,*)'ERROR: nt_alvl:',nt_alvl + call abort_ice ('formdrag: nt_alvl=0') + elseif (nt_vlvl==0) then + write(nu_diag,*)'ERROR: nt_vlvl:',nt_vlvl + call abort_ice ('formdrag: nt_vlvl=0') + endif + endif + + end subroutine input_data + +!======================================================================= + +! Initialize state for the itd model +! +! authors: C. M. Bitz, UW +! William H. Lipscomb, LANL + + subroutine init_state + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_constants, only: c0 + use ice_domain, only: nblocks, blocks_ice + use ice_domain_size, only: nilyr, nslyr, max_ntrcr, n_aero + use ice_fileunits, only: nu_diag + use ice_flux, only: sst, Tf, Tair, salinz, Tmltz + use ice_grid, only: tmask, ULON, ULAT + use ice_state, only: trcr_depend, tr_iage, tr_FY, tr_lvl, & + tr_pond_cesm, nt_apnd, tr_pond_lvl, nt_alvl, tr_pond_topo, & + nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY, nt_vlvl, & + nt_hpnd, nt_ipnd, tr_aero, nt_aero, aicen, trcrn, vicen, vsnon, & + aice0, aice, vice, vsno, trcr, ntrcr, aice_init, bound_state + use ice_itd, only: aggregate + use ice_exit, only: abort_ice + use ice_therm_shared, only: ktherm, heat_capacity + + integer (kind=int_kind) :: & + ilo, ihi , & ! physical domain indices + jlo, jhi , & ! physical domain indices + iglob(nx_block), & ! global indices + jglob(ny_block), & ! global indices + k , & ! vertical index + it , & ! tracer index + iblk ! block index + + type (block) :: & + this_block ! block information for current block + + !----------------------------------------------------------------- + ! Check number of layers in ice and snow. 
+ !----------------------------------------------------------------- + + if (my_task == master_task) then + + if (nilyr < 1) then + write (nu_diag,*) 'nilyr =', nilyr + write (nu_diag,*) 'Must have at least one ice layer' + call abort_ice ('ice_init: Not enough ice layers') + endif + + if (nslyr < 1) then + write (nu_diag,*) 'nslyr =', nslyr + write (nu_diag,*) 'Must have at least one snow layer' + call abort_ice('ice_init: Not enough snow layers') + endif + + if (.not.heat_capacity) then + + write (nu_diag,*) 'WARNING - Zero-layer thermodynamics' + + if (nilyr > 1) then + write (nu_diag,*) 'nilyr =', nilyr + write (nu_diag,*) & + 'Must have nilyr = 1 if ktherm = 0' + call abort_ice('ice_init: Too many ice layers') + endif + + if (nslyr > 1) then + write (nu_diag,*) 'nslyr =', nslyr + write (nu_diag,*) & + 'Must have nslyr = 1 if heat_capacity = F' + call abort_ice('ice_init: Too many snow layers') + endif + + endif ! heat_capacity = F + + endif ! my_task + + !----------------------------------------------------------------- + ! Set tracer types + !----------------------------------------------------------------- + + trcr_depend(nt_Tsfc) = 0 ! ice/snow surface temperature + do k = 1, nilyr + trcr_depend(nt_sice + k - 1) = 1 ! volume-weighted ice salinity + trcr_depend(nt_qice + k - 1) = 1 ! volume-weighted ice enthalpy + enddo + do k = 1, nslyr + trcr_depend(nt_qsno + k - 1) = 2 ! volume-weighted snow enthalpy + enddo + if (tr_iage) trcr_depend(nt_iage) = 1 ! volume-weighted ice age + if (tr_FY) trcr_depend(nt_FY) = 0 ! area-weighted first-year ice area + if (tr_lvl) trcr_depend(nt_alvl) = 0 ! level ice area + if (tr_lvl) trcr_depend(nt_vlvl) = 1 ! level ice volume + if (tr_pond_cesm) then + trcr_depend(nt_apnd) = 0 ! melt pond area + trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth + endif + if (tr_pond_lvl) then + trcr_depend(nt_apnd) = 2+nt_alvl ! melt pond area + trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth + trcr_depend(nt_ipnd) = 2+nt_apnd ! refrozen pond lid + endif + if (tr_pond_topo) then + trcr_depend(nt_apnd) = 0 ! melt pond area + trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth + trcr_depend(nt_ipnd) = 2+nt_apnd ! refrozen pond lid + endif + if (tr_aero) then ! volume-weighted aerosols + do it = 1, n_aero + trcr_depend(nt_aero+(it-1)*4 ) = 2 ! snow + trcr_depend(nt_aero+(it-1)*4+1) = 2 ! snow + trcr_depend(nt_aero+(it-1)*4+2) = 1 ! ice + trcr_depend(nt_aero+(it-1)*4+3) = 1 ! ice + enddo + endif + + !----------------------------------------------------------------- + ! Set state variables + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & + !$OMP iglob,jglob) + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + iglob = this_block%i_glob + jglob = this_block%j_glob + + call set_state_var (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + iglob, jglob, & + ice_ic, tmask(:,:, iblk), & + ULON (:,:, iblk), ULAT (:,:, iblk), & + Tair (:,:, iblk), sst (:,:, iblk), & + Tf (:,:, iblk), & + salinz(:,:,:, iblk), Tmltz(:,:,:, iblk), & + aicen(:,:, :,iblk), trcrn(:,:,:,:,iblk), & + vicen(:,:, :,iblk), vsnon(:,:, :,iblk)) + + enddo ! iblk + !$OMP END PARALLEL DO + + !----------------------------------------------------------------- + ! 
ghost cell updates + !----------------------------------------------------------------- + + call bound_state (aicen, trcrn, & + vicen, vsnon) + + !----------------------------------------------------------------- + ! compute aggregate ice state and open water area + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,it) + do iblk = 1, nblocks + + aice(:,:,iblk) = c0 + vice(:,:,iblk) = c0 + vsno(:,:,iblk) = c0 + do it = 1, max_ntrcr + trcr(:,:,it,iblk) = c0 + enddo + + call aggregate (nx_block, ny_block, & + aicen(:,:,:,iblk), & + trcrn(:,:,1:ntrcr,:,iblk), & + vicen(:,:,:,iblk), & + vsnon(:,:,:,iblk), & + aice (:,:, iblk), & + trcr (:,:,1:ntrcr,iblk), & + vice (:,:, iblk), & + vsno (:,:, iblk), & + aice0(:,:, iblk), & + tmask(:,:, iblk), & + ntrcr, & + trcr_depend(1:ntrcr)) + + aice_init(:,:,iblk) = aice(:,:,iblk) + + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine init_state + +!======================================================================= + +! Initialize state in each ice thickness category +! +! authors: C. M. Bitz +! William H. Lipscomb, LANL + + subroutine set_state_var (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + iglob, jglob, & + ice_ic, tmask, & + ULON, ULAT, & + Tair, sst, & + Tf, & + salinz, Tmltz, & + aicen, trcrn, & + vicen, vsnon) + + use ice_constants, only: c0, c1, c2, c3, p2, p5, rhoi, rhos, Lfresh, & + cp_ice, cp_ocn, Tsmelt, Tffresh, rad_to_deg, puny + use ice_domain_size, only: nilyr, nslyr, nx_global, ny_global, max_ntrcr, ncat + use ice_state, only: nt_Tsfc, nt_qice, nt_qsno, nt_sice, & + nt_fbri, tr_brine, tr_lvl, nt_alvl, nt_vlvl + use ice_itd, only: hin_max + use ice_therm_mushy, only: & + enthalpy_mush, & + liquidus_temperature_mush, & + temperature_mush + use ice_therm_shared, only: heat_capacity, calc_Tsfc, ktherm + use ice_grid, only: grid_type + use ice_forcing, only: atm_data_type + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo, ihi , & ! physical domain indices + jlo, jhi , & ! + iglob(nx_block) , & ! global indices + jglob(ny_block) ! + + character(len=char_len_long), intent(in) :: & + ice_ic ! method of ice cover initialization + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! true for ice/ocean cells + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + ULON , & ! latitude of velocity pts (radians) + ULAT ! latitude of velocity pts (radians) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tair , & ! air temperature (K) + Tf , & ! freezing temperature (C) + sst ! sea surface temperature (C) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(in) :: & + salinz , & ! initial salinity profile + Tmltz ! initial melting temperature profile + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(out) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_ntrcr,ncat), & + intent(out) :: & + trcrn ! ice tracers + ! 1: surface temperature of ice/snow (C) + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k , & ! ice layer index + n , & ! thickness category index + it , & ! tracer index + icells ! 
number of cells initialized with ice + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind) :: & + slope, Ti, sum, hbar, & + ainit(ncat), & + hinit(ncat) + + real (kind=dbl_kind), parameter :: & + hsno_init = 0.20_dbl_kind , & ! initial snow thickness (m) + edge_init_nh = 70._dbl_kind, & ! initial ice edge, N.Hem. (deg) + edge_init_sh = -60._dbl_kind ! initial ice edge, S.Hem. (deg) + + indxi(:) = 0 + indxj(:) = 0 + + ! Initialize state variables. + ! If restarting, these values are overwritten. + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + aicen(i,j,n) = c0 + vicen(i,j,n) = c0 + vsnon(i,j,n) = c0 + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + if (max_ntrcr >= 2) then + do it = 2, max_ntrcr + trcrn(i,j,it,n) = c0 + enddo + endif + if (tr_lvl) trcrn(i,j,nt_alvl,n) = c1 + if (tr_lvl) trcrn(i,j,nt_vlvl,n) = c1 + if (tr_brine) trcrn(i,j,nt_fbri,n) = c1 + do k = 1, nilyr + trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) + enddo + do k = 1, nslyr + trcrn(i,j,nt_qsno+k-1,n) = -rhos * Lfresh + enddo + enddo + enddo + enddo + + if (trim(ice_ic) == 'default') then + + !----------------------------------------------------------------- + ! Place ice where ocean surface is cold. + ! Note: If SST is not read from a file, then the ocean is assumed + ! to be at its freezing point everywhere, and ice will + ! extend to the prescribed edges. + !----------------------------------------------------------------- + + if (trim(atm_data_type) == 'box') then + + hbar = c2 ! initial ice thickness + do n = 1, ncat + hinit(n) = c0 + ainit(n) = c0 + if (hbar > hin_max(n-1) .and. hbar < hin_max(n)) then + hinit(n) = hbar + ainit(n) = 0.50 !echmod symm + endif + enddo + + else + + ! initial category areas in cells with ice + hbar = c3 ! initial ice thickness with greatest area + ! Note: the resulting average ice thickness + ! tends to be less than hbar due to the + ! nonlinear distribution of ice thicknesses + sum = c0 + do n = 1, ncat + if (n < ncat) then + hinit(n) = p5*(hin_max(n-1) + hin_max(n)) ! m + else ! n=ncat + hinit(n) = (hin_max(n-1) + c1) ! m + endif + ! parabola, max at h=hbar, zero at h=0, 2*hbar + ainit(n) = max(c0, (c2*hbar*hinit(n) - hinit(n)**2)) + sum = sum + ainit(n) + enddo + do n = 1, ncat + ainit(n) = ainit(n) / (sum + puny/ncat) ! normalize + enddo + + endif ! atm_data_type + + if (trim(grid_type) == 'rectangular') then + + ! place ice on left side of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + if (ULON(i,j) < -50./rad_to_deg) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! ULON + endif ! tmask + enddo ! i + enddo ! j + + else + + ! place ice at high latitudes where ocean sfc is cold + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + ! place ice in high latitudes where ocean sfc is cold +#ifdef AusCOM + if ( (sst (i,j) <= Tf(i,j)+c1) .and. & +#else + if ( (sst (i,j) <= Tf(i,j)+p2) .and. & +#endif + (ULAT(i,j) < edge_init_sh/rad_to_deg .or. & + ULAT(i,j) > edge_init_nh/rad_to_deg) ) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! cold surface + endif ! tmask + enddo ! i + enddo ! j + + endif ! rectgrid + + do n = 1, ncat + + ! ice volume, snow volume +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + aicen(i,j,n) = ainit(n) + + if (trim(atm_data_type) == 'box') then + if (hinit(n) > c0) then +! ! 
constant slope from 0 to 1 in x direction +! aicen(i,j,n) = (real(iglob(i), kind=dbl_kind)-p5) & +! / (real(nx_global,kind=dbl_kind)) +! ! constant slope from 0 to 0.5 in x direction +! aicen(i,j,n) = (real(iglob(i), kind=dbl_kind)-p5) & +! / (real(nx_global,kind=dbl_kind)) * p5 + ! quadratic +! aicen(i,j,n) = max(c0,(real(iglob(i), kind=dbl_kind)-p5) & +! / (real(nx_global,kind=dbl_kind)) & +! * (real(jglob(j), kind=dbl_kind)-p5) & +! / (real(ny_global,kind=dbl_kind)) * p5) + aicen(i,j,n) = max(c0,(real(nx_global, kind=dbl_kind) & + - real(iglob(i), kind=dbl_kind)-p5) & + / (real(nx_global,kind=dbl_kind)) & + * (real(ny_global, kind=dbl_kind) & + - real(jglob(j), kind=dbl_kind)-p5) & + / (real(ny_global,kind=dbl_kind)) * p5) + endif + vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m + else + vicen(i,j,n) = hinit(n) * ainit(n) ! m + endif + vsnon(i,j,n) = min(aicen(i,j,n)*hsno_init,p2*vicen(i,j,n)) + if (tr_brine) trcrn(i,j,nt_fbri,n) = c1 + enddo ! ij + + ! surface temperature + if (calc_Tsfc) then + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,nt_Tsfc,n) = min(Tsmelt, Tair(i,j) - Tffresh) !deg C + enddo + + else ! Tsfc is not calculated by the ice model + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! not used + enddo + + endif ! calc_Tsfc + + ! other tracers + + if (heat_capacity) then + + ! ice enthalpy, salinity + do k = 1, nilyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! assume linear temp profile and compute enthalpy + slope = Tf(i,j) - trcrn(i,j,nt_Tsfc,n) + Ti = trcrn(i,j,nt_Tsfc,n) & + + slope*(real(k,kind=dbl_kind)-p5) & + /real(nilyr,kind=dbl_kind) + + if (ktherm == 2) then + ! enthalpy + trcrn(i,j,nt_qice+k-1,n) = & + enthalpy_mush(Ti, salinz(i,j,k)) + else + trcrn(i,j,nt_qice+k-1,n) = & + -(rhoi * (cp_ice*(Tmltz(i,j,k)-Ti) & + + Lfresh*(c1-Tmltz(i,j,k)/Ti) - cp_ocn*Tmltz(i,j,k))) + endif + + ! salinity + trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) + enddo ! ij + enddo ! nilyr + + ! snow enthalpy + do k = 1, nslyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + Ti = min(c0, trcrn(i,j,nt_Tsfc,n)) + trcrn(i,j,nt_qsno+k-1,n) = -rhos*(Lfresh - cp_ice*Ti) + + enddo ! ij + enddo ! nslyr + + else ! one layer with zero heat capacity + + ! ice energy + k = 1 + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,nt_qice+k-1,n) = -rhoi * Lfresh + trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) + enddo ! ij + + ! snow energy + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,nt_qsno+k-1,n) = -rhos * Lfresh + enddo ! ij + + endif ! heat_capacity + enddo ! ncat + endif ! ice_ic + + end subroutine set_state_var + +!======================================================================= + + end module ice_init + +!======================================================================= diff --git a/source/ice_itd.F90 b/source/ice_itd.F90 new file mode 100755 index 00000000..f1b7205c --- /dev/null +++ b/source/ice_itd.F90 @@ -0,0 +1,2632 @@ +! SVN:$Id: ice_itd.F90 936 2015-03-17 15:46:44Z eclare $ +!======================================================================= + +! Routines to initialize the ice thickness distribution and +! utilities to redistribute ice among categories. These routines +! are not specific to a particular numerical implementation. +! +! See Bitz, C.M., and W.H. Lipscomb, 1999: +! An energy-conserving thermodynamic model of sea ice, +! J. Geophys. Res., 104, 15,669--15,677. +! +! See Bitz, C.M., M.M. Holland, A.J. Weaver, M. Eby, 2001: +! 
Simulating the ice-thickness distribution in a climate model, +! J. Geophys. Res., 106, 2441--2464. +! +! authors: C. M. Bitz, UW +! William H. Lipscomb and Elizabeth C. Hunke, LANL +! +! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb +! +! 2004 WHL: Added multiple snow layers, block structure, cleanup_itd +! 2006 ECH: Added WMO standard ice thickness categories as kcatbound=2 +! Streamlined for efficiency +! Converted to free source form (F90) + + module ice_itd + + use ice_kinds_mod + use ice_constants + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: ncat, max_aero, nilyr, nslyr, n_aero, nblyr + use ice_fileunits, only: nu_diag + + implicit none + save + + private + public :: aggregate_area, shift_ice, column_sum, column_conservation_check, & + aggregate, compute_tracers, init_itd, cleanup_itd, reduce_area + + integer (kind=int_kind), public :: & + kitd , & ! type of itd conversions + ! 0 = delta function + ! 1 = linear remap + kcatbound ! 0 = old category boundary formula + ! 1 = new formula giving round numbers + ! 2 = WMO standard + + real (kind=dbl_kind), public :: & + hi_min , & ! minimum ice thickness allowed (m) + hs_min ! minimum snow thickness allowed (m) + + real (kind=dbl_kind), public :: & + aicenmin ! AEW: This variable will replace puny as the min ice conc to + ! allow when zap_small_areas is called. Set equal to puny + ! if zerolayers or standard coupling, set equal to aicenmin_ml + ! if multilayers AND UM-style coupling + + real (kind=dbl_kind), public :: & + hin_max(0:ncat) ! category limits (m) + + character (len=35) :: c_hi_range(ncat) + +!------------------------------------------------------------------- +! a note regarding hi_min and hin_max(0): +! both represent a minimum ice thickness. hin_max(0) is +! intended to be used for particular numerical implementations +! of category conversions in the ice thickness distribution. +! hi_min is a more general purpose parameter, but is specifically +! for maintaining stability in the thermodynamics. +! hin_max(0) = 0.1 m for the delta function itd +! hin_max(0) = 0.0 m for linear remapping +! +! Also note that the upper limit on the thickest category +! is only used for the linear remapping scheme +! and it is not a true upper limit on the thickness +!------------------------------------------------------------------- + +!======================================================================= + + contains + +!======================================================================= + +! Initialize area fraction and thickness boundaries for the itd model +! +! authors: William H. Lipscomb and Elizabeth C. Hunke, LANL +! C. M. Bitz, UW + + subroutine init_itd (calc_Tsfc, heat_capacity) ! Alex West: added these two arguments + ! as per Alison's changes. + ! Needed to control setting of + ! aicenmin for use in + ! zap_small_areas + + logical (kind=log_kind), intent(in) :: & + calc_Tsfc, & ! If T, calculate surface temp + heat_capacity ! If T, ice had nonzero heat capacity + + integer (kind=int_kind) :: & + n ! thickness category index + + real (kind=dbl_kind) :: & + cc1, cc2, cc3, & ! parameters for kcatbound = 0 + x1 , & + rn , & ! real(n) + rncat , & ! real(ncat) + d1 , & ! parameters for kcatbound = 1 (m) + d2 + + real (kind=dbl_kind), dimension(5) :: wmo5 ! data for wmo itd + real (kind=dbl_kind), dimension(6) :: wmo6 ! data for wmo itd + real (kind=dbl_kind), dimension(7) :: wmo7 ! 
data for wmo itd + + character(len=8) :: c_hinmax1,c_hinmax2 + character(len=2) :: c_nc + + rncat = real(ncat, kind=dbl_kind) + d1 = 3.0_dbl_kind / rncat + d2 = 0.5_dbl_kind / rncat + + hi_min = p01 ! minimum ice thickness allowed (m) for thermo + ! note hi_min is reset to 0.1 for kitd=0, below + + !----------------------------------------------------------------- + ! Choose category boundaries based on one of four options. + ! + ! The first formula (kcatbound = 0) was used in Lipscomb (2001) + ! and in CICE versions 3.0 and 3.1. + ! + ! The second formula is more user-friendly in the sense that it + ! is easy to obtain round numbers for category boundaries: + ! + ! H(n) = n * [d1 + d2*(n-1)] + ! + ! Default values are d1 = 300/ncat, d2 = 50/ncat. + ! For ncat = 5, boundaries in cm are 60, 140, 240, 360, which are + ! close to the standard values given by the first formula. + ! For ncat = 10, boundaries in cm are 30, 70, 120, 180, 250, 330, + ! 420, 520, 630. + ! + ! The third option provides support for World Meteorological + ! Organization classification based on thickness. The full + ! WMO thickness distribution is used if ncat = 7; if ncat=5 + ! or ncat = 6, some of the thinner categories are combined. + ! For ncat = 5, boundaries are 30, 70, 120, 200, >200 cm. + ! For ncat = 6, boundaries are 15, 30, 70, 120, 200, >200 cm. + ! For ncat = 7, boundaries are 10, 15, 30, 70, 120, 200, >200 cm. + ! + ! kcatbound=-1 is available only for 1-category runs, with + ! boundaries 0 and 100 m. + !----------------------------------------------------------------- + + if (kcatbound == -1) then ! single category + hin_max(0) = c0 + hin_max(1) = c100 + + elseif (kcatbound == 0) then ! original scheme + + if (kitd == 1) then + ! linear remapping itd category limits + cc1 = c3/rncat + cc2 = c15*cc1 + cc3 = c3 + + hin_max(0) = c0 ! minimum ice thickness, m + else + ! delta function itd category limits + hi_min = p1 ! minimum ice thickness allowed (m) for thermo + cc1 = max(1.1_dbl_kind/rncat,c1*hi_min) + cc2 = c25*cc1 + cc3 = 2.25_dbl_kind + + ! hin_max(0) should not be zero + ! use some caution in making it less than 0.10 + hin_max(0) = hi_min ! minimum ice thickness, m + endif ! kitd + + do n = 1, ncat + x1 = real(n-1,kind=dbl_kind) / rncat + hin_max(n) = hin_max(n-1) & + + cc1 + cc2*(c1 + tanh(cc3*(x1-c1))) + enddo + + elseif (kcatbound == 1) then ! new scheme + + hin_max(0) = c0 + do n = 1, ncat + rn = real(n, kind=dbl_kind) + hin_max(n) = rn * (d1 + (rn-c1)*d2) + enddo + + elseif (kcatbound == 2) then ! WMO standard + + if (ncat == 5) then + ! thinnest 3 categories combined + data wmo5 / 0.30_dbl_kind, 0.70_dbl_kind, & + 1.20_dbl_kind, 2.00_dbl_kind, & + 999._dbl_kind / + hin_max(0) = c0 + do n = 1, ncat + hin_max(n) = wmo5(n) + enddo + elseif (ncat == 6) then + ! thinnest 2 categories combined + data wmo6 / 0.15_dbl_kind, & + 0.30_dbl_kind, 0.70_dbl_kind, & + 1.20_dbl_kind, 2.00_dbl_kind, & + 999._dbl_kind / + + hin_max(0) = c0 + do n = 1, ncat + hin_max(n) = wmo6(n) + enddo + elseif (ncat == 7) then + ! all thickness categories + data wmo7 / 0.10_dbl_kind, 0.15_dbl_kind, & + 0.30_dbl_kind, 0.70_dbl_kind, & + 1.20_dbl_kind, 2.00_dbl_kind, & + 999._dbl_kind / + hin_max(0) = c0 + do n = 1, ncat + hin_max(n) = wmo7(n) + enddo + else + write (nu_diag,*) 'kcatbound=2 (WMO) must have ncat=5, 6 or 7' + stop + endif + + endif ! kcatbound + + ! AEW: (based on Alison McLaren's vn4 modifications) Set a higher value + ! of aicenmin if we're using multilayers with UM-style coupling. + ! 
Also allow higher values of hi_min, hs_min to be set (this is a + ! bit ad-hoc). + ! Set aicenmin - this may want to be done via a namelist in future + !----------------------------------------------------------------- + + if (heat_capacity) then + ! Set higher values to help with stability + aicenmin = aicenmin_ml ! 1.e-5. Changed from 1.e-2 + hi_min = p2 ! 0.2m + hs_min = p1 ! 0.1m + else + aicenmin = puny ! Standard CICE setting + endif + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'hin_max(n-1) < Cat n < hin_max(n)' + do n = 1, ncat + write (nu_diag,*) hin_max(n-1),' < Cat ',n, ' < ',hin_max(n) + ! Write integer n to character string + write (c_nc, '(i2)') n + + ! Write hin_max to character string + write (c_hinmax1, '(f6.3)') hin_max(n-1) + write (c_hinmax2, '(f6.3)') hin_max(n) + + ! Save character string to write to history file + c_hi_range(n)=c_hinmax1//'m < hi Cat '//c_nc//' < '//c_hinmax2//'m' + enddo + write (nu_diag,*) ' ' + endif + + end subroutine init_itd + +!======================================================================= + +! Aggregate ice state variables over thickness categories. +! +! authors: C. M. Bitz, UW +! W. H. Lipscomb, LANL + + subroutine aggregate (nx_block, ny_block, & + aicen, trcrn, & + vicen, vsnon, & + aice, trcr, & + vice, vsno, & + aice0, tmask, & + ntrcr, trcr_depend) + + use ice_state, only: nt_apnd, nt_alvl, nt_fbri, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ntrcr ! number of tracers in use + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(in) :: & + trcrn ! ice tracers + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + integer (kind=int_kind), dimension (ntrcr), intent(in) :: & + trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + aice , & ! concentration of ice + vice , & ! volume per unit area of ice (m) + vsno , & ! volume per unit area of snow (m) + aice0 ! concentration of open water + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr), & + intent(out) :: & + trcr ! ice tracers + + ! local variables + + integer (kind=int_kind) :: & + icells ! number of ocean/ice cells + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, & ! compressed indices in i/j directions + indxj + + integer (kind=int_kind) :: & + i, j, n, it, & ! loop indices + ij ! combined i/j horizontal index + + real (kind=dbl_kind), dimension (:,:), allocatable :: & + atrcr ! sum of aicen*trcrn or vicen*trcrn or vsnon*trcrn + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + icells = 0 + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! tmask + + aice0(i,j) = c1 + aice (i,j) = c0 + vice (i,j) = c0 + vsno (i,j) = c0 + enddo + enddo + + if (icells > 0) then + + allocate (atrcr(icells,ntrcr)) + + !----------------------------------------------------------------- + ! 
Aggregate + !----------------------------------------------------------------- + + atrcr(:,:) = c0 + + do n = 1, ncat + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + aice(i,j) = aice(i,j) + aicen(i,j,n) + vice(i,j) = vice(i,j) + vicen(i,j,n) + vsno(i,j) = vsno(i,j) + vsnon(i,j,n) + enddo ! ij + + do it = 1, ntrcr + if (trcr_depend(it) == 0) then ! ice area tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcr(ij,it) = atrcr(ij,it) & + + trcrn(i,j,it,n)*aicen(i,j,n) + enddo ! ij + + elseif (trcr_depend(it) == 1) then ! ice volume tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcr(ij,it) = atrcr(ij,it) & + + trcrn(i,j,it,n)*vicen(i,j,n) + enddo ! ij + + elseif (trcr_depend(it) == 2) then ! snow volume tracer + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcr(ij,it) = atrcr(ij,it) & + + trcrn(i,j,it,n)*vsnon(i,j,n) + enddo ! ij + + elseif (trcr_depend(it) == 2+nt_alvl) then ! level ice tracer + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcr(ij,it) = atrcr(ij,it) & + + trcrn(i,j,it,n)*trcrn(i,j,nt_alvl,n)*aicen(i,j,n) + enddo ! ij + + elseif (trcr_depend(it) == 2+nt_apnd .and. & + (tr_pond_cesm .or. tr_pond_topo)) then ! CESM or topo pond area tracer + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcr(ij,it) = atrcr(ij,it) & + + trcrn(i,j,it,n)*trcrn(i,j,nt_apnd,n)*aicen(i,j,n) + enddo ! ij + + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_lvl) then ! level-ice pond area tracer + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcr(ij,it) = atrcr(ij,it) & + + trcrn(i,j,it,n)*trcrn(i,j,nt_apnd,n) & + *trcrn(i,j,nt_alvl,n)*aicen(i,j,n) + enddo ! ij + + elseif (trcr_depend(it) == 2+nt_fbri) then ! brine tracer + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcr(ij,it) = atrcr(ij,it) & + + trcrn(i,j,it,n)*trcrn(i,j,nt_fbri,n)*vicen(i,j,n) + enddo ! ij + endif ! trcr_depend + enddo ! ntrcr + enddo ! ncat + + ! Open water fraction + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + aice0(i,j) = max (c1 - aice(i,j), c0) + enddo ! ij + + ! Tracers + + call compute_tracers (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend, & + atrcr(:,:), aice(:,:), & + vice (:,:), vsno(:,:), & + trcr(:,:,:)) + + deallocate (atrcr) + + endif ! icells > 0 + + end subroutine aggregate + +!======================================================================= + +! Aggregate ice area (but not other state variables) over thickness +! categories. +! +! authors: William H. Lipscomb, LANL +! modified Jan 2004 by Clifford Chen, Fujitsu + + subroutine aggregate_area (nx_block, ny_block, & + aicen, aice, aice0) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + aicen ! concentration of ice + + real (kind=dbl_kind), dimension (:,:), intent(inout) :: & + aice, & ! concentration of ice + aice0 ! concentration of open water + + ! 
local variables + + integer (kind=int_kind) :: i, j, n + + !----------------------------------------------------------------- + ! Aggregate + !----------------------------------------------------------------- + + aice(:,:) = c0 + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + aice(i,j) = aice(i,j) + aicen(i,j,n) + enddo ! i + enddo ! j + enddo ! n + + do j = 1, ny_block + do i = 1, nx_block + + ! open water fraction + aice0(i,j) = max (c1 - aice(i,j), c0) + + enddo ! i + enddo ! j + + end subroutine aggregate_area + +!======================================================================= + +! Rebins thicknesses into defined categories +! +! authors: William H. Lipscomb and Elizabeth C. Hunke, LANL + + subroutine rebin (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend, & + aicen, trcrn, & + vicen, vsnon, & + l_stop, & + istop, jstop) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of grid cells with ice + ntrcr ! number of tracers in use + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i/j indices + + integer (kind=int_kind), dimension (ntrcr), intent(in) :: & + trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i,j , & ! horizontal indices + n , & ! category index + ij ! combined horizontal index + + logical (kind=log_kind) :: & + shiftflag ! = .true. if ice must be shifted + + integer (kind=int_kind), dimension (icells,ncat) :: & + donor ! donor category index + + real (kind=dbl_kind), dimension (icells,ncat) :: & + daice , & ! ice area transferred + dvice , & ! ice volume transferred + hicen ! ice thickness for each cat (m) + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + l_stop = .false. + istop = 0 + jstop = 0 + + do n = 1, ncat + do ij = 1, icells ! aice(i,j) > puny + i = indxi(ij) + j = indxj(ij) + + donor(ij,n) = 0 + daice(ij,n) = c0 + dvice(ij,n) = c0 + + !----------------------------------------------------------------- + ! Compute ice thickness. + !----------------------------------------------------------------- + if (aicen(i,j,n) > puny) then + hicen(ij,n) = vicen(i,j,n) / aicen(i,j,n) + else + hicen(ij,n) = c0 + endif + enddo ! ij + enddo ! n + + !----------------------------------------------------------------- + ! make sure thickness of cat 1 is at least hin_max(0) + !----------------------------------------------------------------- + do ij = 1, icells ! aice(i,j) > puny + i = indxi(ij) + j = indxj(ij) + + if (aicen(i,j,1) > puny) then + if (hicen(ij,1) <= hin_max(0) .and. hin_max(0) > c0 ) then + aicen(i,j,1) = vicen(i,j,1) / hin_max(0) + hicen(ij,1) = hin_max(0) + endif + endif + enddo ! ij + + !----------------------------------------------------------------- + ! If a category thickness is not in bounds, shift the + ! 
entire area, volume, and energy to the neighboring category + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! Move thin categories up + !----------------------------------------------------------------- + + do n = 1, ncat-1 ! loop over category boundaries + + !----------------------------------------------------------------- + ! identify thicknesses that are too big + !----------------------------------------------------------------- + shiftflag = .false. + do ij = 1, icells ! aice(i,j) > puny + i = indxi(ij) + j = indxj(ij) + + if (aicen(i,j,n) > puny .and. & + hicen(ij,n) > hin_max(n)) then + shiftflag = .true. + donor(ij,n) = n + daice(ij,n) = aicen(i,j,n) + dvice(ij,n) = vicen(i,j,n) + endif + enddo ! ij + + if (shiftflag) then + + !----------------------------------------------------------------- + ! shift ice between categories + !----------------------------------------------------------------- + call shift_ice (nx_block, ny_block, & + indxi, indxj, & + icells, & + ntrcr, trcr_depend, & + aicen, trcrn, & + vicen, vsnon, & + hicen, donor, & + daice, dvice, & + l_stop, & + istop, jstop) + + if (l_stop) return + + !----------------------------------------------------------------- + ! reset shift parameters + !----------------------------------------------------------------- + + do ij = 1, icells ! aice(i,j) > puny + donor(ij,n) = 0 + daice(ij,n) = c0 + dvice(ij,n) = c0 + enddo + + endif ! shiftflag + + enddo ! n + + !----------------------------------------------------------------- + ! Move thick categories down + !----------------------------------------------------------------- + + do n = ncat-1, 1, -1 ! loop over category boundaries + + !----------------------------------------------------------------- + ! identify thicknesses that are too small + !----------------------------------------------------------------- + shiftflag = .false. + do ij = 1, icells ! aice(i,j) > puny + i = indxi(ij) + j = indxj(ij) + + if (aicen(i,j,n+1) > puny .and. & + hicen(ij,n+1) <= hin_max(n)) then + shiftflag = .true. + donor(ij,n) = n+1 + daice(ij,n) = aicen(i,j,n+1) + dvice(ij,n) = vicen(i,j,n+1) + endif + enddo ! ij + + if (shiftflag) then + + !----------------------------------------------------------------- + ! shift ice between categories + !----------------------------------------------------------------- + call shift_ice (nx_block, ny_block, & + indxi, indxj, & + icells, & + ntrcr, trcr_depend, & + aicen, trcrn, & + vicen, vsnon, & + hicen, donor, & + daice, dvice, & + l_stop, & + istop, jstop) + + if (l_stop) return + + !----------------------------------------------------------------- + ! reset shift parameters + !----------------------------------------------------------------- + + do ij = 1, icells ! aice(i,j) > puny + donor(ij,n) = 0 + daice(ij,n) = c0 + dvice(ij,n) = c0 + enddo + + endif ! shiftflag + + enddo ! n + + + end subroutine rebin + +!======================================================================= + +! Reduce area when ice melts for special case of ncat=1 +! +! Use CSM 1.0-like method of reducing ice area +! when melting occurs: assume only half the ice volume +! change goes to thickness decrease, the other half +! to reduction in ice fraction +! +! authors: C. M. Bitz, UW +! modified by: Elizabeth C. 
Hunke, LANL + + subroutine reduce_area (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmask, & + aicen, vicen, & + aicen_init,vicen_init) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen ! volume per unit area of ice (m) + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + aicen_init, & ! old ice area for category 1 (m) + vicen_init ! old ice volume for category 1 (m) + + ! local variables + + integer (kind=int_kind) :: & + i, j ! horizontal indices + + real (kind=dbl_kind) :: & + hi0 , & ! initial hi + hi1 , & ! current hi + dhi ! hi1 - hi0 + + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + + hi0 = c0 + if (aicen_init(i,j) > c0) & + hi0 = vicen_init(i,j) / aicen_init(i,j) + + hi1 = c0 + if (aicen(i,j) > c0) & + hi1 = vicen(i,j) / aicen(i,j) + + ! make sure thickness of cat 1 is at least hin_max(0) + if (hi1 <= hin_max(0) .and. hin_max(0) > c0 ) then + aicen(i,j) = vicen(i,j) / hin_max(0) + hi1 = hin_max(0) + endif + + if (aicen(i,j) > c0) then + dhi = hi1 - hi0 + if (dhi < c0) then + hi1 = vicen(i,j) / aicen(i,j) + aicen(i,j) = c2 * vicen(i,j) / (hi1 + hi0) + endif + endif + + endif ! tmask + enddo ! i + enddo ! j + + end subroutine reduce_area + +!======================================================================= + +! Shift ice across category boundaries, conserving area, volume, and +! energy. +! +! authors: William H. Lipscomb and Elizabeth C. Hunke, LANL + + subroutine shift_ice (nx_block, ny_block, & + indxi, indxj, & + icells, & + ntrcr, trcr_depend, & + aicen, trcrn, & + vicen, vsnon, & + hicen, donor, & + daice, dvice, & + l_stop, & + istop, jstop) + + use ice_state, only: nt_apnd, nt_alvl, nt_fbri, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of ocean/ice cells + ntrcr ! number of tracers in use + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi , & ! compressed indices in i/j directions + indxj + + integer (kind=int_kind), dimension (ntrcr), intent(in) :: & + trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + ! NOTE: Third index of donor, daice, dvice should be ncat-1, + ! except that compilers would have trouble when ncat = 1 + integer (kind=int_kind), dimension(icells,ncat), & + intent(in) :: & + donor ! donor category index + + real (kind=dbl_kind), dimension(icells,ncat), & + intent(inout) :: & + daice , & ! ice area transferred across boundary + dvice , & ! ice volume transferred across boundary + hicen ! ice thickness for each cat (m) + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i, j, m , & ! horizontal indices + n , & ! 
thickness category index + nr , & ! receiver category + nd , & ! donor category + it ! tracer index + + real (kind=dbl_kind), dimension(icells,ntrcr,ncat) :: & + atrcrn ! aicen*trcrn + + ! real (kind=dbl_kind), dimension(icells,ncat) :: & + ! dvbrine ! brine volume transferred + + real (kind=dbl_kind) :: & + dvsnow , & ! snow volume transferred + datrcr ! aicen*train transferred + + integer (kind=int_kind), dimension (icells) :: & + indxii , & ! compressed indices for i/j directions + indxjj , & + indxij + + integer (kind=int_kind) :: & + ishift , & ! number of cells with ice to transfer + ij ! combined i/j horizontal index + + logical (kind=log_kind) :: & + daice_negative , & ! true if daice < -puny + dvice_negative , & ! true if dvice < -puny + daice_greater_aicen, & ! true if daice > aicen + dvice_greater_vicen ! true if dvice > vicen + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + worka, workb + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + l_stop = .false. + istop = 0 + jstop = 0 + + !----------------------------------------------------------------- + ! Define variables equal to aicen*trcrn, vicen*trcrn, vsnon*trcrn + !----------------------------------------------------------------- + + do n = 1, ncat + do it = 1, ntrcr + if (trcr_depend(it) == 0) then ! ice area tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = aicen(i,j,n)*trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 1) then ! ice volume tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = vicen(i,j,n)*trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2) then ! snow volume tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = vsnon(i,j,n)*trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_alvl) then ! level ice tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = aicen(i,j,n) & + * trcrn(i,j,nt_alvl,n) & + * trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_apnd .and. & + (tr_pond_cesm .or. tr_pond_topo)) then ! CESM or topo pond area tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = aicen(i,j,n) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_lvl) then ! level-ice pond area tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = aicen(i,j,n) & + * trcrn(i,j,nt_alvl,n) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_fbri) then ! brine tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = vicen(i,j,n) & + * trcrn(i,j,nt_fbri,n) & + * trcrn(i,j,it,n) + enddo + endif + enddo + + enddo + + !----------------------------------------------------------------- + ! Check for daice or dvice out of range, allowing for roundoff error + !----------------------------------------------------------------- + + do n = 1, ncat-1 + + daice_negative = .false. + dvice_negative = .false. + daice_greater_aicen = .false. + dvice_greater_vicen = .false. + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (donor(ij,n) > 0) then + nd = donor(ij,n) + + if (daice(ij,n) < c0) then + if (daice(ij,n) > -puny*aicen(i,j,nd)) then + daice(ij,n) = c0 ! shift no ice + dvice(ij,n) = c0 + else + daice_negative = .true. 
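+                     ! Transfers more negative than roundoff (puny) times the
+                     ! donor category area are treated as genuine errors: the
+                     ! flag set here triggers the diagnostic messages and the
+                     ! abort (l_stop) in the error-message block below.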
+ endif + endif + + if (dvice(ij,n) < c0) then + if (dvice(ij,n) > -puny*vicen(i,j,nd)) then + daice(ij,n) = c0 ! shift no ice + dvice(ij,n) = c0 + else + dvice_negative = .true. + endif + endif + + if (daice(ij,n) > aicen(i,j,nd)*(c1-puny)) then + if (daice(ij,n) < aicen(i,j,nd)*(c1+puny)) then + daice(ij,n) = aicen(i,j,nd) + dvice(ij,n) = vicen(i,j,nd) + else + daice_greater_aicen = .true. + endif + endif + + if (dvice(ij,n) > vicen(i,j,nd)*(c1-puny)) then + if (dvice(ij,n) < vicen(i,j,nd)*(c1+puny)) then + daice(ij,n) = aicen(i,j,nd) + dvice(ij,n) = vicen(i,j,nd) + else + dvice_greater_vicen = .true. + endif + endif + + endif ! donor > 0 + enddo ! ij + + !----------------------------------------------------------------- + ! error messages + !----------------------------------------------------------------- + + if (daice_negative) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (donor(ij,n) > 0 .and. & + daice(ij,n) <= -puny*aicen(i,j,nd)) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'shift_ice: negative daice' + write(nu_diag,*) 'i, j:', i, j + write(nu_diag,*) 'boundary, donor cat:', n, nd + write(nu_diag,*) 'daice =', daice(ij,n) + write(nu_diag,*) 'dvice =', dvice(ij,n) + l_stop = .true. + istop = i + jstop = j + endif + enddo + endif + if (l_stop) return + + if (dvice_negative) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (donor(ij,n) > 0 .and. & + dvice(ij,n) <= -puny*vicen(i,j,nd)) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'shift_ice: negative dvice' + write(nu_diag,*) 'i, j:', i, j + write(nu_diag,*) 'boundary, donor cat:', n, nd + write(nu_diag,*) 'daice =', daice(ij,n) + write(nu_diag,*) 'dvice =', dvice(ij,n) + l_stop = .true. + istop = i + jstop = j + endif + enddo + endif + if (l_stop) return + + if (daice_greater_aicen) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (donor(ij,n) > 0) then + nd = donor(ij,n) + if (daice(ij,n) >= aicen(i,j,nd)*(c1+puny)) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'shift_ice: daice > aicen' + write(nu_diag,*) 'i, j:', i, j + write(nu_diag,*) 'boundary, donor cat:', n, nd + write(nu_diag,*) 'daice =', daice(ij,n) + write(nu_diag,*) 'aicen =', aicen(i,j,nd) + l_stop = .true. + istop = i + jstop = j + endif + endif + enddo + endif + if (l_stop) return + + if (dvice_greater_vicen) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (donor(ij,n) > 0) then + nd = donor(ij,n) + if (dvice(ij,n) >= vicen(i,j,nd)*(c1+puny)) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'shift_ice: dvice > vicen' + write(nu_diag,*) 'i, j:', i, j + write(nu_diag,*) 'boundary, donor cat:', n, nd + write(nu_diag,*) 'dvice =', dvice(ij,n) + write(nu_diag,*) 'vicen =', vicen(i,j,nd) + l_stop = .true. + istop = i + jstop = j + endif + endif + enddo + endif + if (l_stop) return + + !----------------------------------------------------------------- + ! transfer volume and energy between categories + !----------------------------------------------------------------- + + ishift = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (daice(ij,n) > c0) then ! daice(n) can be < puny + ishift = ishift + 1 + indxii(ishift) = i + indxjj(ishift) = j + indxij(ishift) = ij + endif ! tmask + enddo + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, ishift + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + nd = donor(m,n) +!echmod worka(i,j) = dvice(m,n) / vicen(i,j,nd) + worka(i,j) = daice(m,n) / aicen(i,j,nd) + if (nd == n) then + nr = nd+1 + else ! 
nd = n+1 + nr = n + endif + + aicen(i,j,nd) = aicen(i,j,nd) - daice(m,n) + aicen(i,j,nr) = aicen(i,j,nr) + daice(m,n) + + vicen(i,j,nd) = vicen(i,j,nd) - dvice(m,n) + vicen(i,j,nr) = vicen(i,j,nr) + dvice(m,n) + + dvsnow = vsnon(i,j,nd) * worka(i,j) + vsnon(i,j,nd) = vsnon(i,j,nd) - dvsnow + vsnon(i,j,nr) = vsnon(i,j,nr) + dvsnow + workb(i,j) = dvsnow + + enddo ! ij + + do it = 1, ntrcr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, ishift + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + nd = donor(m,n) + if (nd == n) then + nr = nd+1 + else ! nd = n+1 + nr = n + endif + + if (trcr_depend(it) == 0) then + datrcr = daice(m,n)*trcrn(i,j,it,nd) + elseif (trcr_depend(it) == 1) then + datrcr = dvice(m,n)*trcrn(i,j,it,nd) + elseif (trcr_depend(it) == 2) then + datrcr = workb(i,j)*trcrn(i,j,it,nd) + elseif (trcr_depend(it) == 2+nt_alvl) then + datrcr = daice(m,n)*trcrn(i,j,nt_alvl,nd)*trcrn(i,j,it,nd) + elseif (trcr_depend(it) == 2+nt_apnd .and. & + (tr_pond_cesm .or. tr_pond_topo)) then + datrcr = daice(m,n)*trcrn(i,j,nt_apnd,nd)*trcrn(i,j,it,nd) + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_lvl) then + datrcr = daice(m,n)*trcrn(i,j,nt_alvl,nd) & + *trcrn(i,j,nt_apnd,nd)*trcrn(i,j,it,nd) + elseif (trcr_depend(it) == 2+nt_fbri) then + datrcr = dvice(m,n)*trcrn(i,j,nt_fbri,nd)*trcrn(i,j,it,nd) + endif + + atrcrn(m,it,nd) = atrcrn(m,it,nd) - datrcr + atrcrn(m,it,nr) = atrcrn(m,it,nr) + datrcr + + enddo ! ij + enddo ! ntrcr + enddo ! boundaries, 1 to ncat-1 + + !----------------------------------------------------------------- + ! Update ice thickness and tracers + !----------------------------------------------------------------- + + do n = 1, ncat + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (aicen(i,j,n) > puny) then + hicen(ij,n) = vicen (i,j,n) / aicen(i,j,n) + else + hicen(ij,n) = c0 + endif + enddo + + call compute_tracers (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend, & + atrcrn(:,:, n), aicen(:,:, n), & + vicen (:,:, n), vsnon(:,:, n), & + trcrn (:,:,:,n)) + + enddo ! ncat + + end subroutine shift_ice + +!======================================================================= + +! For each grid cell, sum field over all ice categories. +! +! author: William H. Lipscomb, LANL + + subroutine column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + nsum, & + xin, xout) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + nsum , & ! number of categories/layers + icells ! number of ice/ocean grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i/j indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,nsum), & + intent(in) :: & + xin ! input field + + real (kind=dbl_kind), dimension (icells), intent(out) :: & + xout ! output field + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij , & ! horizontal indices + n ! category/layer index + + do ij = 1, icells + xout(ij) = c0 + enddo + + do n = 1, nsum + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + xout(ij) = xout(ij) + xin(i,j,n) + enddo ! ij + enddo ! n + + end subroutine column_sum + +!======================================================================= + +! For each physical grid cell, check that initial and final values +! of a conserved field are equal to within a small value. +! +! author: William H. 
Lipscomb, LANL + + subroutine column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + x1, x2, & + max_err, l_stop, & + istop, jstop) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of ice/ocean grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i/j indices + + real (kind=dbl_kind), dimension(icells), intent(in) :: & + x1 , & ! initial field + x2 ! final field + + real (kind=dbl_kind), intent(in) :: & + max_err ! max allowed error + + character (len=char_len), intent(in) :: & + fieldid ! field identifier + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + ij ! horizontal indices + + do ij = 1, icells + if (abs (x2(ij)-x1(ij)) > max_err) then + l_stop = .true. + istop = indxi(ij) + jstop = indxj(ij) + + write (nu_diag,*) ' ' + write (nu_diag,*) 'Conservation error: ', trim(fieldid) + write (nu_diag,*) 'i, j =', istop, jstop + write (nu_diag,*) 'Initial value =', x1(ij) + write (nu_diag,*) 'Final value =', x2(ij) + write (nu_diag,*) 'Difference =', x2(ij) - x1(ij) + endif + enddo + + end subroutine column_conservation_check + +!======================================================================= + +! Compute tracer fields. +! Given atrcrn = aicen*trcrn (or vicen*trcrn, vsnon*trcrn), compute trcrn. +! +! author: William H. Lipscomb, LANL + + subroutine compute_tracers (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend, & + atrcrn, aicen, & + vicen, vsnon, & + trcrn) + + use ice_state, only: nt_Tsfc, nt_alvl, nt_apnd, nt_fbri, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of ice/ocean grid cells + ntrcr ! number of tracers in use + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i/j indices + + integer (kind=int_kind), dimension (ntrcr), intent(in) :: & + trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon + + real (kind=dbl_kind), dimension (icells,ntrcr), & + intent(in) :: & + atrcrn ! aicen*trcrn or vicen*trcrn or vsnon*trcrn + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr), & + intent(out) :: & + trcrn ! ice tracers + + ! local variables + + integer (kind=int_kind) :: & + i, j, it, ij ! counting indices + + + trcrn(:,:,:) = c0 + + !----------------------------------------------------------------- + ! Compute new tracers + !----------------------------------------------------------------- + + do it = 1, ntrcr + if (it == nt_Tsfc) then ! surface temperature + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (aicen(i,j) > puny) then + trcrn(i,j,it) = atrcrn(ij,it) / aicen(i,j) + else + trcrn(i,j,it) = Tocnfrz + endif + enddo + + elseif (trcr_depend(it) == 0) then ! ice area tracers + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (aicen(i,j) > puny) then + trcrn(i,j,it) = atrcrn(ij,it) / aicen(i,j) + else + trcrn(i,j,it) = c0 + endif + enddo + + elseif (trcr_depend(it) == 1) then ! 
ice volume tracers + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (vicen(i,j) > c0) then + trcrn(i,j,it) = atrcrn(ij,it) / vicen(i,j) + else + trcrn(i,j,it) = c0 + if (it == nt_fbri) trcrn(i,j,nt_fbri) = c1 + endif + enddo + + elseif (trcr_depend(it) == 2) then ! snow volume tracers + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (vsnon(i,j) > c0) then + trcrn(i,j,it) = atrcrn(ij,it) / vsnon(i,j) + else + trcrn(i,j,it) = c0 + endif + enddo + + elseif (trcr_depend(it) == 2+nt_alvl) then ! level ice tracers + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (trcrn(i,j,nt_alvl)*aicen(i,j) > c0) then + trcrn(i,j,it) = atrcrn(ij,it) / (trcrn(i,j,nt_alvl)*aicen(i,j)) + else + trcrn(i,j,it) = c0 + endif + enddo + + elseif (trcr_depend(it) == 2+nt_apnd .and. & + (tr_pond_cesm .or. tr_pond_topo)) then ! CESM or topo pond area tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (trcrn(i,j,nt_apnd)*aicen(i,j) > c0) then + trcrn(i,j,it) = atrcrn(ij,it) / (trcrn(i,j,nt_apnd)*aicen(i,j)) + else + trcrn(i,j,it) = c0 + endif + enddo + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_lvl) then ! level-ice pond area tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (trcrn(i,j,nt_alvl)*trcrn(i,j,nt_apnd)*aicen(i,j) > c0) then + trcrn(i,j,it) = atrcrn(ij,it) & + / (trcrn(i,j,nt_alvl)*trcrn(i,j,nt_apnd)*aicen(i,j)) + else + trcrn(i,j,it) = c0 + endif + enddo + + elseif (trcr_depend(it) == 2+nt_fbri) then ! brine tracers + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (trcrn(i,j,nt_fbri)*vicen(i,j) > c0 ) then + trcrn(i,j,it) = atrcrn(ij,it) / (trcrn(i,j,nt_fbri)*vicen(i,j)) + else + trcrn(i,j,it) = c0 + endif + enddo + endif ! trcr_depend + enddo ! ntrcr + + end subroutine compute_tracers + +!======================================================================= + +! Cleanup subroutine that rebins thickness categories if necessary, +! eliminates very small ice areas while conserving mass and energy, +! aggregates state variables, and does a boundary call. +! It is a good idea to call this subroutine after the thermodynamics +! (thermo_vertical/thermo_itd) and again after the dynamics +! (evp/transport/ridging). +! +! author: William H. Lipscomb, LANL + + subroutine cleanup_itd (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, ntrcr, & + aicen, trcrn, & + vicen, vsnon, & + aice0, aice, & + trcr_depend, fpond, & + fresh, & + fsalt, fhocn, & + faero_ocn, tr_aero, & + tr_pond_topo, & + heat_capacity, & + nbtrcr, first_ice, & + flux_bio, & + l_stop, & + istop, jstop, & + limit_aice_in) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + ntrcr ! number of tracers in use + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aice , & ! total ice concentration + aice0 ! concentration of open water + + integer (kind=int_kind), dimension(ntrcr), intent(in) :: & + trcr_depend ! tracer dependency information + + logical (kind=log_kind), intent(in) :: & + tr_aero, & ! aerosol flag + tr_pond_topo, & ! topo pond flag + heat_capacity ! 
if false, ice and snow have zero heat capacity + + logical (kind=log_kind), dimension(nx_block,ny_block,ncat),intent(inout) :: & + first_ice ! For bgc and S tracers. set to true if zapping ice. + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! ice-ocean fluxes (required for strict conservation) + + integer (kind=int_kind), intent(in) :: & + nbtrcr ! number of bgc tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout), optional :: & + fpond , & ! fresh water flux to ponds (kg/m^2/s) + fresh , & ! fresh water flux to ocean (kg/m^2/s) + fsalt , & ! salt flux to ocean (kg/m^2/s) + fhocn ! net heat flux to ocean (W/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nbtrcr), & + intent(inout), optional :: & + flux_bio ! net tracer flux to ocean from biology (mmol/m^2/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_aero), & + intent(inout), optional :: & + faero_ocn ! aerosol flux to ocean (kg/m^2/s) + + logical (kind=log_kind), intent(in), optional :: & + limit_aice_in ! if false, allow aice to be out of bounds + ! may want to allow this for unit tests + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + n , & ! category index + icells ! number of grid cells with ice + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, indxj ! compressed i/j indices + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + dfpond , & ! zapped pond water flux (kg/m^2/s) + dfresh , & ! zapped fresh water flux (kg/m^2/s) + dfsalt , & ! zapped salt flux (kg/m^2/s) + dfhocn ! zapped energy flux ( W/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_aero) :: & + dfaero_ocn ! zapped aerosol flux (kg/m^2/s) + + logical (kind=log_kind) :: & + limit_aice ! if true, check for aice out of bounds + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + if (present(limit_aice_in)) then + limit_aice = limit_aice_in + else + limit_aice = .true. + endif + + l_stop = .false. + istop = 0 + jstop = 0 + + dfpond(:,:) = c0 + dfresh(:,:) = c0 + dfsalt(:,:) = c0 + dfhocn(:,:) = c0 + dfaero_ocn(:,:,:) = c0 + + !----------------------------------------------------------------- + ! Compute total ice area. + !----------------------------------------------------------------- + + call aggregate_area (nx_block, ny_block, & + aicen, & + aice, aice0) + + if (limit_aice) then ! check for aice out of bounds + + do j = jlo,jhi + do i = ilo,ihi + if (aice(i,j) > c1+puny .or. aice(i,j) < -puny) then + l_stop = .true. + istop = i + jstop = j + endif + enddo + enddo + + if (l_stop) then ! area out of bounds + i = istop + j = jstop + write(nu_diag,*) ' ' + write(nu_diag,*) 'aggregate ice area out of bounds' + write(nu_diag,*) 'my_task, i, j, aice:', & + my_task, i, j, aice(i,j) + do n = 1, ncat + write(nu_diag,*) 'n, aicen:', n, aicen(i,j,n) + enddo + return + endif ! l_stop + endif ! limit_aice + + !----------------------------------------------------------------- + ! Identify grid cells with ice. 
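+      ! (Only cells with aggregate ice area aice > puny are collected here;
+      !  this compressed index list is what rebin and, for the zero-layer
+      !  case, zerolayer_check operate on.)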
+ !----------------------------------------------------------------- + + icells = 0 + do j = jlo,jhi + do i = ilo,ihi + if (aice(i,j) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + !----------------------------------------------------------------- + ! Make sure ice in each category is within its thickness bounds. + ! NOTE: The rebin subroutine is needed only in the rare cases + ! when the linear_itd subroutine cannot transfer ice + ! correctly (e.g., very fast ice growth). + !----------------------------------------------------------------- + + call rebin (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend, & + aicen, trcrn, & + vicen, vsnon, & + l_stop, & + istop, jstop) + + if (l_stop) return + + !----------------------------------------------------------------- + ! Zero out ice categories with very small areas. + !----------------------------------------------------------------- + + if (limit_aice) then + call zap_small_areas (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, ntrcr, & + aice, aice0, & + aicen, trcrn, & + vicen, vsnon, & + dfpond, & + dfresh, dfsalt, & + dfhocn, dfaero_ocn,& + tr_aero, tr_pond_topo, & + first_ice,nbtrcr, & + flux_bio, l_stop, & + istop, jstop) + if (l_stop) return + endif ! l_limit_aice + + !------------------------------------------------------------------- + ! Zap snow that has out of bounds temperatures + !------------------------------------------------------------------- + + call zap_snow_temperature(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, ntrcr, & + aicen, & + trcrn, vsnon, & + dfresh, dfhocn, & + dfaero_ocn, tr_aero) + + !------------------------------------------------------------------- + ! Update ice-ocean fluxes for strict conservation + !------------------------------------------------------------------- + + if (present(fpond)) & + fpond (:,:) = fpond(:,:) + dfpond(:,:) + if (present(fresh)) & + fresh (:,:) = fresh(:,:) + dfresh(:,:) + if (present(fsalt)) & + fsalt (:,:) = fsalt(:,:) + dfsalt(:,:) + if (present(fhocn)) & + fhocn (:,:) = fhocn(:,:) + dfhocn(:,:) + if (present(faero_ocn)) & + faero_ocn (:,:,:) = faero_ocn(:,:,:) + dfaero_ocn(:,:,:) + + !---------------------------------------------------------------- + ! If using zero-layer model (no heat capacity), check that the + ! energy of snow and ice is correct. + !---------------------------------------------------------------- + + if (.not. heat_capacity) then + + call zerolayer_check(nx_block, ny_block, & + ntrcr, & + icells, indxi, indxj, & + aicen, & + vicen, vsnon, & + trcrn, l_stop, & + istop, jstop) + + endif + + end subroutine cleanup_itd + +!======================================================================= + +! For each ice category in each grid cell, remove ice if the fractional +! area is less than puny. +! +! author: William H. Lipscomb, LANL + + subroutine zap_small_areas (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, ntrcr, & + aice, aice0, & + aicen, trcrn, & + vicen, vsnon, & + dfpond, & + dfresh, dfsalt, & + dfhocn, dfaero_ocn,& + tr_aero, tr_pond_topo, & + first_ice,nbtrcr, & + flux_bio, l_stop, & + istop, jstop) + + use ice_state, only: nt_Tsfc, nt_qice, nt_qsno, nt_aero, nt_apnd, nt_hpnd, & + nt_fbri, tr_brine + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + ntrcr , & ! number of tracers in use + nbtrcr ! 
number of biology tracers + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aice , & ! total ice concentration + aice0 ! concentration of open water + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + dfpond , & ! zapped pond water flux (kg/m^2/s) + dfresh , & ! zapped fresh water flux (kg/m^2/s) + dfsalt , & ! zapped salt flux (kg/m^2/s) + dfhocn ! zapped energy flux ( W/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nbtrcr), & + intent(inout), optional :: & + flux_bio ! Ocean tracer flux from biology (mmol/m^2/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_aero), & + intent(out) :: & + dfaero_ocn ! zapped aerosol flux (kg/m^2/s) + + logical (kind=log_kind), intent(in) :: & + tr_aero, & ! aerosol flag + tr_pond_topo ! pond flag + + logical (kind=log_kind), dimension (nx_block,ny_block,ncat),intent(inout) :: & + first_ice ! For bgc tracers. Set to true if zapping ice + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i,j, n, k, it , & ! counting indices + icells , & ! number of cells with ice to zap + ij ! combined i/j horizontal index + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi , & ! compressed indices for i/j directions + indxj + + real (kind=dbl_kind) :: xtmp ! temporary variable + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + l_stop = .false. + istop = 0 + jstop = 0 + + !----------------------------------------------------------------- + ! I. Zap categories with very small areas. + !----------------------------------------------------------------- + + do n = 1, ncat + + !----------------------------------------------------------------- + ! Count categories to be zapped. + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j,n) < -puny) then + write (nu_diag,*) 'Zap ice: negative ice area' + write (nu_diag,*) 'i, j, n, aicen =', & + i, j, n, aicen(i,j,n) + l_stop = .true. + istop = i + jstop = j + return + elseif (abs(aicen(i,j,n)) /= c0 .and. & + abs(aicen(i,j,n)) <= aicenmin) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + if (icells > 0) then + + !----------------------------------------------------------------- + ! Account for tracers important for conservation + !----------------------------------------------------------------- + + if (tr_pond_topo) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + xtmp = aicen(i,j,n) & + * trcrn(i,j,nt_apnd,n) * trcrn(i,j,nt_hpnd,n) + dfpond(i,j) = dfpond(i,j) - xtmp + enddo ! 
ij + endif + + if (tr_aero) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + do it = 1, n_aero + xtmp = (vicen(i,j,n)*(trcrn(i,j,nt_aero+2+4*(it-1),n) & + + trcrn(i,j,nt_aero+3+4*(it-1),n)))/dt + dfaero_ocn(i,j,it) = dfaero_ocn(i,j,it) + xtmp + enddo ! n + enddo ! ij + endif + + !----------------------------------------------------------------- + ! Zap ice energy and use ocean heat to melt ice + !----------------------------------------------------------------- + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + xtmp = trcrn(i,j,nt_qice+k-1,n) / dt & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) ! < 0 + dfhocn(i,j) = dfhocn(i,j) + xtmp + trcrn(i,j,nt_qice+k-1,n) = c0 + + enddo ! ij + enddo ! k + + !----------------------------------------------------------------- + ! Zap ice and snow volume, add water and salt to ocean + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + xtmp = (rhoi*vicen(i,j,n)) / dt + dfresh(i,j) = dfresh(i,j) + xtmp + + xtmp = rhoi*vicen(i,j,n)*ice_ref_salinity*p001 / dt + dfsalt(i,j) = dfsalt(i,j) + xtmp + + aice0(i,j) = aice0(i,j) + aicen(i,j,n) + aicen(i,j,n) = c0 + vicen(i,j,n) = c0 + trcrn(i,j,nt_Tsfc,n) = Tocnfrz + enddo ! ij + + !----------------------------------------------------------------- + ! Zap snow + !----------------------------------------------------------------- + + call zap_snow(nx_block, ny_block, & + icells, & + indxi, indxj, & + dt, ntrcr, & + trcrn(:,:,:,n), vsnon(:,:,n), & + dfresh, dfhocn, & + dfaero_ocn, tr_aero) + + !----------------------------------------------------------------- + ! Zap tracers + !----------------------------------------------------------------- + + if (ntrcr >= 2) then + do it = 2, ntrcr + if (tr_brine .and. it == nt_fbri) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,it,n) = c1 + enddo + else + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,it,n) = c0 + enddo + endif + enddo + endif + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + first_ice(i,j,n) = .true. + enddo + + endif ! icells + enddo ! n + + !----------------------------------------------------------------- + ! II. Count cells with excess ice (aice > c1) due to roundoff errors. + ! Zap a little ice in each category so that aice = c1. + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j) > (c1+puny)) then + write (nu_diag,*) 'Zap ice: excess ice area' + write (nu_diag,*) 'i, j, aice =', & + i, j, aice(i,j) + l_stop = .true. + istop = i + jstop = j + return + elseif (aice(i,j) > c1 .and. aice(i,j) < (c1+puny)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + if (icells > 0) then + + do n = 1, ncat + + !----------------------------------------------------------------- + ! Account for tracers important for conservation + !----------------------------------------------------------------- + + if (tr_pond_topo) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + xtmp = aicen(i,j,n) & + * trcrn(i,j,nt_apnd,n) * trcrn(i,j,nt_hpnd,n) & + * (aice(i,j)-c1)/aice(i,j) + dfpond(i,j) = dfpond(i,j) - xtmp + enddo ! 
ij + endif + + if (tr_aero) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + do it = 1, n_aero + xtmp = (vsnon(i,j,n)*(trcrn(i,j,nt_aero +4*(it-1),n) & + + trcrn(i,j,nt_aero+1+4*(it-1),n)) & + + vicen(i,j,n)*(trcrn(i,j,nt_aero+2+4*(it-1),n) & + + trcrn(i,j,nt_aero+3+4*(it-1),n))) & + * (aice(i,j)-c1)/aice(i,j) / dt + dfaero_ocn(i,j,it) = dfaero_ocn(i,j,it) + xtmp + enddo ! it + enddo ! ij + endif + + !----------------------------------------------------------------- + ! Zap ice energy and use ocean heat to melt ice + !----------------------------------------------------------------- + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + xtmp = trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) & + * (aice(i,j)-c1)/aice(i,j) / dt ! < 0 + dfhocn(i,j) = dfhocn(i,j) + xtmp + + enddo ! ij + enddo ! k + + !----------------------------------------------------------------- + ! Zap snow energy and use ocean heat to melt snow + !----------------------------------------------------------------- + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + xtmp = trcrn(i,j,nt_qsno+k-1,n) & + * vsnon(i,j,n)/real(nslyr,kind=dbl_kind) & + * (aice(i,j)-c1)/aice(i,j) / dt ! < 0 + dfhocn(i,j) = dfhocn(i,j) + xtmp + + enddo ! ij + enddo ! k + + !----------------------------------------------------------------- + ! Zap ice and snow volume, add water and salt to ocean + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + xtmp = (rhoi*vicen(i,j,n) + rhos*vsnon(i,j,n)) & + * (aice(i,j)-c1)/aice(i,j) / dt + dfresh(i,j) = dfresh(i,j) + xtmp + + xtmp = rhoi*vicen(i,j,n)*ice_ref_salinity*p001 & + * (aice(i,j)-c1)/aice(i,j) / dt + dfsalt(i,j) = dfsalt(i,j) + xtmp + + aicen(i,j,n) = aicen(i,j,n) * (c1/aice(i,j)) + vicen(i,j,n) = vicen(i,j,n) * (c1/aice(i,j)) + vsnon(i,j,n) = vsnon(i,j,n) * (c1/aice(i,j)) + + enddo ! ij + + ! Note: Tracers are unchanged. + + enddo ! n + + !----------------------------------------------------------------- + ! Correct aice + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + aice(i,j) = c1 + aice0(i,j) = c0 + enddo + + endif ! icells + + end subroutine zap_small_areas + +!======================================================================= + + subroutine zap_snow(nx_block, ny_block, & + icells, & + indxi, indxj, & + dt, ntrcr, & + trcrn, vsnon, & + dfresh, dfhocn, & + dfaero_ocn, tr_aero) + + use ice_state, only: nt_qsno, nt_aero + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of ice/ocean grid cells + ntrcr ! number of tracers in use + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i/j indices + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr), & + intent(inout) :: & + trcrn ! 
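Part II treats roundoff excess only: when c1 < aice < c1 + puny, every category is scaled by 1/aice and the removed fraction (aice - c1)/aice of mass, salt and enthalpy goes to the same ocean fluxes, after which aice is reset to c1 and aice0 to c0. A minimal sketch of the renormalization with assumed, illustrative numbers:

      program excess_area_sketch
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real(dbl) :: aice, frac, aicen(2), vicen(2)

        aice  = 1.0_dbl + 5.0e-12_dbl              ! slight excess from roundoff
        aicen = (/ 0.6_dbl, 0.4_dbl + 5.0e-12_dbl /)
        vicen = (/ 0.9_dbl, 1.1_dbl /)

        frac  = (aice - 1.0_dbl)/aice              ! fraction zapped from every category
        aicen = aicen/aice                         ! renormalize so the areas sum to 1
        vicen = vicen/aice                         ! removed volume goes to dfresh/dfsalt/dfhocn

        print *, 'zapped fraction =', frac
        print *, 'sum(aicen)      =', sum(aicen)
      end program excess_area_sketch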
ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + dfresh , & ! zapped fresh water flux (kg/m^2/s) + dfhocn ! zapped energy flux ( W/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_aero), & + intent(inout) :: & + dfaero_ocn ! zapped aerosol flux (kg/m^2/s) + + logical (kind=log_kind), intent(in) :: & + tr_aero ! aerosol flag + + ! local variables + + integer (kind=int_kind) :: & + i,j, k, it , & ! counting indices + ij ! combined i/j horizontal index + + real (kind=dbl_kind) :: xtmp + + ! aerosols + if (tr_aero) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + do it = 1, n_aero + xtmp = (vsnon(i,j)*(trcrn(i,j,nt_aero +4*(it-1)) & + + trcrn(i,j,nt_aero+1+4*(it-1))))/dt + dfaero_ocn(i,j,it) = dfaero_ocn(i,j,it) + xtmp + enddo ! it + + enddo ! ij + + endif ! tr_aero + + ! snow enthalpy tracer + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + xtmp = trcrn(i,j,nt_qsno+k-1) / dt & + * vsnon(i,j)/real(nslyr,kind=dbl_kind) ! < 0 + dfhocn(i,j) = dfhocn(i,j) + xtmp + trcrn(i,j,nt_qsno+k-1) = c0 + + enddo ! ij + enddo ! k + + ! snow volume +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + xtmp = (rhos*vsnon(i,j)) / dt + dfresh(i,j) = dfresh(i,j) + xtmp + vsnon(i,j) = c0 + + enddo ! ij + + end subroutine zap_snow + +!======================================================================= + + subroutine zap_snow_temperature(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, ntrcr, & + aicen, & + trcrn, vsnon, & + dfresh, dfhocn, & + dfaero_ocn, tr_aero) + + use ice_state, only: nt_qsno + use ice_therm_shared, only: heat_capacity, Tmin + use ice_calendar, only: istep1 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + ntrcr ! number of tracers in use + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen ! concentration of ice + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(inout) :: & + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + dfresh , & ! zapped fresh water flux (kg/m^2/s) + dfhocn ! zapped energy flux ( W/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_aero), & + intent(inout) :: & + dfaero_ocn ! zapped aerosol flux (kg/m^2/s) + + logical (kind=log_kind), intent(in) :: & + tr_aero ! aerosol flag + + ! local variables + + integer (kind=int_kind) :: & + i,j, n, k, it , & ! counting indices + icells , & ! number of cells with ice to zap + ij ! combined i/j horizontal index + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi , & ! compressed indices for i/j directions + indxj + + real (kind=dbl_kind) :: & + rnslyr , & ! real(nslyr) + hsn , & ! snow thickness (m) + zqsn , & ! snow layer enthalpy (J m-2) + zTsn , & ! snow layer temperature (C) + Tmax ! maximum allowed snow temperature + + logical :: & + l_zap ! logical whether zap snow + + rnslyr = real(nslyr,kind=dbl_kind) + + do n = 1, ncat + + !----------------------------------------------------------------- + ! 
Determine cells to zap + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + + l_zap = .false. + + if (aicen(i,j,n) > puny) then + + ! snow thickness + hsn = vsnon(i,j,n) / aicen(i,j,n) + + ! check each snow layer - zap all if one is bad + do k = 1, nslyr + + ! snow enthalpy and max temperature + if (hsn > hs_min .and. heat_capacity) then + ! zqsn < 0 + zqsn = trcrn(i,j,nt_qsno+k-1,n) + Tmax = -zqsn*puny*rnslyr / & + (rhos*cp_ice*vsnon(i,j,n)) + else + zqsn = -rhos * Lfresh + Tmax = puny + endif + + ! snow temperature + zTsn = (Lfresh + zqsn/rhos)/cp_ice + + ! check for zapping + if (zTsn < Tmin .or. zTsn > Tmax) then + l_zap = .true. + write(nu_diag,*) "zap_snow_temperature: temperature out of bounds!" + write(nu_diag,*) "istep1, my_task, i, j, k:", istep1, my_task, i, j, k + write(nu_diag,*) "zTsn:", zTsn + write(nu_diag,*) "Tmin:", Tmin + write(nu_diag,*) "Tmax:", Tmax + write(nu_diag,*) "zqsn:", zqsn + endif + + enddo ! k + + endif ! aicen > puny + + ! add cell to zap list + if (l_zap) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! l_zap + + enddo ! i + enddo ! j + + !----------------------------------------------------------------- + ! Zap the cells + !----------------------------------------------------------------- + + if (icells > 0) & + call zap_snow(nx_block, ny_block, & + icells, & + indxi, indxj, & + dt, ntrcr, & + trcrn(:,:,:,n), vsnon(:,:,n), & + dfresh, dfhocn, & + dfaero_ocn, tr_aero) + + enddo ! n + + end subroutine zap_snow_temperature + +!======================================================================= +! Checks that the snow and ice energy in the zero layer thermodynamics +! model still agrees with the snow and ice volume. +! If there is an error, the model will abort. +! This subroutine is only called if heat_capacity = .false. +! +! author: Alison McLaren, Met Office +! May 2010: ECH replaced eicen, esnon with trcrn but did not test +! the changes. The loop below runs over n=1,ncat and I added loops +! over k, making the test more stringent. + + subroutine zerolayer_check (nx_block, ny_block, & + ntrcr, & + icells, indxi, indxj, & + aicen, & + vicen, vsnon, & + trcrn, l_stop, & + istop, jstop) + + use ice_state, only: nt_qice, nt_qsno + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + icells ! number of grid cells with ice + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i/j indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i, j, k , & ! horizontal, vertical indices + n , & ! category index + ij ! combined horizontal index + + real (kind=dbl_kind), parameter :: & + max_error = puny*Lfresh*rhos ! max error in zero layer energy check + ! (so max volume error = puny) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + eicen ! 
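The bounds check above inverts the snow enthalpy to a layer temperature, zTsn = (Lfresh + zqsn/rhos)/cp_ice, and zaps the whole cell if any layer falls below Tmin or above the roundoff-level Tmax. A minimal sketch of the inversion, assuming nominal values for rhos, Lfresh and cp_ice rather than the ones defined in ice_constants:

      program snow_temp_sketch
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real(dbl), parameter :: rhos   = 330.0_dbl   ! assumed snow density (kg/m^3)
        real(dbl), parameter :: Lfresh = 3.34e5_dbl  ! assumed latent heat of fusion (J/kg)
        real(dbl), parameter :: cp_ice = 2106.0_dbl  ! assumed ice specific heat (J/kg/K)
        real(dbl) :: zqsn, zTsn

        ! snow enthalpy per unit volume (J/m^3); -rhos*Lfresh corresponds to 0 C
        zqsn = -1.2e8_dbl
        zTsn = (Lfresh + zqsn/rhos)/cp_ice

        print *, 'snow layer temperature (C) =', zTsn   ! roughly -14 C for this zqsn
      end program snow_temp_sketch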
energy of melting for each ice layer (J/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + esnon ! energy of melting for each snow layer (J/m^2) + + logical (kind=log_kind) :: & + ice_energy_correct , & ! zero layer ice energy check + snow_energy_correct ! zero layer snow energy check + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + worka, workb + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + l_stop = .false. + istop = 0 + jstop = 0 + + worka(:,:) = c0 + workb(:,:) = c0 + + !---------------------------------------------------------------- + ! Calculate difference between ice and snow energies and the + ! energy values derived from the ice and snow volumes + !---------------------------------------------------------------- + + ice_energy_correct = .true. + snow_energy_correct = .true. + + do n=1,ncat + + do ij=1,icells + i=indxi(ij) + j=indxj(ij) + + eicen(i,j,n) = c0 + do k = 1, nilyr + eicen(i,j,n) = eicen(i,j,n) + trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n) / real(nilyr,kind=dbl_kind) + enddo + worka(i,j) = eicen(i,j,n) + rhoi * Lfresh * vicen(i,j,n) + esnon(i,j,n) = c0 + do k = 1, nslyr + esnon(i,j,n) = esnon(i,j,n) + trcrn(i,j,nt_qsno+k-1,n) & + * vsnon(i,j,n) / real(nslyr,kind=dbl_kind) + enddo + workb(i,j) = esnon(i,j,n) + rhos * Lfresh * vsnon(i,j,n) + + if(abs(worka(i,j)) > max_error) then + ice_energy_correct = .false. + endif + + if(abs(workb(i,j)) > max_error) then + snow_energy_correct = .false. + endif + enddo + + !---------------------------------------------------------------- + ! If there is a problem, abort with error message + !---------------------------------------------------------------- + + if (.not. ice_energy_correct) then + + do ij=1,icells + i=indxi(ij) + j=indxj(ij) + + if(abs(worka(i,j)) > max_error) then + write(nu_diag,*) ' ' + write(nu_diag,*) & + 'zerolayer check - wrong ice energy' + write(nu_diag,*) 'i, j, n:', i,j,n + write(nu_diag,*) 'eicen =', eicen(i,j,n) + write(nu_diag,*) 'error=', worka(i,j) + write(nu_diag,*) 'vicen =', vicen(i,j,n) + write(nu_diag,*) 'aicen =', aicen(i,j,n) + l_stop = .true. + istop = i + jstop = j + endif + enddo + + endif + if (l_stop) return + + if (.not. snow_energy_correct) then + + do ij=1,icells + i=indxi(ij) + j=indxj(ij) + + if(abs(workb(i,j)) > max_error) then + write(nu_diag,*) ' ' + write(nu_diag,*) & + 'zerolayer_check - wrong snow energy' + write(nu_diag,*) 'i, j, n:', i,j,n + write(nu_diag,*) 'esnon =', esnon(i,j,n) + write(nu_diag,*) 'error=', workb(i,j) + write(nu_diag,*) 'vsnon =', vsnon(i,j,n) + write(nu_diag,*) 'aicen =', aicen(i,j,n) + l_stop = .true. + istop = i + jstop = j + return + endif + enddo + + endif + + enddo ! ncat + + end subroutine zerolayer_check + +!======================================================================= + + end module ice_itd + +!======================================================================= + + + + + + + + + diff --git a/source/ice_kinds_mod.F90 b/source/ice_kinds_mod.F90 new file mode 100755 index 00000000..86f5d832 --- /dev/null +++ b/source/ice_kinds_mod.F90 @@ -0,0 +1,30 @@ +! SVN:$Id: ice_kinds_mod.F90 700 2013-08-15 19:17:39Z eclare $ +!======================================================================= + +! Defines variable precision for all common data types +! Code originally based on kinds_mod.F in POP +! +! author: Elizabeth C. Hunke and William H. Lipscomb, LANL +! 
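zerolayer_check relies on the zero-layer convention that the ice and snow enthalpy tracers are exactly -rhoi*Lfresh and -rhos*Lfresh, so eicen + rhoi*Lfresh*vicen (and its snow analogue) should vanish to within max_error = puny*Lfresh*rhos. A one-category sketch of the ice-side test, with nominal constant values assumed here:

      program zerolayer_sketch
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real(dbl), parameter :: rhoi = 917.0_dbl, rhos = 330.0_dbl      ! assumed densities
        real(dbl), parameter :: Lfresh = 3.34e5_dbl, puny = 1.0e-11_dbl ! assumed constants
        real(dbl) :: vicen, qice, eicen, err, max_error

        vicen = 2.0_dbl               ! ice volume per unit area (m)
        qice  = -rhoi*Lfresh          ! zero-layer enthalpy convention (J/m^3)

        eicen     = qice*vicen        ! single layer, so no /nilyr sum needed
        err       = eicen + rhoi*Lfresh*vicen
        max_error = puny*Lfresh*rhos

        if (abs(err) > max_error) then
           print *, 'zero-layer energy inconsistent, err =', err
        else
           print *, 'zero-layer energy consistent'
        end if
      end program zerolayer_sketch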
2006: ECH converted to free source form (F90) + + module ice_kinds_mod + +!======================================================================= + + implicit none + public + save + + integer, parameter :: char_len = 80, & + char_len_long = 256, & + log_kind = kind(.true.), & + int_kind = selected_int_kind(6), & + real_kind = selected_real_kind(6), & + dbl_kind = selected_real_kind(13), & + r16_kind = selected_real_kind(26) + +!======================================================================= + + end module ice_kinds_mod + +!======================================================================= diff --git a/source/ice_lvl.F90 b/source/ice_lvl.F90 new file mode 100755 index 00000000..68ea39e9 --- /dev/null +++ b/source/ice_lvl.F90 @@ -0,0 +1,110 @@ +! SVN:$Id: ice_lvl.F90 746 2013-09-28 22:47:56Z eclare $ +!======================================================================= + +! Ridged ice tracers for sea ice +! +! authors Elizabeth Hunke + + module ice_lvl + + use ice_kinds_mod + + implicit none + + private + public :: init_lvl, write_restart_lvl, read_restart_lvl + + logical (kind=log_kind), public :: & + restart_lvl ! if .true., read lvl tracer restart file + +!======================================================================= + + contains + +!======================================================================= + +! Initialize ice lvl tracers (call prior to reading restart data) + + subroutine init_lvl(nx_block, ny_block, ncat, alvl, vlvl) + + use ice_constants, only: c1 + + integer(kind=int_kind), intent(in) :: & + nx_block , & + ny_block , & + ncat + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(out) :: & + alvl , & ! level ice area fraction + vlvl ! level ice volume + + alvl(:,:,:) = c1 ! level ice area fraction + vlvl(:,:,:) = c1 ! level ice volume + + end subroutine init_lvl + +!======================================================================= + +! Dumps all values needed for restarting +! +! author Elizabeth C. Hunke, LANL + + subroutine write_restart_lvl() + + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_diag, nu_dump_lvl + use ice_state, only: nt_alvl, nt_vlvl, trcrn + use ice_restart, only: write_restart_field + + ! local variables + + logical (kind=log_kind) :: diag + + diag = .true. + + !----------------------------------------------------------------- + + call write_restart_field(nu_dump_lvl,0,trcrn(:,:,nt_alvl,:,:),'ruf8', & + 'alvl',ncat,diag) + call write_restart_field(nu_dump_lvl,0,trcrn(:,:,nt_vlvl,:,:),'ruf8', & + 'vlvl',ncat,diag) + + end subroutine write_restart_lvl + +!======================================================================= +! Reads all values needed for an ice lvl restart +! +! author Elizabeth C. Hunke, LANL + + subroutine read_restart_lvl() + + use ice_communicate, only: my_task, master_task + use ice_constants, only: field_loc_center, field_type_scalar + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_diag, nu_restart_lvl + use ice_state, only: nt_alvl, nt_vlvl, trcrn + use ice_restart, only: read_restart_field + + ! local variables + + logical (kind=log_kind) :: & + diag + + diag = .true. 
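ice_kinds_mod pins the precision of every CICE variable through selected_int_kind/selected_real_kind; dbl_kind requests at least 13 decimal digits, which resolves to IEEE double precision on typical compilers. A small sketch showing what those kind requests deliver (program name is illustrative):

      program kinds_sketch
        implicit none
        integer, parameter :: int_kind  = selected_int_kind(6)
        integer, parameter :: real_kind = selected_real_kind(6)
        integer, parameter :: dbl_kind  = selected_real_kind(13)

        ! report the range/precision actually delivered by each kind
        print *, 'int_kind  range  :', range(0_int_kind)
        print *, 'real_kind digits :', precision(0.0_real_kind)
        print *, 'dbl_kind  digits :', precision(0.0_dbl_kind)
      end program kinds_sketch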
+ + if (my_task == master_task) write(nu_diag,*) 'min/max level ice area, volume' + + call read_restart_field(nu_restart_lvl,0,trcrn(:,:,nt_alvl,:,:),'ruf8', & + 'alvl',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_lvl,0,trcrn(:,:,nt_vlvl,:,:),'ruf8', & + 'vlvl',ncat,diag,field_loc_center,field_type_scalar) + + end subroutine read_restart_lvl + +!======================================================================= + + end module ice_lvl + +!======================================================================= diff --git a/source/ice_mechred.F90 b/source/ice_mechred.F90 new file mode 100755 index 00000000..74241734 --- /dev/null +++ b/source/ice_mechred.F90 @@ -0,0 +1,2275 @@ +! SVN:$Id: ice_mechred.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= + +! Driver for ice mechanical redistribution (ridging) +! +! See these references: +! +! Flato, G. M., and W. D. Hibler III, 1995: Ridging and strength +! in modeling the thickness distribution of Arctic sea ice, +! J. Geophys. Res., 100, 18,611-18,626. +! +! Hibler, W. D. III, 1980: Modeling a variable thickness sea ice +! cover, Mon. Wea. Rev., 108, 1943-1973, 1980. +! +! Lipscomb, W. H., E. C. Hunke, W. Maslowski, and J. Jakacki, 2007: +! Improving ridging schemes for high-resolution sea ice models. +! J. Geophys. Res. 112, C03S91, doi:10.1029/2005JC003355. +! +! Rothrock, D. A., 1975: The energetics of the plastic deformation of +! pack ice by ridging, J. Geophys. Res., 80, 4514-4519. +! +! Thorndike, A. S., D. A. Rothrock, G. A. Maykut, and R. Colony, +! 1975: The thickness distribution of sea ice, J. Geophys. Res., +! 80, 4501-4513. +! +! authors: William H. Lipscomb, LANL +! Elizabeth C. Hunke, LANL +! +! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb +! 2004: Block structure added by William Lipscomb +! 2006: New options for participation and redistribution (WHL) +! 2006: Streamlined for efficiency by Elizabeth Hunke +! Converted to free source form (F90) + + module ice_mechred + + use ice_kinds_mod + use ice_constants + use ice_domain_size, only: ncat, max_aero, n_aero, nilyr, nslyr, nblyr + use ice_fileunits, only: nu_diag + use ice_itd, only: hin_max, column_sum, & + column_conservation_check, compute_tracers + + implicit none + save + + private + public :: ice_strength, ridge_ice + +!----------------------------------------------------------------------- +! Ridging parameters +!----------------------------------------------------------------------- + + integer (kind=int_kind), public :: & ! defined in namelist + kstrength , & ! 0 for simple Hibler (1979) formulation + ! 1 for Rothrock (1975) pressure formulation + krdg_partic , & ! 0 for Thorndike et al. (1975) formulation + ! 1 for exponential participation function + krdg_redist ! 0 for Hibler (1980) formulation + ! 1 for exponential redistribution function + + real (kind=dbl_kind), public :: & + mu_rdg, & ! gives e-folding scale of ridged ice (m^.5) (krdg_redist = 1) + Cf ! ratio of ridging work to PE change in ridging (kstrength = 1) + + real (kind=dbl_kind), parameter :: & + Cs = p25 , & ! fraction of shear energy contrbtng to ridging + Cp = p5*gravit*(rhow-rhoi)*rhoi/rhow, & ! proport const for PE + fsnowrdg = p5 , & ! snow fraction that survives in ridging + Gstar = p15 , & ! max value of G(h) that participates + ! (krdg_partic = 0) + astar = p05 , & ! e-folding scale for G(h) participation +!echmod astar = p1 , & ! 
e-folding scale for G(h) participation + ! (krdg_partic = 1) + maxraft= c1 , & ! max value of hrmin - hi = max thickness + ! of ice that rafts (m) + Hstar = c25 , & ! determines mean thickness of ridged ice (m) + ! (krdg_redist = 0) + ! Flato & Hibler (1995) have Hstar = 100 + Pstar = 2.75e4_dbl_kind, & ! constant in Hibler strength formula + ! (kstrength = 0) + Cstar = c20 ! constant in Hibler strength formula + ! (kstrength = 0) + + logical (kind=log_kind), parameter :: & +! l_conservation_check = .true. ! if true, check conservation + l_conservation_check = .false. ! if true, check conservation + ! (useful for debugging) + +!======================================================================= + + contains + +!======================================================================= + +! Compute changes in the ice thickness distribution due to divergence +! and shear. +! +! author: William H. Lipscomb, LANL + + subroutine ridge_ice (nx_block, ny_block, & + dt, ndtd, & + ntrcr, icells, & + indxi, indxj, & + rdg_conv, rdg_shear, & + aicen, trcrn, & + vicen, vsnon, & + aice0, & + trcr_depend, l_stop, & + istop, jstop, & + dardg1dt, dardg2dt, & + dvirdgdt, opening, & + fpond, & + fresh, fhocn, & + faero_ocn, & + aparticn, krdgn, & + aredistn, vredistn, & + dardg1ndt, dardg2ndt, & + dvirdgndt, & + araftn, vraftn) + + use ice_state, only: nt_qice, nt_qsno, tr_brine, nt_fbri, nt_sice + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of cells with ice present + ndtd , & ! number of dynamics subcycles + ntrcr ! number of tracers in use + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + rdg_conv, & ! normalized energy dissipation due to convergence (1/s) + rdg_shear ! normalized energy dissipation due to shear (1/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aice0 ! concentration of open water + + integer (kind=int_kind), dimension(ntrcr), intent(in) :: & + trcr_depend + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! optional history fields + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout), optional :: & + dardg1dt , & ! rate of fractional area loss by ridging ice (1/s) + dardg2dt , & ! rate of fractional area gain by new ridges (1/s) + dvirdgdt , & ! rate of ice volume ridged (m/s) + opening , & ! rate of opening due to divergence/shear (1/s) + fpond , & ! fresh water flux to ponds (kg/m^2/s) + fresh , & ! fresh water flux to ocean (kg/m^2/s) + fhocn ! net heat flux to ocean (W/m^2) + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(out), optional :: & + dardg1ndt , & ! rate of fractional area loss by ridging ice (1/s) + dardg2ndt , & ! rate of fractional area gain by new ridges (1/s) + dvirdgndt , & ! rate of ice volume ridged (m/s) + aparticn , & ! participation function + krdgn , & ! 
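Pstar and Cstar above parameterize the kstrength = 0 ice strength; the standard Hibler (1979) form is P = Pstar * vice * exp(-Cstar*(1 - aice)), which in CICE is evaluated by the ice_strength routine declared public above (not shown in this hunk). A minimal sketch of that formula, with the input state assumed for illustration:

      program hibler_strength_sketch
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real(dbl), parameter :: Pstar = 2.75e4_dbl   ! N/m^2, as in the parameter list above
        real(dbl), parameter :: Cstar = 20.0_dbl
        real(dbl) :: aice, vice, strength

        aice = 0.95_dbl     ! assumed ice concentration
        vice = 2.0_dbl      ! assumed mean thickness = volume per unit area (m)

        ! Hibler (1979) strength, the kstrength = 0 option referenced above
        strength = Pstar*vice*exp(-Cstar*(1.0_dbl - aice))

        print *, 'ice strength (N/m) =', strength
      end program hibler_strength_sketch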
mean ridge thickness/thickness of ridging ice + araftn , & ! rafting ice area + vraftn , & ! rafting ice volume + aredistn , & ! redistribution function: fraction of new ridge area + vredistn ! redistribution function: fraction of new ridge volume + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_aero), & + intent(inout), optional :: & + faero_ocn ! aerosol flux to ocean (kg/m^2/s) + + ! local variables + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + eicen ! energy of melting for each ice layer (J/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + esnon , & ! energy of melting for each snow layer (J/m^2) + vbrin, & ! ice volume with defined by brine height (m) + sicen ! Bulk salt in h ice (ppt*m) + + real (kind=dbl_kind), dimension (icells) :: & + asum , & ! sum of ice and open water area + aksum , & ! ratio of area removed to area ridged + msnow_mlt , & ! mass of snow added to ocean (kg m-2) + esnow_mlt , & ! energy needed to melt snow in ocean (J m-2) + mpond , & ! mass of pond added to ocean (kg m-2) + closing_net, & ! net rate at which area is removed (1/s) + ! (ridging ice area - area of new ridges) / dt + divu_adv , & ! divu as implied by transport scheme (1/s) + opning , & ! rate of opening due to divergence/shear + ! opning is a local variable; + ! opening is the history diagnostic variable + ardg1 , & ! fractional area loss by ridging ice + ardg2 , & ! fractional area gain by new ridges + virdg , & ! ice volume ridged + aopen ! area opening due to divergence/shear + + real (kind=dbl_kind), dimension (icells,max_aero) :: & + maero ! aerosol mass added to ocean (kg m-2) + + real (kind=dbl_kind), dimension (icells,0:ncat) :: & + apartic ! participation function; fraction of ridging + ! and closing associated w/ category n + + real (kind=dbl_kind), dimension (icells,ncat) :: & + hrmin , & ! minimum ridge thickness + hrmax , & ! maximum ridge thickness (krdg_redist = 0) + hrexp , & ! ridge e-folding thickness (krdg_redist = 1) + krdg , & ! mean ridge thickness/thickness of ridging ice + ardg1n , & ! area of ice ridged + ardg2n , & ! area of new ridges + virdgn , & ! ridging ice volume + mraftn ! rafting ice mask + + real (kind=dbl_kind), dimension (icells) :: & + vice_init, vice_final, & ! ice volume summed over categories + vsno_init, vsno_final, & ! snow volume summed over categories + eice_init, eice_final, & ! ice energy summed over layers + vbri_init, vbri_final, & ! ice volume in fbri*vicen summed over categories + Shi_init , Shi_final , & ! ice bulk salinity summed over categories + esno_init, esno_final ! snow energy summed over layers + + integer (kind=int_kind), parameter :: & + nitermax = 20 ! max number of ridging iterations + + integer (kind=int_kind) :: & + i,j , & ! horizontal indices + n , & ! thickness category index + niter , & ! iteration counter + ij , & ! horizontal index, combines i and j loops + k ! vertical index + + real (kind=dbl_kind) :: & + dti ! 1 / dt + + logical (kind=log_kind) :: & + iterate_ridging ! if true, repeat the ridging + + character (len=char_len) :: & + fieldid ! field identifier + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + l_stop = .false. 
+ istop = 0 + jstop = 0 + + do ij = 1, icells + msnow_mlt(ij) = c0 + esnow_mlt(ij) = c0 + maero (ij,:) = c0 + mpond (ij) = c0 + ardg1 (ij) = c0 + ardg2 (ij) = c0 + virdg (ij) = c0 + ardg1n (ij,:) = c0 + ardg2n (ij,:) = c0 + virdgn (ij,:) = c0 + mraftn (ij,:) = c0 +! aopen (ij) = c0 + enddo + + !----------------------------------------------------------------- + ! Compute area of ice plus open water before ridging. + !----------------------------------------------------------------- + call asum_ridging (nx_block, ny_block, & + icells, indxi, indxj, & + aicen, aice0, & + asum) + + !----------------------------------------------------------------- + ! Compute the area opening and closing. + !----------------------------------------------------------------- + call ridge_prep (nx_block, ny_block, & + icells, indxi, indxj, & + dt, & + rdg_conv, rdg_shear, & + asum, closing_net, & + divu_adv, opning) + + !----------------------------------------------------------------- + ! Compute initial values of conserved quantities. + !----------------------------------------------------------------- + + if (l_conservation_check) then + + eicen(:,:,:) = c0 + esnon(:,:,:) = c0 + vbrin(:,:,:) = c0 + sicen(:,:,:) = c0 + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + vbrin(i,j,n) = vicen(i,j,n) + if (tr_brine) vbrin(i,j,n) = trcrn(i,j,nt_fbri,n) * vicen(i,j,n) + enddo + enddo + + do k = 1, nilyr + do j = 1, ny_block + do i = 1, nx_block + eicen(i,j,n) = eicen(i,j,n) + trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + do k = 1, nslyr + do j = 1, ny_block + do i = 1, nx_block + esnon(i,j,n) = esnon(i,j,n) + trcrn(i,j,nt_qsno+k-1,n) & + * vsnon(i,j,n)/real(nslyr,kind=dbl_kind) + enddo + enddo + enddo + + + do k = 1, nilyr + do j = 1, ny_block + do i = 1, nx_block + sicen(i,j,n) = sicen(i,j,n) + trcrn(i,j,nt_sice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + + enddo + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vicen, vice_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vsnon, vsno_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + eicen, eice_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + esnon, esno_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vbrin, vbri_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + sicen, Shi_init) + + endif + + do niter = 1, nitermax + + !----------------------------------------------------------------- + ! Compute the thickness distribution of ridging ice + ! and various quantities associated with the new ridged ice. + !----------------------------------------------------------------- + + call ridge_itd (nx_block, ny_block, & + icells, indxi, indxj, & + aicen, vicen, & + aice0, & + aksum, apartic, & + hrmin, hrmax, & + hrexp, krdg, & + aparticn, krdgn, mraftn) + + !----------------------------------------------------------------- + ! Redistribute area, volume, and energy. 
+ !----------------------------------------------------------------- + + call ridge_shift (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, dt, & + aicen, trcrn, & + vicen, vsnon, & + aice0, trcr_depend, & + aksum, apartic, & + hrmin, hrmax, & + hrexp, krdg, & + closing_net, opning, & + ardg1, ardg2, & + virdg, aopen, & + ardg1n, ardg2n, & + virdgn, & + msnow_mlt, esnow_mlt, & + maero, mpond, & + l_stop, & + istop, jstop, & + aredistn, vredistn) + + if (l_stop) return + + !----------------------------------------------------------------- + ! Make sure the new area = 1. If not (because the closing + ! and opening rates were reduced above), prepare to ridge again + ! with new rates. + !----------------------------------------------------------------- + + call asum_ridging (nx_block, ny_block, & + icells, indxi, indxj, & + aicen, aice0, & + asum) + + call ridge_check (nx_block, ny_block, & + icells, indxi, indxj, & + dt, & + asum, closing_net, & + divu_adv, opning, & + iterate_ridging) + + !----------------------------------------------------------------- + ! If done, exit. If not, prepare to ridge again. + !----------------------------------------------------------------- + + if (iterate_ridging) then + write(nu_diag,*) 'Repeat ridging, niter =', niter + else + exit + endif + + if (niter == nitermax) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Exceeded max number of ridging iterations' + write(nu_diag,*) 'max =',nitermax + l_stop = .true. + return + endif + + enddo ! niter + + !----------------------------------------------------------------- + ! Compute final values of conserved quantities. + ! Check for conservation (allowing for snow thrown into ocean). + !----------------------------------------------------------------- + + if (l_conservation_check) then + + eicen(:,:,:) = c0 + esnon(:,:,:) = c0 + sicen(:,:,:) = c0 + vbrin(:,:,:) = c0 + + do n = 1, ncat + do k = 1, nilyr + do j = 1, ny_block + do i = 1, nx_block + eicen(i,j,n) = eicen(i,j,n) + trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + do k = 1, nslyr + do j = 1, ny_block + do i = 1, nx_block + esnon(i,j,n) = esnon(i,j,n) + trcrn(i,j,nt_qsno+k-1,n) & + * vsnon(i,j,n)/real(nslyr,kind=dbl_kind) + enddo + enddo + enddo + + + do j = 1, ny_block + do i = 1, nx_block + vbrin(i,j,n) = vicen(i,j,n) + if (tr_brine) vbrin(i,j,n) = trcrn(i,j,nt_fbri,n) * vbrin(i,j,n) + enddo + enddo + + + do k = 1, nilyr + do j = 1, ny_block + do i = 1, nx_block + sicen(i,j,n) = sicen(i,j,n) + trcrn(i,j,nt_sice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + + enddo + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vicen, vice_final) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vsnon, vsno_final) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + eicen, eice_final) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + esnon, esno_final) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + sicen, Shi_final) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vbrin, vbri_final) + + do ij = 1, icells + vsno_final(ij) = vsno_final(ij) + msnow_mlt(ij)/rhos + esno_final(ij) = esno_final(ij) + esnow_mlt(ij) + enddo + + fieldid = 'vice, ridging' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + vice_init, vice_final, & + puny, l_stop, & + istop, jstop) + 
if (l_stop) return + + fieldid = 'vsno, ridging' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + vsno_init, vsno_final, & + puny, l_stop, & + istop, jstop) + if (l_stop) return + + fieldid = 'eice, ridging' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + eice_init, eice_final, & + puny*Lfresh*rhoi, & + l_stop, & + istop, jstop) + if (l_stop) return + + fieldid = 'esno, ridging' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + esno_init, esno_final, & + puny*Lfresh*rhos, & + l_stop, & + istop, jstop) + if (l_stop) return + + fieldid = 'sice, ridging' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + Shi_init, Shi_final, & + puny, l_stop, & + istop, jstop) + if (l_stop) return + + fieldid = 'vbrin, ridging' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + vbri_init, vbri_final, & + puny*c10, l_stop, & + istop, jstop) + if (l_stop) return + endif ! l_conservation_check + + !----------------------------------------------------------------- + ! Compute ridging diagnostics. + !----------------------------------------------------------------- + + dti = c1/dt + + if (present(dardg1dt)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + dardg1dt(i,j) = ardg1(ij)*dti + enddo + endif + if (present(dardg2dt)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + dardg2dt(i,j) = ardg2(ij)*dti + enddo + endif + if (present(dvirdgdt)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + dvirdgdt(i,j) = virdg(ij)*dti + enddo + endif + if (present(opening)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + opening(i,j) = aopen(ij)*dti + enddo + endif + if (present(dardg1ndt)) then + do n = 1, ncat + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + dardg1ndt(i,j,n) = ardg1n(ij,n)*dti + enddo + enddo + endif + if (present(dardg2ndt)) then + do n = 1, ncat + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + dardg2ndt(i,j,n) = ardg2n(ij,n)*dti + enddo + enddo + endif + if (present(dvirdgndt)) then + do n = 1, ncat + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + dvirdgndt(i,j,n) = virdgn(ij,n)*dti + enddo + enddo + endif + if (present(araftn)) then + do n = 1, ncat + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + araftn(i,j,n) = mraftn(ij,n)*ardg2n(ij,n) +! araftn(i,j,n) = mraftn(ij,n)*ardg1n(ij,n)*p5 + enddo + enddo + endif + if (present(vraftn)) then + do n = 1, ncat + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + vraftn(i,j,n) = mraftn(ij,n)*virdgn(ij,n) + enddo + enddo + endif + + !----------------------------------------------------------------- + ! Update fresh water and heat fluxes due to snow melt. + !----------------------------------------------------------------- + + ! use thermodynamic time step (ndtd*dt here) to average properly + dti = c1/(ndtd*dt) + + if (present(fresh)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + fresh(i,j) = fresh(i,j) + msnow_mlt(ij)*dti + enddo + endif + if (present(fhocn)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + fhocn(i,j) = fhocn(i,j) + esnow_mlt(ij)*dti + enddo + endif + if (present(faero_ocn)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + faero_ocn(i,j,:) = faero_ocn(i,j,:) + maero(ij,:)*dti + enddo + endif + if (present(fpond)) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + fpond(i,j) = fpond(i,j) - mpond(ij) ! 
units change later + enddo + endif + + !----------------------------------------------------------------- + ! Check for fractional ice area > 1. + !----------------------------------------------------------------- + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (abs(asum(ij) - c1) > puny) then + l_stop = .true. + istop = i + jstop = j + + write(nu_diag,*) ' ' + write(nu_diag,*) 'Ridging error: total area > 1' + write(nu_diag,*) 'i, j, area:', i, j, asum(ij) + write(nu_diag,*) 'n, aicen:' + write(nu_diag,*) 0, aice0(i,j) + do n = 1, ncat + write(nu_diag,*) n, aicen(i,j,n) + enddo + return + endif + enddo + + end subroutine ridge_ice + +!======================================================================= + +! Find the total area of ice plus open water in each grid cell. +! +! This is similar to the aggregate_area subroutine except that the +! total area can be greater than 1, so the open water area is +! included in the sum instead of being computed as a residual. +! +! author: William H. Lipscomb, LANL + + subroutine asum_ridging (nx_block, ny_block, & + icells, indxi, indxj, & + aicen, aice0, & + asum) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with ice present + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen ! concentration of ice in each category + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aice0 ! concentration of open water + + real (kind=dbl_kind), dimension (icells), intent(out):: & + asum ! sum of ice and open water area + + ! local variables + + integer (kind=int_kind) :: & + i, j, n, & + ij ! horizontal index, combines i and j loops + + !----------------------------------------------------------------- + ! open water + !----------------------------------------------------------------- + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + asum(ij) = aice0(i,j) + enddo + + !----------------------------------------------------------------- + ! ice categories + !----------------------------------------------------------------- + + do n = 1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + asum(ij) = asum(ij) + aicen(i,j,n) + enddo + enddo + + end subroutine asum_ridging + +!======================================================================= + +! Initialize arrays, compute area of closing and opening +! +! author: William H. Lipscomb, LANL + + subroutine ridge_prep (nx_block, ny_block, & + icells, indxi, indxj, & + dt, & + rdg_conv, rdg_shear, & + asum, closing_net, & + divu_adv, opning) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with ice present + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + rdg_conv, & ! normalized energy dissipation due to convergence (1/s) + rdg_shear ! normalized energy dissipation due to shear (1/s) + + real (kind=dbl_kind), dimension(icells), & + intent(inout):: & + asum ! sum of ice and open water area + + real (kind=dbl_kind), dimension(icells), & + intent(out):: & + closing_net, & ! 
net rate at which area is removed (1/s) + divu_adv , & ! divu as implied by transport scheme (1/s) + opning ! rate of opening due to divergence/shear + + ! local variables + + real (kind=dbl_kind), parameter :: & + big = 1.0e+8_dbl_kind + + integer (kind=int_kind) :: & + i,j, & ! horizontal indices + ij ! horizontal index, combines i and j loops + + ! Set hin_max(ncat) to a big value to ensure that all ridged ice + ! is thinner than hin_max(ncat). + hin_max(ncat) = big + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !----------------------------------------------------------------- + ! Compute the net rate of closing due to convergence + ! and shear, based on Flato and Hibler (1995). + ! + ! For the elliptical yield curve: + ! rdg_conv = -min (divu, 0) + ! rdg_shear = (1/2) * (Delta - abs(divu)) + ! Note that the shear term also accounts for divergence. + ! + ! The energy dissipation rate is equal to the net closing rate + ! times the ice strength. + ! + ! NOTE: The NET closing rate is equal to the rate that open water + ! area is removed, plus the rate at which ice area is removed by + ! ridging, minus the rate at which area is added in new ridges. + ! The GROSS closing rate is equal to the first two terms (open + ! water closing and thin ice ridging) without the third term + ! (thick, newly ridged ice). + ! + ! rdg_conv is calculated differently in EAP (update_ice_rdg) and + ! represents closing_net directly. In that case, rdg_shear=0. + !----------------------------------------------------------------- + + closing_net(ij) = Cs*rdg_shear(i,j) + rdg_conv(i,j) + + !----------------------------------------------------------------- + ! Compute divu_adv, the divergence rate given by the transport/ + ! advection scheme, which may not be equal to divu as computed + ! from the velocity field. + ! + ! If divu_adv < 0, make sure the closing rate is large enough + ! to give asum = 1.0 after ridging. + !----------------------------------------------------------------- + + divu_adv(ij) = (c1-asum(ij)) / dt + + if (divu_adv(ij) < c0) & + closing_net(ij) = max(closing_net(ij), -divu_adv(ij)) + + !----------------------------------------------------------------- + ! Compute the (non-negative) opening rate that will give + ! asum = 1.0 after ridging. + !----------------------------------------------------------------- + opning(ij) = closing_net(ij) + divu_adv(ij) + + enddo + + end subroutine ridge_prep + +!======================================================================= + +! Compute the thickness distribution of the ice and open water +! participating in ridging and of the resulting ridges. +! +! This version includes new options for ridging participation and +! redistribution. +! The new participation scheme (krdg_partic = 1) improves stability +! by increasing the time scale for large changes in ice strength. +! The new exponential redistribution function (krdg_redist = 1) improves +! agreement between ITDs of modeled and observed ridges. +! +! author: William H. Lipscomb, LANL +! +! 2006: Changed subroutine name to ridge_itd +! Added new options for ridging participation and redistribution. + + subroutine ridge_itd (nx_block, ny_block, & + icells, indxi, indxj, & + aicen, vicen, & + aice0, & + aksum, apartic, & + hrmin, hrmax, & + hrexp, krdg, & + aparticn, krdgn, mraft) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! 
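ridge_prep reduces the dynamics to two numbers per cell: a net closing rate closing_net = Cs*rdg_shear + rdg_conv, and an opening rate opning = closing_net + divu_adv, where divu_adv = (c1 - asum)/dt is the divergence implied by the transport scheme; when divu_adv < 0 the closing rate is raised to at least -divu_adv so that asum returns to one. A single-cell sketch with Cs = 0.25 as set in the parameter list above and the remaining inputs assumed:

      program ridge_prep_sketch
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real(dbl), parameter :: Cs = 0.25_dbl   ! shear fraction feeding ridging
        real(dbl) :: dt, rdg_conv, rdg_shear, asum, closing_net, divu_adv, opning

        dt        = 3600.0_dbl
        rdg_conv  = 1.0e-7_dbl     ! convergence part of the dissipation (1/s)
        rdg_shear = 2.0e-7_dbl     ! shear part of the dissipation (1/s)
        asum      = 1.0002_dbl     ! ice + open water area slightly above 1

        closing_net = Cs*rdg_shear + rdg_conv
        divu_adv    = (1.0_dbl - asum)/dt          ! negative: transport overfilled the cell
        if (divu_adv < 0.0_dbl) closing_net = max(closing_net, -divu_adv)
        opning      = closing_net + divu_adv       ! non-negative opening rate

        print *, 'closing_net, opning =', closing_net, opning
      end program ridge_prep_sketch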
number of cells with ice present + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen , & ! concentration of ice + vicen ! volume per unit area of ice (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aice0 ! concentration of open water + + real (kind=dbl_kind), dimension (icells), intent(out):: & + aksum ! ratio of area removed to area ridged + + real (kind=dbl_kind), dimension (icells,0:ncat), & + intent(out) :: & + apartic ! participation function; fraction of ridging + ! and closing associated w/ category n + + real (kind=dbl_kind), dimension (icells,ncat), & + intent(out) :: & + hrmin , & ! minimum ridge thickness + hrmax , & ! maximum ridge thickness (krdg_redist = 0) + hrexp , & ! ridge e-folding thickness (krdg_redist = 1) + krdg ! mean ridge thickness/thickness of ridging ice + + ! diagnostic, category values + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(out), optional :: & + aparticn , & ! participation function + krdgn ! mean ridge thickness/thickness of ridging ice + + real (kind=dbl_kind), dimension (icells,ncat), & + intent(out), optional :: & + mraft ! rafting ice mask + + ! local variables + + integer (kind=int_kind) :: & + i,j , & ! horizontal indices + n , & ! thickness category index + ij ! horizontal index, combines i and j loops + + real (kind=dbl_kind), parameter :: & + Gstari = c1/Gstar, & + astari = c1/astar + + real (kind=dbl_kind), dimension(icells,-1:ncat) :: & + Gsum ! Gsum(n) = sum of areas in categories 0 to n + + real (kind=dbl_kind), dimension(icells) :: & + work ! temporary work array + + real (kind=dbl_kind) :: & + hi , & ! ice thickness for each cat (m) + hrmean , & ! mean ridge thickness (m) + xtmp ! temporary variable + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + do ij = 1, icells + Gsum (ij,-1) = c0 ! by definition + Gsum (ij,0) = c1 ! to avoid divzero below + apartic(ij,0) = c0 + enddo + + do n = 1, ncat + do ij = 1, icells + Gsum (ij,n) = c1 ! to avoid divzero below + apartic(ij,n) = c0 + hrmin (ij,n) = c0 + hrmax (ij,n) = c0 + hrexp (ij,n) = c0 + krdg (ij,n) = c1 + enddo + enddo + + !----------------------------------------------------------------- + ! Compute the thickness distribution of ice participating in ridging. + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! First compute the cumulative thickness distribution function Gsum, + ! where Gsum(n) is the fractional area in categories 0 to n. + ! Ignore categories with very small areas. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (aice0(i,j) > puny) then + Gsum(ij,0) = aice0(i,j) + else + Gsum(ij,0) = Gsum(ij,-1) + endif + enddo + + do n = 1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (aicen(i,j,n) > puny) then + Gsum(ij,n) = Gsum(ij,n-1) + aicen(i,j,n) + else + Gsum(ij,n) = Gsum(ij,n-1) + endif + enddo + enddo + + ! 
normalize + + do ij = 1, icells + work(ij) = c1 / Gsum(ij,ncat) + enddo + do n = 0, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + Gsum(ij,n) = Gsum(ij,n) * work(ij) + enddo + enddo + + !----------------------------------------------------------------- + ! Compute the participation function apartic; this is analogous to + ! a(h) = b(h)g(h) as defined in Thorndike et al. (1975). + ! + ! area lost from category n due to ridging/closing + ! apartic(n) = -------------------------------------------------- + ! total area lost due to ridging/closing + ! + !----------------------------------------------------------------- + + if (krdg_partic == 0) then ! Thornike et al. 1975 formulation + + !----------------------------------------------------------------- + ! Assume b(h) = (2/Gstar) * (1 - G(h)/Gstar). + ! The expressions for apartic are found by integrating b(h)g(h) between + ! the category boundaries. + !----------------------------------------------------------------- + + do n = 0, ncat + do ij = 1, icells + if (Gsum(ij,n) < Gstar) then + apartic(ij,n) = Gstari*(Gsum(ij,n)-Gsum(ij,n-1)) * & + (c2 - (Gsum(ij,n-1)+Gsum(ij,n))*Gstari) + elseif (Gsum(ij,n-1) < Gstar) then + apartic(ij,n) = Gstari * (Gstar-Gsum(ij,n-1)) * & + (c2 - (Gsum(ij,n-1)+Gstar)*Gstari) + endif + enddo ! ij + enddo ! n + + elseif (krdg_partic==1) then ! exponential dependence on G(h) + + !----------------------------------------------------------------- + ! b(h) = exp(-G(h)/astar) + ! apartic(n) = [exp(-G(n-1)/astar - exp(-G(n)/astar] / [1-exp(-1/astar)]. + ! The expression for apartic is found by integrating b(h)g(h) + ! between the category boundaries. + !----------------------------------------------------------------- + + ! precompute exponential terms using Gsum as work array + + xtmp = c1 / (c1 - exp(-astari)) + + do n = -1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + Gsum(ij,n) = exp(-Gsum(ij,n)*astari) * xtmp + enddo ! ij + enddo ! n + + do n = 0, ncat + do ij = 1, icells + apartic(ij,n) = Gsum(ij,n-1) - Gsum(ij,n) + enddo ! ij + enddo ! n + + endif ! krdg_partic + + !----------------------------------------------------------------- + ! Compute variables related to ITD of ridged ice: + ! + ! krdg = mean ridge thickness / thickness of ridging ice + ! hrmin = min ridge thickness + ! hrmax = max ridge thickness (krdg_redist = 0) + ! hrexp = ridge e-folding scale (krdg_redist = 1) + !---------------------------------------------------------------- + + if (krdg_redist == 0) then ! Hibler 1980 formulation + + !----------------------------------------------------------------- + ! Assume ridged ice is uniformly distributed between hrmin and hrmax. + ! + ! This parameterization is a modified version of Hibler (1980). + ! In the original paper the min ridging thickness is hrmin = 2*hi, + ! and the max thickness is hrmax = 2*sqrt(hi*Hstar). + ! + ! Here the min thickness is hrmin = min(2*hi, hi+maxraft), + ! so thick ridging ice is not required to raft. + ! + !----------------------------------------------------------------- + + do n = 1, ncat + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (aicen(i,j,n) > puny) then + hi = vicen(i,j,n) / aicen(i,j,n) + hrmin(ij,n) = min(c2*hi, hi + maxraft) + hrmax(ij,n) = c2*sqrt(Hstar*hi) + hrmax(ij,n) = max(hrmax(ij,n), hrmin(ij,n)+puny) + hrmean = p5 * (hrmin(ij,n) + hrmax(ij,n)) + krdg(ij,n) = hrmean / hi + + ! diagnostic rafting mask not implemented + endif + + enddo ! ij + enddo ! 
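Both participation options integrate b(h)g(h) in closed form between category boundaries: the Thorndike choice uses b(h) = (2/Gstar)*(1 - G(h)/Gstar), and the exponential choice reduces to apartic(n) = [exp(-G(n-1)/astar) - exp(-G(n)/astar)] / [1 - exp(-1/astar)]. A sketch of the exponential variant for a two-category cumulative distribution, using astar = 0.05 from the parameter list above and an assumed Gsum:

      program partic_sketch
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real(dbl), parameter :: astar = 0.05_dbl
        real(dbl) :: Gsum(0:2), apartic(0:2), norm
        integer :: n

        ! cumulative area fraction: open water, category 1, category 2
        Gsum = (/ 0.10_dbl, 0.70_dbl, 1.00_dbl /)

        norm = 1.0_dbl/(1.0_dbl - exp(-1.0_dbl/astar))
        apartic(0) = (1.0_dbl - exp(-Gsum(0)/astar))*norm
        do n = 1, 2
           apartic(n) = (exp(-Gsum(n-1)/astar) - exp(-Gsum(n)/astar))*norm
        end do

        ! nearly all ridging comes from open water and the thinnest category
        print *, 'apartic =', apartic
        print *, 'sum     =', sum(apartic)   ! equals 1 by construction
      end program partic_sketch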
n + + else ! krdg_redist = 1; exponential redistribution + + !----------------------------------------------------------------- + ! The ridge ITD is a negative exponential: + ! + ! g(h) ~ exp[-(h-hrmin)/hrexp], h >= hrmin + ! + ! where hrmin is the minimum thickness of ridging ice and + ! hrexp is the e-folding thickness. + ! + ! Here, assume as above that hrmin = min(2*hi, hi+maxraft). + ! That is, the minimum ridge thickness results from rafting, + ! unless the ice is thicker than maxraft. + ! + ! Also, assume that hrexp = mu_rdg*sqrt(hi). + ! The parameter mu_rdg is tuned to give e-folding scales mostly + ! in the range 2-4 m as observed by upward-looking sonar. + ! + ! Values of mu_rdg in the right column give ice strengths + ! roughly equal to values of Hstar in the left column + ! (within ~10 kN/m for typical ITDs): + ! + ! Hstar mu_rdg + ! + ! 25 3.0 + ! 50 4.0 + ! 75 5.0 + ! 100 6.0 + !----------------------------------------------------------------- + + do n = 1, ncat + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (aicen(i,j,n) > puny) then + hi = vicen(i,j,n) / aicen(i,j,n) + hi = max(hi,puny) + hrmin(ij,n) = min(c2*hi, hi + maxraft) + hrexp(ij,n) = mu_rdg * sqrt(hi) + krdg(ij,n) = (hrmin(ij,n) + hrexp(ij,n)) / hi + + !echmod: check computational efficiency + ! diagnostic rafting mask + if (present(mraft)) then + mraft(ij,n) = max(c0, sign(c1, hi+maxraft-hrmin(ij,n))) + xtmp = mraft(ij,n)*((c2*hi+hrexp(ij,n))/hi - krdg(ij,n)) + mraft(ij,n) = max(c0, sign(c1, puny-abs(xtmp))) + endif + endif + enddo + enddo + + endif ! krdg_redist + + !---------------------------------------------------------------- + ! Compute aksum = net ice area removed / total area participating. + ! For instance, if a unit area of ice with h = 1 participates in + ! ridging to form a ridge with a = 1/3 and h = 3, then + ! aksum = 1 - 1/3 = 2/3. + !---------------------------------------------------------------- + + do ij = 1, icells + aksum(ij) = apartic(ij,0) ! area participating = area removed + enddo + + do n = 1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + ! area participating > area removed + aksum(ij) = aksum(ij) & + + apartic(ij,n) * (c1 - c1/krdg(ij,n)) + enddo + enddo + + ! diagnostics + if (present(aparticn)) then + do n = 1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + aparticn(i,j,n) = apartic(ij,n) + enddo + enddo + endif + if (present(krdgn)) then + do n = 1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + krdgn(i,j,n) = krdg(ij,n) + enddo + enddo + endif + + end subroutine ridge_itd + +!======================================================================= + +! Remove area, volume, and energy from each ridging category +! and add to thicker ice categories. +! +! Tracers: Ridging conserves ice volume and therefore conserves volume +! tracers. It does not conserve ice area, and therefore a portion of area +! tracers are lost (corresponding to the net closing). Area tracers on +! ice that participates in ridging are carried onto the resulting ridged +! ice (except the portion that are lost due to closing). Therefore, +! tracers must be decremented if they are lost to the ocean during ridging +! (e.g. snow, ponds) or if they are being carried only on the level ice +! area. +! +! author: William H. 
Lipscomb, LANL + + subroutine ridge_shift (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, dt, & + aicen, trcrn, & + vicen, vsnon, & + aice0, trcr_depend, & + aksum, apartic, & + hrmin, hrmax, & + hrexp, krdg, & + closing_net, opning, & + ardg1, ardg2, & + virdg, aopen, & + ardg1nn, ardg2nn, & + virdgnn, & + msnow_mlt, esnow_mlt, & + maero, mpond, & + l_stop, & + istop, jstop, & + aredistn, vredistn) + + use ice_state, only: nt_qsno, & + nt_alvl, nt_vlvl, nt_aero, tr_lvl, tr_aero, & + nt_apnd, nt_hpnd, nt_ipnd, tr_pond, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo, & + nt_fbri + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of cells with ice present + ntrcr ! number of tracers in use + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + integer (kind=int_kind), dimension (ntrcr), intent(in) :: & + trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aice0 ! concentration of open water + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + aksum ! ratio of area removed to area ridged + + real (kind=dbl_kind), dimension (icells,0:ncat), intent(in) :: & + apartic ! participation function; fraction of ridging + ! and closing associated w/ category n + + real (kind=dbl_kind), dimension (icells,ncat), intent(in) :: & + hrmin , & ! minimum ridge thickness + hrmax , & ! maximum ridge thickness (krdg_redist = 0) + hrexp , & ! ridge e-folding thickness (krdg_redist = 1) + krdg ! mean ridge thickness/thickness of ridging ice + + real (kind=dbl_kind), dimension(icells), intent(inout) :: & + closing_net, & ! net rate at which area is removed (1/s) + opning , & ! rate of opening due to divergence/shear (1/s) + ardg1 , & ! fractional area loss by ridging ice + ardg2 , & ! fractional area gain by new ridges + virdg , & ! ice volume ridged (m) + aopen ! area opened due to divergence/shear + + real (kind=dbl_kind), dimension(icells,ncat), intent(inout) :: & + ardg1nn , & ! area of ice ridged + ardg2nn , & ! area of new ridges + virdgnn ! ridging ice volume + + real (kind=dbl_kind), dimension(icells), intent(inout) :: & + msnow_mlt, & ! mass of snow added to ocean (kg m-2) + esnow_mlt, & ! energy needed to melt snow in ocean (J m-2) + mpond ! mass of pond added to ocean (kg m-2) + + real (kind=dbl_kind), dimension(icells,max_aero), intent(inout) :: & + maero ! aerosol mass added to ocean (kg m-2) + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! indices of grid cell where model aborts + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout), optional :: & + aredistn , & ! redistribution function: fraction of new ridge area + vredistn ! redistribution function: fraction of new ridge volume + + ! local variables + + integer (kind=int_kind) :: & + i,j , & ! horizontal indices + n, nr , & ! thickness category indices + k , & ! ice layer index + it , & ! 
tracer index + ij, m , & ! horizontal indices, combine i and j loops + iridge ! number of cells with nonzero ridging + + integer (kind=int_kind), dimension (icells) :: & + indxii, indxjj, & ! compressed indices + indxij ! compressed indices + + real (kind=dbl_kind), dimension (icells,ncat) :: & + aicen_init , & ! ice area before ridging + vicen_init , & ! ice volume before ridging + vsnon_init ! snow volume before ridging + + real (kind=dbl_kind), dimension(icells,ntrcr,ncat) :: & + atrcrn ! aicen*trcrn + + real (kind=dbl_kind), dimension (icells) :: & + closing_gross ! rate at which area removed, not counting + ! area of new ridges + +! ECH note: the following arrays only need be defined on iridge cells + real (kind=dbl_kind), dimension (icells) :: & + afrac , & ! fraction of category area ridged + ardg1n , & ! area of ice ridged + ardg2n , & ! area of new ridges + virdgn , & ! ridging ice volume + vsrdgn , & ! ridging snow volume + dhr , & ! hrmax - hrmin + dhr2 , & ! hrmax^2 - hrmin^2 + farea , & ! fraction of new ridge area going to nr + fvol ! fraction of new ridge volume going to nr + + real (kind=dbl_kind) :: & + esrdgn ! ridging snow energy + + real (kind=dbl_kind) :: & + hi1 , & ! thickness of ridging ice + hexp , & ! ridge e-folding thickness + hL, hR , & ! left and right limits of integration + expL, expR , & ! exponentials involving hL, hR + tmpfac , & ! factor by which opening/closing rates are cut + wk1 ! work variable + + !----------------------------------------------------------------- + ! Define variables equal to aicen*trcrn, vicen*trcrn, vsnon*trcrn + !----------------------------------------------------------------- + + do n = 1, ncat + do it = 1, ntrcr + if (trcr_depend(it) == 0) then ! ice area tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = aicen(i,j,n)*trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 1) then ! ice volume tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = vicen(i,j,n)*trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2) then ! snow volume tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = vsnon(i,j,n)*trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_fbri) then ! brine tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = vicen(i,j,n) & + * trcrn(i,j,nt_fbri,n) & + * trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_alvl) then ! level ice tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = aicen(i,j,n) & + * trcrn(i,j,nt_alvl,n) & + * trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_apnd .and. & + (tr_pond_cesm .or. tr_pond_topo)) then ! CESM or topo pond area tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = aicen(i,j,n) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_lvl) then ! level-ice pond area tracer + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + atrcrn(ij,it,n) = aicen(i,j,n) & + * trcrn(i,j,nt_alvl,n) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,it,n) + enddo + endif + enddo + enddo + + !----------------------------------------------------------------- + ! Based on the ITD of ridging and ridged ice, convert the net + ! closing rate to a gross closing rate. + ! 
NOTE: 0 < aksum <= 1 + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + closing_gross(ij) = closing_net(ij) / aksum(ij) + + !----------------------------------------------------------------- + ! Reduce the closing rate if more than 100% of the open water + ! would be removed. Reduce the opening rate proportionately. + !----------------------------------------------------------------- + + if (apartic(ij,0) > c0) then + wk1 = apartic(ij,0) * closing_gross(ij) * dt + if (wk1 > aice0(i,j)) then + tmpfac = aice0(i,j) / wk1 + closing_gross(ij) = closing_gross(ij) * tmpfac + opning(ij) = opning(ij) * tmpfac + endif + endif + + enddo ! ij + + !----------------------------------------------------------------- + ! Reduce the closing rate if more than 100% of any ice category + ! would be removed. Reduce the opening rate proportionately. + !----------------------------------------------------------------- + do n = 1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (aicen(i,j,n) > puny .and. apartic(ij,n) > c0) then + wk1 = apartic(ij,n) * closing_gross(ij) * dt + if (wk1 > aicen(i,j,n)) then + tmpfac = aicen(i,j,n) / wk1 + closing_gross(ij) = closing_gross(ij) * tmpfac + opning(ij) = opning(ij) * tmpfac + endif + endif + + enddo ! ij + enddo ! n + + !----------------------------------------------------------------- + ! Compute change in open water area due to closing and opening. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + aice0(i,j) = aice0(i,j) & + - apartic(ij,0)*closing_gross(ij)*dt & + + opning(ij)*dt + if (aice0(i,j) < -puny) then + l_stop = .true. + istop = i + jstop = j + + write (nu_diag,*) ' ' + write (nu_diag,*) 'Ridging error: aice0 < 0' + write (nu_diag,*) 'i, j, aice0:', i, j, aice0(i,j) + return + + elseif (aice0(i,j) < c0) then ! roundoff error + aice0(i,j) = c0 + endif + + aopen(ij) = opning(ij)*dt ! optional diagnostic + + enddo + + !----------------------------------------------------------------- + ! Save initial state variables + !----------------------------------------------------------------- + + do n = 1, ncat + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + aicen_init(ij,n) = aicen(i,j,n) + vicen_init(ij,n) = vicen(i,j,n) + vsnon_init(ij,n) = vsnon(i,j,n) + enddo + enddo + + !----------------------------------------------------------------- + ! Compute the area, volume, and energy of ice ridging in each + ! category, along with the area of the resulting ridge. + !----------------------------------------------------------------- + + do n = 1, ncat + + !----------------------------------------------------------------- + ! Identify grid cells with nonzero ridging + !----------------------------------------------------------------- + + iridge = 0 + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (aicen_init(ij,n) > puny .and. apartic(ij,n) > c0 & + .and. closing_gross(ij) > c0) then + iridge = iridge + 1 + indxii(iridge) = i + indxjj(iridge) = j + indxij(iridge) = ij + endif + enddo ! ij + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + !----------------------------------------------------------------- + ! 
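
The aksum normalisation used above can be reproduced in isolation: in the worked example from the earlier comment (a unit area ridging into a ridge with one third the area), aksum = 1 - 1/3 = 2/3, and the gross closing rate is closing_net/aksum. The sketch below is a minimal self-contained version with made-up participation fractions and thickening factors, not values from this import.

   program demo_aksum
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      integer, parameter :: ncat = 3
      real (kind=dbl) :: apartic(0:ncat), krdg(ncat)
      real (kind=dbl) :: aksum, closing_net, closing_gross
      integer :: n

      apartic = (/ 0.10_dbl, 0.50_dbl, 0.30_dbl, 0.10_dbl /) ! fractions, sum to 1
      krdg    = (/ 3.0_dbl,  5.0_dbl,  8.0_dbl /)            ! ridge thickening factors

      aksum = apartic(0)                       ! open water closes completely
      do n = 1, ncat
         aksum = aksum + apartic(n)*(1.0_dbl - 1.0_dbl/krdg(n))
      end do

      closing_net   = 1.0e-7_dbl               ! example net closing rate (1/s)
      closing_gross = closing_net / aksum      ! gross rate of area participating

      print *, 'aksum         =', aksum
      print *, 'closing_gross =', closing_gross
   end program demo_aksum
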
Compute area of ridging ice (ardg1n) and of new ridge (ardg2n). + ! Make sure ridging fraction <=1. (Roundoff errors can give + ! ardg1 slightly greater than aicen.) + !----------------------------------------------------------------- + + ardg1n(ij) = apartic(m,n)*closing_gross(m)*dt + + if (ardg1n(ij) > aicen_init(m,n) + puny) then + l_stop = .true. + istop = i + jstop = j + + write (nu_diag,*) ' ' + write (nu_diag,*) 'Ridging error: ardg > aicen' + write (nu_diag,*) 'i, j, n:', i, j, n + write (nu_diag,*) 'ardg, aicen:', & + ardg1n(ij), aicen_init(m,n) + return + else + ardg1n(ij) = min(aicen_init(m,n), ardg1n(ij)) + endif + + ardg2n(ij) = ardg1n(ij) / krdg(m,n) + afrac(ij) = ardg1n(ij) / aicen_init(m,n) + + !----------------------------------------------------------------- + ! Subtract area, volume, and energy from ridging category n. + ! Note: Tracer values are unchanged. + !----------------------------------------------------------------- + + virdgn(ij) = vicen_init(m,n) * afrac(ij) + vsrdgn(ij) = vsnon_init(m,n) * afrac(ij) + + aicen(i,j,n) = aicen(i,j,n) - ardg1n(ij) + vicen(i,j,n) = vicen(i,j,n) - virdgn(ij) + vsnon(i,j,n) = vsnon(i,j,n) - vsrdgn(ij) + + !----------------------------------------------------------------- + ! Increment ridging diagnostics + !----------------------------------------------------------------- + + ardg1(m) = ardg1(m) + ardg1n(ij) + ardg2(m) = ardg2(m) + ardg2n(ij) + virdg(m) = virdg(m) + virdgn(ij) + + ardg1nn(m,n) = ardg1n(ij) + ardg2nn(m,n) = ardg2n(ij) + virdgnn(m,n) = virdgn(ij) + + !----------------------------------------------------------------- + ! Place part of the snow and tracer lost by ridging into the ocean. + !----------------------------------------------------------------- + + msnow_mlt(m) = msnow_mlt(m) + rhos*vsrdgn(ij)*(c1-fsnowrdg) + + if (tr_aero) then + do it = 1, n_aero + maero(m,it) = maero(m,it) & + + vsrdgn(ij)*(c1-fsnowrdg) & + *(trcrn(i,j,nt_aero +4*(it-1),n) & + + trcrn(i,j,nt_aero+1+4*(it-1),n)) + enddo + endif + + if (tr_pond_topo) then + mpond(m) = mpond(m) + ardg1n(ij) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,nt_hpnd,n) + endif + + !----------------------------------------------------------------- + ! Compute quantities used to apportion ice among categories + ! in the nr loop below + !----------------------------------------------------------------- + + dhr(ij) = hrmax(ij,n) - hrmin(m,n) + dhr2(ij) = hrmax(ij,n) * hrmax(ij,n) - hrmin(m,n) * hrmin(m,n) + + enddo ! ij + + !----------------------------------------------------------------- + ! Increment energy needed to melt snow in ocean. + ! Note that esnow_mlt < 0; the ocean must cool to melt snow. + !----------------------------------------------------------------- + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + esrdgn = vsrdgn(ij) * trcrn(i,j,nt_qsno+k-1,n) & + / real(nslyr,kind=dbl_kind) + esnow_mlt(m) = esnow_mlt(m) + esrdgn*(c1-fsnowrdg) + enddo + enddo + + !----------------------------------------------------------------- + ! Subtract area- and volume-weighted tracers from category n. + !----------------------------------------------------------------- + + do it = 1, ntrcr + if (trcr_depend(it) == 0) then ! 
ice area tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,n) = atrcrn(m,it,n) & + - ardg1n(ij)*trcrn(i,j,it,n) + enddo + + elseif (trcr_depend(it) == 1) then ! ice volume tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,n) = atrcrn(m,it,n) & + - virdgn(ij)*trcrn(i,j,it,n) + enddo + + elseif (trcr_depend(it) == 2) then ! snow volume tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,n) = atrcrn(m,it,n) & + - vsrdgn(ij)*trcrn(i,j,it,n) + enddo + + elseif (trcr_depend(it) == 2+nt_alvl) then ! level ice tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,n) = atrcrn(m,it,n) & + - ardg1n(ij)*trcrn(i,j,nt_alvl,n)*trcrn(i,j,it,n) + enddo + + elseif (trcr_depend(it) == 2+nt_apnd .and. & + (tr_pond_cesm .or. tr_pond_topo)) then ! CESM or topo pond area tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,n) = atrcrn(m,it,n) & + - ardg1n(ij)*trcrn(i,j,nt_apnd,n)*trcrn(i,j,it,n) + enddo + + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_lvl) then ! level-ice pond area tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,n) = atrcrn(m,it,n) & + - ardg1n(ij) & + * trcrn(i,j,nt_alvl,n) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,it,n) + enddo + + elseif (trcr_depend(it) == 2+nt_fbri) then ! brine tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,n) = atrcrn(m,it,n) & + - virdgn(ij)*trcrn(i,j,it,n)*trcrn(i,j,nt_fbri,n) + enddo + endif ! trcr_depend + enddo ! ntrcr + + !----------------------------------------------------------------- + ! Add area, volume, and energy of new ridge to each category nr. + !----------------------------------------------------------------- + + do nr = 1, ncat + + if (krdg_redist == 0) then ! Hibler 1980 formulation + + do ij = 1, iridge + m = indxij(ij) + + !----------------------------------------------------------------- + ! Compute the fraction of ridged ice area and volume going to + ! thickness category nr. + !----------------------------------------------------------------- + + if (hrmin(m,n) >= hin_max(nr) .or. & + hrmax(ij,n) <= hin_max(nr-1)) then + hL = c0 + hR = c0 + else + hL = max (hrmin(m,n), hin_max(nr-1)) + hR = min (hrmax(ij,n), hin_max(nr)) + endif + + farea(ij) = (hR-hL) / dhr(ij) + fvol (ij) = (hR*hR - hL*hL) / dhr2(ij) + + enddo ! ij + + else ! krdg_redist = 1; 2005 exponential formulation + + !----------------------------------------------------------------- + ! Compute the fraction of ridged ice area and volume going to + ! thickness category nr. 
+ !----------------------------------------------------------------- + + if (nr < ncat) then + + do ij = 1, iridge + m = indxij(ij) + + hi1 = hrmin(m,n) + hexp = hrexp(m,n) + + if (hi1 >= hin_max(nr)) then + farea(ij) = c0 + fvol (ij) = c0 + else + hL = max (hi1, hin_max(nr-1)) + hR = hin_max(nr) + expL = exp(-(hL-hi1)/hexp) + expR = exp(-(hR-hi1)/hexp) + farea(ij) = expL - expR + fvol (ij) = ((hL + hexp)*expL & + - (hR + hexp)*expR) / (hi1 + hexp) + endif + enddo ! ij + + else ! nr = ncat + + do ij = 1, iridge + m = indxij(ij) + + hi1 = hrmin(m,n) + hexp = hrexp(m,n) + + hL = max (hi1, hin_max(nr-1)) + expL = exp(-(hL-hi1)/hexp) + farea(ij) = expL + fvol (ij) = (hL + hexp)*expL / (hi1 + hexp) + enddo + + endif ! nr < ncat + + ! diagnostics + if (n ==1) then ! only for thinnest ridging ice + if (present(aredistn)) then + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + aredistn(i,j,nr) = farea(ij)*ardg2n(ij) + enddo + endif + if (present(vredistn)) then + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + vredistn(i,j,nr) = fvol(ij)*virdgn(ij) + enddo + endif + endif + + endif ! krdg_redist + + !----------------------------------------------------------------- + ! Transfer ice area, ice volume, and snow volume to category nr. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + aicen(i,j,nr) = aicen(i,j,nr) + farea(ij)*ardg2n(ij) + vicen(i,j,nr) = vicen(i,j,nr) + fvol(ij) *virdgn(ij) + vsnon(i,j,nr) = vsnon(i,j,nr) & + + fvol(ij)*vsrdgn(ij)*fsnowrdg + enddo + + !----------------------------------------------------------------- + ! Transfer area-weighted and volume-weighted tracers to category nr. + ! Note: The global sum aicen*trcrn of ice area tracers + ! (trcr_depend = 0) is not conserved by ridging. + ! However, ridging conserves the global sum of volume + ! tracers (trcr_depend = 1 or 2). + ! Tracers associated with level ice, or that are otherwise lost + ! from ridging ice, are not transferred. + ! We assume that all pond water is lost from ridging ice. + !----------------------------------------------------------------- + + do it = 1, ntrcr + if (trcr_depend(it) == 0) then ! ice area tracer + if (it /= nt_alvl) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,nr) = atrcrn(m,it,nr) & + + farea(ij)*ardg2n(ij)*trcrn(i,j,it,n) + enddo + endif + elseif (trcr_depend(it) == 1) then ! ice volume tracer + if (it /= nt_vlvl) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,nr) = atrcrn(m,it,nr) & + + fvol(ij)*virdgn(ij)*trcrn(i,j,it,n) + enddo + endif + elseif (trcr_depend(it) == 2) then ! snow volume tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,nr) = atrcrn(m,it,nr) & + + fvol(ij)*vsrdgn(ij)*fsnowrdg*trcrn(i,j,it,n) + enddo + elseif (trcr_depend(it) == 2+nt_fbri) then ! brine tracer +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iridge + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + atrcrn(m,it,nr) = atrcrn(m,it,nr) & ! + + fvol(ij)*virdgn(ij)*trcrn(i,j,nt_fbri,n)*trcrn(i,j,it,n) + enddo + endif ! trcr_depend + enddo ! ntrcr + + enddo ! nr (new ridges) + enddo ! 
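
A quick way to see that the exponential apportionment above conserves ridged area and volume is to check that farea and fvol sum to one over the receiving categories. The standalone sketch below does this with hypothetical category boundaries hin_max and example values of hrmin and hrexp; none of the numbers are taken from this import.

   program check_ridge_redist
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      integer, parameter :: ncat = 5
      real (kind=dbl), parameter :: hin_max(0:ncat) = &     ! category bounds (m)
         (/ 0.0_dbl, 0.64_dbl, 1.39_dbl, 2.47_dbl, 4.57_dbl, 9.33_dbl /)
      real (kind=dbl) :: hi1, hexp, hL, hR, expL, expR
      real (kind=dbl) :: farea(ncat), fvol(ncat)
      integer :: nr

      hi1  = 2.0_dbl          ! hrmin for this ridging category (m)
      hexp = 4.0_dbl          ! hrexp (m)

      do nr = 1, ncat
         farea(nr) = 0.0_dbl
         fvol (nr) = 0.0_dbl
         if (nr < ncat) then
            if (hi1 < hin_max(nr)) then
               hL   = max(hi1, hin_max(nr-1))
               hR   = hin_max(nr)
               expL = exp(-(hL-hi1)/hexp)
               expR = exp(-(hR-hi1)/hexp)
               farea(nr) = expL - expR
               fvol (nr) = ((hL+hexp)*expL - (hR+hexp)*expR)/(hi1+hexp)
            end if
         else                 ! thickest category extends to infinity
            hL   = max(hi1, hin_max(nr-1))
            expL = exp(-(hL-hi1)/hexp)
            farea(nr) = expL
            fvol (nr) = (hL+hexp)*expL/(hi1+hexp)
         end if
      end do

      print *, 'sum(farea) =', sum(farea)   ! should be ~1
      print *, 'sum(fvol)  =', sum(fvol)    ! should be ~1
   end program check_ridge_redist
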
n (ridging categories) + + !----------------------------------------------------------------- + ! Compute new tracers + !----------------------------------------------------------------- + + do n = 1, ncat + call compute_tracers (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend, & + atrcrn(:,:,n), aicen(:,:, n), & + vicen (:,:, n), vsnon(:,:, n), & + trcrn(:,:,:,n)) + enddo + + end subroutine ridge_shift + +!======================================================================= + +! Make sure ice area <=1. If not, prepare to repeat ridging. +! +! authors William H. Lipscomb, LANL + + subroutine ridge_check (nx_block, ny_block, & + icells, indxi, indxj, & + dt, & + asum, closing_net, & + divu_adv, opning, & + iterate_ridging) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with ice present + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + asum ! sum of ice and open water area + + real (kind=dbl_kind), dimension (icells), & + intent(inout) :: & + closing_net, & ! net rate at which area is removed (1/s) + divu_adv , & ! divu as implied by transport scheme (1/s) + opning ! rate of opening due to divergence/shear + + logical (kind=log_kind), intent(out) :: & + iterate_ridging ! if true, repeat the ridging + + ! local variables + + integer (kind=int_kind) :: & + ij ! horizontal index, combines i and j loops + + iterate_ridging = .false. + + do ij = 1, icells + if (abs(asum(ij) - c1) < puny) then + closing_net(ij) = c0 + opning (ij) = c0 + else + iterate_ridging = .true. + divu_adv(ij) = (c1 - asum(ij)) / dt + closing_net(ij) = max(c0, -divu_adv(ij)) + opning(ij) = max(c0, divu_adv(ij)) + endif + enddo + + end subroutine ridge_check + +!======================================================================= + +! Compute the strength of the ice pack, defined as the energy (J m-2) +! dissipated per unit area removed from the ice pack under compression, +! and assumed proportional to the change in potential energy caused +! by ridging. +! +! See Rothrock (1975) and Hibler (1980). +! +! For simpler strength parameterization, see this reference: +! Hibler, W. D. III, 1979: A dynamic-thermodynamic sea ice model, +! J. Phys. Oceanog., 9, 817-846. +! +! authors: William H. Lipscomb, LANL +! Elizabeth C. Hunke, LANL + + subroutine ice_strength (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icells, & + indxi, indxj, & + aice, vice, & + aice0, aicen, & + vicen, strength) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beg and end of physical domain + + integer (kind=int_kind), intent(in) :: & + icells ! no. of cells where icetmask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aice , & ! concentration of ice + vice , & ! volume per unit area of ice (m) + aice0 ! concentration of open water + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen , & ! concentration of ice + vicen ! volume per unit area of ice (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + strength ! 
ice strength (N/m) + + ! local variables + + real (kind=dbl_kind), dimension (icells) :: & + asum , & ! sum of ice and open water area + aksum ! ratio of area removed to area ridged + + real (kind=dbl_kind), dimension (icells,0:ncat) :: & + apartic ! participation function; fraction of ridging + ! and closing associated w/ category n + + real (kind=dbl_kind), dimension (icells,ncat) :: & + hrmin , & ! minimum ridge thickness + hrmax , & ! maximum ridge thickness (krdg_redist = 0) + hrexp , & ! ridge e-folding thickness (krdg_redist = 1) + krdg ! mean ridge thickness/thickness of ridging ice + + integer (kind=int_kind) :: & + i,j , & ! horizontal indices + n , & ! thickness category index + ij ! horizontal index, combines i and j loops + + real (kind=dbl_kind) :: & + hi , & ! ice thickness (m) + h2rdg , & ! mean value of h^2 for new ridge + dh2rdg ! change in mean value of h^2 per unit area + ! consumed by ridging + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + strength(:,:) = c0 + + if (kstrength == 1) then ! Rothrock '75 formulation + + !----------------------------------------------------------------- + ! Compute thickness distribution of ridging and ridged ice. + !----------------------------------------------------------------- + + call asum_ridging (nx_block, ny_block, & + icells, indxi, indxj, & + aicen, aice0, & + asum) + + call ridge_itd (nx_block, ny_block, & + icells, indxi, indxj, & + aicen, vicen, & + aice0, & + aksum, apartic, & + hrmin, hrmax, & + hrexp, krdg) + + !----------------------------------------------------------------- + ! Compute ice strength based on change in potential energy, + ! as in Rothrock (1975) + !----------------------------------------------------------------- + + if (krdg_redist==0) then ! Hibler 1980 formulation + + do n = 1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (aicen(i,j,n) > puny .and. apartic(ij,n) > c0)then + hi = vicen(i,j,n) / aicen(i,j,n) + h2rdg = p333 * (hrmax(ij,n)**3 - hrmin(ij,n)**3) & + / (hrmax(ij,n) - hrmin(ij,n)) + dh2rdg = -hi*hi + h2rdg/krdg(ij,n) + strength(i,j) = strength(i,j) & + + apartic(ij,n) * dh2rdg + endif ! aicen > puny + enddo ! ij + enddo ! n + + elseif (krdg_redist==1) then ! exponential formulation + + do n = 1, ncat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (aicen(i,j,n) > puny .and. apartic(ij,n) > c0)then + hi = vicen(i,j,n) / aicen(i,j,n) + h2rdg = hrmin(ij,n)*hrmin(ij,n) & + + c2*hrmin(ij,n)*hrexp(ij,n) & + + c2*hrexp(ij,n)*hrexp(ij,n) + dh2rdg = -hi*hi + h2rdg/krdg(ij,n) + strength(i,j) = strength(i,j) & + + apartic(ij,n) * dh2rdg + endif + enddo ! ij + enddo ! n + + endif ! krdg_redist + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + strength(i,j) = Cf * Cp * strength(i,j) / aksum(ij) + ! Cp = (g/2)*(rhow-rhoi)*(rhoi/rhow) + ! Cf accounts for frictional dissipation + enddo ! ij + + else ! kstrength /= 1: Hibler (1979) form + + !----------------------------------------------------------------- + ! Compute ice strength as in Hibler (1979) + !----------------------------------------------------------------- + + do j = jlo, jhi + do i = ilo, ihi + strength(i,j) = Pstar*vice(i,j)*exp(-Cstar*(c1-aice(i,j))) + enddo ! j + enddo ! i + + endif ! 
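
For the simpler Hibler (1979) closure used when kstrength /= 1, the strength depends only on the grid-cell mean ice volume and the open-water fraction. The sketch below is a minimal standalone evaluation; Pstar and Cstar are nominal stand-in values, since CICE takes them from its own parameters.

   program demo_hibler_strength
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl), parameter :: Pstar = 2.75e4_dbl  ! N/m^2 (nominal)
      real (kind=dbl), parameter :: Cstar = 20.0_dbl    ! (nominal)
      real (kind=dbl) :: aice, vice, strength

      aice = 0.95_dbl        ! ice area fraction
      vice = 2.0_dbl         ! mean ice thickness over the cell (m)

      ! strength grows with volume, collapses as open water appears
      strength = Pstar*vice*exp(-Cstar*(1.0_dbl - aice))

      print *, 'P (N/m) =', strength
   end program demo_hibler_strength
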
kstrength + + end subroutine ice_strength + +!======================================================================= + + end module ice_mechred + +!======================================================================= diff --git a/source/ice_meltpond_cesm.F90 b/source/ice_meltpond_cesm.F90 new file mode 100755 index 00000000..cede7da4 --- /dev/null +++ b/source/ice_meltpond_cesm.F90 @@ -0,0 +1,261 @@ +! SVN:$Id: ice_meltpond_cesm.F90 746 2013-09-28 22:47:56Z eclare $ +!======================================================================= + +! CESM meltpond parameterization +! +! This meltpond parameterization was developed for use with the delta- +! Eddington radiation scheme, and only affects the radiation budget in +! the model. That is, although the pond volume is tracked, that liquid +! water is not used elsewhere in the model for mass budgets or other +! physical processes. +! +! authors David A. Bailey (NCAR) +! Marika M. Holland (NCAR) +! Elizabeth C. Hunke (LANL) + + module ice_meltpond_cesm + + use ice_kinds_mod + use ice_constants + + implicit none + + private + public :: init_meltponds_cesm, compute_ponds_cesm, & + write_restart_pond_cesm, read_restart_pond_cesm + + logical (kind=log_kind), public :: & + restart_pond_cesm ! if .true., read meltponds restart file + + real (kind=dbl_kind), public :: & + hs0 ! snow depth for transition to bare sea ice (m) + +!======================================================================= + + contains + +!======================================================================= + +! Initialize melt ponds. + + subroutine init_meltponds_cesm(nx_block, ny_block, ncat, & + apnd, hpnd) + + integer(kind=int_kind), intent(in) :: & + nx_block , & + ny_block , & + ncat + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(out) :: & + apnd , & ! melt pond area fraction + hpnd ! melt pond depth + + apnd(:,:,:) = c0 + hpnd(:,:,:) = c0 + + end subroutine init_meltponds_cesm + +!======================================================================= + + subroutine compute_ponds_cesm(nx_block,ny_block, & + ilo, ihi, jlo, jhi, & + dt, hi_min, & + pndaspect, & + rfrac, meltt, & + melts, frain, & + aicen, vicen, vsnon, & + Tsfcn, apnd, hpnd) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), intent(in) :: & + dt, & ! time step (s) + hi_min, & ! minimum ice thickness allowed for thermo (m) + pndaspect ! ratio of pond depth to pond fraction + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in) :: & + rfrac, & ! water fraction retained for melt ponds + meltt, & + melts, & + frain, & + aicen, & + vicen, & + vsnon + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in) :: & + Tsfcn + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + apnd, & + hpnd + +! local temporary variables + + real (kind=dbl_kind), dimension(nx_block,ny_block) :: & + volpn + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, indxj ! compressed indices for cells with ice melting + + integer (kind=int_kind) :: i,j,ij,icells + + real (kind=dbl_kind) :: & + hi , & ! ice thickness (m) + hs , & ! snow depth (m) + dTs , & ! surface temperature diff for freeze-up (C) + Tp , & ! pond freezing temperature (C) + apondn, & + hpondn + + real (kind=dbl_kind), parameter :: & + Td = c2 , & ! temperature difference for freeze-up (C) + rexp = p01 , & ! 
pond contraction scaling + dpthhi = 0.9_dbl_kind ! ratio of pond depth to ice thickness + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + volpn(:,:) = hpnd(:,:) * apnd(:,:) * aicen(:,:) + + !----------------------------------------------------------------- + ! Identify grid cells where ice can melt + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + hi = vicen(i,j)/aicen(i,j) + hs = vsnon(i,j)/aicen(i,j) + + if (hi < hi_min) then + + !-------------------------------------------------------------- + ! Remove ponds on thin ice + !-------------------------------------------------------------- + apondn = c0 + hpondn = c0 + volpn (i,j) = c0 + + else + + !----------------------------------------------------------- + ! Update pond volume + !----------------------------------------------------------- + volpn(i,j) = volpn(i,j) & + + rfrac(i,j)/rhofresh*(meltt(i,j)*rhoi & + + melts(i,j)*rhos & + + frain(i,j)* dt)& + * aicen(i,j) + + !----------------------------------------------------------- + ! Shrink pond volume under freezing conditions + !----------------------------------------------------------- + Tp = Timelt - Td + dTs = max(Tp - Tsfcn(i,j),c0) + volpn(i,j) = volpn(i,j) * exp(rexp*dTs/Tp) + volpn(i,j) = max(volpn(i,j), c0) + + ! fraction of ice covered by ponds + apondn = min (sqrt(volpn(i,j)/(pndaspect*aicen(i,j))), c1) + hpondn = pndaspect * apondn + ! fraction of grid cell covered by ponds + apondn = apondn * aicen(i,j) + + !----------------------------------------------------------- + ! Limit pond depth + !----------------------------------------------------------- + hpondn = min(hpondn, dpthhi*hi) + + endif + + !----------------------------------------------------------- + ! Reload tracer array + !----------------------------------------------------------- + apnd(i,j) = apondn / aicen(i,j) + hpnd(i,j) = hpondn + + enddo + + end subroutine compute_ponds_cesm + +!======================================================================= +! +! Dumps all values needed for restarting +! +! authors Elizabeth C. Hunke, LANL +! David A. Bailey, NCAR + + subroutine write_restart_pond_cesm() + + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_dump_pond + use ice_state, only: trcrn, nt_apnd, nt_hpnd + use ice_restart, only: write_restart_field + + ! local variables + + logical (kind=log_kind) :: diag + + diag = .true. + + call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & + 'apnd',ncat,diag) + call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & + 'hpnd',ncat,diag) + + end subroutine write_restart_pond_cesm + +!======================================================================= + +! Reads all values needed for a meltpond volume restart +! +! authors Elizabeth C. Hunke, LANL +! David A. Bailey, NCAR + + subroutine read_restart_pond_cesm() + + use ice_domain_size, only: ncat + use ice_communicate, only: my_task, master_task + use ice_fileunits, only: nu_diag, nu_restart_pond + use ice_state, only: trcrn, nt_apnd, nt_hpnd + use ice_restart, only: read_restart_field + + ! local variables + + logical (kind=log_kind) :: & + diag + + diag = .true. 
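
To make the CESM pond evolution above concrete, the sketch below applies the same three steps for a single cell: add the retained meltwater and rain, shrink the pond exponentially when the surface is cold, then diagnose pond fraction and depth from the prescribed aspect ratio. All inputs and the physical constants are nominal stand-in numbers, not CICE's own values.

   program demo_pond_cesm
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl), parameter :: rhoi = 917._dbl, rhos = 330._dbl, &
                                    rhofresh = 1000._dbl               ! nominal densities
      real (kind=dbl), parameter :: Timelt = 0._dbl, Td = 2._dbl, rexp = 0.01_dbl
      real (kind=dbl) :: aicen, meltt, melts, frain, rfrac, dt, &
                         pndaspect, Tsfc, volpn, apondn, hpondn, Tp, dTs

      aicen = 0.9_dbl   ; meltt = 0.02_dbl ; melts = 0.01_dbl
      frain = 1.e-6_dbl ; rfrac = 0.5_dbl  ; dt    = 3600._dbl
      pndaspect = 0.8_dbl ; Tsfc = -5._dbl ; volpn = 0.01_dbl

      ! add retained meltwater and rain (pond volume per unit grid-cell area)
      volpn = volpn + rfrac/rhofresh*(meltt*rhoi + melts*rhos + frain*dt)*aicen

      ! shrink the pond exponentially when the surface is below the pond
      ! freezing temperature Tp
      Tp  = Timelt - Td
      dTs = max(Tp - Tsfc, 0._dbl)
      volpn = max(volpn * exp(rexp*dTs/Tp), 0._dbl)

      ! pond fraction of ice area and pond depth from the aspect ratio
      apondn = min(sqrt(volpn/(pndaspect*aicen)), 1._dbl)
      hpondn = pndaspect * apondn

      print *, 'apnd, hpnd =', apondn, hpondn
   end program demo_pond_cesm
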
+ + if (my_task == master_task) write(nu_diag,*) 'min/max cesm ponds' + + call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & + 'apnd',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & + 'hpnd',ncat,diag,field_loc_center,field_type_scalar) + + end subroutine read_restart_pond_cesm + +!======================================================================= + + end module ice_meltpond_cesm + +!======================================================================= diff --git a/source/ice_meltpond_lvl.F90 b/source/ice_meltpond_lvl.F90 new file mode 100755 index 00000000..212e1bcc --- /dev/null +++ b/source/ice_meltpond_lvl.F90 @@ -0,0 +1,484 @@ +! SVN:$Id: ice_meltpond_lvl.F90 820 2014-08-26 19:08:29Z eclare $ +!======================================================================= + +! Level-ice meltpond parameterization +! +! This meltpond parameterization was developed for use with the delta- +! Eddington radiation scheme, and only affects the radiation budget in +! the model. That is, although the pond volume is tracked, that liquid +! water is not used elsewhere in the model for mass budgets or other +! physical processes. +! +! authors Elizabeth Hunke (LANL) +! David Hebert (NRL Stennis) +! Olivier Lecomte (Univ. Louvain) + + module ice_meltpond_lvl + + use ice_kinds_mod + use ice_constants + use ice_blocks, only: nx_block, ny_block + use ice_domain_size, only: max_blocks, ncat + + implicit none + + private + public :: init_meltponds_lvl, compute_ponds_lvl, & + write_restart_pond_lvl, read_restart_pond_lvl + + logical (kind=log_kind), public :: & + restart_pond_lvl ! if .true., read meltponds restart file + + character (len=char_len), public :: & + frzpnd ! pond refreezing parameterization + + real (kind=dbl_kind), public :: & + dpscale, & ! alter e-folding time scale for flushing + rfracmin, & ! minimum retained fraction of meltwater + rfracmax, & ! maximum retained fraction of meltwater + pndaspect, & ! ratio of pond depth to pond fraction + hs1 ! tapering parameter for snow on pond ice + + real (kind=dbl_kind), public, & + dimension (nx_block,ny_block,ncat,max_blocks) :: & + dhsn, & ! depth difference for snow on sea ice and pond ice + ffracn ! fraction of fsurfn used to melt ipond + +!======================================================================= + + contains + +!======================================================================= + +! Initialize melt ponds. + + subroutine init_meltponds_lvl(nx_block, ny_block, ncat, & + apnd, hpnd, ipnd, dhsn) + + integer(kind=int_kind), intent(in) :: & + nx_block , & + ny_block , & + ncat + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(out) :: & + apnd , & ! melt pond area fraction + hpnd , & ! melt pond depth + ipnd , & ! melt pond refrozen lid thickness + dhsn ! 
depth difference for snow on sea ice and pond ice + + apnd(:,:,:) = c0 + hpnd(:,:,:) = c0 + ipnd(:,:,:) = c0 + dhsn(:,:,:) = c0 + + end subroutine init_meltponds_lvl + +!======================================================================= + + subroutine compute_ponds_lvl(nx_block,ny_block, & + ilo, ihi, jlo, jhi, & + dt, hi_min, & + dpscale, frzpnd, & + pndaspect, & + rfrac, meltt, melts, & + frain, Tair, fsurfn,& + dhs, ffrac, & + aicen, vicen, vsnon, & + qicen, sicen, & + Tsfcn, alvl, & + apnd, hpnd, ipnd) + + use ice_constants, only: viscosity_dyn + use ice_domain_size, only: nilyr + use ice_therm_shared, only: ktherm + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), intent(in) :: & + dt, & ! time step (s) + hi_min, & ! minimum ice thickness allowed for thermo (m) + dpscale, & ! alter e-folding time scale for flushing + pndaspect ! ratio of pond depth to pond fraction + + character (len=char_len), intent(in) :: & + frzpnd ! pond refreezing parameterization + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in) :: & + Tsfcn, & ! surface temperature (C) + alvl, & ! fraction of level ice + rfrac, & ! water fraction retained for melt ponds + meltt, & ! top melt rate (m/s) + melts, & ! snow melt rate (m/s) + frain, & ! rainfall rate (kg/m2/s) + Tair, & ! air temperature (K) + fsurfn,& ! atm-ice surface heat flux (W/m2) + aicen, & ! ice area fraction + vicen, & ! ice volume (m) + vsnon ! snow volume (m) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + apnd, hpnd, ipnd + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), intent(in) :: & + qicen, & ! ice layer enthalpy (J m-3) + sicen ! salinity (ppt) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in) :: & + dhs ! depth difference for snow on sea ice and pond ice + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(out) :: & + ffrac ! fraction of fsurfn over pond used to melt ipond + + ! local temporary variables + + real (kind=dbl_kind), dimension(nx_block,ny_block) :: & + volpn ! pond volume per unit area (m) + + real (kind=dbl_kind), dimension (nilyr) :: & + Tmlt ! melting temperature (C) + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, indxj ! compressed indices for cells with ice melting + + integer (kind=int_kind) :: i, j, ij, icells ! indices + + real (kind=dbl_kind) :: & + hi , & ! ice thickness (m) + hs , & ! snow depth (m) + dTs , & ! surface temperature diff for freeze-up (C) + Tp , & ! pond freezing temperature (C) + Ts , & ! surface air temperature (C) + apondn , & ! local pond area + hpondn , & ! local pond depth (m) + dvn , & ! change in pond volume (m) + hlid, alid , & ! refrozen lid thickness, area + dhlid , & ! change in refrozen lid thickness + bdt , & ! 2 kice dT dt / (rhoi Lfresh) + alvl_tmp , & ! level ice fraction of ice area + draft, deltah, pressure_head, perm, drain ! for permeability + + real (kind=dbl_kind), parameter :: & + Td = c2 , & ! temperature difference for freeze-up (C) + rexp = p01 ! pond contraction scaling + + !----------------------------------------------------------------- + ! 
Initialize + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + volpn(i,j) = hpnd(i,j) & + * aicen(i,j) * alvl(i,j) * apnd(i,j) + ffrac(i,j) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! Identify grid cells where ponds can be + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j)*alvl(i,j) > puny**2) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + hi = vicen(i,j)/aicen(i,j) + hs = vsnon(i,j)/aicen(i,j) + alvl_tmp = alvl(i,j) + + if (hi < hi_min) then + + !----------------------------------------------------------- + ! Remove ponds on thin ice + !----------------------------------------------------------- + + apondn = c0 + hpondn = c0 + volpn (i,j) = c0 + hlid = c0 + + else + + !----------------------------------------------------------- + ! initialize pond area as fraction of ice + !----------------------------------------------------------- + apondn = apnd(i,j)*alvl_tmp + + !----------------------------------------------------------- + ! update pond volume + !----------------------------------------------------------- + ! add melt water + dvn = rfrac(i,j)/rhofresh*(meltt(i,j)*rhoi & + + melts(i,j)*rhos & + + frain(i,j)* dt)*aicen(i,j) + + ! shrink pond volume under freezing conditions + if (trim(frzpnd) == 'cesm') then + Tp = Timelt - Td + dTs = max(Tp - Tsfcn(i,j),c0) + dvn = dvn - volpn(i,j) * (c1 - exp(rexp*dTs/Tp)) + + else + ! trim(frzpnd) == 'hlid' Stefan approximation + ! assumes pond is fresh (freezing temperature = 0 C) + ! and ice grows from existing pond ice + hlid = ipnd(i,j) + if (dvn == c0) then ! freeze pond + Ts = Tair(i,j) - Tffresh + if (Ts < c0) then + ! if (Ts < -c2) then ! as in meltpond_cesm + bdt = -c2*Ts*kice*dt/(rhoi*Lfresh) + dhlid = p5*sqrt(bdt) ! open water freezing + if (hlid > dhlid) dhlid = p5*bdt/hlid ! existing ice + dhlid = min(dhlid, hpnd(i,j)*rhofresh/rhoi) + hlid = hlid + dhlid + else + dhlid = c0 ! to account for surface inversions + endif + else ! convert refrozen pond ice back to water + dhlid = max(fsurfn(i,j)*dt / (rhoi*Lfresh), c0) ! > 0 + dhlid = -min(dhlid, hlid) ! < 0 + hlid = max(hlid + dhlid, c0) + if (hs - dhs(i,j) < puny) then ! pond ice is snow-free + ffrac(i,j) = c1 ! fraction of fsurfn over pond used to melt ipond + if (fsurfn(i,j) > puny) & + ffrac(i,j) = min(-dhlid*rhoi*Lfresh/(dt*fsurfn(i,j)), c1) + endif + endif + alid = apondn * aicen(i,j) + dvn = dvn - dhlid*alid*rhoi/rhofresh + endif + + volpn(i,j) = volpn(i,j) + dvn + + !----------------------------------------------------------- + ! update pond area and depth + !----------------------------------------------------------- + if (volpn(i,j) <= c0) then + volpn(i,j) = c0 + apondn = c0 + endif + + if (apondn*aicen(i,j) > puny) then ! existing ponds + apondn = max(c0, min(alvl_tmp, & + apondn + 0.5*dvn/(pndaspect*apondn*aicen(i,j)))) + hpondn = c0 + if (apondn > puny) & + hpondn = volpn(i,j)/(apondn*aicen(i,j)) + + elseif (alvl_tmp*aicen(i,j) > c10*puny) then ! new ponds + apondn = min (sqrt(volpn(i,j)/(pndaspect*aicen(i,j))), alvl_tmp) + hpondn = pndaspect * apondn + + else ! melt water runs off deformed ice + apondn = c0 + hpondn = c0 + endif + apondn = max(apondn, c0) + + ! 
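
The 'hlid' branch above is a Stefan freezing law for the pond lid: bdt = 2*kice*|Ts|*dt/(rhoi*Lfresh), with growth limited by the existing lid thickness. The standalone sketch below integrates it over a day of constant cooling; the physical constants are nominal stand-ins rather than CICE's own.

   program demo_pond_lid_stefan
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl), parameter :: kice   = 2.03_dbl    ! W m-1 K-1 (nominal)
      real (kind=dbl), parameter :: rhoi   = 917._dbl    ! kg m-3 (nominal)
      real (kind=dbl), parameter :: Lfresh = 3.34e5_dbl  ! J kg-1 (nominal)
      real (kind=dbl) :: Ts, dt, hlid, dhlid, bdt
      integer :: istep

      Ts   = -10._dbl        ! pond-surface air temperature (C)
      dt   = 3600._dbl       ! time step (s)
      hlid = 0._dbl          ! refrozen lid thickness (m)

      do istep = 1, 24       ! one day of freezing
         bdt   = -2._dbl*Ts*kice*dt/(rhoi*Lfresh)
         dhlid = 0.5_dbl*sqrt(bdt)                  ! lid forming on open pond
         if (hlid > dhlid) dhlid = 0.5_dbl*bdt/hlid ! growth limited by existing lid
         hlid  = hlid + dhlid
      end do

      print *, 'lid thickness after 24 h (m):', hlid
   end program demo_pond_lid_stefan
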
limit pond depth to maintain nonnegative freeboard + hpondn = min(hpondn, ((rhow-rhoi)*hi - rhos*hs)/rhofresh) + + ! fraction of grid cell covered by ponds + apondn = apondn * aicen(i,j) + + volpn(i,j) = hpondn*apondn + if (volpn(i,j) <= c0) then + volpn(i,j) = c0 + apondn = c0 + hpondn = c0 + hlid = c0 + endif + + !----------------------------------------------------------- + ! drainage due to permeability (flushing) + ! setting dpscale = 0 turns this off + ! NOTE this uses the initial salinity and melting T profiles + !----------------------------------------------------------- + + if (ktherm /= 2 .and. hpondn > c0 .and. dpscale > puny) then + draft = (rhos*hs + rhoi*hi)/rhow + hpondn + deltah = hpondn + hi - draft + pressure_head = gravit * rhow * max(deltah, c0) + Tmlt(:) = -sicen(i,j,:) * depressT + call brine_permeability(nilyr, qicen(i,j,:), & + vicen(i,j), sicen(i,j,:), Tmlt, perm) + drain = perm*pressure_head*dt / (viscosity_dyn*hi) * dpscale + deltah = min(drain, hpondn) + dvn = -deltah*apondn + volpn(i,j) = volpn(i,j) + dvn + apondn = max(c0, min(apondn & + + 0.5*dvn/(pndaspect*apondn), alvl_tmp*aicen(i,j))) + hpondn = c0 + if (apondn > puny) hpondn = volpn(i,j)/apondn + endif + + endif + + !----------------------------------------------------------- + ! Reload tracer array + !----------------------------------------------------------- + + hpnd(i,j) = hpondn + apnd(i,j) = apondn / (aicen(i,j)*alvl_tmp) + if (trim(frzpnd) == 'hlid') ipnd(i,j) = hlid + + enddo + + end subroutine compute_ponds_lvl + +!======================================================================= + +! determine the liquid fraction of brine in the ice and the permeability + + subroutine brine_permeability(nilyr, qicen, vicen, salin, Tmlt, perm) + + use ice_therm_shared, only: calculate_Tin_from_qin + + integer (kind=int_kind), intent(in) :: & + nilyr ! number of ice layers + + real (kind=dbl_kind), dimension(nilyr), intent(in) :: & + qicen, & ! enthalpy for each ice layer (J m-3) + salin, & ! salinity (ppt) + Tmlt ! melting temperature (C) + + real (kind=dbl_kind), intent(in) :: & + vicen ! ice volume (m) + + real (kind=dbl_kind), intent(out) :: & + perm ! permeability (m^2) + + ! local variables + + real (kind=dbl_kind) :: & + Sbr ! brine salinity + + real (kind=dbl_kind), dimension(nilyr) :: & + Tin, & ! ice temperature (C) + phi ! liquid fraction + + integer (kind=int_kind) :: k + + !----------------------------------------------------------------- + ! Compute ice temperatures from enthalpies using quadratic formula + !----------------------------------------------------------------- + + do k = 1,nilyr + Tin(k) = calculate_Tin_from_qin(qicen(k),Tmlt(k)) + enddo + + !----------------------------------------------------------------- + ! brine salinity and liquid fraction + !----------------------------------------------------------------- + + do k = 1,nilyr + Sbr = c1/(1.e-3_dbl_kind - depressT/Tin(k)) ! Notz thesis eq 3.6 + phi(k) = salin(k)/Sbr ! liquid fraction + if (phi(k) < 0.05) phi(k) = c0 ! impermeable + enddo + + !----------------------------------------------------------------- + ! permeability + !----------------------------------------------------------------- + + perm = 3.0e-8_dbl_kind * (minval(phi))**3 + + end subroutine brine_permeability + +!======================================================================= +! +! Dumps all values needed for restarting +! +! authors Elizabeth C. 
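
The permeability/drainage chain above can be exercised on its own: brine salinity from the Notz relation, liquid fraction per layer, permeability from the minimum liquid fraction, and a Darcy estimate of the pond depth flushed in one step. The layer temperatures, salinities and constants below are nominal examples only, and the level-ice scheme additionally scales the drainage by dpscale.

   program demo_brine_permeability
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      integer, parameter :: nilyr = 4
      real (kind=dbl), parameter :: depressT = 0.054_dbl        ! C/ppt (nominal)
      real (kind=dbl), parameter :: gravit = 9.81_dbl, rhow = 1026._dbl
      real (kind=dbl), parameter :: viscosity_dyn = 1.79e-3_dbl ! kg m-1 s-1
      real (kind=dbl) :: Tin(nilyr), salin(nilyr), phi(nilyr)
      real (kind=dbl) :: Sbr, perm, hi, hpond, pressure_head, dt, drain
      integer :: k

      Tin   = (/ -2.0_dbl, -1.8_dbl, -1.6_dbl, -1.5_dbl /) ! layer temperatures (C)
      salin = (/  3.0_dbl,  4.0_dbl,  5.0_dbl,  6.0_dbl /) ! layer salinities (ppt)

      do k = 1, nilyr
         Sbr    = 1._dbl/(1.e-3_dbl - depressT/Tin(k))     ! brine salinity (Notz)
         phi(k) = salin(k)/Sbr                             ! liquid (brine) fraction
         if (phi(k) < 0.05_dbl) phi(k) = 0._dbl            ! impermeable layer
      end do
      perm = 3.0e-8_dbl * minval(phi)**3                   ! permeability (m^2)

      ! Darcy drainage of pond water through the ice over one time step
      hi    = 1.5_dbl                                      ! ice thickness (m)
      hpond = 0.2_dbl                                      ! hydraulic head (m)
      dt    = 3600._dbl
      pressure_head = gravit*rhow*hpond
      drain = perm*pressure_head*dt/(viscosity_dyn*hi)

      print *, 'permeability (m^2) =', perm
      print *, 'drained depth (m)  =', drain
   end program demo_brine_permeability
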
Hunke, LANL + + subroutine write_restart_pond_lvl() + + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_dump_pond + use ice_flux, only: fsnow + use ice_state, only: trcrn, nt_apnd, nt_hpnd, nt_ipnd + use ice_restart, only: write_restart_field + + ! local variables + + logical (kind=log_kind) :: diag + + diag = .true. + + call write_restart_field(nu_dump_pond,0, trcrn(:,:,nt_apnd,:,:),'ruf8', & + 'apnd',ncat,diag) + call write_restart_field(nu_dump_pond,0, trcrn(:,:,nt_hpnd,:,:),'ruf8', & + 'hpnd',ncat,diag) + call write_restart_field(nu_dump_pond,0, trcrn(:,:,nt_ipnd,:,:),'ruf8', & + 'ipnd',ncat,diag) + call write_restart_field(nu_dump_pond,0, fsnow(:,:, :),'ruf8', & + 'fsnow',1,diag) + call write_restart_field(nu_dump_pond,0, dhsn(:,:, :,:),'ruf8', & + 'dhs',ncat,diag) + call write_restart_field(nu_dump_pond,0,ffracn(:,:, :,:),'ruf8', & + 'ffrac',ncat,diag) + + end subroutine write_restart_pond_lvl + +!======================================================================= + +! Reads all values needed for a meltpond volume restart +! +! authors Elizabeth C. Hunke, LANL + + subroutine read_restart_pond_lvl() + + use ice_domain_size, only: ncat + use ice_communicate, only: my_task, master_task + use ice_fileunits, only: nu_diag, nu_restart_pond + use ice_flux, only: fsnow + use ice_state, only: trcrn, nt_apnd, nt_hpnd, nt_ipnd + use ice_restart, only: read_restart_field + + ! local variables + + logical (kind=log_kind) :: & + diag + + diag = .true. + + if (my_task == master_task) write(nu_diag,*) 'min/max level-ice ponds' + + call read_restart_field(nu_restart_pond,0, trcrn(:,:,nt_apnd,:,:),'ruf8', & + 'apnd',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_pond,0, trcrn(:,:,nt_hpnd,:,:),'ruf8', & + 'hpnd',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_pond,0, trcrn(:,:,nt_ipnd,:,:),'ruf8', & + 'ipnd',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_pond,0, fsnow(:,:, :),'ruf8', & + 'fsnow',1,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_pond,0, dhsn(:,:, :,:),'ruf8', & + 'dhs',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_pond,0,ffracn(:,:, :,:),'ruf8', & + 'ffrac',ncat,diag,field_loc_center,field_type_scalar) + + end subroutine read_restart_pond_lvl + +!======================================================================= + + end module ice_meltpond_lvl + +!======================================================================= diff --git a/source/ice_meltpond_topo.F90 b/source/ice_meltpond_topo.F90 new file mode 100755 index 00000000..b74291dd --- /dev/null +++ b/source/ice_meltpond_topo.F90 @@ -0,0 +1,967 @@ +! SVN:$Id: ice_meltpond_topo.F90 820 2014-08-26 19:08:29Z eclare $ +!======================================================================= + +! Melt pond evolution based on the ice topography as inferred from +! the ice thickness distribution. This code is based on (but differs +! from) that described in +! +! Flocco, D. and D. L. Feltham, 2007. A continuum model of melt pond +! evolution on Arctic sea ice. J. Geophys. Res. 112, C08016, doi: +! 10.1029/2006JC003836. +! +! Flocco, D., D. L. Feltham and A. K. Turner, 2010. Incorporation of a +! physically based melt pond scheme into the sea ice component of a +! climate model. J. Geophys. Res. 115, C08012, doi: 10.1029/2009JC005568. +! +! authors Daniela Flocco (UCL) +! Adrian Turner (UCL) +! 
2010 ECH added module based on original code from Daniela Flocco, UCL +! 2012 DSCHR modifications + + module ice_meltpond_topo + + use ice_kinds_mod + use ice_constants + use ice_domain_size, only: nilyr, ncat + + implicit none + + private + public :: init_meltponds_topo, compute_ponds_topo, & + write_restart_pond_topo, read_restart_pond_topo + + logical (kind=log_kind), public :: & + restart_pond_topo ! if .true., read meltponds restart file + + real (kind=dbl_kind), public :: & + hp1 ! critical parameter for pond ice thickness + +!======================================================================= + + contains + +!======================================================================= + +! Initialize melt ponds. + + subroutine init_meltponds_topo(nx_block, ny_block, ncat, & + apnd, hpnd, ipnd) + + integer(kind=int_kind), intent(in) :: & + nx_block , & + ny_block , & + ncat + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(out) :: & + apnd , & ! melt pond area fraction + hpnd , & ! melt pond depth + ipnd ! melt pond refrozen lid thickness + + apnd(:,:,:) = c0 + hpnd(:,:,:) = c0 + ipnd(:,:,:) = c0 + + end subroutine init_meltponds_topo + +!======================================================================= + + subroutine compute_ponds_topo(nx_block,ny_block, & + ilo, ihi, jlo, jhi, & + dt, & + aice, aicen, & + vice, vicen, & + vsno, vsnon, & + potT, meltt, & + fsurf, fpond, & + Tsfcn, Tf, & + qicen, sicen, & + apnd, hpnd, ipnd) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aice, & ! total ice area fraction + vsno, & ! total snow volume (m) + Tf ! ocean freezing temperature [= ice bottom temperature] (degC) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + vice, & ! total ice volume (m) + fpond ! fresh water flux to ponds (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen, & ! ice area fraction, per category + vsnon ! snow volume, per category (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + vicen ! ice volume, per category (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + Tsfcn + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr,ncat), & + intent(in) :: & + qicen, & + sicen + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + apnd, & + hpnd, & + ipnd + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + potT, & ! air potential temperature + meltt, & ! total surface meltwater flux + fsurf ! thermodynamic heat flux at ice/snow surface (W/m^2) + + ! local variables + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + volpn, & ! pond volume per unit area, per category (m) + vuin ! water-equivalent volume of ice lid on melt pond ('upper ice', m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + apondn,& ! pond area fraction, per category + hpondn ! pond depth, per category (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + volp ! total volume of pond, per unit area of pond (m) + + real (kind=dbl_kind) :: & + hi, & ! ice thickness (m) + dHui, & ! change in thickness of ice lid (m) + omega, & ! conduction + dTice, & ! 
temperature difference across ice lid (C) + dvice, & ! change in ice volume (m) + Tavg, & ! mean surface temperature across categories (C) + Tp, & ! pond freezing temperature (C) + dvn ! change in melt pond volume for fresh water budget + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, indxj ! compressed indices for cells with ice melting + + integer (kind=int_kind) :: n,i,j,ij,icells,indxij ! loop indices + + integer (kind=int_kind), dimension (ncat) :: & + kcells ! cells where ice lid combines with vice + + integer (kind=int_kind), dimension (nx_block*ny_block,ncat) :: & + indxii, indxjj ! i,j indices for kcells loop + + real (kind=dbl_kind), parameter :: & + hicemin = p1 , & ! minimum ice thickness with ponds (m) + Td = p15 , & ! temperature difference for freeze-up (C) + rhoi_L = Lfresh * rhoi, & ! (J/m^3) + min_volp = 1.e-4_dbl_kind ! minimum pond volume (m) + + !--------------------------------------------------------------- + ! initialize + !--------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + volp(i,j) = c0 + enddo + enddo + do n = 1, ncat + do j = jlo, jhi + do i = ilo, ihi + ! load tracers + volp(i,j) = volp(i,j) + hpnd(i,j,n) & + * apnd(i,j,n) * aicen(i,j,n) + vuin (i,j,n) = ipnd(i,j,n) & + * apnd(i,j,n) * aicen(i,j,n) + + hpondn(i,j,n) = c0 ! pond depth, per category + apondn(i,j,n) = c0 ! pond area, per category + enddo + enddo + indxii(:,n) = 0 + indxjj(:,n) = 0 + kcells (n) = 0 + enddo + + ! The freezing temperature for meltponds is assumed slightly below 0C, + ! as if meltponds had a little salt in them. The salt budget is not + ! altered for meltponds, but if it were then an actual pond freezing + ! temperature could be computed. + + Tp = Timelt - Td + + !----------------------------------------------------------------- + ! Identify grid cells with ponds + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + hi = c0 + if (aice(i,j) > puny) hi = vice(i,j)/aice(i,j) + if ( aice(i,j) > p01 .and. hi > hicemin .and. & + volp(i,j) > min_volp*aice(i,j)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + else ! remove ponds on thin ice + fpond(i,j) = fpond(i,j) - volp(i,j) + volpn(i,j,:) = c0 + vuin (i,j,:) = c0 + volp (i,j) = c0 + endif + enddo ! i + enddo ! j + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !-------------------------------------------------------------- + ! calculate pond area and depth + !-------------------------------------------------------------- + call pond_area(dt,aice(i,j), vice(i,j),vsno(i,j), & + aicen(i,j,:), vicen(i,j,:), vsnon(i,j,:), & + qicen(i,j,:,:), sicen(i,j,:,:), & + volpn(i,j,:), volp(i,j), & + Tsfcn(i,j,:), Tf(i,j), & + apondn(i,j,:), hpondn(i,j,:), dvn) + + fpond(i,j) = fpond(i,j) - dvn + + ! mean surface temperature + Tavg = c0 + do n = 1, ncat + Tavg = Tavg + Tsfcn(i,j,n)*aicen(i,j,n) + enddo + Tavg = Tavg / aice(i,j) + + do n = 1, ncat-1 + + if (vuin(i,j,n) > puny) then + + !---------------------------------------------------------------- + ! melting: floating upper ice layer melts in whole or part + !---------------------------------------------------------------- + ! 
Use Tsfc for each category + if (Tsfcn(i,j,n) > Tp) then + + dvice = min(meltt(i,j)*apondn(i,j,n), vuin(i,j,n)) + if (dvice > puny) then + vuin (i,j,n) = vuin (i,j,n) - dvice + volpn(i,j,n) = volpn(i,j,n) + dvice + volp (i,j) = volp (i,j) + dvice + fpond(i,j) = fpond(i,j) + dvice + + if (vuin(i,j,n) < puny .and. volpn(i,j,n) > puny) then + ! ice lid melted and category is pond covered + volpn(i,j,n) = volpn(i,j,n) + vuin(i,j,n) + fpond(i,j) = fpond(i,j) + vuin(i,j,n) + vuin(i,j,n) = c0 + endif + hpondn(i,j,n) = volpn(i,j,n) / apondn(i,j,n) + endif + + !---------------------------------------------------------------- + ! freezing: existing upper ice layer grows + !---------------------------------------------------------------- + else if (volpn(i,j,n) > puny) then ! Tsfcn(i,j,n) <= Tp + + ! differential growth of base of surface floating ice layer + dTice = max(-Tsfcn(i,j,n)-Td, c0) ! > 0 + omega = kice*DTice/rhoi_L + dHui = sqrt(c2*omega*dt + (vuin(i,j,n)/aicen(i,j,n))**2) & + - vuin(i,j,n)/aicen(i,j,n) + + dvice = min(dHui*apondn(i,j,n), volpn(i,j,n)) + if (dvice > puny) then + vuin (i,j,n) = vuin (i,j,n) + dvice + volpn(i,j,n) = volpn(i,j,n) - dvice + volp (i,j) = volp (i,j) - dvice + fpond(i,j) = fpond(i,j) - dvice + hpondn(i,j,n) = volpn(i,j,n) / apondn(i,j,n) + endif + + endif ! Tsfcn(i,j,n) + + !---------------------------------------------------------------- + ! freezing: upper ice layer begins to form + ! note: albedo does not change + !---------------------------------------------------------------- + else ! vuin < puny + + ! thickness of newly formed ice + ! the surface temperature of a meltpond is the same as that + ! of the ice underneath (0C), and the thermodynamic surface + ! flux is the same + dHui = max(-fsurf(i,j)*dt/rhoi_L, c0) + dvice = min(dHui*apondn(i,j,n), volpn(i,j,n)) + if (dvice > puny) then + vuin (i,j,n) = dvice + volpn(i,j,n) = volpn(i,j,n) - dvice + volp (i,j) = volp (i,j) - dvice + fpond(i,j) = fpond(i,j) - dvice + hpondn(i,j,n)= volpn(i,j,n) / apondn(i,j,n) + endif + + endif ! vuin + + enddo ! ncat + + enddo ! ij + + !--------------------------------------------------------------- + ! remove ice lid if there is no liquid pond + ! vuin may be nonzero on category ncat due to dynamics + !--------------------------------------------------------------- + + do j = jlo, jhi + do i = ilo, ihi + do n = 1, ncat + if (aicen(i,j,n) > puny .and. volpn(i,j,n) < puny & + .and. vuin (i,j,n) > puny) then + kcells(n) = kcells(n) + 1 + indxij = kcells(n) + indxii(indxij,n) = i + indxjj(indxij,n) = j + endif + enddo + enddo ! i + enddo ! j + + do n = 1, ncat + + if (kcells(n) > 0) then + do ij = 1, kcells(n) + i = indxii(ij,n) + j = indxjj(ij,n) + vuin(i,j,n) = c0 + enddo ! ij + endif + + ! reload tracers + do j = jlo, jhi + do i = ilo, ihi + if (apondn(i,j,n) > puny) then + ipnd(i,j,n) = vuin(i,j,n) / apondn(i,j,n) + else + vuin(i,j,n) = c0 + ipnd(i,j,n) = c0 + endif + if (aicen(i,j,n) > puny) then + apnd(i,j,n) = apondn(i,j,n) / aicen(i,j,n) + hpnd(i,j,n) = hpondn(i,j,n) + else + apnd(i,j,n) = c0 + hpnd(i,j,n) = c0 + ipnd(i,j,n) = c0 + endif + enddo ! i + enddo ! j + + enddo ! n + + end subroutine compute_ponds_topo + +!======================================================================= + +! 
Computes melt pond area, pond depth and melting rates + + subroutine pond_area(dt, & + aice, vice, vsno, & + aicen, vicen, vsnon, & + qicen, sicen, & + volpn, volp, & + Tsfcn, Tf, & + apondn, hpondn, dvolp) + + use ice_constants, only: viscosity_dyn + use ice_exit, only: abort_ice + use ice_therm_shared, only: ktherm + + real (kind=dbl_kind), intent(in) :: & + dt, aice, vice, vsno, Tf + + real (kind=dbl_kind), dimension(ncat), intent(in) :: & + aicen, vicen, vsnon, Tsfcn + + real (kind=dbl_kind), dimension(nilyr,ncat), intent(in) :: & + qicen, & + sicen + + real (kind=dbl_kind), dimension(ncat), intent(inout) :: & + volpn + + real (kind=dbl_kind), intent(inout) :: & + volp, dvolp + + real (kind=dbl_kind), dimension(ncat), intent(out) :: & + apondn, hpondn + + ! local variables + + integer (kind=int_kind) :: & + n, ns, & + m_index, & + permflag + + real (kind=dbl_kind), dimension(ncat) :: & + hicen, & + hsnon, & + asnon, & + alfan, & + betan, & + cum_max_vol, & + reduced_aicen + + real (kind=dbl_kind), dimension(0:ncat) :: & + cum_max_vol_tmp + + real (kind=dbl_kind) :: & + hpond, & + drain, & + floe_weight, & + pressure_head, & + hsl_rel, & + deltah, & + perm, & + apond + + !-----------| + ! | + ! |-----------| + !___________|___________|______________________________________sea-level + ! | | + ! | |---^--------| + ! | | | | + ! | | | |-----------| |------- + ! | | |alfan(n)| | | + ! | | | | |--------------| + ! | | | | | | + !---------------------------v------------------------------------------- + ! | | ^ | | | + ! | | | | |--------------| + ! | | |betan(n)| | | + ! | | | |-----------| |------- + ! | | | | + ! | |---v------- | + ! | | + ! |-----------| + ! | + !-----------| + + !------------------------------------------------------------------- + ! initialize + !------------------------------------------------------------------- + + do n = 1, ncat + + apondn(n) = c0 + hpondn(n) = c0 + + if (aicen(n) < puny) then + hicen(n) = c0 + hsnon(n) = c0 + reduced_aicen(n) = c0 + asnon(n) = c0 + else + hicen(n) = vicen(n) / aicen(n) + hsnon(n) = vsnon(n) / aicen(n) + reduced_aicen(n) = c1 ! n=ncat + if (n < ncat) reduced_aicen(n) = aicen(n) & + * max(0.2_dbl_kind,(-0.024_dbl_kind*hicen(n) + 0.832_dbl_kind)) + asnon(n) = reduced_aicen(n) + endif + +! This choice for alfa and beta ignores hydrostatic equilibium of categories. +! Hydrostatic equilibium of the entire ITD is accounted for below, assuming +! a surface topography implied by alfa=0.6 and beta=0.4, and rigidity across all +! categories. alfa and beta partition the ITD - they are areas not thicknesses! +! Multiplying by hicen, alfan and betan (below) are thus volumes per unit area. +! Here, alfa = 60% of the ice area (and since hice is constant in a category, +! alfan = 60% of the ice volume) in each category lies above the reference line, +! and 40% below. Note: p6 is an arbitrary choice, but alfa+beta=1 is required. + + alfan(n) = p6 * hicen(n) + betan(n) = p4 * hicen(n) + + cum_max_vol(n) = c0 + cum_max_vol_tmp(n) = c0 + + enddo ! ncat + + cum_max_vol_tmp(0) = c0 + drain = c0 + dvolp = c0 + + !-------------------------------------------------------------------------- + ! the maximum amount of water that can be contained up to each ice category + !-------------------------------------------------------------------------- + + do n = 1, ncat-1 ! last category can not hold any volume + + if (alfan(n+1) >= alfan(n) .and. alfan(n+1) > c0) then + + ! 
total volume in level including snow + cum_max_vol_tmp(n) = cum_max_vol_tmp(n-1) + & + (alfan(n+1) - alfan(n)) * sum(reduced_aicen(1:n)) + + + ! subtract snow solid volumes from lower categories in current level + do ns = 1, n + cum_max_vol_tmp(n) = cum_max_vol_tmp(n) & + - rhos/rhow * & ! fraction of snow that is occupied by solid + asnon(ns) * & ! area of snow from that category + max(min(hsnon(ns)+alfan(ns)-alfan(n), alfan(n+1)-alfan(n)), c0) + ! thickness of snow from ns layer in n layer + enddo + + else ! assume higher categories unoccupied + cum_max_vol_tmp(n) = cum_max_vol_tmp(n-1) + endif + if (cum_max_vol_tmp(n) < c0) call abort_ice('negative melt pond volume') + + enddo + cum_max_vol_tmp(ncat) = cum_max_vol_tmp(ncat-1) ! last category holds no volume + cum_max_vol (1:ncat) = cum_max_vol_tmp(1:ncat) + + !---------------------------------------------------------------- + ! is there more meltwater than can be held in the floe? + !---------------------------------------------------------------- + if (volp >= cum_max_vol(ncat)) then + drain = volp - cum_max_vol(ncat) + puny + volp = volp - drain + dvolp = drain + if (volp < puny) then + dvolp = dvolp + volp + volp = c0 + endif + endif + + ! height and area corresponding to the remaining volume + + call calc_hpond(reduced_aicen, asnon, hsnon, alfan, & + volp, cum_max_vol, hpond, m_index) + + do n=1, m_index + hpondn(n) = max((hpond - alfan(n) + alfan(1)), c0) + apondn(n) = reduced_aicen(n) + enddo + apond = sum(apondn(1:m_index)) + + !------------------------------------------------------------------------ + ! drainage due to ice permeability - Darcy's law + !------------------------------------------------------------------------ + + ! sea water level + floe_weight = (vsno*rhos + rhoi*vice + rhow*volp) / aice + hsl_rel = floe_weight / rhow & + - ((sum(betan(:)*aicen(:))/aice) + alfan(1)) + + deltah = hpond - hsl_rel + pressure_head = gravit * rhow * max(deltah, c0) + + ! drain if ice is permeable + permflag = 0 + if (ktherm /= 2 .and. pressure_head > c0) then + do n = 1, ncat-1 + if (hicen(n) > c0) then + call permeability_phi(qicen(:,n),sicen(:,n),Tsfcn(n),Tf,vicen(n),perm) + if (perm > c0) permflag = 1 + drain = perm*apondn(n)*pressure_head*dt / (viscosity_dyn*hicen(n)) + dvolp = dvolp + min(drain, volp) + volp = max(volp - drain, c0) + if (volp < puny) then + dvolp = dvolp + volp + volp = c0 + endif + endif + enddo + + ! adjust melt pond dimensions + if (permflag > 0) then + ! recompute pond depth + call calc_hpond(reduced_aicen, asnon, hsnon, alfan, & + volp, cum_max_vol, hpond, m_index) + do n=1, m_index + hpondn(n) = hpond - alfan(n) + alfan(1) + apondn(n) = reduced_aicen(n) + enddo + apond = sum(apondn(1:m_index)) + endif + endif ! pressure_head + + !------------------------------------------------------------------------ + ! total melt pond volume in category does not include snow volume + ! snow in melt ponds is not melted + !------------------------------------------------------------------------ + + ! Calculate pond volume for lower categories + do n=1,m_index-1 + volpn(n) = apondn(n) * hpondn(n) & + - (rhos/rhow) * asnon(n) * min(hsnon(n), hpondn(n)) + enddo + + ! Calculate pond volume for highest category = remaining pond volume + if (m_index == 1) volpn(m_index) = volp + if (m_index > 1) then + if (volp > sum(volpn(1:m_index-1))) then + volpn(m_index) = volp - sum(volpn(1:m_index-1)) + else + volpn(m_index) = c0 + hpondn(m_index) = c0 + apondn(m_index) = c0 + ! 
If remaining pond volume is negative reduce pond volume of + ! lower category + if (volp+puny < sum(volpn(1:m_index-1))) & + volpn(m_index-1) = volpn(m_index-1) - sum(volpn(1:m_index-1)) + & + volp + endif + endif + + do n=1,m_index + if (apondn(n) > puny) then + hpondn(n) = volpn(n) / apondn(n) + else + dvolp = dvolp + volpn(n) + hpondn(n) = c0 + volpn(n) = c0 + apondn(n) = c0 + end if + enddo + do n = m_index+1, ncat + hpondn(n) = c0 + apondn(n) = c0 + volpn (n) = c0 + enddo + + end subroutine pond_area + +!======================================================================= + + subroutine calc_hpond(aicen, asnon, hsnon, alfan, & + volp, cum_max_vol, & + hpond, m_index) + + real (kind=dbl_kind), dimension(ncat), intent(in) :: & + aicen, & + asnon, & + hsnon, & + alfan, & + cum_max_vol + + real (kind=dbl_kind), intent(in) :: & + volp + + real (kind=dbl_kind), intent(out) :: & + hpond + + integer (kind=int_kind), intent(out) :: & + m_index + + integer :: n, ns + + real (kind=dbl_kind), dimension(0:ncat+1) :: & + hitl, & + aicetl + + real (kind=dbl_kind) :: & + rem_vol, & + area, & + vol, & + tmp + + !---------------------------------------------------------------- + ! hpond is zero if volp is zero - have we fully drained? + !---------------------------------------------------------------- + + if (volp < puny) then + hpond = c0 + m_index = 0 + else + + !---------------------------------------------------------------- + ! Calculate the category where water fills up to + !---------------------------------------------------------------- + + !----------| + ! | + ! | + ! |----------| -- -- + !__________|__________|_________________________________________ ^ + ! | | rem_vol ^ | Semi-filled + ! | |----------|-- -- -- - ---|-- ---- -- -- --v layer + ! | | | | + ! | | | |hpond + ! | | |----------| | |------- + ! | | | | | | + ! | | | |---v-----| + ! | | m_index | | | + !------------------------------------------------------------- + + m_index = 0 ! 1:m_index categories have water in them + do n = 1, ncat + if (volp <= cum_max_vol(n)) then + m_index = n + if (n == 1) then + rem_vol = volp + else + rem_vol = volp - cum_max_vol(n-1) + endif + exit ! to break out of the loop + endif + enddo + m_index = min(ncat-1, m_index) + + !---------------------------------------------------------------- + ! semi-filled layer may have m_index different snows in it + !---------------------------------------------------------------- + + !----------------------------------------------------------- ^ + ! | alfan(m_index+1) + ! | + !hitl(3)--> |----------| | + !hitl(2)--> |------------| * * * * *| | + !hitl(1)--> |----------|* * * * * * |* * * * * | | + !hitl(0)-->------------------------------------------------- | ^ + ! various snows from lower categories | |alfa(m_index) + + ! hitl - heights of the snow layers from thinner and current categories + ! aicetl - area of each snow depth in this layer + + hitl(:) = c0 + aicetl(:) = c0 + do n = 1, m_index + hitl(n) = max(min(hsnon(n) + alfan(n) - alfan(m_index), & + alfan(m_index+1) - alfan(m_index)), c0) + aicetl(n) = asnon(n) + + aicetl(0) = aicetl(0) + (aicen(n) - asnon(n)) + enddo + hitl(m_index+1) = alfan(m_index+1) - alfan(m_index) + aicetl(m_index+1) = c0 + + !---------------------------------------------------------------- + ! reorder array according to hitl + ! snow heights not necessarily in height order + !---------------------------------------------------------------- + + do ns = 1, m_index+1 + do n = 0, m_index - ns + 1 + if (hitl(n) > hitl(n+1)) then ! 
swap order + tmp = hitl(n) + hitl(n) = hitl(n+1) + hitl(n+1) = tmp + tmp = aicetl(n) + aicetl(n) = aicetl(n+1) + aicetl(n+1) = tmp + endif + enddo + enddo + + !---------------------------------------------------------------- + ! divide semi-filled layer into set of sublayers each vertically homogenous + !---------------------------------------------------------------- + + !hitl(3)---------------------------------------------------------------- + ! | * * * * * * * * + ! |* * * * * * * * * + !hitl(2)---------------------------------------------------------------- + ! | * * * * * * * * | * * * * * * * * + ! |* * * * * * * * * |* * * * * * * * * + !hitl(1)---------------------------------------------------------------- + ! | * * * * * * * * | * * * * * * * * | * * * * * * * * + ! |* * * * * * * * * |* * * * * * * * * |* * * * * * * * * + !hitl(0)---------------------------------------------------------------- + ! aicetl(0) aicetl(1) aicetl(2) aicetl(3) + + ! move up over layers incrementing volume + do n = 1, m_index+1 + + area = sum(aicetl(:)) - & ! total area of sub-layer + (rhos/rhow) * sum(aicetl(n:ncat+1)) ! area of sub-layer occupied by snow + + vol = (hitl(n) - hitl(n-1)) * area ! thickness of sub-layer times area + + if (vol >= rem_vol) then ! have reached the sub-layer with the depth within + hpond = rem_vol / area + hitl(n-1) + alfan(m_index) - alfan(1) + exit + else ! still in sub-layer below the sub-layer with the depth + rem_vol = rem_vol - vol + endif + + enddo + + endif + + end subroutine calc_hpond + +!======================================================================= + +! determine the liquid fraction of brine in the ice and the permeability + + subroutine permeability_phi(qicen, sicen, Tsfcn, Tf, vicen, perm) + + use ice_exit, only: abort_ice + use ice_therm_shared, only: calculate_Tin_from_qin, heat_capacity + use ice_constants, only: ice_ref_salinity + + real (kind=dbl_kind), dimension(nilyr), intent(in) :: & + qicen, & ! energy of melting for each ice layer (J/m2) + sicen ! salinity (ppt) + + real (kind=dbl_kind), intent(in) :: & + vicen, & ! ice volume + Tsfcn, & ! sea ice surface skin temperature (degC) + Tf ! ocean freezing temperature [= ice bottom temperature] (degC) + + real (kind=dbl_kind), intent(out) :: & + perm ! permeability + + ! local variables + + real (kind=dbl_kind) :: & + Tmlt, & ! melting temperature + Sbr ! brine salinity + + real (kind=dbl_kind), dimension(nilyr) :: & + Tin, & ! ice temperature + phi ! liquid fraction + + integer (kind=int_kind) :: k + + !----------------------------------------------------------------- + ! Compute ice temperatures from enthalpies using quadratic formula + !----------------------------------------------------------------- + + if (heat_capacity) then + do k = 1,nilyr + Tmlt = -sicen(k) * depressT + Tin(k) = calculate_Tin_from_qin(qicen(k),Tmlt) + enddo + else + Tin(1) = (Tsfcn + Tf) / c2 + endif + + !----------------------------------------------------------------- + ! brine salinity and liquid fraction + !----------------------------------------------------------------- + + if (maxval(Tin) <= -c2) then + + ! Assur 1958 + do k = 1,nilyr + Sbr = - 1.2_dbl_kind & + -21.8_dbl_kind * Tin(k) & + - 0.919_dbl_kind * Tin(k)**2 & + - 0.01878_dbl_kind * Tin(k)**3 + if (heat_capacity) then + phi(k) = sicen(k)/Sbr ! liquid fraction + else + phi(k) = ice_ref_salinity / Sbr ! liquid fraction + endif + enddo ! k + + else + + ! Notz 2005 thesis eq. 
3.2 + do k = 1,nilyr + Sbr = -17.6_dbl_kind * Tin(k) & + - 0.389_dbl_kind * Tin(k)**2 & + - 0.00362_dbl_kind* Tin(k)**3 + if (Sbr == c0) call abort_ice( & + 'zero brine salinity in topo pond permeability') + if (heat_capacity) then + phi(k) = sicen(k) / Sbr ! liquid fraction + else + phi(k) = ice_ref_salinity / Sbr ! liquid fraction + endif + + enddo + + endif + + !----------------------------------------------------------------- + ! permeability + !----------------------------------------------------------------- + + perm = 3.0e-08_dbl_kind * (minval(phi))**3 + + end subroutine permeability_phi + +!======================================================================= + +! Dumps all values needed for restarting +! +! authors Elizabeth C. Hunke, LANL +! David A. Bailey, NCAR + + subroutine write_restart_pond_topo() + + use ice_domain_size, only: ncat + use ice_fileunits, only: nu_dump_pond + use ice_state, only: trcrn, nt_apnd, nt_hpnd, nt_ipnd + use ice_restart, only: write_restart_field + + ! local variables + + logical (kind=log_kind) :: diag + + diag = .true. + + call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & + 'apnd',ncat,diag) + call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & + 'hpnd',ncat,diag) + call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_ipnd,:,:),'ruf8', & + 'ipnd',ncat,diag) + + end subroutine write_restart_pond_topo + +!======================================================================= + +! Reads all values needed for a meltpond volume restart +! +! authors Elizabeth C. Hunke, LANL +! David A. Bailey, NCAR + + subroutine read_restart_pond_topo() + + use ice_domain_size, only: ncat + use ice_communicate, only: my_task, master_task + use ice_fileunits, only: nu_diag, nu_restart_pond + use ice_state, only: trcrn, nt_apnd, nt_hpnd, nt_ipnd + use ice_restart, only: read_restart_field + + ! local variables + + logical (kind=log_kind) :: & + diag + + diag = .true. + + if (my_task == master_task) write(nu_diag,*) 'min/max topo ponds' + + call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & + 'apnd',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & + 'hpnd',ncat,diag,field_loc_center,field_type_scalar) + call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_ipnd,:,:),'ruf8', & + 'ipnd',ncat,diag,field_loc_center,field_type_scalar) + + end subroutine read_restart_pond_topo + +!======================================================================= + + end module ice_meltpond_topo + +!======================================================================= diff --git a/source/ice_ocean.F90 b/source/ice_ocean.F90 new file mode 100755 index 00000000..2a3c6ebe --- /dev/null +++ b/source/ice_ocean.F90 @@ -0,0 +1,233 @@ +! SVN:$Id: ice_ocean.F90 936 2015-03-17 15:46:44Z eclare $ +!======================================================================= + +! Ocean mixed layer calculation (internal to sea ice model). +! Allows heat storage in ocean for uncoupled runs. +! +! authors: John Weatherly, CRREL +! C.M. Bitz, UW +! Elizabeth C. Hunke, LANL +! Bruce P. Briegleb, NCAR +! William H. Lipscomb, LANL +! +! 2004: Block structure added by William Lipscomb +! 2005: Ocean-to-atmosphere fluxes added as 3D arrays, William Lipscomb +! 
2006: Converted to free source form (F90) by Elizabeth Hunke + + module ice_ocean + + use ice_kinds_mod + use ice_constants + + implicit none + save + + private + public :: ocean_mixed_layer + + logical (kind=log_kind), public :: & + oceanmixed_ice ! if true, use ocean mixed layer + +!ars599: 26032014: change to public + real (kind=dbl_kind), parameter, public :: & + cprho = cp_ocn*rhow + + character(len=char_len), public :: & + tfrz_option ! form of ocean freezing temperature + ! 'minus1p8' = -1.8 C + ! 'linear_salt' = -depressT * sss + ! 'mushy' conforms with ktherm=2 + +!======================================================================= + + contains + +!======================================================================= + +! Compute the mixed layer heat balance and update the SST. +! Compute the energy available to freeze or melt ice. +! NOTE: SST changes due to fluxes through the ice are computed in +! ice_therm_vertical. + + subroutine ocean_mixed_layer (dt, iblk) + + use ice_blocks, only: nx_block, ny_block + use ice_state, only: aice + use ice_flux, only: sst, Tf, Qa, uatm, vatm, wind, potT, rhoa, zlvl, & + frzmlt, fhocn, fswthru, flw, flwout_ocn, fsens_ocn, flat_ocn, evap_ocn, & + alvdr_ocn, alidr_ocn, alvdf_ocn, alidf_ocn, swidf, swvdf, swidr, swvdr, & + qdp, hmix, strairx_ocn, strairy_ocn, Tref_ocn, Qref_ocn + use ice_grid, only: tmask + use ice_atmo, only: atmo_boundary_layer, atmbndy, atmo_boundary_const, & + Cdn_atm, Cdn_atm_ratio + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + real (kind=dbl_kind) :: & + TsfK , & ! surface temperature (K) + swabs ! surface absorbed shortwave heat flux (W/m^2) + + real (kind=dbl_kind), parameter :: & + frzmlt_max = c1000 ! max magnitude of frzmlt (W/m^2) + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij ! combined ij index + + real (kind=dbl_kind), dimension(nx_block,ny_block) :: & + delt , & ! potential temperature difference (K) + delq , & ! specific humidity difference (kg/kg) + shcoef, & ! transfer coefficient for sensible heat + lhcoef ! transfer coefficient for latent heat + + integer (kind=int_kind) :: & + icells ! number of ocean cells + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! compressed indices for ocean cells + + !----------------------------------------------------------------- + ! Identify ocean cells. + ! Set fluxes to zero in land cells. + !----------------------------------------------------------------- + + icells = 0 + indxi(:) = 0 + indxj(:) = 0 + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j,iblk)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + else + sst (i,j,iblk) = c0 + frzmlt (i,j,iblk) = c0 + flwout_ocn(i,j,iblk) = c0 + fsens_ocn (i,j,iblk) = c0 + flat_ocn (i,j,iblk) = c0 + evap_ocn (i,j,iblk) = c0 + endif + enddo ! i + enddo ! j + + !----------------------------------------------------------------- + ! 
Compute boundary layer quantities + !----------------------------------------------------------------- + + if (trim(atmbndy) == 'constant') then + call atmo_boundary_const (nx_block, ny_block, & + 'ocn', icells, & + indxi, indxj, & + uatm (:,:,iblk), & + vatm (:,:,iblk), & + wind (:,:,iblk), & + rhoa (:,:,iblk), & + strairx_ocn(:,:,iblk), & + strairy_ocn(:,:,iblk), & + sst (:,:,iblk), & + potT (:,:,iblk), & + Qa (:,:,iblk), & + delt (:,:), & + delq (:,:), & + lhcoef (:,:), & + shcoef (:,:), & + Cdn_atm(:,:,iblk)) + + else ! default + call atmo_boundary_layer (nx_block, ny_block, & + 'ocn', icells, & + indxi, indxj, & + sst (:,:,iblk), & + potT (:,:,iblk), & + uatm (:,:,iblk), & + vatm (:,:,iblk), & + wind (:,:,iblk), & + zlvl (:,:,iblk), & + Qa (:,:,iblk), & + rhoa (:,:,iblk), & + strairx_ocn(:,:,iblk), & + strairy_ocn(:,:,iblk), & + Tref_ocn (:,:,iblk), & + Qref_ocn (:,:,iblk), & + delt (:,:), & + delq (:,:), & + lhcoef (:,:), & + shcoef (:,:), & + Cdn_atm(:,:,iblk), & + Cdn_atm_ratio(:,:,iblk)) + endif + + !----------------------------------------------------------------- + ! Ocean albedo + ! For now, assume albedo = albocn in each spectral band. + !----------------------------------------------------------------- + + alvdr_ocn(:,:,iblk) = albocn + alidr_ocn(:,:,iblk) = albocn + alvdf_ocn(:,:,iblk) = albocn + alidf_ocn(:,:,iblk) = albocn + + !----------------------------------------------------------------- + ! Compute ocean fluxes and update SST + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! shortwave radiative flux + swabs = (c1-alvdr_ocn(i,j,iblk)) * swvdr(i,j,iblk) & + + (c1-alidr_ocn(i,j,iblk)) * swidr(i,j,iblk) & + + (c1-alvdf_ocn(i,j,iblk)) * swvdf(i,j,iblk) & + + (c1-alidf_ocn(i,j,iblk)) * swidf(i,j,iblk) + + ! ocean surface temperature in Kelvin + TsfK = sst(i,j,iblk) + Tffresh + + ! longwave radiative flux + flwout_ocn(i,j,iblk) = -stefan_boltzmann * TsfK**4 + + ! downward latent and sensible heat fluxes + fsens_ocn(i,j,iblk) = shcoef(i,j) * delt(i,j) + flat_ocn (i,j,iblk) = lhcoef(i,j) * delq(i,j) + evap_ocn (i,j,iblk) = -flat_ocn(i,j,iblk) / Lvap + + ! Compute sst change due to exchange with atm/ice above + sst(i,j,iblk) = sst(i,j,iblk) + dt * ( & + (fsens_ocn(i,j,iblk) + flat_ocn(i,j,iblk) + flwout_ocn(i,j,iblk) & + + flw(i,j,iblk) + swabs) * (c1-aice(i,j,iblk)) & + + fhocn(i,j,iblk) + fswthru(i,j,iblk)) & ! these are *aice + / (cprho*hmix(i,j,iblk)) + + ! adjust qdp if cooling of mixed layer would occur when sst <= Tf + if (sst(i,j,iblk) <= Tf(i,j,iblk) .and. qdp(i,j,iblk) > c0) qdp(i,j,iblk) = c0 + + ! computed T change due to exchange with deep layers: + sst(i,j,iblk) = sst(i,j,iblk) - qdp(i,j,iblk)*dt/(cprho*hmix(i,j,iblk)) + + ! compute potential to freeze or melt ice + frzmlt(i,j,iblk) = (Tf(i,j,iblk)-sst(i,j,iblk))*cprho*hmix(i,j,iblk)/dt + frzmlt(i,j,iblk) = min(max(frzmlt(i,j,iblk),-frzmlt_max),frzmlt_max) + + ! if sst is below freezing, reset sst to Tf + if (sst(i,j,iblk) <= Tf(i,j,iblk)) sst(i,j,iblk) = Tf(i,j,iblk) + + enddo ! ij + + end subroutine ocean_mixed_layer + +!======================================================================= + + end module ice_ocean + +!======================================================================= diff --git a/source/ice_orbital.F90 b/source/ice_orbital.F90 new file mode 100755 index 00000000..08b0be4e --- /dev/null +++ b/source/ice_orbital.F90 @@ -0,0 +1,145 @@ +! 
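
The SST update in ocean_mixed_layer (ice_ocean.F90 above) proceeds in two steps: the mixed layer is first warmed or cooled by the area-weighted surface fluxes (open-water fluxes scaled by 1-aice, plus the through-ice terms fhocn and fswthru), and any remaining departure of SST from the freezing point Tf is then converted into the freeze/melt potential frzmlt, capped at frzmlt_max, with SST reset to Tf when it falls below freezing. A condensed standalone sketch of those two steps follows; all numerical values below are illustrative placeholders, and the deep-ocean heat flux qdp is omitted for brevity.

      program mixed_layer_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (kind=dbl), parameter :: &
         cp_ocn     = 4218.0_dbl, & ! ocean heat capacity (J/kg/degC), illustrative
         rhow       = 1026.0_dbl, & ! seawater density (kg/m^3), illustrative
         frzmlt_max = 1000.0_dbl    ! cap on freeze/melt potential (W/m^2)
      real (kind=dbl) :: cprho, dt, hmix, aice, sst, Tf
      real (kind=dbl) :: fnet_open, fice, frzmlt

      cprho     = cp_ocn*rhow      ! volumetric heat capacity (J/m^3/degC)
      dt        = 3600.0_dbl       ! time step (s)
      hmix      = 20.0_dbl         ! mixed layer depth (m)
      aice      = 0.8_dbl          ! ice area fraction
      sst       = -1.7_dbl         ! current SST (degC)
      Tf        = -1.8_dbl         ! freezing temperature (degC)
      fnet_open = -150.0_dbl       ! net surface flux over open water (W/m^2)
      fice      = -20.0_dbl        ! ocean-ice heat flux + penetrating shortwave (W/m^2)

      ! step 1: update SST with the area-weighted fluxes
      sst = sst + dt*(fnet_open*(1.0_dbl-aice) + fice)/(cprho*hmix)

      ! step 2: energy available to freeze (>0) or melt (<0) ice; floor SST at Tf
      frzmlt = (Tf - sst)*cprho*hmix/dt
      frzmlt = min(max(frzmlt, -frzmlt_max), frzmlt_max)
      if (sst <= Tf) sst = Tf

      print *, 'updated sst =', sst, '  frzmlt =', frzmlt
      end program mixed_layer_sketch
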
SVN:$Id: ice_orbital.F90 820 2014-08-26 19:08:29Z eclare $ +!======================================================================= + +! Orbital parameters computed from date +! author: Bruce P. Briegleb, NCAR +! +! 2006: Converted to free source form (F90) by Elizabeth Hunke + + module ice_orbital + + use ice_kinds_mod + + implicit none + private + public :: init_orbit, compute_coszen + save + + integer (kind=int_kind) :: iyear_AD ! Year to calculate orbit for + + real(kind=dbl_kind),public :: eccen !Earth's orbital eccentricity + real(kind=dbl_kind),public :: obliqr !Earth's obliquity in radians + real(kind=dbl_kind),public :: lambm0 !Mean longitude of perihelion at the + !vernal equinox (radians) + real(kind=dbl_kind),public :: mvelpp !Earth's moving vernal equinox longitude + !of perihelion + pi (radians) + real(kind=dbl_kind) :: obliq ! obliquity in degrees + real(kind=dbl_kind) :: mvelp ! moving vernal equinox long + real(kind=dbl_kind) :: delta ! solar declination angle in radians + real(kind=dbl_kind) :: eccf ! earth orbit eccentricity factor + + logical(kind=log_kind) :: log_print ! Flags print of status/error + +!======================================================================= + + contains + +!======================================================================= + +! Uses share routines to compute orbital parameters +! for the specified date. +! +! author: Bruce P. Briegleb, NCAR + + subroutine init_orbit + + use shr_orb_mod, only: shr_orb_params + + iyear_AD = 1950 + log_print = .false. ! if true, write out orbital parameters + + call shr_orb_params( iyear_AD , eccen , obliq , mvelp , & + obliqr , lambm0 , mvelpp, log_print ) + + end subroutine init_orbit + +!======================================================================= + +! Uses orbital and lat/lon info to compute cosine solar zenith angle +! for the specified date. +! +! author: Bruce P. Briegleb, NCAR + + subroutine compute_coszen (nx_block, ny_block, & + icells, & + indxi, indxj, & + tlat, tlon, & + coszen, dt) + + use ice_calendar, only: yday, sec, calendar_type, nextsw_cday, days_per_year + use ice_constants, only: c0, c2, p5, pi, secday + use shr_orb_mod, only: shr_orb_decl + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of ice-covered grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, indxj ! indices for ice-covered cells + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + tlat, tlon ! latitude and longitude (radians) + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(out) :: & + coszen ! cosine solar zenith angle + ! negative for sun below horizon + + real (kind=dbl_kind), intent(in) :: & + dt ! thermodynamic time step + + ! local variables + + real (kind=dbl_kind) :: ydayp1 ! day of year plus one time step + + integer (kind=int_kind) :: & + i , & ! domain longitude index + j , & ! domain latitude index + ij ! horizontal index, combines i and j loops + + +! Solar declination for next time step + +#ifdef CCSMCOUPLED + if (calendar_type == "GREGORIAN") then + ydayp1 = min(nextsw_cday, real(days_per_year,kind=dbl_kind)) + else + ydayp1 = nextsw_cday + endif + + !--- update coszen when nextsw_cday valid + if (ydayp1 > -0.5_dbl_kind) then +#else + ydayp1 = yday + sec/secday +#endif + + call shr_orb_decl(ydayp1, eccen, mvelpp, lambm0, & + obliqr, delta, eccf) + + coszen(:,:) = c0 ! 
sun at horizon + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) +!lipscomb - function inlined to improve vector efficiency +! coszen(i,j) = shr_orb_cosz(ydayp1, & +! tlat(i,j),tlon(i,j),delta) + + coszen(i,j) = sin(tlat(i,j))*sin(delta) & + + cos(tlat(i,j))*cos(delta) & + *cos((sec/secday-p5)*c2*pi + tlon(i,j)) !cos(hour angle) + enddo + +#ifdef CCSMCOUPLED + endif +#endif + + end subroutine compute_coszen + +!======================================================================= + + end module ice_orbital + +!======================================================================= diff --git a/source/ice_read_write.F90 b/source/ice_read_write.F90 new file mode 100755 index 00000000..a78f1f1b --- /dev/null +++ b/source/ice_read_write.F90 @@ -0,0 +1,2092 @@ +! SVN:$Id: ice_read_write.F90 936 2015-03-17 15:46:44Z eclare $ +!======================================================================= + +! Routines for opening, reading and writing external files +! +! author: Tony Craig, NCAR +! +! 2004: Block structure added by William Lipscomb, LANL +! 2006: Converted to free source form (F90) by Elizabeth Hunke +! 2007: netcdf versions added by Alison McLaren & Ann Keen, Met Office + + module ice_read_write + + use ice_kinds_mod + use ice_constants, only: c0, spval_dbl, & + field_loc_noupdate, field_type_noupdate + use ice_communicate, only: my_task, master_task + use ice_broadcast, only: broadcast_scalar + use ice_domain, only: distrb_info + use ice_domain_size, only: max_blocks, nx_global, ny_global, ncat + use ice_blocks, only: nx_block, ny_block, nghost + use ice_exit, only: abort_ice + +#ifdef ncdf + use netcdf +#endif + + implicit none + + private + public :: ice_open, & + ice_open_ext, & + ice_open_nc, & + ice_read, & + ice_read_ext, & + ice_read_nc, & + ice_read_global, & + ice_read_global_nc, & + ice_read_nc_uv, & + ice_write, & + ice_write_nc, & + ice_write_ext, & + ice_close_nc + + interface ice_write + module procedure ice_write_xyt, & + ice_write_xyzt + end interface + + interface ice_read + module procedure ice_read_xyt, & + ice_read_xyzt + end interface + + interface ice_read_nc + module procedure ice_read_nc_xy, & + ice_read_nc_xyz, & + ice_read_nc_point, & + ice_read_nc_z + end interface + + interface ice_write_nc + module procedure ice_write_nc_xy, & + ice_write_nc_xyz + end interface + +!======================================================================= + + contains + +!======================================================================= + +! Opens an unformatted file for reading. +! nbits indicates whether the file is sequential or direct access. +! +! author: Tony Craig, NCAR + + subroutine ice_open(nu, filename, nbits) + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nbits ! no. of bits per variable (0 for sequential access) + + character (*) :: filename + + if (my_task == master_task) then + + if (nbits == 0) then ! sequential access + + open(nu,file=filename,form='unformatted') + + else ! direct access + open(nu,file=filename,recl=nx_global*ny_global*nbits/8, & + form='unformatted',access='direct') + endif ! nbits = 0 + + endif ! my_task = master_task + + end subroutine ice_open + +!======================================================================= + +! Opens an unformatted file for reading, incl ghost cells (direct access). +! nbits indicates whether the file is sequential or direct access. +! +! authors: Tony Craig, NCAR +! 
David Hebert, NRLSSC + + subroutine ice_open_ext(nu, filename, nbits) + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nbits ! no. of bits per variable (0 for sequential access) + + character (*) :: filename + + integer (kind=int_kind) :: & + nx, ny ! grid dimensions including ghost cells + + if (my_task == master_task) then + + if (nbits == 0) then ! sequential access + + open(nu,file=filename,form='unformatted') + + else ! direct access + + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + + open(nu,file=filename,recl=nx*ny*nbits/8, & + form='unformatted',access='direct') + endif ! nbits = 0 + + endif ! my_task = master_task + + end subroutine ice_open_ext + +!======================================================================= + +! Read an unformatted file and scatter to processors. +! work is a real array, atype indicates the format of the data. +! If the optional variables field_loc and field_type are present, +! the ghost cells are filled using values from the global array. +! This prevents them from being filled with zeroes in land cells +! (subroutine ice_HaloUpdate need not be called). +! +! author: Tony Craig, NCAR + + subroutine ice_read_xyt(nu, nrec, work, atype, diag, & + field_loc, field_type, & + ignore_eof, hit_eof) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: scatter_global + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nrec ! record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(out) :: & + work ! output array (real, 8-byte) + + character (len=4), intent(in) :: & + atype ! format for input array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + logical (kind=log_kind), optional, intent(in) :: ignore_eof + logical (kind=log_kind), optional, intent(out) :: hit_eof + + ! local variables + + integer (kind=int_kind) :: i, j, ios + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + logical (kind=log_kind) :: ignore_eof_use + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + real (kind=real_kind), dimension(:,:), allocatable :: & + work_gr + + integer(kind=int_kind), dimension(:,:), allocatable :: & + work_gi4 + + integer(selected_int_kind(13)), dimension(:,:), allocatable :: & + work_gi8 + + if (my_task == master_task) then + allocate(work_g1(nx_global,ny_global)) + else + allocate(work_g1(1,1)) ! to save memory + endif + + if (my_task == master_task) then + + !------------------------------------------------------------------- + ! Read global array according to format atype + !------------------------------------------------------------------- + if (present(hit_eof)) hit_eof = .false. 
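+      ! atype selects how the global record is stored on disk:
+      !   'ida4' / 'ida8'  direct-access integers (4- or 8-byte), converted to dbl_kind
+      !   'rda4' / 'rda8'  direct-access reals (4- or 8-byte)
+      !   'ruf8'           sequential unformatted 8-byte reals; with ignore_eof set,
+      !                    end-of-file is reported through hit_eof rather than
+      !                    causing a read error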
+ + if (atype == 'ida4') then + allocate(work_gi4(nx_global,ny_global)) + read(nu,rec=nrec) work_gi4 + work_g1 = real(work_gi4,kind=dbl_kind) + deallocate(work_gi4) + elseif (atype == 'ida8') then + allocate(work_gi8(nx_global,ny_global)) + read(nu,rec=nrec) work_gi8 + work_g1 = real(work_gi8,kind=dbl_kind) + deallocate(work_gi8) + elseif (atype == 'rda4') then + allocate(work_gr(nx_global,ny_global)) + read(nu,rec=nrec) work_gr + work_g1 = work_gr + deallocate(work_gr) + elseif (atype == 'rda8') then + read(nu,rec=nrec) work_g1 + elseif (atype == 'ruf8') then + if (present(ignore_eof)) then + ignore_eof_use = ignore_eof + else + ignore_eof_use = .false. + endif + if (ignore_eof_use) then + ! Read line from file, checking for end-of-file + read(nu, iostat=ios) ((work_g1(i,j),i=1,nx_global), & + j=1,ny_global) + if (present(hit_eof)) hit_eof = ios < 0 + else + read(nu) ((work_g1(i,j),i=1,nx_global),j=1,ny_global) + endif + else + write(nu_diag,*) ' ERROR: reading unknown atype ',atype + endif + endif ! my_task = master_task + + if (present(hit_eof)) then + call broadcast_scalar(hit_eof,master_task) + if (hit_eof) then + deallocate(work_g1) + return + endif + endif + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + if (my_task==master_task .and. diag) then + amin = minval(work_g1) + amax = maxval(work_g1, mask = work_g1 /= spval_dbl) + asum = sum(work_g1, mask = work_g1 /= spval_dbl) + write(nu_diag,*) ' read_global ',nu, nrec, amin, amax, asum + endif + + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- + + if (present(field_loc)) then + call scatter_global(work, work_g1, master_task, distrb_info, & + field_loc, field_type) + else + call scatter_global(work, work_g1, master_task, distrb_info, & + field_loc_noupdate, field_type_noupdate) + endif + + deallocate(work_g1) + + end subroutine ice_read_xyt + +!======================================================================= +! Read an unformatted file and scatter to processors. +! work is a real array, atype indicates the format of the data. +! If the optional variables field_loc and field_type are present, +! the ghost cells are filled using values from the global array. +! This prevents them from being filled with zeroes in land cells +! (subroutine ice_HaloUpdate need not be called). +! +! author: Tony Craig, NCAR + + subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & + field_loc, field_type, & + ignore_eof, hit_eof) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: scatter_global + use ice_domain_size, only: nblyr + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nrec ! record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_block,ny_block,nblyr+2,max_blocks), & + intent(out) :: & + work ! output array (real, 8-byte) + + character (len=4), intent(in) :: & + atype ! format for input array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! 
type of field (scalar, vector, angle) + + logical (kind=log_kind), optional, intent(in) :: ignore_eof + logical (kind=log_kind), optional, intent(out) :: hit_eof + + ! local variables + + integer (kind=int_kind) :: i, j, k, ios + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + logical (kind=log_kind) :: ignore_eof_use + + + real (kind=dbl_kind), dimension(:,:,:), allocatable :: & + work_g4 + + integer(kind=int_kind), dimension(:,:,:), allocatable :: & + work_gi5 + + integer(selected_int_kind(13)), dimension(:,:,:), allocatable :: & + work_gi9 + + real (kind=real_kind), dimension(:,:,:), allocatable :: & + work_gr3 + + if (my_task == master_task) then + allocate(work_g4(nx_global,ny_global,nblyr+2)) + else + allocate(work_g4(1,1,nblyr+2)) ! to save memory + endif + + if (my_task == master_task) then + + !------------------------------------------------------------------- + ! Read global array according to format atype + !------------------------------------------------------------------- + if (present(hit_eof)) hit_eof = .false. + + if (atype == 'ida4') then + allocate(work_gi5(nx_global,ny_global,nblyr+2)) + read(nu,rec=nrec) work_gi5 + work_g4 = real(work_gi5,kind=dbl_kind) + deallocate(work_gi5) + elseif (atype == 'ida8') then + allocate(work_gi9(nx_global,ny_global,nblyr+2)) + read(nu,rec=nrec) work_gi9 + work_g4 = real(work_gi9,kind=dbl_kind) + deallocate(work_gi9) + elseif (atype == 'rda4') then + allocate(work_gr3(nx_global,ny_global,nblyr+2)) + read(nu,rec=nrec) work_gr3 + work_g4 = work_gr3 + deallocate(work_gr3) + elseif (atype == 'rda8') then + read(nu,rec=nrec) work_g4 + elseif (atype == 'ruf8') then + if (present(ignore_eof)) then + ignore_eof_use = ignore_eof + else + ignore_eof_use = .false. + endif + if (ignore_eof_use) then + ! Read line from file, checking for end-of-file + read(nu, iostat=ios) (((work_g4(i,j,k),i=1,nx_global), & + j=1,ny_global), & + k=1,nblyr+2) + if (present(hit_eof)) hit_eof = ios < 0 + else + read(nu) (((work_g4(i,j,k),i=1,nx_global),j=1,ny_global),& + k=1,nblyr+2) + endif + else + write(nu_diag,*) ' ERROR: reading unknown atype ',atype + endif + endif ! my_task = master_task + + if (present(hit_eof)) then + call broadcast_scalar(hit_eof,master_task) + if (hit_eof) then + deallocate(work_g4) + return + endif + endif + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + if (my_task==master_task .and. diag) then + amin = minval(work_g4) + amax = maxval(work_g4, mask = work_g4 /= spval_dbl) + asum = sum (work_g4, mask = work_g4 /= spval_dbl) + write(nu_diag,*) ' read_global ',nu, nrec, amin, amax, asum + endif + + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- + + do k = 1, nblyr+2 + + if (present(field_loc)) then + call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & + field_loc, field_type) + + else + + call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & + field_loc_noupdate, field_type_noupdate) + endif + + enddo !k + deallocate(work_g4) + + end subroutine ice_read_xyzt + +!======================================================================= + +! Read an unformatted file +! Just like ice_read except that it returns a global array. +! 
work_g is a real array, atype indicates the format of the data +! +! Adapted by William Lipscomb, LANL, from ice_read + + subroutine ice_read_global (nu, nrec, work_g, atype, diag, & + ignore_eof, hit_eof) + + use ice_fileunits, only: nu_diag + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nrec ! record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_global,ny_global), & + intent(out) :: & + work_g ! output array (real, 8-byte) + + character (len=4) :: & + atype ! format for input array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind) :: & + diag ! if true, write diagnostic output + + logical (kind=log_kind), optional, intent(in) :: ignore_eof + logical (kind=log_kind), optional, intent(out) :: hit_eof + + ! local variables + + integer (kind=int_kind) :: i, j, ios + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + logical (kind=log_kind) :: ignore_eof_use + + real (kind=real_kind), dimension(:,:), allocatable :: & + work_gr + + integer(kind=int_kind), dimension(:,:), allocatable :: & + work_gi4 + + integer(selected_int_kind(13)), dimension(:,:), allocatable :: & + work_gi8 + + work_g(:,:) = c0 + + if (my_task == master_task) then + + !------------------------------------------------------------------- + ! Read global array according to format atype + !------------------------------------------------------------------- + if (present(hit_eof)) hit_eof = .false. + + if (atype == 'ida4') then + allocate(work_gi4(nx_global,ny_global)) + read(nu,rec=nrec) work_gi4 + work_g = real(work_gi4,kind=dbl_kind) + deallocate(work_gi4) + elseif (atype == 'ida8') then + allocate(work_gi8(nx_global,ny_global)) + read(nu,rec=nrec) work_gi8 + work_g = real(work_gi8,kind=dbl_kind) + deallocate(work_gi8) + elseif (atype == 'rda4') then + allocate(work_gr(nx_global,ny_global)) + read(nu,rec=nrec) work_gr + work_g = work_gr + deallocate(work_gr) + elseif (atype == 'rda8') then + read(nu,rec=nrec) work_g + elseif (atype == 'ruf8') then + if (present(ignore_eof)) then + ignore_eof_use = ignore_eof + else + ignore_eof_use = .false. + endif + if (ignore_eof_use) then + ! Read line from file, checking for end-of-file + read(nu, iostat=ios) ((work_g(i,j),i=1,nx_global), & + j=1,ny_global) + if (present(hit_eof)) hit_eof = ios < 0 + else + read(nu) ((work_g(i,j),i=1,nx_global),j=1,ny_global) + endif + else + write(nu_diag,*) ' ERROR: reading unknown atype ',atype + endif + endif ! my_task = master_task + + if (present(hit_eof)) then + call broadcast_scalar(hit_eof,master_task) + if (hit_eof) return + endif + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + if (my_task == master_task .and. diag) then + amin = minval(work_g) + amax = maxval(work_g, mask = work_g /= spval_dbl) + asum = sum (work_g, mask = work_g /= spval_dbl) + write(nu_diag,*) ' read_global ',nu, nrec, amin, amax,asum + endif + + end subroutine ice_read_global + +!======================================================================= + +! Read an unformatted file and scatter to processors, incl ghost cells. +! work is a real array, atype indicates the format of the data. +! (subroutine ice_HaloUpdate need not be called). 
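
The unformatted readers and writers in this file all share the record layout set up by ice_open: for direct access, one record holds a full nx_global*ny_global field, the record length is nx_global*ny_global*nbits/8, and nrec selects the record. Below is a minimal self-contained illustration of writing and reading back one such record; the file name, grid size and field values are made-up placeholders, and recl is treated as a byte count, matching the convention ice_open assumes (compilers that default to word-length record units may need an option to match).

      program direct_access_record_sketch
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      integer, parameter :: nx = 4, ny = 3, nbits = 64   ! made-up grid, 8-byte reals
      real (kind=dbl), dimension(nx,ny) :: field
      integer :: nu

      nu    = 21
      field = 2.5_dbl

      ! one record = one full nx*ny field, record length in bytes as in ice_open
      open(nu, file='scratch_record.bin', form='unformatted', &
           access='direct', recl=nx*ny*nbits/8)
      write(nu, rec=1) field      ! analogous to ice_write_xyt with atype 'rda8'
      field = 0.0_dbl
      read (nu, rec=1) field      ! analogous to ice_read_xyt with atype 'rda8'
      close(nu, status='delete')

      print *, 'record read back, field(1,1) =', field(1,1)
      end program direct_access_record_sketch
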
+ + subroutine ice_read_ext(nu, nrec, work, atype, diag, & + field_loc, field_type, & + ignore_eof, hit_eof) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: scatter_global_ext + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nrec ! record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(out) :: & + work ! output array (real, 8-byte) + + character (len=4), intent(in) :: & + atype ! format for input array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + logical (kind=log_kind), optional, intent(in) :: ignore_eof + logical (kind=log_kind), optional, intent(out) :: hit_eof + + ! local variables + + integer (kind=int_kind) :: i, j, ios, nx, ny + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + logical (kind=log_kind) :: ignore_eof_use + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + real (kind=real_kind), dimension(:,:), allocatable :: & + work_gr + + integer(kind=int_kind), dimension(:,:), allocatable :: & + work_gi4 + + integer(selected_int_kind(13)), dimension(:,:), allocatable :: & + work_gi8 + + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + + if (my_task == master_task) then + allocate(work_g1(nx,ny)) + else + allocate(work_g1(1,1)) ! to save memory + endif + + if (my_task == master_task) then + + !------------------------------------------------------------------- + ! Read global array according to format atype + !------------------------------------------------------------------- + if (present(hit_eof)) hit_eof = .false. + + if (atype == 'ida4') then + allocate(work_gi4(nx,ny)) + read(nu,rec=nrec) work_gi4 + work_g1 = real(work_gi4,kind=dbl_kind) + deallocate(work_gi4) + elseif (atype == 'ida8') then + allocate(work_gi8(nx,ny)) + read(nu,rec=nrec) work_gi8 + work_g1 = real(work_gi8,kind=dbl_kind) + deallocate(work_gi8) + elseif (atype == 'rda4') then + allocate(work_gr(nx,ny)) + read(nu,rec=nrec) work_gr + work_g1 = work_gr + deallocate(work_gr) + elseif (atype == 'rda8') then + read(nu,rec=nrec) work_g1 + elseif (atype == 'ruf8') then + if (present(ignore_eof)) then + ignore_eof_use = ignore_eof + else + ignore_eof_use = .false. + endif + if (ignore_eof_use) then + ! Read line from file, checking for end-of-file + read(nu, iostat=ios) ((work_g1(i,j),i=1,nx), & + j=1,ny) + if (present(hit_eof)) hit_eof = ios < 0 + else + read(nu) ((work_g1(i,j),i=1,nx),j=1,ny) + endif + else + write(nu_diag,*) ' ERROR: reading unknown atype ',atype + endif + endif ! my_task = master_task + + if (present(hit_eof)) then + call broadcast_scalar(hit_eof,master_task) + if (hit_eof) then + deallocate(work_g1) + return + endif + endif + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + if (my_task==master_task .and. diag) then + amin = minval(work_g1) + amax = maxval(work_g1, mask = work_g1 /= spval_dbl) + asum = sum (work_g1, mask = work_g1 /= spval_dbl) + write(nu_diag,*) ' read_global ',nu, nrec, amin, amax, asum + endif + + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! 
NOTE: Ghost cells are always updated + !------------------------------------------------------------------- + + call scatter_global_ext(work, work_g1, master_task, distrb_info) + + deallocate(work_g1) + + end subroutine ice_read_ext + +!======================================================================= + +! Writes an unformatted file +! work is a real array, atype indicates the format of the data + + subroutine ice_write_xyt(nu, nrec, work, atype, diag) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: gather_global + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nrec ! record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(in) :: & + work ! input array (real, 8-byte) + + character (len=4), intent(in) :: & + atype ! format for output array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + ! local variables + + integer (kind=int_kind) :: i, j + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + real (kind=real_kind), dimension(:,:), allocatable :: & + work_gr + + integer(kind=int_kind), dimension(:,:), allocatable :: & + work_gi4 + + integer(selected_int_kind(13)), dimension(:,:), allocatable :: & + work_gi8 + + !------------------------------------------------------------------- + ! Gather data from individual processors + !------------------------------------------------------------------- + + if (my_task == master_task) then + allocate(work_g1(nx_global,ny_global)) + else + allocate(work_g1(1,1)) ! to save memory + endif + + call gather_global(work_g1, work, master_task, distrb_info, spc_val=c0) + + if (my_task == master_task) then + + !------------------------------------------------------------------- + ! Write global array according to format atype + !------------------------------------------------------------------- + if (atype == 'ida4') then + allocate(work_gi4(nx_global,ny_global)) + work_gi4 = nint(work_g1) + write(nu,rec=nrec) work_gi4 + deallocate(work_gi4) + elseif (atype == 'ida8') then + allocate(work_gi8(nx_global,ny_global)) + work_gi8 = nint(work_g1) + write(nu,rec=nrec) work_gi8 + deallocate(work_gi8) + elseif (atype == 'rda4') then + allocate(work_gr(nx_global,ny_global)) + work_gr = work_g1 + write(nu,rec=nrec) work_gr + deallocate(work_gr) + elseif (atype == 'rda8') then + write(nu,rec=nrec) work_g1 + elseif (atype == 'ruf8') then + write(nu) ((work_g1(i,j),i=1,nx_global),j=1,ny_global) + else + write(nu_diag,*) ' ERROR: writing unknown atype ',atype + endif + + !------------------------------------------------------------------- + ! diagnostics + !------------------------------------------------------------------- + if (diag) then + amin = minval(work_g1) + amax = maxval(work_g1, mask = work_g1 /= spval_dbl) + asum = sum (work_g1, mask = work_g1 /= spval_dbl) + write(nu_diag,*) ' write_global ', nu, nrec, amin, amax, asum + endif + + endif ! my_task = master_task + + deallocate(work_g1) + + end subroutine ice_write_xyt + +!======================================================================= + +! Writes an unformatted file +! 
work is a real array, atype indicates the format of the data + + subroutine ice_write_xyzt(nu, nrec, work, atype, diag) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: gather_global + use ice_domain_size, only: nblyr + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nrec ! record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_block,ny_block,nblyr+2,max_blocks), & + intent(in) :: & + work ! input array (real, 8-byte) + + character (len=4), intent(in) :: & + atype ! format for output array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + ! local variables + + integer (kind=int_kind) :: i, j, k + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + real (kind=dbl_kind), dimension(:,:,:), allocatable :: & + work_g4 + + real (kind=real_kind), dimension(:,:,:), allocatable :: & + work_gr3 + + integer(kind=int_kind), dimension(:,:,:), allocatable :: & + work_gi5 + + integer(selected_int_kind(13)), dimension(:,:,:), allocatable :: & + work_gi9 + + !------------------------------------------------------------------- + ! Gather data from individual processors + !------------------------------------------------------------------- + + if (my_task == master_task) then + allocate(work_g4(nx_global,ny_global,nblyr+2)) + else + allocate(work_g4(1,1,nblyr+2)) ! to save memory + endif + do k = 1,nblyr+2 + call gather_global(work_g4(:,:,k), work(:,:,k,:), master_task, & + distrb_info, spc_val=c0) + enddo !k + + if (my_task == master_task) then + + !------------------------------------------------------------------- + ! Write global array according to format atype + !------------------------------------------------------------------- + if (atype == 'ida4') then + allocate(work_gi5(nx_global,ny_global,nblyr+2)) + work_gi5 = nint(work_g4) + write(nu,rec=nrec) work_gi5 + deallocate(work_gi5) + elseif (atype == 'ida8') then + allocate(work_gi9(nx_global,ny_global,nblyr+2)) + work_gi9 = nint(work_g4) + write(nu,rec=nrec) work_gi9 + deallocate(work_gi9) + elseif (atype == 'rda4') then + allocate(work_gr3(nx_global,ny_global,nblyr+2)) + work_gr3 = work_g4 + write(nu,rec=nrec) work_gr3 + deallocate(work_gr3) + elseif (atype == 'rda8') then + write(nu,rec=nrec) work_g4 + elseif (atype == 'ruf8') then + write(nu)(((work_g4(i,j,k),i=1,nx_global),j=1,ny_global), & + k=1,nblyr+2) + else + write(nu_diag,*) ' ERROR: writing unknown atype ',atype + endif + + !------------------------------------------------------------------- + ! diagnostics + !------------------------------------------------------------------- + if (diag) then + amin = minval(work_g4) + amax = maxval(work_g4, mask = work_g4 /= spval_dbl) + asum = sum (work_g4, mask = work_g4 /= spval_dbl) + write(nu_diag,*) ' write_global ', nu, nrec, amin, amax, asum + endif + + endif ! my_task = master_task + + deallocate(work_g4) + + end subroutine ice_write_xyzt + +!======================================================================= +! +! Writes an unformatted file, including ghost cells +! work is a real array, atype indicates the format of the data +! +! author: Tony Craig, NCAR + + subroutine ice_write_ext(nu, nrec, work, atype, diag) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: gather_global_ext + + integer (kind=int_kind), intent(in) :: & + nu , & ! unit number + nrec ! 
record number (0 for sequential access) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(in) :: & + work ! input array (real, 8-byte) + + character (len=4), intent(in) :: & + atype ! format for output array + ! (real/integer, 4-byte/8-byte) + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + ! local variables + + integer (kind=int_kind) :: i, j, nx, ny + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + real (kind=real_kind), dimension(:,:), allocatable :: & + work_gr + + integer(kind=int_kind), dimension(:,:), allocatable :: & + work_gi4 + + integer(selected_int_kind(13)), dimension(:,:), allocatable :: & + work_gi8 + + !------------------------------------------------------------------- + ! Gather data from individual processors + !------------------------------------------------------------------- + + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + + if (my_task == master_task) then + allocate(work_g1(nx,ny)) + else + allocate(work_g1(1,1)) ! to save memory + endif + + call gather_global_ext(work_g1, work, master_task, distrb_info, spc_val=c0) + + if (my_task == master_task) then + + !------------------------------------------------------------------- + ! Write global array according to format atype + !------------------------------------------------------------------- + if (atype == 'ida4') then + allocate(work_gi4(nx,ny)) + work_gi4 = nint(work_g1) + write(nu,rec=nrec) work_gi4 + deallocate(work_gi4) + elseif (atype == 'ida8') then + allocate(work_gi8(nx,ny)) + work_gi8 = nint(work_g1) + write(nu,rec=nrec) work_gi8 + deallocate(work_gi8) + elseif (atype == 'rda4') then + allocate(work_gr(nx,ny)) + work_gr = work_g1 + write(nu,rec=nrec) work_gr + deallocate(work_gr) + elseif (atype == 'rda8') then + write(nu,rec=nrec) work_g1 + elseif (atype == 'ruf8') then + write(nu) ((work_g1(i,j),i=1,nx),j=1,ny) + else + write(nu_diag,*) ' ERROR: writing unknown atype ',atype + endif + + !------------------------------------------------------------------- + ! diagnostics + !------------------------------------------------------------------- + if (diag) then + amin = minval(work_g1) + amax = maxval(work_g1, mask = work_g1 /= spval_dbl) + asum = sum (work_g1, mask = work_g1 /= spval_dbl) + write(nu_diag,*) ' write_global ', nu, nrec, amin, amax, asum + endif + + endif ! my_task = master_task + + deallocate(work_g1) + + end subroutine ice_write_ext + +!======================================================================= + +! Opens a netCDF file for reading +! Adapted by Alison McLaren, Met Office from ice_open + + subroutine ice_open_nc(filename, fid) + + character (char_len_long), intent(in) :: & + filename ! netCDF filename + + integer (kind=int_kind), intent(out) :: & + fid ! unit number + + ! local variables + +#ifdef ncdf + integer (kind=int_kind) :: & + status ! status variable from netCDF routine + + if (my_task == master_task) then + + status = nf90_open(filename, NF90_NOWRITE, fid) + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_open_nc: Cannot open '//trim(filename) ) + endif + + endif ! my_task = master_task + +#else + fid = -999 ! to satisfy intent(out) attribute +#endif + end subroutine ice_open_nc + +!======================================================================= + +! Read a netCDF file and scatter to processors. +! 
If the optional variables field_loc and field_type are present, +! the ghost cells are filled using values from the global array. +! This prevents them from being filled with zeroes in land cells +! (subroutine ice_HaloUpdate need not be called). +! +! Adapted by Alison McLaren, Met Office from ice_read + + subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & + field_loc, field_type, restart_ext) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: scatter_global, scatter_global_ext + + integer (kind=int_kind), intent(in) :: & + fid , & ! file id + nrec ! record number + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + +#ifndef AusCOM + character (len=*), intent(in) :: & +#else + character*(*), intent(in) :: & +#endif + varname ! field name in netcdf file + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(out) :: & + work ! output array (real, 8-byte) + + logical (kind=log_kind), optional, intent(in) :: & + restart_ext ! if true, read extended grid + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + ! local variables + +#ifdef ncdf +! netCDF file diagnostics: + integer (kind=int_kind) :: & + varid , & ! variable id + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + dimlen ! size of dimension + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + character (char_len) :: & + dimname ! dimension name + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + integer (kind=int_kind) :: nx, ny + +#ifdef ORCA_GRID + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g2 + + if (.not. present(restart_ext)) then + if (my_task == master_task) then + allocate(work_g2(nx_global+2,ny_global+1)) + else + allocate(work_g2(1,1)) ! to save memory + endif + endif +#endif + + nx = nx_global + ny = ny_global + + if (present(restart_ext)) then + if (restart_ext) then + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + endif + endif + + if (my_task == master_task) then + allocate(work_g1(nx,ny)) + else + allocate(work_g1(1,1)) ! to save memory + endif + + if (my_task == master_task) then + + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- + + status = nf90_inq_varid(fid, trim(varname), varid) + + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc_xy: Cannot find variable '//trim(varname) ) + endif + + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- + +#ifndef ORCA_GRID + status = nf90_get_var( fid, varid, work_g1, & + start=(/1,1,nrec/), & + count=(/nx,ny,1/) ) +#else + if (.not. present(restart_ext)) then + status = nf90_get_var( fid, varid, work_g2, & + start=(/1,1,nrec/), & + count=(/nx_global+2,ny_global+1,1/) ) + work_g1 = work_g2(2:nx_global+1,1:ny_global) + else + status = nf90_get_var( fid, varid, work_g1, & + start=(/1,1,nrec/), & + count=(/nx,ny,1/) ) + endif +#endif + + endif ! my_task = master_task + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + + if (my_task==master_task .and. diag) then +! write(nu_diag,*) & +! 
'ice_read_nc_xy, fid= ',fid, ', nrec = ',nrec, & +! ', varname = ',trim(varname) +! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) +! write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar +! do id=1,ndim +! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) +! write(nu_diag,*) 'Dim name = ',trim(dimname),', size = ',dimlen +! enddo + amin = minval(work_g1) + amax = maxval(work_g1, mask = work_g1 /= spval_dbl) + asum = sum (work_g1, mask = work_g1 /= spval_dbl) + write(nu_diag,*) ' min, max, sum =', amin, amax, asum + endif + + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- + + if (present(restart_ext)) then + if (restart_ext) then + call scatter_global_ext(work, work_g1, master_task, distrb_info) + endif + else + if (present(field_loc)) then + call scatter_global(work, work_g1, master_task, distrb_info, & + field_loc, field_type) + else + call scatter_global(work, work_g1, master_task, distrb_info, & + field_loc_noupdate, field_type_noupdate) + endif + endif + + deallocate(work_g1) +#ifdef ORCA_GRID + if (.not. present(restart_ext)) deallocate(work_g2) +#endif + +#else + work = c0 ! to satisfy intent(out) attribute +#endif + end subroutine ice_read_nc_xy + +!======================================================================= + +! Read a netCDF file and scatter to processors. +! If the optional variables field_loc and field_type are present, +! the ghost cells are filled using values from the global array. +! This prevents them from being filled with zeroes in land cells +! (subroutine ice_HaloUpdate need not be called). +! +! Adapted by David Bailey, NCAR from ice_read_nc_xy + + subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & + field_loc, field_type, restart_ext) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: scatter_global, scatter_global_ext + + integer (kind=int_kind), intent(in) :: & + fid , & ! file id + nrec ! record number + +#ifndef AusCOM + character (len=*), intent(in) :: & +#else + character*(*), intent(in) :: & +#endif + varname ! field name in netcdf file + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat,max_blocks), & + intent(out) :: & + work ! output array (real, 8-byte) + + logical (kind=log_kind), optional, intent(in) :: & + restart_ext ! if true, read extended grid + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + ! local variables + +#ifdef ncdf +! netCDF file diagnostics: + integer (kind=int_kind) :: & + varid , & ! variable id + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + n, & ! ncat index + dimlen ! size of dimension + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + character (char_len) :: & + dimname ! dimension name + + real (kind=dbl_kind), dimension(:,:,:), allocatable :: & + work_g1 + + integer (kind=int_kind) :: nx, ny + +#ifdef ORCA_GRID + real (kind=dbl_kind), dimension(:,:,:), allocatable :: & + work_g2 + + if (.not. present(restart_ext)) then + if (my_task == master_task) then + allocate(work_g2(nx_global+2,ny_global+1,ncat)) + else + allocate(work_g2(1,1,ncat)) ! 
to save memory + endif + endif +#endif + + nx = nx_global + ny = ny_global + + if (present(restart_ext)) then + if (restart_ext) then + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + endif + endif + + if (my_task == master_task) then + allocate(work_g1(nx,ny,ncat)) + else + allocate(work_g1(1,1,ncat)) ! to save memory + endif + + if (my_task == master_task) then + + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- + + status = nf90_inq_varid(fid, trim(varname), varid) + + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc_xyz: Cannot find variable '//trim(varname) ) + endif + + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- + +#ifndef ORCA_GRID + status = nf90_get_var( fid, varid, work_g1, & + start=(/1,1,1,nrec/), & + count=(/nx,ny,ncat,1/) ) +#else + if (.not. present(restart_ext)) then + status = nf90_get_var( fid, varid, work_g2, & + start=(/1,1,1,nrec/), & + count=(/nx_global+2,ny_global+1,ncat,1/) ) + work_g1 = work_g2(2:nx_global+1,1:ny_global,:) + else + status = nf90_get_var( fid, varid, work_g1, & + start=(/1,1,1,nrec/), & + count=(/nx,ny,ncat,1/) ) + endif +#endif + + endif ! my_task = master_task + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + + if (my_task==master_task .and. diag) then +! write(nu_diag,*) & +! 'ice_read_nc_xyz, fid= ',fid, ', nrec = ',nrec, & +! ', varname = ',trim(varname) +! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) +! write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar +! do id=1,ndim +! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) +! write(nu_diag,*) 'Dim name = ',trim(dimname),', size = ',dimlen +! enddo + do n=1,ncat + amin = minval(work_g1(:,:,n)) + amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= spval_dbl) + asum = sum (work_g1(:,:,n), mask = work_g1(:,:,n) /= spval_dbl) + write(nu_diag,*) ' min, max, sum =', amin, amax, asum + enddo + endif + + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- + + if (present(restart_ext)) then + if (restart_ext) then + do n=1,ncat + call scatter_global_ext(work(:,:,n,:), work_g1(:,:,n), & + master_task, distrb_info) + enddo + endif + else + if (present(field_loc)) then + do n=1,ncat + call scatter_global(work(:,:,n,:), work_g1(:,:,n), master_task, & + distrb_info, field_loc, field_type) + enddo + else + do n=1,ncat + call scatter_global(work(:,:,n,:), work_g1(:,:,n), master_task, & + distrb_info, field_loc_noupdate, field_type_noupdate) + enddo + endif + endif + + deallocate(work_g1) +#ifdef ORCA_GRID + if (.not. present(restart_ext)) deallocate(work_g2) +#endif + +#else + work = c0 ! to satisfy intent(out) attribute +#endif + end subroutine ice_read_nc_xyz + +!======================================================================= + +! Read a netCDF file +! Adapted by Alison McLaren, Met Office from ice_read + + subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & + field_loc, field_type) + + use ice_fileunits, only: nu_diag + integer (kind=int_kind), intent(in) :: & + fid , & ! file id + nrec ! 
record number + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + character (char_len), intent(in) :: & + varname ! field name in netcdf file + + real (kind=dbl_kind), & + intent(out) :: & + work ! output variable (real, 8-byte) + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + ! local variables + +#ifdef ncdf +! netCDF file diagnostics: + integer (kind=int_kind) :: & + varid, & ! netcdf id for field + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + dimlen ! size of dimension + + real (kind=dbl_kind), dimension(1) :: & + workg ! temporary work variable + + character (char_len) :: & + dimname ! dimension name + + if (my_task == master_task) then + + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- + + status = nf90_inq_varid(fid, trim(varname), varid) + + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc_point: Cannot find variable '//trim(varname) ) + endif + + !-------------------------------------------------------------- + ! Read point variable + !-------------------------------------------------------------- + + status = nf90_get_var(fid, varid, workg, & + start= (/ nrec /), & + count=(/ 1 /) ) + + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc_point: Cannot get variable '//trim(varname) ) + endif + endif ! my_task = master_task + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + + if (my_task==master_task .and. diag) then + write(nu_diag,*) & + 'ice_read_nc_point, fid= ',fid, ', nrec = ',nrec, & + ', varname = ',trim(varname) + status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) + write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar + do id=1,ndim + status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) + write(nu_diag,*) 'Dim name = ',trim(dimname),', size = ',dimlen + enddo + endif + + work = workg(1) + +#else + work = c0 ! to satisfy intent(out) attribute +#endif + end subroutine ice_read_nc_point + +!======================================================================= + +! Adapted by Nicole Jeffery, LANL + + subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & + field_loc, field_type) + + use ice_domain_size, only: nilyr + use ice_fileunits, only: nu_diag + + integer (kind=int_kind), intent(in) :: & + fid , & ! file id + nrec ! record number + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + character (char_len), intent(in) :: & + varname ! field name in netcdf file + + real (kind=dbl_kind), dimension(nilyr), & + intent(out) :: & + work ! output array (real, 8-byte) + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + ! local variables + + real (kind=dbl_kind), dimension(:), allocatable :: & + work_z + +#ifdef ncdf +! netCDF file diagnostics: + integer (kind=int_kind) :: & + varid, & ! netcdf id for field + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + dimlen ! size of dimension + + character (char_len) :: & + dimname ! 
dimension name + + allocate(work_z(nilyr)) + + if (my_task == master_task) then + + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- + + status = nf90_inq_varid(fid, trim(varname), varid) + + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc: Cannot find variable '//trim(varname) ) + endif + + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- + + status = nf90_get_var( fid, varid, work_z, & + start=(/1,nrec/), & + count=(/nilyr,1/) ) + + endif ! my_task = master_task + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + + if (my_task==master_task .and. diag) then + write(nu_diag,*) & + 'ice_read_nc_z, fid= ',fid, ', nrec = ',nrec, & + ', varname = ',trim(varname) + status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) + write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar + do id=1,ndim + status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) + write(nu_diag,*) 'Dim name = ',trim(dimname),', size = ',dimlen + enddo + endif + + work(:) = work_z(:) + deallocate(work_z) + +#else + work = c0 ! to satisfy intent(out) attribute +#endif + end subroutine ice_read_nc_z + +!======================================================================= + +! Write a netCDF file. +! +! Adapted by David Bailey, NCAR + + subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & + restart_ext) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: gather_global, gather_global_ext + + integer (kind=int_kind), intent(in) :: & + fid , & ! file id + varid , & ! variable id + nrec ! record number + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + logical (kind=log_kind), optional, intent(in) :: & + restart_ext ! if true, write extended grid + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(in) :: & + work ! output array (real, 8-byte) + + ! local variables + +#ifdef ncdf +! netCDF file diagnostics: + integer (kind=int_kind) :: & + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + dimlen ! size of dimension + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + character (char_len) :: & + dimname ! dimension name + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + integer (kind=int_kind) :: nx, ny + + nx = nx_global + ny = ny_global + + if (present(restart_ext)) then + if (restart_ext) then + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + endif + endif + + if (my_task == master_task) then + allocate(work_g1(nx,ny)) + else + allocate(work_g1(1,1)) ! to save memory + endif + + if (present(restart_ext)) then + if (restart_ext) then + call gather_global_ext(work_g1, work, master_task, distrb_info, spc_val=c0) + endif + else + call gather_global(work_g1, work, master_task, distrb_info, spc_val=c0) + endif + + if (my_task == master_task) then + + !-------------------------------------------------------------- + ! Write global array + !-------------------------------------------------------------- + + status = nf90_put_var( fid, varid, work_g1, & + start=(/1,1,nrec/), & + count=(/nx,ny,1/) ) + + endif ! 
my_task = master_task + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + + if (my_task==master_task .and. diag) then +! write(nu_diag,*) & +! 'ice_write_nc_xy, fid= ',fid, ', nrec = ',nrec, & +! ', varid = ',varid +! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) +! write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar +! do id=1,ndim +! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) +! write(nu_diag,*) 'Dim name = ',trim(dimname),', size = ',dimlen +! enddo + amin = minval(work_g1) + amax = maxval(work_g1, mask = work_g1 /= spval_dbl) + asum = sum (work_g1, mask = work_g1 /= spval_dbl) + write(nu_diag,*) ' min, max, sum =', amin, amax, asum + endif + + deallocate(work_g1) + +#endif + end subroutine ice_write_nc_xy + +!======================================================================= + +! Write a netCDF file. +! +! Adapted by David Bailey, NCAR + + subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & + restart_ext) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: gather_global, gather_global_ext + + integer (kind=int_kind), intent(in) :: & + fid , & ! file id + varid , & ! variable id + nrec ! record number + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + logical (kind=log_kind), optional, intent(in) :: & + restart_ext ! if true, write extended grid + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat,max_blocks), & + intent(in) :: & + work ! output array (real, 8-byte) + + ! local variables + +#ifdef ncdf +! netCDF file diagnostics: + integer (kind=int_kind) :: & + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + n, & ! ncat index + dimlen ! size of dimension + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + character (char_len) :: & + dimname ! dimension name + + real (kind=dbl_kind), dimension(:,:,:), allocatable :: & + work_g1 + + integer (kind=int_kind) :: nx, ny + + nx = nx_global + ny = ny_global + + if (present(restart_ext)) then + if (restart_ext) then + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + endif + endif + + if (my_task == master_task) then + allocate(work_g1(nx,ny,ncat)) + else + allocate(work_g1(1,1,ncat)) ! to save memory + endif + + if (present(restart_ext)) then + if (restart_ext) then + do n=1,ncat + call gather_global_ext(work_g1(:,:,n), work(:,:,n,:), & + master_task, distrb_info, spc_val=c0) + enddo + endif + else + do n=1,ncat + call gather_global(work_g1(:,:,n), work(:,:,n,:), & + master_task, distrb_info, spc_val=c0) + enddo + endif + + if (my_task == master_task) then + + !-------------------------------------------------------------- + ! Write global array + !-------------------------------------------------------------- + + status = nf90_put_var( fid, varid, work_g1, & + start=(/1,1,1,nrec/), & + count=(/nx,ny,ncat,1/) ) + + endif ! my_task = master_task + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + + if (my_task==master_task .and. diag) then +! write(nu_diag,*) & +! 'ice_write_nc_xyz, fid= ',fid, ', nrec = ',nrec, & +! ', varid = ',varid +! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) +! write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar +! do id=1,ndim +! 
status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) +! write(nu_diag,*) 'Dim name = ',trim(dimname),', size = ',dimlen +! enddo + amin = 10000._dbl_kind + amax = -10000._dbl_kind + do n=1,ncat + amin = minval(work_g1(:,:,n)) + amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= spval_dbl) + asum = sum (work_g1(:,:,n), mask = work_g1(:,:,n) /= spval_dbl) + write(nu_diag,*) ' min, max, sum =', amin, amax, asum + enddo + endif + + deallocate(work_g1) + +#endif + end subroutine ice_write_nc_xyz + +!======================================================================= + +! Read a netcdf file. +! Just like ice_read_nc except that it returns a global array. +! work_g is a real array +! +! Adapted by William Lipscomb, LANL, from ice_read +! Adapted by Ann Keen, Met Office, to read from a netcdf file + + subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) + + use ice_fileunits, only: nu_diag + + integer (kind=int_kind), intent(in) :: & + fid , & ! file id + nrec ! record number + +#ifndef AusCOM + character (char_len), intent(in) :: & +#else + character*(*), intent(in) :: & +#endif + varname ! field name in netcdf file + + real (kind=dbl_kind), dimension(nx_global,ny_global), & + intent(out) :: & + work_g ! output array (real, 8-byte) + + logical (kind=log_kind) :: & + diag ! if true, write diagnostic output + + ! local variables + +#ifdef ncdf +! netCDF file diagnostics: + integer (kind=int_kind) :: & + varid, & ! netcdf id for field + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + dimlen ! size of dimension + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + character (char_len) :: & + dimname ! dimension name +! +#ifdef ORCA_GRID + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g3 + + if (my_task == master_task) then + allocate(work_g3(nx_global+2,ny_global+1)) + else + allocate(work_g3(1,1)) ! to save memory + endif + + work_g3(:,:) = c0 +#endif + work_g(:,:) = c0 + + if (my_task == master_task) then + + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- + + status = nf90_inq_varid(fid, trim(varname), varid) + + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_global_nc: Cannot find variable '//trim(varname) ) + endif + + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- + +#ifndef ORCA_GRID + status = nf90_get_var( fid, varid, work_g, & + start=(/1,1,nrec/), & + count=(/nx_global,ny_global,1/) ) +#else + status = nf90_get_var( fid, varid, work_g3, & + start=(/1,1,nrec/), & + count=(/nx_global+2,ny_global+1,1/) ) + work_g=work_g3(2:nx_global+1,1:ny_global) +#endif + endif ! my_task = master_task + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + + if (my_task == master_task .and. diag) then +! write(nu_diag,*) & +! 'ice_read_global_nc, fid= ',fid, ', nrec = ',nrec, & +! ', varname = ',trim(varname) +! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) +! write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar +! do id=1,ndim +! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) +! write(nu_diag,*) 'Dim name = ',trim(dimname),', size = ',dimlen +! 
enddo + amin = minval(work_g) + amax = maxval(work_g, mask = work_g /= spval_dbl) + asum = sum (work_g, mask = work_g /= spval_dbl) + write(nu_diag,*) 'min, max, sum = ', amin, amax, asum + endif + +#ifdef ORCA_GRID + deallocate(work_g3) +#endif + +#else + work_g = c0 ! to satisfy intent(out) attribute +#endif + end subroutine ice_read_global_nc + +!======================================================================= + +! Closes a netCDF file +! author: Alison McLaren, Met Office + + subroutine ice_close_nc(fid) + + integer (kind=int_kind), intent(in) :: & + fid ! unit number + + ! local variables + +#ifdef ncdf + integer (kind=int_kind) :: & + status ! status variable from netCDF routine + + if (my_task == master_task) then + status = nf90_close(fid) + endif +#endif + + end subroutine ice_close_nc + +!======================================================================= + +! Read a netCDF file and scatter to processors. +! If the optional variables field_loc and field_type are present, +! the ghost cells are filled using values from the global array. +! This prevents them from being filled with zeroes in land cells +! (subroutine ice_HaloUpdate need not be called). +! +! Adapted by Elizabeth Hunke for reading 3D ocean currents + + subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & + field_loc, field_type, restart_ext) + + use ice_fileunits, only: nu_diag + use ice_gather_scatter, only: scatter_global, scatter_global_ext + + integer (kind=int_kind), intent(in) :: & + fid , & ! file id + nrec , & ! record number + nzlev ! z level + + logical (kind=log_kind), intent(in) :: & + diag ! if true, write diagnostic output + + character (len=*), intent(in) :: & + varname ! field name in netcdf file + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(out) :: & + work ! output array (real, 8-byte) + + logical (kind=log_kind), optional, intent(in) :: & + restart_ext ! if true, read extended grid + + integer (kind=int_kind), optional, intent(in) :: & + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) + + ! local variables + +#ifdef ncdf
! netCDF file diagnostics: + integer (kind=int_kind) :: & + varid , & ! variable id + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + dimlen ! size of dimension + + real (kind=dbl_kind) :: & + amin, amax, asum ! min, max values and sum of input array + + character (char_len) :: & + dimname ! dimension name + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + integer (kind=int_kind) :: nx, ny + + nx = nx_global + ny = ny_global + + if (present(restart_ext)) then + if (restart_ext) then + nx = nx_global + 2*nghost + ny = ny_global + 2*nghost + endif + endif + + if (my_task == master_task) then + allocate(work_g1(nx,ny)) + else + allocate(work_g1(1,1)) ! to save memory + endif + + if (my_task == master_task) then + + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- + + status = nf90_inq_varid(fid, trim(varname), varid) + + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc_uv: Cannot find variable '//trim(varname) ) + endif + + !-------------------------------------------------------------- + ! 
Read global array + !-------------------------------------------------------------- + + status = nf90_get_var( fid, varid, work_g1, & + start=(/1,1,nzlev,nrec/), & + count=(/nx,ny,1,1/) ) + + endif ! my_task = master_task + + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- + + if (my_task==master_task .and. diag) then + amin = minval(work_g1) + amax = maxval(work_g1, mask = work_g1 /= spval_dbl) + asum = sum (work_g1, mask = work_g1 /= spval_dbl) + write(nu_diag,*) ' min, max, sum =', amin, amax, asum + endif + + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- + + if (present(restart_ext)) then + if (restart_ext) then + call scatter_global_ext(work, work_g1, master_task, distrb_info) + endif + else + if (present(field_loc)) then + call scatter_global(work, work_g1, master_task, distrb_info, & + field_loc, field_type) + else + call scatter_global(work, work_g1, master_task, distrb_info, & + field_loc_noupdate, field_type_noupdate) + endif + endif + + deallocate(work_g1) + +#else + work = c0 ! to satisfy intent(out) attribute +#endif + end subroutine ice_read_nc_uv + +!======================================================================= + + end module ice_read_write + +!======================================================================= diff --git a/source/ice_restart_driver.F90 b/source/ice_restart_driver.F90 new file mode 100755 index 00000000..0a1a5cc7 --- /dev/null +++ b/source/ice_restart_driver.F90 @@ -0,0 +1,881 @@ +! SVN:$Id: ice_restart_driver.F90 607 2013-03-29 15:49:42Z eclare $ +!======================================================================= + +! Read and write ice model restart files +! +! authors Elizabeth C. Hunke, LANL +! William H. Lipscomb LANL +! David Bailey, NCAR +! +! 2004-05: Block structure added by William Lipscomb +! Restart module separated from history module +! 2006 ECH: Accepted some CCSM code into mainstream CICE +! Converted to free source form (F90) +! 2008 ECH: Rearranged order in which internal stresses are written and read +! 2010 ECH: Changed eice, esno to qice, qsno +! 2012 ECH: Added routines for reading/writing extended grid +! 2013 DAB: Added generic interfaces for writing restart fields. + + module ice_restart_driver + + use ice_kinds_mod + use ice_restart_shared, only: & + restart, restart_ext, restart_dir, restart_file, pointer_file, & + runid, runtype, use_restart_time, restart_format, lcdf64, lenstr + use ice_restart +#ifdef AusCOM + use cpl_parameters, only: runtime0 +#endif + + implicit none + private + public :: dumpfile, restartfile, restartfile_v4 + save + +!======================================================================= + + contains + +!======================================================================= + +!======================================================================= +!---! these subroutines write/read Fortran unformatted data files .. +!======================================================================= + +! Dumps all values needed for a restart +! author Elizabeth C. 
Hunke, LANL + + subroutine dumpfile(filename_spec) + + use ice_blocks, only: nx_block, ny_block + use ice_calendar, only: sec, month, mday, nyr, istep1, & + time, time_forc, year_init + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, c1 + use ice_domain, only: nblocks + use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks + use ice_fileunits, only: nu_diag, nu_rst_pointer, nu_dump + use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & + strocnxT, strocnyT, sst, frzmlt, iceumask, coszen, & + stressp_1, stressp_2, stressp_3, stressp_4, & + stressm_1, stressm_2, stressm_3, stressm_4, & + stress12_1, stress12_2, stress12_3, stress12_4 + use ice_ocean, only: oceanmixed_ice + use ice_read_write, only: ice_open, ice_write + use ice_state, only: aicen, vicen, vsnon, trcrn, & + nt_Tsfc, nt_sice, nt_qice, nt_qsno, uvel, vvel + + character(len=char_len_long), intent(in), optional :: filename_spec + + ! local variables + + integer (kind=int_kind) :: & + i, j, k, n, iblk, & ! counting indices + iyear, imonth, iday ! year, month, day + + character(len=char_len_long) :: filename + + logical (kind=log_kind) :: diag + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + character (len=3) :: nchar + + if (present(filename_spec)) then + call init_restart_write(filename_spec) + else + call init_restart_write + endif + + diag = .true. + + !----------------------------------------------------------------- + ! state variables + ! Tsfc is the only tracer written to binary files. All other + ! tracers are written to their own dump/restart binary files. + !----------------------------------------------------------------- + + call write_restart_field(nu_dump,0,aicen(:,:,:,:),'ruf8','aicen',ncat,diag) + call write_restart_field(nu_dump,0,vicen(:,:,:,:),'ruf8','vicen',ncat,diag) + call write_restart_field(nu_dump,0,vsnon(:,:,:,:),'ruf8','vsnon',ncat,diag) + call write_restart_field(nu_dump,0,trcrn(:,:,nt_Tsfc,:,:),'ruf8','Tsfcn',ncat,diag) + + do k=1,nilyr + write(nchar,'(i3.3)') k + call write_restart_field(nu_dump,0,trcrn(:,:,nt_sice+k-1,:,:),'ruf8', & + 'sice'//trim(nchar),ncat,diag) + enddo + + do k=1,nilyr + write(nchar,'(i3.3)') k + call write_restart_field(nu_dump,0,trcrn(:,:,nt_qice+k-1,:,:),'ruf8', & + 'qice'//trim(nchar),ncat,diag) + enddo + + do k=1,nslyr + write(nchar,'(i3.3)') k + call write_restart_field(nu_dump,0,trcrn(:,:,nt_qsno+k-1,:,:),'ruf8', & + 'qsno'//trim(nchar),ncat,diag) + enddo + + !----------------------------------------------------------------- + ! velocity + !----------------------------------------------------------------- + call write_restart_field(nu_dump,0,uvel,'ruf8','uvel',1,diag) + call write_restart_field(nu_dump,0,vvel,'ruf8','vvel',1,diag) + + !----------------------------------------------------------------- + ! radiation fields + !----------------------------------------------------------------- +#ifdef CCSMCOUPLED + call write_restart_field(nu_dump,0,coszen,'ruf8','coszen',1,diag) +#endif + call write_restart_field(nu_dump,0,scale_factor,'ruf8','scale_factor',1,diag) + + call write_restart_field(nu_dump,0,swvdr,'ruf8','swvdr',1,diag) + call write_restart_field(nu_dump,0,swvdf,'ruf8','swvdf',1,diag) + call write_restart_field(nu_dump,0,swidr,'ruf8','swidr',1,diag) + call write_restart_field(nu_dump,0,swidf,'ruf8','swidf',1,diag) + + !----------------------------------------------------------------- + ! 
ocean stress (for bottom heat flux in thermo) + !----------------------------------------------------------------- + call write_restart_field(nu_dump,0,strocnxT,'ruf8','strocnxT',1,diag) + call write_restart_field(nu_dump,0,strocnyT,'ruf8','strocnyT',1,diag) + + !----------------------------------------------------------------- + ! internal stress + !----------------------------------------------------------------- + call write_restart_field(nu_dump,0,stressp_1,'ruf8','stressp_1',1,diag) + call write_restart_field(nu_dump,0,stressp_3,'ruf8','stressp_3',1,diag) + call write_restart_field(nu_dump,0,stressp_2,'ruf8','stressp_2',1,diag) + call write_restart_field(nu_dump,0,stressp_4,'ruf8','stressp_4',1,diag) + + call write_restart_field(nu_dump,0,stressm_1,'ruf8','stressm_1',1,diag) + call write_restart_field(nu_dump,0,stressm_3,'ruf8','stressm_3',1,diag) + call write_restart_field(nu_dump,0,stressm_2,'ruf8','stressm_2',1,diag) + call write_restart_field(nu_dump,0,stressm_4,'ruf8','stressm_4',1,diag) + + call write_restart_field(nu_dump,0,stress12_1,'ruf8','stress12_1',1,diag) + call write_restart_field(nu_dump,0,stress12_3,'ruf8','stress12_3',1,diag) + call write_restart_field(nu_dump,0,stress12_2,'ruf8','stress12_2',1,diag) + call write_restart_field(nu_dump,0,stress12_4,'ruf8','stress12_4',1,diag) + + !----------------------------------------------------------------- + ! ice mask for dynamics + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceumask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) + + ! for mixed layer model + if (oceanmixed_ice) then + call write_restart_field(nu_dump,0,sst,'ruf8','sst',1,diag) + call write_restart_field(nu_dump,0,frzmlt,'ruf8','frzmlt',1,diag) + endif + + end subroutine dumpfile + +!======================================================================= + +! Restarts from a dump +! author Elizabeth C. Hunke, LANL + + subroutine restartfile (ice_ic) + + use ice_boundary, only: ice_HaloUpdate_stress + use ice_broadcast, only: broadcast_scalar + use ice_blocks, only: nghost, nx_block, ny_block + use ice_calendar, only: istep0, istep1, time, time_forc, calendar, npt + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, p5, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_vector + use ice_domain, only: nblocks, distrb_info, halo_info + use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & + max_ntrcr, max_blocks + use ice_fileunits, only: nu_diag, nu_rst_pointer, nu_restart + use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & + strocnxT, strocnyT, sst, frzmlt, iceumask, coszen, & + stressp_1, stressp_2, stressp_3, stressp_4, & + stressm_1, stressm_2, stressm_3, stressm_4, & + stress12_1, stress12_2, stress12_3, stress12_4 + use ice_gather_scatter, only: scatter_global_stress + use ice_grid, only: tmask, grid_type + use ice_itd, only: aggregate + use ice_ocean, only: oceanmixed_ice + use ice_read_write, only: ice_open, ice_read, ice_read_global + use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & + aice0, aicen, vicen, vsnon, trcrn, aice_init, & + nt_Tsfc, nt_sice, nt_qice, nt_qsno, uvel, vvel + + character (*), optional :: ice_ic + + ! local variables + + integer (kind=int_kind) :: & + i, j, k, n, iblk, & ! 
counting indices + iignore ! dummy variable + + real (kind=real_kind) :: & + rignore ! dummy variable + + character(len=char_len_long) :: & + filename, filename0 + + logical (kind=log_kind) :: & + diag + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1, work_g2 + + character (len=3) :: nchar + + call init_restart_read(ice_ic) + + diag = .true. + + !----------------------------------------------------------------- + ! state variables + ! Tsfc is the only tracer read in this file. All other + ! tracers are in their own dump/restart files. + !----------------------------------------------------------------- + if (my_task == master_task) & + write(nu_diag,*) ' min/max area, vol ice, vol snow, Tsfc' + + call read_restart_field(nu_restart,0,aicen,'ruf8', & + 'aicen',ncat,diag,field_loc_center, field_type_scalar) + call read_restart_field(nu_restart,0,vicen,'ruf8', & + 'vicen',ncat,diag,field_loc_center, field_type_scalar) + call read_restart_field(nu_restart,0,vsnon,'ruf8', & + 'vsnon',ncat,diag,field_loc_center, field_type_scalar) + call read_restart_field(nu_restart,0,trcrn(:,:,nt_Tsfc,:,:),'ruf8', & + 'Tsfcn',ncat,diag,field_loc_center, field_type_scalar) + + if (my_task == master_task) & + write(nu_diag,*) 'min/max sice for each layer' + do k=1,nilyr + write(nchar,'(i3.3)') k + call read_restart_field(nu_restart,0,trcrn(:,:,nt_sice+k-1,:,:),'ruf8', & + 'sice'//trim(nchar),ncat,diag,field_loc_center,field_type_scalar) + enddo + + if (my_task == master_task) & + write(nu_diag,*) 'min/max qice for each layer' + do k=1,nilyr + write(nchar,'(i3.3)') k + call read_restart_field(nu_restart,0,trcrn(:,:,nt_qice+k-1,:,:),'ruf8', & + 'qice'//trim(nchar),ncat,diag,field_loc_center,field_type_scalar) + enddo + + if (my_task == master_task) & + write(nu_diag,*) 'min/max qsno for each layer' + do k=1,nslyr + write(nchar,'(i3.3)') k + call read_restart_field(nu_restart,0,trcrn(:,:,nt_qsno+k-1,:,:),'ruf8', & + 'qsno'//trim(nchar),ncat,diag,field_loc_center,field_type_scalar) + enddo + + !----------------------------------------------------------------- + ! velocity + !----------------------------------------------------------------- + if (my_task == master_task) & + write(nu_diag,*) 'min/max velocity components' + + call read_restart_field(nu_restart,0,uvel,'ruf8', & + 'uvel',1,diag,field_loc_NEcorner, field_type_vector) + call read_restart_field(nu_restart,0,vvel,'ruf8', & + 'vvel',1,diag,field_loc_NEcorner, field_type_vector) + + !----------------------------------------------------------------- + ! 
radiation fields + !----------------------------------------------------------------- + + if (my_task == master_task) & + write(nu_diag,*) 'radiation fields' + +#ifdef CCSMCOUPLED + call read_restart_field(nu_restart,0,coszen,'ruf8', & + 'coszen',1,diag, field_loc_center, field_type_scalar) +#endif + call read_restart_field(nu_restart,0,scale_factor,'ruf8', & + 'scale_factor',1,diag, field_loc_center, field_type_scalar) + call read_restart_field(nu_restart,0,swvdr,'ruf8', & + 'swvdr',1,diag,field_loc_center, field_type_scalar) + call read_restart_field(nu_restart,0,swvdf,'ruf8', & + 'swvdf',1,diag,field_loc_center, field_type_scalar) + call read_restart_field(nu_restart,0,swidr,'ruf8', & + 'swidr',1,diag,field_loc_center, field_type_scalar) + call read_restart_field(nu_restart,0,swidf,'ruf8', & + 'swidf',1,diag,field_loc_center, field_type_scalar) + + !----------------------------------------------------------------- + ! ocean stress + !----------------------------------------------------------------- + if (my_task == master_task) & + write(nu_diag,*) 'min/max ocean stress components' + + call read_restart_field(nu_restart,0,strocnxT,'ruf8', & + 'strocnxT',1,diag,field_loc_center, field_type_vector) + call read_restart_field(nu_restart,0,strocnyT,'ruf8', & + 'strocnyT',1,diag,field_loc_center, field_type_vector) + + !----------------------------------------------------------------- + ! internal stress + ! The stress tensor must be read and scattered in pairs in order + ! to properly match corner values across a tripole grid cut. + !----------------------------------------------------------------- + if (my_task == master_task) write(nu_diag,*) & + 'internal stress components' + + call read_restart_field(nu_restart,0,stressp_1,'ruf8', & + 'stressp_1',1,diag,field_loc_center,field_type_scalar) ! stressp_1 + call read_restart_field(nu_restart,0,stressp_3,'ruf8', & + 'stressp_3',1,diag,field_loc_center,field_type_scalar) ! stressp_3 + call read_restart_field(nu_restart,0,stressp_2,'ruf8', & + 'stressp_2',1,diag,field_loc_center,field_type_scalar) ! stressp_2 + call read_restart_field(nu_restart,0,stressp_4,'ruf8', & + 'stressp_4',1,diag,field_loc_center,field_type_scalar) ! stressp_4 + + call read_restart_field(nu_restart,0,stressm_1,'ruf8', & + 'stressm_1',1,diag,field_loc_center,field_type_scalar) ! stressm_1 + call read_restart_field(nu_restart,0,stressm_3,'ruf8', & + 'stressm_3',1,diag,field_loc_center,field_type_scalar) ! stressm_3 + call read_restart_field(nu_restart,0,stressm_2,'ruf8', & + 'stressm_2',1,diag,field_loc_center,field_type_scalar) ! stressm_2 + call read_restart_field(nu_restart,0,stressm_4,'ruf8', & + 'stressm_4',1,diag,field_loc_center,field_type_scalar) ! stressm_4 + + call read_restart_field(nu_restart,0,stress12_1,'ruf8', & + 'stress12_1',1,diag,field_loc_center,field_type_scalar) ! stress12_1 + call read_restart_field(nu_restart,0,stress12_3,'ruf8', & + 'stress12_3',1,diag,field_loc_center,field_type_scalar) ! stress12_1 + + call read_restart_field(nu_restart,0,stress12_2,'ruf8', & + 'stress12_2',1,diag,field_loc_center,field_type_scalar) ! stress12_2 + call read_restart_field(nu_restart,0,stress12_4,'ruf8', & + 'stress12_4',1,diag,field_loc_center,field_type_scalar) ! 
stress12_4 + + if (trim(grid_type) == 'tripole') then + call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & + field_loc_center, field_type_scalar) + endif + + !----------------------------------------------------------------- + ! ice mask for dynamics + !----------------------------------------------------------------- + if (my_task == master_task) & + write(nu_diag,*) 'ice mask for dynamics' + + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceumask',1,diag,field_loc_center, field_type_scalar) + + iceumask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceumask(i,j,iblk) = .true. + enddo + enddo + enddo + !$OMP END PARALLEL DO + + ! for mixed layer model + if (oceanmixed_ice) then + + if (my_task == master_task) & + write(nu_diag,*) 'min/max sst, frzmlt' + + call read_restart_field(nu_restart,0,sst,'ruf8', & + 'sst',1,diag,field_loc_center, field_type_scalar) + call read_restart_field(nu_restart,0,frzmlt,'ruf8', & + 'frzmlt',1,diag,field_loc_center, field_type_scalar) + endif + + !----------------------------------------------------------------- + ! Ensure unused stress values in west and south ghost cells are 0 + !----------------------------------------------------------------- + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, nghost + do i = 1, nx_block + stressp_1 (i,j,iblk) = c0 + stressp_2 (i,j,iblk) = c0 + stressp_3 (i,j,iblk) = c0 + stressp_4 (i,j,iblk) = c0 + stressm_1 (i,j,iblk) = c0 + stressm_2 (i,j,iblk) = c0 + stressm_3 (i,j,iblk) = c0 + stressm_4 (i,j,iblk) = c0 + stress12_1(i,j,iblk) = c0 + stress12_2(i,j,iblk) = c0 + stress12_3(i,j,iblk) = c0 + stress12_4(i,j,iblk) = c0 + enddo + enddo + do j = 1, ny_block + do i = 1, nghost + stressp_1 (i,j,iblk) = c0 + stressp_2 (i,j,iblk) = c0 + stressp_3 (i,j,iblk) = c0 + stressp_4 (i,j,iblk) = c0 + stressm_1 (i,j,iblk) = c0 + stressm_2 (i,j,iblk) = c0 + stressm_3 (i,j,iblk) = c0 + stressm_4 (i,j,iblk) = c0 + stress12_1(i,j,iblk) = c0 + stress12_2(i,j,iblk) = c0 + stress12_3(i,j,iblk) = c0 + stress12_4(i,j,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO + + !----------------------------------------------------------------- + ! Ensure ice is binned in correct categories + ! (should not be necessary unless restarting from a run with + ! 
different category boundaries). + ! + ! If called, this subroutine does not give exact restart. + !----------------------------------------------------------------- +!!! call cleanup_itd + + !----------------------------------------------------------------- + ! compute aggregate ice state and open water area + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call aggregate (nx_block, ny_block, & + aicen(:,:,:,iblk), & + trcrn(:,:,:,:,iblk),& + vicen(:,:,:,iblk), & + vsnon(:,:,:,iblk), & + aice (:,:, iblk), & + trcr (:,:,:,iblk), & + vice (:,:, iblk), & + vsno (:,:, iblk), & + aice0(:,:, iblk), & + tmask(:,:, iblk), & + max_ntrcr, & + trcr_depend) + + aice_init(:,:,iblk) = aice(:,:,iblk) + + enddo + !$OMP END PARALLEL DO + + ! if runid is bering then need to correct npt for istep0 + if (trim(runid) == 'bering') then + npt = npt - istep0 + endif + + end subroutine restartfile + +!======================================================================= + +! Restarts from a CICE v4.1 (binary) dump +! author Elizabeth C. Hunke, LANL + + subroutine restartfile_v4 (ice_ic) + + use ice_broadcast, only: broadcast_scalar + use ice_blocks, only: nghost, nx_block, ny_block + use ice_calendar, only: istep0, istep1, time, time_forc, calendar, npt + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0, p5, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_vector + use ice_domain, only: nblocks, distrb_info + use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & + max_ntrcr, max_blocks + use ice_fileunits, only: nu_diag, nu_rst_pointer, nu_restart + use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & + strocnxT, strocnyT, sst, frzmlt, iceumask, & + stressp_1, stressp_2, stressp_3, stressp_4, & + stressm_1, stressm_2, stressm_3, stressm_4, & + stress12_1, stress12_2, stress12_3, stress12_4 + use ice_gather_scatter, only: scatter_global_stress + use ice_grid, only: tmask + use ice_itd, only: aggregate + use ice_ocean, only: oceanmixed_ice + use ice_read_write, only: ice_open, ice_read, ice_read_global + use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & + aice0, aicen, vicen, vsnon, trcrn, aice_init, & + nt_Tsfc, nt_sice, nt_qice, nt_qsno, uvel, vvel + + character (*), optional :: ice_ic + + ! local variables + + integer (kind=int_kind) :: & + i, j, k, n, iblk, & ! counting indices + iignore ! dummy variable + + real (kind=real_kind) :: & + rignore ! dummy variable + + character(len=char_len_long) :: & + filename, filename0 + + logical (kind=log_kind) :: & + diag + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1, work_g2 + + if (present(ice_ic)) then + filename = ice_ic + elseif (my_task == master_task) then + open(nu_rst_pointer,file=pointer_file) + read(nu_rst_pointer,'(a)') filename0 + filename = trim(filename0) +#ifdef AusCOM + write(nu_diag,*) 'XXX: restart_dir = ', restart_dir + write(nu_diag,*) 'XXX: org restart file => ', filename +!ars599: 28042015 restart issue +! 
filename = trim(restart_dir)//trim(filename) + filename = trim(filename) + write(nu_diag,*) 'XXX: restart file => ', filename +#endif + close(nu_rst_pointer) + write(nu_diag,*) 'Read ',pointer_file(1:lenstr(pointer_file)) + endif + + call ice_open(nu_restart,filename,0) + + if (my_task == master_task) & + write(nu_diag,*) 'Using restart dump=', trim(filename) + + if (use_restart_time) then + + if (my_task == master_task) then + read (nu_restart) istep0,time,time_forc + write(nu_diag,*) 'Restart read at istep=',istep0,time,time_forc + endif +#ifndef AusCOM + call calendar(time) +#else + call calendar(time-runtime0) +#endif + call broadcast_scalar(istep0,master_task) + istep1 = istep0 + call broadcast_scalar(time,master_task) + call broadcast_scalar(time_forc,master_task) + call calendar(time) + + else + + if (my_task == master_task) & + read (nu_restart) iignore,rignore,rignore + + endif + + diag = .true. ! write min/max diagnostics for field + + !----------------------------------------------------------------- + ! state variables + ! Tsfc is the only tracer read in this file. All other + ! tracers are in their own dump/restart files. + !----------------------------------------------------------------- + do n=1,ncat + if (my_task == master_task) & + write(nu_diag,*) 'cat ',n, & + ' min/max area, vol ice, vol snow, Tsfc' + + call ice_read(nu_restart,0,aicen(:,:,n,:),'ruf8',diag, & + field_loc_center, field_type_scalar) + call ice_read(nu_restart,0,vicen(:,:,n,:),'ruf8',diag, & + field_loc_center, field_type_scalar) + call ice_read(nu_restart,0,vsnon(:,:,n,:),'ruf8',diag, & + field_loc_center, field_type_scalar) + call ice_read(nu_restart,0,trcrn(:,:,nt_Tsfc,n,:),'ruf8',diag, & + field_loc_center, field_type_scalar) + + if (my_task == master_task) & + write(nu_diag,*) 'cat ',n, 'min/max sice for each layer' + do k=1,nilyr + call ice_read(nu_restart,0,trcrn(:,:,nt_sice+k-1,n,:),'ruf8',diag, & + field_loc_center, field_type_scalar) + enddo + + if (my_task == master_task) & + write(nu_diag,*) 'cat ',n, 'min/max qice for each layer' + do k=1,nilyr + call ice_read(nu_restart,0,trcrn(:,:,nt_qice+k-1,n,:),'ruf8',diag, & + field_loc_center, field_type_scalar) + enddo + + if (my_task == master_task) & + write(nu_diag,*) 'cat ',n, 'min/max qsno for each layer' + do k=1,nslyr + call ice_read(nu_restart,0,trcrn(:,:,nt_qsno+k-1,n,:),'ruf8',diag, & + field_loc_center, field_type_scalar) + enddo + enddo ! ncat + + !----------------------------------------------------------------- + ! velocity + !----------------------------------------------------------------- + if (my_task == master_task) & + write(nu_diag,*) 'min/max velocity components' + + call ice_read(nu_restart,0,uvel,'ruf8',diag, & + field_loc_NEcorner, field_type_vector) + call ice_read(nu_restart,0,vvel,'ruf8',diag, & + field_loc_NEcorner, field_type_vector) + + !----------------------------------------------------------------- + ! 
radiation fields + !----------------------------------------------------------------- + + if (my_task == master_task) & + write(nu_diag,*) 'radiation fields' + + call ice_read(nu_restart,0,scale_factor,'ruf8',diag, & + field_loc_center, field_type_scalar) + call ice_read(nu_restart,0,swvdr,'ruf8',diag, & + field_loc_center, field_type_scalar) + call ice_read(nu_restart,0,swvdf,'ruf8',diag, & + field_loc_center, field_type_scalar) + call ice_read(nu_restart,0,swidr,'ruf8',diag, & + field_loc_center, field_type_scalar) + call ice_read(nu_restart,0,swidf,'ruf8',diag, & + field_loc_center, field_type_scalar) + + !----------------------------------------------------------------- + ! ocean stress + !----------------------------------------------------------------- + if (my_task == master_task) & + write(nu_diag,*) 'min/max ocean stress components' + + call ice_read(nu_restart,0,strocnxT,'ruf8',diag, & + field_loc_center, field_type_vector) + call ice_read(nu_restart,0,strocnyT,'ruf8',diag, & + field_loc_center, field_type_vector) + + !----------------------------------------------------------------- + ! internal stress + ! The stress tensor must be read and scattered in pairs in order + ! to properly match corner values across a tripole grid cut. + !----------------------------------------------------------------- + if (my_task == master_task) write(nu_diag,*) & + 'internal stress components' + + allocate (work_g1(nx_global,ny_global), & + work_g2(nx_global,ny_global)) + + call ice_read_global(nu_restart,0,work_g1,'ruf8',diag) ! stressp_1 + call ice_read_global(nu_restart,0,work_g2,'ruf8',diag) ! stressp_3 + call scatter_global_stress(stressp_1, work_g1, work_g2, & + master_task, distrb_info) + call scatter_global_stress(stressp_3, work_g2, work_g1, & + master_task, distrb_info) + + call ice_read_global(nu_restart,0,work_g1,'ruf8',diag) ! stressp_2 + call ice_read_global(nu_restart,0,work_g2,'ruf8',diag) ! stressp_4 + call scatter_global_stress(stressp_2, work_g1, work_g2, & + master_task, distrb_info) + call scatter_global_stress(stressp_4, work_g2, work_g1, & + master_task, distrb_info) + + call ice_read_global(nu_restart,0,work_g1,'ruf8',diag) ! stressm_1 + call ice_read_global(nu_restart,0,work_g2,'ruf8',diag) ! stressm_3 + call scatter_global_stress(stressm_1, work_g1, work_g2, & + master_task, distrb_info) + call scatter_global_stress(stressm_3, work_g2, work_g1, & + master_task, distrb_info) + + call ice_read_global(nu_restart,0,work_g1,'ruf8',diag) ! stressm_2 + call ice_read_global(nu_restart,0,work_g2,'ruf8',diag) ! stressm_4 + call scatter_global_stress(stressm_2, work_g1, work_g2, & + master_task, distrb_info) + call scatter_global_stress(stressm_4, work_g2, work_g1, & + master_task, distrb_info) + + call ice_read_global(nu_restart,0,work_g1,'ruf8',diag) ! stress12_1 + call ice_read_global(nu_restart,0,work_g2,'ruf8',diag) ! stress12_3 + call scatter_global_stress(stress12_1, work_g1, work_g2, & + master_task, distrb_info) + call scatter_global_stress(stress12_3, work_g2, work_g1, & + master_task, distrb_info) + + call ice_read_global(nu_restart,0,work_g1,'ruf8',diag) ! stress12_2 + call ice_read_global(nu_restart,0,work_g2,'ruf8',diag) ! stress12_4 + call scatter_global_stress(stress12_2, work_g1, work_g2, & + master_task, distrb_info) + call scatter_global_stress(stress12_4, work_g2, work_g1, & + master_task, distrb_info) + + deallocate (work_g1, work_g2) + + !----------------------------------------------------------------- + ! 
ice mask for dynamics + !----------------------------------------------------------------- + if (my_task == master_task) & + write(nu_diag,*) 'ice mask for dynamics' + + call ice_read(nu_restart,0,work1,'ruf8',diag, & + field_loc_center, field_type_scalar) + + iceumask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceumask(i,j,iblk) = .true. + enddo + enddo + enddo + !$OMP END PARALLEL DO + + ! for mixed layer model + if (oceanmixed_ice) then + + if (my_task == master_task) & + write(nu_diag,*) 'min/max sst, frzmlt' + + call ice_read(nu_restart,0,sst,'ruf8',diag, & + field_loc_center, field_type_scalar) + call ice_read(nu_restart,0,frzmlt,'ruf8',diag, & + field_loc_center, field_type_scalar) + endif + + if (my_task == master_task) close(nu_restart) + + !----------------------------------------------------------------- + ! Ensure unused stress values in west and south ghost cells are 0 + !----------------------------------------------------------------- + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, nghost + do i = 1, nx_block + stressp_1 (i,j,iblk) = c0 + stressp_2 (i,j,iblk) = c0 + stressp_3 (i,j,iblk) = c0 + stressp_4 (i,j,iblk) = c0 + stressm_1 (i,j,iblk) = c0 + stressm_2 (i,j,iblk) = c0 + stressm_3 (i,j,iblk) = c0 + stressm_4 (i,j,iblk) = c0 + stress12_1(i,j,iblk) = c0 + stress12_2(i,j,iblk) = c0 + stress12_3(i,j,iblk) = c0 + stress12_4(i,j,iblk) = c0 + enddo + enddo + do j = 1, ny_block + do i = 1, nghost + stressp_1 (i,j,iblk) = c0 + stressp_2 (i,j,iblk) = c0 + stressp_3 (i,j,iblk) = c0 + stressp_4 (i,j,iblk) = c0 + stressm_1 (i,j,iblk) = c0 + stressm_2 (i,j,iblk) = c0 + stressm_3 (i,j,iblk) = c0 + stressm_4 (i,j,iblk) = c0 + stress12_1(i,j,iblk) = c0 + stress12_2(i,j,iblk) = c0 + stress12_3(i,j,iblk) = c0 + stress12_4(i,j,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO + + !----------------------------------------------------------------- + ! Ensure ice is binned in correct categories + ! (should not be necessary unless restarting from a run with + ! different category boundaries). + ! + ! If called, this subroutine does not give exact restart. + !----------------------------------------------------------------- +!!! call cleanup_itd + + !----------------------------------------------------------------- + ! compute aggregate ice state and open water area + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + + call aggregate (nx_block, ny_block, & + aicen(:,:,:,iblk), & + trcrn(:,:,:,:,iblk),& + vicen(:,:,:,iblk), & + vsnon(:,:,:,iblk), & + aice (:,:, iblk), & + trcr (:,:,:,iblk), & + vice (:,:, iblk), & + vsno (:,:, iblk), & + aice0(:,:, iblk), & + tmask(:,:, iblk), & + max_ntrcr, & + trcr_depend) + + aice_init(:,:,iblk) = aice(:,:,iblk) + + enddo + !$OMP END PARALLEL DO + + ! creates netcdf if restart_format = 'nc' + filename = trim(restart_dir) // '/iced.converted' + call dumpfile(filename) + call final_restart + ! stop + + ! 
if runid is bering then need to correct npt for istep0 + if (trim(runid) == 'bering') then + npt = npt - istep0 + endif + + end subroutine restartfile_v4 + +!======================================================================= + + end module ice_restart_driver + +!======================================================================= diff --git a/source/ice_restart_shared.F90 b/source/ice_restart_shared.F90 new file mode 100755 index 00000000..93061f26 --- /dev/null +++ b/source/ice_restart_shared.F90 @@ -0,0 +1,63 @@ +! SVN:$Id: ice_restart_shared.F90 607 2013-03-29 15:49:42Z eclare $ +!======================================================================= + + module ice_restart_shared + + use ice_kinds_mod + implicit none + private + public :: lenstr + + logical (kind=log_kind), public :: & + restart , & ! if true, initialize using restart file instead of defaults + restart_ext, & ! if true, read/write extended grid (with ghost cells) + use_restart_time ! if true, use time written in core restart file + + character (len=char_len), public :: & + restart_file , & ! output file for restart dump + runtype ! initial, continue, hybrid, branch + + character (len=char_len_long), public :: & + restart_dir , & ! directory name for restart dump + runid ! identifier for CESM coupled run or bering + + character (len=char_len_long), public :: & + pointer_file ! input pointer file for restarts + + character (len=char_len), public :: & + restart_format ! format of restart files 'nc' + + logical (kind=log_kind), public :: lcdf64 + +!======================================================================= + + contains + +!======================================================================= + +! Compute length of string by finding first non-blank +! character from the right. + + integer function lenstr(label) + + character*(*) label + + ! local variables + + integer (kind=int_kind) :: & + length, & ! length of character string + n ! loop index + + length = len(label) + do n=length,1,-1 + if( label(n:n) /= ' ' ) exit + enddo + lenstr = n + + end function lenstr + +!======================================================================= + + end module ice_restart_shared + +!======================================================================= diff --git a/source/ice_restoring.F90 b/source/ice_restoring.F90 new file mode 100755 index 00000000..114988ac --- /dev/null +++ b/source/ice_restoring.F90 @@ -0,0 +1,705 @@ +! SVN:$Id: ice_restoring.F90 825 2014-08-29 15:37:09Z eclare $ +!======================================================================= +! +! Reads and interpolates forcing data for atmosphere and ocean quantities. +! +! authors: Elizabeth C. Hunke, LANL + + module ice_restoring + + use ice_kinds_mod + use ice_blocks, only: nx_block, ny_block + use ice_domain_size, only: ncat, max_blocks, max_ntrcr + use ice_forcing, only: trestore, trest + use ice_state, only: aicen, vicen, vsnon, trcrn, ntrcr, bound_state, & + aice_init, aice0, aice, vice, vsno, trcr, trcr_depend + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + implicit none + private + public :: ice_HaloRestore_init, ice_HaloRestore + save + + logical (kind=log_kind), public :: & + restore_ice ! restore ice state if true + + !----------------------------------------------------------------- + ! state of the ice for each category + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension (:,:,:,:), allocatable :: & + aicen_rest , & ! 
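As a quick illustration of the lenstr helper defined in ice_restart_shared above (it returns the position of the last non-blank character, so trailing blanks are ignored), here is a minimal standalone sketch using a made-up restart file name:

program lenstr_demo
  implicit none
  ! trailing blanks are ignored; embedded blanks and dots count
  print *, last_nonblank('iced.1990-01-01   ')   ! prints 15
contains
  integer function last_nonblank(label)
    character(*), intent(in) :: label
    integer :: n
    do n = len(label), 1, -1
      if (label(n:n) /= ' ') exit
    enddo
    last_nonblank = n
  end function last_nonblank
end program lenstr_demo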
concentration of ice + vicen_rest , & ! volume per unit area of ice (m) + vsnon_rest ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (:,:,:,:,:), allocatable :: & + trcrn_rest ! tracers + +!======================================================================= + + contains + +!======================================================================= + +! Allocates and initializes arrays needed for restoring the ice state +! in cells surrounding the grid. + + + subroutine ice_HaloRestore_init + + use ice_blocks, only: block, get_block, nblocks_x, nblocks_y + use ice_communicate, only: my_task, master_task + use ice_constants, only: c0 + use ice_domain, only: ew_boundary_type, ns_boundary_type, & + nblocks, blocks_ice + use ice_fileunits, only: nu_diag + use ice_grid, only: tmask + use ice_flux, only: sst, Tf, Tair, salinz, Tmltz + use ice_itd, only: aggregate + use ice_restart_shared, only: restart_ext + + integer (int_kind) :: & + i,j,iblk,nt,n, &! dummy loop indices + ilo,ihi,jlo,jhi, &! beginning and end of physical domain + iglob(nx_block), &! global indices + jglob(ny_block), &! global indices + iblock, jblock, &! block indices + ibc, &! ghost cell column or row + npad ! padding column/row counter + + character (len=7), parameter :: & +! restore_ic = 'defined' ! otherwise restore to initial ice state + restore_ic = 'initial' ! restore to initial ice state + + type (block) :: & + this_block ! block info for current block + + if (.not. restore_ice) return + + if (ew_boundary_type == 'open' .and. & + ns_boundary_type == 'open' .and. .not.(restart_ext)) then + if (my_task == master_task) write (nu_diag,*) & + 'WARNING: Setting restart_ext = T for open boundaries' + restart_ext = .true. + endif + + allocate (aicen_rest(nx_block,ny_block,ncat,max_blocks), & + vicen_rest(nx_block,ny_block,ncat,max_blocks), & + vsnon_rest(nx_block,ny_block,ncat,max_blocks), & + trcrn_rest(nx_block,ny_block,ntrcr,ncat,max_blocks)) + +!----------------------------------------------------------------------- +! initialize +! halo cells have to be filled manually at this stage +! these arrays could be set to values read from a file... +!----------------------------------------------------------------------- + + if (trim(restore_ic) == 'defined') then + + ! restore to defined ice state + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & + !$OMP iglob,jglob,iblock,jblock) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + iglob = this_block%i_glob + jglob = this_block%j_glob + iblock = this_block%iblock + jblock = this_block%jblock + + call set_restore_var (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + iglob, jglob, & + iblock, jblock, & + Tair (:,:, iblk), sst (:,:, iblk), & + Tf (:,:, iblk), & + salinz(:,:,:, iblk), Tmltz(:,:,:, iblk), & + tmask(:,:, iblk), & + aicen_rest(:,:, :,iblk), & + trcrn_rest(:,:,:,:,iblk), ntrcr, & + vicen_rest(:,:, :,iblk), & + vsnon_rest(:,:, :,iblk)) + enddo ! iblk + !$OMP END PARALLEL DO + + else ! restore_ic + + ! restore to initial ice state + +! the easy way +! aicen_rest(:,:,:,:) = aicen(:,:,:,:) +! vicen_rest(:,:,:,:) = vicen(:,:,:,:) +! vsnon_rest(:,:,:,:) = vsnon(:,:,:,:) +! trcrn_rest(:,:,:,:,:) = trcrn(:,:,1:ntrcr,:,:) + +! 
the more precise way + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & + !$OMP i,j,n,nt,ibc,npad) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + if (this_block%iblock == 1) then ! west edge + if (trim(ew_boundary_type) /= 'cyclic') then + do n = 1, ncat + do j = 1, ny_block + do i = 1, ilo + aicen_rest(i,j,n,iblk) = aicen(ilo,j,n,iblk) + vicen_rest(i,j,n,iblk) = vicen(ilo,j,n,iblk) + vsnon_rest(i,j,n,iblk) = vsnon(ilo,j,n,iblk) + do nt = 1, ntrcr + trcrn_rest(i,j,nt,n,iblk) = trcrn(ilo,j,nt,n,iblk) + enddo + enddo + enddo + enddo + endif + endif + + if (this_block%iblock == nblocks_x) then ! east edge + if (trim(ew_boundary_type) /= 'cyclic') then + ! locate ghost cell column (avoid padding) + ibc = nx_block + do i = nx_block, 1, -1 + npad = 0 + if (this_block%i_glob(i) == 0) then + do j = 1, ny_block + npad = npad + this_block%j_glob(j) + enddo + endif + if (npad /= 0) ibc = ibc - 1 + enddo + + do n = 1, ncat + do j = 1, ny_block + do i = ihi, ibc + aicen_rest(i,j,n,iblk) = aicen(ihi,j,n,iblk) + vicen_rest(i,j,n,iblk) = vicen(ihi,j,n,iblk) + vsnon_rest(i,j,n,iblk) = vsnon(ihi,j,n,iblk) + do nt = 1, ntrcr + trcrn_rest(i,j,nt,n,iblk) = trcrn(ihi,j,nt,n,iblk) + enddo + enddo + enddo + enddo + endif + endif + + if (this_block%jblock == 1) then ! south edge + if (trim(ns_boundary_type) /= 'cyclic') then + do n = 1, ncat + do j = 1, jlo + do i = 1, nx_block + aicen_rest(i,j,n,iblk) = aicen(i,jlo,n,iblk) + vicen_rest(i,j,n,iblk) = vicen(i,jlo,n,iblk) + vsnon_rest(i,j,n,iblk) = vsnon(i,jlo,n,iblk) + do nt = 1, ntrcr + trcrn_rest(i,j,nt,n,iblk) = trcrn(ilo,j,nt,n,iblk) + enddo + enddo + enddo + enddo + endif + endif + + if (this_block%jblock == nblocks_y) then ! north edge + if (trim(ns_boundary_type) /= 'cyclic' .and. & + trim(ns_boundary_type) /= 'tripole' .and. & + trim(ns_boundary_type) /= 'tripoleT') then + ! locate ghost cell row (avoid padding) + ibc = ny_block + do j = ny_block, 1, -1 + npad = 0 + if (this_block%j_glob(j) == 0) then + do i = 1, nx_block + npad = npad + this_block%i_glob(i) + enddo + endif + if (npad /= 0) ibc = ibc - 1 + enddo + + do n = 1, ncat + do j = jhi, ibc + do i = 1, nx_block + aicen_rest(i,j,n,iblk) = aicen(i,jhi,n,iblk) + vicen_rest(i,j,n,iblk) = vicen(i,jhi,n,iblk) + vsnon_rest(i,j,n,iblk) = vsnon(i,jhi,n,iblk) + do nt = 1, ntrcr + trcrn_rest(i,j,nt,n,iblk) = trcrn(ihi,j,nt,n,iblk) + enddo + enddo + enddo + enddo + endif + endif + + enddo ! iblk + !$OMP END PARALLEL DO + + endif ! restore_ic + + if (my_task == master_task) & + write (nu_diag,*) 'ice restoring timescale = ',trestore,' days' + + end subroutine ice_HaloRestore_init + +!======================================================================= + +! initialize restoring variables, based on set_state_var +! this routine assumes boundaries are not cyclic + + subroutine set_restore_var (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + iglob, jglob, & + iblock, jblock, & + Tair, sst, & + Tf, & + salinz, Tmltz, & + tmask, aicen, & + trcrn, ntrcr, & + vicen, vsnon) + +! authors: E. C. 
Hunke, LANL + + use ice_blocks, only: nblocks_x, nblocks_y + use ice_constants, only: c0, c1, c2, p2, p5, rhoi, rhos, Lfresh, & + cp_ice, cp_ocn, Tsmelt, Tffresh + use ice_domain_size, only: nilyr, nslyr, ncat + use ice_state, only: nt_Tsfc, nt_qice, nt_qsno, nt_sice, nt_fbri, tr_brine + use ice_itd, only: hin_max + use ice_therm_mushy, only: enthalpy_mush + use ice_therm_shared, only: ktherm + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo, ihi , & ! physical domain indices + jlo, jhi , & ! + iglob(nx_block) , & ! global indices + jglob(ny_block) , & ! + iblock , & ! block indices + jblock , & ! + ntrcr ! number of tracers in use + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tair , & ! air temperature (K) + Tf , & ! freezing temperature (C) + sst ! sea surface temperature (C) ! currently not used + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(in) :: & + salinz , & ! initial salinity profile + Tmltz ! initial melting temperature profile + + logical (kind=log_kind), dimension (nx_block,ny_block), & + intent(in) :: & + tmask ! true for ice/ocean cells + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(out) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(out) :: & + trcrn ! ice tracers + ! 1: surface temperature of ice/snow (C) + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + ibc , & ! ghost cell column or row + npad , & ! padding column/row counter + k , & ! ice layer index + n , & ! thickness category index + it , & ! tracer index + icells ! number of cells initialized with ice + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! compressed indices for cells with restoring + + real (kind=dbl_kind) :: & + slope, Ti, hbar, & + ainit(ncat), & ! initial ice concentration + hinit(ncat), & ! initial ice thickness + hsno_init ! initial snow thickness + + indxi(:) = 0 + indxj(:) = 0 + + !----------------------------------------------------------------- + ! Initialize restoring variables everywhere on grid + !----------------------------------------------------------------- + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + aicen(i,j,n) = c0 + vicen(i,j,n) = c0 + vsnon(i,j,n) = c0 + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + if (ntrcr >= 2) then + do it = 2, ntrcr + trcrn(i,j,it,n) = c0 + enddo + endif + if (tr_brine) trcrn(i,j,nt_fbri,n) = c1 + enddo + enddo + enddo + + !----------------------------------------------------------------- + ! initial area and thickness in ice-occupied restoring cells + !----------------------------------------------------------------- + + hbar = c2 ! initial ice thickness + hsno_init = 0.20_dbl_kind ! initial snow thickness (m) + do n = 1, ncat + hinit(n) = c0 + ainit(n) = c0 + if (hbar > hin_max(n-1) .and. hbar < hin_max(n)) then + hinit(n) = hbar + ainit(n) = 0.95_dbl_kind ! initial ice concentration + endif + enddo + + !----------------------------------------------------------------- + ! Define cells where ice is placed (or other values are used) + ! Edges using initial values (zero, above) are commented out + !----------------------------------------------------------------- + + icells = 0 + if (iblock == 1) then ! 
west edge + do j = 1, ny_block + do i = 1, ilo + if (tmask(i,j)) then +! icells = icells + 1 +! indxi(icells) = i +! indxj(icells) = j + endif + enddo + enddo + endif + + if (iblock == nblocks_x) then ! east edge + ! locate ghost cell column (avoid padding) + ibc = nx_block + do i = nx_block, 1, -1 + npad = 0 + if (iglob(i) == 0) then + do j = 1, ny_block + npad = npad + jglob(j) + enddo + endif + if (npad /= 0) ibc = ibc - 1 + enddo + + do j = 1, ny_block + do i = ihi, ibc + if (tmask(i,j)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + endif + + if (jblock == 1) then ! south edge + do j = 1, jlo + do i = 1, nx_block + if (tmask(i,j)) then +! icells = icells + 1 +! indxi(icells) = i +! indxj(icells) = j + endif + enddo + enddo + endif + + if (jblock == nblocks_y) then ! north edge + ! locate ghost cell row (avoid padding) + ibc = ny_block + do j = ny_block, 1, -1 + npad = 0 + if (jglob(j) == 0) then + do i = 1, nx_block + npad = npad + iglob(i) + enddo + endif + if (npad /= 0) ibc = ibc - 1 + enddo + + do j = jhi, ibc + do i = 1, nx_block + if (tmask(i,j)) then +! icells = icells + 1 +! indxi(icells) = i +! indxj(icells) = j + endif + enddo + enddo + endif + + !----------------------------------------------------------------- + ! Set restoring variables + !----------------------------------------------------------------- + + do n = 1, ncat + + ! ice volume, snow volume +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + aicen(i,j,n) = ainit(n) + vicen(i,j,n) = hinit(n) * ainit(n) ! m + vsnon(i,j,n) = min(aicen(i,j,n)*hsno_init,p2*vicen(i,j,n)) + enddo ! ij + + ! surface temperature + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,nt_Tsfc,n) = min(Tsmelt, Tair(i,j) - Tffresh) !deg C + enddo + + ! ice enthalpy, salinity + do k = 1, nilyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! assume linear temp profile and compute enthalpy + slope = Tf(i,j) - trcrn(i,j,nt_Tsfc,n) + Ti = trcrn(i,j,nt_Tsfc,n) & + + slope*(real(k,kind=dbl_kind)-p5) & + /real(nilyr,kind=dbl_kind) + + if (ktherm == 2) then + ! enthalpy + trcrn(i,j,nt_qice+k-1,n) = & + enthalpy_mush(Ti, salinz(i,j,k)) + else + trcrn(i,j,nt_qice+k-1,n) = & + -(rhoi * (cp_ice*(Tmltz(i,j,k)-Ti) & + + Lfresh*(c1-Tmltz(i,j,k)/Ti) - cp_ocn*Tmltz(i,j,k))) + endif + + ! salinity + trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) + enddo ! ij + enddo ! nilyr + + ! snow enthalpy + do k = 1, nslyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + Ti = min(c0, trcrn(i,j,nt_Tsfc,n)) + trcrn(i,j,nt_qsno+k-1,n) = -rhos*(Lfresh - cp_ice*Ti) + + enddo ! ij + enddo ! nslyr + + enddo ! ncat + + end subroutine set_restore_var + +!======================================================================= + +! This subroutine is intended for restoring the ice state to desired +! values in cells surrounding the grid. +! Note: This routine will need to be modified for nghost > 1. +! We assume padding occurs only on east and north edges. + + subroutine ice_HaloRestore + + use ice_blocks, only: block, get_block, nblocks_x, nblocks_y + use ice_calendar, only: dt + use ice_constants, only: secday + use ice_domain, only: ew_boundary_type, ns_boundary_type, & + nblocks, blocks_ice + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,iblk,nt,n, &! 
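A minimal standalone sketch of the initial enthalpy profile set in set_restore_var above: a linear temperature profile between the surface and the freezing point is converted to a BL99-style ice enthalpy per layer. The physical constants and temperatures below are representative values chosen for illustration, not values taken from the model namelist:

program restore_enthalpy_demo
  implicit none
  integer, parameter :: nilyr = 4
  ! representative constants (kg/m3, J/kg/K, J/kg); the model takes these from ice_constants
  real(8), parameter :: rhoi = 917.d0, cp_ice = 2106.d0, cp_ocn = 4218.d0, Lfresh = 3.34d5
  real(8) :: Tsfc, Tf, Tmlt, slope, Ti, qice
  integer :: k
  Tsfc = -20.d0                 ! surface temperature (C)
  Tf   =  -1.8d0                ! basal freezing temperature (C)
  Tmlt =  -0.1d0                ! layer melting temperature (C); salinity dependent in the model
  slope = Tf - Tsfc
  do k = 1, nilyr
    Ti = Tsfc + slope*(dble(k) - 0.5d0)/dble(nilyr)        ! mid-layer temperature, linear profile
    qice = -rhoi*(cp_ice*(Tmlt - Ti) + Lfresh*(1.d0 - Tmlt/Ti) - cp_ocn*Tmlt)
    print '(a,i0,a,f8.2,a,es12.4)', 'layer ', k, ': T = ', Ti, '  q (J/m3) = ', qice
  enddo
end program restore_enthalpy_demo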
dummy loop indices + ilo,ihi,jlo,jhi, &! beginning and end of physical domain + ibc, &! ghost cell column or row + npad ! padding column/row counter + + type (block) :: & + this_block ! block info for current block + + real (dbl_kind) :: & + ctime ! dt/trest + + call ice_timer_start(timer_bound) + +!----------------------------------------------------------------------- +! +! Initialize +! +!----------------------------------------------------------------------- + + ! for now, use same restoring constant as for SST + if (trestore == 0) then + trest = dt ! use data instantaneously + else + trest = real(trestore,kind=dbl_kind) * secday ! seconds + endif + ctime = dt/trest + +!----------------------------------------------------------------------- +! +! Restore values in cells surrounding the grid +! +!----------------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & + !$OMP i,j,n,nt,ibc,npad) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + if (this_block%iblock == 1) then ! west edge + if (trim(ew_boundary_type) /= 'cyclic') then + do n = 1, ncat + do j = 1, ny_block + do i = 1, ilo + aicen(i,j,n,iblk) = aicen(i,j,n,iblk) & + + (aicen_rest(i,j,n,iblk)-aicen(i,j,n,iblk))*ctime + vicen(i,j,n,iblk) = vicen(i,j,n,iblk) & + + (vicen_rest(i,j,n,iblk)-vicen(i,j,n,iblk))*ctime + vsnon(i,j,n,iblk) = vsnon(i,j,n,iblk) & + + (vsnon_rest(i,j,n,iblk)-vsnon(i,j,n,iblk))*ctime + do nt = 1, ntrcr + trcrn(i,j,nt,n,iblk) = trcrn(i,j,nt,n,iblk) & + + (trcrn_rest(i,j,nt,n,iblk)-trcrn(i,j,nt,n,iblk))*ctime + enddo + enddo + enddo + enddo + endif + endif + + if (this_block%iblock == nblocks_x) then ! east edge + if (trim(ew_boundary_type) /= 'cyclic') then + ! locate ghost cell column (avoid padding) + ibc = nx_block + do i = nx_block, 1, -1 + npad = 0 + if (this_block%i_glob(i) == 0) then + do j = 1, ny_block + npad = npad + this_block%j_glob(j) + enddo + endif + if (npad /= 0) ibc = ibc - 1 + enddo + + do n = 1, ncat + do j = 1, ny_block + do i = ihi, ibc + aicen(i,j,n,iblk) = aicen(i,j,n,iblk) & + + (aicen_rest(i,j,n,iblk)-aicen(i,j,n,iblk))*ctime + vicen(i,j,n,iblk) = vicen(i,j,n,iblk) & + + (vicen_rest(i,j,n,iblk)-vicen(i,j,n,iblk))*ctime + vsnon(i,j,n,iblk) = vsnon(i,j,n,iblk) & + + (vsnon_rest(i,j,n,iblk)-vsnon(i,j,n,iblk))*ctime + do nt = 1, ntrcr + trcrn(i,j,nt,n,iblk) = trcrn(i,j,nt,n,iblk) & + + (trcrn_rest(i,j,nt,n,iblk)-trcrn(i,j,nt,n,iblk))*ctime + enddo + enddo + enddo + enddo + endif + endif + + if (this_block%jblock == 1) then ! south edge + if (trim(ns_boundary_type) /= 'cyclic') then + do n = 1, ncat + do j = 1, jlo + do i = 1, nx_block + aicen(i,j,n,iblk) = aicen(i,j,n,iblk) & + + (aicen_rest(i,j,n,iblk)-aicen(i,j,n,iblk))*ctime + vicen(i,j,n,iblk) = vicen(i,j,n,iblk) & + + (vicen_rest(i,j,n,iblk)-vicen(i,j,n,iblk))*ctime + vsnon(i,j,n,iblk) = vsnon(i,j,n,iblk) & + + (vsnon_rest(i,j,n,iblk)-vsnon(i,j,n,iblk))*ctime + do nt = 1, ntrcr + trcrn(i,j,nt,n,iblk) = trcrn(i,j,nt,n,iblk) & + + (trcrn_rest(i,j,nt,n,iblk)-trcrn(i,j,nt,n,iblk))*ctime + enddo + enddo + enddo + enddo + endif + endif + + if (this_block%jblock == nblocks_y) then ! north edge + if (trim(ns_boundary_type) /= 'cyclic' .and. & + trim(ns_boundary_type) /= 'tripole' .and. & + trim(ns_boundary_type) /= 'tripoleT') then + ! 
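A minimal standalone sketch of the Newtonian relaxation applied in the boundary loops above: each restored field is nudged toward its stored target with weight dt/trest, where trest is trestore days converted to seconds (or dt itself when trestore is zero, i.e. instantaneous restoring). The time step and time scale below are illustrative values only:

program halo_restore_demo
  implicit none
  real(8), parameter :: secday = 86400.d0
  real(8) :: dt, trestore, trest, ctime, aice, aice_rest
  dt       = 3600.d0            ! model time step (s), illustrative
  trestore = 30.d0              ! restoring time scale (days), illustrative
  trest = trestore*secday       ! seconds (dt would be used instead if trestore were 0)
  ctime = dt/trest              ! relaxation weight per step
  aice      = 0.10d0            ! current value in a boundary cell
  aice_rest = 0.95d0            ! stored restoring target
  aice = aice + (aice_rest - aice)*ctime
  print '(a,f10.6)', 'nudged aice after one step: ', aice
end program halo_restore_demo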
locate ghost cell row (avoid padding) + ibc = ny_block + do j = ny_block, 1, -1 + npad = 0 + if (this_block%j_glob(j) == 0) then + do i = 1, nx_block + npad = npad + this_block%i_glob(i) + enddo + endif + if (npad /= 0) ibc = ibc - 1 + enddo + + do n = 1, ncat + do j = jhi, ibc + do i = 1, nx_block + aicen(i,j,n,iblk) = aicen(i,j,n,iblk) & + + (aicen_rest(i,j,n,iblk)-aicen(i,j,n,iblk))*ctime + vicen(i,j,n,iblk) = vicen(i,j,n,iblk) & + + (vicen_rest(i,j,n,iblk)-vicen(i,j,n,iblk))*ctime + vsnon(i,j,n,iblk) = vsnon(i,j,n,iblk) & + + (vsnon_rest(i,j,n,iblk)-vsnon(i,j,n,iblk))*ctime + do nt = 1, ntrcr + trcrn(i,j,nt,n,iblk) = trcrn(i,j,nt,n,iblk) & + + (trcrn_rest(i,j,nt,n,iblk)-trcrn(i,j,nt,n,iblk))*ctime + enddo + enddo + enddo + enddo + endif + endif + + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_stop(timer_bound) + + end subroutine ice_HaloRestore + +!======================================================================= + + end module ice_restoring + +!======================================================================= diff --git a/source/ice_shortwave.F90 b/source/ice_shortwave.F90 new file mode 100755 index 00000000..52e51597 --- /dev/null +++ b/source/ice_shortwave.F90 @@ -0,0 +1,3935 @@ +! SVN:$Id: ice_shortwave.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= +! +! The albedo and absorbed/transmitted flux parameterizations for +! snow over ice, bare ice and ponded ice. +! +! Presently, two methods are included: +! (1) CCSM3 +! (2) Delta-Eddington +! as two distinct routines. +! Either can be called from the ice driver. +! +! The Delta-Eddington method is described here: +! +! Briegleb, B. P., and B. Light (2007): A Delta-Eddington Multiple +! Scattering Parameterization for Solar Radiation in the Sea Ice +! Component of the Community Climate System Model, NCAR Technical +! Note NCAR/TN-472+STR February 2007 +! +! name: originally ice_albedo +! +! authors: Bruce P. Briegleb, NCAR +! Elizabeth C. Hunke and William H. Lipscomb, LANL +! 2005, WHL: Moved absorbed_solar from ice_therm_vertical to this +! module and changed name from ice_albedo +! 2006, WHL: Added Delta Eddington routines from Bruce Briegleb +! 2006, ECH: Changed data statements in Delta Eddington routines (no +! longer hardwired) +! Converted to free source form (F90) +! 2007, BPB: Completely updated Delta-Eddington code, so that: +! (1) multiple snow layers enabled (i.e. nslyr > 1) +! (2) included SSL for snow surface absorption +! (3) added Sswabs for internal snow layer absorption +! (4) variable sea ice layers allowed (i.e. not hardwired) +! (5) updated all inherent optical properties +! (6) included algae absorption for sea ice lowest layer +! (7) very complete internal documentation included +! 2007, ECH: Improved efficiency +! 2008, BPB: Added aerosols to Delta Eddington code +! 
2013, ECH: merged with NCAR version, cleaned up + + module ice_shortwave + + use ice_kinds_mod + use ice_domain_size, only: nilyr, nslyr, ncat, n_aero, max_blocks, max_aero + use ice_constants + !use ice_blocks, only: nx_block, ny_block, block, get_block + use ice_blocks, only: nx_block, ny_block + use ice_diagnostics, only: npnt, print_points, pmloc, piloc, pjloc + use ice_fileunits, only: nu_diag + use ice_communicate, only: my_task + +#ifdef AusCOM + use cpl_parameters, only : cst_ocn_albedo, ocn_albedo + use ice_grid, only : TLAT +#endif + + implicit none + + private + public :: init_shortwave, run_dEdd, shortwave_ccsm3 + + character (len=char_len), public :: & + shortwave, & ! shortwave method, 'default' ('ccsm3') or 'dEdd' + albedo_type ! albedo parameterization, 'default' ('ccsm3') or 'constant' + ! shortwave='dEdd' overrides this parameter + + ! baseline albedos for ccsm3 shortwave, set in namelist + real (kind=dbl_kind), public :: & + albicev , & ! visible ice albedo for h > ahmax + albicei , & ! near-ir ice albedo for h > ahmax + albsnowv, & ! cold snow albedo, visible + albsnowi, & ! cold snow albedo, near IR +!ars599: 24032014 +! not sure if that is right!! +!ars599: 26032014 +! dT_mlt has been defined in new code +! mark out and use new code +! however are they the same varialbes? +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + snowpatch, & ! parameter for fractional snow area (m) +! dT_mlt , & ! change in temp to give dalb_mlt + ! albedo change + dalb_mlt , & ! albedo change per dT_mlt change + ! in temp for ice +#endif + ahmax ! thickness above which ice albedo is constant (m) + + ! category albedos + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public, save :: & + alvdrn , & ! visible direct albedo (fraction) + alidrn , & ! near-ir direct albedo (fraction) + alvdfn , & ! visible diffuse albedo (fraction) + alidfn ! near-ir diffuse albedo (fraction) + + ! albedo components for history + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public, save :: & + albicen, & ! bare ice + albsnon, & ! snow + albpndn, & ! pond + apeffn ! effective pond area used for radiation calculation + + ! shortwave components + real (kind=dbl_kind), & + dimension (nx_block,ny_block,nilyr,ncat,max_blocks), public, save :: & + Iswabsn ! SW radiation absorbed in ice layers (W m-2) + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,nslyr,ncat,max_blocks), public, save :: & + Sswabsn ! SW radiation absorbed in snow layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat,max_blocks), & + public, save :: & + fswsfcn , & ! SW absorbed at ice/snow surface (W m-2) + fswthrun , & ! SW through ice to ocean (W/m^2) + fswintn ! SW absorbed in ice interior, below surface (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr+1,ncat,max_blocks), & + public, save :: & + fswpenln ! visible SW entering ice layers (W m-2) + + ! dEdd tuning parameters, set in namelist + real (kind=dbl_kind), public :: & + R_ice , & ! sea ice tuning parameter; +1 > 1sig increase in albedo + R_pnd , & ! ponded ice tuning parameter; +1 > 1sig increase in albedo + R_snw , & ! snow tuning parameter; +1 > ~.01 change in broadband albedo + dT_mlt, & ! change in temp for non-melt to melt snow grain radius change (C) + rsnw_mlt, & ! maximum melting snow grain radius (10^-6 m) + kalg ! algae absorption coefficient for 0.5 m thick layer + + real (kind=dbl_kind), parameter, public :: & + hi_ssl = 0.050_dbl_kind, & ! 
ice surface scattering layer thickness (m) + hs_ssl = 0.040_dbl_kind ! snow surface scattering layer thickness (m) + + real (kind=dbl_kind), parameter :: & + hpmin = 0.005_dbl_kind, & ! minimum allowed melt pond depth (m) + hp0 = 0.200_dbl_kind ! pond depth below which transition to bare ice + + real (kind=dbl_kind) :: & + exp_min ! minimum exponential value + +#ifdef AusCOM +!ars599: 26032014: change to public + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + ocn_albedo2D +#endif + +!======================================================================= + + contains + +!======================================================================= +! +! Initialize shortwave + + subroutine init_shortwave + + use ice_calendar, only: nstreams + use ice_domain, only: nblocks, blocks_ice + use ice_flux, only: alvdf, alidf, alvdr, alidr, & + alvdr_ai, alidr_ai, alvdf_ai, alidf_ai, & + swvdr, swvdf, swidr, swidf, & + albice, albsno, albpnd, apeff_ai, albcnt, coszen, fsnow + use ice_orbital, only: init_orbit + use ice_state, only: aicen, vicen, vsnon, trcrn, nt_Tsfc + use ice_blocks, only: block, get_block + use ice_grid, only: tmask, tlat, tlon + use ice_meltpond_lvl, only: dhsn, ffracn + + integer (kind=int_kind) :: & + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! indirect indices for cells with aicen > puny + + integer (kind=int_kind) :: & + i, j, ij , & ! horizontal indices + iblk , & ! block index + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + n ! thickness category index + + real (kind=dbl_kind) :: cszn ! counter for history averaging + + type (block) :: & + this_block ! block information for current block + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,n) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + alvdf(i,j,iblk) = c0 + alidf(i,j,iblk) = c0 + alvdr(i,j,iblk) = c0 + alidr(i,j,iblk) = c0 + alvdr_ai(i,j,iblk) = c0 + alidr_ai(i,j,iblk) = c0 + alvdf_ai(i,j,iblk) = c0 + alidf_ai(i,j,iblk) = c0 + enddo + enddo + + ! Initialize + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + alvdrn(i,j,n,iblk) = c0 + alidrn(i,j,n,iblk) = c0 + alvdfn(i,j,n,iblk) = c0 + alidfn(i,j,n,iblk) = c0 + fswsfcn(i,j,n,iblk) = c0 + fswintn(i,j,n,iblk) = c0 + fswthrun(i,j,n,iblk) = c0 + enddo ! i + enddo ! j + enddo ! ncat + + fswpenln(:,:,:,:,iblk) = c0 + Iswabsn(:,:,:,:,iblk) = c0 + Sswabsn(:,:,:,:,iblk) = c0 + + enddo ! iblk + !$OMP END PARALLEL DO + + if (trim(shortwave) == 'dEdd') then ! delta Eddington + +#ifndef CCSMCOUPLED + ! These come from the driver in the coupled model. + call init_orbit ! initialize orbital parameters +#endif + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + ! 
initialize delta Eddington + call run_dEdd(ilo, ihi, jlo, jhi, & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + vsnon(:,:,:,iblk), trcrn(:,:,:,:,iblk), & + tlat(:,:,iblk), tlon(:,:,iblk), & + tmask(:,:,iblk), & + swvdr(:,:,iblk), swvdf(:,:,iblk), & + swidr(:,:,iblk), swidf(:,:,iblk), & + coszen(:,:,iblk), fsnow(:,:,iblk), & + alvdrn(:,:,:,iblk), alvdfn(:,:,:,iblk), & + alidrn(:,:,:,iblk), alidfn(:,:,:,iblk), & + fswsfcn(:,:,:,iblk), fswintn(:,:,:,iblk), & + fswthrun(:,:,:,iblk), fswpenln(:,:,:,:,iblk), & + Sswabsn(:,:,:,:,iblk), Iswabsn(:,:,:,:,iblk), & + albicen(:,:,:,iblk), albsnon(:,:,:,iblk), & + albpndn(:,:,:,iblk), apeffn(:,:,:,iblk), & + dhsn(:,:,:,iblk), ffracn(:,:,:,iblk), & + initonly = .true. ) + + enddo + !$OMP END PARALLEL DO + + else ! basic (ccsm3) shortwave + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + +#ifndef AusCOM + call shortwave_ccsm3(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + vsnon(:,:,:,iblk), & + trcrn(:,:,nt_Tsfc,:,iblk), & + swvdr(:,:, iblk), swvdf(:,:, iblk), & + swidr(:,:, iblk), swidf(:,:, iblk), & + alvdrn(:,:,:,iblk), alidrn(:,:,:,iblk), & + alvdfn(:,:,:,iblk), alidfn(:,:,:,iblk), & + fswsfcn(:,:,:,iblk), fswintn(:,:,:,iblk), & + fswthrun(:,:,:,iblk), & + fswpenln(:,:,:,:,iblk), & + Iswabsn(:,:,:,:,iblk), & + Sswabsn(:,:,:,:,iblk), & + albicen(:,:,:,iblk), albsnon(:,:,:,iblk), & + coszen(:,:,iblk)) +#else + if (cst_ocn_albedo) then + ocn_albedo2D(:,:,iblk) = ocn_albedo + else + ocn_albedo2D(:,:,iblk) = 0.069 - 0.011 * cos(2.0*TLAT(:,:,iblk)) + !latitude-dependent profile of Large & Yeager (2009) + endif + + call shortwave_ccsm3(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + vsnon(:,:,:,iblk), & + trcrn(:,:,nt_Tsfc,:,iblk), & + swvdr(:,:, iblk), swvdf(:,:, iblk), & + swidr(:,:, iblk), swidf(:,:, iblk), & + alvdrn(:,:,:,iblk), alidrn(:,:,:,iblk), & + alvdfn(:,:,:,iblk), alidfn(:,:,:,iblk), & + fswsfcn(:,:,:,iblk), fswintn(:,:,:,iblk), & + fswthrun(:,:,:,iblk), & + fswpenln(:,:,:,:,iblk), & + Iswabsn(:,:,:,:,iblk), & + Sswabsn(:,:,:,:,iblk), & + albicen(:,:,:,iblk), albsnon(:,:,:,iblk), & + coszen(:,:,iblk), & + ocn_albedo2D(:,:,iblk) ) +#endif + enddo ! nblocks + !$OMP END PARALLEL DO + + endif + + !----------------------------------------------------------------- + ! Aggregate albedos + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block, & + !$OMP ij,icells,cszn,indxi,indxj) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do n = 1, ncat + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j,n,iblk) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + alvdf(i,j,iblk) = alvdf(i,j,iblk) & + + alvdfn(i,j,n,iblk)*aicen(i,j,n,iblk) + alidf(i,j,iblk) = alidf(i,j,iblk) & + + alidfn(i,j,n,iblk)*aicen(i,j,n,iblk) + alvdr(i,j,iblk) = alvdr(i,j,iblk) & + + alvdrn(i,j,n,iblk)*aicen(i,j,n,iblk) + alidr(i,j,iblk) = alidr(i,j,iblk) & + + alidrn(i,j,n,iblk)*aicen(i,j,n,iblk) + + if (coszen(i,j,iblk) > puny) then ! 
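For reference, the latitude-dependent open-ocean albedo used in the AusCOM branch of init_shortwave above, 0.069 - 0.011*cos(2*TLAT) following Large and Yeager (2009), gives roughly 0.058 at the equator, 0.069 at 45 degrees and 0.080 at the poles. A small standalone check:

program ocn_albedo_demo
  implicit none
  real(8), parameter :: pi = 3.141592653589793d0
  real(8) :: tlat
  integer :: lat_deg
  do lat_deg = 0, 90, 45
    tlat = dble(lat_deg)*pi/180.d0        ! TLAT is in radians in the model
    print '(a,i2,a,f6.3)', 'lat ', lat_deg, ' deg: ocean albedo = ', &
          0.069d0 - 0.011d0*cos(2.d0*tlat)
  enddo
end program ocn_albedo_demo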
sun above horizon + albice(i,j,iblk) = albice(i,j,iblk) & + + albicen(i,j,n,iblk)*aicen(i,j,n,iblk) + albsno(i,j,iblk) = albsno(i,j,iblk) & + + albsnon(i,j,n,iblk)*aicen(i,j,n,iblk) + albpnd(i,j,iblk) = albpnd(i,j,iblk) & + + albpndn(i,j,n,iblk)*aicen(i,j,n,iblk) + endif + + apeff_ai(i,j,iblk) = apeff_ai(i,j,iblk) & + + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) + enddo + + enddo ! ncat + + !---------------------------------------------------------------- + ! Store grid box mean albedos and fluxes before scaling by aice + !---------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + alvdf_ai (i,j,iblk) = alvdf (i,j,iblk) + alidf_ai (i,j,iblk) = alidf (i,j,iblk) + alvdr_ai (i,j,iblk) = alvdr (i,j,iblk) + alidr_ai (i,j,iblk) = alidr (i,j,iblk) + + ! for history averaging + cszn = c0 + if (coszen(i,j,iblk) > puny) cszn = c1 + do n = 1, nstreams + albcnt(i,j,iblk,n) = albcnt(i,j,iblk,n) + cszn + enddo + enddo + enddo + + enddo ! nblocks + !$OMP END PARALLEL DO + + end subroutine init_shortwave + +!======================================================================= +! +! Driver for basic solar radiation from CCSM3. Albedos and absorbed solar. + + subroutine shortwave_ccsm3 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + aicen, vicen, & + vsnon, Tsfcn, & + swvdr, swvdf, & + swidr, swidf, & + alvdrn, alidrn, & + alvdfn, alidfn, & + fswsfc, fswint, & + fswthru, fswpenl, & + Iswabs, SSwabs, & + albin, albsn, & +#ifndef AusCOM + coszen) +#else + coszen, & + ocn_albedo2Da) +#endif + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen , & ! concentration of ice per category + vicen , & ! volume of ice per category + vsnon , & ! volume of ice per category + Tsfcn ! surface temperature + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + swvdr , & ! sw down, visible, direct (W/m^2) + swvdf , & ! sw down, visible, diffuse (W/m^2) + swidr , & ! sw down, near IR, direct (W/m^2) + swidf ! sw down, near IR, diffuse (W/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + alvdrn , & ! visible, direct, avg (fraction) + alidrn , & ! near-ir, direct, avg (fraction) + alvdfn , & ! visible, diffuse, avg (fraction) + alidfn , & ! near-ir, diffuse, avg (fraction) + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + fswint , & ! SW absorbed in ice interior, below surface (W m-2) + fswthru , & ! SW through ice to ocean (W m-2) + albin , & ! bare ice albedo + albsn ! snow albedo + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr+1,ncat), & + intent(inout) :: & + fswpenl ! SW entering ice layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + coszen ! cosine(zenith angle) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr,ncat), & + intent(inout) :: & + Iswabs ! SW absorbed in particular layer (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr,ncat), & + intent(inout) :: & + Sswabs ! SW absorbed in particular layer (W m-2) + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + icells , & ! number of ice-covered grid cells + n ! thickness category index + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi , & ! indices for ice-covered cells + indxj + + ! 
ice and snow albedo for each category + + real (kind=dbl_kind), dimension (nx_block,ny_block):: & + alvdrni, & ! visible, direct, ice (fraction) + alidrni, & ! near-ir, direct, ice (fraction) + alvdfni, & ! visible, diffuse, ice (fraction) + alidfni, & ! near-ir, diffuse, ice (fraction) + alvdrns, & ! visible, direct, snow (fraction) + alidrns, & ! near-ir, direct, snow (fraction) + alvdfns, & ! visible, diffuse, snow (fraction) + alidfns ! near-ir, diffuse, snow (fraction) + +#ifdef AusCOM + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + ocn_albedo2Da +#endif + + !----------------------------------------------------------------- + ! Solar radiation: albedo and absorbed shortwave + !----------------------------------------------------------------- + + ! For basic shortwave, set coszen to a constant between 0 and 1. + coszen(:,:) = p5 ! sun above the horizon + + do n = 1, ncat + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j,n) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + Sswabs(:,:,:,n) = c0 + + !----------------------------------------------------------------- + ! Compute albedos for ice and snow. + !----------------------------------------------------------------- + +#ifndef AusCOM + if (trim(albedo_type) == 'constant') then + call constant_albedos (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen(:,:,n), & + vsnon(:,:,n), & + Tsfcn(:,:,n), & + alvdrni, alidrni, & + alvdfni, alidfni, & + alvdrns, alidrns, & + alvdfns, alidfns, & + alvdrn(:,:,n), & + alidrn(:,:,n), & + alvdfn(:,:,n), & + alidfn(:,:,n), & + albin(:,:,n), & + albsn(:,:,n)) + else ! default + call compute_albedos (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen(:,:,n), & + vicen(:,:,n), & + vsnon(:,:,n), & + Tsfcn(:,:,n), & + alvdrni, alidrni, & + alvdfni, alidfni, & + alvdrns, alidrns, & + alvdfns, alidfns, & + alvdrn(:,:,n), & + alidrn(:,:,n), & + alvdfn(:,:,n), & + alidfn(:,:,n), & + albin(:,:,n), & + albsn(:,:,n)) + endif +#else + if (trim(albedo_type) == 'constant') then + call constant_albedos (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen(:,:,n), & + vsnon(:,:,n), & + Tsfcn(:,:,n), & + alvdrni, alidrni, & + alvdfni, alidfni, & + alvdrns, alidrns, & + alvdfns, alidfns, & + alvdrn(:,:,n), & + alidrn(:,:,n), & + alvdfn(:,:,n), & + alidfn(:,:,n), & + albin(:,:,n), & + albsn(:,:,n), & + ocn_albedo2Da) + else ! default + call compute_albedos (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen(:,:,n), & + vicen(:,:,n), & + vsnon(:,:,n), & + Tsfcn(:,:,n), & + alvdrni, alidrni, & + alvdfni, alidfni, & + alvdrns, alidrns, & + alvdfns, alidfns, & + alvdrn(:,:,n), & + alidrn(:,:,n), & + alvdfn(:,:,n), & + alidfn(:,:,n), & + albin(:,:,n), & + albsn(:,:,n), & + ocn_albedo2Da) + endif +#endif + + !----------------------------------------------------------------- + ! Compute solar radiation absorbed in ice and penetrating to ocean. + !----------------------------------------------------------------- + + call absorbed_solar (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen(:,:,n), & + vicen(:,:,n), & + vsnon(:,:,n), & + swvdr, swvdf, & + swidr, swidf, & + alvdrni, alvdfni, & + alidrni, alidfni, & + alvdrns, alvdfns, & + alidrns, alidfns, & + fswsfc(:,:,n), & + fswint(:,:,n), & + fswthru(:,:,n), & + fswpenl(:,:,:,n), & + Iswabs(:,:,:,n)) + + enddo ! ncat + + end subroutine shortwave_ccsm3 + +!======================================================================= +! +! 
Compute albedos for each thickness category + + subroutine compute_albedos (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen, vicen, & + vsnon, Tsfcn, & + alvdrni, alidrni, & + alvdfni, alidfni, & + alvdrns, alidrns, & + alvdfns, alidfns, & + alvdrn, alidrn, & + alvdfn, alidfn, & +#ifndef AusCOM + albin, albsn) +#else + albin, albsn, & + ocn_albedo2Da) +#endif + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of ice-covered grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi , & ! compressed indices for ice-covered cells + indxj + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aicen , & ! concentration of ice per category + vicen , & ! volume of ice per category + vsnon , & ! volume of ice per category + Tsfcn ! surface temperature + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + alvdrni , & ! visible, direct, ice (fraction) + alidrni , & ! near-ir, direct, ice (fraction) + alvdfni , & ! visible, diffuse, ice (fraction) + alidfni , & ! near-ir, diffuse, ice (fraction) + alvdrns , & ! visible, direct, snow (fraction) + alidrns , & ! near-ir, direct, snow (fraction) + alvdfns , & ! visible, diffuse, snow (fraction) + alidfns , & ! near-ir, diffuse, snow (fraction) + alvdrn , & ! visible, direct, avg (fraction) + alidrn , & ! near-ir, direct, avg (fraction) + alvdfn , & ! visible, diffuse, avg (fraction) + alidfn , & ! near-ir, diffuse, avg (fraction) + albin , & ! bare ice + albsn ! snow +#ifdef AusCOM + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + ocn_albedo2Da +#endif + + ! local variables + + real (kind=dbl_kind), parameter :: & + dT_melt = c1 , & ! change in temp to give dalb_mlt + ! albedo change + dalb_mlt = -0.075_dbl_kind, & ! albedo change per dT_melt change + ! in temp for ice + dalb_mltv = -p1 , & ! albedo vis change per dT_melt change + ! in temp for snow + dalb_mlti = -p15 ! albedo nir change per dT_melt change + ! in temp for snow + + integer (kind=int_kind) :: & + i, j + + real (kind=dbl_kind) :: & + hi , & ! ice thickness (m) + hs , & ! snow thickness (m) + albo, & ! effective ocean albedo, function of ice thickness + fh , & ! piecewise linear function of thickness + fT , & ! piecewise linear function of surface temperature + dTs , & ! difference of Tsfc and Timelt + fhtan,& ! factor used in albedo dependence on ice thickness + asnow ! fractional area of snow cover + + integer (kind=int_kind) :: & + ij ! horizontal index, combines i and j loops + + fhtan = atan(ahmax*c4) + + do j = 1, ny_block + do i = 1, nx_block +!ars599: 21032014 (2D_code) +! according to dhb599 cice use ond-D ocean albedo +! AusCOm use 2D. 
+#ifndef AusCOM + alvdrni(i,j) = albocn + alidrni(i,j) = albocn + alvdfni(i,j) = albocn + alidfni(i,j) = albocn + + alvdrns(i,j) = albocn + alidrns(i,j) = albocn + alvdfns(i,j) = albocn + alidfns(i,j) = albocn + + alvdrn(i,j) = albocn + alidrn(i,j) = albocn + alvdfn(i,j) = albocn + alidfn(i,j) = albocn + +#else + alvdrni(i,j) = ocn_albedo2Da(i,j) + alidrni(i,j) = ocn_albedo2Da(i,j) + alvdfni(i,j) = ocn_albedo2Da(i,j) + alidfni(i,j) = ocn_albedo2Da(i,j) + + alvdrns(i,j) = ocn_albedo2Da(i,j) + alidrns(i,j) = ocn_albedo2Da(i,j) + alvdfns(i,j) = ocn_albedo2Da(i,j) + alidfns(i,j) = ocn_albedo2Da(i,j) + + alvdrn(i,j) = ocn_albedo2Da(i,j) + alidrn(i,j) = ocn_albedo2Da(i,j) + alvdfn(i,j) = ocn_albedo2Da(i,j) + alidfn(i,j) = ocn_albedo2Da(i,j) +#endif + albin(i,j) = c0 + albsn(i,j) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! Compute albedo for each thickness category. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + hi = vicen(i,j) / aicen(i,j) + hs = vsnon(i,j) / aicen(i,j) + + ! bare ice, thickness dependence + fh = min(atan(hi*c4)/fhtan,c1) +#ifndef AusCOM + albo = albocn*(c1-fh) +#else + albo = ocn_albedo2Da(i,j)*(c1-fh) +#endif + alvdfni(i,j) = albicev*fh + albo + alidfni(i,j) = albicei*fh + albo + + ! bare ice, temperature dependence + dTs = Timelt - Tsfcn(i,j) + fT = min(dTs/dT_melt-c1,c0) + alvdfni(i,j) = alvdfni(i,j) - dalb_mlt*fT + alidfni(i,j) = alidfni(i,j) - dalb_mlt*fT + + ! avoid negative albedos for thin, bare, melting ice +#ifndef AusCOM + alvdfni(i,j) = max (alvdfni(i,j), albocn) + alidfni(i,j) = max (alidfni(i,j), albocn) +#else + alvdfni(i,j) = max (alvdfni(i,j), ocn_albedo2Da(i,j)) + alidfni(i,j) = max (alidfni(i,j), ocn_albedo2Da(i,j)) +#endif + + if (hs > puny) then + + alvdfns(i,j) = albsnowv + alidfns(i,j) = albsnowi + + ! snow on ice, temperature dependence + alvdfns(i,j) = alvdfns(i,j) - dalb_mltv*fT + alidfns(i,j) = alidfns(i,j) - dalb_mlti*fT + + endif ! hs > puny + + ! direct albedos (same as diffuse for now) + alvdrni(i,j) = alvdfni(i,j) + alidrni(i,j) = alidfni(i,j) + alvdrns(i,j) = alvdfns(i,j) + alidrns(i,j) = alidfns(i,j) + + + ! fractional area of snow cover + if (hs > puny) then + asnow = hs / (hs + snowpatch) + else + asnow = c0 + endif + + ! combine ice and snow albedos (for coupler) + alvdfn(i,j) = alvdfni(i,j)*(c1-asnow) + & + alvdfns(i,j)*asnow + alidfn(i,j) = alidfni(i,j)*(c1-asnow) + & + alidfns(i,j)*asnow + alvdrn(i,j) = alvdrni(i,j)*(c1-asnow) + & + alvdrns(i,j)*asnow + alidrn(i,j) = alidrni(i,j)*(c1-asnow) + & + alidrns(i,j)*asnow + + ! save ice and snow albedos (for history) + albin(i,j) = awtvdr*alvdrni(i,j) + awtidr*alidrni(i,j) & + + awtvdf*alvdfni(i,j) + awtidf*alidfni(i,j) + albsn(i,j) = awtvdr*alvdrns(i,j) + awtidr*alidrns(i,j) & + + awtvdf*alvdfns(i,j) + awtidf*alidfns(i,j) + + enddo ! ij + + end subroutine compute_albedos + +!======================================================================= +! +! Compute albedos for each thickness category + + subroutine constant_albedos (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen, & + vsnon, Tsfcn, & + alvdrni, alidrni, & + alvdfni, alidfni, & + alvdrns, alidrns, & + alvdfns, alidfns, & + alvdrn, alidrn, & + alvdfn, alidfn, & +#ifndef AusCOM + albin, albsn) +#else + albin, albsn, & + ocn_albedo2Da) +#endif + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! 
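A minimal standalone sketch of the CCSM3 albedo logic in compute_albedos above: the bare-ice albedo is blended with the ocean albedo through the arctangent thickness weight fh, and the ice and snow albedos are then combined using the fractional snow cover hs/(hs+snowpatch). The parameter values below are representative stand-ins for the namelist settings:

program ccsm3_albedo_demo
  implicit none
  ! representative stand-ins for namelist values (albocn, albicev, albsnowv, ahmax, snowpatch)
  real(8), parameter :: albocn = 0.06d0, albicev = 0.78d0, albsnowv = 0.98d0
  real(8), parameter :: ahmax = 0.3d0, snowpatch = 0.02d0
  real(8) :: hi, hs, fh, albo, alb_ice, asnow, alb_cell
  hi = 0.2d0                                        ! ice thickness (m)
  hs = 0.05d0                                       ! snow thickness (m)
  fh = min(atan(4.d0*hi)/atan(4.d0*ahmax), 1.d0)    ! thickness weight, saturates above ahmax
  albo = albocn*(1.d0 - fh)                         ! ocean contribution for thin ice
  alb_ice = albicev*fh + albo                       ! cold bare-ice visible albedo
  asnow = hs/(hs + snowpatch)                       ! fractional snow cover
  alb_cell = alb_ice*(1.d0 - asnow) + albsnowv*asnow
  print '(3(a,f6.3))', 'fh = ', fh, '  bare ice = ', alb_ice, '  ice+snow = ', alb_cell
end program ccsm3_albedo_demo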
block dimensions + icells ! number of ice-covered grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi , & ! compressed indices for ice-covered cells + indxj + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aicen , & ! concentration of ice per category + vsnon , & ! volume of ice per category + Tsfcn ! surface temperature + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + alvdrni , & ! visible, direct, ice (fraction) + alidrni , & ! near-ir, direct, ice (fraction) + alvdfni , & ! visible, diffuse, ice (fraction) + alidfni , & ! near-ir, diffuse, ice (fraction) + alvdrns , & ! visible, direct, snow (fraction) + alidrns , & ! near-ir, direct, snow (fraction) + alvdfns , & ! visible, diffuse, snow (fraction) + alidfns , & ! near-ir, diffuse, snow (fraction) + alvdrn , & ! visible, direct, avg (fraction) + alidrn , & ! near-ir, direct, avg (fraction) + alvdfn , & ! visible, diffuse, avg (fraction) + alidfn , & ! near-ir, diffuse, avg (fraction) + albin , & ! bare ice + albsn ! snow + +#ifdef AusCOM + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + ocn_albedo2Da +#endif + ! local variables + + real (kind=dbl_kind), parameter :: & + warmice = 0.68_dbl_kind, & + coldice = 0.70_dbl_kind, & + warmsnow = 0.77_dbl_kind, & + coldsnow = 0.81_dbl_kind + + integer (kind=int_kind) :: & + i, j + + real (kind=dbl_kind) :: & + hs ! snow thickness (m) + + integer (kind=int_kind) :: & + ij ! horizontal index, combines i and j loops + + do j = 1, ny_block + do i = 1, nx_block +!ars599: 21032014 (2D_code) +#ifndef AusCOM + alvdrn(i,j) = albocn + alidrn(i,j) = albocn + alvdfn(i,j) = albocn + alidfn(i,j) = albocn +#else + alvdrn(i,j) = ocn_albedo2Da(i,j) + alidrn(i,j) = ocn_albedo2Da(i,j) + alvdfn(i,j) = ocn_albedo2Da(i,j) + alidfn(i,j) = ocn_albedo2Da(i,j) +#endif + albin(i,j) = c0 + albsn(i,j) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! Compute albedo for each thickness category. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + hs = vsnon(i,j) / aicen(i,j) + + if (hs > puny) then + ! snow, temperature dependence + if (Tsfcn(i,j) >= -c2*puny) then + alvdfn(i,j) = warmsnow + alidfn(i,j) = warmsnow + else + alvdfn(i,j) = coldsnow + alidfn(i,j) = coldsnow + endif + else ! hs < puny + ! bare ice, temperature dependence + if (Tsfcn(i,j) >= -c2*puny) then + alvdfn(i,j) = warmice + alidfn(i,j) = warmice + else + alvdfn(i,j) = coldice + alidfn(i,j) = coldice + endif + endif ! hs > puny + + ! direct albedos (same as diffuse for now) + alvdrn (i,j) = alvdfn(i,j) + alidrn (i,j) = alidfn(i,j) + + alvdrni(i,j) = alvdrn(i,j) + alidrni(i,j) = alidrn(i,j) + alvdrns(i,j) = alvdrn(i,j) + alidrns(i,j) = alidrn(i,j) + alvdfni(i,j) = alvdfn(i,j) + alidfni(i,j) = alidfn(i,j) + alvdfns(i,j) = alvdfn(i,j) + alidfns(i,j) = alidfn(i,j) + + ! save ice and snow albedos (for history) + albin(i,j) = awtvdr*alvdrni(i,j) + awtidr*alidrni(i,j) & + + awtvdf*alvdfni(i,j) + awtidf*alidfni(i,j) + albsn(i,j) = awtvdr*alvdrns(i,j) + awtidr*alidrns(i,j) & + + awtvdf*alvdfns(i,j) + awtidf*alidfns(i,j) + + enddo ! ij + + end subroutine constant_albedos + +!======================================================================= +! +! Compute solar radiation absorbed in ice and penetrating to ocean +! +! authors William H. Lipscomb, LANL +! 
C. M. Bitz, UW + + subroutine absorbed_solar (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen, & + vicen, vsnon, & + swvdr, swvdf, & + swidr, swidf, & + alvdrni, alvdfni, & + alidrni, alidfni, & + alvdrns, alvdfns, & + alidrns, alidfns, & + fswsfc, fswint, & + fswthru, fswpenl, & + Iswabs) + + use ice_therm_shared, only: heat_capacity + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aicen , & ! fractional ice area + vicen , & ! ice volume + vsnon , & ! snow volume + swvdr , & ! sw down, visible, direct (W/m^2) + swvdf , & ! sw down, visible, diffuse (W/m^2) + swidr , & ! sw down, near IR, direct (W/m^2) + swidf , & ! sw down, near IR, diffuse (W/m^2) + alvdrni , & ! visible, direct albedo,ice + alidrni , & ! near-ir, direct albedo,ice + alvdfni , & ! visible, diffuse albedo,ice + alidfni , & ! near-ir, diffuse albedo,ice + alvdrns , & ! visible, direct albedo, snow + alidrns , & ! near-ir, direct albedo, snow + alvdfns , & ! visible, diffuse albedo, snow + alidfns ! near-ir, diffuse albedo, snow + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out):: & + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + fswint , & ! SW absorbed in ice interior, below surface (W m-2) + fswthru ! SW through ice to ocean (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(out) :: & + Iswabs ! SW absorbed in particular layer (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr+1), & + intent(out) :: & + fswpenl ! visible SW entering ice layers (W m-2) + + ! local variables + + real (kind=dbl_kind), parameter :: & + i0vis = 0.70_dbl_kind ! fraction of penetrating solar rad (visible) + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k ! ice layer index + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + fswpen , & ! SW penetrating beneath surface (W m-2) + trantop , & ! transmitted frac of penetrating SW at layer top + tranbot ! transmitted frac of penetrating SW at layer bot + + real (kind=dbl_kind) :: & + swabs , & ! net SW down at surface (W m-2) + swabsv , & ! swabs in vis (wvlngth < 700nm) (W/m^2) + swabsi , & ! swabs in nir (wvlngth > 700nm) (W/m^2) + fswpenvdr , & ! penetrating SW, vis direct + fswpenvdf , & ! penetrating SW, vis diffuse + hi , & ! ice thickness (m) + hs , & ! snow thickness (m) + hilyr , & ! ice layer thickness + asnow ! fractional area of snow cover + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + fswsfc (i,j) = c0 + fswint (i,j) = c0 + fswthru(i,j) = c0 + fswpen (i,j) = c0 + trantop(i,j) = c0 + tranbot(i,j) = c0 + enddo + enddo + Iswabs (:,:,:) = c0 + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + hs = vsnon(i,j) / aicen(i,j) + + !----------------------------------------------------------------- + ! Fractional snow cover + !----------------------------------------------------------------- + if (hs > puny) then + asnow = hs / (hs + snowpatch) + else + asnow = c0 + endif + + !----------------------------------------------------------------- + ! 
Shortwave flux absorbed at surface, absorbed internally, + ! and penetrating to mixed layer. + ! This parameterization assumes that all IR is absorbed at the + ! surface; only visible is absorbed in the ice interior or + ! transmitted to the ocean. + !----------------------------------------------------------------- + + swabsv = swvdr(i,j) * ( (c1-alvdrni(i,j))*(c1-asnow) & + + (c1-alvdrns(i,j))*asnow ) & + + swvdf(i,j) * ( (c1-alvdfni(i,j))*(c1-asnow) & + + (c1-alvdfns(i,j))*asnow ) + + swabsi = swidr(i,j) * ( (c1-alidrni(i,j))*(c1-asnow) & + + (c1-alidrns(i,j))*asnow ) & + + swidf(i,j) * ( (c1-alidfni(i,j))*(c1-asnow) & + + (c1-alidfns(i,j))*asnow ) + + swabs = swabsv + swabsi + + fswpenvdr = swvdr(i,j) * (c1-alvdrni(i,j)) * (c1-asnow) * i0vis + fswpenvdf = swvdf(i,j) * (c1-alvdfni(i,j)) * (c1-asnow) * i0vis + + ! no penetrating radiation in near IR +! fswpenidr = swidr(i,j) * (c1-alidrni(i,j)) * (c1-asnow) * i0nir +! fswpenidf = swidf(i,j) * (c1-alidfni(i,j)) * (c1-asnow) * i0nir + + fswpen(i,j) = fswpenvdr + fswpenvdf + + fswsfc(i,j) = swabs - fswpen(i,j) + + trantop(i,j) = c1 ! transmittance at top of ice + + enddo ! ij + + !----------------------------------------------------------------- + ! penetrating SW absorbed in each ice layer + !----------------------------------------------------------------- + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + hi = vicen(i,j) / aicen(i,j) + hilyr = hi / real(nilyr,kind=dbl_kind) + + tranbot(i,j) = exp (-kappav * hilyr * real(k,kind=dbl_kind)) + Iswabs(i,j,k) = fswpen(i,j) * (trantop(i,j)-tranbot(i,j)) + + ! bottom of layer k = top of layer k+1 + trantop(i,j) = tranbot(i,j) + + ! bgc layer model + if (k == 1) then ! surface flux + fswpenl(i,j,k) = fswpen(i,j) + fswpenl(i,j,k+1) = fswpen(i,j) * tranbot(i,j) + else + fswpenl(i,j,k+1) = fswpen(i,j) * tranbot(i,j) + endif + enddo ! ij + enddo ! nilyr + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! SW penetrating thru ice into ocean + fswthru(i,j) = fswpen(i,j) * tranbot(i,j) + + ! SW absorbed in ice interior + fswint(i,j) = fswpen(i,j) - fswthru(i,j) + enddo ! ij + + !---------------------------------------------------------------- + ! if zero-layer model (no heat capacity), no SW is absorbed in ice + ! interior, so add to surface absorption + !---------------------------------------------------------------- + + if (.not. heat_capacity) then + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! SW absorbed at snow/ice surface + fswsfc(i,j) = fswsfc(i,j) + fswint(i,j) + + ! SW absorbed in ice interior (nilyr = 1) + fswint(i,j) = c0 + Iswabs(i,j,1) = c0 + + enddo ! ij + + endif ! heat_capacity + + end subroutine absorbed_solar + +! End ccsm3 shortwave method +!======================================================================= +! Begin Delta-Eddington shortwave method + +! Compute initial data for Delta-Eddington method, specifically, +! the approximate exponential look-up table. +! +! author: Bruce P. Briegleb, NCAR +! 2011 ECH modified for melt pond tracers +! 
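A minimal standalone sketch of the internal absorption computed in absorbed_solar above: the penetrating visible flux (the i0vis = 0.70 fraction of the net visible shortwave) decays through the ice following Beer's law, each layer absorbing the difference in transmittance across it, and the remainder passes through to the ocean. The extinction coefficient below is an assumed CICE-like value:

program absorbed_solar_demo
  implicit none
  integer, parameter :: nilyr = 4
  real(8), parameter :: kappav = 1.4d0      ! visible extinction in ice (1/m), assumed value
  real(8) :: fswpen, hilyr, trantop, tranbot
  integer :: k
  fswpen = 50.d0                            ! SW penetrating below the surface (W/m2), illustrative
  hilyr  = 1.0d0/dble(nilyr)                ! layer thickness for 1 m of ice
  trantop = 1.d0                            ! transmittance at the top of the ice
  do k = 1, nilyr
    tranbot = exp(-kappav*hilyr*dble(k))    ! Beer's law to the bottom of layer k
    print '(a,i0,a,f8.3)', 'layer ', k, ' absorbs (W/m2): ', fswpen*(trantop - tranbot)
    trantop = tranbot
  enddo
  print '(a,f8.3)', 'transmitted to ocean (W/m2): ', fswpen*tranbot
end program absorbed_solar_demo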
2013 ECH merged with NCAR version + + subroutine run_dEdd(ilo,ihi,jlo,jhi, & + aicen, vicen, & + vsnon, trcrn, & + tlat, tlon, & + tmask, & + swvdr, swvdf, & + swidr, swidf, & + coszen, fsnow, & + alvdrn, alvdfn, & + alidrn, alidfn, & + fswsfcn, fswintn, & + fswthrun, fswpenln, & + Sswabsn, Iswabsn, & + albicen, albsnon, & + albpndn, apeffn, & + dhsn, ffracn, & + initonly ) + + use ice_calendar, only: dt + use ice_itd, only: hs_min + use ice_meltpond_cesm, only: hs0 + use ice_meltpond_topo, only: hp1 + use ice_meltpond_lvl, only: hs1, pndaspect + use ice_orbital, only: compute_coszen + use ice_state, only: ntrcr, nt_Tsfc, nt_alvl, nt_apnd, nt_hpnd, nt_ipnd, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo + use ice_domain_size, only: max_ntrcr + + integer (kind=int_kind), intent(in) :: & + ilo,ihi,jlo,jhi + + logical(kind=log_kind), dimension(nx_block,ny_block), intent(in) :: & + tmask ! land/boundary mask, thickness (T-cell) + + real(kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + tlat, & ! latitude of temp pts (radians) + tlon, & ! longitude of temp pts (radians) + swvdr, & ! sw down, visible, direct (W/m^2) + swvdf, & ! sw down, visible, diffuse (W/m^2) + swidr, & ! sw down, near IR, direct (W/m^2) + swidf, & ! sw down, near IR, diffuse (W/m^2) + fsnow ! snowfall rate (kg/m^2 s) + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), intent(in) :: & + aicen, & ! concentration of ice + vicen, & ! volume per unit area of ice (m) + vsnon, & ! volume per unit area of snow (m) + ffracn ! fraction of fsurfn used to melt ipond + + real(kind=dbl_kind), dimension(nx_block,ny_block,max_ntrcr,ncat), intent(in) :: & + trcrn ! tracers + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), intent(inout) :: & + dhsn ! depth difference for snow on sea ice and pond ice + + real(kind=dbl_kind), dimension(nx_block,ny_block), intent(out) :: & + coszen ! cosine solar zenith angle, < 0 for sun below horizon + + real(kind=dbl_kind), dimension(nx_block,ny_block,ncat), intent(inout) :: & + alvdrn, & ! visible direct albedo (fraction) + alvdfn, & ! near-ir direct albedo (fraction) + alidrn, & ! visible diffuse albedo (fraction) + alidfn, & ! near-ir diffuse albedo (fraction) + fswsfcn, & ! SW absorbed at ice/snow surface (W m-2) + fswintn, & ! SW absorbed in ice interior, below surface (W m-2) + fswthrun, & ! SW through ice to ocean (W/m^2) + albicen, & ! albedo bare ice + albsnon, & ! albedo snow + albpndn, & ! albedo pond + apeffn ! effective pond area used for radiation calculation + + real(kind=dbl_kind), dimension(nx_block,ny_block,nslyr,ncat), intent(inout) :: & + Sswabsn ! SW radiation absorbed in snow layers (W m-2) + + real(kind=dbl_kind), dimension(nx_block,ny_block,nilyr,ncat), intent(inout) :: & + Iswabsn ! SW radiation absorbed in ice layers (W m-2) + + real(kind=dbl_kind), dimension(nx_block,ny_block,nilyr+1,ncat), intent(inout) :: & + fswpenln ! visible SW entering ice layers (W m-2) + + logical (kind=log_kind), optional :: & + initonly ! flag to indicate init only, default is false + + ! local temporary variables + + integer (kind=int_kind) :: & + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! indirect indices for cells with aicen > puny + + ! other local variables + ! snow variables for Delta-Eddington shortwave + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + fsn , & ! snow horizontal fraction + hsn ! 
snow depth (m) + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr) :: & + rhosnwn , & ! snow density (kg/m3) + rsnwn ! snow grain radius (micrometers) + + ! pond variables for Delta-Eddington shortwave + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + fpn , & ! pond fraction of ice cover + hpn ! actual pond depth (m) + + integer (kind=int_kind) :: & + i, j, ij , & ! horizontal indices + n ! thickness category index + + real (kind=dbl_kind) :: & + ipn , & ! refrozen pond ice thickness (m), mean over ice fraction + hp , & ! pond depth + hs , & ! snow depth + asnow , & ! fractional area of snow cover + rp , & ! volume fraction of retained melt water to total liquid content + hmx , & ! maximum available snow infiltration equivalent depth + dhs , & ! local difference in snow depth on sea ice and pond ice + spn , & ! snow depth on refrozen pond (m) + tmp ! 0 or 1 + + logical (kind=log_kind) :: & + linitonly ! local initonly value + + real (kind=dbl_kind), parameter :: & + argmax = c10 ! maximum argument of exponential + + linitonly = .false. + if (present(initonly)) then + linitonly = initonly + endif + + exp_min = exp(-argmax) + + ! identify ice-ocean cells + icells = 0 + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + ! cosine of the zenith angle + call compute_coszen (nx_block, ny_block, & + icells, & + indxi, indxj, & + tlat (:,:), tlon(:,:), & + coszen(:,:), dt) + + do n = 1, ncat + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j,n) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + ! note that rhoswn, rsnw, fp, hp and Sswabs ARE NOT dimensioned with ncat + ! BPB 19 Dec 2006 + + ! set snow properties + call shortwave_dEdd_set_snow(nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen(:,:,n), vsnon(:,:,n), & + trcrn(:,:,nt_Tsfc,n), fsn, hsn, & + rhosnwn, rsnwn) + + ! set pond properties + if (tr_pond_cesm) then + apeffn(:,:,n) = c0 ! for history + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! fraction of ice area + fpn(i,j) = trcrn(i,j,nt_apnd,n) + ! pond depth over fraction fpn + hpn(i,j) = trcrn(i,j,nt_hpnd,n) + ! snow infiltration + if (hsn(i,j) >= hs_min .and. hs0 > puny) then + asnow = min(hsn(i,j)/hs0, c1) ! delta-Eddington formulation + fpn(i,j) = (c1 - asnow) * fpn(i,j) + hpn(i,j) = pndaspect * fpn(i,j) + endif + apeffn(i,j,n) = fpn(i,j) ! for history + enddo + + elseif (tr_pond_lvl) then + apeffn(:,:,n) = c0 ! for history + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + fpn(i,j) = c0 ! fraction of ice covered in pond + hpn(i,j) = c0 ! pond depth over fpn + + ! refrozen pond lid thickness avg over ice + ! allow snow to cover pond ice + ipn = trcrn(i,j,nt_alvl,n) * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,nt_ipnd,n) + dhs = dhsn(i,j,n) ! snow depth difference, sea ice - pond + if (.not. linitonly .and. ipn > puny .and. & + dhs < puny .and. fsnow(i,j)*dt > hs_min) & + dhs = hsn(i,j) - fsnow(i,j)*dt ! initialize dhs>0 + spn = hsn(i,j) - dhs ! snow depth on pond ice + if (.not. linitonly .and. ipn*spn < puny) dhs = c0 + dhsn(i,j,n) = dhs ! save: constant until reset to 0 + + ! not using ipn assumes that lid ice is perfectly clear + ! if (ipn <= 0.3_dbl_kind) then + + ! fraction of ice area + fpn(i,j) = trcrn(i,j,nt_apnd,n) * trcrn(i,j,nt_alvl,n) + ! pond depth over fraction fpn + hpn(i,j) = trcrn(i,j,nt_hpnd,n) + + ! 
reduce effective pond area absorbing surface heat flux + ! due to flux already having been used to melt pond ice + fpn(i,j) = (c1 - ffracn(i,j,n)) * fpn(i,j) + + ! taper pond area with snow on pond ice + if (dhs > puny .and. spn >= puny .and. hs1 > puny) then + asnow = min(spn/hs1, c1) + fpn(i,j) = (c1 - asnow) * fpn(i,j) + endif + + ! infiltrate snow + hp = hpn(i,j) + if (hp > puny) then + hs = hsn(i,j) + rp = rhofresh*hp/(rhofresh*hp + rhos*hs) + if (rp < p15) then + fpn(i,j) = c0 + hpn(i,j) = c0 + else + hmx = hs*(rhofresh - rhos)/rhofresh + tmp = max(c0, sign(c1, hp-hmx)) ! 1 if hp>=hmx, else 0 + hp = (rhofresh*hp + rhos*hs*tmp) & + / (rhofresh - rhos*(c1-tmp)) + hsn(i,j) = hs - hp*fpn(i,j)*(c1-tmp) + hpn(i,j) = hp * tmp + fpn(i,j) = fpn(i,j) * tmp + endif + fsn(i,j) = min(fsn(i,j), c1-fpn(i,j)) + + endif ! hp > puny + + ! endif ! masking by lid ice + apeffn(i,j,n) = fpn(i,j) ! for history + enddo ! ij + + elseif (tr_pond_topo) then + apeffn(:,:,n) = c0 ! for history + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + ! Lid effective if thicker than hp1 + if (trcrn(i,j,nt_apnd,n)*aicen(i,j,n) > puny .and. & + trcrn(i,j,nt_ipnd,n) < hp1) then + fpn(i,j) = trcrn(i,j,nt_apnd,n) + else + fpn(i,j) = c0 + endif + if (trcrn(i,j,nt_apnd,n) > puny) then + hpn(i,j) = trcrn(i,j,nt_hpnd,n) + else + fpn(i,j) = c0 + hpn(i,j) = c0 + endif + + ! If ponds are present snow fraction reduced to + ! non-ponded part dEdd scheme + fsn(i,j) = min(fsn(i,j), c1-fpn(i,j)) + + apeffn(i,j,n) = fpn(i,j) + enddo + else + call shortwave_dEdd_set_pond(nx_block, ny_block, & + icells, & + indxi, indxj, & + trcrn(:,:,nt_Tsfc,n), & + fsn, fpn, & + hpn) + apeffn(:,:,n) = fpn(:,:) ! for history + fpn = c0 + hpn = c0 + endif + + call shortwave_dEdd(nx_block, ny_block, & + ntrcr, icells, & + indxi, indxj, & + coszen(:,:), & + aicen(:,:,n), vicen(:,:,n), & + hsn, fsn, & + rhosnwn, rsnwn, & + fpn, hpn, & + trcrn(:,:,1:ntrcr,n), & + swvdr(:,:), swvdf(:,:), & + swidr(:,:), swidf(:,:), & + alvdrn(:,:,n), alvdfn(:,:,n), & + alidrn(:,:,n), alidfn(:,:,n), & + fswsfcn(:,:,n), fswintn(:,:,n), & + fswthrun(:,:,n), & + Sswabsn(:,:,:,n), & + Iswabsn(:,:,:,n), & + albicen(:,:,n), & + albsnon(:,:,n), albpndn(:,:,n), & + fswpenln(:,:,:,n)) + + enddo ! ncat + + end subroutine run_dEdd + +!======================================================================= +! +! Compute snow/bare ice/ponded ice shortwave albedos, absorbed and transmitted +! flux using the Delta-Eddington solar radiation method as described in: +! +! "A Delta-Eddington Multiple Scattering Parameterization for Solar Radiation +! in the Sea Ice Component of the Community Climate System Model" +! B.P.Briegleb and B.Light NCAR/TN-472+STR February 2007 +! +! Compute shortwave albedos and fluxes for three surface types: +! snow over ice, bare ice and ponded ice. +! +! Albedos and fluxes are output for later use by thermodynamic routines. +! Invokes three calls to compute_dEdd, which sets inherent optical properties +! appropriate for the surface type. Within compute_dEdd, a call to solution_dEdd +! evaluates the Delta-Eddington solution. The final albedos and fluxes are then +! evaluated in compute_dEdd. Albedos and fluxes are transferred to output in +! this routine. +! +! NOTE regarding albedo diagnostics: This method yields zero albedo values +! if there is no incoming solar and thus the albedo diagnostics are masked +! out when the sun is below the horizon. To estimate albedo from the history +! output (post-processing), compute ice albedo using +! 
(1 - albedo)*swdn = swabs. -ECH +! +! author: Bruce P. Briegleb, NCAR +! 2013: E Hunke merged with NCAR version +! + subroutine shortwave_dEdd (nx_block, ny_block, & + ntrcr, icells, & + indxi, indxj, & + coszen, & + aice, vice, & + hs, fs, & + rhosnw, rsnw, & + fp, hp, & + trcr, & + swvdr, swvdf, & + swidr, swidf, & + alvdr, alvdf, & + alidr, alidf, & + fswsfc, fswint, & + fswthru, Sswabs, & + Iswabs, albice, & + albsno, albpnd, & + fswpenl) + + use ice_state, only: nt_aero, tr_aero + + integer (kind=int_kind), & + intent(in) :: & + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + icells ! number of ice-covered grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi , & ! compressed indices for ice-covered cells + indxj + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + coszen , & ! cosine of solar zenith angle + aice , & ! concentration of ice + vice , & ! volume of ice + hs , & ! snow depth + fs ! horizontal coverage of snow + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(in) :: & + rhosnw , & ! density in snow layer (kg/m3) + rsnw ! grain radius in snow layer (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr), & + intent(in) :: & + trcr ! aerosol tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + fp , & ! pond fractional coverage (0 to 1) + hp , & ! pond depth (m) + swvdr , & ! sw down, visible, direct (W/m^2) + swvdf , & ! sw down, visible, diffuse (W/m^2) + swidr , & ! sw down, near IR, direct (W/m^2) + swidf ! sw down, near IR, diffuse (W/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + alvdr , & ! visible, direct, albedo (fraction) + alvdf , & ! visible, diffuse, albedo (fraction) + alidr , & ! near-ir, direct, albedo (fraction) + alidf , & ! near-ir, diffuse, albedo (fraction) + fswsfc , & ! SW absorbed at snow/bare ice/pondedi ice surface (W m-2) + fswint , & ! SW interior absorption (below surface, above ocean,W m-2) + fswthru ! SW through snow/bare ice/ponded ice into ocean (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr+1), & + intent(inout) :: & + fswpenl ! visible SW entering ice layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(inout) :: & + Sswabs ! SW absorbed in snow layer (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(inout) :: & + Iswabs ! SW absorbed in ice layer (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + albice , & ! bare ice albedo, for history + albsno , & ! snow albedo, for history + albpnd ! pond albedo, for history + + ! local variables + + real (kind=dbl_kind),dimension (nx_block,ny_block) :: & + fnidr ! fraction of direct to total down surface flux in nir + + real (kind=dbl_kind), dimension(nx_block,ny_block) :: & + hstmp , & ! snow thickness (set to 0 for bare ice case) + hi , & ! ice thickness (all sea ice layers, m) + fi ! snow/bare ice fractional coverage (0 to 1) + + real (kind=dbl_kind), dimension (nx_block,ny_block,4*n_aero) :: & + aero_mp ! aerosol mass path in kg/m2 + + integer (kind=int_kind) :: & + srftyp ! surface type over ice: (0=air, 1=snow, 2=pond) + + integer (kind=int_kind) :: & + i , & ! longitude index + j , & ! latitude index + ij , & ! horizontal index, combines i and j loops + k , & ! level index + na , & ! aerosol index + icells_DE ! 
number of cells in Delta-Eddington calculation + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi_DE , & ! compressed indices for Delta-Eddington cells + indxj_DE + + real (kind=dbl_kind) :: & + vsno ! volume of snow + + ! for printing points + integer (kind=int_kind) :: & + n ! point number for prints + logical (kind=log_kind) :: & + dbug ! true/false flag + + real (kind=dbl_kind) :: & + swdn , & ! swvdr(i,j)+swvdf(i,j)+swidr(i,j)+swidf(i,j) + swab , & ! fswsfc(i,j)+fswint(i,j)+fswthru(i,j) + swalb ! (1.-swab/(swdn+.0001)) + + ! for history + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + avdrl , & ! visible, direct, albedo (fraction) + avdfl , & ! visible, diffuse, albedo (fraction) + aidrl , & ! near-ir, direct, albedo (fraction) + aidfl ! near-ir, diffuse, albedo (fraction) + +!----------------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + ! zero storage albedos and fluxes for accumulation over surface types: + hstmp(i,j) = c0 + hi(i,j) = c0 + fi(i,j) = c0 + alvdr(i,j) = c0 + alvdf(i,j) = c0 + alidr(i,j) = c0 + alidf(i,j) = c0 + avdrl(i,j) = c0 + avdfl(i,j) = c0 + aidrl(i,j) = c0 + aidfl(i,j) = c0 + fswsfc(i,j) = c0 + fswint(i,j) = c0 + fswthru(i,j) = c0 + ! compute fraction of nir down direct to total over all points: + fnidr(i,j) = c0 + if( swidr(i,j) + swidf(i,j) > puny ) then + fnidr(i,j) = swidr(i,j)/(swidr(i,j)+swidf(i,j)) + endif + albice(i,j) = c0 + albsno(i,j) = c0 + albpnd(i,j) = c0 + enddo + enddo + fswpenl(:,:,:) = c0 + Sswabs(:,:,:) = c0 + Iswabs(:,:,:) = c0 + + ! compute aerosol mass path + + aero_mp(:,:,:) = c0 + if( tr_aero ) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + ! assume 4 layers for each aerosol, a snow SSL, snow below SSL, + ! sea ice SSL, and sea ice below SSL, in that order. + do na = 1, 4*n_aero, 4 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + vsno = hs(i,j) * aice(i,j) + if (coszen(i,j) > puny) then ! sun above horizon + aero_mp(i,j,na ) = trcr(i,j,nt_aero-1+na )*vsno + aero_mp(i,j,na+1) = trcr(i,j,nt_aero-1+na+1)*vsno + aero_mp(i,j,na+2) = trcr(i,j,nt_aero-1+na+2)*vice(i,j) + aero_mp(i,j,na+3) = trcr(i,j,nt_aero-1+na+3)*vice(i,j) + endif ! aice > 0 and coszen > 0 + enddo ! ij + enddo ! na + endif ! if aerosols + + ! compute shortwave radiation accounting for snow/ice (both snow over + ! ice and bare ice) and ponded ice (if any): + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + ! find bare ice points + icells_DE = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + ! sea ice points with sun above horizon + if (coszen(i,j) > puny) then + ! evaluate sea ice thickness and fraction + hi(i,j) = vice(i,j) / aice(i,j) + fi(i,j) = c1 - fs(i,j) - fp(i,j) + ! bare sea ice points + if(fi(i,j) > c0) then + icells_DE = icells_DE + 1 + indxi_DE(icells_DE) = i + indxj_DE(icells_DE) = j + ! bare ice + endif ! fi > 0 + endif ! coszen > 0 + enddo ! ij + + ! 
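+      ! The cell-mean albedos and fluxes are accumulated as area-weighted
+      ! sums over three surface types, each handled by its own call to
+      ! compute_dEdd below: bare ice (weight fi), snow-covered ice (fs)
+      ! and ponded ice (fp), with fi = 1 - fs - fp.  Writing avdrl_bare,
+      ! avdrl_snow, avdrl_pond for the per-pass values (illustrative
+      ! shorthand only), the accumulation amounts to, e.g.,
+      !    alvdr = avdrl_bare*fi + avdrl_snow*fs + avdrl_pond*fp
+      !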
calculate bare sea ice + srftyp = 0 + if (icells_DE > 0) & + call compute_dEdd & + (nx_block,ny_block, & + icells_DE, indxi_DE, indxj_DE, fnidr, coszen, & + swvdr, swvdf, swidr, swidf, srftyp, & + hstmp, rhosnw, rsnw, hi, hp, & + fi, aero_mp, avdrl, avdfl, & + aidrl, aidfl, & + fswsfc, fswint, & + fswthru, Sswabs, & + Iswabs, fswpenl) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + alvdr(i,j) = alvdr(i,j) + avdrl(i,j) *fi(i,j) + alvdf(i,j) = alvdf(i,j) + avdfl(i,j) *fi(i,j) + alidr(i,j) = alidr(i,j) + aidrl(i,j) *fi(i,j) + alidf(i,j) = alidf(i,j) + aidfl(i,j) *fi(i,j) + ! for history + albice(i,j) = albice(i,j) & + + awtvdr*avdrl(i,j) + awtidr*aidrl(i,j) & + + awtvdf*avdfl(i,j) + awtidf*aidfl(i,j) + enddo + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + ! find snow-covered ice points + icells_DE = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + ! sea ice points with sun above horizon + if (coszen(i,j) > puny) then + ! snow-covered sea ice points + if(fs(i,j) > c0) then + icells_DE = icells_DE + 1 + indxi_DE(icells_DE) = i + indxj_DE(icells_DE) = j + ! snow-covered ice + endif ! fs > 0 + endif ! coszen > 0 + enddo ! ij + + ! calculate snow covered sea ice + srftyp = 1 + if (icells_DE > 0) & + call compute_dEdd & + (nx_block,ny_block, & + icells_DE, indxi_DE, indxj_DE, fnidr, coszen, & + swvdr, swvdf, swidr, swidf, srftyp, & + hs, rhosnw, rsnw, hi, hp, & + fs, aero_mp, avdrl, avdfl, & + aidrl, aidfl, & + fswsfc, fswint, & + fswthru, Sswabs, & + Iswabs, fswpenl) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + alvdr(i,j) = alvdr(i,j) + avdrl(i,j) *fs(i,j) + alvdf(i,j) = alvdf(i,j) + avdfl(i,j) *fs(i,j) + alidr(i,j) = alidr(i,j) + aidrl(i,j) *fs(i,j) + alidf(i,j) = alidf(i,j) + aidfl(i,j) *fs(i,j) + ! for history + albsno(i,j) = albsno(i,j) & + + awtvdr*avdrl(i,j) + awtidr*aidrl(i,j) & + + awtvdf*avdfl(i,j) + awtidf*aidfl(i,j) + enddo + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + ! find ponded points + icells_DE = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + hi(i,j) = c0 + ! sea ice points with sun above horizon + if (coszen(i,j) > puny) then + hi(i,j) = vice(i,j) / aice(i,j) + ! if non-zero pond fraction and sufficient pond depth + if( fp(i,j) > puny .and. hp(i,j) > hpmin ) then + icells_DE = icells_DE + 1 + indxi_DE(icells_DE) = i + indxj_DE(icells_DE) = j + ! ponded ice + endif + endif ! coszen > puny + enddo ! ij + + ! calculate ponded ice + srftyp = 2 + if (icells_DE > 0) & + call compute_dEdd & + (nx_block,ny_block, & + icells_DE, indxi_DE, indxj_DE, fnidr, coszen, & + swvdr, swvdf, swidr, swidf, srftyp, & + hs, rhosnw, rsnw, hi, hp, & + fp, aero_mp, avdrl, avdfl, & + aidrl, aidfl, & + fswsfc, fswint, & + fswthru, Sswabs, & + Iswabs, fswpenl) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + alvdr(i,j) = alvdr(i,j) + avdrl(i,j) *fp(i,j) + alvdf(i,j) = alvdf(i,j) + avdfl(i,j) *fp(i,j) + alidr(i,j) = alidr(i,j) + aidrl(i,j) *fp(i,j) + alidf(i,j) = alidf(i,j) + aidfl(i,j) *fp(i,j) + ! for history + albpnd(i,j) = albpnd(i,j) & + + awtvdr*avdrl(i,j) + awtidr*aidrl(i,j) & + + awtvdf*avdfl(i,j) + awtidf*aidfl(i,j) + enddo + + dbug = .false. + if (dbug .and. 
print_points) then + do n = 1, npnt + if (my_task == pmloc(n)) then + i = piloc(n) + j = pjloc(n) + if( coszen(i,j) > .01_dbl_kind ) then + write(nu_diag,*) ' my_task = ',my_task & + ,' printing point = ',n & + ,' i and j = ',i,j + write(nu_diag,*) ' coszen = ', & + coszen(i,j) + write(nu_diag,*) ' swvdr swvdf = ', & + swvdr(i,j),swvdf(i,j) + write(nu_diag,*) ' swidr swidf = ', & + swidr(i,j),swidf(i,j) + write(nu_diag,*) ' aice = ', & + aice(i,j) + write(nu_diag,*) ' hs = ', & + hs(i,j) + write(nu_diag,*) ' hp = ', & + hp(i,j) + write(nu_diag,*) ' fs = ', & + fs(i,j) + write(nu_diag,*) ' fi = ', & + fi(i,j) + write(nu_diag,*) ' fp = ', & + fp(i,j) + write(nu_diag,*) ' hi = ', & + hi(i,j) + write(nu_diag,*) ' alvdr alvdf = ', & + alvdr(i,j),alvdf(i,j) + write(nu_diag,*) ' alidr alidf = ', & + alidr(i,j),alidf(i,j) + write(nu_diag,*) ' fswsfc fswint fswthru = ', & + fswsfc(i,j),fswint(i,j),fswthru(i,j) + swdn = swvdr(i,j)+swvdf(i,j)+swidr(i,j)+swidf(i,j) + swab = fswsfc(i,j)+fswint(i,j)+fswthru(i,j) + swalb = (1.-swab/(swdn+.0001)) + write(nu_diag,*) ' swdn swab swalb = ',swdn,swab,swalb + do k = 1, nslyr + write(nu_diag,*) ' snow layer k = ', k, & + ' rhosnw = ', & + rhosnw(i,j,k), & + ' rsnw = ', & + rsnw(i,j,k) + enddo + do k = 1, nslyr + write(nu_diag,*) ' snow layer k = ', k, & + ' Sswabs(k) = ', Sswabs(i,j,k) + enddo + do k = 1, nilyr + write(nu_diag,*) ' sea ice layer k = ', k, & + ' Iswabs(k) = ', Iswabs(i,j,k) + enddo + endif ! coszen(i,j) > .01 + endif ! my_task + enddo ! n for printing points + endif ! if print_points + + end subroutine shortwave_dEdd + +!======================================================================= +! +! Evaluate snow/ice/ponded ice inherent optical properties (IOPs), and +! then calculate the multiple scattering solution by calling solution_dEdd. +! +! author: Bruce P. Briegleb, NCAR +! 2013: E Hunke merged with NCAR version + + subroutine compute_dEdd & + (nx_block,ny_block, & + icells_DE, indxi_DE, indxj_DE, fnidr, coszen, & + swvdr, swvdf, swidr, swidf, srftyp, & + hs, rhosnw, rsnw, hi, hp, & + fi, aero_mp, alvdr, alvdf, & + alidr, alidf, & + fswsfc, fswint, & + fswthru, Sswabs, & + Iswabs, fswpenl) + + use ice_therm_shared, only: heat_capacity + use ice_state, only: tr_aero + + integer (kind=int_kind), & + intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells_DE ! number of sea ice grid cells for surface type + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi_DE, & ! compressed indices for sea ice cells for surface type + indxj_DE + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + fnidr , & ! fraction of direct to total down flux in nir + coszen , & ! cosine solar zenith angle + swvdr , & ! shortwave down at surface, visible, direct (W/m^2) + swvdf , & ! shortwave down at surface, visible, diffuse (W/m^2) + swidr , & ! shortwave down at surface, near IR, direct (W/m^2) + swidf ! shortwave down at surface, near IR, diffuse (W/m^2) + + integer (kind=int_kind), intent(in) :: & + srftyp ! surface type over ice: (0=air, 1=snow, 2=pond) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in) :: & + hs ! snow thickness (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(in) :: & + rhosnw , & ! snow density in snow layer (kg/m3) + rsnw ! snow grain radius in snow layer (m) + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in) :: & + hi , & ! ice thickness (m) + hp , & ! pond depth (m) + fi ! 
snow/bare ice fractional coverage (0 to 1) + + real (kind=dbl_kind), dimension (nx_block,ny_block,4*n_aero), & + intent(in) :: & + aero_mp ! aerosol mass path in kg/m2 + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + alvdr , & ! visible, direct, albedo (fraction) + alvdf , & ! visible, diffuse, albedo (fraction) + alidr , & ! near-ir, direct, albedo (fraction) + alidf , & ! near-ir, diffuse, albedo (fraction) + fswsfc , & ! SW absorbed at snow/bare ice/pondedi ice surface (W m-2) + fswint , & ! SW interior absorption (below surface, above ocean,W m-2) + fswthru ! SW through snow/bare ice/ponded ice into ocean (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr+1), & + intent(inout) :: & + fswpenl ! visible SW entering ice layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(inout) :: & + Sswabs ! SW absorbed in snow layer (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(inout) :: & + Iswabs ! SW absorbed in ice layer (W m-2) + +!----------------------------------------------------------------------- +! +! Set up optical property profiles, based on snow, sea ice and ponded +! ice IOPs from: +! +! Briegleb, B. P., and B. Light (2007): A Delta-Eddington Multiple +! Scattering Parameterization for Solar Radiation in the Sea Ice +! Component of the Community Climate System Model, NCAR Technical +! Note NCAR/TN-472+STR February 2007 +! +! Computes column Delta-Eddington radiation solution for specific +! surface type: either snow over sea ice, bare sea ice, or ponded sea ice. +! +! Divides solar spectrum into 3 intervals: 0.2-0.7, 0.7-1.19, and +! 1.19-5.0 micro-meters. The latter two are added (using an assumed +! partition of incident shortwave in the 0.7-5.0 micro-meter band between +! the 0.7-1.19 and 1.19-5.0 micro-meter band) to give the final output +! of 0.2-0.7 visible and 0.7-5.0 near-infrared albedos and fluxes. +! +! Specifies vertical layer optical properties based on input snow depth, +! density and grain radius, along with ice and pond depths, then computes +! layer by layer Delta-Eddington reflectivity, transmissivity and combines +! layers (done by calling routine solution_dEdd). Finally, surface albedos +! and internal fluxes/flux divergences are evaluated. +! +! Description of the level and layer index conventions. This is +! for the standard case of one snow layer and four sea ice layers. +! +! Please read the following; otherwise, there is 99.9% chance you +! will be confused about indices at some point in time........ :) +! +! CICE4.0 snow treatment has one snow layer above the sea ice. This +! snow layer has finite heat capacity, so that surface absorption must +! be distinguished from internal. The Delta-Eddington solar radiation +! thus adds extra surface scattering layers to both snow and sea ice. +! Note that in the following, we assume a fixed vertical layer structure +! for the radiation calculation. In other words, we always have the +! structure shown below for one snow and four sea ice layers, but for +! ponded ice the pond fills "snow" layer 1 over the sea ice, and for +! bare sea ice the top layers over sea ice are treated as transparent air. +! +! SSL = surface scattering layer for either snow or sea ice +! DL = drained layer for sea ice immediately under sea ice SSL +! INT = interior layers for sea ice below the drained layer. +! +! Notice that the radiation level starts with 0 at the top. Thus, +! 
the total number of radiation layers is klev+1, where klev is the
+! sum of nslyr, the number of CCSM snow layers, and nilyr, the
+! number of CCSM sea ice layers, plus the sea ice SSL:
+! klev = 1 + nslyr + nilyr
+!
+! For the standard case illustrated below, nslyr=1, nilyr=4,
+! and klev=6, with the number of layer interfaces klevp=klev+1.
+! Layer interfaces are the surfaces on which reflectivities,
+! transmissivities and fluxes are evaluated.
+!
+! CCSM3 Sea Ice Model Delta-Eddington Solar Radiation
+!                     Layers and Interfaces
+!                 Layer Index             Interface Index
+!    ---------------------               ---------------------  0
+!                            0           \\\    snow SSL    \\\
+!    snow layer 1                        ---------------------  1
+!                            1             rest of snow layer
+!    +++++++++++++++++++++               +++++++++++++++++++++  2
+!                            2           \\\  sea ice SSL   \\\
+!    sea ice layer 1                     ---------------------  3
+!                            3             sea ice DL
+!    ---------------------               ---------------------  4
+!
+!    sea ice layer 2        4              sea ice INT
+!
+!    ---------------------               ---------------------  5
+!
+!    sea ice layer 3        5              sea ice INT
+!
+!    ---------------------               ---------------------  6
+!
+!    sea ice layer 4        6              sea ice INT
+!
+!    ---------------------               ---------------------  7
+!
+! When snow lies over sea ice, the radiation absorbed in the
+! snow SSL is used for surface heating, and that in the rest
+! of the snow layer for its internal heating. For sea ice in
+! this case, all of the radiant heat absorbed in both the
+! sea ice SSL and the DL is used for sea ice layer 1 heating.
+!
+! When pond lies over sea ice, and for bare sea ice, all of the
+! radiant heat absorbed within and above the sea ice SSL is used
+! for surface heating, and that absorbed in the sea ice DL is
+! used for sea ice layer 1 heating.
+!
+! Basically, vertical profiles of the layer extinction optical depth (tau),
+! single scattering albedo (w0) and asymmetry parameter (g) are required over
+! the klev+1 layers, where klev+1 = 2 + nslyr + nilyr. All of the surface type
+! information and snow/ice iop properties are evaluated in this routine, so
+! the tau,w0,g profiles can be passed to solution_dEdd for multiple scattering
+! evaluation. Snow, bare ice and ponded ice iops are contained in data arrays
+! in this routine.
+!
+!-----------------------------------------------------------------------
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i     , & ! longitude index
+         j     , & ! latitude index
+         k     , & ! level index
+         ij    , & ! horizontal index, combines i and j loops
+         ns    , & ! spectral index
+         nr    , & ! index for grain radius tables
+         ki    , & ! index for internal absorption
+         km    , & ! k starting index for snow, sea ice internal absorption
+         kp    , & ! k+1 or k+2 index for snow, sea ice internal absorption
+         ksrf  , & ! level index for surface absorption
+         ksnow , & ! level index for snow density and grain size
+         kii       ! level starting index for sea ice (nslyr+1)
+
+      integer (kind=int_kind), parameter :: &
+         klev  = nslyr + nilyr + 1 , & ! number of radiation layers - 1
+         klevp = klev + 1              ! number of radiation interfaces - 1
+                                       ! (0 layer is included also)
+
+      integer (kind=int_kind), parameter :: &
+         nspint = 3  , & ! number of solar spectral intervals
+         nmbrad = 32     ! number of snow grain radii in tables
+
+      real (kind=dbl_kind), dimension(icells_DE) :: &
+         avdr  , & ! visible albedo, direct   (fraction)
+         avdf  , & ! visible albedo, diffuse  (fraction)
+         aidr  , & ! near-ir albedo, direct   (fraction)
+         aidf      ! near-ir albedo, diffuse  (fraction)
+
+      real (kind=dbl_kind), dimension(icells_DE) :: &
+         fsfc  , & !
shortwave absorbed at snow/bare ice/ponded ice surface (W m-2) + fint , & ! shortwave absorbed in interior (W m-2) + fthru ! shortwave through snow/bare ice/ponded ice to ocean (W/m^2) + + real (kind=dbl_kind), dimension(icells_DE,nslyr) :: & + Sabs ! shortwave absorbed in snow layer (W m-2) + + real (kind=dbl_kind), dimension(icells_DE,nilyr) :: & + Iabs ! shortwave absorbed in ice layer (W m-2) + + real (kind=dbl_kind), dimension(icells_DE,nilyr+1) :: & + fthrul ! shortwave through to ice layers (W m-2) + + real (kind=dbl_kind), dimension (icells_DE,nspint) :: & + wghtns ! spectral weights + + real (kind=dbl_kind), parameter :: & + cp67 = 0.67_dbl_kind , & ! nir band weight parameter + cp33 = 0.33_dbl_kind , & ! nir band weight parameter + cp78 = 0.78_dbl_kind , & ! nir band weight parameter + cp22 = 0.22_dbl_kind , & ! nir band weight parameter + cp01 = 0.01_dbl_kind ! for ocean visible albedo + + real (kind=dbl_kind), dimension (0:klev,icells_DE) :: & + tau , & ! layer extinction optical depth + w0 , & ! layer single scattering albedo + g ! layer asymmetry parameter + + ! following arrays are defined at model interfaces; 0 is the top of the + ! layer above the sea ice; klevp is the sea ice/ocean interface. + real (kind=dbl_kind), dimension (0:klevp,icells_DE) :: & + trndir , & ! solar beam down transmission from top + trntdr , & ! total transmission to direct beam for layers above + trndif , & ! diffuse transmission to diffuse beam for layers above + rupdir , & ! reflectivity to direct radiation for layers below + rupdif , & ! reflectivity to diffuse radiation for layers below + rdndif ! reflectivity to diffuse radiation for layers above + + real (kind=dbl_kind), dimension (0:klevp,icells_DE) :: & + dfdir , & ! down-up flux at interface due to direct beam at top surface + dfdif ! down-up flux at interface due to diffuse beam at top surface + + real (kind=dbl_kind) :: & + refk , & ! interface k multiple scattering term + delr , & ! snow grain radius interpolation parameter + ! inherent optical properties (iop) for snow + Qs , & ! Snow extinction efficiency + ks , & ! Snow extinction coefficient (/m) + ws , & ! Snow single scattering albedo + gs ! Snow asymmetry parameter + + real (kind=dbl_kind), dimension(nslyr,icells_DE) :: & + frsnw ! snow grain radius in snow layer * adjustment factor (m) + + ! actual used ice and ponded ice IOPs, allowing for tuning + ! modifications of the above "_mn" value + real (kind=dbl_kind), dimension (nspint) :: & + ki_ssl , & ! Surface-scattering-layer ice extinction coefficient (/m) + wi_ssl , & ! Surface-scattering-layer ice single scattering albedo + gi_ssl , & ! Surface-scattering-layer ice asymmetry parameter + ki_dl , & ! Drained-layer ice extinction coefficient (/m) + wi_dl , & ! Drained-layer ice single scattering albedo + gi_dl , & ! Drained-layer ice asymmetry parameter + ki_int , & ! Interior-layer ice extinction coefficient (/m) + wi_int , & ! Interior-layer ice single scattering albedo + gi_int , & ! Interior-layer ice asymmetry parameter + ki_p_ssl , & ! Ice under pond srf scat layer extinction coefficient (/m) + wi_p_ssl , & ! Ice under pond srf scat layer single scattering albedo + gi_p_ssl , & ! Ice under pond srf scat layer asymmetry parameter + ki_p_int , & ! Ice under pond extinction coefficient (/m) + wi_p_int , & ! Ice under pond single scattering albedo + gi_p_int ! Ice under pond asymmetry parameter + + real (kind=dbl_kind), dimension(0:klev,icells_DE) :: & + dzk ! layer thickness + + real (kind=dbl_kind) :: & + dz , & ! 
snow, sea ice or pond water layer thickness + dz_ssl , & ! snow or sea ice surface scattering layer thickness + fs ! scaling factor to reduce (nilyr<4) or increase (nilyr>4) DL + ! extinction coefficient to maintain DL optical depth constant + ! with changing number of sea ice layers, to approximately + ! conserve computed albedo for constant physical depth of sea + ! ice when the number of sea ice layers vary + real (kind=dbl_kind) :: & + sig , & ! scattering coefficient for tuning + kabs , & ! absorption coefficient for tuning + sigp ! modified scattering coefficient for tuning + + real (kind=dbl_kind), dimension (icells_DE) :: & + albodr , & ! spectral ocean albedo to direct rad + albodf ! spectral ocean albedo to diffuse rad + + ! for melt pond transition to bare sea ice for small pond depths + real (kind=dbl_kind) :: & + sig_i , & ! ice scattering coefficient (/m) + sig_p , & ! pond scattering coefficient (/m) + kext ! weighted extinction coefficient (/m) + + ! aerosol optical properties from Mark Flanner, 26 June 2008 + ! order assumed: hydrophobic black carbon, hydrophilic black carbon, + ! four dust aerosols by particle size range: + ! dust1(.05-0.5 micron), dust2(0.5-1.25 micron), + ! dust3(1.25-2.5 micron), dust4(2.5-5.0 micron) + ! spectral bands same as snow/sea ice: (0.3-0.7 micron, 0.7-1.19 micron + ! and 1.19-5.0 micron in wavelength) + + integer (kind=int_kind) :: & + na ! aerosol index + + real (kind=dbl_kind) :: & + taer , & ! total aerosol extinction optical depth + waer , & ! total aerosol single scatter albedo + gaer , & ! total aerosol asymmetry parameter + swdr , & ! shortwave down at surface, direct (W/m^2) + swdf , & ! shortwave down at surface, diffuse (W/m^2) + rnilyr , & ! real(nilyr) + rnslyr , & ! real(nslyr) + rns , & ! real(ns) + tmp_0, tmp_ks, tmp_kl ! temp variables + + ! snow grain radii (micro-meters) for table + real (kind=dbl_kind), dimension(nmbrad), parameter :: & + rsnw_tab = (/ & ! snow grain radius for each table entry (micro-meters) + 5._dbl_kind, 7._dbl_kind, 10._dbl_kind, 15._dbl_kind, & + 20._dbl_kind, 30._dbl_kind, 40._dbl_kind, 50._dbl_kind, & + 65._dbl_kind, 80._dbl_kind, 100._dbl_kind, 120._dbl_kind, & + 140._dbl_kind, 170._dbl_kind, 200._dbl_kind, 240._dbl_kind, & + 290._dbl_kind, 350._dbl_kind, 420._dbl_kind, 500._dbl_kind, & + 570._dbl_kind, 660._dbl_kind, 760._dbl_kind, 870._dbl_kind, & + 1000._dbl_kind, 1100._dbl_kind, 1250._dbl_kind, 1400._dbl_kind, & + 1600._dbl_kind, 1800._dbl_kind, 2000._dbl_kind, 2500._dbl_kind/) + + ! 
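+      ! The three tables that follow (Qs_tab, ws_tab, gs_tab) hold snow
+      ! extinction efficiency, single-scattering albedo and asymmetry
+      ! parameter for nspint spectral bands at the nmbrad grain radii in
+      ! rsnw_tab.  In the spectral loop below they are interpolated
+      ! linearly in the adjusted grain radius frsnw, and Qs is converted
+      ! to an extinction coefficient roughly as
+      !    ks = Qs * (rhosnw/rhoi) * 3 / (4 * frsnw*1.0e-6)
+      ! (see the snow branch of the spectral loop).
+      !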
snow extinction efficiency (unitless) + real (kind=dbl_kind), dimension (nspint,nmbrad), parameter :: & + Qs_tab = reshape((/ & + 2.131798_dbl_kind, 2.187756_dbl_kind, 2.267358_dbl_kind, & + 2.104499_dbl_kind, 2.148345_dbl_kind, 2.236078_dbl_kind, & + 2.081580_dbl_kind, 2.116885_dbl_kind, 2.175067_dbl_kind, & + 2.062595_dbl_kind, 2.088937_dbl_kind, 2.130242_dbl_kind, & + 2.051403_dbl_kind, 2.072422_dbl_kind, 2.106610_dbl_kind, & + 2.039223_dbl_kind, 2.055389_dbl_kind, 2.080586_dbl_kind, & + 2.032383_dbl_kind, 2.045751_dbl_kind, 2.066394_dbl_kind, & + 2.027920_dbl_kind, 2.039388_dbl_kind, 2.057224_dbl_kind, & + 2.023444_dbl_kind, 2.033137_dbl_kind, 2.048055_dbl_kind, & + 2.020412_dbl_kind, 2.028840_dbl_kind, 2.041874_dbl_kind, & + 2.017608_dbl_kind, 2.024863_dbl_kind, 2.036046_dbl_kind, & + 2.015592_dbl_kind, 2.022021_dbl_kind, 2.031954_dbl_kind, & + 2.014083_dbl_kind, 2.019887_dbl_kind, 2.028853_dbl_kind, & + 2.012368_dbl_kind, 2.017471_dbl_kind, 2.025353_dbl_kind, & + 2.011092_dbl_kind, 2.015675_dbl_kind, 2.022759_dbl_kind, & + 2.009837_dbl_kind, 2.013897_dbl_kind, 2.020168_dbl_kind, & + 2.008668_dbl_kind, 2.012252_dbl_kind, 2.017781_dbl_kind, & + 2.007627_dbl_kind, 2.010813_dbl_kind, 2.015678_dbl_kind, & + 2.006764_dbl_kind, 2.009577_dbl_kind, 2.013880_dbl_kind, & + 2.006037_dbl_kind, 2.008520_dbl_kind, 2.012382_dbl_kind, & + 2.005528_dbl_kind, 2.007807_dbl_kind, 2.011307_dbl_kind, & + 2.005025_dbl_kind, 2.007079_dbl_kind, 2.010280_dbl_kind, & + 2.004562_dbl_kind, 2.006440_dbl_kind, 2.009333_dbl_kind, & + 2.004155_dbl_kind, 2.005898_dbl_kind, 2.008523_dbl_kind, & + 2.003794_dbl_kind, 2.005379_dbl_kind, 2.007795_dbl_kind, & + 2.003555_dbl_kind, 2.005041_dbl_kind, 2.007329_dbl_kind, & + 2.003264_dbl_kind, 2.004624_dbl_kind, 2.006729_dbl_kind, & + 2.003037_dbl_kind, 2.004291_dbl_kind, 2.006230_dbl_kind, & + 2.002776_dbl_kind, 2.003929_dbl_kind, 2.005700_dbl_kind, & + 2.002590_dbl_kind, 2.003627_dbl_kind, 2.005276_dbl_kind, & + 2.002395_dbl_kind, 2.003391_dbl_kind, 2.004904_dbl_kind, & + 2.002071_dbl_kind, 2.002922_dbl_kind, 2.004241_dbl_kind/), & + (/nspint,nmbrad/)) + + ! 
snow single scattering albedo (unitless) + real (kind=dbl_kind), dimension (nspint,nmbrad), parameter :: & + ws_tab = reshape((/ & + 0.9999994_dbl_kind, 0.9999673_dbl_kind, 0.9954589_dbl_kind, & + 0.9999992_dbl_kind, 0.9999547_dbl_kind, 0.9938576_dbl_kind, & + 0.9999990_dbl_kind, 0.9999382_dbl_kind, 0.9917989_dbl_kind, & + 0.9999985_dbl_kind, 0.9999123_dbl_kind, 0.9889724_dbl_kind, & + 0.9999979_dbl_kind, 0.9998844_dbl_kind, 0.9866190_dbl_kind, & + 0.9999970_dbl_kind, 0.9998317_dbl_kind, 0.9823021_dbl_kind, & + 0.9999960_dbl_kind, 0.9997800_dbl_kind, 0.9785269_dbl_kind, & + 0.9999951_dbl_kind, 0.9997288_dbl_kind, 0.9751601_dbl_kind, & + 0.9999936_dbl_kind, 0.9996531_dbl_kind, 0.9706974_dbl_kind, & + 0.9999922_dbl_kind, 0.9995783_dbl_kind, 0.9667577_dbl_kind, & + 0.9999903_dbl_kind, 0.9994798_dbl_kind, 0.9621007_dbl_kind, & + 0.9999885_dbl_kind, 0.9993825_dbl_kind, 0.9579541_dbl_kind, & + 0.9999866_dbl_kind, 0.9992862_dbl_kind, 0.9541924_dbl_kind, & + 0.9999838_dbl_kind, 0.9991434_dbl_kind, 0.9490959_dbl_kind, & + 0.9999810_dbl_kind, 0.9990025_dbl_kind, 0.9444940_dbl_kind, & + 0.9999772_dbl_kind, 0.9988171_dbl_kind, 0.9389141_dbl_kind, & + 0.9999726_dbl_kind, 0.9985890_dbl_kind, 0.9325819_dbl_kind, & + 0.9999670_dbl_kind, 0.9983199_dbl_kind, 0.9256405_dbl_kind, & + 0.9999605_dbl_kind, 0.9980117_dbl_kind, 0.9181533_dbl_kind, & + 0.9999530_dbl_kind, 0.9976663_dbl_kind, 0.9101540_dbl_kind, & + 0.9999465_dbl_kind, 0.9973693_dbl_kind, 0.9035031_dbl_kind, & + 0.9999382_dbl_kind, 0.9969939_dbl_kind, 0.8953134_dbl_kind, & + 0.9999289_dbl_kind, 0.9965848_dbl_kind, 0.8865789_dbl_kind, & + 0.9999188_dbl_kind, 0.9961434_dbl_kind, 0.8773350_dbl_kind, & + 0.9999068_dbl_kind, 0.9956323_dbl_kind, 0.8668233_dbl_kind, & + 0.9998975_dbl_kind, 0.9952464_dbl_kind, 0.8589990_dbl_kind, & + 0.9998837_dbl_kind, 0.9946782_dbl_kind, 0.8476493_dbl_kind, & + 0.9998699_dbl_kind, 0.9941218_dbl_kind, 0.8367318_dbl_kind, & + 0.9998515_dbl_kind, 0.9933966_dbl_kind, 0.8227881_dbl_kind, & + 0.9998332_dbl_kind, 0.9926888_dbl_kind, 0.8095131_dbl_kind, & + 0.9998148_dbl_kind, 0.9919968_dbl_kind, 0.7968620_dbl_kind, & + 0.9997691_dbl_kind, 0.9903277_dbl_kind, 0.7677887_dbl_kind/), & + (/nspint,nmbrad/)) + + ! 
snow asymmetry parameter (unitless) + real (kind=dbl_kind), dimension (nspint,nmbrad), parameter :: & + gs_tab = reshape((/ & + 0.859913_dbl_kind, 0.848003_dbl_kind, 0.824415_dbl_kind, & + 0.867130_dbl_kind, 0.858150_dbl_kind, 0.848445_dbl_kind, & + 0.873381_dbl_kind, 0.867221_dbl_kind, 0.861714_dbl_kind, & + 0.878368_dbl_kind, 0.874879_dbl_kind, 0.874036_dbl_kind, & + 0.881462_dbl_kind, 0.879661_dbl_kind, 0.881299_dbl_kind, & + 0.884361_dbl_kind, 0.883903_dbl_kind, 0.890184_dbl_kind, & + 0.885937_dbl_kind, 0.886256_dbl_kind, 0.895393_dbl_kind, & + 0.886931_dbl_kind, 0.887769_dbl_kind, 0.899072_dbl_kind, & + 0.887894_dbl_kind, 0.889255_dbl_kind, 0.903285_dbl_kind, & + 0.888515_dbl_kind, 0.890236_dbl_kind, 0.906588_dbl_kind, & + 0.889073_dbl_kind, 0.891127_dbl_kind, 0.910152_dbl_kind, & + 0.889452_dbl_kind, 0.891750_dbl_kind, 0.913100_dbl_kind, & + 0.889730_dbl_kind, 0.892213_dbl_kind, 0.915621_dbl_kind, & + 0.890026_dbl_kind, 0.892723_dbl_kind, 0.918831_dbl_kind, & + 0.890238_dbl_kind, 0.893099_dbl_kind, 0.921540_dbl_kind, & + 0.890441_dbl_kind, 0.893474_dbl_kind, 0.924581_dbl_kind, & + 0.890618_dbl_kind, 0.893816_dbl_kind, 0.927701_dbl_kind, & + 0.890762_dbl_kind, 0.894123_dbl_kind, 0.930737_dbl_kind, & + 0.890881_dbl_kind, 0.894397_dbl_kind, 0.933568_dbl_kind, & + 0.890975_dbl_kind, 0.894645_dbl_kind, 0.936148_dbl_kind, & + 0.891035_dbl_kind, 0.894822_dbl_kind, 0.937989_dbl_kind, & + 0.891097_dbl_kind, 0.895020_dbl_kind, 0.939949_dbl_kind, & + 0.891147_dbl_kind, 0.895212_dbl_kind, 0.941727_dbl_kind, & + 0.891189_dbl_kind, 0.895399_dbl_kind, 0.943339_dbl_kind, & + 0.891225_dbl_kind, 0.895601_dbl_kind, 0.944915_dbl_kind, & + 0.891248_dbl_kind, 0.895745_dbl_kind, 0.945950_dbl_kind, & + 0.891277_dbl_kind, 0.895951_dbl_kind, 0.947288_dbl_kind, & + 0.891299_dbl_kind, 0.896142_dbl_kind, 0.948438_dbl_kind, & + 0.891323_dbl_kind, 0.896388_dbl_kind, 0.949762_dbl_kind, & + 0.891340_dbl_kind, 0.896623_dbl_kind, 0.950916_dbl_kind, & + 0.891356_dbl_kind, 0.896851_dbl_kind, 0.951945_dbl_kind, & + 0.891386_dbl_kind, 0.897399_dbl_kind, 0.954156_dbl_kind/), & + (/nspint,nmbrad/)) + + ! inherent optical property (iop) arrays for ice and ponded ice + ! mn = specified mean (or base) value + ! ki = extinction coefficient (/m) + ! wi = single scattering albedo + ! gi = asymmetry parameter + + ! ice surface scattering layer (ssl) iops + real (kind=dbl_kind), dimension (nspint), parameter :: & + ki_ssl_mn = (/ 1000.1_dbl_kind, 1003.7_dbl_kind, 7042._dbl_kind/), & + wi_ssl_mn = (/ .9999_dbl_kind, .9963_dbl_kind, .9088_dbl_kind/), & + gi_ssl_mn = (/ .94_dbl_kind, .94_dbl_kind, .94_dbl_kind/) + + ! ice drained layer (dl) iops + real (kind=dbl_kind), dimension (nspint), parameter :: & + ki_dl_mn = (/ 100.2_dbl_kind, 107.7_dbl_kind, 1309._dbl_kind /), & + wi_dl_mn = (/ .9980_dbl_kind, .9287_dbl_kind, .0305_dbl_kind /), & + gi_dl_mn = (/ .94_dbl_kind, .94_dbl_kind, .94_dbl_kind /) + + ! ice interior layer (int) iops + real (kind=dbl_kind), dimension (nspint), parameter :: & + ki_int_mn = (/ 20.2_dbl_kind, 27.7_dbl_kind, 1445._dbl_kind /), & + wi_int_mn = (/ .9901_dbl_kind, .7223_dbl_kind, .0277_dbl_kind /), & + gi_int_mn = (/ .94_dbl_kind, .94_dbl_kind, .94_dbl_kind /) + + ! ponded ice surface scattering layer (ssl) iops + real (kind=dbl_kind), dimension (nspint), parameter :: & + ki_p_ssl_mn = (/ 70.2_dbl_kind, 77.7_dbl_kind, 1309._dbl_kind/), & + wi_p_ssl_mn = (/ .9972_dbl_kind, .9009_dbl_kind, .0305_dbl_kind/), & + gi_p_ssl_mn = (/ .94_dbl_kind, .94_dbl_kind, .94_dbl_kind /) + + ! 
ponded ice interior layer (int) iops + real (kind=dbl_kind), dimension (nspint), parameter :: & + ki_p_int_mn = (/ 20.2_dbl_kind, 27.7_dbl_kind, 1445._dbl_kind/), & + wi_p_int_mn = (/ .9901_dbl_kind, .7223_dbl_kind, .0277_dbl_kind/), & + gi_p_int_mn = (/ .94_dbl_kind, .94_dbl_kind, .94_dbl_kind /) + + ! inherent optical property (iop) arrays for pond water and underlying ocean + ! kw = Pond water extinction coefficient (/m) + ! ww = Pond water single scattering albedo + ! gw = Pond water asymmetry parameter + real (kind=dbl_kind), dimension (nspint), parameter :: & + kw = (/ 0.20_dbl_kind, 12.0_dbl_kind, 729._dbl_kind /), & + ww = (/ 0.00_dbl_kind, 0.00_dbl_kind, 0.00_dbl_kind /), & + gw = (/ 0.00_dbl_kind, 0.00_dbl_kind, 0.00_dbl_kind /) + + real (kind=dbl_kind), parameter :: & + rhoi = 917.0_dbl_kind,& ! pure ice mass density (kg/m3) + fr_max = 1.00_dbl_kind, & ! snow grain adjustment factor max + fr_min = 0.80_dbl_kind, & ! snow grain adjustment factor min + ! tuning parameters + ! ice and pond scat coeff fractional change for +- one-sigma in albedo + fp_ice = 0.15_dbl_kind, & ! ice fraction of scat coeff for + stn dev in alb + fm_ice = 0.15_dbl_kind, & ! ice fraction of scat coeff for - stn dev in alb + fp_pnd = 2.00_dbl_kind, & ! ponded ice fraction of scat coeff for + stn dev in alb + fm_pnd = 0.50_dbl_kind ! ponded ice fraction of scat coeff for - stn dev in alb + + ! aerosol optical properties -> band | + ! v aerosol + ! for combined dust category, use category 4 properties + real (kind=dbl_kind), dimension(nspint,max_aero), parameter :: & + kaer_tab = reshape((/ & ! aerosol mass extinction cross section (m2/kg) + 11580.61872, 5535.41835, 2793.79690, & + 25798.96479, 11536.03871, 4688.24207, & + 196.49772, 204.14078, 214.42287, & + 2665.85867, 2256.71027, 820.36024, & + 840.78295, 1028.24656, 1163.03298, & + 387.51211, 414.68808, 450.29814/), & + (/nspint,max_aero/)), & + waer_tab = reshape((/ & ! aerosol single scatter albedo (fraction) + 0.29003, 0.17349, 0.06613, & + 0.51731, 0.41609, 0.21324, & + 0.84467, 0.94216, 0.95666, & + 0.97764, 0.99402, 0.98552, & + 0.94146, 0.98527, 0.99093, & + 0.90034, 0.96543, 0.97678/), & + (/nspint,max_aero/)), & + gaer_tab = reshape((/ & ! aerosol asymmetry parameter (cos(theta)) + 0.35445, 0.19838, 0.08857, & + 0.52581, 0.32384, 0.14970, & + 0.83162, 0.78306, 0.74375, & + 0.68861, 0.70836, 0.54171, & + 0.70239, 0.66115, 0.71983, & + 0.78734, 0.73580, 0.64411/), & + (/nspint,max_aero/)) + +!----------------------------------------------------------------------- +! Initialize and tune bare ice/ponded ice iops + + rnilyr = c1/real(nilyr,kind=dbl_kind) + rnslyr = c1/real(nslyr,kind=dbl_kind) + kii = nslyr + 1 + + ! initialize albedos and fluxes to 0 + fthrul(:,:) = c0 + Iabs(:,:) = c0 + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + + avdr(ij) = c0 + avdf(ij) = c0 + aidr(ij) = c0 + aidf(ij) = c0 + fsfc(ij) = c0 + fint(ij) = c0 + fthru(ij) = c0 + + ! spectral weights + ! weights 2 (0.7-1.19 micro-meters) and 3 (1.19-5.0 micro-meters) + ! are chosen based on 1D calculations using ratio of direct to total + ! near-infrared solar (0.7-5.0 micro-meter) which indicates clear/cloudy + ! conditions: more cloud, the less 1.19-5.0 relative to the + ! 0.7-1.19 micro-meter due to cloud absorption. + wghtns(ij,1) = c1 + wghtns(ij,2) = cp67 + (cp78-cp67)*(c1-fnidr(i,j)) +! wghtns(ij,3) = cp33 + (cp22-cp33)*(c1-fnidr(i,j)) + wghtns(ij,3) = c1 - wghtns(ij,2) + + ! find snow grain adjustment factor, dependent upon clear/overcast sky + ! estimate. 
comparisons with SNICAR show better agreement with DE when + ! this factor is included (clear sky near 1 and overcast near 0.8 give + ! best agreement). Multiply by rnsw here for efficiency. + do k = 1, nslyr + frsnw(k,ij) = (fr_max*fnidr(i,j) + fr_min*(c1-fnidr(i,j)))*rsnw(i,j,k) + Sabs(ij,k) = c0 + enddo + + ! layer thicknesses + ! snow + dz = hs(i,j)*rnslyr + ! for small enough snow thickness, ssl thickness half of top snow layer +!ech: note this is highly resolution dependent! + dzk(0,ij) = min(hs_ssl, dz/c2) + dzk(1,ij) = dz - dzk(0,ij) + if (nslyr > 1) then + do k = 2, nslyr + dzk(k,ij) = dz + enddo + endif + + ! ice + dz = hi(i,j)*rnilyr + ! empirical reduction in sea ice ssl thickness for ice thinner than 1.5m; + ! factor of 30 gives best albedo comparison with limited observations + dz_ssl = hi_ssl +!ech: note hardwired parameters +! if( hi(i,j) < 1.5_dbl_kind ) dz_ssl = hi(i,j)/30._dbl_kind + dz_ssl = min(hi_ssl, hi(i,j)/30._dbl_kind) + ! set sea ice ssl thickness to half top layer if sea ice thin enough +!ech: note this is highly resolution dependent! + dz_ssl = min(dz_ssl, dz/c2) + + dzk(kii,ij) = dz_ssl + dzk(kii+1,ij) = dz - dz_ssl + if (kii+2 <= klev) then + do k = kii+2, klev + dzk(k,ij) = dz + enddo + endif + enddo ! ij + + ! adjust sea ice iops with tuning parameters; tune only the + ! scattering coefficient by factors of R_ice, R_pnd, where + ! R values of +1 correspond approximately to +1 sigma changes in albedo, and + ! R values of -1 correspond approximately to -1 sigma changes in albedo + ! Note: the albedo change becomes non-linear for R values > +1 or < -1 + if( R_ice >= c0 ) then + do ns = 1, nspint + sigp = ki_ssl_mn(ns)*wi_ssl_mn(ns)*(c1+fp_ice*R_ice) + ki_ssl(ns) = sigp+ki_ssl_mn(ns)*(c1-wi_ssl_mn(ns)) + wi_ssl(ns) = sigp/ki_ssl(ns) + gi_ssl(ns) = gi_ssl_mn(ns) + + sigp = ki_dl_mn(ns)*wi_dl_mn(ns)*(c1+fp_ice*R_ice) + ki_dl(ns) = sigp+ki_dl_mn(ns)*(c1-wi_dl_mn(ns)) + wi_dl(ns) = sigp/ki_dl(ns) + gi_dl(ns) = gi_dl_mn(ns) + + sigp = ki_int_mn(ns)*wi_int_mn(ns)*(c1+fp_ice*R_ice) + ki_int(ns) = sigp+ki_int_mn(ns)*(c1-wi_int_mn(ns)) + wi_int(ns) = sigp/ki_int(ns) + gi_int(ns) = gi_int_mn(ns) + enddo + else !if( R_ice < c0 ) then + do ns = 1, nspint + sigp = ki_ssl_mn(ns)*wi_ssl_mn(ns)*(c1+fm_ice*R_ice) + sigp = max(sigp, c0) + ki_ssl(ns) = sigp+ki_ssl_mn(ns)*(c1-wi_ssl_mn(ns)) + wi_ssl(ns) = sigp/ki_ssl(ns) + gi_ssl(ns) = gi_ssl_mn(ns) + + sigp = ki_dl_mn(ns)*wi_dl_mn(ns)*(c1+fm_ice*R_ice) + sigp = max(sigp, c0) + ki_dl(ns) = sigp+ki_dl_mn(ns)*(c1-wi_dl_mn(ns)) + wi_dl(ns) = sigp/ki_dl(ns) + gi_dl(ns) = gi_dl_mn(ns) + + sigp = ki_int_mn(ns)*wi_int_mn(ns)*(c1+fm_ice*R_ice) + sigp = max(sigp, c0) + ki_int(ns) = sigp+ki_int_mn(ns)*(c1-wi_int_mn(ns)) + wi_int(ns) = sigp/ki_int(ns) + gi_int(ns) = gi_int_mn(ns) + enddo + endif ! adjust ice iops + + ! 
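+      ! Note that the tuning above rescales only the scattering part of
+      ! each extinction coefficient: with sig = k*w the scattering
+      ! coefficient and k*(1-w) the absorption,
+      !    sigp = sig * (1 + f*R)   (clipped at zero for negative R)
+      !    k'   = sigp + k*(1-w)
+      !    w'   = sigp / k'
+      ! so absorption is unchanged while R_ice shifts the albedo by
+      ! roughly one standard deviation per unit |R|.  The same algebra
+      ! is applied to the ponded-ice iops below.
+      !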
adjust ponded ice iops with tuning parameters + if( R_pnd >= c0 ) then + do ns = 1, nspint + sigp = ki_p_ssl_mn(ns)*wi_p_ssl_mn(ns)*(c1+fp_pnd*R_pnd) + ki_p_ssl(ns) = sigp+ki_p_ssl_mn(ns)*(c1-wi_p_ssl_mn(ns)) + wi_p_ssl(ns) = sigp/ki_p_ssl(ns) + gi_p_ssl(ns) = gi_p_ssl_mn(ns) + + sigp = ki_p_int_mn(ns)*wi_p_int_mn(ns)*(c1+fp_pnd*R_pnd) + ki_p_int(ns) = sigp+ki_p_int_mn(ns)*(c1-wi_p_int_mn(ns)) + wi_p_int(ns) = sigp/ki_p_int(ns) + gi_p_int(ns) = gi_p_int_mn(ns) + enddo + else !if( R_pnd < c0 ) then + do ns = 1, nspint + sigp = ki_p_ssl_mn(ns)*wi_p_ssl_mn(ns)*(c1+fm_pnd*R_pnd) + sigp = max(sigp, c0) + ki_p_ssl(ns) = sigp+ki_p_ssl_mn(ns)*(c1-wi_p_ssl_mn(ns)) + wi_p_ssl(ns) = sigp/ki_p_ssl(ns) + gi_p_ssl(ns) = gi_p_ssl_mn(ns) + + sigp = ki_p_int_mn(ns)*wi_p_int_mn(ns)*(c1+fm_pnd*R_pnd) + sigp = max(sigp, c0) + ki_p_int(ns) = sigp+ki_p_int_mn(ns)*(c1-wi_p_int_mn(ns)) + wi_p_int(ns) = sigp/ki_p_int(ns) + gi_p_int(ns) = gi_p_int_mn(ns) + enddo + endif ! adjust ponded ice iops + + ! use srftyp to determine interface index of surface absorption + if (srftyp == 1) then + ! snow covered sea ice + ksrf = 1 + else + ! bare sea ice or ponded ice + ksrf = nslyr + 2 + endif + +!----------------------------------------------------------------------- + + ! begin spectral loop + do ns = 1, nspint + + ! set optical properties of air/snow/pond overlying sea ice + ! air + if( srftyp == 0 ) then + do ij = 1, icells_DE + do k=0,nslyr + tau(k,ij) = c0 + w0(k,ij) = c0 + g(k,ij) = c0 + enddo + enddo + ! snow + else if( srftyp == 1 ) then + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + ! interpolate snow iops using input snow grain radius, + ! snow density and tabular data + do k=0,nslyr + ! use top rsnw, rhosnw for snow ssl and rest of top layer + ksnow = k - min(k-1,0) + ! find snow iops using input snow density and snow grain radius: + if( frsnw(ksnow,ij) < rsnw_tab(1) ) then + Qs = Qs_tab(ns,1) + ws = ws_tab(ns,1) + gs = gs_tab(ns,1) + else if( frsnw(ksnow,ij) >= rsnw_tab(nmbrad) ) then + Qs = Qs_tab(ns,nmbrad) + ws = ws_tab(ns,nmbrad) + gs = gs_tab(ns,nmbrad) + else + ! linear interpolation in rsnw + do nr=2,nmbrad + if( rsnw_tab(nr-1) <= frsnw(ksnow,ij) .and. & + frsnw(ksnow,ij) < rsnw_tab(nr)) then + delr = (frsnw(ksnow,ij) - rsnw_tab(nr-1)) / & + (rsnw_tab(nr) - rsnw_tab(nr-1)) + Qs = Qs_tab(ns,nr-1)*(c1-delr) + & + Qs_tab(ns,nr)*delr + ws = ws_tab(ns,nr-1)*(c1-delr) + & + ws_tab(ns,nr)*delr + gs = gs_tab(ns,nr-1)*(c1-delr) + & + gs_tab(ns,nr)*delr + endif + enddo ! nr + endif + ks = Qs*((rhosnw(i,j,ksnow)/rhoi)*3._dbl_kind / & + (4._dbl_kind*frsnw(ksnow,ij)*1.0e-6_dbl_kind)) + + tau(k,ij) = ks*dzk(k,ij) + w0(k,ij) = ws + g(k,ij) = gs + enddo ! k + ! aerosol in snow + if (tr_aero) then + taer = c0 + waer = c0 + gaer = c0 + do na=1,4*n_aero,4 + taer = taer + & + aero_mp(i,j,na)*kaer_tab(ns,(1+(na-1)/4)) + waer = waer + & + aero_mp(i,j,na)*kaer_tab(ns,(1+(na-1)/4))* & + waer_tab(ns,(1+(na-1)/4)) + gaer = gaer + & + aero_mp(i,j,na)*kaer_tab(ns,(1+(na-1)/4))* & + waer_tab(ns,(1+(na-1)/4))*gaer_tab(ns,(1+(na-1)/4)) + enddo ! na + gaer = gaer/(waer+puny) + waer = waer/(taer+puny) + do k=1,nslyr + taer = c0 + waer = c0 + gaer = c0 + do na=1,4*n_aero,4 + taer = taer + & + (aero_mp(i,j,na+1)*rnslyr)*kaer_tab(ns,(1+(na-1)/4)) + waer = waer + & + (aero_mp(i,j,na+1)*rnslyr)*kaer_tab(ns,(1+(na-1)/4))* & + waer_tab(ns,(1+(na-1)/4)) + gaer = gaer + & + (aero_mp(i,j,na+1)*rnslyr)*kaer_tab(ns,(1+(na-1)/4))* & + waer_tab(ns,(1+(na-1)/4))*gaer_tab(ns,(1+(na-1)/4)) + enddo ! 
na + gaer = gaer/(waer+puny) + waer = waer/(taer+puny) + g(k,ij) = (g(k,ij)*w0(k,ij)*tau(k,ij) + gaer*waer*taer) / & + (w0(k,ij)*tau(k,ij) + waer*taer) + w0(k,ij) = (w0(k,ij)*tau(k,ij) + waer*taer) / & + (tau(k,ij) + taer) + tau(k,ij) = tau(k,ij) + taer + enddo ! k + endif ! tr_aero + enddo ! ij + + ! pond + else !if( srftyp == 2 ) then + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + ! pond water layers evenly spaced + dz = hp(i,j)/(c1/rnslyr+c1) + do k=0,nslyr + tau(k,ij) = kw(ns)*dz + w0(k,ij) = ww(ns) + g(k,ij) = gw(ns) + ! no aerosol in pond + enddo ! k + enddo ! ij ... optical properties above sea ice set + endif ! srftyp + + ! set optical properties of sea ice + + ! bare or snow-covered sea ice layers + if( srftyp <= 1 ) then + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + ! ssl + k = kii + tau(k,ij) = ki_ssl(ns)*dzk(k,ij) + w0(k,ij) = wi_ssl(ns) + g(k,ij) = gi_ssl(ns) + ! dl + k = kii + 1 + ! scale dz for dl relative to 4 even-layer-thickness 1.5m case + fs = p25/rnilyr + tau(k,ij) = ki_dl(ns)*dzk(k,ij)*fs + w0(k,ij) = wi_dl(ns) + g(k,ij) = gi_dl(ns) + ! int above lowest layer + if (kii+2 <= klev-1) then + do k = kii+2, klev-1 + tau(k,ij) = ki_int(ns)*dzk(k,ij) + w0(k,ij) = wi_int(ns) + g(k,ij) = gi_int(ns) + enddo + endif + ! lowest layer + k = klev + ! add algae to lowest sea ice layer, visible only: + kabs = ki_int(ns)*(c1-wi_int(ns)) + if( ns == 1 ) then + ! total layer absorption optical depth fixed at value + ! of kalg*0.50m, independent of actual layer thickness + kabs = kabs + kalg*(0.50_dbl_kind/dzk(k,ij)) + endif + sig = ki_int(ns)*wi_int(ns) + tau(k,ij) = (kabs+sig)*dzk(k,ij) + w0(k,ij) = (sig/(sig+kabs)) + g(k,ij) = gi_int(ns) + ! aerosol in sea ice + if (tr_aero) then + k = kii ! sea ice SSL + taer = c0 + waer = c0 + gaer = c0 + do na=1,4*n_aero,4 + taer = taer + & + aero_mp(i,j,na+2)*kaer_tab(ns,(1+(na-1)/4)) + waer = waer + & + aero_mp(i,j,na+2)*kaer_tab(ns,(1+(na-1)/4))* & + waer_tab(ns,(1+(na-1)/4)) + gaer = gaer + & + aero_mp(i,j,na+2)*kaer_tab(ns,(1+(na-1)/4))* & + waer_tab(ns,(1+(na-1)/4))*gaer_tab(ns,(1+(na-1)/4)) + enddo ! na + gaer = gaer/(waer+puny) + waer = waer/(taer+puny) + g(k,ij) = (g(k,ij)*w0(k,ij)*tau(k,ij) + gaer*waer*taer) / & + (w0(k,ij)*tau(k,ij) + waer*taer) + w0(k,ij) = (w0(k,ij)*tau(k,ij) + waer*taer) / & + (tau(k,ij) + taer) + tau(k,ij) = tau(k,ij) + taer + do k = kii+1, klev + taer = c0 + waer = c0 + gaer = c0 + do na=1,4*n_aero,4 + taer = taer + & + (aero_mp(i,j,na+3)*rnilyr)*kaer_tab(ns,(1+(na-1)/4)) + waer = waer + & + (aero_mp(i,j,na+3)*rnilyr)*kaer_tab(ns,(1+(na-1)/4))* & + waer_tab(ns,(1+(na-1)/4)) + gaer = gaer + & + (aero_mp(i,j,na+3)*rnilyr)*kaer_tab(ns,(1+(na-1)/4))* & + waer_tab(ns,(1+(na-1)/4))*gaer_tab(ns,(1+(na-1)/4)) + enddo ! na + gaer = gaer/(waer+puny) + waer = waer/(taer+puny) + g(k,ij) = (g(k,ij)*w0(k,ij)*tau(k,ij) + gaer*waer*taer) / & + (w0(k,ij)*tau(k,ij) + waer*taer) + w0(k,ij) = (w0(k,ij)*tau(k,ij) + waer*taer) / & + (tau(k,ij) + taer) + tau(k,ij) = tau(k,ij) + taer + enddo ! k + endif ! tr_aero + enddo ! ij + + ! sea ice layers under ponds + else !if( srftyp == 2 ) then + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + k = kii + tau(k,ij) = ki_p_ssl(ns)*dzk(k,ij) + w0(k,ij) = wi_p_ssl(ns) + g(k,ij) = gi_p_ssl(ns) + k = kii + 1 + tau(k,ij) = ki_p_int(ns)*dzk(k,ij) + w0(k,ij) = wi_p_int(ns) + g(k,ij) = gi_p_int(ns) + if (kii+2 <= klev) then + do k = kii+2, klev + tau(k,ij) = ki_p_int(ns)*dzk(k,ij) + w0(k,ij) = wi_p_int(ns) + g(k,ij) = gi_p_int(ns) + enddo ! k + endif + ! 
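+      ! For shallow ponds (hpmin <= hp <= hp0) the scattering coefficient
+      ! of each ice layer is blended linearly between its bare-ice and
+      ! ponded-ice values in proportion to hp/hp0, so the solution passes
+      ! smoothly from bare to ponded ice as the pond deepens.
+      !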
adjust pond iops if pond depth within specified range + if( hpmin <= hp(i,j) .and. hp(i,j) <= hp0 ) then + k = kii + sig_i = ki_ssl(ns)*wi_ssl(ns) + sig_p = ki_p_ssl(ns)*wi_p_ssl(ns) + sig = sig_i + (sig_p-sig_i)*(hp(i,j)/hp0) + kext = sig + ki_p_ssl(ns)*(c1-wi_p_ssl(ns)) + tau(k,ij) = kext*dzk(k,ij) + w0(k,ij) = sig/kext + g(k,ij) = gi_p_int(ns) + k = kii + 1 + ! scale dz for dl relative to 4 even-layer-thickness 1.5m case + fs = p25/rnilyr + sig_i = ki_dl(ns)*wi_dl(ns)*fs + sig_p = ki_p_int(ns)*wi_p_int(ns) + sig = sig_i + (sig_p-sig_i)*(hp(i,j)/hp0) + kext = sig + ki_p_int(ns)*(c1-wi_p_int(ns)) + tau(k,ij) = kext*dzk(k,ij) + w0(k,ij) = sig/kext + g(k,ij) = gi_p_int(ns) + if (kii+2 <= klev) then + do k = kii+2, klev + sig_i = ki_int(ns)*wi_int(ns) + sig_p = ki_p_int(ns)*wi_p_int(ns) + sig = sig_i + (sig_p-sig_i)*(hp(i,j)/hp0) + kext = sig + ki_p_int(ns)*(c1-wi_p_int(ns)) + tau(k,ij) = kext*dzk(k,ij) + w0(k,ij) = sig/kext + g(k,ij) = gi_p_int(ns) + enddo ! k + endif + endif ! small pond depth transition to bare sea ice + enddo ! ij ... optical properties of sea ice set + endif ! srftyp + + ! set reflectivities for ocean underlying sea ice + do ij = 1, icells_DE + rns = real(ns-1, kind=dbl_kind) + albodr(ij) = cp01 * (c1 - min(rns, c1)) + albodf(ij) = cp01 * (c1 - min(rns, c1)) + enddo ! ij + + ! layer input properties now completely specified: tau, w0, g, + ! albodr, albodf; now compute the Delta-Eddington solution + ! reflectivities and transmissivities for each layer; then, + ! combine the layers going downwards accounting for multiple + ! scattering between layers, and finally start from the + ! underlying ocean and combine successive layers upwards to + ! the surface; see comments in solution_dEdd for more details. + + call solution_dEdd & + (nx_block, ny_block, & + icells_DE, indxi_DE, indxj_DE, coszen, srftyp, & + tau, w0, g, albodr, albodf, & + trndir, trntdr, trndif, rupdir, rupdif, & + rdndif) + + ! the interface reflectivities and transmissivities required + ! to evaluate interface fluxes are returned from solution_dEdd; + ! now compute up and down fluxes for each interface, using the + ! combined layer properties at each interface: + ! + ! layers interface + ! + ! --------------------- k + ! k + ! --------------------- + + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + do k=0,klevp + ! interface scattering + refk = c1/(c1 - rdndif(k,ij)*rupdif(k,ij)) + ! dir tran ref from below times interface scattering, plus diff + ! tran and ref from below times interface scattering + ! fdirup(k,ij) = (trndir(k,ij)*rupdir(k,ij) + & + ! (trntdr(k,ij)-trndir(k,ij)) & + ! *rupdif(k,ij))*refk + ! dir tran plus total diff trans times interface scattering plus + ! dir tran with up dir ref and down dif ref times interface scattering + ! fdirdn(k,ij) = trndir(k,ij) + (trntdr(k,ij) & + ! - trndir(k,ij) + trndir(k,ij) & + ! *rupdir(k,ij)*rdndif(k,ij))*refk + ! diffuse tran ref from below times interface scattering + ! fdifup(k,ij) = trndif(k,ij)*rupdif(k,ij)*refk + ! diffuse tran times interface scattering + ! fdifdn(k,ij) = trndif(k,ij)*refk + + ! dfdir = fdirdn - fdirup + dfdir(k,ij) = trndir(k,ij) & + + (trntdr(k,ij)-trndir(k,ij)) * (c1 - rupdif(k,ij)) * refk & + - trndir(k,ij)*rupdir(k,ij) * (c1 - rdndif(k,ij)) * refk + if (dfdir(k,ij) < puny) dfdir(k,ij) = c0 !echmod necessary? + ! dfdif = fdifdn - fdifup + dfdif(k,ij) = trndif(k,ij) * (c1 - rupdif(k,ij)) * refk + if (dfdif(k,ij) < puny) dfdif(k,ij) = c0 !echmod necessary? + enddo ! k + enddo ! ij + + ! 
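The interface factor refk = c1/(c1 - rdndif*rupdif) used above is the closed form of the multiple-scattering series between the stack above and the stack below interface k; each round trip (reflection off the layers below, then off the layers above) multiplies the flux by rdndif*rupdif, so

      refk = 1 + (rdndif*rupdif) + (rdndif*rupdif)**2 + ... = 1/(1 - rdndif*rupdif)

dfdir and dfdif are then the net (downward minus upward) direct-beam and diffuse fluxes at each interface, as the commented-out fdirup/fdirdn/fdifup/fdifdn expressions indicate.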
calculate final surface albedos and fluxes- + ! all absorbed flux above ksrf is included in surface absorption + + if( ns == 1) then ! visible + + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + swdr = swvdr(i,j) + swdf = swvdf(i,j) + avdr(ij) = rupdir(0,ij) + avdf(ij) = rupdif(0,ij) + + tmp_0 = dfdir(0 ,ij)*swdr + dfdif(0 ,ij)*swdf + tmp_ks = dfdir(ksrf ,ij)*swdr + dfdif(ksrf ,ij)*swdf + tmp_kl = dfdir(klevp,ij)*swdr + dfdif(klevp,ij)*swdf + + ! for layer biology: save visible only + do k = nslyr+2, klevp ! Start at DL layer of ice after SSL scattering + fthrul(ij,k-nslyr-1) = dfdir(k,ij)*swdr + dfdif(k,ij)*swdf + enddo + + fsfc(ij) = fsfc(ij) + tmp_0 - tmp_ks + fint(ij) = fint(ij) + tmp_ks - tmp_kl + fthru(ij) = fthru(ij) + tmp_kl + + ! if snow covered ice, set snow internal absorption; else, Sabs=0 + if( srftyp == 1 ) then + ki = 0 + do k=1,nslyr + ! skip snow SSL, since SSL absorption included in the surface + ! absorption fsfc above + km = k + kp = km + 1 + ki = ki + 1 + Sabs(ij,ki) = Sabs(ij,ki) & + + dfdir(km,ij)*swdr + dfdif(km,ij)*swdf & + - (dfdir(kp,ij)*swdr + dfdif(kp,ij)*swdf) + enddo ! k + endif + + ! complex indexing to insure proper absorptions for sea ice + ki = 0 + do k=nslyr+2,nslyr+1+nilyr + ! for bare ice, DL absorption for sea ice layer 1 + km = k + kp = km + 1 + ! modify for top sea ice layer for snow over sea ice + if( srftyp == 1 ) then + ! must add SSL and DL absorption for sea ice layer 1 + if( k == nslyr+2 ) then + km = k - 1 + kp = km + 2 + endif + endif + ki = ki + 1 + Iabs(ij,ki) = Iabs(ij,ki) & + + dfdir(km,ij)*swdr + dfdif(km,ij)*swdf & + - (dfdir(kp,ij)*swdr + dfdif(kp,ij)*swdf) + enddo ! k + enddo ! ij + + else !if(ns > 1) then ! near IR + + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + + swdr = swidr(i,j) + swdf = swidf(i,j) + + ! let fr1 = alb_1*swd*wght1 and fr2 = alb_2*swd*wght2 be the ns=2,3 + ! reflected fluxes respectively, where alb_1, alb_2 are the band + ! albedos, swd = nir incident shortwave flux, and wght1, wght2 are + ! the 2,3 band weights. thus, the total reflected flux is: + ! fr = fr1 + fr2 = alb_1*swd*wght1 + alb_2*swd*wght2 hence, the + ! 2,3 nir band albedo is alb = fr/swd = alb_1*wght1 + alb_2*wght2 + + aidr(ij) = aidr(ij) + rupdir(0,ij)*wghtns(ij,ns) + aidf(ij) = aidf(ij) + rupdif(0,ij)*wghtns(ij,ns) + + tmp_0 = dfdir(0 ,ij)*swdr + dfdif(0 ,ij)*swdf + tmp_ks = dfdir(ksrf ,ij)*swdr + dfdif(ksrf ,ij)*swdf + tmp_kl = dfdir(klevp,ij)*swdr + dfdif(klevp,ij)*swdf + + tmp_0 = tmp_0 * wghtns(ij,ns) + tmp_ks = tmp_ks * wghtns(ij,ns) + tmp_kl = tmp_kl * wghtns(ij,ns) + + fsfc(ij) = fsfc(ij) + tmp_0 - tmp_ks + fint(ij) = fint(ij) + tmp_ks - tmp_kl + fthru(ij) = fthru(ij) + tmp_kl + + ! if snow covered ice, set snow internal absorption; else, Sabs=0 + if( srftyp == 1 ) then + ki = 0 + do k=1,nslyr + ! skip snow SSL, since SSL absorption included in the surface + ! absorption fsfc above + km = k + kp = km + 1 + ki = ki + 1 + Sabs(ij,ki) = Sabs(ij,ki) & + + (dfdir(km,ij)*swdr + dfdif(km,ij)*swdf & + - (dfdir(kp,ij)*swdr + dfdif(kp,ij)*swdf)) & + * wghtns(ij,ns) + enddo ! k + endif + + ! complex indexing to insure proper absorptions for sea ice + ki = 0 + do k=nslyr+2,nslyr+1+nilyr + ! for bare ice, DL absorption for sea ice layer 1 + km = k + kp = km + 1 + ! modify for top sea ice layer for snow over sea ice + if( srftyp == 1 ) then + ! 
must add SSL and DL absorption for sea ice layer 1 + if( k == nslyr+2 ) then + km = k - 1 + kp = km + 2 + endif + endif + ki = ki + 1 + Iabs(ij,ki) = Iabs(ij,ki) & + + (dfdir(km,ij)*swdr + dfdif(km,ij)*swdf & + - (dfdir(kp,ij)*swdr + dfdif(kp,ij)*swdf)) & + * wghtns(ij,ns) + enddo ! k + enddo ! ij + + endif ! ns = 1, ns > 1 + + enddo ! end spectral loop ns + + ! accumulate fluxes over bare sea ice +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + alvdr(i,j) = avdr(ij) + alvdf(i,j) = avdf(ij) + alidr(i,j) = aidr(ij) + alidf(i,j) = aidf(ij) + fswsfc(i,j) = fswsfc(i,j) + fsfc(ij) *fi(i,j) + fswint(i,j) = fswint(i,j) + fint(ij) *fi(i,j) + fswthru(i,j) = fswthru(i,j) + fthru(ij)*fi(i,j) + enddo ! ij + + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + Sswabs(i,j,k) = Sswabs(i,j,k) + Sabs(ij,k)*fi(i,j) + enddo ! ij + enddo ! k + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + Iswabs(i,j,k) = Iswabs(i,j,k) + Iabs(ij,k)*fi(i,j) + + ! bgc layer + fswpenl(i,j,k) = fswpenl(i,j,k) + fthrul(ij,k)* fi(i,j) + if (k == nilyr) then + fswpenl(i,j,k+1) = fswpenl(i,j,k+1) + fthrul(ij,k+1)*fi(i,j) + endif + enddo ! ij + enddo ! k + + !---------------------------------------------------------------- + ! if ice has zero heat capacity, no SW can be absorbed + ! in the ice/snow interior, so add to surface absorption. + ! Note: nilyr = nslyr = 1 for this case + !---------------------------------------------------------------- + + if (.not. heat_capacity) then + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + + ! SW absorbed at snow/ice surface + fswsfc(i,j) = fswsfc(i,j) + Iswabs(i,j,1) + Sswabs(i,j,1) + + ! SW absorbed in ice interior + fswint(i,j) = c0 + Iswabs(i,j,1) = c0 + Sswabs(i,j,1) = c0 + + enddo ! ij + + endif ! heat_capacity + + end subroutine compute_dEdd + +!======================================================================= +! +! Given input vertical profiles of optical properties, evaluate the +! monochromatic Delta-Eddington solution. +! +! author: Bruce P. Briegleb, NCAR +! 2013: E Hunke merged with NCAR version + subroutine solution_dEdd & + (nx_block, ny_block, & + icells_DE, indxi_DE, indxj_DE, coszen, srftyp, & + tau, w0, g, albodr, albodf, & + trndir, trntdr, trndif, rupdir, rupdif, & + rdndif) + + integer (kind=int_kind), & + intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells_DE ! number of sea ice grid cells for surface type + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi_DE, & ! compressed indices for sea ice cells for surface type + indxj_DE + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + coszen ! cosine solar zenith angle + + integer (kind=int_kind), intent(in) :: & + srftyp ! surface type over ice: (0=air, 1=snow, 2=pond) + + integer (kind=int_kind), parameter :: & + klev = nslyr + nilyr + 1 , & ! number of radiation layers - 1 + klevp = klev + 1 ! number of radiation interfaces - 1 + + real (kind=dbl_kind), dimension(0:klev,icells_DE), & + intent(in) :: & + tau , & ! layer extinction optical depth + w0 , & ! layer single scattering albedo + g ! layer asymmetry parameter + + real (kind=dbl_kind), dimension(icells_DE), & + intent(in) :: & + albodr , & ! 
ocean albedo to direct rad + albodf ! ocean albedo to diffuse rad + + ! following arrays are defined at model interfaces; 0 is the top of the + ! layer above the sea ice; klevp is the sea ice/ocean interface. + real (kind=dbl_kind), dimension (0:klevp,icells_DE), & + intent(out) :: & + trndir , & ! solar beam down transmission from top + trntdr , & ! total transmission to direct beam for layers above + trndif , & ! diffuse transmission to diffuse beam for layers above + rupdir , & ! reflectivity to direct radiation for layers below + rupdif , & ! reflectivity to diffuse radiation for layers below + rdndif ! reflectivity to diffuse radiation for layers above + +!----------------------------------------------------------------------- +! +! Delta-Eddington solution for snow/air/pond over sea ice +! +! Generic solution for a snow/air/pond input column of klev+1 layers, +! with srftyp determining at what interface fresnel refraction occurs. +! +! Computes layer reflectivities and transmissivities, from the top down +! to the lowest interface using the Delta-Eddington solutions for each +! layer; combines layers from top down to lowest interface, and from the +! lowest interface (underlying ocean) up to the top of the column. +! +! Note that layer diffuse reflectivity and transmissivity are computed +! by integrating the direct solution over several gaussian angles. This is +! because the diffuse reflectivity expression sometimes is negative, +! but the direct reflectivity is always well-behaved. We assume isotropic +! radiation in the upward and downward hemispheres for this integration. +! +! Assumes monochromatic (spectrally uniform) properties across a band +! for the input optical parameters. +! +! If total transmission of the direct beam to the interface above a particular +! layer is less than trmin, then no further Delta-Eddington solutions are +! evaluated for layers below. +! +! The following describes how refraction is handled in the calculation. +! +! First, we assume that radiation is refracted when entering either +! sea ice at the base of the surface scattering layer, or water (i.e. melt +! pond); we assume that radiation does not refract when entering snow, nor +! upon entering sea ice from a melt pond, nor upon entering the underlying +! ocean from sea ice. +! +! To handle refraction, we define a "fresnel" layer, which physically +! is of negligible thickness and is non-absorbing, and which can be combined +! with any sea ice layer or with the top of a melt pond. The fresnel layer +! accounts for refraction of the direct beam and the associated reflection +! and transmission of solar radiation. A fresnel layer is combined with the +! top of a melt pond, or with the surface scattering layer of sea ice if no +! melt pond lies over it. +! +! Some caution must be exercised for the fresnel layer, because any layer +! with which it is combined is no longer a homogeneous layer, as are all +! other individual layers. For all other layers, for example, the direct and +! diffuse reflectivities/transmissivities (R/T) are the same for radiation +! from above as for radiation from below; this is the meaning of homogeneous. +! For the fresnel layer this is not so, and its R/T must be distinguished for +! radiation from above and from below. For generality, we therefore treat +! all layers to be combined as inhomogeneous. +! +!----------------------------------------------------------------------- + + ! local variables + + integer (kind=int_kind) :: & + kfrsnl ! radiation interface index for fresnel layer + + ! 
following variables are defined for each layer; 0 refers to the top + ! layer. In general we must distinguish directions above and below in + ! the diffuse reflectivity and transmissivity, as layers are not assumed + ! to be homogeneous (apart from the single layer Delta-Edd solutions); + ! the direct is always from above. + real (kind=dbl_kind), dimension (0:klev) :: & + rdir , & ! layer reflectivity to direct radiation + rdif_a , & ! layer reflectivity to diffuse radiation from above + rdif_b , & ! layer reflectivity to diffuse radiation from below + tdir , & ! layer transmission to direct radiation (solar beam + diffuse) + tdif_a , & ! layer transmission to diffuse radiation from above + tdif_b , & ! layer transmission to diffuse radiation from below + trnlay ! solar beam transm for layer (direct beam only) + + integer (kind=int_kind) :: & + i , & ! longitude index + j , & ! latitude index + ij , & ! longitude/latitude index + k ! level index + + real (kind=dbl_kind), parameter :: & + trmin = 0.001_dbl_kind ! minimum total transmission allowed + ! total transmission is that due to the direct beam; i.e. it includes + ! both the directly transmitted solar beam and the diffuse downwards + ! transmitted radiation resulting from scattering out of the direct beam + real (kind=dbl_kind) :: & + tautot , & ! layer optical depth + wtot , & ! layer single scattering albedo + gtot , & ! layer asymmetry parameter + ftot , & ! layer forward scattering fraction + ts , & ! layer scaled extinction optical depth + ws , & ! layer scaled single scattering albedo + gs , & ! layer scaled asymmetry parameter + rintfc , & ! reflection (multiple) at an interface + refkp1 , & ! interface multiple scattering for k+1 + refkm1 , & ! interface multiple scattering for k-1 + tdrrdir , & ! direct tran times layer direct ref + tdndif ! total down diffuse = tot tran - direct tran + + ! perpendicular and parallel relative to plane of incidence and scattering + real (kind=dbl_kind) :: & + R1 , & ! perpendicular polarization reflection amplitude + R2 , & ! parallel polarization reflection amplitude + T1 , & ! perpendicular polarization transmission amplitude + T2 , & ! parallel polarization transmission amplitude + Rf_dir_a , & ! fresnel reflection to direct radiation + Tf_dir_a , & ! fresnel transmission to direct radiation + Rf_dif_a , & ! fresnel reflection to diff radiation from above + Rf_dif_b , & ! fresnel reflection to diff radiation from below + Tf_dif_a , & ! fresnel transmission to diff radiation from above + Tf_dif_b ! fresnel transmission to diff radiation from below + + ! refractive index for sea ice, water; pre-computed, band-independent, + ! diffuse fresnel reflectivities + real (kind=dbl_kind), parameter :: & + refindx = 1.310_dbl_kind , & ! refractive index of sea ice (water also) + cp063 = 0.063_dbl_kind , & ! diffuse fresnel reflectivity from above + cp455 = 0.455_dbl_kind ! diffuse fresnel reflectivity from below + + real (kind=dbl_kind), dimension(icells_DE) :: & + mu0 , & ! cosine solar zenith angle incident + mu0nij ! cosine solar zenith angle in medium below fresnel level + + real (kind=dbl_kind) :: & + mu0n ! cosine solar zenith angle in medium + + real (kind=dbl_kind) :: & + alpha , & ! term in direct reflectivity and transmissivity + gamma , & ! term in direct reflectivity and transmissivity + el , & ! term in alpha,gamma,n,u + taus , & ! scaled extinction optical depth + omgs , & ! scaled single particle scattering albedo + asys , & ! scaled asymmetry parameter + u , & ! 
term in diffuse reflectivity and transmissivity + n , & ! term in diffuse reflectivity and transmissivity + lm , & ! temporary for el + mu , & ! cosine solar zenith for either snow or water + ne ! temporary for n + + real (kind=dbl_kind) :: & + w , & ! dummy argument for statement function + uu , & ! dummy argument for statement function + gg , & ! dummy argument for statement function + e , & ! dummy argument for statement function + f , & ! dummy argument for statement function + t , & ! dummy argument for statement function + et ! dummy argument for statement function + + real (kind=dbl_kind) :: & + alp , & ! temporary for alpha + gam , & ! temporary for gamma + ue , & ! temporary for u + extins , & ! extinction + amg , & ! alp - gam + apg ! alp + gam + + integer (kind=int_kind), parameter :: & + ngmax = 8 ! number of gaussian angles in hemisphere + + real (kind=dbl_kind), dimension (ngmax), parameter :: & + gauspt & ! gaussian angles (radians) + = (/ .9894009_dbl_kind, .9445750_dbl_kind, & + .8656312_dbl_kind, .7554044_dbl_kind, & + .6178762_dbl_kind, .4580168_dbl_kind, & + .2816036_dbl_kind, .0950125_dbl_kind/), & + gauswt & ! gaussian weights + = (/ .0271525_dbl_kind, .0622535_dbl_kind, & + .0951585_dbl_kind, .1246290_dbl_kind, & + .1495960_dbl_kind, .1691565_dbl_kind, & + .1826034_dbl_kind, .1894506_dbl_kind/) + + integer (kind=int_kind) :: & + ng ! gaussian integration index + + real (kind=dbl_kind) :: & + gwt , & ! gaussian weight + swt , & ! sum of weights + trn , & ! layer transmission + rdr , & ! rdir for gaussian integration + tdr , & ! tdir for gaussian integration + smr , & ! accumulator for rdif gaussian integration + smt ! accumulator for tdif gaussian integration + + ! Delta-Eddington solution expressions + alpha(w,uu,gg,e) = p75*w*uu*((c1 + gg*(c1-w))/(c1 - e*e*uu*uu)) + gamma(w,uu,gg,e) = p5*w*((c1 + c3*gg*(c1-w)*uu*uu) & + / (c1-e*e*uu*uu)) + n(uu,et) = ((uu+c1)*(uu+c1)/et ) - ((uu-c1)*(uu-c1)*et) + u(w,gg,e) = c1p5*(c1 - w*gg)/e + el(w,gg) = sqrt(c3*(c1-w)*(c1 - w*gg)) + taus(w,f,t) = (c1 - w*f)*t + omgs(w,f) = (c1 - f)*w/(c1 - w*f) + asys(gg,f) = (gg - f)/(c1 - f) + +!----------------------------------------------------------------------- + + ! initialize all output to 0 + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + + do k = 0, klevp + trndir(k,ij) = c0 + trntdr(k,ij) = c0 + trndif(k,ij) = c0 + rupdir(k,ij) = c0 + rupdif(k,ij) = c0 + rdndif(k,ij) = c0 + enddo + + ! initialize top interface of top layer + trndir(0,ij) = c1 + trntdr(0,ij) = c1 + trndif(0,ij) = c1 + rdndif(0,ij) = c0 + + ! mu0 is cosine solar zenith angle above the fresnel level; make + ! sure mu0 is large enough for stable and meaningful radiation + ! solution: .01 is like sun just touching horizon with its lower edge + mu0(ij) = max(coszen(i,j),p01) + + ! mu0n is cosine solar zenith angle used to compute the layer + ! Delta-Eddington solution; it is initially computed to be the + ! value below the fresnel level, i.e. the cosine solar zenith + ! angle below the fresnel level for the refracted solar beam: + mu0nij(ij) = sqrt(c1-((c1-mu0(ij)**2)/(refindx*refindx))) + + enddo ! ij + + ! compute level of fresnel refraction + ! if ponded sea ice, fresnel level is the top of the pond. + kfrsnl = 0 + ! if snow over sea ice or bare sea ice, fresnel level is + ! at base of sea ice SSL (and top of the sea ice DL); the + ! snow SSL counts for one, then the number of snow layers, + ! then the sea ice SSL which also counts for one: + if( srftyp < 2 ) kfrsnl = nslyr + 2 + + ! 
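The refracted cosine computed above, mu0nij = sqrt(1 - (1 - mu0**2)/refindx**2), is Snell's law written for cosines: with sin(theta) = sqrt(1 - mu0**2) and sin(theta_n) = sin(theta)/refindx, the in-medium cosine follows directly. As an illustrative case (not a value used in the patch), refindx = 1.31 and mu0 = 0.5 give

      mu0n = sqrt(1 - 0.75/1.7161) = sqrt(0.563) = 0.750 (to three figures)

so the refracted beam travels closer to the vertical than the incident beam; kfrsnl marks the interface from which this steeper cosine applies.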
proceed down one layer at a time; if the total transmission to + ! the interface just above a given layer is less than trmin, then no + ! Delta-Eddington computation for that layer is done. + + do ij = 1, icells_DE + i = indxi_DE(ij) + j = indxj_DE(ij) + + ! begin main level loop + do k = 0, klev + + ! initialize all layer apparent optical properties to 0 + rdir (k) = c0 + rdif_a(k) = c0 + rdif_b(k) = c0 + tdir (k) = c0 + tdif_a(k) = c0 + tdif_b(k) = c0 + trnlay(k) = c0 + + ! compute next layer Delta-eddington solution only if total transmission + ! of radiation to the interface just above the layer exceeds trmin. + + if (trntdr(k,ij) > trmin ) then + + ! calculation over layers with penetrating radiation + + tautot = tau(k,ij) + wtot = w0(k,ij) + gtot = g(k,ij) + ftot = gtot*gtot + + ts = taus(wtot,ftot,tautot) + ws = omgs(wtot,ftot) + gs = asys(gtot,ftot) + lm = el(ws,gs) + ue = u(ws,gs,lm) + + mu0n = mu0nij(ij) + ! if level k is above fresnel level and the cell is non-pond, use the + ! non-refracted beam instead + if( srftyp < 2 .and. k < kfrsnl ) mu0n = mu0(ij) + + extins = max(exp_min, exp(-lm*ts)) + ne = n(ue,extins) + + ! first calculation of rdif, tdif using Delta-Eddington formulas + +! rdif_a(k) = (ue+c1)*(ue-c1)*(c1/extins - extins)/ne + rdif_a(k) = (ue**2-c1)*(c1/extins - extins)/ne + tdif_a(k) = c4*ue/ne + + ! evaluate rdir,tdir for direct beam + trnlay(k) = max(exp_min, exp(-ts/mu0n)) + alp = alpha(ws,mu0n,gs,lm) + gam = gamma(ws,mu0n,gs,lm) + apg = alp + gam + amg = alp - gam + rdir(k) = apg*rdif_a(k) + amg*(tdif_a(k)*trnlay(k) - c1) + tdir(k) = apg*tdif_a(k) + (amg* rdif_a(k)-apg+c1)*trnlay(k) + + ! recalculate rdif,tdif using direct angular integration over rdir,tdir, + ! since Delta-Eddington rdif formula is not well-behaved (it is usually + ! biased low and can even be negative); use ngmax angles and gaussian + ! integration for most accuracy: + R1 = rdif_a(k) ! use R1 as temporary + T1 = tdif_a(k) ! use T1 as temporary + swt = c0 + smr = c0 + smt = c0 + do ng=1,ngmax + mu = gauspt(ng) + gwt = gauswt(ng) + swt = swt + mu*gwt + trn = max(exp_min, exp(-ts/mu)) + alp = alpha(ws,mu,gs,lm) + gam = gamma(ws,mu,gs,lm) + apg = alp + gam + amg = alp - gam + rdr = apg*R1 + amg*T1*trn - amg + tdr = apg*T1 + amg*R1*trn - apg*trn + trn + smr = smr + mu*rdr*gwt + smt = smt + mu*tdr*gwt + enddo ! ng + rdif_a(k) = smr/swt + tdif_a(k) = smt/swt + + ! homogeneous layer + rdif_b(k) = rdif_a(k) + tdif_b(k) = tdif_a(k) + + ! add fresnel layer to top of desired layer if either + ! air or snow overlies ice; we ignore refraction in ice + ! if a melt pond overlies it: + + if( k == kfrsnl ) then + ! compute fresnel reflection and transmission amplitudes + ! for two polarizations: 1=perpendicular and 2=parallel to + ! the plane containing incident, reflected and refracted rays. + R1 = (mu0(ij) - refindx*mu0n) / & + (mu0(ij) + refindx*mu0n) + R2 = (refindx*mu0(ij) - mu0n) / & + (refindx*mu0(ij) + mu0n) + T1 = c2*mu0(ij) / & + (mu0(ij) + refindx*mu0n) + T2 = c2*mu0(ij) / & + (refindx*mu0(ij) + mu0n) + + ! unpolarized light for direct beam + Rf_dir_a = p5 * (R1*R1 + R2*R2) + Tf_dir_a = p5 * (T1*T1 + T2*T2)*refindx*mu0n/mu0(ij) + + ! precalculated diffuse reflectivities and transmissivities + ! for incident radiation above and below fresnel layer, using + ! the direct albedos and accounting for complete internal + ! reflection from below; precalculated because high order + ! number of gaussian points (~256) is required for convergence: + + ! above + Rf_dif_a = cp063 + Tf_dif_a = c1 - Rf_dif_a + ! 
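Rf_dir_a and Tf_dir_a above are the unpolarized Fresnel reflectance and transmittance; the factor refindx*mu0n/mu0 in Tf_dir_a converts the transmitted amplitude into an energy flux so that the pair sums to one. A small stand-alone check at normal incidence (an illustrative case, not a value used in the patch):

   program fresnel_check
      implicit none
      real, parameter :: refindx = 1.31
      real :: mu0, mu0n, R1, R2, T1, T2, Rf, Tf
      mu0  = 1.0                                        ! normal incidence
      mu0n = sqrt(1.0 - (1.0 - mu0**2)/refindx**2)
      R1 = (mu0 - refindx*mu0n)/(mu0 + refindx*mu0n)    ! perpendicular amplitude
      R2 = (refindx*mu0 - mu0n)/(refindx*mu0 + mu0n)    ! parallel amplitude
      T1 = 2.0*mu0/(mu0 + refindx*mu0n)
      T2 = 2.0*mu0/(refindx*mu0 + mu0n)
      Rf = 0.5*(R1*R1 + R2*R2)
      Tf = 0.5*(T1*T1 + T2*T2)*refindx*mu0n/mu0
      print *, Rf, Tf, Rf + Tf                          ! ~0.018, ~0.982, 1.0
   end program fresnel_check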
below + Rf_dif_b = cp455 + Tf_dif_b = c1 - Rf_dif_b + + ! the k = kfrsnl layer properties are updated to combined + ! the fresnel (refractive) layer, always taken to be above + ! the present layer k (i.e. be the top interface): + + rintfc = c1 / (c1-Rf_dif_b*rdif_a(k)) + tdir(k) = Tf_dir_a*tdir(k) + & + Tf_dir_a*rdir(k) * & + Rf_dif_b*rintfc*tdif_a(k) + rdir(k) = Rf_dir_a + & + Tf_dir_a*rdir(k) * & + rintfc*Tf_dif_b + rdif_a(k) = Rf_dif_a + & + Tf_dif_a*rdif_a(k) * & + rintfc*Tf_dif_b + rdif_b(k) = rdif_b(k) + & + tdif_b(k)*Rf_dif_b * & + rintfc*tdif_a(k) + tdif_a(k) = tdif_a(k)*rintfc*Tf_dif_a + tdif_b(k) = tdif_b(k)*rintfc*Tf_dif_b + + ! update trnlay to include fresnel transmission + trnlay(k) = Tf_dir_a*trnlay(k) + + endif ! k = kfrsnl + + endif ! trntdr(k,ij) > trmin + + ! initialize current layer properties to zero; only if total + ! transmission to the top interface of the current layer exceeds the + ! minimum, will these values be computed below: + ! Calculate the solar beam transmission, total transmission, and + ! reflectivity for diffuse radiation from below at interface k, + ! the top of the current layer k: + ! + ! layers interface + ! + ! --------------------- k-1 + ! k-1 + ! --------------------- k + ! k + ! --------------------- + ! For k = klevp + ! note that we ignore refraction between sea ice and underlying ocean: + ! + ! layers interface + ! + ! --------------------- k-1 + ! k-1 + ! --------------------- k + ! \\\\\\\ ocean \\\\\\\ + + trndir(k+1,ij) = trndir(k,ij)*trnlay(k) + refkm1 = c1/(c1 - rdndif(k,ij)*rdif_a(k)) + tdrrdir = trndir(k,ij)*rdir(k) + tdndif = trntdr(k,ij) - trndir(k,ij) + trntdr(k+1,ij) = trndir(k,ij)*tdir(k) + & + (tdndif + tdrrdir*rdndif(k,ij))*refkm1*tdif_a(k) + rdndif(k+1,ij) = rdif_b(k) + & + (tdif_b(k)*rdndif(k,ij)*refkm1*tdif_a(k)) + trndif(k+1,ij) = trndif(k,ij)*refkm1*tdif_a(k) + + enddo ! k end main level loop + + ! compute reflectivity to direct and diffuse radiation for layers + ! below by adding succesive layers starting from the underlying + ! ocean and working upwards: + ! + ! layers interface + ! + ! --------------------- k + ! k + ! --------------------- k+1 + ! k+1 + ! --------------------- + + rupdir(klevp,ij) = albodr(ij) + rupdif(klevp,ij) = albodf(ij) + + do k=klev,0,-1 + ! interface scattering + refkp1 = c1/( c1 - rdif_b(k)*rupdif(k+1,ij)) + ! dir from top layer plus exp tran ref from lower layer, interface + ! scattered and tran thru top layer from below, plus diff tran ref + ! from lower layer with interface scattering tran thru top from below + rupdir(k,ij) = rdir(k) & + + ( trnlay(k) *rupdir(k+1,ij) & + + (tdir(k)-trnlay(k))*rupdif(k+1,ij))*refkp1*tdif_b(k) + ! dif from top layer from above, plus dif tran upwards reflected and + ! interface scattered which tran top from below + rupdif(k,ij) = rdif_a(k) + tdif_a(k)*rupdif(k+1,ij)*refkp1*tdif_b(k) + enddo ! k + enddo ! ij + + end subroutine solution_dEdd + +!======================================================================= +! +! Set snow horizontal coverage, density and grain radius diagnostically +! for the Delta-Eddington solar radiation method. +! +! author: Bruce P. Briegleb, NCAR +! 2013: E Hunke merged with NCAR version + + subroutine shortwave_dEdd_set_snow(nx_block, ny_block, & + icells, & + indxi, indxj, & + aice, vsno, & + Tsfc, fs, hs, & + rhosnw, rsnw) + + use ice_itd, only: hs_min + use ice_meltpond_cesm, only: hs0 + + integer (kind=int_kind), & + intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! 
number of ice-covered grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi , & ! compressed indices for ice-covered cells + indxj + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aice , & ! concentration of ice + vsno , & ! volume of snow + Tsfc ! surface temperature + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + fs , & ! horizontal coverage of snow + hs ! snow depth + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(out) :: & + rhosnw , & ! density in snow layer (kg/m3) + rsnw ! grain radius in snow layer (micro-meters) + + ! local variables + + integer (kind=int_kind) :: & + i , & ! longitude index + j , & ! latitude index + ij , & ! horizontal index, combines i and j loops + ks ! snow vertical index + + real (kind=dbl_kind) :: & + fT , & ! piecewise linear function of surface temperature + dTs , & ! difference of Tsfc and Timelt + rsnw_nm ! actual used nonmelt snow grain radius (micro-meters) + + real (kind=dbl_kind), parameter :: & + ! units for the following are 1.e-6 m (micro-meters) + rsnw_fresh = 100._dbl_kind, & ! freshly-fallen snow grain radius + rsnw_nonmelt = 500._dbl_kind, & ! nonmelt snow grain radius + rsnw_sig = 250._dbl_kind ! assumed sigma for snow grain radius + +!----------------------------------------------------------------------- + + fs(:,:) = c0 + hs(:,:) = c0 + do ks = 1, nslyr + do j = 1, ny_block + do i = 1, nx_block + rhosnw(i,j,ks) = c0 + rsnw(i,j,ks) = c0 + enddo + enddo + enddo + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! set snow horizontal fraction + hs(i,j) = vsno(i,j) / aice(i,j) + + if (hs(i,j) >= hs_min) then + fs(i,j) = c1 + if (hs0 > puny) fs(i,j) = min(hs(i,j)/hs0, c1) + endif + + ! bare ice, temperature dependence + dTs = Timelt - Tsfc(i,j) + fT = -min(dTs/dT_mlt-c1,c0) + ! tune nonmelt snow grain radius if desired: note that + ! the sign is negative so that if R_snw is 1, then the + ! snow grain radius is reduced and thus albedo increased. + rsnw_nm = rsnw_nonmelt - R_snw*rsnw_sig + rsnw_nm = max(rsnw_nm, rsnw_fresh) + rsnw_nm = min(rsnw_nm, rsnw_mlt) + do ks = 1, nslyr + ! snow density ccsm3 constant value + rhosnw(i,j,ks) = rhos + ! snow grain radius between rsnw_nonmelt and rsnw_mlt + rsnw(i,j,ks) = rsnw_nm + (rsnw_mlt-rsnw_nm)*fT + rsnw(i,j,ks) = max(rsnw(i,j,ks), rsnw_fresh) + rsnw(i,j,ks) = min(rsnw(i,j,ks), rsnw_mlt) + enddo ! ks + enddo ! ij + + end subroutine shortwave_dEdd_set_snow + +!======================================================================= +! +! Set pond fraction and depth diagnostically for +! the Delta-Eddington solar radiation method. +! +! author: Bruce P. Briegleb, NCAR +! 2013: E Hunke merged with NCAR version + + subroutine shortwave_dEdd_set_pond(nx_block, ny_block, & + icells, & + indxi, indxj, & + Tsfc, & + fs, fp, & + hp) + + integer (kind=int_kind), & + intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of ice-covered grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi , & ! compressed indices for ice-covered cells + indxj + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + Tsfc , & ! surface temperature + fs ! horizontal coverage of snow + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + fp , & ! pond fractional coverage (0 to 1) + hp ! pond depth (m) + + ! 
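Both diagnostic routines here rest on the same piecewise-linear ramp fT = -min(dTs/dT - 1, 0): it is 0 while the surface is more than dT degrees below melting and rises linearly to 1 at Tsfc = Timelt. shortwave_dEdd_set_snow uses it (with dT_mlt) to slide the grain radius from the tuned non-melt value toward rsnw_mlt; shortwave_dEdd_set_pond below uses it (with dT_pnd = 1) to set both fp and hp to 0.3*fT*(1 - fs). As a worked value, dTs = 0.5*dT gives

      fT = -min(0.5 - 1, 0) = 0.5

that is, a grain radius (or pond fraction and depth) halfway between the two limits.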
local variables + + integer (kind=int_kind) :: & + i , & ! longitude index + j , & ! latitude index + ij ! horizontal index, combines i and j loops + + real (kind=dbl_kind) :: & + fT , & ! piecewise linear function of surface temperature + dTs ! difference of Tsfc and Timelt + + real (kind=dbl_kind), parameter :: & + dT_pnd = c1 ! change in temp for pond fraction and depth + +!----------------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + fp(i,j) = c0 + hp(i,j) = c0 + enddo + enddo + + ! find pond fraction and depth for ice points +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + ! bare ice, temperature dependence + dTs = Timelt - Tsfc(i,j) + fT = -min(dTs/dT_pnd-c1,c0) + ! pond + fp(i,j) = 0.3_dbl_kind*fT*(c1-fs(i,j)) + hp(i,j) = 0.3_dbl_kind*fT*(c1-fs(i,j)) + enddo ! ij + + end subroutine shortwave_dEdd_set_pond + +! End Delta-Eddington shortwave method +!======================================================================= + + end module ice_shortwave + +!======================================================================= diff --git a/source/ice_spacecurve.F90 b/source/ice_spacecurve.F90 new file mode 100755 index 00000000..f9fc962e --- /dev/null +++ b/source/ice_spacecurve.F90 @@ -0,0 +1,1757 @@ +!BOP +! !MODULE: ice_spacecurve + +module ice_spacecurve + +! !DESCRIPTION: +! This module contains routines necessary to +! create space-filling curves. +! +! !REVISION HISTORY: +! SVN:$Id: ice_spacecurve.F90 825 2014-08-29 15:37:09Z eclare $ +! +! author: John Dennis, NCAR + +! !USES: + use ice_kinds_mod + use ice_communicate, only: my_task, master_task + use ice_exit, only: abort_ice + use ice_fileunits + + implicit none + +! !PUBLIC TYPES: + + type, public :: factor_t + integer(int_kind) :: numfact ! The # of factors for a value + integer(int_kind), dimension(:),pointer :: factors ! The factors + integer(int_kind), dimension(:), pointer :: used + end type + +! !PUBLIC MEMBER FUNCTIONS: + + public :: GenSpaceCurve, & + IsLoadBalanced + + public :: Factor, & + IsFactorable, & + PrintFactor, & + ProdFactor, & + MatchFactor + +! !PRIVATE MEMBER FUNCTIONS: + + private :: map, & + PeanoM, & + Hilbert, & + Cinco, & + GenCurve + + private :: FirstFactor, & + FindandMark + + integer(int_kind), dimension(:,:), allocatable :: & + dir, &! direction to move along each level + ordered ! the ordering + integer(int_kind), dimension(:), allocatable :: & + pos ! position along each of the axes + + integer(int_kind) :: & + maxdim, &! dimensionality of entire space + vcnt ! visitation count + + logical :: verbose=.FALSE. + + type (factor_t), public,save :: fact ! stores the factorization + +!EOP +!EOC +!*********************************************************************** + +contains + +!*********************************************************************** +!BOP +! !IROUTINE: Cinco +! !INTERFACE: + + recursive function Cinco(l,type,ma,md,ja,jd) result(ierr) + +! !DESCRIPTION: +! This subroutine implements a Cinco space-filling curve. +! Cinco curves connect a Nb x Nb block of points where +! +! Nb = 5^p +! +! !REVISION HISTORY: +! same as module +! + + +! !INPUT PARAMETERS + + integer(int_kind), intent(in) :: & + l, & ! level of the space-filling curve + type, & ! type of SFC curve + ma, & ! Major axis [0,1] + md, & ! direction of major axis [-1,1] + ja, & ! joiner axis [0,1] + jd ! direction of joiner axis [-1,1] + +! !OUTPUT PARAMETERS + + integer(int_kind) :: ierr ! 
error return code + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer(int_kind) :: & + lma, &! local major axis (next level) + lmd, &! local major direction (next level) + lja, &! local joiner axis (next level) + ljd, &! local joiner direction (next level) + ltype, &! type of SFC on next level + ll ! next level down + + logical :: debug = .FALSE. + +!----------------------------------------------------------------------- + ll = l + if(ll .gt. 1) ltype = fact%factors(ll-1) ! Set the next type of space curve + + !-------------------------------------------------------------- + ! Position [0,0] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,21) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'Cinco: After Position [0,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,0] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,22) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [1,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,23) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [2,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,1] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,24) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [2,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,2] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = ma + ljd = -md + + if(ll .gt. 1) then + if(debug) write(*,25) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [2,2] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,2] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,26) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [1,2] ',pos + endif + + !-------------------------------------------------------------- + ! 
Position [1,1] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,27) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [1,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [0,1] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = MOD(ma+1,maxdim) + ljd = md + + if(ll .gt. 1) then + if(debug) write(*,28) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [0,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [0,2] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,29) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [0,2] ',pos + endif + + !-------------------------------------------------------------- + ! Position [0,3] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,30) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [0,3] ',pos + endif + + !-------------------------------------------------------------- + ! Position [0,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,31) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [0,4] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(ma+1,maxdim) + ljd = -md + + if(ll .gt. 1) then + if(debug) write(*,32) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [1,4] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,3] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = ma + ljd = md + + if(ll .gt. 1) then + if(debug) write(*,33) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [1,3] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,3] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 
1) then + if(debug) write(*,34) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [2,3] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,35) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [2,4] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,36) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [3,4] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,4] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(ma+1,maxdim) + ljd = -md + + if(ll .gt. 1) then + if(debug) write(*,37) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [4,4] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,3] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,38) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [4,3] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,3] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,39) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [3,3] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,2] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = ma + ljd = md + + if(ll .gt. 1) then + if(debug) write(*,40) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [3,2] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,2] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(ma+1,maxdim) + ljd = -md + + if(ll .gt. 
1) then + if(debug) write(*,41) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [4,2] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,1] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,42) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [4,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,1] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,43) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [3,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [3,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = ma + ljd = md + + if(ll .gt. 1) then + if(debug) write(*,44) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [3,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [4,0] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = ja + ljd = jd + + if(ll .gt. 
1) then + if(debug) write(*,45) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'After Position [4,0] ',pos + endif + + 21 format('Call Cinco Pos [0,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 22 format('Call Cinco Pos [1,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 23 format('Call Cinco Pos [2,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 24 format('Call Cinco Pos [2,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 25 format('Call Cinco Pos [2,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 26 format('Call Cinco Pos [1,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 27 format('Call Cinco Pos [1,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 28 format('Call Cinco Pos [0,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 29 format('Call Cinco Pos [0,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 30 format('Call Cinco Pos [0,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 31 format('Call Cinco Pos [0,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 32 format('Call Cinco Pos [1,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 33 format('Call Cinco Pos [1,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 34 format('Call Cinco Pos [2,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 35 format('Call Cinco Pos [2,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 36 format('Call Cinco Pos [3,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 37 format('Call Cinco Pos [4,4] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 38 format('Call Cinco Pos [4,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 39 format('Call Cinco Pos [3,3] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 40 format('Call Cinco Pos [3,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 41 format('Call Cinco Pos [4,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 42 format('Call Cinco Pos [4,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 43 format('Call Cinco Pos [3,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 44 format('Call Cinco Pos [3,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 45 format('Call Cinco Pos [4,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + +!EOC +!----------------------------------------------------------------------- + + end function Cinco + +!*********************************************************************** +!BOP +! !IROUTINE: PeanoM +! !INTERFACE: + + recursive function PeanoM(l,type,ma,md,ja,jd) result(ierr) + +! !DESCRIPTION: +! This function implements a meandering Peano +! space-filling curve. A meandering Peano curve +! connects a Nb x Nb block of points where +! +! Nb = 3^p +! +! !REVISION HISTORY: +! same as module +! + +! !INPUT PARAMETERS + + integer(int_kind), intent(in) :: & + l, & ! level of the space-filling curve + type, & ! type of SFC curve + ma, & ! Major axis [0,1] + md, & ! direction of major axis [-1,1] + ja, & ! joiner axis [0,1] + jd ! direction of joiner axis [-1,1] + +! !OUTPUT PARAMETERS + + integer(int_kind) :: ierr ! error return code + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + + integer(int_kind) :: & + lma, &! local major axis (next level) + lmd, &! local major direction (next level) + lja, &! local joiner axis (next level) + ljd, &! local joiner direction (next level) + ltype, &! type of SFC on next level + ll ! next level down + + logical :: debug = .FALSE. + +!----------------------------------------------------------------------- + + ll = l + if(ll .gt. 
1) ltype = fact%factors(ll-1) ! Set the next type of space curve + !-------------------------------------------------------------- + ! Position [0,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,21) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [0,0] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [0,1] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + if(ll .gt. 1) then + if(debug) write(*,22) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [0,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [0,2] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + if(ll .gt. 1) then + if(debug) write(*,23) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [0,2] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,2] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + if(ll .gt. 1) then + if(debug) write(*,24) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [1,2] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [2,2] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(lma+1,maxdim) + ljd = -lmd + + if(ll .gt. 1) then + if(debug) write(*,25) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [2,2] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,1] + !-------------------------------------------------------------- + lma = ma + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,26) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [2,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,1] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = lma + ljd = lmd + + if(ll .gt. 1) then + if(debug) write(*,27) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [1,1] ',pos + endif + + + !-------------------------------------------------------------- + ! 
Position [1,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = MOD(lma+1,maxdim) + ljd = -lmd + + if(ll .gt. 1) then + if(debug) write(*,28) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [1,0] ',pos + endif + + !-------------------------------------------------------------- + ! Position [2,0] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = ja + ljd = jd + + if(ll .gt. 1) then + if(debug) write(*,29) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'PeanoM: After Position [2,0] ',pos + endif + + 21 format('Call PeanoM Pos [0,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 22 format('Call PeanoM Pos [0,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 23 format('Call PeanoM Pos [0,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 24 format('Call PeanoM Pos [1,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 25 format('Call PeanoM Pos [2,2] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 26 format('Call PeanoM Pos [2,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 27 format('Call PeanoM Pos [1,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 28 format('Call PeanoM Pos [1,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 29 format('Call PeanoM Pos [2,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + +!EOC +!----------------------------------------------------------------------- + + end function PeanoM + +!*********************************************************************** +!BOP +! !IROUTINE: Hilbert +! !INTERFACE: + + recursive function Hilbert(l,type,ma,md,ja,jd) result(ierr) + +! !DESCRIPTION: +! This function implements a Hilbert space-filling curve. +! A Hilbert curve connect a Nb x Nb block of points where +! +! Nb = 2^p +! +! !REVISION HISTORY: +! same as module +! + + +! !INPUT PARAMETERS + + integer(int_kind), intent(in) :: & + l, & ! level of the space-filling curve + type, & ! type of SFC curve + ma, & ! Major axis [0,1] + md, & ! direction of major axis [-1,1] + ja, & ! joiner axis [0,1] + jd ! direction of joiner axis [-1,1] + +! !OUTPUT PARAMETERS + + integer(int_kind) :: ierr ! error return code + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + + integer(int_kind) :: & + lma, &! local major axis (next level) + lmd, &! local major direction (next level) + lja, &! local joiner axis (next level) + ljd, &! local joiner direction (next level) + ltype, &! type of SFC on next level + ll ! next level down + + logical :: debug = .FALSE. + +!----------------------------------------------------------------------- + ll = l + if(ll .gt. 1) ltype = fact%factors(ll-1) ! Set the next type of space curve + !-------------------------------------------------------------- + ! Position [0,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = md + lja = lma + ljd = lmd + + if(ll .gt. 
1) then + if(debug) write(*,21) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'Hilbert: After Position [0,0] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [0,1] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = lma + ljd = lmd + if(ll .gt. 1) then + if(debug) write(*,22) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'Hilbert: After Position [0,1] ',pos + endif + + + !-------------------------------------------------------------- + ! Position [1,1] + !-------------------------------------------------------------- + lma = ma + lmd = md + lja = MOD(ma+1,maxdim) + ljd = -md + + if(ll .gt. 1) then + if(debug) write(*,23) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'Hilbert: After Position [1,1] ',pos + endif + + !-------------------------------------------------------------- + ! Position [1,0] + !-------------------------------------------------------------- + lma = MOD(ma+1,maxdim) + lmd = -md + lja = ja + ljd = jd + + if(ll .gt. 1) then + if(debug) write(*,24) ll-1,pos(0),pos(1),lma,lmd,lja,ljd + ierr = GenCurve(ll-1,ltype,lma,lmd,lja,ljd) + if(debug) call PrintCurve(ordered) + else + ierr = IncrementCurve(lja,ljd) + if(debug) print *,'Hilbert: After Position [1,0] ',pos + endif + + 21 format('Call Hilbert Pos [0,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 22 format('Call Hilbert Pos [0,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 23 format('Call Hilbert Pos [1,1] Level ',i1,' at (',i2,',',i2,')',4(i3)) + 24 format('Call Hilbert Pos [1,0] Level ',i1,' at (',i2,',',i2,')',4(i3)) + +!EOC +!----------------------------------------------------------------------- + + end function hilbert + +!*********************************************************************** +!BOP +! !IROUTINE: IncrementCurve +! !INTERFACE: + + function IncrementCurve(ja,jd) result(ierr) + +! !DESCRIPTION: +! This function creates the curve which is stored in the +! the ordered array. The curve is implemented by +! incrementing the curve in the direction [jd] of axis [ja]. +! +! !REVISION HISTORY: +! same as module +! + +! !INPUT PARAMETERS: + integer(int_kind) :: & + ja, &! axis to increment + jd ! direction along axis + +! !OUTPUT PARAMETERS: + integer(int_kind) :: ierr ! error return code + + !----------------------------- + ! mark the newly visited point + !----------------------------- + ordered(pos(0)+1,pos(1)+1) = vcnt + + !------------------------------------ + ! increment curve and update position + !------------------------------------ + vcnt = vcnt + 1 + pos(ja) = pos(ja) + jd + + ierr = 0 +!EOC +!----------------------------------------------------------------------- + + end function IncrementCurve + +!*********************************************************************** +!BOP +! !IROUTINE: log2 +! !INTERFACE: + + function log2( n) + +! !DESCRIPTION: +! This function calculates the log2 of its integer +! input. +! +! !REVISION HISTORY: +! same as module + +! !INPUT PARAMETERS: + + integer(int_kind), intent(in) :: n ! integer value to find the log2 + +! 
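! Editor's note (illustrative addition, not part of the imported source):
! the result is the truncated integer log2 of n, i.e. effectively
! floor(log2(n)), obtained by repeated integer halving in the loop below;
! for example log2(16) = 4, log2(10) = 3 and log2(7) = 2.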
!OUTPUT PARAMETERS: + + integer(int_kind) :: log2 + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer(int_kind) :: tmp + + !------------------------------- + ! Find the log2 of input value + ! Abort if n < 1 + !------------------------------- + + if (n < 1) then + call abort_ice ('ice: spacecurve log2 error') + + elseif (n == 1) then + log2 = 0 + + else ! n > 1 + log2 = 1 + tmp =n + do while (tmp > 1 .and. tmp/2 .ne. 1) + tmp=tmp/2 + log2=log2+1 + enddo + endif + +!EOP +!----------------------------------------------------------------------- + + end function log2 + +!*********************************************************************** +!BOP +! !IROUTINE: IsLoadBalanced +! !INTERFACE: + + function IsLoadBalanced(nelem,npart) + +! !DESCRIPTION: +! This function determines if we can create +! a perfectly load-balanced partitioning. +! +! !REVISION HISTORY: +! same as module + +! !INTPUT PARAMETERS: + + integer(int_kind), intent(in) :: & + nelem, & ! number of blocks/elements to partition + npart ! size of partition + +! !OUTPUT PARAMETERS: + logical :: IsLoadBalanced ! .TRUE. if a perfectly load balanced + ! partition is possible +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer(int_kind) :: tmp1 ! temporary int + +!----------------------------------------------------------------------- + tmp1 = nelem/npart + + if(npart*tmp1 == nelem ) then + IsLoadBalanced=.TRUE. + else + IsLoadBalanced=.FALSE. + endif + +!EOP +!----------------------------------------------------------------------- + + end function IsLoadBalanced + +!*********************************************************************** +!BOP +! !IROUTINE: GenCurve +! !INTERFACE: + + function GenCurve(l,type,ma,md,ja,jd) result(ierr) + +! !DESCRIPTION: +! This subroutine generates the next level down +! space-filling curve +! +! !REVISION HISTORY: +! same as module +! + +! !INPUT PARAMETERS + + integer(int_kind), intent(in) :: & + l, & ! level of the space-filling curve + type, & ! type of SFC curve + ma, & ! Major axis [0,1] + md, & ! direction of major axis [-1,1] + ja, & ! joiner axis [0,1] + jd ! direction of joiner axis [-1,1] + +! !OUTPUT PARAMETERS + + integer(int_kind) :: ierr ! error return code + +!EOP +!BOC +!----------------------------------------------------------------------- + + !------------------------------------------------- + ! create the space-filling curve on the next level + !------------------------------------------------- + + if(type == 2) then + ierr = Hilbert(l,type,ma,md,ja,jd) + elseif ( type == 3) then + ierr = PeanoM(l,type,ma,md,ja,jd) + elseif ( type == 5) then + ierr = Cinco(l,type,ma,md,ja,jd) + endif + +!EOP +!----------------------------------------------------------------------- + + end function GenCurve + + + function FirstFactor(fac) result(res) + type (factor_t) :: fac + integer :: res + logical :: found + integer (int_kind) :: i + + found = .false. + i=1 + do while (i<=fac%numfact .and. (.not. found)) + if(fac%used(i) == 0) then + res = fac%factors(i) + found = .true. + endif + i=i+1 + enddo + + end function FirstFactor + + function FindandMark(fac,val,f2) result(found) + type (factor_t) :: fac + integer :: val + logical :: found + logical :: f2 + integer (int_kind) :: i + + found = .false. 
+ i=1 + do while (i<=fac%numfact .and. (.not. found)) + if(fac%used(i) == 0) then + if(fac%factors(i) .eq. val) then + if(f2) then + fac%used(i) = 1 + found = .true. + else if( .not. f2) then + fac%used(i) = -1 + found = .false. + endif + endif + endif + i=i+1 + enddo + + end function FindandMark + + + subroutine MatchFactor(fac1,fac2,val,found) + type (factor_t) :: fac1 + type (factor_t) :: fac2 + integer :: val + integer :: val1 + logical :: found + logical :: tmp + + found = .false. + + val1 = FirstFactor(fac1) +!JMD print *,'Matchfactor: found value: ',val1 + found = FindandMark(fac2,val1,.true.) + tmp = FindandMark(fac1,val1,found) + if (found) then + val = val1 + else + val = 1 + endif + + end subroutine MatchFactor + + function ProdFactor(fac) result(res) + + type (factor_t) :: fac + integer :: res + integer (int_kind) :: i + + res = 1 + do i=1,fac%numfact + if(fac%used(i) <= 0) then + res = res * fac%factors(i) + endif + enddo + + end function ProdFactor + + subroutine PrintFactor(msg,fac) + + + character(len=*) :: msg + type (factor_t) :: fac + integer (int_kind) :: i + + write(*,*) ' ' + write(*,*) 'PrintFactor: ',msg + write(*,*) (fac%factors(i),i=1,fac%numfact) + write(*,*) (fac%used(i),i=1,fac%numfact) + + + end subroutine PrintFactor + +!*********************************************************************** +!BOP +! !IROUTINE: Factor +! !INTERFACE: + + function Factor(num) result(res) + +! !DESCRIPTION: +! This function factors the input value num into a +! product of 2,3, and 5. +! +! !REVISION HISTORY: +! same as module +! + +! !INPUT PARAMETERS: + + integer(int_kind), intent(in) :: num ! number to factor + +! !OUTPUT PARAMETERS: + + type (factor_t) :: res + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer(int_kind) :: & + tmp,tmp2,tmp3,tmp5 ! tempories for the factorization algorithm + integer(int_kind) :: i,n ! loop tempories + logical :: found ! logical temporary + + ! -------------------------------------- + ! Allocate allocate for max # of factors + ! -------------------------------------- + tmp = num + tmp2 = max(log2(num),1) + allocate(res%factors(tmp2)) + allocate(res%used(tmp2)) + + res%used = 0 + n=0 + + + !----------------------- + ! Look for factors of 2 + !----------------------- + found=.TRUE. + do while (found) + found = .FALSE. + tmp2 = tmp/2 + if( tmp2*2 == tmp ) then + n = n + 1 + res%factors(n) = 2 + found = .TRUE. + tmp = tmp2 + endif + enddo + + !----------------------- + ! Look for factors of 3 + !----------------------- + found=.TRUE. + do while (found) + found = .FALSE. + tmp3 = tmp/3 + if( tmp3*3 == tmp ) then + n = n + 1 + res%factors(n) = 3 + found = .TRUE. + tmp = tmp3 + endif + enddo + + !----------------------- + ! Look for factors of 5 + !----------------------- + found=.TRUE. + do while (found) + found = .FALSE. + tmp5 = tmp/5 + if( tmp5*5 == tmp ) then + n = n + 1 + res%factors(n) = 5 + found = .TRUE. + tmp = tmp5 + endif + enddo + + !------------------------------------ + ! make sure that the input value + ! only contains factors of 2,3,and 5 + !------------------------------------ + tmp=1 + do i=1,n + tmp = tmp * res%factors(i) + enddo + if(tmp == num) then + res%numfact = n + else + res%numfact = -1 + endif + +!EOP +!--------------------------------------------------------- + end function Factor + +!*********************************************************************** +!BOP +! 
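! Editor's note (illustrative addition, not part of the imported source):
! Factor above records repeated trial divisions by 2, 3 and 5, so e.g.
! Factor(360) yields the factor list 2 2 2 3 3 5 with numfact = 6, while
! Factor(14) leaves a remainder of 7 and flags numfact = -1.
! A minimal stand-alone sketch of the same idea (kept commented out so it
! is not mistaken for part of the module):
!      program factor235_demo
!        implicit none
!        integer :: n, tmp, p, nf
!        integer :: facs(32)
!        n = 360; tmp = n; nf = 0
!        do p = 2, 5
!          if (p == 4) cycle                   ! only 2, 3 and 5 are allowed
!          do while (mod(tmp, p) == 0)
!            nf = nf + 1; facs(nf) = p; tmp = tmp / p
!          end do
!        end do
!        if (tmp /= 1) nf = -1                 ! leftover prime => not factorable
!        print *, facs(1:max(nf,0))            ! prints 2 2 2 3 3 5 for n = 360
!      end program factor235_demo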
!IROUTINE: IsFactorable +! !INTERFACE: + + function IsFactorable(n) + +! !DESCRIPTION: +! This function determines if we can factor +! n into 2,3,and 5. +! +! !REVISION HISTORY: +! same as module + + +! !INTPUT PARAMETERS: + + integer(int_kind), intent(in) :: n ! number to factor + +! !OUTPUT PARAMETERS: + logical :: IsFactorable ! .TRUE. if it is factorable + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + type (factor_t) :: fact ! data structure to store factor information + + fact = Factor(n) + if(fact%numfact .ne. -1) then + IsFactorable = .TRUE. + else + IsFactorable = .FALSE. + endif + +!EOP +!----------------------------------------------------------------------- + + end function IsFactorable + +!*********************************************************************** +!BOP +! !IROUTINE: map +! !INTERFACE: + + subroutine map(l) + +! !DESCRIPTION: +! Interface routine between internal subroutines and public +! subroutines. +! +! !REVISION HISTORY: +! same as module + +! !INPUT PARAMETERS: + integer(int_kind) :: l ! level of space-filling curve + + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer(int_kind) :: & + d, & ! dimension of curve only 2D is supported + type, & ! type of space-filling curve to start off + ierr ! error return code + + d = SIZE(pos) + + pos=0 + maxdim=d + vcnt=0 + + type = fact%factors(l) + ierr = GenCurve(l,type,0,1,0,1) + + +!EOP +!----------------------------------------------------------------------- + + end subroutine map + +!*********************************************************************** +!BOP +! !IROUTINE: PrintCurve +! !INTERFACE: + + subroutine PrintCurve(Mesh) + + +! !DESCRIPTION: +! This subroutine prints the several low order +! space-filling curves in an easy to read format +! +! !REVISION HISTORY: +! same as module +! +! !INPUT PARAMETERS: + + integer(int_kind), intent(in), target :: Mesh(:,:) ! SFC to be printed + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + integer(int_kind) :: & + gridsize, &! order of space-filling curve + i ! 
loop temporary + +!----------------------------------------------------------------------- + + gridsize = SIZE(Mesh,dim=1) + + if(gridsize == 2) then + write (*,*) "A Level 1 Hilbert Curve:" + write (*,*) "------------------------" + do i=1,gridsize + write(*,2) Mesh(1,i),Mesh(2,i) + enddo + else if(gridsize == 3) then + write (*,*) "A Level 1 Peano Meandering Curve:" + write (*,*) "---------------------------------" + do i=1,gridsize + write(*,3) Mesh(1,i),Mesh(2,i),Mesh(3,i) + enddo + else if(gridsize == 4) then + write (*,*) "A Level 2 Hilbert Curve:" + write (*,*) "------------------------" + do i=1,gridsize + write(*,4) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i) + enddo + else if(gridsize == 5) then + write (*,*) "A Level 1 Cinco Curve:" + write (*,*) "------------------------" + do i=1,gridsize + write(*,5) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i),Mesh(5,i) + enddo + else if(gridsize == 6) then + write (*,*) "A Level 1 Hilbert and Level 1 Peano Curve:" + write (*,*) "------------------------------------------" + do i=1,gridsize + write(*,6) Mesh(1,i),Mesh(2,i),Mesh(3,i), & + Mesh(4,i),Mesh(5,i),Mesh(6,i) + enddo + else if(gridsize == 8) then + write (*,*) "A Level 3 Hilbert Curve:" + write (*,*) "------------------------" + do i=1,gridsize + write(*,8) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i) + enddo + else if(gridsize == 9) then + write (*,*) "A Level 2 Peano Meandering Curve:" + write (*,*) "---------------------------------" + do i=1,gridsize + write(*,9) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i), & + Mesh(9,i) + enddo + else if(gridsize == 10) then + write (*,*) "A Level 1 Hilbert and Level 1 Cinco Curve:" + write (*,*) "---------------------------------" + do i=1,gridsize + write(*,10) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i), & + Mesh(9,i),Mesh(10,i) + enddo + else if(gridsize == 12) then + write (*,*) "A Level 2 Hilbert and Level 1 Peano Curve:" + write (*,*) "------------------------------------------" + do i=1,gridsize + write(*,12) Mesh(1,i),Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i),Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i),Mesh(10,i),Mesh(11,i),Mesh(12,i) + enddo + else if(gridsize == 15) then + write (*,*) "A Level 1 Peano and Level 1 Cinco Curve:" + write (*,*) "------------------------" + do i=1,gridsize + write(*,15) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i), & + Mesh(9,i),Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i) + enddo + else if(gridsize == 16) then + write (*,*) "A Level 4 Hilbert Curve:" + write (*,*) "------------------------" + do i=1,gridsize + write(*,16) Mesh(1,i),Mesh(2,i),Mesh(3,i),Mesh(4,i), & + Mesh(5,i),Mesh(6,i),Mesh(7,i),Mesh(8,i), & + Mesh(9,i),Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i) + enddo + else if(gridsize == 18) then + write (*,*) "A Level 1 Hilbert and Level 2 Peano Curve:" + write (*,*) "------------------------------------------" + do i=1,gridsize + write(*,18) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i) + enddo + else if(gridsize == 20) then + write (*,*) "A Level 2 Hilbert and Level 1 Cinco Curve:" + write (*,*) "------------------------------------------" + do i=1,gridsize + write(*,20) Mesh(1,i), Mesh(2,i), Mesh(3,i), 
Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i) + enddo + else if(gridsize == 24) then + write (*,*) "A Level 3 Hilbert and Level 1 Peano Curve:" + write (*,*) "------------------------------------------" + do i=1,gridsize + write(*,24) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i), & + Mesh(21,i),Mesh(22,i),Mesh(23,i),Mesh(24,i) + enddo + else if(gridsize == 25) then + write (*,*) "A Level 2 Cinco Curve:" + write (*,*) "------------------------------------------" + do i=1,gridsize + write(*,25) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i), & + Mesh(21,i),Mesh(22,i),Mesh(23,i),Mesh(24,i), & + Mesh(25,i) + enddo + else if(gridsize == 27) then + write (*,*) "A Level 3 Peano Meandering Curve:" + write (*,*) "---------------------------------" + do i=1,gridsize + write(*,27) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i), & + Mesh(21,i),Mesh(22,i),Mesh(23,i),Mesh(24,i), & + Mesh(25,i),Mesh(26,i),Mesh(27,i) + enddo + else if(gridsize == 32) then + write (*,*) "A Level 5 Hilbert Curve:" + write (*,*) "------------------------" + do i=1,gridsize + write(*,32) Mesh(1,i), Mesh(2,i), Mesh(3,i), Mesh(4,i), & + Mesh(5,i), Mesh(6,i), Mesh(7,i), Mesh(8,i), & + Mesh(9,i), Mesh(10,i),Mesh(11,i),Mesh(12,i), & + Mesh(13,i),Mesh(14,i),Mesh(15,i),Mesh(16,i), & + Mesh(17,i),Mesh(18,i),Mesh(19,i),Mesh(20,i), & + Mesh(21,i),Mesh(22,i),Mesh(23,i),Mesh(24,i), & + Mesh(25,i),Mesh(26,i),Mesh(27,i),Mesh(28,i), & + Mesh(29,i),Mesh(30,i),Mesh(31,i),Mesh(32,i) + enddo + endif + 2 format('|',2(i2,'|')) + 3 format('|',3(i2,'|')) + 4 format('|',4(i2,'|')) + 5 format('|',5(i2,'|')) + 6 format('|',6(i2,'|')) + 8 format('|',8(i2,'|')) + 9 format('|',9(i2,'|')) +10 format('|',10(i2,'|')) +12 format('|',12(i3,'|')) +15 format('|',15(i3,'|')) +16 format('|',16(i3,'|')) +18 format('|',18(i3,'|')) +20 format('|',20(i3,'|')) +24 format('|',24(i3,'|')) +25 format('|',25(i3,'|')) +27 format('|',27(i3,'|')) +32 format('|',32(i4,'|')) + +!EOC +!----------------------------------------------------------------------- + + end subroutine PrintCurve + +!*********************************************************************** +!BOP +! !IROUTINE: GenSpaceCurve +! !INTERFACE: + + subroutine GenSpaceCurve(Mesh) + +! !DESCRIPTION: +! This subroutine is the public interface into the +! space-filling curve functionality +! +! !REVISION HISTORY: +! same as module +! + +! !INPUT/OUTPUT PARAMETERS: + integer(int_kind), target,intent(inout) :: & + Mesh(:,:) ! The SFC ordering in 2D array + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer(int_kind) :: & + level, &! Level of space-filling curve + dim ! dimension of SFC... currently limited to 2D + + integer(int_kind) :: gridsize ! 
number of points on a side + +!----------------------------------------------------------------------- + + !----------------------------------------- + ! Setup the size of the grid to traverse + !----------------------------------------- + dim = 2 + gridsize = SIZE(Mesh,dim=1) + fact = factor(gridsize) + level = fact%numfact + + if(verbose) print *,'GenSpacecurve: level is ',level + allocate(ordered(gridsize,gridsize)) + + !-------------------------------------------- + ! Setup the working arrays for the traversal + !-------------------------------------------- + allocate(pos(0:dim-1)) + + !----------------------------------------------------- + ! The array ordered will contain the visitation order + !----------------------------------------------------- + ordered(:,:) = 0 + + call map(level) + + Mesh(:,:) = ordered(:,:) + + deallocate(pos,ordered) + +!EOP +!----------------------------------------------------------------------- + + end subroutine GenSpaceCurve + + recursive subroutine qsort(a) + + integer, intent(inout) :: a(:) + integer :: split + + if(SIZE(a) > 1) then + call partition(a,split) + call qsort(a(:split-1)) + call qsort(a(split:)) + endif + + end subroutine qsort + + subroutine partition(a,marker) + + INTEGER, INTENT(IN OUT) :: a(:) + INTEGER, INTENT(OUT) :: marker + INTEGER :: left, right, pivot, temp + + pivot = (a(1) + a(size(a))) / 2 ! Average of first and last elements to prevent quadratic + left = 0 ! behavior with sorted or reverse sorted data + right = size(a) + 1 + + DO WHILE (left < right) + right = right - 1 + DO WHILE (a(right) > pivot) + right = right-1 + END DO + left = left + 1 + DO WHILE (a(left) < pivot) + left = left + 1 + END DO + IF (left < right) THEN + temp = a(left) + a(left) = a(right) + a(right) = temp + END IF + END DO + + IF (left == right) THEN + marker = left + 1 + ELSE + marker = left + END IF + + end subroutine partition + + + +end module ice_spacecurve + +!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/source/ice_state.F90 b/source/ice_state.F90 new file mode 100755 index 00000000..52680b8c --- /dev/null +++ b/source/ice_state.F90 @@ -0,0 +1,244 @@ +! SVN:$Id: ice_state.F90 825 2014-08-29 15:37:09Z eclare $ +!======================================================================= +! +! Primary state variables in various configurations +! Note: other state variables are at the end of this... +! The primary state variable names are: +!------------------------------------------------------------------- +! for each category aggregated over units +! categories +!------------------------------------------------------------------- +! aicen(i,j,n) aice(i,j) --- +! vicen(i,j,n) vice(i,j) m +! vsnon(i,j,n) vsno(i,j) m +! trcrn(i,j,it,n) trcr(i,j,it) +! +! Area is dimensionless because aice is the fractional area +! (normalized so that the sum over all categories, including open +! water, is 1.0). That is why vice/vsno have units of m instead of m^3. +! +! Variable names follow these rules: +! +! (1) For 3D variables (indices i,j,n), write 'ice' or 'sno' or +! 'sfc' and put an 'n' at the end. +! (2) For 2D variables (indices i,j) aggregated over all categories, +! write 'ice' or 'sno' or 'sfc' without the 'n'. +! (3) For 2D variables (indices i,j) associated with an individual +! category, write 'i' or 's' instead of 'ice' or 'sno' and put an 'n' +! at the end: e.g. hin, hsn. These are not declared here +! but in individual modules (e.g., ice_therm_vertical). +! +! authors C. M. Bitz, UW +! Elizabeth C. 
Hunke and William H. Lipscomb, LANL +! +! 2004: Block structure added by William Lipscomb +! 2006: Converted to free form source (F90) by Elizabeth Hunke + + module ice_state + + use ice_kinds_mod + use ice_domain_size, only: max_blocks, ncat, max_ntrcr, n_aero + use ice_blocks, only: nx_block, ny_block + + implicit none + private + public :: bound_state + save + + !----------------------------------------------------------------- + ! state of the ice aggregated over all categories + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + public :: & + aice , & ! concentration of ice + vice , & ! volume per unit area of ice (m) + vsno ! volume per unit area of snow (m) + + real (kind=dbl_kind), & + dimension(nx_block,ny_block,max_ntrcr,max_blocks), public :: & + trcr ! ice tracers + ! 1: surface temperature of ice/snow (C) + + !----------------------------------------------------------------- + ! state of the ice for each category + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), & + public:: & + aice0 ! concentration of open water + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), public, & + dimension (nx_block,ny_block,max_ntrcr,ncat,max_blocks) :: & + trcrn ! tracers + ! 1: surface temperature of ice/snow (C) + + !----------------------------------------------------------------- + ! indices and flags for tracers + !----------------------------------------------------------------- + + integer (kind=int_kind), dimension (max_ntrcr), public :: & + trcr_depend ! = 0 for ice area tracers + ! = 1 for ice volume tracers + ! = 2 for snow volume tracers + + integer (kind=int_kind), public :: & + ntrcr ! number of tracers in use + + integer (kind=int_kind), public :: & + nbtrcr ! number of bgc tracers in use + + integer (kind=int_kind), public :: & + nt_Tsfc , & ! ice/snow temperature + nt_qice , & ! volume-weighted ice enthalpy (in layers) + nt_qsno , & ! volume-weighted snow enthalpy (in layers) + nt_sice , & ! volume-weighted ice bulk salinity (CICE grid layers) + nt_fbri , & ! volume fraction of ice with dynamic salt (hinS/vicen*aicen) + nt_iage , & ! volume-weighted ice age + nt_FY , & ! area-weighted first-year ice area + nt_alvl , & ! level ice area fraction + nt_vlvl , & ! level ice volume fraction + nt_apnd , & ! melt pond area fraction + nt_hpnd , & ! melt pond depth + nt_ipnd , & ! melt pond refrozen lid thickness + nt_aero , & ! starting index for aerosols in ice + nt_bgc_N_sk, & ! algae (skeletal layer) + nt_bgc_C_sk, & ! + nt_bgc_chl_sk, & ! + nt_bgc_Nit_sk, & ! nutrients (skeletal layer) + nt_bgc_Am_sk, & ! + nt_bgc_Sil_sk, & ! + nt_bgc_DMSPp_sk, & ! trace gases (skeletal layer) + nt_bgc_DMSPd_sk, & ! + nt_bgc_DMS_sk, & ! + nt_bgc_Nit_ml, & ! nutrients (ocean mixed layer) + nt_bgc_Am_ml, & ! + nt_bgc_Sil_ml, & ! + nt_bgc_DMSP_ml, & ! trace gases (ocean mixed layer) + nt_bgc_DMS_ml + + logical (kind=log_kind), public :: & + tr_iage, & ! if .true., use age tracer + tr_FY, & ! if .true., use first-year area tracer + tr_lvl, & ! if .true., use level ice tracer + tr_pond, & ! if .true., use melt pond tracer + tr_pond_cesm,& ! if .true., use cesm pond tracer + tr_pond_lvl, & ! if .true., use level-ice pond tracer + tr_pond_topo,& ! 
if .true., use explicit topography-based ponds + tr_aero ,& ! if .true., use aerosol tracers + tr_brine ! if .true., brine height differs from ice thickness + + !----------------------------------------------------------------- + ! dynamic variables closely related to the state of the ice + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + public :: & + uvel , & ! x-component of velocity (m/s) + vvel , & ! y-component of velocity (m/s) + divu , & ! strain rate I component, velocity divergence (1/s) + shear , & ! strain rate II component (1/s) + strength ! ice strength (N/m) + + !----------------------------------------------------------------- + ! ice state at start of time step, saved for later in the step + !----------------------------------------------------------------- + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + public :: & + aice_init ! initial concentration of ice, for diagnostics + + real (kind=dbl_kind), & + dimension(nx_block,ny_block,ncat,max_blocks), public :: & + aicen_init , & ! initial ice concentration, for linear ITD + vicen_init ! initial ice volume (m), for linear ITD + +!======================================================================= + + contains + +!======================================================================= +! +! Get ghost cell values for ice state variables in each thickness category. +! NOTE: This subroutine cannot be called from inside a block loop! +! +! author: William H. Lipscomb, LANL + + subroutine bound_state (aicen, trcrn, & + vicen, vsnon) + + use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, & + ice_HaloDestroy + use ice_domain, only: halo_info, maskhalo_bound, nblocks + use ice_constants, only: field_loc_center, field_type_scalar, c0 + + real (kind=dbl_kind), & + dimension(nx_block,ny_block,ncat,max_blocks), intent(inout) :: & + aicen , & ! fractional ice area + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), & + dimension(nx_block,ny_block,max_ntrcr,ncat,max_blocks), & + intent(inout) :: & + trcrn ! ice tracers + + ! 
local variables + + integer (kind=int_kind) :: i, j, n, iblk + + integer (kind=int_kind), & + dimension(nx_block,ny_block,max_blocks) :: halomask + + type (ice_halo) :: halo_info_aicemask + + call ice_HaloUpdate (aicen, halo_info, & + field_loc_center, field_type_scalar) + + if (maskhalo_bound) then + halomask(:,:,:) = 0 + + !$OMP PARALLEL DO PRIVATE(iblk,n,i,j) + do iblk = 1, nblocks + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + if (aicen(i,j,n,iblk) > c0) halomask(i,j,iblk) = 1 + enddo + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call ice_HaloMask(halo_info_aicemask, halo_info, halomask) + + call ice_HaloUpdate (trcrn(:,:,1:ntrcr,:,:), halo_info_aicemask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (vicen, halo_info_aicemask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (vsnon, halo_info_aicemask, & + field_loc_center, field_type_scalar) + call ice_HaloDestroy(halo_info_aicemask) + + else + call ice_HaloUpdate (trcrn(:,:,1:ntrcr,:,:), halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (vicen, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (vsnon, halo_info, & + field_loc_center, field_type_scalar) + endif + + end subroutine bound_state + +!======================================================================= + + end module ice_state + +!======================================================================= diff --git a/source/ice_step_mod.F90 b/source/ice_step_mod.F90 new file mode 100755 index 00000000..de5e6471 --- /dev/null +++ b/source/ice_step_mod.F90 @@ -0,0 +1,1533 @@ +! SVN:$Id: ice_step_mod.F90 936 2015-03-17 15:46:44Z eclare $ +!======================================================================= +! +! Contains CICE component driver routines common to all drivers. +! +! authors Elizabeth C. Hunke, LANL +! Philip W. Jones, LANL +! William H. Lipscomb, LANL +! +! 2008 ECH: created module by moving subroutines from drivers/cice4/ + + module ice_step_mod + + use ice_constants + use ice_kinds_mod + implicit none + private + save + + public :: step_therm1, step_therm2, step_dynamics, & + prep_radiation, step_radiation, post_thermo + +!======================================================================= + + contains + +!======================================================================= +! +! Scales radiation fields computed on the previous time step. +! +! authors: Elizabeth Hunke, LANL + + subroutine prep_radiation (dt, iblk) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_communicate, only: my_task + use ice_domain, only: blocks_ice + use ice_domain_size, only: ncat, nilyr, nslyr + use ice_fileunits, only: nu_diag + use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & + alvdr_ai, alvdf_ai, alidr_ai, alidf_ai, fswfac, coszen + use ice_shortwave, only: fswsfcn, fswintn, fswthrun, fswpenln, & + Sswabsn, Iswabsn + use ice_state, only: aice, aicen + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_sw + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij , & ! horizontal indices + k , & ! vertical index + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + n ! thickness category index + + integer (kind=int_kind) :: & + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! 
indirect indices for cells with aicen > puny + + real (kind=dbl_kind) :: netsw + + type (block) :: & + this_block ! block information for current block + + call ice_timer_start(timer_sw,iblk) ! shortwave + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !----------------------------------------------------------------- + ! Compute netsw scaling factor (new netsw / old netsw) + !----------------------------------------------------------------- + + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > c0 .and. scale_factor(i,j,iblk) > puny) then + netsw = swvdr(i,j,iblk)*(c1 - alvdr_ai(i,j,iblk)) & + + swvdf(i,j,iblk)*(c1 - alvdf_ai(i,j,iblk)) & + + swidr(i,j,iblk)*(c1 - alidr_ai(i,j,iblk)) & + + swidf(i,j,iblk)*(c1 - alidf_ai(i,j,iblk)) + scale_factor(i,j,iblk) = netsw / scale_factor(i,j,iblk) + else + scale_factor(i,j,iblk) = c1 + endif + fswfac(i,j,iblk) = scale_factor(i,j,iblk) ! for history + enddo ! i + enddo ! j + + do n = 1, ncat + + !----------------------------------------------------------------- + ! Identify cells with nonzero ice area + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j,n,iblk) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + !----------------------------------------------------------------- + ! Scale absorbed solar radiation for change in net shortwave + !----------------------------------------------------------------- + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + fswsfcn(i,j,n,iblk) = scale_factor(i,j,iblk)*fswsfcn (i,j,n,iblk) + fswintn(i,j,n,iblk) = scale_factor(i,j,iblk)*fswintn (i,j,n,iblk) + fswthrun(i,j,n,iblk) = scale_factor(i,j,iblk)*fswthrun(i,j,n,iblk) + do k = 1,nilyr+1 + fswpenln(i,j,k,n,iblk) & + = scale_factor(i,j,iblk)*fswpenln(i,j,k,n,iblk) + enddo !k + + do k=1,nslyr + Sswabsn(i,j,k,n,iblk) = & + scale_factor(i,j,iblk)*Sswabsn(i,j,k,n,iblk) + enddo + do k=1,nilyr + Iswabsn(i,j,k,n,iblk) = & + scale_factor(i,j,iblk)*Iswabsn(i,j,k,n,iblk) + enddo + enddo + enddo ! ncat + + call ice_timer_stop(timer_sw,iblk) ! shortwave + + end subroutine prep_radiation + +!======================================================================= +! +! Driver for updating ice and snow internal temperatures and +! computing thermodynamic growth rates and coupler fluxes. +! +! authors: William H. 
Lipscomb, LANL + + subroutine step_therm1 (dt, iblk) + + use ice_aerosol + use ice_age, only: increment_age + use ice_atmo, only: calc_strair, & + atmbndy, atmo_boundary_const, atmo_boundary_layer, & + formdrag, neutral_drag_coeffs, & + Cdn_ocn, Cdn_ocn_skin, Cdn_ocn_floe, Cdn_ocn_keel, Cdn_atm_ratio, & + Cdn_atm, Cdn_atm_skin, Cdn_atm_floe, Cdn_atm_rdg, Cdn_atm_pond, & + hfreebd, hdraft, hridge, distrdg, hkeel, dkeel, lfloe, dfloe + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_calendar, only: yday, istep1 + use ice_communicate, only: my_task + use ice_coupling, only: set_sfcflux + use ice_domain, only: blocks_ice + use ice_domain_size, only: ncat, nilyr + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_diag + use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, & + meltsn, melttn, meltbn, congeln, snoicen, dsnown, uatm, vatm, & + wind, rhoa, potT, Qa, zlvl, strax, stray, flatn, fsensn, fsurfn, fcondtopn, & + flw, fsnow, fpond, sss, mlt_onset, frz_onset, faero_atm, faero_ocn, & + frain, Tair, coszen, strairxT, strairyT, fsurf, fcondtop, fsens, & + flat, fswabs, flwout, evap, Tref, Qref, Uref, fresh, fsalt, fhocn, & + fswthru, meltt, melts, meltb, meltl, congel, snoice, & + merge_fluxes + use ice_firstyear, only: update_FYarea + use ice_grid, only: lmask_n, lmask_s, TLAT, TLON + use ice_itd, only: hi_min + use ice_meltpond_cesm, only: compute_ponds_cesm + use ice_meltpond_lvl, only: compute_ponds_lvl, ffracn, dhsn, & + rfracmin, rfracmax, dpscale, pndaspect, frzpnd + use ice_meltpond_topo, only: compute_ponds_topo + use ice_shortwave, only: fswsfcn, fswintn, fswthrun, & + Sswabsn, Iswabsn, shortwave + use ice_state, only: aice, aicen, aice_init, aicen_init, vicen_init, & + vice, vicen, vsno, vsnon, ntrcr, trcrn, & + nt_apnd, nt_hpnd, nt_ipnd, nt_alvl, nt_vlvl, nt_Tsfc, & + tr_iage, nt_iage, tr_FY, nt_FY, tr_aero, tr_pond, tr_pond_cesm, & + tr_pond_lvl, nt_qice, nt_sice, tr_pond_topo, uvel, vvel + use ice_therm_shared, only: calc_Tsfc + use ice_therm_vertical, only: frzmlt_bottom_lateral, thermo_vertical + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_ponds +!#ifdef ACCESS +! use cpl_arrays_setup, only: maice_saved +!#endif + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij , & ! horizontal indices + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + n ! thickness category index + + integer (kind=int_kind) :: & + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! indirect indices for cells with aicen > puny + + ! 2D coupler variables (computed for each category, then aggregated) + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + fswabsn , & ! shortwave absorbed by ice (W/m^2) + flwoutn , & ! upward LW at surface (W/m^2) + evapn , & ! flux of vapor, atmos to ice (kg m-2 s-1) + freshn , & ! flux of water, ice to ocean (kg/m^2/s) + fsaltn , & ! flux of salt, ice to ocean (kg/m^2/s) + fhocnn , & ! fbot corrected for leftover energy (W/m^2) + strairxn , & ! air/ice zonal stress, (N/m^2) + strairyn , & ! air/ice meridional stress, (N/m^2) + Cdn_atm_ratio_n,& ! drag coefficient ratio + Trefn , & ! air tmp reference level (K) + Urefn , & ! air speed reference level (m/s) + Qrefn ! air sp hum reference level (kg/kg) + + ! other local variables + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + Tbot , & ! 
ice bottom surface temperature (deg C) + fbot , & ! ice-ocean heat flux at bottom surface (W/m^2) + shcoef , & ! transfer coefficient for sensible heat + lhcoef ! transfer coefficient for latent heat + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + vsnon_init , & ! for aerosol mass budget + rfrac ! water fraction retained for melt ponds + + real (kind=dbl_kind) :: & + raice , & ! 1/aice + pond ! water retained in ponds (m) + + type (block) :: & + this_block ! block information for current block + + logical (kind=log_kind) :: & + l_stop ! if true, abort the model + + integer (kind=int_kind) :: & + istop, jstop ! indices of grid cell where model aborts + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + worka, workb + + l_stop = .false. + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !----------------------------------------------------------------- + ! Save the ice area passed to the coupler (so that history fields + ! can be made consistent with coupler fields). + ! Save the initial ice area and volume in each category. + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block +!#ifdef ACCESS +! aice_init (i,j, iblk) = maice_saved(i,j, iblk) +!#else + aice_init (i,j, iblk) = aice (i,j, iblk) +!#endif + enddo + enddo + + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + aicen_init(i,j,n,iblk) = aicen(i,j,n,iblk) + vicen_init(i,j,n,iblk) = vicen(i,j,n,iblk) + enddo + enddo + enddo + +!#ifdef CICE_IN_NEMO +!ars599: 08102014: while doing the OM run found this could cause some +! issue so might change back to NEMO rather than AusCOM +! need to double check +! 10102014: according to spo599 need to keep this for CM +! so that will suggest to use ifdef ACCESS rather than AusCOM +!#ifdef AusCOM +#ifdef ACCESS + !--------------------------------------------------------------- + ! Scale frain and fsnow by ice concentration as these fields + ! are supplied by NEMO multiplied by ice concentration + !--------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + + if (aice_init(i,j,iblk) > puny) then + raice = c1 / aice_init(i,j,iblk) +! if (maice_saved(i,j,iblk) > puny) then +! raice = c1 / maice_saved(i,j,iblk) +!BX. + frain(i,j,iblk) = frain(i,j,iblk)*raice + fsnow(i,j,iblk) = fsnow(i,j,iblk)*raice + else + frain(i,j,iblk) = c0 + fsnow(i,j,iblk) = c0 + endif + + enddo + enddo +#endif + + !----------------------------------------------------------------- + ! Adjust frzmlt to account for ice-ocean heat fluxes since last + ! call to coupler. + ! Compute lateral and bottom heat fluxes. + !----------------------------------------------------------------- + + call frzmlt_bottom_lateral & + (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + ntrcr, dt, & + aice (:,:, iblk), frzmlt(:,:, iblk), & + vicen (:,:,:,iblk), vsnon (:,:,:,iblk), & + trcrn (:,:,1:ntrcr,:,iblk), & + sst (:,:, iblk), Tf (:,:, iblk), & + strocnxT(:,:,iblk), strocnyT(:,:,iblk), & + Tbot, fbot, & + rside (:,:, iblk), Cdn_ocn (:,:,iblk) ) + + !----------------------------------------------------------------- + ! Update the neutral drag coefficients to account for form drag + ! 
Oceanic and atmospheric drag coefficients + !----------------------------------------------------------------- + + + if (formdrag) then + + call neutral_drag_coeffs & + (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + trcrn (:,:,nt_apnd,:,iblk), & + trcrn (:,:,nt_hpnd,:,iblk), & + trcrn (:,:,nt_ipnd,:,iblk), & + trcrn (:,:,nt_alvl,:,iblk), & + trcrn (:,:,nt_vlvl,:,iblk), & + aice (:,:,iblk), vice (:,:,iblk),& + vsno (:,:,iblk), aicen (:,:,:,iblk),& + vicen (:,:,:,iblk), vsnon (:,:,:,iblk),& + Cdn_ocn (:,:,iblk), Cdn_ocn_skin(:,:,iblk),& + Cdn_ocn_floe(:,:,iblk), Cdn_ocn_keel(:,:,iblk),& + Cdn_atm (:,:,iblk), Cdn_atm_skin(:,:,iblk),& + Cdn_atm_floe(:,:,iblk), Cdn_atm_pond(:,:,iblk),& + Cdn_atm_rdg (:,:,iblk), hfreebd (:,:,iblk),& + hdraft (:,:,iblk), hridge (:,:,iblk),& + distrdg (:,:,iblk), hkeel (:,:,iblk),& + dkeel (:,:,iblk), lfloe (:,:,iblk),& + dfloe (:,:,iblk), ncat) + endif + + do n = 1, ncat + + meltsn(:,:,n,iblk) = c0 + melttn(:,:,n,iblk) = c0 + meltbn(:,:,n,iblk) = c0 + congeln(:,:,n,iblk) = c0 + snoicen(:,:,n,iblk) = c0 + dsnown(:,:,n,iblk) = c0 +! Tsf_icen(:,:,n,iblk) = c0 + + !----------------------------------------------------------------- + ! Identify cells with nonzero ice area + !----------------------------------------------------------------- + + icells = 0 + indxi = 0 + indxj = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j,n,iblk) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + if ((calc_Tsfc .or. calc_strair) .and. icells > 0) then + + !----------------------------------------------------------------- + ! Atmosphere boundary layer calculation; compute coefficients + ! for sensible and latent heat fluxes. + ! + ! NOTE: The wind stress is computed here for later use if + ! calc_strair = .true. Otherwise, the wind stress + ! components are set to the data values. + !----------------------------------------------------------------- + + if (trim(atmbndy) == 'constant') then + call atmo_boundary_const & + (nx_block, ny_block, & + 'ice', icells, & + indxi, indxj, & + uatm(:,:,iblk), vatm(:,:,iblk), & + wind(:,:,iblk), rhoa(:,:,iblk), & + strairxn, strairyn, & + trcrn(:,:,nt_Tsfc,n,iblk), & + potT(:,:,iblk), Qa (:,:,iblk), & + worka, workb, & + lhcoef, shcoef, & + Cdn_atm(:,:,iblk)) + else ! default + call atmo_boundary_layer & + (nx_block, ny_block, & + 'ice', icells, & + indxi, indxj, & + trcrn(:,:,nt_Tsfc,n,iblk), & + potT(:,:,iblk), & + uatm(:,:,iblk), vatm(:,:,iblk), & + wind(:,:,iblk), zlvl(:,:,iblk), & + Qa (:,:,iblk), rhoa(:,:,iblk), & + strairxn, strairyn, & + Trefn, Qrefn, & + worka, workb, & + lhcoef, shcoef, & + Cdn_atm(:,:,iblk), & + Cdn_atm_ratio_n, & + uice=uvel(:,:,iblk), & + vice=vvel(:,:,iblk), & + Uref=Urefn ) + endif ! atmbndy + + else + + ! Initialize for safety + Trefn (:,:) = c0 + Qrefn (:,:) = c0 + Urefn (:,:) = c0 + lhcoef(:,:) = c0 + shcoef(:,:) = c0 + + endif ! calc_Tsfc or calc_strair + + if (.not.(calc_strair)) then +#ifndef CICE_IN_NEMO + ! Set to data values (on T points) + strairxn(:,:) = strax(:,:,iblk) + strairyn(:,:) = stray(:,:,iblk) +#else + ! NEMO wind stress is supplied on u grid, multipied + ! by ice concentration and set directly in evp, so + ! strairxT/yT = 0. Zero u-components here for safety. + strairxn(:,:) = c0 + strairyn(:,:) = c0 +#endif + endif + + !----------------------------------------------------------------- + ! Update ice age + ! This is further adjusted for freezing in the thermodynamics. + ! Melting does not alter the ice age. 
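! Editor's note (illustrative addition, not part of the imported source):
! over the cells gathered in indxi/indxj the age update below amounts, in
! essence, to accumulating the elapsed time on the tracer,
!    trcrn(i,j,nt_iage,n,iblk) = trcrn(i,j,nt_iage,n,iblk) + dt
! with the freezing-related adjustment handled later in the vertical
! thermodynamics, as noted above.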
+ !----------------------------------------------------------------- + + if (tr_iage) then + call increment_age (nx_block, ny_block, & + dt, icells, & + indxi, indxj, & + trcrn(:,:,nt_iage,n,iblk)) + endif + if (tr_FY) then + call update_FYarea (nx_block, ny_block, & + dt, icells, & + indxi, indxj, & + lmask_n(:,:,iblk), & + lmask_s(:,:,iblk), & + yday, & + trcrn(:,:,nt_FY,n,iblk)) + endif + + !----------------------------------------------------------------- + ! Vertical thermodynamics: Heat conduction, growth and melting. + !----------------------------------------------------------------- + + if (.not.(calc_Tsfc)) then + + ! If not calculating surface temperature and fluxes, set + ! surface fluxes (flatn, fsurfn, and fcondtopn) to be used + ! in thickness_changes + + ! hadgem routine sets fluxes to default values in ice-only mode + call set_sfcflux(nx_block, ny_block, & + n, iblk, & + icells, & + indxi, indxj, & + aicen (:,:,n,iblk),& + flatn (:,:,n,iblk),& + fsensn (:,:,n,iblk),& + fsurfn (:,:,n,iblk),& + fcondtopn(:,:,n,iblk) ) + endif + + vsnon_init(:,:) = vsnon(:,:,n,iblk) + + call thermo_vertical(nx_block, ny_block, & + dt, icells, & + indxi, indxj, & + aicen(:,:,n,iblk), & + trcrn(:,:,:,n,iblk), & + vicen(:,:,n,iblk), vsnon(:,:,n,iblk), & + flw (:,:,iblk), potT (:,:,iblk), & + Qa (:,:,iblk), rhoa (:,:,iblk), & + fsnow (:,:,iblk), fpond (:,:,iblk), & + fbot, Tbot, & + sss (:,:,iblk), & + lhcoef, shcoef, & + fswsfcn(:,:,n,iblk), fswintn(:,:,n,iblk), & + Sswabsn(:,:,:,n,iblk), & + Iswabsn(:,:,:,n,iblk), & + fsurfn(:,:,n,iblk), & + fcondtopn(:,:,n,iblk), & + fsensn(:,:,n,iblk), flatn(:,:,n,iblk), & + flwoutn, & + evapn, freshn, & + fsaltn, fhocnn, & + melttn(:,:,n,iblk), meltsn(:,:,n,iblk), & + meltbn(:,:,n,iblk), & + congeln(:,:,n,iblk), snoicen(:,:,n,iblk), & + mlt_onset(:,:,iblk), frz_onset(:,:,iblk), & + yday, l_stop, & + istop, jstop, & + dsnown(:,:,n,iblk)) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) 'category n = ', n + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) then + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + write(nu_diag,*) 'Lat, Lon:', & + TLAT(istop,jstop,iblk)*rad_to_deg, & + TLON(istop,jstop,iblk)*rad_to_deg + write(nu_diag,*) 'aice:', & + aice(istop,jstop,iblk) + write(nu_diag,*) 'n: ',n, 'aicen: ', & + aicen(istop,jstop,n,iblk) + endif + call abort_ice ('ice: Vertical thermo error') + endif + + !----------------------------------------------------------------- + ! Total absorbed shortwave radiation + !----------------------------------------------------------------- + do j = 1, ny_block + do i = 1, nx_block + fswabsn(i,j) = fswsfcn (i,j,n,iblk) & + + fswintn (i,j,n,iblk) & + + fswthrun(i,j,n,iblk) + enddo + enddo + + !----------------------------------------------------------------- + ! Aerosol update + !----------------------------------------------------------------- + if (tr_aero .and. 
icells > 0) then + + call update_aerosol (nx_block, ny_block, & + dt, icells, & + indxi, indxj, & + melttn(:,:,n,iblk), & + meltsn(:,:,n,iblk), & + meltbn(:,:,n,iblk), & + congeln(:,:,n,iblk), & + snoicen(:,:,n,iblk), & + fsnow(:,:,iblk), & + trcrn(:,:,:,n,iblk), & + aicen_init(:,:,n,iblk), & + vicen_init(:,:,n,iblk), & + vsnon_init(:,:), & + vicen(:,:,n,iblk), & + vsnon(:,:,n,iblk), & + aicen(:,:,n,iblk), & + faero_atm(:,:,:,iblk), & + faero_ocn(:,:,:,iblk)) + endif + + !----------------------------------------------------------------- + ! Melt ponds + ! If using tr_pond_cesm, the full calculation is performed here. + ! If using tr_pond_topo, the rest of the calculation is done after + ! the surface fluxes are merged, below. + !----------------------------------------------------------------- + + call ice_timer_start(timer_ponds,iblk) + + if (tr_pond) then + + if (tr_pond_cesm) then + rfrac(:,:) = rfracmin + (rfracmax-rfracmin) * aicen(:,:,n,iblk) + call compute_ponds_cesm(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, hi_min, & + pndaspect, rfrac, & + melttn(:,:,n,iblk), & + meltsn(:,:,n,iblk), frain(:,:,iblk), & + aicen (:,:,n,iblk), vicen (:,:,n,iblk), & + vsnon (:,:,n,iblk), & + trcrn(:,:,nt_Tsfc,n,iblk), & + trcrn(:,:,nt_apnd,n,iblk), & + trcrn(:,:,nt_hpnd,n,iblk)) + + elseif (tr_pond_lvl) then + rfrac(:,:) = rfracmin + (rfracmax-rfracmin) * aicen(:,:,n,iblk) + call compute_ponds_lvl(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, hi_min, & + dpscale, frzpnd, & + pndaspect, rfrac, & + melttn(:,:,n,iblk), meltsn(:,:,n,iblk), & + frain (:,:,iblk), Tair (:,:,iblk), & + fsurfn(:,:,n,iblk), & + dhsn (:,:,n,iblk), ffracn(:,:,n,iblk), & + aicen (:,:,n,iblk), vicen (:,:,n,iblk), & + vsnon (:,:,n,iblk), & + trcrn (:,:,nt_qice:nt_qice+nilyr-1,n,iblk), & + trcrn (:,:,nt_sice:nt_sice+nilyr-1,n,iblk), & + trcrn (:,:,nt_Tsfc,n,iblk), & + trcrn (:,:,nt_alvl,n,iblk), & + trcrn (:,:,nt_apnd,n,iblk), & + trcrn (:,:,nt_hpnd,n,iblk), & + trcrn (:,:,nt_ipnd,n,iblk)) + + elseif (tr_pond_topo .and. icells > 0) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! collect liquid water in ponds + ! assume salt still runs off + + rfrac(i,j) = rfracmin + (rfracmax-rfracmin) * aicen(i,j,n,iblk) + pond = rfrac(i,j)/rhofresh * (melttn(i,j,n,iblk)*rhoi & + + meltsn(i,j,n,iblk)*rhos & + + frain (i,j,iblk)*dt) + + ! if pond does not exist, create new pond over full ice area + ! otherwise increase pond depth without changing pond area + if (trcrn(i,j,nt_apnd,n,iblk) < puny) then + trcrn(i,j,nt_hpnd,n,iblk) = c0 + trcrn(i,j,nt_apnd,n,iblk) = c1 + endif + trcrn(i,j,nt_hpnd,n,iblk) = (pond & + + trcrn(i,j,nt_hpnd,n,iblk)*trcrn(i,j,nt_apnd,n,iblk)) & + / trcrn(i,j,nt_apnd,n,iblk) + fpond(i,j,iblk) = fpond(i,j,iblk) & + + pond * aicen(i,j,n,iblk) ! m + enddo + endif + + endif + call ice_timer_stop(timer_ponds,iblk) + + !----------------------------------------------------------------- + ! Increment area-weighted fluxes. 
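! Editor's note (illustrative addition, not part of the imported source):
! the call below accumulates each per-category flux into the grid-cell
! aggregate, weighted by the category area at the start of the step,
! schematically
!    fsurf(i,j,iblk) = fsurf(i,j,iblk) + aicen_init(i,j,n,iblk)*fsurfn(i,j,n,iblk)
! and likewise for the other fields passed to merge_fluxes.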
+ !----------------------------------------------------------------- + + call merge_fluxes (nx_block, ny_block, & + icells, & + indxi, indxj, & + aicen_init(:,:,n,iblk), & + flw(:,:,iblk), coszen(:,:,iblk), & + strairxn, strairyn, & + Cdn_atm_ratio_n, & + fsurfn(:,:,n,iblk), fcondtopn(:,:,n,iblk),& + fsensn(:,:,n,iblk), flatn(:,:,n,iblk), & + fswabsn, flwoutn, & + evapn, & + Trefn, Qrefn, & + freshn, fsaltn, & + fhocnn, fswthrun(:,:,n,iblk), & + strairxT(:,:,iblk), strairyT (:,:,iblk), & + Cdn_atm_ratio(:,:,iblk), & + fsurf (:,:,iblk), fcondtop (:,:,iblk), & + fsens (:,:,iblk), flat (:,:,iblk), & + fswabs (:,:,iblk), flwout (:,:,iblk), & + evap (:,:,iblk), & + Tref (:,:,iblk), Qref (:,:,iblk), & + fresh (:,:,iblk), fsalt (:,:,iblk), & + fhocn (:,:,iblk), fswthru (:,:,iblk), & + melttn(:,:,n,iblk), meltsn (:,:,n,iblk), & + meltbn(:,:,n,iblk), congeln (:,:,n,iblk), & + snoicen(:,:,n,iblk), & + meltt (:,:,iblk), melts (:,:,iblk), & + meltb (:,:,iblk), & + congel (:,:,iblk), snoice (:,:,iblk), & + Uref=Uref(:,:,iblk), Urefn=Urefn ) + + enddo ! ncat + + !----------------------------------------------------------------- + ! Calculate ponds from the topographic scheme + !----------------------------------------------------------------- + call ice_timer_start(timer_ponds,iblk) + if (tr_pond_topo) then + call compute_ponds_topo(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, & + aice (:,:,iblk), aicen(:,:,:,iblk), & + vice (:,:,iblk), vicen(:,:,:,iblk), & + vsno (:,:,iblk), vsnon(:,:,:,iblk), & + potT (:,:,iblk), meltt(:,:, iblk), & + fsurf(:,:,iblk), fpond(:,:, iblk), & + trcrn(:,:,nt_Tsfc,:,iblk), Tf(:,:, iblk), & + trcrn(:,:,nt_qice:nt_qice+nilyr-1,:,iblk), & + trcrn(:,:,nt_sice:nt_sice+nilyr-1,:,iblk), & + trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk)) + endif + call ice_timer_stop(timer_ponds,iblk) + + end subroutine step_therm1 + +!======================================================================= +! Driver for thermodynamic changes not needed for coupling: +! transport in thickness space, lateral growth and melting. +! +! author: William H. Lipscomb, LANL + + subroutine step_therm2 (dt, iblk) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_calendar, only: istep1, yday + use ice_communicate, only: my_task + use ice_domain, only: blocks_ice + use ice_domain_size, only: ncat + use ice_exit, only: abort_ice + use ice_flux, only: fresh, frain, fpond, frzmlt, frazil, frz_onset, & + update_ocn_f, fsalt, Tf, sss, salinz, fhocn, faero_ocn, rside, & + meltl + use ice_fileunits, only: nu_diag + use ice_grid, only: tmask + use ice_itd, only: cleanup_itd, kitd, aggregate_area, reduce_area + use ice_therm_itd, only: lateral_melt, linear_itd, add_new_ice + use ice_zbgc_shared, only: ocean_bio, flux_bio + use ice_state, only: aice, aicen, aice0, ntrcr, trcr_depend, & + aicen_init, vicen_init, trcrn, vicen, vsnon, nbtrcr, tr_aero, & + tr_pond_topo + use ice_therm_shared, only: heat_capacity + use ice_therm_vertical, only: phi_init, dSin0_frazil + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_catconv + use ice_zbgc_shared, only: first_ice + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + i, j + + integer (kind=int_kind) :: & + icells ! 
number of ice/ocean cells + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! indirect indices for ice/ocean cells + + type (block) :: & + this_block ! block information for current block + + logical (kind=log_kind) :: & + l_stop ! if true, abort model + + integer (kind=int_kind) :: & + istop, jstop ! indices of grid cell where model aborts + + l_stop = .false. + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !----------------------------------------------------------------- + ! Let rain drain through to the ocean. + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + fresh (i,j,iblk) = fresh(i,j,iblk) & + + frain(i,j,iblk)*aice(i,j,iblk) + enddo + enddo + + !----------------------------------------------------------------- + ! Given thermodynamic growth rates, transport ice between + ! thickness categories. + !----------------------------------------------------------------- + + call ice_timer_start(timer_catconv,iblk) ! category conversions + + !----------------------------------------------------------------- + ! Compute fractional ice area in each grid cell. + !----------------------------------------------------------------- + call aggregate_area (nx_block, ny_block, & + aicen(:,:,:,iblk), & + aice (:,:, iblk), aice0(:,:,iblk)) + + if (kitd == 1) then + !----------------------------------------------------------------- + ! Identify grid cells with ice. + !----------------------------------------------------------------- + + icells = 0 + do j = jlo,jhi + do i = ilo,ihi + if (aice(i,j,iblk) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + if (icells > 0) then + + call linear_itd (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend(1:ntrcr), & + aicen_init(:,:,:,iblk), & + vicen_init(:,:,:,iblk), & + aicen (:,:,:,iblk), & + trcrn (:,:,1:ntrcr,:,iblk), & + vicen (:,:,:,iblk), & + vsnon (:,:,:,iblk), & + aice (:,:, iblk), & + aice0 (:,:, iblk), & + fpond (:,:, iblk), & + l_stop, & + istop, jstop) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + call abort_ice ('ice: Linear ITD error') + endif + + endif ! icells + + endif ! kitd = 1 + + call ice_timer_stop(timer_catconv,iblk) ! category conversions + + !----------------------------------------------------------------- + ! Add frazil ice growing in leads. + !----------------------------------------------------------------- + + ! identify ice-ocean cells + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! 
j + + call add_new_ice (nx_block, ny_block, & + ntrcr, icells, & + indxi, indxj, & + dt, & + aicen (:,:,:,iblk), & + trcrn (:,:,1:ntrcr,:,iblk), & + vicen (:,:,:,iblk), & + aice0 (:,:, iblk), & + aice (:,:, iblk), & + frzmlt (:,:, iblk), & + frazil (:,:, iblk), & + frz_onset (:,:, iblk), yday, & + update_ocn_f, & + fresh (:,:, iblk), & + fsalt (:,:, iblk), & + Tf (:,:, iblk), & + sss (:,:, iblk), & + salinz (:,:,:,iblk), & + phi_init, dSin0_frazil, & + nbtrcr, & + flux_bio (:,:,1:nbtrcr,iblk), & + ocean_bio (:,:,1:nbtrcr,iblk), & + l_stop, & + istop , jstop) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + call abort_ice ('ice: add_new_ice error') + endif + + !----------------------------------------------------------------- + ! Melt ice laterally. + !----------------------------------------------------------------- + + call lateral_melt (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, & + fpond (:,:, iblk), & + fresh (:,:, iblk), & + fsalt (:,:, iblk), & + fhocn (:,:, iblk), & + faero_ocn (:,:,:,iblk), & + rside (:,:, iblk), & + meltl (:,:, iblk), & + aicen (:,:,:,iblk), & + vicen (:,:,:,iblk), & + vsnon (:,:,:,iblk), & + trcrn (:,:,:,:,iblk)) + + !----------------------------------------------------------------- + ! For the special case of a single category, adjust the area and + ! volume (assuming that half the volume change decreases the + ! thickness, and the other half decreases the area). + !----------------------------------------------------------------- + +!echmod: test this + if (ncat==1) & + call reduce_area (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmask (:,:, iblk), & + aicen (:,:,1,iblk), & + vicen (:,:,1,iblk), & + aicen_init(:,:,1,iblk), & + vicen_init(:,:,1,iblk)) + + !----------------------------------------------------------------- + ! ITD cleanup: Rebin thickness categories if necessary, and remove + ! categories with very small areas. + !----------------------------------------------------------------- + + call cleanup_itd (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, ntrcr, & + aicen (:,:,:,iblk), & + trcrn (:,:,1:ntrcr,:,iblk), & + vicen (:,:,:,iblk), vsnon (:,:, :,iblk), & + aice0 (:,:, iblk), aice (:,:,iblk), & + trcr_depend(1:ntrcr), fpond (:,:,iblk), & + fresh (:,:, iblk), fsalt (:,:,iblk), & + fhocn (:,:, iblk), & + faero_ocn(:,:,:,iblk),tr_aero, & + tr_pond_topo, heat_capacity, & + nbtrcr, first_ice(:,:,:,iblk),& + flux_bio(:,:,1:nbtrcr,iblk), & + l_stop, & + istop, jstop) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + call abort_ice ('ice: ITD cleanup error in step_therm2') + endif + + end subroutine step_therm2 + +!======================================================================= +! +! finalize thermo updates +! +! 
authors: Elizabeth Hunke, LANL + + subroutine post_thermo (dt) + + use ice_blocks, only: nx_block, ny_block + use ice_domain, only: nblocks + use ice_flux, only: daidtt, dvidtt, dvsdtt, dagedtt + use ice_grid, only: tmask + use ice_itd, only: aggregate + use ice_state, only: aicen, trcrn, vicen, vsnon, ntrcr, & + aice, trcr, vice, vsno, aice0, trcr_depend, & + bound_state, tr_iage, nt_iage + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind) :: & + iblk , & ! block index + i,j ! horizontal indices + + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + !------------------------------------------------------------------- + + call ice_timer_start(timer_bound) + call bound_state (aicen, trcrn, & + vicen, vsnon) + call ice_timer_stop(timer_bound) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! Aggregate the updated state variables (includes ghost cells). + !----------------------------------------------------------------- + + call aggregate (nx_block, ny_block, & + aicen(:,:,:,iblk), & + trcrn(:,:,1:ntrcr,:,iblk), & + vicen(:,:,:,iblk), vsnon(:,:, :,iblk), & + aice (:,:, iblk), & + trcr (:,:,1:ntrcr, iblk), & + vice (:,:, iblk), vsno (:,:, iblk), & + aice0(:,:, iblk), tmask(:,:, iblk), & + ntrcr, trcr_depend(1:ntrcr)) + + !----------------------------------------------------------------- + ! Compute thermodynamic area and volume tendencies. + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + daidtt(i,j,iblk) = (aice(i,j,iblk) - daidtt(i,j,iblk)) / dt + dvidtt(i,j,iblk) = (vice(i,j,iblk) - dvidtt(i,j,iblk)) / dt + dvsdtt(i,j,iblk) = (vsno(i,j,iblk) - dvsdtt(i,j,iblk)) / dt + if (tr_iage) then + if (trcr(i,j,nt_iage,iblk) > c0) & + dagedtt(i,j,iblk)= (trcr(i,j,nt_iage,iblk)-dagedtt(i,j,iblk)-dt)/dt + endif + enddo + enddo + + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine post_thermo + +!======================================================================= +! +! Run one time step of dynamics, horizontal transport, and ridging. +! NOTE: The evp and transport modules include boundary updates, so +! they cannot be done inside a single block loop. Ridging +! and cleanup, on the other hand, are single-column operations. +! They are called with argument lists inside block loops +! to increase modularity. +! +! authors: William H. Lipscomb, LANL + + subroutine step_dynamics (dt, ndtd) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_calendar, only: istep + use ice_domain, only: blocks_ice, nblocks + use ice_domain_size, only: nslyr + use ice_dyn_evp, only: evp + use ice_dyn_eap, only: eap + use ice_dyn_shared, only: kdyn + use ice_flux, only: daidtd, dvidtd, dvsdtd, init_history_dyn, dagedtd + use ice_grid, only: tmask + use ice_itd, only: aggregate + use ice_state, only: nt_qsno, trcrn, vsnon, aicen, vicen, ntrcr, & + aice, trcr, vice, vsno, aice0, trcr_depend, bound_state, tr_iage, nt_iage + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_column, & + timer_ridge, timer_bound + use ice_transport_driver, only: advection, transport_upwind, transport_remap + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + ndtd ! number of dynamics subcycles + + ! 
local variables + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: & + iblk , & ! block index + i,j , & ! horizontal indices + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + call init_history_dyn ! initialize dynamic history variables + + !----------------------------------------------------------------- + ! Elastic-viscous-plastic ice dynamics + !----------------------------------------------------------------- + + if (kdyn == 1) call evp (dt) + if (kdyn == 2) call eap (dt) + + !----------------------------------------------------------------- + ! Horizontal ice transport + !----------------------------------------------------------------- + + if (advection == 'upwind') then + call transport_upwind (dt) ! upwind + else + call transport_remap (dt) ! incremental remapping + endif + + !----------------------------------------------------------------- + ! Ridging + !----------------------------------------------------------------- + + call ice_timer_start(timer_column) + call ice_timer_start(timer_ridge) + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call step_ridge (dt, ndtd, iblk) + enddo ! iblk + !$OMP END PARALLEL DO + + call ice_timer_stop(timer_ridge) + + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + !------------------------------------------------------------------- + + call ice_timer_start(timer_bound) + call bound_state (aicen, trcrn, & + vicen, vsnon) + call ice_timer_stop(timer_bound) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! Aggregate the updated state variables (includes ghost cells). + !----------------------------------------------------------------- + + call aggregate (nx_block, ny_block, & + aicen(:,:,:,iblk), & + trcrn(:,:,1:ntrcr,:,iblk), & + vicen(:,:,:,iblk), vsnon(:,:, :,iblk), & + aice (:,:, iblk), & + trcr (:,:,1:ntrcr, iblk), & + vice (:,:, iblk), vsno (:,:, iblk), & + aice0(:,:, iblk), tmask(:,:, iblk), & + ntrcr, trcr_depend(1:ntrcr)) + + !----------------------------------------------------------------- + ! Compute dynamic area and volume tendencies. + !----------------------------------------------------------------- + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo,jhi + do i = ilo,ihi + dvidtd(i,j,iblk) = (vice(i,j,iblk) - dvidtd(i,j,iblk)) /dt + dvsdtd(i,j,iblk) = (vsno(i,j,iblk) - dvsdtd(i,j,iblk)) /dt + daidtd(i,j,iblk) = (aice(i,j,iblk) - daidtd(i,j,iblk)) /dt + if (tr_iage) & + dagedtd(i,j,iblk)= (trcr(i,j,nt_iage,iblk)-dagedtd(i,j,iblk))/dt + enddo + enddo + + enddo + !$OMP END PARALLEL DO + + call ice_timer_stop(timer_column) + + end subroutine step_dynamics + +!======================================================================= +! +! Computes sea ice mechanical deformation +! +! authors: William H. Lipscomb, LANL +! Elizabeth C. 
Hunke, LANL + + subroutine step_ridge (dt, ndtd, iblk) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_calendar, only: istep1 + use ice_communicate, only: my_task + use ice_domain, only: blocks_ice + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_diag + use ice_flux, only: rdg_conv, rdg_shear, dardg1dt, dardg2dt, & + dvirdgdt, opening, fpond, fresh, fhocn, faero_ocn, & + aparticn, krdgn, aredistn, vredistn, dardg1ndt, dardg2ndt, & + dvirdgndt, araftn, vraftn, fsalt + use ice_grid, only: tmask + use ice_itd, only: cleanup_itd + use ice_mechred, only: ridge_ice + use ice_state, only: ntrcr, aicen, trcrn, vicen, vsnon, aice0, & + trcr_depend, aice, tr_aero, tr_pond_topo, nbtrcr + use ice_therm_shared, only: heat_capacity + use ice_zbgc_shared, only: flux_bio, first_ice + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + ndtd, & ! number of dynamics subcycles + iblk ! block index + + ! local variables + + real (kind=dbl_kind) :: & + dtt ! thermo time step + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: & + i,j , & ! horizontal indices + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + integer (kind=int_kind) :: & + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! indirect indices for cells with aicen > puny + + logical (kind=log_kind) :: & + l_stop ! if true, abort model + + integer (kind=int_kind) :: & + istop, jstop ! indices of grid cell where model aborts + + l_stop = .false. + + this_block = get_block(blocks_ice(iblk), iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !----------------------------------------------------------------- + ! Identify ice-ocean cells. + ! Note: We can not define icells here using aice>puny because + ! aice has not yet been updated since the transport (and + ! it may be out of whack, which the ridging helps fix).-ECH + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + if (icells > 0) then + + call ridge_ice (nx_block, ny_block, & + dt, ndtd, & + ntrcr, icells, & + indxi, indxj, & + rdg_conv(:,:, iblk), rdg_shear (:,:, iblk), & + aicen (:,:,:,iblk), & + trcrn (:,:,1:ntrcr,:,iblk), & + vicen (:,:,:,iblk), vsnon (:,:,:,iblk), & + aice0 (:,:, iblk), & + trcr_depend(1:ntrcr), l_stop, & + istop, jstop, & + dardg1dt(:,:,iblk), dardg2dt (:,:,iblk), & + dvirdgdt(:,:,iblk), opening (:,:,iblk), & + fpond (:,:,iblk), & + fresh (:,:,iblk), fhocn (:,:,iblk), & + faero_ocn(:,:,:,iblk), & + aparticn(:,:,:,iblk), krdgn (:,:,:,iblk), & + aredistn(:,:,:,iblk), vredistn (:,:,:,iblk), & + dardg1ndt(:,:,:,iblk),dardg2ndt (:,:,:,iblk), & + dvirdgndt(:,:,:,iblk), & + araftn (:,:,:,iblk),vraftn (:,:,:,iblk)) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + call abort_ice ('ice: Ridging error') + endif + + endif + + !----------------------------------------------------------------- + ! ITD cleanup: Rebin thickness categories if necessary, and remove + ! categories with very small areas. 
+ !----------------------------------------------------------------- + + dtt = dt * ndtd ! for proper averaging over thermo timestep + call cleanup_itd (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dtt, ntrcr, & + aicen (:,:,:,iblk), & + trcrn (:,:,1:ntrcr,:,iblk), & + vicen (:,:,:,iblk), vsnon (:,:, :,iblk), & + aice0 (:,:, iblk), aice (:,:,iblk), & + trcr_depend(1:ntrcr), fpond (:,:,iblk), & + fresh (:,:, iblk), fsalt (:,:,iblk), & + fhocn (:,:, iblk), & + faero_ocn(:,:,:,iblk),tr_aero, & + tr_pond_topo, heat_capacity, & + nbtrcr, first_ice(:,:,:,iblk),& + flux_bio(:,:,1:nbtrcr,iblk), & + l_stop, & + istop, jstop) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + call abort_ice ('ice: ITD cleanup error in step_ridge') + endif + + end subroutine step_ridge + +!======================================================================= +! +! Computes radiation fields +! +! authors: William H. Lipscomb, LANL +! David Bailey, NCAR +! Elizabeth C. Hunke, LANL + + subroutine step_radiation (dt, iblk) + + use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_domain, only: blocks_ice, nblocks + use ice_domain_size, only: ncat + use ice_flux, only: swvdr, swvdf, swidr, swidf, coszen, fsnow + use ice_grid, only: TLAT, TLON, tmask + use ice_meltpond_lvl, only: ffracn, dhsn + use ice_meltpond_topo, only: hp1 + use ice_shortwave, only: fswsfcn, fswintn, fswthrun, fswpenln, & + Sswabsn, Iswabsn, shortwave, & + albicen, albsnon, albpndn, & + alvdrn, alidrn, alvdfn, alidfn, & + run_dedd, shortwave_ccsm3, apeffn +!ars599: 27032014: 22042015 need ifdef +#ifdef AusCOM + use ice_shortwave, only : ocn_albedo2D +#endif + use ice_state, only: aicen, vicen, vsnon, trcrn, nt_Tsfc, & + nt_apnd, nt_ipnd, nt_hpnd, tr_pond_topo + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_sw + use ice_therm_shared, only: calc_Tsfc + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + i, j, & ! horizontal indices + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + n ! thickness category index + + type (block) :: & + this_block ! block information for current block + + call ice_timer_start(timer_sw,iblk) ! shortwave + + ! Initialize + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + alvdrn(i,j,n,iblk) = c0 + alidrn(i,j,n,iblk) = c0 + alvdfn(i,j,n,iblk) = c0 + alidfn(i,j,n,iblk) = c0 + fswsfcn(i,j,n,iblk) = c0 + fswintn(i,j,n,iblk) = c0 + fswthrun(i,j,n,iblk) = c0 + enddo ! i + enddo ! j + enddo ! ncat + fswpenln(:,:,:,:,iblk) = c0 + Iswabsn(:,:,:,:,iblk) = c0 + Sswabsn(:,:,:,:,iblk) = c0 + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + + if (calc_Tsfc) then + if (trim(shortwave) == 'dEdd') then ! 
delta Eddington + + call run_dEdd(ilo, ihi, jlo, jhi, & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + vsnon(:,:,:,iblk), trcrn(:,:,:,:,iblk), & + TLAT(:,:,iblk), TLON(:,:,iblk), & + tmask(:,:,iblk), & + swvdr(:,:,iblk), swvdf(:,:,iblk), & + swidr(:,:,iblk), swidf(:,:,iblk), & + coszen(:,:,iblk), fsnow(:,:,iblk), & + alvdrn(:,:,:,iblk), alvdfn(:,:,:,iblk), & + alidrn(:,:,:,iblk), alidfn(:,:,:,iblk), & + fswsfcn(:,:,:,iblk), fswintn(:,:,:,iblk), & + fswthrun(:,:,:,iblk), fswpenln(:,:,:,:,iblk), & + Sswabsn(:,:,:,:,iblk), Iswabsn(:,:,:,:,iblk), & + albicen(:,:,:,iblk), albsnon(:,:,:,iblk), & + albpndn(:,:,:,iblk), apeffn(:,:,:,iblk), & + dhsn(:,:,:,iblk), ffracn(:,:,:,iblk)) + + else ! .not. dEdd + + call shortwave_ccsm3(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + vsnon(:,:,:,iblk), & + trcrn(:,:,nt_Tsfc,:,iblk), & + swvdr(:,:, iblk), swvdf(:,:, iblk), & + swidr(:,:, iblk), swidf(:,:, iblk), & + alvdrn(:,:,:,iblk), alidrn(:,:,:,iblk), & + alvdfn(:,:,:,iblk), alidfn(:,:,:,iblk), & + fswsfcn(:,:,:,iblk), fswintn(:,:,:,iblk), & + fswthrun(:,:,:,iblk), & + fswpenln(:,:,:,:,iblk), & + Iswabsn(:,:,:,:,iblk), & + Sswabsn(:,:,:,:,iblk), & + albicen(:,:,:,iblk), albsnon(:,:,:,iblk), & +#ifndef AusCOM + coszen(:,:,iblk)) +#else + coszen(:,:,iblk), & + ocn_albedo2D(:,:,iblk)) +#endif + endif ! shortwave + + else ! .not. calc_Tsfc + + ! Calculate effective pond area for HadGEM + + if (tr_pond_topo) then + do n = 1, ncat + apeffn(:,:,n,iblk) = c0 + do j = 1, ny_block + do i = 1, nx_block + if (aicen(i,j,n,iblk) > puny) then + ! Lid effective if thicker than hp1 + if (trcrn(i,j,nt_apnd,n,iblk)*aicen(i,j,n,iblk) > puny .and. & + trcrn(i,j,nt_ipnd,n,iblk) < hp1) then + apeffn(i,j,n,iblk) = trcrn(i,j,nt_apnd,n,iblk) + else + apeffn(i,j,n,iblk) = c0 + endif + if (trcrn(i,j,nt_apnd,n,iblk) < puny) apeffn(i,j,n,iblk) = c0 + endif + enddo + enddo + enddo ! ncat + + endif ! tr_pond_topo + + ! Initialize for safety + do n = 1, ncat + do j = 1, ny_block + do i = 1, nx_block + alvdrn(i,j,n,iblk) = c0 + alidrn(i,j,n,iblk) = c0 + alvdfn(i,j,n,iblk) = c0 + alidfn(i,j,n,iblk) = c0 + fswsfcn(i,j,n,iblk) = c0 + fswintn(i,j,n,iblk) = c0 + fswthrun(i,j,n,iblk) = c0 + enddo ! i + enddo ! j + enddo ! ncat + Iswabsn(:,:,:,:,iblk) = c0 + Sswabsn(:,:,:,:,iblk) = c0 + + endif ! calc_Tsfc + + call ice_timer_stop(timer_sw,iblk) ! shortwave + + end subroutine step_radiation + +!======================================================================= + + end module ice_step_mod + +!======================================================================= diff --git a/source/ice_therm_0layer.F90 b/source/ice_therm_0layer.F90 new file mode 100755 index 00000000..af313132 --- /dev/null +++ b/source/ice_therm_0layer.F90 @@ -0,0 +1,470 @@ +! SVN:$Id: ice_therm_0layer.F90 700 2013-08-15 19:17:39Z eclare $ +!========================================================================= +! +! Update ice and snow internal temperatures +! using zero-layer thermodynamics +! +! authors: Alison McLaren, UK MetOffice +! Elizabeth C. Hunke, LANL +! +! 
2012: Split from ice_therm_vertical.F90 + + module ice_therm_0layer + + use ice_kinds_mod + use ice_domain_size, only: nilyr, nslyr, max_ntrcr + use ice_constants + use ice_fileunits, only: nu_diag + use ice_therm_bl99, only: surface_fluxes + + implicit none + + private + public :: zerolayer_temperature + +!======================================================================= + + contains + +!======================================================================= +! +! Compute new surface temperature using zero layer model of Semtner +! (1976). +! +! New temperatures are computed iteratively by solving a +! surface flux balance equation (i.e. net surface flux from atmos +! equals conductive flux from the top to the bottom surface). +! +! author: Alison McLaren, Met Office +! (but largely taken from temperature_changes) + + subroutine zerolayer_temperature(nx_block, ny_block, & + my_task, istep1, & + dt, icells, & + indxi, indxj, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fswsfc, & + hilyr, hslyr, & + Tsf, Tbot, & + fsensn, flatn, & + flwoutn, fsurfn, & + fcondtopn,fcondbot, & + l_stop, & + istop, jstop) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + my_task , & ! task number (diagnostic only) + istep1 , & ! time step index (diagnostic only) + icells ! number of cells with aicen > puny + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef , & ! transfer coefficient for latent heat + Tbot , & ! ice bottom surface temperature (deg C) + fswsfc ! SW absorbed at ice/snow surface (W m-2) + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + hilyr , & ! ice layer thickness (m) + hslyr ! snow layer thickness (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout):: & + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + flwoutn , & ! upward LW at surface (W m-2) + fsurfn , & ! net flux to top surface, excluding fcondtopn + fcondtopn ! downward cond flux at top surface (W m-2) + + real (kind=dbl_kind), dimension (icells), intent(out):: & + fcondbot ! downward cond flux at bottom surface (W m-2) + + real (kind=dbl_kind), dimension (icells), & + intent(inout):: & + Tsf ! ice/snow surface temperature, Tsfcn + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, print diagnostics and abort model + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! i and j indices of cell where model fails + + ! local variables + + logical (kind=log_kind), parameter :: & + l_zerolayerchecks = .true. + + integer (kind=int_kind), parameter :: & + nitermax = 50 ! max number of iterations in temperature solver + + real (kind=dbl_kind), parameter :: & + Tsf_errmax = 5.e-4_dbl_kind ! max allowed error in Tsf + ! recommend Tsf_errmax < 0.01 K + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij, m , & ! horizontal indices, combine i and j loops + niter ! iteration counter in temperature solver + + integer (kind=int_kind) :: & + isolve ! 
number of cells with temps not converged + + integer (kind=int_kind), dimension (icells) :: & + indxii, indxjj ! compressed indices for cells not converged + + integer (kind=int_kind), dimension (icells) :: & + indxij ! compressed 1D index for cells not converged + + real (kind=dbl_kind), dimension (:), allocatable :: & + Tsf_start , & ! Tsf at start of iteration + dTsf , & ! Tsf - Tsf_start + dfsurf_dT ! derivative of fsurfn wrt Tsf + + real (kind=dbl_kind), dimension (icells) :: & + dTsf_prev , & ! dTsf from previous iteration + dfsens_dT , & ! deriv of fsens wrt Tsf (W m-2 deg-1) + dflat_dT , & ! deriv of flat wrt Tsf (W m-2 deg-1) + dflwout_dT ! deriv of flwout wrt Tsf (W m-2 deg-1) + + real (kind=dbl_kind), dimension (:), allocatable :: & + kh , & ! effective conductivity + diag , & ! diagonal matrix elements + rhs ! rhs of tri-diagonal matrix equation + + real (kind=dbl_kind) :: & + heff , & ! effective ice thickness (m) + ! ( hice + hsno*kseaice/ksnow) + kratio , & ! ratio of ice and snow conductivies + avg_Tsf ! = 1. if Tsf averaged w/Tsf_start, else = 0. + + logical (kind=log_kind), dimension (icells) :: & + converged ! = true when local solution has converged + + logical (kind=log_kind) :: & + all_converged ! = true when all cells have converged + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + all_converged = .false. + + do ij = 1, icells + fcondbot(ij) = c0 + + converged (ij) = .false. + + dTsf_prev (ij) = c0 + + enddo ! ij + + !----------------------------------------------------------------- + ! Solve for new temperatures. + ! Iterate until temperatures converge with minimal temperature + ! change. + !----------------------------------------------------------------- + + do niter = 1, nitermax + + if (all_converged) then ! thermo calculation is done + exit + else ! identify cells not yet converged + isolve = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (.not.converged(ij)) then + isolve = isolve + 1 + indxii(isolve) = i + indxjj(isolve) = j + indxij(isolve) = ij + endif + enddo ! ij + endif + + allocate( diag(isolve)) + allocate( rhs(isolve)) + allocate( kh(isolve)) + allocate(Tsf_start(isolve)) + allocate( dTsf(isolve)) + allocate(dfsurf_dT(isolve)) + + !----------------------------------------------------------------- + ! Update radiative and turbulent fluxes and their derivatives + ! with respect to Tsf. + !----------------------------------------------------------------- + + call surface_fluxes (nx_block, ny_block, & + isolve, icells, & + indxii, indxjj, indxij, & + Tsf, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + flwoutn, fsensn, & + flatn, fsurfn, & + dflwout_dT, dfsens_dT, & + dflat_dT, dfsurf_dT) + + !----------------------------------------------------------------- + ! Compute effective ice thickness (includes snow) and thermal + ! conductivity + !----------------------------------------------------------------- + + kratio = kseaice/ksno + + do ij = 1, isolve + m = indxij(ij) + + heff = hilyr(m) + kratio * hslyr(m) + kh(ij) = kseaice / heff + enddo ! ij + + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + !----------------------------------------------------------------- + ! Compute conductive flux at top surface, fcondtopn. + ! If fsurfn < fcondtopn and Tsf = 0, then reset Tsf to slightly less + ! than zero (but not less than -puny). 
+ !----------------------------------------------------------------- + + fcondtopn(i,j) = kh(ij) * (Tsf(m) - Tbot(i,j)) + + if (fsurfn(i,j) < fcondtopn(i,j)) & + Tsf(m) = min (Tsf(m), -puny) + + !----------------------------------------------------------------- + ! Save surface temperature at start of iteration + !----------------------------------------------------------------- + + Tsf_start(ij) = Tsf(m) + + enddo ! ij + + !----------------------------------------------------------------- + ! Solve surface balance equation to obtain the new temperatures. + !----------------------------------------------------------------- + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + diag(ij) = dfsurf_dT(ij) - kh(ij) + rhs(ij) = dfsurf_dT(ij)*Tsf(m) - fsurfn(i,j) & + - kh(ij)*Tbot(i,j) + Tsf(m) = rhs(ij) / diag(ij) + + enddo + + !----------------------------------------------------------------- + ! Determine whether the computation has converged to an acceptable + ! solution. Four conditions must be satisfied: + ! + ! (1) Tsf <= 0 C. + ! (2) Tsf is not oscillating; i.e., if both dTsf(niter) and + ! dTsf(niter-1) have magnitudes greater than puny, then + ! dTsf(niter)/dTsf(niter-1) cannot be a negative number + ! with magnitude greater than 0.5. + ! (3) abs(dTsf) < Tsf_errmax + ! (4) If Tsf = 0 C, then the downward turbulent/radiative + ! flux, fsurfn, must be greater than or equal to the downward + ! conductive flux, fcondtopn. + !----------------------------------------------------------------- + + ! initialize global convergence flag + all_converged = .true. + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + m = indxij(ij) + + !----------------------------------------------------------------- + ! Initialize convergence flag (true until proven false), dTsf, + ! and temperature-averaging coefficients. + ! Average only if test 1 or 2 fails. + ! Initialize energy. + !----------------------------------------------------------------- + + converged(m) = .true. + dTsf(ij) = Tsf(m) - Tsf_start(ij) + avg_Tsf = c0 + + !----------------------------------------------------------------- + ! Condition 1: check for Tsf > 0 + ! If Tsf > 0, set Tsf = 0 and leave converged=.true. + !----------------------------------------------------------------- + + if (Tsf(m) > puny) then + Tsf(m) = c0 + dTsf(ij) = -Tsf_start(ij) + + !----------------------------------------------------------------- + ! Condition 2: check for oscillating Tsf + ! If oscillating, average all temps to increase rate of convergence. + ! It is possible that this may never occur. + !----------------------------------------------------------------- + + elseif (niter > 1 & ! condition (2) + .and. Tsf_start(ij) <= -puny & + .and. abs(dTsf(ij)) > puny & + .and. abs(dTsf_prev(m)) > puny & + .and. -dTsf(ij)/(dTsf_prev(m)+puny*puny) > p5) then + + avg_Tsf = c1 ! average with starting temp + dTsf(ij) = p5 * dTsf(ij) + converged(m) = .false. + all_converged = .false. + endif + + !----------------------------------------------------------------- + ! If condition 2 failed, average new surface temperature with + ! starting value. + !----------------------------------------------------------------- + Tsf(m) = Tsf(m) & + + avg_Tsf * p5 * (Tsf_start(ij) - Tsf(m)) + + enddo ! ij + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + !----------------------------------------------------------------- + ! 
Condition 3: check for large change in Tsf + !----------------------------------------------------------------- + + if (abs(dTsf(ij)) > Tsf_errmax) then + converged(m) = .false. + all_converged = .false. + endif + + !----------------------------------------------------------------- + ! Condition 4: check for fsurfn < fcondtopn with Tsf > 0 + !----------------------------------------------------------------- + + fsurfn(i,j) = fsurfn(i,j) + dTsf(ij)*dfsurf_dT(ij) + fcondtopn(i,j) = kh(ij) * (Tsf(m)-Tbot(i,j)) + + if (Tsf(m) > -puny .and. fsurfn(i,j) < fcondtopn(i,j)) then + converged(m) = .false. + all_converged = .false. + endif + + fcondbot(m) = fcondtopn(i,j) + + dTsf_prev(m) = dTsf(ij) + + enddo ! ij + + deallocate(diag) + deallocate(rhs) + deallocate(kh) + deallocate(Tsf_start) + deallocate(dTsf) + deallocate(dfsurf_dT) + + enddo ! temperature iteration niter + + if (.not.all_converged) then + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !----------------------------------------------------------------- + ! Check for convergence failures. + !----------------------------------------------------------------- + if (.not.converged(ij)) then + write(nu_diag,*) 'Thermo iteration does not converge,', & + 'istep1, my_task, i, j:', & + istep1, my_task, i, j + write(nu_diag,*) 'Ice thickness:', hilyr(ij)*nilyr + write(nu_diag,*) 'Snow thickness:', hslyr(ij)*nslyr + write(nu_diag,*) 'dTsf, Tsf_errmax:',dTsf_prev(ij), & + Tsf_errmax + write(nu_diag,*) 'Tsf:', Tsf(ij) + write(nu_diag,*) 'fsurfn:', fsurfn(i,j) + write(nu_diag,*) 'fcondtopn, fcondbot', & + fcondtopn(i,j), fcondbot(ij) + l_stop = .true. + istop = i + jstop = j + return + endif + enddo ! ij + endif ! all_converged + + !----------------------------------------------------------------- + ! Check that if Tsfc < 0, then fcondtopn = fsurfn + !----------------------------------------------------------------- + + if (l_zerolayerchecks) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (Tsf(ij) < c0 .and. & + abs(fcondtopn(i,j)-fsurfn(i,j)) > puny) then + + write(nu_diag,*) 'fcondtopn does not equal fsurfn,', & + 'istep1, my_task, i, j:', & + istep1, my_task, i, j + write(nu_diag,*) 'Tsf=',Tsf(ij) + write(nu_diag,*) 'fcondtopn=',fcondtopn(i,j) + write(nu_diag,*) 'fsurfn=',fsurfn(i,j) + l_stop = .true. + istop = i + jstop = j + return + endif + enddo ! ij + endif ! l_zerolayerchecks + + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! update fluxes that depend on Tsf + flwoutn(i,j) = flwoutn(i,j) + dTsf_prev(ij) * dflwout_dT(ij) + fsensn(i,j) = fsensn(i,j) + dTsf_prev(ij) * dfsens_dT(ij) + flatn(i,j) = flatn(i,j) + dTsf_prev(ij) * dflat_dT(ij) + + enddo ! ij + + end subroutine zerolayer_temperature + +!======================================================================= + + end module ice_therm_0layer + +!======================================================================= diff --git a/source/ice_therm_bl99.F90 b/source/ice_therm_bl99.F90 new file mode 100755 index 00000000..4cdd57df --- /dev/null +++ b/source/ice_therm_bl99.F90 @@ -0,0 +1,2042 @@ +! SVN:$Id: ice_therm_bl99.F90 710 2013-09-03 22:46:53Z eclare $ +!========================================================================= +! +! Update ice and snow internal temperatures +! using Bitz and Lipscomb 1999 thermodynamics +! +! authors: William H. Lipscomb, LANL +! C. M. Bitz, UW +! Elizabeth C. Hunke, LANL +! +! 
2012: Split from ice_therm_vertical.F90 + + module ice_therm_bl99 + + use ice_kinds_mod + use ice_domain_size, only: nilyr, nslyr, max_ntrcr, n_aero, ncat + use ice_constants + use ice_fileunits, only: nu_diag + use ice_therm_shared, only: calculate_ki_from_Tin, & + conduct, calc_Tsfc, ferrmax, l_brine, hfrazilmin +!ars599: 06042016 conflict with the one at bellow so mark out +!, kimin, betak + + implicit none + save + + private + public :: surface_fluxes, temperature_changes + + real (kind=dbl_kind), parameter :: & + betak = 0.13_dbl_kind, & ! constant in formula for k (W m-1 ppt-1) + kimin = 0.10_dbl_kind ! min conductivity of saline ice (W m-1 deg-1) + +!======================================================================= + + contains + +!======================================================================= +! +! Compute new surface temperature and internal ice and snow +! temperatures. Include effects of salinity on sea ice heat +! capacity in a way that conserves energy (Bitz and Lipscomb, 1999). +! +! New temperatures are computed iteratively by solving a tridiagonal +! system of equations; heat capacity is updated with each iteration. +! Finite differencing is backward implicit. +! +! See Bitz, C.M., and W.H. Lipscomb, 1999: +! An energy-conserving thermodynamic model of sea ice, +! J. Geophys. Res., 104, 15,669-15,677. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine temperature_changes (nx_block, ny_block, & + my_task, istep1, & + dt, icells, & + indxi, indxj, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fswsfc, fswint, & + Sswabs, Iswabs, & + hilyr, hslyr, & + zqin, zTin, & + zqsn, zTsn, & + zSin, & + Tsf, Tbot, & + fsensn, flatn, & + flwoutn, fsurfn, & + fcondtopn,fcondbot, & + einit, l_stop, & + istop, jstop, & + enum) + + use ice_itd, only: hs_min + use ice_therm_shared, only: surface_heat_flux, dsurface_heat_flux_dTsf + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + my_task , & ! task number (diagnostic only) + istep1 , & ! time step index (diagnostic only) + icells ! number of cells with aicen > puny + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef , & ! transfer coefficient for latent heat + Tbot ! ice bottom surface temperature (deg C) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + fswint ! SW absorbed in ice interior below surface (W m-2) + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + hilyr , & ! ice layer thickness (m) + hslyr , & ! snow layer thickness (m) + einit ! initial energy of melting (J m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(inout) :: & + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(inout) :: & + Iswabs ! SW radiation absorbed in ice layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout):: & + fsurfn , & ! net flux to top surface, excluding fcondtopn + fcondtopn , & ! 
downward cond flux at top surface (W m-2) + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + flwoutn ! upward LW at surface (W m-2) + + real (kind=dbl_kind), dimension (icells), intent(out):: & + fcondbot ! downward cond flux at bottom surface (W m-2) + + real (kind=dbl_kind), dimension (icells), & + intent(inout):: & + Tsf ! ice/snow surface temperature, Tsfcn + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(inout) :: & + zqin , & ! ice layer enthalpy (J m-3) + zTin , & ! internal ice layer temperatures + zSin ! internal ice layer salinities + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(inout) :: & + zqsn , & ! snow layer enthalpy (J m-3) + zTsn ! internal snow layer temperatures + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, print diagnostics and abort model + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! i and j indices of cell where model fails + + ! local variables + + integer (kind=int_kind), parameter :: & + nitermax = 100, & ! max number of iterations in temperature solver + nmat = nslyr + nilyr + 1 ! matrix dimension + + real (kind=dbl_kind), parameter :: & + Tsf_errmax = 5.e-4_dbl_kind ! max allowed error in Tsf + ! recommend Tsf_errmax < 0.01 K + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij, m , & ! horizontal indices, combine i and j loops + ij_solve , & ! Alex West debugging + k , & ! ice layer index + niter ! iteration counter in temperature solver + + integer (kind=int_kind) :: & + isolve ! number of cells with temps not converged + + integer (kind=int_kind), dimension (icells) :: & + indxii, indxjj ! compressed indices for cells not converged + + integer (kind=int_kind), dimension (icells) :: & + indxij ! compressed 1D index for cells not converged + + logical (kind=log_kind), dimension (icells) :: & + l_snow , & ! true if snow temperatures are computed + l_cold ! true if surface temperature is computed + + real (kind=dbl_kind), dimension (:), allocatable :: & + Tsf_start , & ! Tsf at start of iteration + dTsf , & ! Tsf - Tsf_start + dTi1 , & ! Ti1(1) - Tin_start(1) + dfsurf_dT , & ! derivative of fsurf wrt Tsf + avg_Tsi , & ! = 1. if new snow/ice temps avg'd w/starting temps + enew ! new energy of melting after temp change (J m-2) + + real (kind=dbl_kind), dimension (icells), intent(out) :: & + enum ! Energy that, for numerical reasons, we don't want to use. + + real (kind=dbl_kind), dimension (icells) :: & + enew_icells ! debugging! + + real (kind=dbl_kind), dimension (icells) :: & + dTsf_prev , & ! dTsf from previous iteration + dTi1_prev , & ! dTi1 from previous iteration + dfsens_dT , & ! deriv of fsens wrt Tsf (W m-2 deg-1) + dflat_dT , & ! deriv of flat wrt Tsf (W m-2 deg-1) + dflwout_dT , & ! deriv of flwout wrt Tsf (W m-2 deg-1) + dt_rhoi_hlyr, & ! dt/(rhoi*hilyr) + einex , & ! excess energy from dqmat to ocean + ferr ! energy conservation error (W m-2) + + real (kind=dbl_kind), dimension (icells,nilyr) :: & + Tin_init , & ! zTin at beginning of time step + Tin_start , & ! zTin at start of iteration + dTmat , & ! zTin - matrix solution before limiting + dqmat , & ! associated enthalpy difference + Tmlts ! melting temp, -depressT * salinity + + real (kind=dbl_kind), dimension (icells,nslyr) :: & + dqmat_sn ! snow enthalpy difference before & after limiting + + real (kind=dbl_kind), dimension (icells,nslyr) :: & + Tsn_init , & ! zTsn at beginning of time step + Tsn_start , & ! zTsn at start of iteration + etas ! 
dt / (rho * cp * h) for snow layers + + real (kind=dbl_kind), dimension (:,:), allocatable :: & + etai , & ! dt / (rho * cp * h) for ice layers + sbdiag , & ! sub-diagonal matrix elements + diag , & ! diagonal matrix elements + spdiag , & ! super-diagonal matrix elements + rhs , & ! rhs of tri-diagonal matrix equation + Tmat ! matrix output temperatures + + real (kind=dbl_kind), dimension(icells,nilyr+nslyr+1):: & + kh ! effective conductivity at interfaces (W m-2 deg-1) + + real (kind=dbl_kind) :: & + ci , & ! specific heat of sea ice (J kg-1 deg-1) + avg_Tsf , & ! = 1. if Tsf averaged w/Tsf_start, else = 0. + Iswabs_tmp , & ! energy to melt through fraction frac of layer + Sswabs_tmp , & ! same for snow + dswabs , & ! difference in swabs and swabs_tmp + frac , & ! fraction of layer that can be melted through + dTemp ! minimum temperature difference for absorption + + logical (kind=log_kind), dimension (icells) :: & + converged ! = true when local solution has converged + + logical (kind=log_kind) :: & + all_converged ! = true when all cells have converged + + logical (kind=log_kind) , dimension (icells,nilyr) :: & + reduce_kh ! reduce conductivity when T exceeds Tmlt + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + fcondtopn_reduction, & ! desired decrease in cond. forcing if + ! top layer temp is being forced above + ! melting. Extra energy goes into ocean + fcondtopn_force ! Resulting value of fcondtopn passed to + ! tridiag matrix solver + + logical (kind=log_kind) , dimension (icells) :: & + Top_T_was_reset_last_time ! keep track of whether top layer temp was reset + ! in the previous iteration. For use in limiting + + real (kind=dbl_kind), dimension(icells) :: enew_save + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + all_converged = .false. + + do ij = 1, icells + + ! Set variables involved in tracking limiting of top layer temp + i = indxi(ij) + j = indxj(ij) + fcondtopn_reduction(i,j) = c0 + fcondtopn_force(i,j) = fcondtopn(i,j) + enum(ij) = c0 + Top_T_was_reset_last_time(ij) = .false. + + converged (ij) = .false. + l_snow (ij) = .false. + l_cold (ij) = .true. + fcondbot (ij) = c0 + dTsf_prev (ij) = c0 + dTi1_prev (ij) = c0 + dfsens_dT (ij) = c0 + dflat_dT (ij) = c0 + dflwout_dT(ij) = c0 + einex (ij) = c0 + dt_rhoi_hlyr(ij) = dt / (rhoi*hilyr(ij)) ! hilyr > 0 + if (hslyr(ij) > hs_min/real(nslyr,kind=dbl_kind)) & + l_snow(ij) = .true. + enew_icells(ij) = c0 + enddo ! ij + + do k = 1, nslyr + do ij = 1, icells + Tsn_init (ij,k) = zTsn(ij,k) ! beginning of time step + Tsn_start(ij,k) = zTsn(ij,k) ! beginning of iteration + if (l_snow(ij)) then + etas(ij,k) = dt/(rhos*cp_ice*hslyr(ij)) + else + etas(ij,k) = c0 + endif + enddo ! ij + enddo ! k + + do k = 1, nilyr + do ij = 1, icells + Tin_init (ij,k) = zTin(ij,k) ! beginning of time step + Tin_start(ij,k) = zTin(ij,k) ! beginning of iteration + Tmlts (ij,k) = -zSin(ij,k) * depressT + enddo + enddo + + !----------------------------------------------------------------- + ! Compute thermal conductivity at interfaces (held fixed during + ! subsequent iterations). + ! Ice and snow interfaces are combined into one array (kh) to + ! simplify the logic. 
+ !----------------------------------------------------------------- + + call effect_conductivity (nx_block, ny_block, & + l_snow, icells, & + indxi, indxj, indxij, & + hilyr, hslyr, & + zTin, kh, zSin) + + !----------------------------------------------------------------- + ! Check for excessive absorbed solar radiation that may result in + ! temperature overshoots. Convergence is particularly difficult + ! if the starting temperature is already very close to the melting + ! temperature and extra energy is added. In that case, or if the + ! amount of energy absorbed is greater than the amount needed to + ! melt through a given fraction of a layer, we put the extra + ! energy into the surface. + ! NOTE: This option is not available if the atmosphere model + ! has already computed fsurf. (Unless we adjust fsurf here) + !----------------------------------------------------------------- +!mclaren: Should there be an if calc_Tsfc statement here then?? +! Alex West: Temporarily at least, yes. Here goes. + + if (istep1==15552) then + do m = 1, icells + i = indxi(m) + j = indxj(m) + + if ((i==91) .AND. (j==46) .and. (my_task==24)) then + write(nu_diag,*) ' ' + write(nu_diag,*) ' ' + write(nu_diag,*) 'Printing initial temperature profile' + write(nu_diag,*) 'Tsn_init, Tin_init = ', Tsn_init(m,:), Tin_init(m,:) + endif + + enddo + endif + + if (calc_Tsfc) then + frac = 0.9 + dTemp = 0.02_dbl_kind + do k = 1, nilyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + Iswabs_tmp = c0 ! all Iswabs is moved into fswsfc + if (Tin_init(ij,k) <= Tmlts(ij,k) - dTemp) then + if (l_brine) then + ci = cp_ice - Lfresh * Tmlts(ij,k) / (Tin_init(ij,k)**2) + Iswabs_tmp = min(Iswabs(i,j,k), & + frac*(Tmlts(ij,k)-Tin_init(ij,k))*ci/dt_rhoi_hlyr(ij)) + else + ci = cp_ice + Iswabs_tmp = min(Iswabs(i,j,k), & + frac*(-Tin_init(ij,k))*ci/dt_rhoi_hlyr(ij)) + endif + endif + if (Iswabs_tmp < puny) Iswabs_tmp = c0 + + dswabs = min(Iswabs(i,j,k) - Iswabs_tmp, fswint(i,j)) + + fswsfc(i,j) = fswsfc(i,j) + dswabs + fswint(i,j) = fswint(i,j) - dswabs + Iswabs(i,j,k) = Iswabs_tmp + + enddo + enddo + endif ! calc_Tsfc + + do k = 1, nslyr + do ij = 1, icells + if (l_snow(ij)) then + i = indxi(ij) + j = indxj(ij) + + Sswabs_tmp = c0 + if (Tsn_init(ij,k) <= -dTemp) then + Sswabs_tmp = min(Sswabs(i,j,k), & + -frac*Tsn_init(ij,k)/etas(ij,k)) + endif + if (Sswabs_tmp < puny) Sswabs_tmp = c0 + + dswabs = min(Sswabs(i,j,k) - Sswabs_tmp, fswint(i,j)) + + fswsfc(i,j) = fswsfc(i,j) + dswabs + fswint(i,j) = fswint(i,j) - dswabs + Sswabs(i,j,k) = Sswabs_tmp + + endif + enddo + enddo + + !----------------------------------------------------------------- + ! Solve for new temperatures. + ! Iterate until temperatures converge with minimal energy error. + !----------------------------------------------------------------- + + do niter = 1, nitermax + + if ((istep1==15552) .and. (my_task==24)) then + + write(nu_diag,*) ' ' + write(nu_diag,*) ' ' + + write(nu_diag,*) '--------------------' + write(nu_diag,*) 'Entering iteration', niter + write(nu_diag,*) ' ' + endif + + + !----------------------------------------------------------------- + ! Identify cells, if any, where calculation has not converged. + !----------------------------------------------------------------- + + if (all_converged) then ! thermo calculation is done + exit + else ! 
identify cells not yet converged + isolve = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (.not.converged(ij)) then + isolve = isolve + 1 + indxii(isolve) = i + indxjj(isolve) = j + indxij(isolve) = ij + endif + enddo ! ij + endif + + !----------------------------------------------------------------- + ! Allocate and initialize + !----------------------------------------------------------------- + + allocate( sbdiag(isolve,nilyr+nslyr+1)) + allocate( diag(isolve,nilyr+nslyr+1)) + allocate( spdiag(isolve,nilyr+nslyr+1)) + allocate( rhs(isolve,nilyr+nslyr+1)) + allocate( Tmat(isolve,nilyr+nslyr+1)) + allocate( etai(isolve,nilyr)) + allocate(Tsf_start(isolve)) + allocate( dTsf(isolve)) + allocate(dfsurf_dT(isolve)) + allocate( avg_Tsi(isolve)) + allocate( enew(isolve)) + allocate( dTi1(isolve)) + + all_converged = .true. + + do ij = 1, isolve + m = indxij(ij) + converged(m) = .true. + dfsurf_dT(ij) = c0 + avg_Tsi (ij) = c0 + enew (ij) = c0 + einex (m) = c0 + enddo + + !----------------------------------------------------------------- + ! Update specific heat of ice layers. + ! To ensure energy conservation, the specific heat is a function of + ! both the starting temperature and the (latest guess for) the + ! final temperature. + !----------------------------------------------------------------- + + do k = 1, nilyr + do ij = 1, isolve + m = indxij(ij) + i = indxii(ij) + j = indxjj(ij) + + if (l_brine) then + ci = cp_ice - Lfresh*Tmlts(m,k) / & + (zTin(m,k)*Tin_init(m,k)) + else + ci = cp_ice + endif + etai(ij,k) = dt_rhoi_hlyr(m) / ci + + enddo + enddo + + if (calc_Tsfc) then + + !----------------------------------------------------------------- + ! Update radiative and turbulent fluxes and their derivatives + ! with respect to Tsf. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + ! surface heat flux + call surface_heat_flux(Tsf (m), fswsfc(i,j), & + rhoa (i,j), flw (i,j), & + potT (i,j), Qa (i,j), & + shcoef (i,j), lhcoef(i,j), & + flwoutn(i,j), fsensn(i,j), & + flatn (i,j), fsurfn(i,j)) + + ! derivative of heat flux with respect to surface temperature + call dsurface_heat_flux_dTsf(Tsf (m), fswsfc (i,j), & + rhoa (i,j), flw (i,j), & + potT (i,j), Qa (i,j), & + shcoef (i,j), lhcoef (i,j), & + dfsurf_dT(ij), dflwout_dT(m), & + dfsens_dT(m), dflat_dT (m)) + + !----------------------------------------------------------------- + ! Compute conductive flux at top surface, fcondtopn. + ! If fsurfn < fcondtopn and Tsf = 0, then reset Tsf to slightly less + ! than zero (but not less than -puny). + !----------------------------------------------------------------- + + if (l_snow(m)) then + fcondtopn(i,j) = kh(m,1) * (Tsf(m) - zTsn(m,1)) + else + fcondtopn(i,j) = kh(m,1+nslyr) * (Tsf(m) - zTin(m,1)) + endif + + if ( Tsf(m) >= c0 .and. fsurfn(i,j) < fcondtopn(i,j)) & + Tsf(m) = -puny + + + !----------------------------------------------------------------- + ! Save surface temperature at start of iteration + !----------------------------------------------------------------- + + Tsf_start(ij) = Tsf(m) + + if (Tsf(m) < c0) then + l_cold(m) = .true. + else + l_cold(m) = .false. + endif + + enddo ! ij + + !----------------------------------------------------------------- + ! Compute elements of tridiagonal matrix. 
+ !----------------------------------------------------------------- + + call get_matrix_elements_calc_Tsfc & + (nx_block, ny_block, & + isolve, icells, & + indxii, indxjj, indxij, & + l_snow, l_cold, & + Tsf, Tbot, & + fsurfn, dfsurf_dT, & + Tin_init, Tsn_init, & + kh, Sswabs, & + Iswabs, & + etai, etas, & + sbdiag, diag, & + spdiag, rhs) + + else + + ! See if we need to reduce fcondtopn anywhere + fcondtopn_force = fcondtopn - fcondtopn_reduction + + if (istep1==15552) then + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if ((i==91) .AND. (j==46) .and. (my_task==24)) then + write(nu_diag,*) 'Calling solver with fcondtopn_force, fcondtopn, fcondtopn_reduction = ', & + fcondtopn_force(i,j), fcondtopn(i,j), fcondtopn_reduction(i,j) + write(nu_diag,*) 'and etai, etas, kh = ', etai(ij,:), etas(ij,:), kh(m,:) + endif + + enddo + endif + + call get_matrix_elements_know_Tsfc & + (nx_block, ny_block, & + isolve, icells, & + indxii, indxjj, indxij, & + l_snow, Tbot, & + Tin_init, Tsn_init, & + kh, Sswabs, & + Iswabs, & + etai, etas, & + sbdiag, diag, & + spdiag, rhs, & + fcondtopn_force) + endif ! calc_Tsfc + + !----------------------------------------------------------------- + ! Solve tridiagonal matrix to obtain the new temperatures. + !----------------------------------------------------------------- + + call tridiag_solver (nx_block, ny_block, & + isolve, icells, & + indxii, indxjj, & + nmat, sbdiag, & + diag, spdiag, & + rhs, Tmat) + + !----------------------------------------------------------------- + ! Determine whether the computation has converged to an acceptable + ! solution. Five conditions must be satisfied: + ! + ! (1) Tsf <= 0 C. + ! (2) Tsf is not oscillating; i.e., if both dTsf(niter) and + ! dTsf(niter-1) have magnitudes greater than puny, then + ! dTsf(niter)/dTsf(niter-1) cannot be a negative number + ! with magnitude greater than 0.5. + ! (3) abs(dTsf) < Tsf_errmax + ! (4) If Tsf = 0 C, then the downward turbulent/radiative + ! flux, fsurfn, must be greater than or equal to the downward + ! conductive flux, fcondtopn. + ! (5) The net energy added to the ice per unit time must equal + ! the net change in internal ice energy per unit time, + ! within the prescribed error ferrmax. + ! + ! For briny ice (the standard case), zTsn and zTin are limited + ! to prevent them from exceeding their melting temperatures. + ! (Note that the specific heat formula for briny ice assumes + ! that T < Tmlt.) + ! For fresh ice there is no limiting, since there are cases + ! when the only convergent solution has zTsn > 0 and/or zTin > 0. + ! Above-zero temperatures are then reset to zero (with melting + ! to conserve energy) in the thickness_changes subroutine. + !----------------------------------------------------------------- + + if (istep1==15552) then + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if ((i==91) .and. (j==46) .and. (my_task==24)) then + write(nu_diag,*) 'Matrix solution of temperatures Tmat = ', Tmat(ij,:) + endif + enddo + endif + + if (calc_Tsfc) then + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + m = indxij(ij) + + !----------------------------------------------------------------- + ! Reload Tsf from matrix solution + !----------------------------------------------------------------- + + if (l_cold(m)) then + if (l_snow(m)) then + Tsf(m) = Tmat(ij,1) + else + Tsf(m) = Tmat(ij,1+nslyr) + endif + else ! 
melting surface + Tsf(m) = c0 + endif + + !----------------------------------------------------------------- + ! Initialize convergence flag (true until proven false), dTsf, + ! and temperature-averaging coefficients. + ! Average only if test 1 or 2 fails. + ! Initialize energy. + !----------------------------------------------------------------- + + dTsf(ij) = Tsf(m) - Tsf_start(ij) + avg_Tsf = c0 + + !----------------------------------------------------------------- + ! Condition 1: check for Tsf > 0 + ! If Tsf > 0, set Tsf = 0, then average zTsn and zTin to force + ! internal temps below their melting temps. + !----------------------------------------------------------------- + + if (Tsf(m) > puny) then + Tsf(m) = c0 + dTsf(ij) = -Tsf_start(ij) + if (l_brine) avg_Tsi(ij) = c1 ! avg with starting temp + converged(m) = .false. + all_converged = .false. + + !----------------------------------------------------------------- + ! Condition 2: check for oscillating Tsf + ! If oscillating, average all temps to increase rate of convergence. + !----------------------------------------------------------------- + + elseif (niter > 1 & ! condition (2) + .and. Tsf_start(ij) <= -puny & + .and. abs(dTsf(ij)) > puny & + .and. abs(dTsf_prev(m)) > puny & + .and. -dTsf(ij)/(dTsf_prev(m)+puny*puny) > p5) then + + if (l_brine) then ! average with starting temp + avg_Tsf = c1 + avg_Tsi(ij) = c1 + endif + dTsf(ij) = p5 * dTsf(ij) + converged(m) = .false. + all_converged = .false. + endif + +!!! dTsf_prev(m) = dTsf(ij) + + !----------------------------------------------------------------- + ! If condition 2 failed, average new surface temperature with + ! starting value. + !----------------------------------------------------------------- + Tsf(m) = Tsf(m) & + + avg_Tsf * p5 * (Tsf_start(ij) - Tsf(m)) + + enddo ! ij + + endif ! calc_Tsfc + + dqmat_sn(:,:) = c0 + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + m = indxij(ij) + i = indxii(ij) + j = indxjj(ij) + + !----------------------------------------------------------------- + ! Reload zTsn from matrix solution + !----------------------------------------------------------------- + + if (l_snow(m)) then + zTsn(m,k) = Tmat(ij,k+1) + else + zTsn(m,k) = c0 + endif + + if ((l_brine) .and. zTsn(m,k)>c0) then + +! Alex West: return this energy to the ocean + + dqmat_sn(m,k) = (zTsn(m,k)*cp_ice - Lfresh)*rhos - zqsn(m,k) + + ! Alex West: If this is the second time in succession that Tsn(1) has been + ! reset, tell the solver to reduce the forcing at the top, and + ! pass the difference to the array enum where it will eventually + ! go into the ocean + ! This is done to avoid an 'infinite loop' whereby temp continually evolves + ! to the same point above zero, is reset, ad infinitum + if ((i==91) .and. (j==46) .and. (my_task==24)) then + write(nu_diag,*) + write(nu_diag,*) 'Resetting Tsn1' + write(nu_diag,*) 'zTsn, dqmat_sn = ', zTsn(m,k), dqmat_sn(m,k) + endif + if (l_snow(m) .AND. k == 1) then + if (Top_T_was_reset_last_time(m)) then + fcondtopn_reduction(i,j) = fcondtopn_reduction(i,j) + dqmat_sn(m,k)*hslyr(m) / dt + Top_T_was_reset_last_time(m) = .false. + enum(m) = enum(m) + hslyr(m) * dqmat_sn(m,k) + if ((i==91) .and. (j==46) .and. (my_task==24)) then + write(nu_diag,*) 'Adjusting forcing' + write(nu_diag,*) 'fcondtopn_reduction, enum = ', fcondtopn_reduction(i,j), enum(m) + endif + else + Top_T_was_reset_last_time(m) = .true. 
+ endif + endif + + zTsn(m,k) = min(zTsn(m,k), c0) + + endif + + !----------------------------------------------------------------- + ! If condition 1 or 2 failed, average new snow layer + ! temperatures with their starting values. + !----------------------------------------------------------------- + zTsn(m,k) = zTsn(m,k) & + + avg_Tsi(ij)*p5*(Tsn_start(m,k)-zTsn(m,k)) + + !----------------------------------------------------------------- + ! Compute zqsn and increment new energy. + !----------------------------------------------------------------- + zqsn(m,k) = -rhos * (Lfresh - cp_ice*zTsn(m,k)) + enew(ij) = enew(ij) + hslyr(m) * zqsn(m,k) + + Tsn_start(m,k) = zTsn(m,k) ! for next iteration + + enddo ! ij + enddo ! nslyr + + if (istep1==15552) then + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if ((i==91) .and. (j==46) .and. (my_task==24)) then + write(nu_diag,*) 'After numerical corrections, zTsn = ', zTsn(m,:) + write(nu_diag,*) 'zqsn, enew, enum = ', zqsn(m,k), enew(ij), enum(m) + endif + enddo + endif + + + dTmat(:,:) = c0 + dqmat(:,:) = c0 + reduce_kh(:,:) = .false. + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + m = indxij(ij) + i = indxii(ij) + j = indxjj(ij) + + !----------------------------------------------------------------- + ! Reload zTin from matrix solution + !----------------------------------------------------------------- + + zTin(m,k) = Tmat(ij,k+1+nslyr) + + if (l_brine .and. zTin(m,k) > Tmlts(m,k) - puny) then + dTmat(m,k) = zTin(m,k) - Tmlts(m,k) + dqmat(m,k) = rhoi * dTmat(m,k) & + * (cp_ice - Lfresh * Tmlts(m,k)/zTin(m,k)**2) + ! Alex West: If this is the second time in succession that Tin(1) has been + ! reset, tell the solver to reduce the forcing at the top, and + ! pass the difference to the array enum where it will eventually + ! go into the ocean + ! This is done to avoid an 'infinite loop' whereby temp continually evolves + ! to the same point above zero, is reset, ad infinitum + if ((i==91) .and. (j==46) .and. (my_task==24)) then + write(nu_diag,*) + write(nu_diag,*) 'Resetting Tin1' + write(nu_diag,*) 'zTin, dTmat, dqmat = ', zTin(m,k), dTmat(m,k), dqmat(m,k) + endif + if ((.NOT. (l_snow(m))) .AND. (k == 1)) then + if (Top_T_was_reset_last_time(m)) then + fcondtopn_reduction(i,j) = fcondtopn_reduction(i,j) + dqmat(m,k)*hilyr(m) / dt + Top_T_was_reset_last_time(m) = .false. + enum(m) = enum(m) + hilyr(m) * dqmat(m,k) + if ((i==91) .and. (j==46) .and. (my_task==24)) then + write(nu_diag,*) 'Adjusting forcing' + write(nu_diag,*) 'fcondtopn_reduction, enum = ', fcondtopn_reduction(i,j), enum(m) + endif + else + Top_T_was_reset_last_time(m) = .true. + endif + endif + +! use this for the case that Tmlt changes by an amount dTmlt=Tmltnew-Tmlt(k) +! + rhoi * dTmlt & +! * (cp_ocn - cp_ice + Lfresh/zTin(m,k)) + zTin(m,k) = Tmlts(m,k) + reduce_kh(m,k) = .true. + endif + + !----------------------------------------------------------------- + ! Condition 2b: check for oscillating zTin(1) + ! If oscillating, average all ice temps to increase rate of convergence. + !----------------------------------------------------------------- + + if (k==1 .and. .not.calc_Tsfc) then + dTi1(ij) = zTin(m,k) - Tin_start(m,k) + + if (niter > 1 & ! condition 2b + .and. abs(dTi1(ij)) > puny & + .and. abs(dTi1_prev(m)) > puny & + .and. -dTi1(ij)/(dTi1_prev(m)+puny*puny) > p5) then + + if (l_brine) avg_Tsi(ij) = c1 + dTi1(ij) = p5 * dTi1(ij) + converged(m) = .false. + all_converged = .false. 
+ endif + dTi1_prev(m) = dTi1(ij) + endif ! k = 1 .and. calc_Tsfc = F + + !----------------------------------------------------------------- + ! If condition 1 or 2 failed, average new ice layer + ! temperatures with their starting values. + !----------------------------------------------------------------- + zTin(m,k) = zTin(m,k) & + + avg_Tsi(ij)*p5*(Tin_start(m,k)-zTin(m,k)) + + !----------------------------------------------------------------- + ! Compute zqin and increment new energy. + !----------------------------------------------------------------- + if (l_brine) then + zqin(m,k) = -rhoi * (cp_ice*(Tmlts(m,k)-zTin(m,k)) & + + Lfresh*(c1-Tmlts(m,k)/zTin(m,k)) & + - cp_ocn*Tmlts(m,k)) + else + zqin(m,k) = -rhoi * (-cp_ice*zTin(m,k) + Lfresh) + endif + enew(ij) = enew(ij) + hilyr(m) * zqin(m,k) + einex(m) = einex(m) + hilyr(m) * dqmat(m,k) + + Tin_start(m,k) = zTin(m,k) ! for next iteration + + enddo ! ij + enddo ! nilyr + + if (istep1==15552) then + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if ((i==91) .and. (j==46) .and. (my_task==24)) then + write(nu_diag,*) 'After numerical corrections, zTin = ', zTin(m,:) + write(nu_diag,*) 'zqin, enew, enum = ', zqin(m,k), enew(ij), enum(m) + endif + enddo + endif + + + if (calc_Tsfc) then + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + !----------------------------------------------------------------- + ! Condition 3: check for large change in Tsf + !----------------------------------------------------------------- + + if (abs(dTsf(ij)) > Tsf_errmax) then + converged(m) = .false. + all_converged = .false. + endif + + !----------------------------------------------------------------- + ! Condition 4: check for fsurfn < fcondtopn with Tsf >= 0 + !----------------------------------------------------------------- + + fsurfn(i,j) = fsurfn(i,j) + dTsf(ij)*dfsurf_dT(ij) + if (l_snow(m)) then + fcondtopn(i,j) = kh(m,1) * (Tsf(m)-zTsn(m,1)) + else + fcondtopn(i,j) = kh(m,1+nslyr) * (Tsf(m)-zTin(m,1)) + endif + + if (Tsf(m) >= c0 .and. fsurfn(i,j) < fcondtopn(i,j)) then + converged(m) = .false. + all_converged = .false. + endif + + dTsf_prev(m) = dTsf(ij) + + enddo ! ij + endif ! calc_Tsfc + + !----------------------------------------------------------------- + ! Condition 5: check for energy conservation error + ! Change in internal ice energy should equal net energy input. + !----------------------------------------------------------------- + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + fcondbot(m) = kh(m,1+nslyr+nilyr) * & + (zTin(m,nilyr) - Tbot(i,j)) + + ! Flux extra energy out of the ice + ! Alex West. Commenting this out for now - it's essentially what I'm doing with enum, so possibility of double-counting. + ! fcondbot(m) = fcondbot(m) + einex(m)/dt + + ! Alex West. Now including enum, the 'numeric energy' from limiting Tin1 and Tsn, + ! in this conservation check + ferr(m) = abs( (enew(ij) - einit(m) + enum(m))/dt & + - (fcondtopn(i,j) - fcondbot(m) + fswint(i,j)) ) + + if (istep1==15552) then + if ((i==91) .and. (j==46) .and. 
(my_task==24)) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Testing for convergence' + write(nu_diag,*) 'enew, einit, enum = ', enew(ij), einit(m), enum(m) + write(nu_diag,*) 'enew/dt, einit/dt, enum/dt = ', enew(ij)/dt, einit(m)/dt, enum(m)/dt + write(nu_diag,*) 'fcondtop, fcondbot, fswint = ', fcondtopn(i,j), fcondbot(m), fswint(i,j) + write(nu_diag,*) '(enew(ij) - einit(m) + enum(m))/dt = ', (enew(ij) - einit(m) + enum(m))/dt + write(nu_diag,*) 'fcondtopn(i,j) - fcondbot(m) + fswint(i,j)', fcondtopn(i,j) - fcondbot(m) + fswint(i,j) + write(nu_diag,*) 'fcondtopn_force(i,j), fcondtopn_reduction(i,j) = ', fcondtopn_force(i,j), fcondtopn_reduction(i,j) + endif + endif + + ! factor of 0.9 allows for roundoff errors later + if (ferr(m) > 0.9_dbl_kind*ferrmax) then ! condition (5) + + converged(m) = .false. + all_converged = .false. + + ! reduce conductivity for next iteration + ! Alex West: I think this maybe shouldn't be done for the top layer + ! if the forcing is the top conductive flux? + do k = 1, nilyr + if (reduce_kh(m,k) .and. dqmat(m,k) > c0) then + frac = max(0.5*(c1-ferr(m)/abs(fcondtopn(i,j)-fcondbot(m))),p1) +! frac = p1 + kh(m,k+nslyr+1) = kh(m,k+nslyr+1) * frac + kh(m,k+nslyr) = kh(m,k+nslyr+1) + endif + enddo + + endif ! ferr + if (converged(m)) then + enew_icells(m) = enew(ij) + endif + enddo ! ij + + enew_save(1:isolve) = enew + + deallocate(sbdiag) + deallocate(diag) + deallocate(spdiag) + deallocate(rhs) + deallocate(Tmat) + deallocate(etai) + deallocate(Tsf_start) + deallocate(dTsf) + deallocate(dfsurf_dT) + deallocate(avg_Tsi) + deallocate(enew) + deallocate(dTi1) + + enddo ! temperature iteration niter + + if (.not.all_converged) then + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + + do m = 1,isolve + if (indxij(m)==ij) then + ij_solve = m + else + ij_solve = 1 + endif + enddo + + !----------------------------------------------------------------- + ! Check for convergence failures. 
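! A minimal standalone sketch of the condition-(5) test applied above: the rate of
! change of internal energy, including the 'numeric energy' enum, must balance the
! net flux fcondtopn - fcondbot + fswint to within ferrmax. The scalar values below
! are made up for illustration only and do not come from the model.

      program check_energy_balance
         implicit none
         integer, parameter :: dbl = selected_real_kind(13)
         real(dbl), parameter :: dt      = 3600.0_dbl  ! time step (s), illustrative
         real(dbl), parameter :: ferrmax = 1.0e-3_dbl  ! allowed error (W m-2), illustrative
         real(dbl) :: einit, enew, enum                ! energies (J m-2)
         real(dbl) :: fcondtopn, fcondbot, fswint      ! fluxes (W m-2)
         real(dbl) :: ferr

         fcondtopn = 5.0_dbl
         fcondbot  = 10.0_dbl
         fswint    = 15.0_dbl
         einit     = -1.0e8_dbl
         enum      = 0.0_dbl
         enew      = einit + (fcondtopn - fcondbot + fswint)*dt  ! perfectly balanced step

         ferr = abs((enew - einit + enum)/dt - (fcondtopn - fcondbot + fswint))
         write(*,*) 'ferr =', ferr, ' passes condition (5):', (ferr <= 0.9_dbl*ferrmax)
      end program check_energy_balance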
+ !----------------------------------------------------------------- + if (.not.converged(ij)) then + write(nu_diag,*) 'Thermo iteration does not converge,', & + 'istep1, my_task, i, j:', & + istep1, my_task, i, j + write(nu_diag,*) 'Ice thickness:', hilyr(ij)*nilyr + write(nu_diag,*) 'Snow thickness:', hslyr(ij)*nslyr + write(nu_diag,*) 'dTsf, Tsf_errmax:',dTsf_prev(ij), & + Tsf_errmax + write(nu_diag,*) 'Tsf:', Tsf(ij) + write(nu_diag,*) 'fsurf:', fsurfn(i,j) + write(nu_diag,*) 'fcondtop, fcondbot, fswint', & + fcondtopn(i,j), fcondbot(ij), fswint(i,j) + write(nu_diag,*) '(enew_save - einit)/dt, enum/dt, (enew_save - einit + enum)/dt = ', & + (enew_save(ij_solve) - einit(ij))/dt, enum(ij)/dt, (enew_save(ij_solve) - einit(ij) + enum(ij))/dt + write(nu_diag,*) 'enew_save, einit = ', enew_save(ij_solve), einit(ij) + write(nu_diag,*) 'size(enew_save), size(einit) = ', size(enew_save), size(einit) + write(nu_diag,*) 'ij, m = ', ij, m + write(nu_diag,*) 'fswsfc', fswsfc(i,j) + write(nu_diag,*) 'Iswabs',(Iswabs(i,j,k),k=1,nilyr) + write(nu_diag,*) 'Flux conservation error =', ferr(ij) + write(nu_diag,*) 'Initial snow temperatures:' + write(nu_diag,*) (Tsn_init(ij,k),k=1,nslyr) + write(nu_diag,*) 'Initial ice temperatures:' + write(nu_diag,*) (Tin_init(ij,k),k=1,nilyr) + write(nu_diag,*) 'Matrix ice temperature diff:' + write(nu_diag,*) (dTmat(ij,k),k=1,nilyr) + write(nu_diag,*) 'dqmat*hilyr/dt:' + write(nu_diag,*) (hilyr(ij)*dqmat(ij,k)/dt,k=1,nilyr) + write(nu_diag,*) 'Final snow temperatures:' + write(nu_diag,*) (zTsn(ij,k),k=1,nslyr) + write(nu_diag,*) 'Matrix ice temperature diff:' + write(nu_diag,*) (dTmat(ij,k),k=1,nilyr) + write(nu_diag,*) 'dqmat*hilyr/dt:' + write(nu_diag,*) (hilyr(ij)*dqmat(ij,k)/dt,k=1,nilyr) + write(nu_diag,*) 'Final ice temperatures:' + write(nu_diag,*) (zTin(ij,k),k=1,nilyr) + write(nu_diag,*) 'Ice melting temperatures:' + write(nu_diag,*) (Tmlts(ij,k),k=1,nilyr) + write(nu_diag,*) 'Ice bottom temperature:', Tbot(i,j) + write(nu_diag,*) 'dT initial:' + write(nu_diag,*) (Tmlts(ij,k)-Tin_init(ij,k),k=1,nilyr) + write(nu_diag,*) 'dT final:' + write(nu_diag,*) (Tmlts(ij,k)-zTin(ij,k),k=1,nilyr) + write(nu_diag,*) 'zSin' + write(nu_diag,*) (zSin(ij,k),k=1,nilyr) + l_stop = .true. + istop = i + jstop = j + return + endif + enddo ! ij + endif ! all_converged + + if (calc_Tsfc) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! update fluxes that depend on Tsf + flwoutn(i,j) = flwoutn(i,j) + dTsf_prev(ij) * dflwout_dT(ij) + fsensn(i,j) = fsensn(i,j) + dTsf_prev(ij) * dfsens_dT(ij) + flatn(i,j) = flatn(i,j) + dTsf_prev(ij) * dflat_dT(ij) + + enddo ! ij + endif ! calc_Tsfc + + end subroutine temperature_changes + +!======================================================================= +! +! Compute thermal conductivity at interfaces (held fixed during +! the subsequent iteration). +! +! NOTE: Ice conductivity must be >= kimin +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine effect_conductivity (nx_block, ny_block, & + l_snow, icells, & + indxi, indxj, indxij, & + hilyr, hslyr, & + zTin, kh, zSin) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with aicen > puny + + logical (kind=log_kind), dimension(icells), & + intent(in) :: & + l_snow ! true if snow temperatures are computed + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! 
compressed indices for cells with aicen > puny + + integer (kind=int_kind), dimension (icells), & + intent(in) :: & + indxij ! compressed 1D index for cells not converged + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + hilyr , & ! ice layer thickness (same for all ice layers) + hslyr ! snow layer thickness (same for all snow layers) + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(in) :: & + zTin , & ! internal ice layer temperatures + zSin ! internal ice layer salinities + + real (kind=dbl_kind), dimension (icells,nilyr+nslyr+1), & + intent(out) :: & + kh ! effective conductivity at interfaces (W m-2 deg-1) + + ! local variables + + integer (kind=int_kind) :: & + ij , & ! horizontal index, combines i and j loops + k ! vertical index + + real (kind=dbl_kind), dimension (icells,nilyr) :: & + kilyr ! thermal cond at ice layer midpoints (W m-1 deg-1) + + real (kind=dbl_kind), dimension (icells,nslyr) :: & + kslyr ! thermal cond at snow layer midpoints (W m-1 deg-1) + + ! interior snow layers (simple for now, but may be fancier later) + do k = 1, nslyr + do ij = 1, icells + kslyr(ij,k) = ksno + enddo + enddo ! nslyr + + ! interior ice layers + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + kilyr(ij,k) = calculate_ki_from_Tin(zTin(ij,k),zSin(ij,k)) + enddo + enddo ! nilyr + + + ! top snow interface, top and bottom ice interfaces + do ij = 1, icells + ! top of snow layer; top surface of top ice layer + if (l_snow(ij)) then + kh(ij,1) = c2 * kslyr(ij,1) / hslyr(ij) + kh(ij,1+nslyr) = c2 * kslyr(ij,nslyr) * kilyr(ij,1) / & + ( kslyr(ij,nslyr)*hilyr(ij) + & + kilyr(ij,1 )*hslyr(ij) ) + else + kh(ij,1) = c0 + kh(ij,1+nslyr) = c2 * kilyr(ij,1) / hilyr(ij) + endif + + ! bottom surface of bottom ice layer + kh(ij,1+nslyr+nilyr) = c2 * kilyr(ij,nilyr) / hilyr(ij) + + enddo ! ij + + ! interior snow interfaces + + if (nslyr > 1) then + do k = 2, nslyr + do ij = 1, icells + if (l_snow(ij)) then + kh(ij,k) = c2 * kslyr(ij,k-1) * kslyr(ij,k) / & + ((kslyr(ij,k-1) + kslyr(ij,k))*hslyr(ij)) + else + kh(ij,k) = c0 + endif + enddo ! ij + enddo ! nilyr + endif ! nslyr > 1 + + ! interior ice interfaces + do k = 2, nilyr + do ij = 1, icells + kh(ij,k+nslyr) = c2 * kilyr(ij,k-1) * kilyr(ij,k) / & + ((kilyr(ij,k-1) + kilyr(ij,k))*hilyr(ij)) + enddo ! ij + enddo ! nilyr + + end subroutine effect_conductivity + +!======================================================================= +! +! Compute radiative and turbulent fluxes and their derivatives +! with respect to Tsf. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine surface_fluxes (nx_block, ny_block, & + isolve, icells, & + indxii, indxjj, indxij, & + Tsf, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + flwoutn, fsensn, & + flatn, fsurfn, & + dflwout_dT, dfsens_dT, & + dflat_dT, dfsurf_dT) + + use ice_therm_shared, only: surface_heat_flux, dsurface_heat_flux_dTsf + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + isolve , & ! number of cells with temps not converged + icells ! number of cells with ice present + + integer (kind=int_kind), dimension(icells), & + intent(in) :: & + indxii, indxjj ! compressed indices for cells not converged + + integer (kind=int_kind), dimension (icells) :: & + indxij ! compressed 1D index for cells not converged + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + Tsf ! 
ice/snow surface temperature, Tsfcn + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef ! transfer coefficient for latent heat + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + flwoutn , & ! upward LW at surface (W m-2) + fsurfn ! net flux to top surface, excluding fcondtopn + + real (kind=dbl_kind), dimension (icells), & + intent(inout) :: & + dfsens_dT , & ! deriv of fsens wrt Tsf (W m-2 deg-1) + dflat_dT , & ! deriv of flat wrt Tsf (W m-2 deg-1) + dflwout_dT ! deriv of flwout wrt Tsf (W m-2 deg-1) + + real (kind=dbl_kind), dimension (isolve), & + intent(inout) :: & + dfsurf_dT ! derivative of fsurfn wrt Tsf + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij, m ! horizontal indices, combine i and j loops + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + i = indxii(ij) ! NOTE: not indxi and indxj + j = indxjj(ij) + m = indxij(ij) + + ! surface heat flux + call surface_heat_flux(Tsf (m), fswsfc(i,j), & + rhoa (i,j), flw (i,j), & + potT (i,j), Qa (i,j), & + shcoef (i,j), lhcoef(i,j), & + flwoutn(i,j), fsensn(i,j), & + flatn (i,j), fsurfn(i,j)) + + ! derivative of heat flux with respect to surface temperature + call dsurface_heat_flux_dTsf(Tsf (m), fswsfc (i,j), & + rhoa (i,j), flw (i,j), & + potT (i,j), Qa (i,j), & + shcoef (i,j), lhcoef (i,j), & + dfsurf_dT(ij), dflwout_dT(m), & + dfsens_dT(m), dflat_dT (m)) + enddo ! ij + + end subroutine surface_fluxes + +!======================================================================= +! +! Compute terms in tridiagonal matrix that will be solved to find +! the new vertical temperature profile +! This routine is for the case in which Tsfc is being computed. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW +! +! March 2004 by William H. Lipscomb for multiple snow layers +! April 2008 by E. C. Hunke, divided into two routines based on calc_Tsfc + + subroutine get_matrix_elements_calc_Tsfc & + (nx_block, ny_block, & + isolve, icells, & + indxii, indxjj, indxij, & + l_snow, l_cold, & + Tsf, Tbot, & + fsurfn, dfsurf_dT, & + Tin_init, Tsn_init, & + kh, Sswabs, & + Iswabs, & + etai, etas, & + sbdiag, diag, & + spdiag, rhs) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + isolve , & ! number of cells with temps not converged + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(icells), & + intent(in) :: & + indxii, indxjj ! compressed indices for cells not converged + + integer (kind=int_kind), dimension (icells), & + intent(in) :: & + indxij ! compressed 1D index for cells not converged + + logical (kind=log_kind), dimension (icells), & + intent(in) :: & + l_snow , & ! true if snow temperatures are computed + l_cold ! true if surface temperature is computed + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + Tsf ! ice/snow top surface temp (deg C) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + fsurfn , & ! net flux to top surface, excluding fcondtopn (W/m^2) + Tbot ! 
ice bottom surface temperature (deg C) + + real (kind=dbl_kind), dimension (isolve), intent(in) :: & + dfsurf_dT ! derivative of fsurf wrt Tsf + + real (kind=dbl_kind), dimension (isolve,nilyr), & + intent(in) :: & + etai ! dt / (rho*cp*h) for ice layers + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(in) :: & + Tin_init ! ice temp at beginning of time step + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(in) :: & + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(in) :: & + Iswabs ! absorbed SW flux in ice layers + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(in) :: & + etas , & ! dt / (rho*cp*h) for snow layers + Tsn_init ! snow temp at beginning of time step + ! Note: no absorbed SW in snow layers + + real (kind=dbl_kind), dimension (icells,nslyr+nilyr+1), & + intent(in) :: & + kh ! effective conductivity at layer interfaces + + real (kind=dbl_kind), dimension (isolve,nslyr+nilyr+1), & + intent(inout) :: & + sbdiag , & ! sub-diagonal matrix elements + diag , & ! diagonal matrix elements + spdiag , & ! super-diagonal matrix elements + rhs ! rhs of tri-diagonal matrix eqn. + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij, m , & ! horizontal indices, combine i and j loops + k, ki, kr ! vertical indices and row counters + + !----------------------------------------------------------------- + ! Initialize matrix elements. + ! Note: When we do not need to solve for the surface or snow + ! temperature, we solve dummy equations with solution T = 0. + ! Ice layers are fully initialized below. + !----------------------------------------------------------------- + + do k = 1, nslyr+1 + do ij = 1, isolve + sbdiag(ij,k) = c0 + diag (ij,k) = c1 + spdiag(ij,k) = c0 + rhs (ij,k) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! Compute matrix elements + ! + ! Four possible cases to solve: + ! (1) Cold surface (Tsf < 0), snow present + ! (2) Melting surface (Tsf = 0), snow present + ! (3) Cold surface (Tsf < 0), no snow + ! (4) Melting surface (Tsf = 0), no snow + !----------------------------------------------------------------- + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + !----------------------------------------------------------------- + ! Tsf equation for case of cold surface (with or without snow) + !----------------------------------------------------------------- + if (l_cold(m)) then + if (l_snow(m)) then + k = 1 + else ! no snow + k = 1 + nslyr + endif + kr = k + + sbdiag(ij,kr) = c0 + diag (ij,kr) = dfsurf_dT(ij) - kh(m,k) + spdiag(ij,kr) = kh(m,k) + rhs (ij,kr) = dfsurf_dT(ij)*Tsf(m) - fsurfn(i,j) + endif ! l_cold + + !----------------------------------------------------------------- + ! top snow layer + !----------------------------------------------------------------- +! k = 1 +! kr = 2 + + if (l_snow(m)) then + if (l_cold(m)) then + sbdiag(ij,2) = -etas(m,1) * kh(m,1) + spdiag(ij,2) = -etas(m,1) * kh(m,2) + diag (ij,2) = c1 & + + etas(m,1) * (kh(m,1) + kh(m,2)) + rhs (ij,2) = Tsn_init(m,1) & + + etas(m,1) * Sswabs(i,j,1) + else ! melting surface + sbdiag(ij,2) = c0 + spdiag(ij,2) = -etas(m,1) * kh(m,2) + diag (ij,2) = c1 & + + etas(m,1) * (kh(m,1) + kh(m,2)) + rhs (ij,2) = Tsn_init(m,1) & + + etas(m,1)*kh(m,1)*Tsf(m) & + + etas(m,1) * Sswabs(i,j,1) + endif ! l_cold + endif ! l_snow + + enddo ! 
ij + + !----------------------------------------------------------------- + ! remaining snow layers + !----------------------------------------------------------------- + + if (nslyr > 1) then + + do k = 2, nslyr + kr = k + 1 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (l_snow(m)) then + sbdiag(ij,kr) = -etas(m,k) * kh(m,k) + spdiag(ij,kr) = -etas(m,k) * kh(m,k+1) + diag (ij,kr) = c1 & + + etas(m,k) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tsn_init(m,k) & + + etas(m,k) * Sswabs(i,j,k) + endif + enddo ! ij + enddo ! nslyr + + endif ! nslyr > 1 + + + if (nilyr > 1) then + + !----------------------------------------------------------------- + ! top ice layer + !----------------------------------------------------------------- + + ki = 1 + k = ki + nslyr + kr = k + 1 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (l_snow(m) .or. l_cold(m)) then + sbdiag(ij,kr) = -etai(ij,ki) * kh(m,k) + spdiag(ij,kr) = -etai(ij,ki) * kh(m,k+1) + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki)*Iswabs(i,j,ki) + else ! no snow, warm surface + sbdiag(ij,kr) = c0 + spdiag(ij,kr) = -etai(ij,ki) * kh(m,k+1) + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki)*Iswabs(i,j,ki) & + + etai(ij,ki)*kh(m,k)*Tsf(m) + endif + enddo ! ij + + !----------------------------------------------------------------- + ! bottom ice layer + !----------------------------------------------------------------- + + ki = nilyr + k = ki + nslyr + kr = k + 1 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + sbdiag(ij,kr) = -etai(ij,ki) * kh(m,k) + spdiag(ij,kr) = c0 + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki)*Iswabs(i,j,ki) & + + etai(ij,ki)*kh(m,k+1)*Tbot(i,j) + enddo ! ij + + else ! nilyr = 1 + + !----------------------------------------------------------------- + ! single ice layer + !----------------------------------------------------------------- + + ki = 1 + k = ki + nslyr + kr = k + 1 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (l_snow(m) .or. l_cold(m)) then + sbdiag(ij,kr) = -etai(ij,ki) * kh(m,k) + spdiag(ij,kr) = c0 + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki) * Iswabs(i,j,ki) & + + etai(ij,ki) * kh(m,k+1)*Tbot(i,j) + else ! no snow, warm surface + sbdiag(ij,kr) = c0 + spdiag(ij,kr) = c0 + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki) * Iswabs(i,j,ki) & + + etai(ij,ki) * kh(m,k)*Tsf(m) & + + etai(ij,ki) * kh(m,k+1)*Tbot(i,j) + endif + enddo ! ij + + endif ! nilyr > 1 + + !----------------------------------------------------------------- + ! interior ice layers + !----------------------------------------------------------------- + + do ki = 2, nilyr-1 + + k = ki + nslyr + kr = k + 1 + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + sbdiag(ij,kr) = -etai(ij,ki) * kh(m,k) + spdiag(ij,kr) = -etai(ij,ki) * kh(m,k+1) + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki)*Iswabs(i,j,ki) + enddo ! ij + enddo ! nilyr + + end subroutine get_matrix_elements_calc_Tsfc + +!======================================================================= +! +! Compute terms in tridiagonal matrix that will be solved to find +! 
the new vertical temperature profile +! This routine is for the case in which Tsfc is already known. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW +! +! March 2004 by William H. Lipscomb for multiple snow layers +! April 2008 by E. C. Hunke, divided into two routines based on calc_Tsfc + + subroutine get_matrix_elements_know_Tsfc & + (nx_block, ny_block, & + isolve, icells, & + indxii, indxjj, indxij, & + l_snow, Tbot, & + Tin_init, Tsn_init, & + kh, Sswabs, & + Iswabs, & + etai, etas, & + sbdiag, diag, & + spdiag, rhs, & + fcondtopn) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + isolve , & ! number of cells with temps not converged + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(icells), & + intent(in) :: & + indxii, indxjj ! compressed indices for cells not converged + + integer (kind=int_kind), dimension (icells), & + intent(in) :: & + indxij ! compressed 1D index for cells not converged + + logical (kind=log_kind), dimension (icells), & + intent(in) :: & + l_snow ! true if snow temperatures are computed + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tbot ! ice bottom surface temperature (deg C) + + real (kind=dbl_kind), dimension (isolve,nilyr), & + intent(in) :: & + etai ! dt / (rho*cp*h) for ice layers + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(in) :: & + Tin_init ! ice temp at beginning of time step + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(in) :: & + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(in) :: & + Iswabs ! absorbed SW flux in ice layers + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(in) :: & + etas , & ! dt / (rho*cp*h) for snow layers + Tsn_init ! snow temp at beginning of time step + ! Note: no absorbed SW in snow layers + + real (kind=dbl_kind), dimension (icells,nslyr+nilyr+1), & + intent(in) :: & + kh ! effective conductivity at layer interfaces + + real (kind=dbl_kind), dimension (isolve,nslyr+nilyr+1), & + intent(inout) :: & + sbdiag , & ! sub-diagonal matrix elements + diag , & ! diagonal matrix elements + spdiag , & ! super-diagonal matrix elements + rhs ! rhs of tri-diagonal matrix eqn. + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in), & + optional :: & + fcondtopn ! conductive flux at top sfc, positive down (W/m^2) + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij, m , & ! horizontal indices, combine i and j loops + k, ki, kr ! vertical indices and row counters + + !----------------------------------------------------------------- + ! Initialize matrix elements. + ! Note: When we do not need to solve for the surface or snow + ! temperature, we solve dummy equations with solution T = 0. + ! Ice layers are fully initialized below. + !----------------------------------------------------------------- + + do k = 1, nslyr+1 + do ij = 1, isolve + sbdiag(ij,k) = c0 + diag (ij,k) = c1 + spdiag(ij,k) = c0 + rhs (ij,k) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! Compute matrix elements + ! + ! Four possible cases to solve: + ! (1) Cold surface (Tsf < 0), snow present + ! (2) Melting surface (Tsf = 0), snow present + ! (3) Cold surface (Tsf < 0), no snow + ! 
(4) Melting surface (Tsf = 0), no snow + !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! top snow layer + !----------------------------------------------------------------- +! k = 1 +! kr = 2 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (l_snow(m)) then + sbdiag(ij,2) = c0 + spdiag(ij,2) = -etas(m,1) * kh(m,2) + diag (ij,2) = c1 & + + etas(m,1) * kh(m,2) + rhs (ij,2) = Tsn_init(m,1) & + + etas(m,1) * Sswabs(i,j,1) & + + etas(m,1) * fcondtopn(i,j) + endif ! l_snow + enddo ! ij + + !----------------------------------------------------------------- + ! remaining snow layers + !----------------------------------------------------------------- + + if (nslyr > 1) then + + do k = 2, nslyr + kr = k + 1 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (l_snow(m)) then + sbdiag(ij,kr) = -etas(m,k) * kh(m,k) + spdiag(ij,kr) = -etas(m,k) * kh(m,k+1) + diag (ij,kr) = c1 & + + etas(m,k) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tsn_init(m,k) & + + etas(m,k) * Sswabs(i,j,k) + endif + enddo ! ij + enddo ! nslyr + + endif ! nslyr > 1 + + + if (nilyr > 1) then + + !----------------------------------------------------------------- + ! top ice layer + !----------------------------------------------------------------- + + ki = 1 + k = ki + nslyr + kr = k + 1 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (l_snow(m)) then + + sbdiag(ij,kr) = -etai(ij,ki) * kh(m,k) + spdiag(ij,kr) = -etai(ij,ki) * kh(m,k+1) + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki) * Iswabs(i,j,ki) + else + sbdiag(ij,kr) = c0 + spdiag(ij,kr) = -etai(ij,ki) * kh(m,k+1) + diag (ij,kr) = c1 & + + etai(ij,ki) * kh(m,k+1) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki) * Iswabs(i,j,ki) & + + etai(ij,ki) * fcondtopn(i,j) + endif ! l_snow + enddo ! ij + + !----------------------------------------------------------------- + ! bottom ice layer + !----------------------------------------------------------------- + + ki = nilyr + k = ki + nslyr + kr = k + 1 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + sbdiag(ij,kr) = -etai(ij,ki) * kh(m,k) + spdiag(ij,kr) = c0 + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki)*Iswabs(i,j,ki) & + + etai(ij,ki)*kh(m,k+1)*Tbot(i,j) + + enddo ! ij + + else ! nilyr = 1 + + !----------------------------------------------------------------- + ! single ice layer + !----------------------------------------------------------------- + + ki = 1 + k = ki + nslyr + kr = k + 1 + + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (l_snow(m)) then + sbdiag(ij,kr) = -etai(ij,ki) * kh(m,k) + spdiag(ij,kr) = c0 + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki) * Iswabs(i,j,ki) & + + etai(ij,ki) * kh(m,k+1)*Tbot(i,j) + else + sbdiag(ij,kr) = c0 + spdiag(ij,kr) = c0 + diag (ij,kr) = c1 & + + etai(ij,ki) * kh(m,k+1) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki) * Iswabs(i,j,ki) & + + etai(ij,ki) * fcondtopn(i,j) & + + etai(ij,ki) * kh(m,k+1)*Tbot(i,j) + endif + enddo ! ij + + endif ! nilyr > 1 + + !----------------------------------------------------------------- + ! 
interior ice layers + !----------------------------------------------------------------- + + do ki = 2, nilyr-1 + + k = ki + nslyr + kr = k + 1 + do ij = 1, isolve + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + sbdiag(ij,kr) = -etai(ij,ki) * kh(m,k) + spdiag(ij,kr) = -etai(ij,ki) * kh(m,k+1) + diag (ij,kr) = c1 & + + etai(ij,ki) * (kh(m,k) + kh(m,k+1)) + rhs (ij,kr) = Tin_init(m,ki) & + + etai(ij,ki)*Iswabs(i,j,ki) + + enddo ! ij + enddo ! nilyr + + end subroutine get_matrix_elements_know_Tsfc + +!======================================================================= +! +! Tridiagonal matrix solver--used to solve the implicit vertical heat +! equation in ice and snow +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine tridiag_solver (nx_block, ny_block, & + isolve, icells, & + indxii, indxjj, & + nmat, sbdiag, & + diag, spdiag, & + rhs, xout) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + isolve , & ! number of cells with temps not converged + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(icells), & + intent(in) :: & + indxii, indxjj ! compressed indices for cells not converged + + integer (kind=int_kind), intent(in) :: & + nmat ! matrix dimension + + real (kind=dbl_kind), dimension (isolve,nmat), & + intent(in) :: & + sbdiag , & ! sub-diagonal matrix elements + diag , & ! diagonal matrix elements + spdiag , & ! super-diagonal matrix elements + rhs ! rhs of tri-diagonal matrix eqn. + + real (kind=dbl_kind), dimension (isolve,nmat), & + intent(inout) :: & + xout ! solution vector + + ! local variables + + integer (kind=int_kind) :: & + ij , & ! horizontal index, combines i and j loops + k ! row counter + + real (kind=dbl_kind), dimension (isolve) :: & + wbeta ! temporary matrix variable + + real (kind=dbl_kind), dimension(isolve,nilyr+nslyr+1):: & + wgamma ! temporary matrix variable + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + wbeta(ij) = diag(ij,1) + xout(ij,1) = rhs(ij,1) / wbeta(ij) + enddo ! ij + + do k = 2, nmat +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + wgamma(ij,k) = spdiag(ij,k-1) / wbeta(ij) + wbeta(ij) = diag(ij,k) - sbdiag(ij,k)*wgamma(ij,k) + xout(ij,k) = (rhs(ij,k) - sbdiag(ij,k)*xout(ij,k-1)) & + / wbeta(ij) + enddo ! ij + enddo ! k + + do k = nmat-1, 1, -1 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, isolve + xout(ij,k) = xout(ij,k) - wgamma(ij,k+1)*xout(ij,k+1) + enddo ! ij + enddo ! k + + end subroutine tridiag_solver + +!======================================================================= + + end module ice_therm_bl99 + +!======================================================================= diff --git a/source/ice_therm_itd.F90 b/source/ice_therm_itd.F90 new file mode 100755 index 00000000..1805dca1 --- /dev/null +++ b/source/ice_therm_itd.F90 @@ -0,0 +1,1847 @@ +! SVN:$Id: ice_therm_itd.F90 843 2014-10-02 19:54:30Z eclare $ +!======================================================================= +! +! Thermo calculations after call to coupler, related to ITD: +! ice thickness redistribution, lateral growth and melting. +! +! NOTE: The thermodynamic calculation is split in two for load balancing. +! First ice_therm_vertical computes vertical growth rates and coupler +! fluxes. Then ice_therm_itd does thermodynamic calculations not +! needed for coupling. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW +! Elizabeth C. Hunke, LANL +! 
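! A minimal single-column sketch of the forward-elimination / back-substitution
! pass (the Thomas algorithm) used by tridiag_solver in ice_therm_bl99 above.
! The 3x3 system below is made up; the loop structure and the wbeta/wgamma work
! variables mirror the routine, reduced to one column.

      program thomas_demo
         implicit none
         integer, parameter :: dbl  = selected_real_kind(13)
         integer, parameter :: nmat = 3
         real(dbl) :: sbdiag(nmat), diag(nmat), spdiag(nmat), rhs(nmat), xout(nmat)
         real(dbl) :: wbeta, wgamma(nmat)
         integer :: k

         ! illustrative diagonally dominant system; exact solution is (1,1,1)
         sbdiag = (/  0.0_dbl, -1.0_dbl, -1.0_dbl /)
         diag   = (/  4.0_dbl,  4.0_dbl,  4.0_dbl /)
         spdiag = (/ -1.0_dbl, -1.0_dbl,  0.0_dbl /)
         rhs    = (/  3.0_dbl,  2.0_dbl,  3.0_dbl /)

         ! forward elimination
         wbeta   = diag(1)
         xout(1) = rhs(1) / wbeta
         do k = 2, nmat
            wgamma(k) = spdiag(k-1) / wbeta
            wbeta     = diag(k) - sbdiag(k)*wgamma(k)
            xout(k)   = (rhs(k) - sbdiag(k)*xout(k-1)) / wbeta
         enddo

         ! back substitution
         do k = nmat-1, 1, -1
            xout(k) = xout(k) - wgamma(k+1)*xout(k+1)
         enddo

         write(*,*) 'solution:', xout
      end program thomas_demo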
+! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb +! 2004: Block structure added by William Lipscomb. +! 2006: Streamlined for efficiency by Elizabeth Hunke +! + module ice_therm_itd + + use ice_kinds_mod + use ice_constants + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: nilyr, nslyr, ncat, max_aero, & + n_aero, max_ntrcr + use ice_fileunits, only: nu_diag + + implicit none + save + + private + public :: update_vertical_tracers, lateral_melt, linear_itd, & + add_new_ice + + logical (kind=log_kind), parameter :: & + l_conservation_check = .false. ! if true, check conservation + ! (useful for debugging) + +!======================================================================= + + contains + +!======================================================================= +! +! ITD scheme that shifts ice among categories +! +! See Lipscomb, W. H. Remapping the thickness distribution in sea +! ice models. 2001, J. Geophys. Res., Vol 106, 13989--14000. +! +! Using the thermodynamic "velocities", interpolate to find the +! velocities in thickness space at the category boundaries, and +! compute the new locations of the boundaries. Then for each +! category, compute the thickness distribution function, g(h), +! between hL and hR, the left and right boundaries of the category. +! Assume g(h) is a linear polynomial that satisfies two conditions: +! +! (1) The ice area implied by g(h) equals aicen(n). +! (2) The ice volume implied by g(h) equals aicen(n)*hicen(n). +! +! Given g(h), at each boundary compute the ice area and volume lying +! between the original and new boundary locations. Transfer area +! and volume across each boundary in the appropriate direction, thus +! restoring the original boundaries. +! +! authors: William H. Lipscomb, LANL +! Elizabeth C. Hunke, LANL + + subroutine linear_itd (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend, & + aicen_init, vicen_init, & + aicen, trcrn, & + vicen, vsnon, & + aice, aice0, & + fpond, l_stop, & + istop, jstop) + + use ice_calendar, only: istep1 + use ice_itd, only: hin_max, hi_min, aggregate_area, shift_ice, & + column_sum, column_conservation_check + use ice_state, only: nt_qice, nt_qsno, nt_fbri, nt_sice, & + tr_pond_topo, nt_apnd, nt_hpnd, tr_brine + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of grid cells with ice + ntrcr ! number of tracers in use + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i/j indices + + integer (kind=int_kind), dimension (ntrcr), intent(in) :: & + trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(in) :: & + aicen_init, & ! initial ice concentration (before vertical thermo) + vicen_init ! initial ice volume (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! ice concentration + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aice , & ! concentration of ice + aice0 ! concentration of open water + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + fpond ! 
fresh water flux to ponds (kg/m^2/s) + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + n, nd , & ! category indices + k ! ice layer index + + real (kind=dbl_kind) :: & + slope , & ! rate of change of dhice with hice + dh0 , & ! change in ice thickness at h = 0 + da0 , & ! area melting from category 1 + damax , & ! max allowed reduction in category 1 area + etamin, etamax,& ! left and right limits of integration + x1 , & ! etamax - etamin + x2 , & ! (etamax^2 - etamin^2) / 2 + x3 , & ! (etamax^3 - etamin^3) / 3 + wk1, wk2 ! temporary variables + + real (kind=dbl_kind), dimension(icells,0:ncat) :: & + hbnew ! new boundary locations + + real (kind=dbl_kind), dimension(icells) :: & + work ! temporary work array (for hbnew) + + integer (kind=int_kind), dimension(icells) :: & + indxii, indxjj,& ! compressed i/j indices + indxij ! compressed 1D i/j indices + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + g0 , & ! constant coefficient in g(h) + g1 , & ! linear coefficient in g(h) + hL , & ! left end of range over which g(h) > 0 + hR ! right end of range over which g(h) > 0 + + real (kind=dbl_kind), dimension(icells,ncat) :: & + hicen , & ! ice thickness for each cat (m) + hicen_init , & ! initial ice thickness for each cat (pre-thermo) + dhicen , & ! thickness change for remapping (m) + daice , & ! ice area transferred across boundary + dvice ! ice volume transferred across boundary + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + eicen, & ! energy of melting for each ice layer (J/m^2) + esnon, & ! energy of melting for each snow layer (J/m^2) + vbrin, & ! ice volume defined by brine height (m) + sicen ! Bulk salt in h ice (ppt*m) + + real (kind=dbl_kind), dimension(icells) :: & + vice_init, vice_final, & ! ice volume summed over categories + vsno_init, vsno_final, & ! snow volume summed over categories + eice_init, eice_final, & ! ice energy summed over categories + esno_init, esno_final, & ! snow energy summed over categories + sice_init, sice_final, & ! ice bulk salinity summed over categories + vbri_init, vbri_final ! briny ice volume summed over categories + + ! NOTE: Third index of donor, daice, dvice should be ncat-1, + ! except that compilers would have trouble when ncat = 1 + integer (kind=int_kind), dimension(icells,ncat) :: & + donor ! donor category index + + logical (kind=log_kind), dimension(icells) :: & + remap_flag ! remap ITD if remap_flag(ij) is true + + character (len=char_len) :: & + fieldid ! field identifier + + logical (kind=log_kind), parameter :: & + print_diags = .false. ! if true, prints when remap_flag=F + + integer (kind=int_kind) :: & + iflag , & ! number of grid cells with remap_flag = .true. + ij, m ! combined horizontal indices + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + l_stop = .false. + istop = 0 + jstop = 0 + + hin_max(ncat) = 999.9_dbl_kind ! arbitrary big number + + !----------------------------------------------------------------- + ! Compute volume and energy sums that linear remapping should + ! conserve. 
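! A minimal sketch of the linear fit used by this remapping (see fit_line later
! in this module): with eta = h - hL, the coefficients of g(eta) = g0 + g1*eta
! are chosen so that integrating g over the category recovers the area aicen and
! integrating h*g recovers the volume aicen*hicen. The inputs below are made up,
! and hice is placed inside the central third of the category so the boundaries
! would not be adjusted.

      program fit_line_demo
         implicit none
         integer, parameter :: dbl = selected_real_kind(13)
         real(dbl) :: aicen, hice, hL, hR        ! illustrative inputs
         real(dbl) :: dhr, wk1, wk2, g0, g1
         real(dbl) :: span, area, volume

         aicen = 0.3_dbl        ! category ice concentration
         hL    = 0.6_dbl        ! left boundary (m)
         hR    = 1.4_dbl        ! right boundary (m)
         hice  = 1.1_dbl        ! mean category thickness (m)

         ! same algebra as fit_line (p666 = 2/3, p5 = 1/2)
         dhr = 1.0_dbl / (hR - hL)
         wk1 = 6.0_dbl * aicen * dhr
         wk2 = (hice - hL) * dhr
         g0  = wk1 * (2.0_dbl/3.0_dbl - wk2)
         g1  = 2.0_dbl * dhr * wk1 * (wk2 - 0.5_dbl)

         ! analytic integrals of g and of (hL+eta)*g over eta in [0, hR-hL]
         span   = hR - hL
         area   = g0*span + 0.5_dbl*g1*span**2
         volume = hL*area + 0.5_dbl*g0*span**2 + g1*span**3/3.0_dbl

         write(*,*) 'area   recovered:', area,   '  expected:', aicen
         write(*,*) 'volume recovered:', volume, '  expected:', aicen*hice
      end program fit_line_demo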
+ !----------------------------------------------------------------- + + if (l_conservation_check) then + + eicen(:,:,:) = c0 + esnon(:,:,:) = c0 + vbrin(:,:,:) = c0 + sicen(:,:,:) = c0 + + do n = 1, ncat + do k = 1, nilyr + do j = 1, ny_block + do i = 1, nx_block + eicen(i,j,n) = eicen(i,j,n) + trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + do k = 1, nslyr + do j = 1, ny_block + do i = 1, nx_block + esnon(i,j,n) = esnon(i,j,n) + trcrn(i,j,nt_qsno+k-1,n) & + * vsnon(i,j,n)/real(nslyr,kind=dbl_kind) + enddo + enddo + enddo + + if (tr_brine) then + do j = 1, ny_block + do i = 1, nx_block + vbrin(i,j,n) = vbrin(i,j,n) + trcrn(i,j,nt_fbri,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + endif + + do k = 1, nilyr + do j = 1, ny_block + do i = 1, nx_block + sicen(i,j,n) = sicen(i,j,n) + trcrn(i,j,nt_sice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + + enddo ! ncat + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vicen, vice_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vsnon, vsno_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + eicen, eice_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + esnon, esno_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vbrin, vbri_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + sicen, sice_init) + endif + + !----------------------------------------------------------------- + ! Initialize remapping flag. + ! Remapping is done wherever remap_flag = .true. + ! In rare cases the category boundaries may shift too far for the + ! remapping algorithm to work, and remap_flag is set to .false. + ! In these cases the simpler 'rebin' subroutine will shift ice + ! between categories if needed. + !----------------------------------------------------------------- + + do ij = 1, icells + remap_flag(ij) = .true. + enddo + + !----------------------------------------------------------------- + ! Compute thickness change in each category. + !----------------------------------------------------------------- + + do n = 1, ncat + do ij = 1, icells ! aice(i,j) > puny + i = indxi(ij) + j = indxj(ij) + + if (aicen_init(i,j,n) > puny) then + hicen_init(ij,n) = vicen_init(i,j,n) / aicen_init(i,j,n) + else + hicen_init(ij,n) = c0 + endif ! aicen_init > puny + + if (aicen(i,j,n) > puny) then + hicen (ij,n) = vicen(i,j,n) / aicen(i,j,n) + dhicen(ij,n) = hicen(ij,n) - hicen_init(ij,n) + else + hicen (ij,n) = c0 + dhicen(ij,n) = c0 + endif ! aicen > puny + + enddo ! ij + enddo ! n + + !----------------------------------------------------------------- + ! Compute new category boundaries, hbnew, based on changes in + ! ice thickness from vertical thermodynamics. + !----------------------------------------------------------------- + + do ij = 1, icells ! aice(i,j) > puny + hbnew(ij,0) = hin_max(0) + enddo + + do n = 1, ncat-1 + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells ! aice(i,j) > puny + i = indxi(ij) + j = indxj(ij) + + if (hicen_init(ij,n) > puny .and. & + hicen_init(ij,n+1) > puny) then + ! 
interpolate between adjacent category growth rates + slope = (dhicen(ij,n+1) - dhicen(ij,n)) / & + (hicen_init(ij,n+1) - hicen_init(ij,n)) + hbnew(ij,n) = hin_max(n) + dhicen(ij,n) & + + slope * (hin_max(n) - hicen_init(ij,n)) + elseif (hicen_init(ij,n) > puny) then ! hicen_init(n+1)=0 + hbnew(ij,n) = hin_max(n) + dhicen(ij,n) + elseif (hicen_init(ij,n+1) > puny) then ! hicen_init(n)=0 + hbnew(ij,n) = hin_max(n) + dhicen(ij,n+1) + else + hbnew(ij,n) = hin_max(n) + endif + + !----------------------------------------------------------------- + ! Check that each boundary lies between adjacent values of hicen. + ! If not, set remap_flag = .false. + ! Write diagnosis outputs if remap_flag was changed to false + !----------------------------------------------------------------- + + if (aicen(i,j,n) > puny .and. & + hicen(ij,n) >= hbnew(ij,n)) then + remap_flag(ij) = .false. + + if (print_diags) then + write(nu_diag,*) my_task,':',i,j, & + 'ITD: hicen(n) > hbnew(n)' + write(nu_diag,*) 'cat ',n + write(nu_diag,*) istep1, my_task,':',i,j, & + 'hicen(n) =', hicen(ij,n) + write(nu_diag,*) istep1, my_task,':',i,j, & + 'hbnew(n) =', hbnew(ij,n) + endif + + elseif (aicen(i,j,n+1) > puny .and. & + hicen(ij,n+1) <= hbnew(ij,n)) then + remap_flag(ij) = .false. + + if (print_diags) then + write(nu_diag,*) my_task,':',i,j, & + 'ITD: hicen(n+1) < hbnew(n)' + write(nu_diag,*) 'cat ',n + write(nu_diag,*) istep1, my_task,':',i,j, & + 'hicen(n+1) =', hicen(ij,n+1) + write(nu_diag,*) istep1, my_task,':',i,j, & + 'hbnew(n) =', hbnew(ij,n) + endif + endif + + !----------------------------------------------------------------- + ! Check that hbnew(n) lies between hin_max(n-1) and hin_max(n+1). + ! If not, set remap_flag = .false. + ! (In principle we could allow this, but it would make the code + ! more complicated.) + ! Write diagnosis outputs if remap_flag was changed to false + !----------------------------------------------------------------- + + if (hbnew(ij,n) > hin_max(n+1)) then + remap_flag(ij) = .false. + + if (print_diags) then + write(nu_diag,*) my_task,':',i,j, & + 'ITD hbnew(n) > hin_max(n+1)' + write(nu_diag,*) 'cat ',n + write(nu_diag,*) istep1, my_task,':',i,j, & + 'hbnew(n) =', hbnew(ij,n) + write(nu_diag,*) istep1, my_task,':',i,j, & + 'hin_max(n+1) =', hin_max(n+1) + endif + endif + + if (hbnew(ij,n) < hin_max(n-1)) then + remap_flag(ij) = .false. + + if (print_diags) then + write(nu_diag,*) my_task,':',i,j, & + 'ITD: hbnew(n) < hin_max(n-1)' + write(nu_diag,*) 'cat ',n + write(nu_diag,*) istep1, my_task,':',i,j, & + 'hbnew(n) =', hbnew(ij,n) + write(nu_diag,*) istep1, my_task,':',i,j, & + 'hin_max(n-1) =', hin_max(n-1) + endif + endif + + enddo ! ij + + enddo ! boundaries, 1 to ncat-1 + + !----------------------------------------------------------------- + ! Compute hbnew(ncat) + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells ! aice(i,j) > puny + i = indxi(ij) + j = indxj(ij) + if (aicen(i,j,ncat) > puny) then + hbnew(ij,ncat) = c3*hicen(ij,ncat) - c2*hbnew(ij,ncat-1) + else + hbnew(ij,ncat) = hin_max(ncat) + endif + hbnew(ij,ncat) = max(hbnew(ij,ncat),hin_max(ncat-1)) + enddo + + !----------------------------------------------------------------- + ! 
Identify cells where the ITD is to be remapped + !----------------------------------------------------------------- + + iflag = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (remap_flag(ij)) then + iflag = iflag + 1 + indxii(iflag) = i + indxjj(iflag) = j + indxij(iflag) = ij + endif + enddo + + allocate(g0(iflag,ncat)) + allocate(g1(iflag,ncat)) + allocate(hL(iflag,ncat)) + allocate(hR(iflag,ncat)) + + !----------------------------------------------------------------- + ! Compute g(h) for category 1 at start of time step + ! (hicen = hicen_init) + !----------------------------------------------------------------- + + do ij = 1, icells ! aice(i,j) > puny + work(ij) = hin_max(1) + enddo + + call fit_line(nx_block, ny_block, & + iflag, icells, & + indxii, indxjj, indxij, & + aicen(:,:,1), hicen_init(:,1), & + hbnew(:,0), work (:), & + g0 (:,1), g1 (:,1), & + hL (:,1), hR (:,1)) + + !----------------------------------------------------------------- + ! Find area lost due to melting of thin (category 1) ice + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iflag ! remap_flag = .true. + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (aicen(i,j,1) > puny) then + + dh0 = dhicen(m,1) + + if (dh0 < c0) then ! remove area from category 1 + dh0 = min(-dh0,hin_max(1)) ! dh0 --> |dh0| + + !----------------------------------------------------------------- + ! Integrate g(1) from 0 to dh0 to estimate area melted + !----------------------------------------------------------------- + + ! right integration limit (left limit = 0) + etamax = min(dh0,hR(ij,1)) - hL(ij,1) + + if (etamax > c0) then + x1 = etamax + x2 = p5 * etamax*etamax + da0 = g1(ij,1)*x2 + g0(ij,1)*x1 ! ice area removed + + ! constrain new thickness <= hicen_init + damax = aicen(i,j,1) & + * (c1-hicen(m,1)/hicen_init(m,1)) ! damax > 0 + da0 = min (da0, damax) + + ! remove area, conserving volume + hicen(m,1) = hicen(m,1) & + * aicen(i,j,1) / (aicen(i,j,1)-da0) + aicen(i,j,1) = aicen(i,j,1) - da0 + + if (tr_pond_topo) then + fpond(i,j) = fpond(i,j) - (da0 & + * trcrn(i,j,nt_apnd,1) & + * trcrn(i,j,nt_hpnd,1)) + endif + + endif ! etamax > 0 + + else ! dh0 >= 0 + hbnew(m,0) = min(dh0,hin_max(1)) ! shift hbnew(0) to right + endif + + endif ! aicen(i,j,1) > puny + enddo ! ij + + !----------------------------------------------------------------- + ! Compute g(h) for each ice thickness category. + !----------------------------------------------------------------- + + do n = 1, ncat + call fit_line(nx_block, ny_block, & + iflag, icells, & + indxii, indxjj, indxij, & + aicen(:,:,n), hicen(:,n), & + hbnew(:,n-1), hbnew(:,n), & + g0 (:,n), g1 (:,n), & + hL (:,n), hR (:,n)) + + enddo + + !----------------------------------------------------------------- + ! Compute area and volume to be shifted across each boundary. + !----------------------------------------------------------------- + + do n = 1, ncat + do ij = 1, icells + donor(ij,n) = 0 + daice(ij,n) = c0 + dvice(ij,n) = c0 + enddo + enddo + + do n = 1, ncat-1 + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iflag ! remap_flag = .true. + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (hbnew(m,n) > hin_max(n)) then ! transfer from n to n+1 + + ! left and right integration limits in eta space + etamin = max(hin_max(n), hL(ij,n)) - hL(ij,n) + etamax = min(hbnew(m,n), hR(ij,n)) - hL(ij,n) + donor(m,n) = n + + else ! 
hbnew(n) <= hin_max(n); transfer from n+1 to n + + ! left and right integration limits in eta space + etamin = c0 + etamax = min(hin_max(n), hR(ij,n+1)) - hL(ij,n+1) + donor(m,n) = n+1 + + endif ! hbnew(n) > hin_max(n) + + if (etamax > etamin) then + x1 = etamax - etamin + wk1 = etamin*etamin + wk2 = etamax*etamax + x2 = p5 * (wk2 - wk1) + wk1 = wk1*etamin + wk2 = wk2*etamax + x3 = p333 * (wk2 - wk1) + nd = donor(m,n) + daice(m,n) = g1(ij,nd)*x2 + g0(ij,nd)*x1 + dvice(m,n) = g1(ij,nd)*x3 + g0(ij,nd)*x2 & + + daice(m,n)*hL(ij,nd) + endif ! etamax > etamin + + ! If daice or dvice is very small, shift no ice. + + nd = donor(m,n) + + if (daice(m,n) < aicen(i,j,nd)*puny) then + daice(m,n) = c0 + dvice(m,n) = c0 + donor(m,n) = 0 + endif + + if (dvice(m,n) < vicen(i,j,nd)*puny) then + daice(m,n) = c0 + dvice(m,n) = c0 + donor(m,n) = 0 + endif + + ! If daice is close to aicen or dvice is close to vicen, + ! shift entire category + + if (daice(m,n) > aicen(i,j,nd)*(c1-puny)) then + daice(m,n) = aicen(i,j,nd) + dvice(m,n) = vicen(i,j,nd) + endif + + if (dvice(m,n) > vicen(i,j,nd)*(c1-puny)) then + daice(m,n) = aicen(i,j,nd) + dvice(m,n) = vicen(i,j,nd) + endif + + enddo ! ij + enddo ! boundaries, 1 to ncat-1 + + deallocate(g0) + deallocate(g1) + deallocate(hL) + deallocate(hR) + + !----------------------------------------------------------------- + ! Shift ice between categories as necessary + !----------------------------------------------------------------- + + ! maintain qsno negative definiteness + do n = 1, ncat + do k = nt_qsno, nt_qsno+nslyr-1 + do ij = 1, iflag + i = indxii(ij) + j = indxjj(ij) + trcrn(i,j,k,n) = trcrn(i,j,k,n) + rhos*Lfresh + enddo + enddo + enddo + + call shift_ice (nx_block, ny_block, & + indxi, indxj, & + icells, & + ntrcr, trcr_depend, & + aicen, trcrn, & + vicen, vsnon, & + hicen, donor, & + daice, dvice, & + l_stop, & + istop, jstop) + + + ! maintain qsno negative definiteness + do n = 1, ncat + do k = nt_qsno, nt_qsno+nslyr-1 + do ij = 1, iflag + i = indxii(ij) + j = indxjj(ij) + trcrn(i,j,k,n) = trcrn(i,j,k,n) - rhos*Lfresh + enddo + enddo + enddo + + if (l_stop) return + + !----------------------------------------------------------------- + ! Make sure hice(i,j,1) >= minimum ice thickness hi_min. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, iflag ! remap_flag = .true. + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + if (hi_min > c0 .and. & + aicen(i,j,1) > puny .and. hicen(m,1) < hi_min) then + + da0 = aicen(i,j,1) * (c1 - hicen(m,1)/hi_min) + aicen(i,j,1) = aicen(i,j,1) - da0 + hicen(m,1) = hi_min + + if (tr_pond_topo) then + fpond(i,j) = fpond(i,j) - (da0 & + * trcrn(i,j,nt_apnd,1) & + * trcrn(i,j,nt_hpnd,1)) + endif + endif + enddo ! ij + + !----------------------------------------------------------------- + ! Update fractional ice area in each grid cell. + !----------------------------------------------------------------- + call aggregate_area (nx_block, ny_block, & + aicen, & + aice, aice0) + + if (l_stop) return + + !----------------------------------------------------------------- + ! Check volume and energy conservation. 
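! A minimal sketch of the boundary-transfer integrals computed above: daice and
! dvice are the exact integrals of g(eta) = g0 + g1*eta and of (hL + eta)*g(eta)
! over [etamin, etamax], which the x1/x2/x3 moments evaluate in closed form.
! The values below are made up; a midpoint quadrature is used only as a cross-check.

      program transfer_moments_demo
         implicit none
         integer, parameter :: dbl = selected_real_kind(13)
         integer, parameter :: nq  = 10000
         real(dbl) :: g0, g1, hL, etamin, etamax
         real(dbl) :: x1, x2, x3, daice, dvice
         real(dbl) :: deta, eta, asum, vsum
         integer :: iq

         g0 = 0.8_dbl                      ! illustrative g(eta) intercept
         g1 = -0.4_dbl                     ! illustrative g(eta) slope
         hL = 0.6_dbl                      ! left boundary of donor category (m)
         etamin = 0.1_dbl                  ! integration limits in eta space
         etamax = 0.5_dbl

         ! closed-form moments, as in linear_itd
         x1 = etamax - etamin
         x2 = 0.5_dbl * (etamax**2 - etamin**2)
         x3 = (etamax**3 - etamin**3) / 3.0_dbl
         daice = g1*x2 + g0*x1
         dvice = g1*x3 + g0*x2 + daice*hL

         ! midpoint-rule cross-check of the same integrals
         deta = (etamax - etamin) / real(nq,dbl)
         asum = 0.0_dbl
         vsum = 0.0_dbl
         do iq = 1, nq
            eta  = etamin + (real(iq,dbl) - 0.5_dbl)*deta
            asum = asum + (g0 + g1*eta)*deta
            vsum = vsum + (hL + eta)*(g0 + g1*eta)*deta
         enddo

         write(*,*) 'daice:', daice, '  quadrature:', asum
         write(*,*) 'dvice:', dvice, '  quadrature:', vsum
      end program transfer_moments_demo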
+ !----------------------------------------------------------------- + + if (l_conservation_check) then + + eicen(:,:,:) = c0 + esnon(:,:,:) = c0 + vbrin(:,:,:) = c0 + sicen(:,:,:) = c0 + + do n = 1, ncat + do k = 1, nilyr + do j = 1, ny_block + do i = 1, nx_block + eicen(i,j,n) = eicen(i,j,n) + trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + do k = 1, nslyr + do j = 1, ny_block + do i = 1, nx_block + esnon(i,j,n) = esnon(i,j,n) + trcrn(i,j,nt_qsno+k-1,n) & + * vsnon(i,j,n)/real(nslyr,kind=dbl_kind) + enddo + enddo + enddo + + if (tr_brine) then + do j = 1, ny_block + do i = 1, nx_block + vbrin(i,j,n) = vbrin(i,j,n) + trcrn(i,j,nt_fbri,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + endif + + do k = 1, nilyr + do j = 1, ny_block + do i = 1, nx_block + sicen(i,j,n) = sicen(i,j,n) + trcrn(i,j,nt_sice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + + enddo + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vicen, vice_final) + fieldid = 'vice, ITD remap' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + vice_init, vice_final, & + puny, l_stop, & + istop, jstop) + if (l_stop) return + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vsnon, vsno_final) + fieldid = 'vsno, ITD remap' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + vsno_init, vsno_final, & + puny, l_stop, & + istop, jstop) + if (l_stop) return + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + eicen, eice_final) + fieldid = 'eice, ITD remap' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + eice_init, eice_final, & + puny*Lfresh*rhoi, & + l_stop, & + istop, jstop) + if (l_stop) return + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + esnon, esno_final) + fieldid = 'esno, ITD remap' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + esno_init, esno_final, & + puny*Lfresh*rhos, & + l_stop, & + istop, jstop) + if (l_stop) return + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + sicen, sice_final) + fieldid = 'sicen, ITD remap' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + sice_init, sice_final, & + puny, l_stop, & + istop, jstop) + if (l_stop) return + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vbrin, vbri_final) + fieldid = 'vbrin, ITD remap' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + vbri_init, vbri_final, & + puny*c10, l_stop, & + istop, jstop) + if (l_stop) return + endif ! conservation check + + end subroutine linear_itd + +!======================================================================= +! +! Fit g(h) with a line, satisfying area and volume constraints. +! To reduce roundoff errors caused by large values of g0 and g1, +! we actually compute g(eta), where eta = h - hL, and hL is the +! left boundary. +! +! authors: William H. Lipscomb, LANL +! Elizabeth C. Hunke, LANL + + subroutine fit_line (nx_block, ny_block, & + iflag, icells, & + indxii, indxjj, indxij, & + aicen, hice, & + hbL, hbR, & + g0, g1, & + hL, hR) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of grid cells with ice + iflag ! 
number of grid cells with remap_flag = .true. + + integer (kind=int_kind), dimension (icells), & + intent(in) :: & + indxii, indxjj, & ! compressed i/j indices (from iflag) + indxij ! compressed i/j indices (from icells) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aicen ! concentration of ice + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + hbL, hbR , & ! left and right category boundaries + hice ! ice thickness + + real (kind=dbl_kind), dimension (iflag), intent(out):: & + g0, g1 , & ! coefficients in linear equation for g(eta) + hL , & ! min value of range over which g(h) > 0 + hR ! max value of range over which g(h) > 0 + + ! local varibles + + integer (kind=int_kind) :: & + i,j , & ! horizontal indices + ij, m ! combined horizontal indices + + real (kind=dbl_kind) :: & + h13 , & ! hbL + 1/3 * (hbR - hbL) + h23 , & ! hbL + 2/3 * (hbR - hbL) + dhr , & ! 1 / (hR - hL) + wk1, wk2 ! temporary variables + + !----------------------------------------------------------------- + ! Compute g0, g1, hL, and hR for each category to be remapped. + !----------------------------------------------------------------- + + do ij = 1, iflag + i = indxii(ij) + j = indxjj(ij) + m = indxij(ij) + + if (aicen(i,j) > puny .and. hbR(m) - hbL(m) > puny) then + + ! Initialize hL and hR + + hL(ij) = hbL(m) + hR(ij) = hbR(m) + + ! Change hL or hR if hicen(n) falls outside central third of range + + h13 = p333 * (c2*hL(ij) + hR(ij)) + h23 = p333 * (hL(ij) + c2*hR(ij)) + if (hice(m) < h13) then + hR(ij) = c3*hice(m) - c2*hL(ij) + elseif (hice(m) > h23) then + hL(ij) = c3*hice(m) - c2*hR(ij) + endif + + ! Compute coefficients of g(eta) = g0 + g1*eta + + dhr = c1 / (hR(ij) - hL(ij)) + wk1 = c6 * aicen(i,j) * dhr + wk2 = (hice(m) - hL(ij)) * dhr + g0(ij) = wk1 * (p666 - wk2) + g1(ij) = c2*dhr * wk1 * (wk2 - p5) + + else + + g0(ij) = c0 + g1(ij) = c0 + hL(ij) = c0 + hR(ij) = c0 + + endif ! aicen > puny + + enddo ! ij + + end subroutine fit_line + +!======================================================================= +! +! Given some added new ice to the base of the existing ice, recalculate +! vertical tracer so that new grid cells are all the same size. +! +! author: A. K. Turner, LANL +! + subroutine update_vertical_tracers(trc, h1, h2, trc0) + + real (kind=dbl_kind), dimension(1:nilyr), intent(inout) :: & + trc ! vertical tracer + + real (kind=dbl_kind), intent(in) :: & + h1, & ! old thickness + h2, & ! new thickness + trc0 ! tracer value of added ice on ice bottom + + ! local variables + + real(kind=dbl_kind), dimension(1:nilyr) :: trc2 ! updated tracer temporary + + ! vertical indexes for old and new grid + integer :: k1, k2 + + real (kind=dbl_kind) :: & + z1a, z1b, & ! upper, lower boundary of old cell/added new ice at bottom + z2a, z2b, & ! upper, lower boundary of new cell + overlap , & ! overlap between old and new cell + rnilyr + + rnilyr = real(nilyr,dbl_kind) + + ! loop over new grid cells + do k2 = 1, nilyr + + ! initialize new tracer + trc2(k2) = c0 + + ! calculate upper and lower boundary of new cell + z2a = ((k2 - 1) * h2) / rnilyr + z2b = (k2 * h2) / rnilyr + + ! loop over old grid cells + do k1 = 1, nilyr + + ! calculate upper and lower boundary of old cell + z1a = ((k1 - 1) * h1) / rnilyr + z1b = (k1 * h1) / rnilyr + + ! calculate overlap between old and new cell + overlap = max(min(z1b, z2b) - max(z1a, z2a), c0) + + ! aggregate old grid cell contribution to new cell + trc2(k2) = trc2(k2) + overlap * trc(k1) + + enddo ! k1 + + ! 
calculate upper and lower boundary of added new ice at bottom + z1a = h1 + z1b = h2 + + ! calculate overlap between added ice and new cell + overlap = max(min(z1b, z2b) - max(z1a, z2a), c0) + ! aggregate added ice contribution to new cell + trc2(k2) = trc2(k2) + overlap * trc0 + ! renormalize new grid cell + trc2(k2) = (rnilyr * trc2(k2)) / h2 + + enddo ! k2 + + ! update vertical tracer array with the adjusted tracer + trc = trc2 + + end subroutine update_vertical_tracers + +!======================================================================= +! +! Given the fraction of ice melting laterally in each grid cell +! (computed in subroutine frzmlt_bottom_lateral), melt ice. +! +! author: C. M. Bitz, UW +! 2003: Modified by William H. Lipscomb and Elizabeth C. Hunke, LANL +! + subroutine lateral_melt (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, fpond, & + fresh, fsalt, & + fhocn, faero_ocn, & + rside, meltl, & + aicen, vicen, & + vsnon, trcrn) + + use ice_state, only: nt_qice, nt_qsno, & + nt_aero, tr_aero, tr_pond_topo, nt_apnd, nt_hpnd + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_ntrcr,ncat), & + intent(in) :: & + trcrn ! tracer array + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + rside ! fraction of ice that melts laterally + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + fpond , & ! fresh water flux to ponds (kg/m^2/s) + fresh , & ! fresh water flux to ocean (kg/m^2/s) + fsalt , & ! salt flux to ocean (kg/m^2/s) + fhocn , & ! net heat flux to ocean (W/m^2) + meltl ! lateral ice melt (m/step-->cm/day) + + real (kind=dbl_kind), dimension(nx_block,ny_block,max_aero), & + intent(inout) :: & + faero_ocn ! aerosol flux to ocean (kg/m^2/s) + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + n , & ! thickness category index + k , & ! layer index + ij , & ! horizontal index, combines i and j loops + icells ! number of cells with aice > puny + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! compressed indices for cells with aice > puny + + real (kind=dbl_kind) :: & + dfhocn , & ! change in fhocn + dfpond , & ! change in fpond + dfresh , & ! change in fresh + dfsalt ! change in fsalt + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + vicen_init ! volume per unit area of ice (m) + + do n = 1, ncat + + !----------------------------------------------------------------- + ! Identify grid cells with lateral melting. + !----------------------------------------------------------------- + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (rside(i,j) > c0) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + !----------------------------------------------------------------- + ! Melt the ice and increment fluxes. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! fluxes to coupler + ! 
dfresh > 0, dfsalt > 0, dfpond > 0 + + dfresh = (rhos*vsnon(i,j,n) + rhoi*vicen(i,j,n)) & + * rside(i,j) / dt + dfsalt = rhoi*vicen(i,j,n)*ice_ref_salinity*p001 & + * rside(i,j) / dt + fresh(i,j) = fresh(i,j) + dfresh + fsalt(i,j) = fsalt(i,j) + dfsalt + + if (tr_pond_topo) then + dfpond = aicen(i,j,n) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,nt_hpnd,n) & + * rside(i,j) + fpond(i,j) = fpond(i,j) - dfpond + endif + + ! history diagnostics + meltl(i,j) = meltl(i,j) + vicen(i,j,n)*rside(i,j) + + ! state variables + vicen_init(i,j) = vicen(i,j,n) + aicen(i,j,n) = aicen(i,j,n) * (c1 - rside(i,j)) + vicen(i,j,n) = vicen(i,j,n) * (c1 - rside(i,j)) + vsnon(i,j,n) = vsnon(i,j,n) * (c1 - rside(i,j)) + enddo ! ij + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! enthalpy tracers do not change (e/v constant) + ! heat flux to coupler for ice melt (dfhocn < 0) + dfhocn = trcrn(i,j,nt_qice+k-1,n)*rside(i,j) / dt & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + fhocn(i,j) = fhocn(i,j) + dfhocn + enddo ! ij + enddo ! nilyr + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! heat flux to coupler for snow melt (dfhocn < 0) + + dfhocn = trcrn(i,j,nt_qsno+k-1,n)*rside(i,j) / dt & + * vsnon(i,j,n)/real(nslyr,kind=dbl_kind) + fhocn(i,j) = fhocn(i,j) + dfhocn + enddo ! ij + enddo ! nslyr + + if (tr_aero) then + do k = 1, n_aero +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + faero_ocn(i,j,k) = faero_ocn(i,j,k) + (vsnon(i,j,n) & + *(trcrn(i,j,nt_aero +4*(k-1),n) & + + trcrn(i,j,nt_aero+1+4*(k-1),n)) & + + vicen(i,j,n) & + *(trcrn(i,j,nt_aero+2+4*(k-1),n) & + + trcrn(i,j,nt_aero+3+4*(k-1),n))) & + * rside(i,j) / dt + enddo + enddo + endif + + enddo ! n + + end subroutine lateral_melt + +!======================================================================= +! +! Given the volume of new ice grown in open water, compute its area +! and thickness and add it to the appropriate category or categories. +! +! NOTE: Usually all the new ice is added to category 1. An exception is +! made if there is no open water or if the new ice is too thick +! for category 1, in which case ice is distributed evenly over the +! entire cell. Subroutine rebin should be called in case the ice +! thickness lies outside category bounds after new ice formation. +! +! When ice must be added to categories above category 1, the mushy +! formulation (ktherm=2) adds it only to the bottom of the ice. When +! added to only category 1, all formulations combine the new ice and +! existing ice tracers as bulk quantities. +! +! authors William H. Lipscomb, LANL +! Elizabeth C. Hunke, LANL +! Adrian Turner, LANL +! 
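
The partition described in the note above (fill the open-water fraction at a capped thickness, then spread any surplus volume over the existing categories) can be illustrated with a small standalone sketch. This is not part of the patch: the numbers are hypothetical, hi0max stands in for the 0.9*hin_max(1) cap computed inside add_new_ice, and the zero-open-water branch (all new volume becomes hsurp) is omitted for brevity.

program frazil_partition_sketch
  implicit none
  ! Hypothetical inputs: new frazil volume per unit cell area, open-water
  ! fraction, existing ice fraction, and the category-1 thickness cap.
  real, parameter :: vi0new_in  = 0.08   ! m of new ice, cell average
  real, parameter :: aice0      = 0.10   ! open-water fraction
  real, parameter :: aice       = 0.90   ! existing ice fraction
  real, parameter :: hi0max     = 0.60   ! stand-in for 0.9*hin_max(1)
  real, parameter :: hfrazilmin = 0.05   ! minimum frazil thickness (m)
  real :: vi0new, hi0new, ai0new, vsurp, hsurp

  vi0new = vi0new_in
  hi0new = max(vi0new/aice0, hfrazilmin)   ! tentative new-ice thickness
  if (hi0new > hi0max .and. aice0 < 1.0) then
     hi0new = hi0max                       ! cap thickness for category 1
     ai0new = aice0                        ! fill all open water
     vsurp  = vi0new - ai0new*hi0new       ! leftover volume
     hsurp  = vsurp / aice                 ! extra thickness for existing ice
     vi0new = ai0new*hi0new                ! volume actually placed in cat 1
  else
     ai0new = vi0new/hi0new                ! all new ice fits in category 1
     hsurp  = 0.0
  end if
  print *, 'ai0new =', ai0new, ' hi0new =', hi0new, ' hsurp =', hsurp
end program frazil_partition_sketch

With these hypothetical numbers the open water is filled at the 0.6 m cap, and the remaining 0.02 m of volume thickens the existing categories by roughly 2 cm, matching the "distribute excess volume over all categories" case noted above.
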
+ subroutine add_new_ice (nx_block, ny_block, & + ntrcr, icells, & + indxi, indxj, & + dt, & + aicen, trcrn, & + vicen, & + aice0, aice, & + frzmlt, frazil, & + frz_onset, yday, & + update_ocn_f, & + fresh, fsalt, & + Tf, sss, & + salinz, phi_init, & + dSin0_frazil, & + nbtrcr, flux_bio, & + ocean_bio, & + l_stop, & + istop, jstop) + + use ice_itd, only: hin_max, column_sum, & + column_conservation_check + use ice_state, only: nt_Tsfc, nt_iage, nt_FY, nt_alvl, nt_vlvl, nt_aero, & + nt_sice, nt_qice, & + nt_apnd, tr_pond_cesm, tr_pond_lvl, tr_pond_topo, & + tr_iage, tr_FY, tr_lvl, tr_aero, tr_brine + use ice_therm_mushy, only: liquidus_temperature_mush, enthalpy_mush + use ice_therm_shared, only: ktherm, hfrazilmin + use ice_zbgc, only: add_new_ice_bgc + use ice_zbgc_shared, only: skl_bgc + +#ifdef AusCOM + use cpl_parameters, only: pop_icediag +#endif + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + icells ! number of ice/ocean grid cells + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed i/j indices + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aice , & ! total concentration of ice + frzmlt, & ! freezing/melting potential (W/m^2) + Tf , & ! freezing temperature (C) + sss ! sea surface salinity (ppt) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen ! volume per unit area of ice (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + ! 1: surface temperature + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aice0 , & ! concentration of open water + frazil , & ! frazil ice growth (m/step-->cm/day) + fresh , & ! fresh water flux to ocean (kg/m^2/s) + fsalt ! salt flux to ocean (kg/m^2/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout), optional :: & + frz_onset ! day of year that freezing begins (congel or frazil) + + real (kind=dbl_kind), intent(in), optional :: & + yday ! day of year + + real (kind=dbl_kind), dimension(nx_block,ny_block,nilyr+1), intent(in) :: & + salinz ! initial salinity profile + + real (kind=dbl_kind), intent(in) :: & + phi_init , & ! initial frazil liquid fraction + dSin0_frazil ! initial frazil bulk salinity reduction from sss + + logical (kind=log_kind), intent(in) :: & + update_ocn_f ! if true, update fresh water and salt fluxes + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! BGC + integer (kind=int_kind), intent(in) :: & + nbtrcr ! number of biology tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block,nbtrcr), & + intent(inout) :: & + flux_bio ! tracer flux to ocean from biology (mmol/m^2/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nbtrcr), & + intent(in) :: & + ocean_bio ! ocean concentration of biological tracer + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + n , & ! ice category index + k , & ! ice layer index + it ! aerosol tracer index + + real (kind=dbl_kind), dimension (icells) :: & + ai0new , & ! area of new ice added to cat 1 + vi0new , & ! volume of new ice added to cat 1 + hsurp ! 
thickness of new ice added to each cat + + real (kind=dbl_kind), dimension (icells) :: & + vice1 , & ! starting volume of existing ice + vice_init, vice_final, & ! ice volume summed over categories + eice_init, eice_final ! ice energy summed over categories + + real (kind=dbl_kind) :: & + fnew , & ! heat flx to open water for new ice (W/m^2) + hi0new , & ! thickness of new ice + hi0max , & ! max allowed thickness of new ice + vsurp , & ! volume of new ice added to each cat + vtmp , & ! total volume of new and old ice + area1 , & ! starting fractional area of existing ice + alvl , & ! starting level ice area + rnilyr , & ! real(nilyr) + dfresh , & ! change in fresh + dfsalt , & ! change in fsalt + Ti ! frazil temperature + + real (kind=dbl_kind), dimension (icells) :: & + qi0new , & ! frazil ice enthalpy + Si0new ! frazil ice bulk salinity + + real (kind=dbl_kind), dimension (icells,nilyr) :: & + Sprofile ! salinity profile used for new ice additions + + integer (kind=int_kind) :: & + jcells, kcells , & ! grid cell counters + ij, m ! combined i/j horizontal indices + + integer (kind=int_kind), dimension (icells) :: & + indxij2, indxij3 , & ! compressed i/j indices + indxi2, indxj2 , & + indxi3, indxj3 + + character (len=char_len) :: & + fieldid ! field identifier + + ! BGC + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + eicen, & ! energy of melting for each ice layer (J/m^2) + aicen_init, & ! fractional area of ice + vicen_init ! volume per unit area of ice (m) + + real (kind=dbl_kind), dimension (icells) :: & + vi0_init ! volume of new ice + + !----------------------------------------------------------------- + ! initialize + !----------------------------------------------------------------- + + l_stop = .false. + istop = 0 + jstop = 0 + + jcells = 0 + kcells = 0 + + rnilyr = real(nilyr,kind=dbl_kind) + + if (ncat > 1) then + hi0max = hin_max(1)*0.9_dbl_kind ! not too close to boundary + else + hi0max = bignum ! big number + endif + + ! for bgc + aicen_init(:,:,:) = aicen(:,:,:) + vicen_init(:,:,:) = vicen(:,:,:) + + if (l_conservation_check) then + + ! initial ice volume and energy in each grid cell + eicen(:,:,:) = c0 + do n = 1, ncat + do k = 1, nilyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + eicen(i,j,n) = eicen(i,j,n) + trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vicen, vice_init) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + eicen, eice_init) + + endif ! l_conservation_check + + !----------------------------------------------------------------- + ! Compute average enthalpy of new ice. + ! Sprofile is the salinity profile used when adding new ice to + ! all categories, for ktherm/=2, and to category 1 for all ktherm. + ! + ! NOTE: POP assumes new ice is fresh! + !----------------------------------------------------------------- + + if (ktherm == 2) then ! mushy +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (sss(i,j) > c2 * dSin0_frazil) then + Si0new(ij) = sss(i,j) - dSin0_frazil + else + Si0new(ij) = sss(i,j)**2 / (c4*dSin0_frazil) + endif + do k = 1, nilyr + Sprofile(ij,k) = Si0new(ij) + enddo + Ti = min(liquidus_temperature_mush(Si0new(ij)/phi_init), -p1) + qi0new(ij) = enthalpy_mush(Ti, Si0new(ij)) + enddo ! 
ij + + else + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + do k = 1, nilyr + Sprofile(ij,k) = salinz(i,j,k) + enddo + qi0new(ij) = -rhoi*Lfresh + enddo ! ij + endif ! ktherm + + !----------------------------------------------------------------- + ! Compute the volume, area, and thickness of new ice. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + fnew = max (frzmlt(i,j), c0) ! fnew > 0 iff frzmlt > 0 + vi0new(ij) = -fnew*dt / qi0new(ij) ! note sign convention, qi < 0 + vi0_init(ij) = vi0new(ij) ! for bgc + + ! increment ice volume and energy + if (l_conservation_check) then + vice_init(ij) = vice_init(ij) + vi0new(ij) + eice_init(ij) = eice_init(ij) + vi0new(ij)*qi0new(ij) + endif + + ! history diagnostics + frazil(i,j) = vi0new(ij) + + if (present(frz_onset) .and. present(yday)) then + if (frazil(i,j) > puny .and. frz_onset(i,j) < puny) & + frz_onset(i,j) = yday + endif + + !----------------------------------------------------------------- + ! Update fresh water and salt fluxes. + ! + ! NOTE: POP assumes fresh water and salt flux due to frzmlt > 0 + ! is NOT included in fluxes fresh and fsalt. + !----------------------------------------------------------------- + + if (update_ocn_f) then + dfresh = -rhoi*vi0new(ij)/dt + dfsalt = ice_ref_salinity*p001*dfresh + + fresh(i,j) = fresh(i,j) + dfresh + fsalt(i,j) = fsalt(i,j) + dfsalt + endif + + !----------------------------------------------------------------- + ! Decide how to distribute the new ice. + !----------------------------------------------------------------- + + hsurp(ij) = c0 + ai0new(ij) = c0 + + if (vi0new(ij) > c0) then + + ! new ice area and thickness + ! hin_max(0) < new ice thickness < hin_max(1) + if (aice0(i,j) > puny) then + hi0new = max(vi0new(ij)/aice0(i,j), hfrazilmin) + if (hi0new > hi0max .and. aice0(i,j)+puny < c1) then + ! distribute excess volume over all categories (below) + hi0new = hi0max + ai0new(ij) = aice0(i,j) + vsurp = vi0new(ij) - ai0new(ij)*hi0new + hsurp(ij) = vsurp / aice(i,j) + vi0new(ij) = ai0new(ij)*hi0new + else + ! put ice in a single category, with hsurp = 0 + ai0new(ij) = vi0new(ij)/hi0new + endif + else ! aice0 < puny + hsurp(ij) = vi0new(ij)/aice(i,j) ! new thickness in each cat + vi0new(ij) = c0 + endif ! aice0 > puny + endif ! vi0new > puny + + !----------------------------------------------------------------- + ! Identify grid cells receiving new ice. + !----------------------------------------------------------------- + + i = indxi(ij) + j = indxj(ij) + + if (vi0new(ij) > c0) then ! add ice to category 1 + jcells = jcells + 1 + indxi2(jcells) = i + indxj2(jcells) = j + indxij2(jcells) = ij + endif + + if (hsurp(ij) > c0) then ! add ice to all categories + kcells = kcells + 1 + indxi3(kcells) = i + indxj3(kcells) = j + indxij3(kcells) = ij + endif + + enddo ! ij + + !----------------------------------------------------------------- + ! Distribute excess ice volume among ice categories by increasing + ! ice thickness, leaving ice area unchanged. + ! + ! NOTE: If new ice contains globally conserved tracers + ! (e.g., isotopes from seawater), code must be added here. + ! + ! The mushy formulation (ktherm=2) puts the new ice only at the + ! bottom of existing ice and adjusts the layers accordingly. + ! The other formulations distribute the new ice throughout the + ! 
existing ice column. + !----------------------------------------------------------------- + + do n = 1, ncat + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, kcells + i = indxi3(ij) + j = indxj3(ij) + m = indxij3(ij) + + vsurp = hsurp(m) * aicen(i,j,n) + + ! update ice age due to freezing (new ice age = dt) + vtmp = vicen(i,j,n) + vsurp + if (tr_iage .and. vtmp > puny) & + trcrn(i,j,nt_iage,n) = & + (trcrn(i,j,nt_iage,n)*vicen(i,j,n) + dt*vsurp) / vtmp + + if (tr_lvl .and. vicen(i,j,n) > puny) then + trcrn(i,j,nt_vlvl,n) = & + (trcrn(i,j,nt_vlvl,n)*vicen(i,j,n) + & + trcrn(i,j,nt_alvl,n)*vsurp) / vtmp + endif + + if (tr_aero .and. vtmp > puny) then + do it = 1, n_aero + trcrn(i,j,nt_aero+2+4*(it-1),n) = & + trcrn(i,j,nt_aero+2+4*(it-1),n)*vicen(i,j,n) / vtmp + trcrn(i,j,nt_aero+3+4*(it-1),n) = & + trcrn(i,j,nt_aero+3+4*(it-1),n)*vicen(i,j,n) / vtmp + enddo + endif + + ! update category volumes + vicen(i,j,n) = vtmp + + enddo ! ij + + if (ktherm == 2) then + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, kcells + i = indxi3(ij) + j = indxj3(ij) + m = indxij3(ij) + + vsurp = hsurp(m) * aicen(i,j,n) ! note - save this above? + vtmp = vicen(i,j,n) - vsurp ! vicen is the new volume + if (vicen(i,j,n) > c0) then + call update_vertical_tracers(trcrn(i,j,nt_qice:nt_qice+nilyr-1,n), & + vtmp, vicen(i,j,n), qi0new(m)) + call update_vertical_tracers(trcrn(i,j,nt_sice:nt_sice+nilyr-1,n), & + vtmp, vicen(i,j,n), Si0new(m)) + endif + enddo ! ij + + else + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, kcells + i = indxi3(ij) + j = indxj3(ij) + m = indxij3(ij) + + ! factor of nilyr cancels out + vsurp = hsurp(m) * aicen(i,j,n) ! note - save this above? + vtmp = vicen(i,j,n) - vsurp ! vicen is the new volume + if (vicen(i,j,n) > c0) then + ! enthalpy + trcrn(i,j,nt_qice+k-1,n) = & + (trcrn(i,j,nt_qice+k-1,n)*vtmp + qi0new(ij)*vsurp) / vicen(i,j,n) + ! salinity + trcrn(i,j,nt_sice+k-1,n) = & + (trcrn(i,j,nt_sice+k-1,n)*vtmp + Sprofile(ij,k)*vsurp) / vicen(i,j,n) + endif + enddo ! ij + enddo ! k + + endif ! ktherm + + enddo ! n + + !----------------------------------------------------------------- + ! Combine new ice grown in open water with category 1 ice. + ! Assume that vsnon and esnon are unchanged. + ! The mushy formulation assumes salt from frazil is added uniformly + ! to category 1, while the others use a salinity profile. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, jcells + i = indxi2(ij) + j = indxj2(ij) + m = indxij2(ij) + + area1 = aicen(i,j,1) ! save + vice1(ij) = vicen(i,j,1) ! 
save + aicen(i,j,1) = aicen(i,j,1) + ai0new(m) + aice0(i,j) = aice0(i,j) - ai0new(m) + vicen(i,j,1) = vicen(i,j,1) + vi0new(m) + + trcrn(i,j,nt_Tsfc,1) = & + (trcrn(i,j,nt_Tsfc,1)*area1 + Tf(i,j)*ai0new(m))/aicen(i,j,1) + trcrn(i,j,nt_Tsfc,1) = min (trcrn(i,j,nt_Tsfc,1), c0) + + if (tr_FY) then + trcrn(i,j,nt_FY,1) = & + (trcrn(i,j,nt_FY,1)*area1 + ai0new(m))/aicen(i,j,1) + trcrn(i,j,nt_FY,1) = min(trcrn(i,j,nt_FY,1), c1) + endif + + if (vicen(i,j,1) > puny) then + if (tr_iage) & + trcrn(i,j,nt_iage,1) = & + (trcrn(i,j,nt_iage,1)*vice1(ij) + dt*vi0new(m))/vicen(i,j,1) + + if (tr_aero) then + do it = 1, n_aero + trcrn(i,j,nt_aero+2+4*(it-1),1) = & + trcrn(i,j,nt_aero+2+4*(it-1),1)*vice1(ij)/vicen(i,j,1) + trcrn(i,j,nt_aero+3+4*(it-1),1) = & + trcrn(i,j,nt_aero+3+4*(it-1),1)*vice1(ij)/vicen(i,j,1) + enddo + endif + + if (tr_lvl) then + alvl = trcrn(i,j,nt_alvl,1) + trcrn(i,j,nt_alvl,1) = & + (trcrn(i,j,nt_alvl,1)*area1 + ai0new(m))/aicen(i,j,1) + trcrn(i,j,nt_vlvl,1) = & + (trcrn(i,j,nt_vlvl,1)*vice1(ij) + vi0new(m))/vicen(i,j,1) + endif + + if (tr_pond_cesm .or. tr_pond_topo) then + trcrn(i,j,nt_apnd,1) = & + trcrn(i,j,nt_apnd,1)*area1/aicen(i,j,1) + elseif (tr_pond_lvl) then + if (trcrn(i,j,nt_alvl,1) > puny) then + trcrn(i,j,nt_apnd,1) = & + trcrn(i,j,nt_apnd,1) * alvl*area1 & + / (trcrn(i,j,nt_alvl,1)*aicen(i,j,1)) + endif + endif + endif + + enddo ! ij + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, jcells + i = indxi2(ij) + j = indxj2(ij) + m = indxij2(ij) + + if (vicen(i,j,1) > c0) then + ! factor of nilyr cancels out + ! enthalpy + trcrn(i,j,nt_qice+k-1,1) = & + (trcrn(i,j,nt_qice+k-1,1)*vice1(ij) & + + qi0new(m)*vi0new(m))/vicen(i,j,1) + ! salinity + trcrn(i,j,nt_sice+k-1,1) = & + (trcrn(i,j,nt_sice+k-1,1)*vice1(ij) & + + Sprofile(m,k)*vi0new(m))/vicen(i,j,1) + endif + enddo + enddo + + if (l_conservation_check) then + + ! initial ice volume in each grid cell + eicen(:,:,:) = c0 + do n = 1, ncat + do k = 1, nilyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + eicen(i,j,n) = eicen(i,j,n) + trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vicen, vice_final) + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + eicen, eice_final) + + fieldid = 'vice, add_new_ice' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + vice_init, vice_final, & + puny, l_stop, & + istop, jstop) + + fieldid = 'eice, add_new_ice' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + eice_init, eice_final, & + puny*Lfresh*rhoi, l_stop, & + istop, jstop) + if (l_stop) return + + endif ! l_conservation_check + + !----------------------------------------------------------------- + ! Biogeochemistry + !----------------------------------------------------------------- + if (tr_brine .or. 
skl_bgc) & + call add_new_ice_bgc (nx_block, ny_block, dt, & + icells, jcells, kcells, & + indxi, indxj, & + indxi2, indxj2, indxij2, & + indxi3, indxj3, indxij3, & + aicen_init, vicen_init, vi0_init, & + aicen, vicen, vi0new, & + ntrcr, trcrn, nbtrcr, & + sss, ocean_bio, flux_bio, & + hsurp, & + l_stop, istop, jstop) + + end subroutine add_new_ice + +!======================================================================= + + end module ice_therm_itd + +!======================================================================= diff --git a/source/ice_therm_mushy.F90 b/source/ice_therm_mushy.F90 new file mode 100755 index 00000000..95d61340 --- /dev/null +++ b/source/ice_therm_mushy.F90 @@ -0,0 +1,3925 @@ +! SVN:$Id: ice_therm_mushy.F90 713 2013-09-06 18:45:51Z akt $ + +!======================================================================= + +module ice_therm_mushy + + use ice_kinds_mod + use ice_constants + use ice_domain_size, only: nilyr, nslyr + use ice_itd, only: hs_min + use ice_therm_shared, only: ferrmax + + implicit none + + private + public :: temperature_mush, & + temperature_snow, & + liquid_fraction, & + temperature_mush_liquid_fraction, & + liquidus_brine_salinity_mush, & + liquidus_temperature_mush, & + temperature_changes_salinity, & + enthalpy_mush, & + enthalpy_of_melting, & + permeability + + !----------------------------------------------------------------- + ! namelist parameters + !----------------------------------------------------------------- + real(kind=dbl_kind), public :: & + a_rapid_mode , & ! channel radius for rapid drainage mode (m) + Rac_rapid_mode , & ! critical Rayleigh number for rapid drainage mode + aspect_rapid_mode , & ! aspect ratio for rapid drainage mode (larger is wider) + dSdt_slow_mode , & ! slow mode drainage strength (m s-1 K-1) + phi_c_slow_mode , & ! critical liquid fraction porosity cutoff for slow mode + phi_i_mushy ! liquid fraction of congelation ice + + !----------------------------------------------------------------- + ! Constants for Liquidus relation from Assur (1958) + !----------------------------------------------------------------- + + ! liquidus relation - higher temperature region + real(kind=dbl_kind), parameter :: & + az1_liq = -18.48_dbl_kind, & + bz1_liq = 0.0_dbl_kind + + ! liquidus relation - lower temperature region + real(kind=dbl_kind), parameter :: & + az2_liq = -10.3085_dbl_kind, & + bz2_liq = 62.4_dbl_kind + + ! liquidus break + real(kind=dbl_kind), parameter :: & + Tb_liq = -7.6362968855167352_dbl_kind, & ! temperature of liquidus break + Sb_liq = 123.66702800276086_dbl_kind ! salinity of liquidus break + + ! basic liquidus relation constants + real(kind=dbl_kind), parameter :: & + az1p_liq = az1_liq / c1000, & + bz1p_liq = bz1_liq / c1000, & + az2p_liq = az2_liq / c1000, & + bz2p_liq = bz2_liq / c1000 + + ! quadratic constants - higher temperature region + real(kind=dbl_kind), parameter :: & + AS1_liq = az1p_liq * (rhow * cp_ocn - rhoi * cp_ice) , & + AC1_liq = rhoi * cp_ice * az1_liq , & + BS1_liq = (c1 + bz1p_liq) * (rhow * cp_ocn - rhoi * cp_ice) & + + rhoi * Lfresh * az1p_liq , & + BQ1_liq = -az1_liq , & + BC1_liq = rhoi * cp_ice * bz1_liq - rhoi * Lfresh * az1_liq, & + CS1_liq = rhoi * Lfresh * (c1 + bz1p_liq) , & + CQ1_liq = -bz1_liq , & + CC1_liq = -rhoi * Lfresh * bz1_liq + + ! 
quadratic constants - lower temperature region + real(kind=dbl_kind), parameter :: & + AS2_liq = az2p_liq * (rhow * cp_ocn - rhoi * cp_ice) , & + AC2_liq = rhoi * cp_ice * az2_liq , & + BS2_liq = (c1 + bz2p_liq) * (rhow * cp_ocn - rhoi * cp_ice) & + + rhoi * Lfresh * az2p_liq , & + BQ2_liq = -az2_liq , & + BC2_liq = rhoi * cp_ice * bz2_liq - rhoi * Lfresh * az2_liq, & + CS2_liq = rhoi * Lfresh * (c1 + bz2p_liq) , & + CQ2_liq = -bz2_liq , & + CC2_liq = -rhoi * Lfresh * bz2_liq + + ! break enthalpy constants + real(kind=dbl_kind), parameter :: & + D_liq = ((c1 + az1p_liq*Tb_liq + bz1p_liq) & + / ( az1_liq*Tb_liq + bz1_liq)) & + * ((cp_ocn*rhow - cp_ice*rhoi)*Tb_liq + Lfresh*rhoi), & + E_liq = cp_ice*rhoi*Tb_liq - Lfresh*rhoi + + ! just fully melted enthapy constants + real(kind=dbl_kind), parameter :: & + F1_liq = ( -c1000 * cp_ocn * rhow) / az1_liq , & + G1_liq = -c1000 , & + H1_liq = (-bz1_liq * cp_ocn * rhow) / az1_liq , & + F2_liq = ( -c1000 * cp_ocn * rhow) / az2_liq , & + G2_liq = -c1000 , & + H2_liq = (-bz2_liq * cp_ocn * rhow) / az2_liq + + ! warmer than fully melted constants + real(kind=dbl_kind), parameter :: & + I_liq = c1 / (cp_ocn * rhow) + + ! temperature to brine salinity + real(kind=dbl_kind), parameter :: & + J1_liq = bz1_liq / az1_liq , & + K1_liq = c1 / c1000 , & + L1_liq = (c1 + bz1p_liq) / az1_liq , & + J2_liq = bz2_liq / az2_liq , & + K2_liq = c1 / c1000 , & + L2_liq = (c1 + bz2p_liq) / az2_liq + + ! brine salinity to temperature + real(kind=dbl_kind), parameter :: & + M1_liq = az1_liq , & + N1_liq = -az1p_liq , & + O1_liq = -bz1_liq / az1_liq , & + M2_liq = az2_liq , & + N2_liq = -az2p_liq , & + O2_liq = -bz2_liq / az2_liq + + !----------------------------------------------------------------- + ! Other parameters + !----------------------------------------------------------------- + + real(kind=dbl_kind), parameter :: & + ki = 2.3_dbl_kind , & ! fresh ice conductivity (W m-1 K-1) + kb = 0.5375_dbl_kind ! brine conductivity (W m-1 K-1) + + real(kind=dbl_kind), parameter :: & + dTemp_errmax = 5.0e-4_dbl_kind ! max allowed change in temperature + ! between iterations + + !----------------------------------------------------------------- + ! debugging parameters + !----------------------------------------------------------------- + + integer(kind=int_kind) :: & + g_i , & ! current grid i-index (for diagnostics) + g_j ! current grid j-index (for diagnostics) + +!======================================================================= + +contains + +!======================================================================= + + subroutine temperature_changes_salinity(nx_block, ny_block, & + my_task, istep1, & + dt, icells, & + indxi, indxj, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fswsfc, fswint, & + Sswabs, Iswabs, & + hilyr, hslyr, & + zqin, zTin, & + zqsn, zTsn, & + zSin, & + trcrn, & + Tsf, Tbot, & + sss, & + fsensn, flatn, & + flwoutn, fsurfn, & + fcondtopn,fcondbot, & + fadvocn, snoice, & + einit, l_stop, & + istop, jstop) + + ! solve the changes in enthalpy and bulk salinity for the mushy vertical thermodynamics + + use ice_domain_size, only: max_ntrcr + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_diag + use ice_state, only: nt_apnd, nt_hpnd, tr_pond + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + my_task , & ! task number (diagnostic only) + istep1 , & ! time step index (diagnostic only) + icells ! number of cells with aicen > puny + + real (kind=dbl_kind), intent(in) :: & + dt ! 
time step (s) + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef , & ! transfer coefficient for latent heat + Tbot , & ! ice bottom surface temperature (deg C) + sss ! sea surface salinity (ppt) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + fswint ! SW absorbed in ice interior below surface (W m-2) + + real (kind=dbl_kind), dimension (icells), intent(inout) :: & + hilyr , & ! ice layer thickness (m) + hslyr ! snow layer thickness (m) + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + einit ! initial energy of melting (J m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(inout) :: & + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(inout) :: & + Iswabs ! SW radiation absorbed in ice layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + fsurfn , & ! net flux to top surface, excluding fcondtopn + fcondtopn , & ! downward cond flux at top surface (W m-2) + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + flwoutn , & ! upward LW at surface (W m-2) + fadvocn ! advection heat flux to ocean + + real (kind=dbl_kind), dimension (icells), intent(out):: & + fcondbot ! downward cond flux at bottom surface (W m-2) + + real (kind=dbl_kind), dimension (icells), & + intent(inout):: & + Tsf ! ice/snow surface temperature (C) + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(inout) :: & + zqin , & ! ice layer enthalpy (J m-3) + zTin , & ! internal ice layer temperatures + zSin ! internal ice layer salinities + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(inout) :: & + zqsn , & ! snow layer enthalpy (J m-3) + zTsn ! internal snow layer temperatures + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_ntrcr), & + intent(inout) :: & + trcrn + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + snoice ! snow-ice formation (m/step-->cm/day) + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, print diagnostics and abort model + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! i and j indices of cell where model fails + + real(kind=dbl_kind) :: & + trc_hpnd, & ! pond depth tracer + trc_apnd ! pond area tracer + + integer(kind=int_kind) :: & + ij, & ! icells index + i, & ! i index + j ! j index + + ! 
loop over cells + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + g_i = i + g_j = j + + if (tr_pond) then + trc_hpnd = trcrn(i,j,nt_hpnd) + trc_apnd = trcrn(i,j,nt_apnd) + else + trc_hpnd = c0 + trc_apnd = c0 + endif + + call temperature_changes_column(istep1, dt, & + rhoa(i,j), flw(i,j), & + potT(i,j), Qa(i,j), & + shcoef(i,j), lhcoef(i,j), & + fswsfc(i,j), fswint(i,j), & + Sswabs(i,j,:), Iswabs(i,j,:), & + hilyr(ij), hslyr(ij), & + zqin(ij,:), zTin(ij,:), & + zqsn(ij,:), zTsn(ij,:), & + zSin(ij,:), & + trc_hpnd, trc_apnd, & + Tsf(ij), Tbot(i,j), & + sss(i,j), & + fsensn(i,j), flatn(i,j), & + flwoutn(i,j), fsurfn(i,j), & + fcondtopn(i,j),fcondbot(ij), & + fadvocn(i,j), snoice(i,j), & + einit(ij), l_stop) + + if (tr_pond) then + trcrn(i,j,nt_hpnd) = trc_hpnd + trcrn(i,j,nt_apnd) = trc_apnd + endif + + if (l_stop) then + istop = i + jstop = j + write(nu_diag,*) "ice_therm_mushy solver failure: istep1, my_task, i, j:", istep1, my_task, i, j + call abort_ice("ice_therm_mushy solver failure") + endif + + enddo ! ij + + end subroutine temperature_changes_salinity + +!======================================================================= +! Solver with separation of cold and melting phases +!======================================================================= + + subroutine temperature_changes_column(istep1, dt, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fswsfc, fswint, & + Sswabs, Iswabs, & + hilyr, hslyr, & + zqin, zTin, & + zqsn, zTsn, & + zSin, & + hpond, apond, & + Tsf, Tbot, & + sss, & + fsensn, flatn, & + flwoutn, fsurfn, & + fcondtop, fcondbot, & + fadvheat, snoice, & + einit_old,lstop) + + ! solve the enthalpy and bulk salinity of the ice for a single column + + integer (kind=int_kind), intent(in) :: & + istep1 ! time step index (diagnostic only) + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + real (kind=dbl_kind), intent(in) :: & + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef , & ! transfer coefficient for latent heat + Tbot , & ! ice bottom surfce temperature (deg C) + sss ! sea surface salinity (PSU) + + real (kind=dbl_kind), intent(inout) :: & + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + fswint ! SW absorbed in ice interior below surface (W m-2) + + real (kind=dbl_kind), intent(inout) :: & + hilyr , & ! ice layer thickness (m) + hslyr ! snow layer thickness (m) + + real (kind=dbl_kind), intent(in) :: & + einit_old ! initial energy of melting (J m-2) + + real (kind=dbl_kind), dimension (nslyr), & + intent(inout) :: & + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real (kind=dbl_kind), dimension (nilyr), & + intent(inout) :: & + Iswabs ! SW radiation absorbed in ice layers (W m-2) + + real (kind=dbl_kind), intent(inout):: & + fsurfn , & ! net flux to top surface, excluding fcondtopn + fcondtop , & ! downward cond flux at top surface (W m-2) + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + flwoutn ! upward LW at surface (W m-2) + + real (kind=dbl_kind), intent(out):: & + fcondbot , & ! downward cond flux at bottom surface (W m-2) + fadvheat , & ! flow of heat to ocean due to advection (W m-2) + snoice ! snow ice formation + + real (kind=dbl_kind), intent(inout):: & + Tsf , & ! ice/snow surface temperature (C) + hpond , & ! melt pond depth (m) + apond ! 
melt pond area + + real (kind=dbl_kind), dimension (nilyr), intent(inout) :: & + zqin , & ! ice layer enthalpy (J m-3) + zTin , & ! internal ice layer temperatures + zSin ! internal ice layer salinities + + real (kind=dbl_kind), dimension (nslyr), intent(inout) :: & + zqsn , & ! snow layer enthalpy (J m-3) + zTsn ! internal snow layer temperatures + + logical (kind=log_kind), intent(inout) :: & + lstop ! solver failure flag + + ! local variables + real(kind=dbl_kind), dimension(1:nilyr) :: & + zqin0 , & ! ice layer enthalpy (J m-3) at start of timestep + zTin0 , & ! internal ice layer temperatures (C) at start of timestep + zSin0 , & ! internal ice layer salinities (ppt) at start of timestep + phi , & ! liquid fraction + km , & ! ice conductivity (W m-1 K-1) + dSdt ! gravity drainage desalination rate for slow mode (ppt s-1) + + real(kind=dbl_kind), dimension(1:nilyr+1) :: & + Sbr , & ! brine salinity (ppt) + qbr ! brine enthalpy (J m-3) + + real(kind=dbl_kind), dimension(0:nilyr) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(1:nslyr) :: & + zqsn0 , & ! snow layer enthalpy (J m-3) at start of timestep + zTsn0 , & ! internal snow layer temperatures (C) at start of timestep + ks ! snow conductivity (W m-1 K-1) + + real(kind=dbl_kind) :: & + Tsf0 , & ! ice/snow surface temperature (C) at start of timestep + hin , & ! ice thickness (m) + hsn , & ! snow thickness (m) + hslyr_min , & ! minimum snow layer thickness (m) + w , & ! vertical flushing Darcy velocity (m/s) + qocn , & ! ocean brine enthalpy (J m-3) + qpond , & ! melt pond brine enthalpy (J m-3) + Spond ! melt pond salinity (ppt) + + integer(kind=int_kind) :: & + k ! ice/snow layer index + + logical(kind=log_kind) :: & + lsnow ! snow presence: T: has snow, F: no snow + + lstop = .false. + fadvheat = c0 + snoice = c0 + + Tsf0 = Tsf + zqsn0 = zqsn + zqin0 = zqin + zSin0 = zSin + zTsn0 = zTsn + zTin0 = zTin + + Spond = c0 + qpond = enthalpy_brine(c0) + + hslyr_min = hs_min / real(nslyr, dbl_kind) + + lsnow = (hslyr > hslyr_min) + + hin = hilyr * real(nilyr,dbl_kind) + + qocn = enthalpy_brine(Tbot) + + if (lsnow) then + hsn = hslyr * real(nslyr,dbl_kind) + else + hsn = c0 + endif + + do k = 1, nilyr + phi(k) = liquid_fraction(temperature_mush(zqin(k),zSin(k)),zSin(k)) + enddo ! k + + ! calculate vertical bulk darcy flow + call flushing_velocity(zTin, zSin, & + phi, & + hin, hsn, & + hilyr, & + hpond, apond, & + dt, w) + + ! calculate quantities related to drainage + call explicit_flow_velocities(zSin, & + zTin, Tsf, & + Tbot, q, & + dSdt, Sbr, & + qbr, dt, & + sss, qocn, & + hilyr, hin) + + ! calculate the conductivities + call conductivity_mush_array(zqin0, zSin0, km) + + if (lsnow) then + ! case with snow + + ! calculate the snow conductivities + call conductivity_snow_array(ks) + + ! run the two stage solver + call two_stage_solver_snow(Tsf, Tsf0, & + zqsn, zqsn0, & + zqin, zqin0, & + zSin, zSin0, & + zTsn, zTsn0, & + zTin, zTin0, & + phi, Tbot, & + km, ks, & + q, dSdt, & + w, dt, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + Iswabs, Sswabs, & + qpond, qocn, & + Spond, sss, & + hilyr, hslyr, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + lstop) + + ! given the updated enthalpy and bulk salinity calculate other quantities + do k = 1, nslyr + zTsn(k) = temperature_snow(zqsn(k)) + enddo ! 
k + + do k = 1, nilyr + zTin(k) = temperature_mush_liquid_fraction(zqin(k), phi(k)) + Sbr(k) = liquidus_brine_salinity_mush(zTin(k)) + qbr(k) = enthalpy_brine(zTin(k)) + enddo ! k + + else + ! case without snow + + ! run the two stage solver + call two_stage_solver_nosnow(Tsf, Tsf0, & + zqsn, zqsn0, & + zqin, zqin0, & + zSin, zSin0, & + zTsn, zTsn0, & + zTin, zTin0, & + phi, Tbot, & + km, ks, & + q, dSdt, & + w, dt, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + Iswabs, Sswabs, & + qpond, qocn, & + Spond, sss, & + hilyr, hslyr, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + lstop) + + ! given the updated enthalpy and bulk salinity calculate other quantities + do k = 1, nilyr + zTin(k) = temperature_mush_liquid_fraction(zqin(k), phi(k)) + Sbr(k) = liquidus_brine_salinity_mush(zTin(k)) + qbr(k) = enthalpy_brine(zTin(k)) + enddo ! k + + endif + + if (lstop) then + return + end if + + ! drain ponds from flushing + call flush_pond(w, hin, hpond, apond, dt) + + ! flood snow ice + call flood_ice(hsn, hin, & + hslyr, hilyr, & + zqsn, zqin, & + phi, dt, & + zSin, Sbr, & + sss, qocn, & + snoice, fadvheat) + + end subroutine temperature_changes_column + +!======================================================================= + + subroutine two_stage_solver_snow(Tsf, Tsf0, & + zqsn, zqsn0, & + zqin, zqin0, & + zSin, zSin0, & + zTsn, zTsn0, & + zTin, zTin0, & + phi, Tbot, & + km, ks, & + q, dSdt, & + w, dt, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + Iswabs, Sswabs, & + qpond, qocn, & + Spond, sss, & + hilyr, hslyr, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + lstop) + + ! solve the vertical temperature and salt change for case with snow + ! 1) determine what type of surface condition existed previously - cold or melting + ! 2) solve the system assuming this condition persists + ! 3) check the consistency of the surface condition of the solution + ! 4) If the surface condition is inconsistent resolve for the other surface condition + ! 5) If neither solution is consistent the resolve the inconsistency + + use ice_calendar, only: istep1 + use ice_fileunits, only: nu_diag + + real(kind=dbl_kind), intent(inout) :: & + Tsf ! snow surface temperature (C) + + real(kind=dbl_kind), intent(out) :: & + fcondtop , & ! downward cond flux at top surface (W m-2) + fcondbot , & ! downward cond flux at bottom surface (W m-2) + flwoutn , & ! upward LW at surface (W m-2) + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + fsurfn , & ! net flux to top surface, excluding fcondtop + fadvheat ! flow of heat to ocean due to advection (W m-2) + + real(kind=dbl_kind), intent(in) :: & + Tsf0 ! snow surface temperature (C) at beginning of timestep + + real(kind=dbl_kind), dimension(1:nslyr), intent(inout) :: & + zqsn , & ! snow layer enthalpy (J m-3) + zTsn ! snow layer temperature (C) + + real(kind=dbl_kind), dimension(1:nslyr), intent(in) :: & + zqsn0 , & ! snow layer enthalpy (J m-3) at beginning of timestep + zTsn0 , & ! snow layer temperature (C) at beginning of timestep + ks , & ! snow conductivity (W m-1 K-1) + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real(kind=dbl_kind), dimension(1:nilyr), intent(inout) :: & + zqin , & ! ice layer enthalpy (J m-3) + zSin , & ! ice layer bulk salinity (ppt) + zTin , & ! ice layer temperature (C) + phi ! 
ice layer liquid fraction + + real(kind=dbl_kind), dimension(1:nilyr), intent(in) :: & + zqin0 , & ! ice layer enthalpy (J m-3) at beginning of timestep + zSin0 , & ! ice layer bulk salinity (ppt) at beginning of timestep + zTin0 , & ! ice layer temperature (C) at beginning of timestep + km , & ! ice conductivity (W m-1 K-1) + Iswabs , & ! SW radiation absorbed in ice layers (W m-2) + dSdt ! gravity drainage desalination rate for slow mode (ppt s-1) + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), intent(in) :: & + dt , & ! time step (s) + Tbot , & ! ice bottom surfce temperature (deg C) + hilyr , & ! ice layer thickness (m) + hslyr , & ! snow layer thickness (m) + fswint , & ! SW absorbed in ice interior below surface (W m-2) + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef , & ! transfer coefficient for latent heat + w , & ! vertical flushing Darcy velocity (m/s) + qpond , & ! melt pond brine enthalpy (J m-3) + qocn , & ! ocean brine enthalpy (J m-3) + Spond , & ! melt pond salinity (ppt) + sss ! sea surface salinity (PSU) + + logical(kind=log_kind), intent(inout) :: & + lstop ! solver failure flag + + real(kind=dbl_kind) :: & + fcondtop1 , & ! first stage downward cond flux at top surface (W m-2) + fsurfn1 , & ! first stage net flux to top surface, excluding fcondtop + Tsf1 ! first stage ice surface temperature (C) + + + ! determine if surface is initially cold or melting + if (Tsf < c0) then + + ! initially cold + + ! solve the system for cold and snow + call picard_solver(.true., .true., & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + ! halt if solver failed + if (lstop) return + + ! check if solution is consistent - surface should still be cold + if (Tsf < dTemp_errmax) then + + ! solution is consistent - have solution so finish + return + + else + + ! solution is inconsistent - surface is warmer than melting + ! resolve assuming surface is melting + Tsf1 = Tsf + + ! reset the solution to initial values + Tsf = c0 + zqsn = zqsn0 + zqin = zqin0 + zSin = zSin0 + + ! solve the system for melting and snow + call picard_solver(.true., .false., & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + ! halt if solver failed + if (lstop) return + + ! check if solution is consistent + ! surface conductive heat flux should be less than + ! incoming surface heat flux + if (fcondtop - fsurfn < ferrmax) then + + ! solution is consistent - have solution so finish + return + + else + + ! solution is inconsistent + call two_stage_inconsistency(1, Tsf1, c0, fcondtop, fsurfn) + return + + endif ! surface flux consistency + + endif ! surface temperature consistency + + else + + ! initially melting + Tsf = c0 + + ! 
solve the system for melting and snow + call picard_solver(.true., .false., & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + ! halt if solver failed + if (lstop) return + + ! check if solution is consistent + ! surface conductive heat flux should be less than + ! incoming surface heat flux + if (fcondtop - fsurfn < ferrmax) then + + ! solution is consistent - have solution so finish + return + + else + + ! solution is inconsistent - resolve assuming other surface condition + ! assume surface is cold + fcondtop1 = fcondtop + fsurfn1 = fsurfn + + ! reset the solution to initial values + Tsf = Tsf0 + zqsn = zqsn0 + zqin = zqin0 + zSin = zSin0 + + ! solve the system for cold and snow + call picard_solver(.true., .true., & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + ! halt if solver failed + if (lstop) return + + ! check if solution is consistent - surface should be cold + if (Tsf < dTemp_errmax) then + + ! solution is consistent - have solution so finish + return + + else + + ! solution is inconsistent + ! failed to find a solution so need to refine solutions until consistency found + call two_stage_inconsistency(2, Tsf, c0, fcondtop1, fsurfn1) + return + + endif ! surface temperature consistency + + endif ! surface flux consistency + + endif + + end subroutine two_stage_solver_snow + +!======================================================================= + + subroutine two_stage_solver_nosnow(Tsf, Tsf0, & + zqsn, zqsn0, & + zqin, zqin0, & + zSin, zSin0, & + zTsn, zTsn0, & + zTin, zTin0, & + phi, Tbot, & + km, ks, & + q, dSdt, & + w, dt, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + Iswabs, Sswabs, & + qpond, qocn, & + Spond, sss, & + hilyr, hslyr, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + lstop) + + ! solve the vertical temperature and salt change for case with no snow + ! 1) determine what type of surface condition existed previously - cold or melting + ! 2) solve the system assuming this condition persists + ! 3) check the consistency of the surface condition of the solution + ! 4) If the surface condition is inconsistent resolve for the other surface condition + ! 5) If neither solution is consistent the resolve the inconsistency + + use ice_calendar, only: istep1 + + real(kind=dbl_kind), intent(inout) :: & + Tsf ! ice surface temperature (C) + + real(kind=dbl_kind), intent(out) :: & + fcondtop , & ! downward cond flux at top surface (W m-2) + fcondbot , & ! downward cond flux at bottom surface (W m-2) + flwoutn , & ! upward LW at surface (W m-2) + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + fsurfn , & ! net flux to top surface, excluding fcondtop + fadvheat ! flow of heat to ocean due to advection (W m-2) + + real(kind=dbl_kind), intent(in) :: & + Tsf0 ! ice surface temperature (C) at beginning of timestep + + real(kind=dbl_kind), dimension(1:nslyr), intent(inout) :: & + zqsn , & ! 
snow layer enthalpy (J m-3) + zTsn ! snow layer temperature (C) + + real(kind=dbl_kind), dimension(1:nslyr), intent(in) :: & + zqsn0 , & ! snow layer enthalpy (J m-3) at beginning of timestep + zTsn0 , & ! snow layer temperature (C) at beginning of timestep + ks , & ! snow conductivity (W m-1 K-1) + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real(kind=dbl_kind), dimension(1:nilyr), intent(inout) :: & + zqin , & ! ice layer enthalpy (J m-3) + zSin , & ! ice layer bulk salinity (ppt) + zTin , & ! ice layer temperature (C) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), dimension(1:nilyr), intent(in) :: & + zqin0 , & ! ice layer enthalpy (J m-3) at beginning of timestep + zSin0 , & ! ice layer bulk salinity (ppt) at beginning of timestep + zTin0 , & ! ice layer temperature (C) at beginning of timestep + km , & ! ice conductivity (W m-1 K-1) + Iswabs , & ! SW radiation absorbed in ice layers (W m-2) + dSdt ! gravity drainage desalination rate for slow mode (ppt s-1) + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), intent(in) :: & + dt , & ! time step (s) + hilyr , & ! ice layer thickness (m) + hslyr , & ! snow layer thickness (m) + Tbot , & ! ice bottom surfce temperature (deg C) + fswint , & ! SW absorbed in ice interior below surface (W m-2) + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef , & ! transfer coefficient for latent heat + w , & ! vertical flushing Darcy velocity (m/s) + qpond , & ! melt pond brine enthalpy (J m-3) + qocn , & ! ocean brine enthalpy (J m-3) + Spond , & ! melt pond salinity (ppt) + sss ! sea surface salinity (PSU) + + logical, intent(inout) :: & + lstop ! solver failure flag + + real(kind=dbl_kind) :: & + Tmlt , & ! upper ice layer melting temperature (C) + fcondtop1 , & ! first stage downward cond flux at top surface (W m-2) + fsurfn1 , & ! first stage net flux to top surface, excluding fcondtop + Tsf1 ! first stage ice surface temperature (C) + + ! initial surface melting temperature + Tmlt = liquidus_temperature_mush(zSin0(1)) + + ! determine if surface is initially cold or melting + if (Tsf < Tmlt) then + + ! initially cold + + ! solve the system for cold and no snow + call picard_solver(.false., .true., & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + ! halt if solver failed + if (lstop) return + + ! check if solution is consistent - surface should still be cold + if (Tsf < Tmlt + dTemp_errmax) then + + ! solution is consistent - have solution so finish + return + + else + ! solution is inconsistent - surface is warmer than melting + ! resolve assuming surface is melting + Tsf1 = Tsf + + ! reset the solution to initial values + Tsf = liquidus_temperature_mush(zSin0(1)) + zqin = zqin0 + zSin = zSin0 + + ! 
solve the system for melt and no snow + call picard_solver(.false., .false., & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + ! halt if solver failed + if (lstop) return + + ! check if solution is consistent + ! surface conductive heat flux should be less than + ! incoming surface heat flux + if (fcondtop - fsurfn < ferrmax) then + + ! solution is consistent - have solution so finish + return + + else + + ! solution is inconsistent + call two_stage_inconsistency(3, Tsf1, Tmlt, fcondtop, fsurfn) + return + + endif + + endif + + else + ! initially melting + + ! solve the system for melt and no snow + Tsf = Tmlt + + call picard_solver(.false., .false., & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + ! halt if solver failed + if (lstop) return + + ! check if solution is consistent + ! surface conductive heat flux should be less than + ! incoming surface heat flux + if (fcondtop - fsurfn < ferrmax) then + + ! solution is consistent - have solution so finish + return + + else + + ! solution is inconsistent - resolve assuming other surface condition + ! assume surface is cold + fcondtop1 = fcondtop + fsurfn1 = fsurfn + + ! reset the solution to initial values + Tsf = Tsf0 + zqin = zqin0 + zSin = zSin0 + + ! solve the system for cold and no snow + call picard_solver(.false., .true., & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + ! halt if solver failed + if (lstop) return + + ! check if solution is consistent - surface should be cold + if (Tsf < Tmlt + dTemp_errmax) then + + ! solution is consistent - have solution so finish + return + + else + + ! 
solution is inconsistent + call two_stage_inconsistency(4, Tsf, Tmlt, fcondtop1, fsurfn1) + return + + endif + + endif + + endif + + end subroutine two_stage_solver_nosnow + +!======================================================================= + + subroutine two_stage_inconsistency(type, Tsf, Tmlt, fcondtop, fsurfn) + + use ice_calendar, only: istep1 + use ice_fileunits, only: nu_diag + use ice_communicate, only: my_task + + integer, intent(in) :: & + type + + real(kind=dbl_kind), intent(in) :: & + Tsf, & + Tmlt, & + fcondtop, & + fsurfn + + write(nu_diag,*) "ice_therm_mushy: two stage inconsistency" + write(nu_diag,*) "istep1, my_task, type:", istep1, my_task, type + + if (type == 1) then + + write(nu_diag,*) "First stage : Tsf, Tmlt, dTemp_errmax, Tsf - Tmlt - dTemp_errmax" + write(nu_diag,*) " :", Tsf, Tmlt, dTemp_errmax, Tsf - Tmlt - dTemp_errmax + write(nu_diag,*) "Second stage : fcondtop, fsurfn, ferrmax, fcondtop - fsurfn - ferrmax" + write(nu_diag,*) " :", fcondtop, fsurfn, ferrmax, fcondtop - fsurfn - ferrmax + + else if (type == 2) then + + write(nu_diag,*) "First stage : Tsf, Tmlt, dTemp_errmax, Tsf - Tmlt - dTemp_errmax" + write(nu_diag,*) " :", Tsf, Tmlt, dTemp_errmax, Tsf - Tmlt - dTemp_errmax + write(nu_diag,*) "Second stage : fcondtop, fsurfn, ferrmax, fcondtop - fsurfn - ferrmax" + write(nu_diag,*) " :", fcondtop, fsurfn, ferrmax, fcondtop - fsurfn - ferrmax + + else if (type == 3) then + + write(nu_diag,*) "First stage : Tsf, Tmlt, dTemp_errmax, Tsf - Tmlt - dTemp_errmax" + write(nu_diag,*) " :", Tsf, Tmlt, dTemp_errmax, Tsf - Tmlt - dTemp_errmax + write(nu_diag,*) "Second stage : fcondtop, fsurfn, ferrmax, fcondtop - fsurfn - ferrmax" + write(nu_diag,*) " :", fcondtop, fsurfn, ferrmax, fcondtop - fsurfn - ferrmax + + else if (type == 4) then + + write(nu_diag,*) "First stage : fcondtop, fsurfn, ferrmax, fcondtop - fsurfn - ferrmax" + write(nu_diag,*) " :", fcondtop, fsurfn, ferrmax, fcondtop - fsurfn - ferrmax + write(nu_diag,*) "Second stage : Tsf, Tmlt, dTemp_errmax, Tsf - Tmlt - dTemp_errmax" + write(nu_diag,*) " :", Tsf, Tmlt, dTemp_errmax, Tsf - Tmlt - dTemp_errmax + + endif + + stop + + end subroutine two_stage_inconsistency + +!======================================================================= +! Picard/TDMA based solver +!======================================================================= + + subroutine prep_picard(lsnow, zqsn, & + zqin, zSin, & + hilyr, hslyr, & + km, ks, & + zTin, zTsn, & + Sbr, phi, & + dxp, kcstar, & + einit) + + logical, intent(in) :: & + lsnow ! snow presence: T: has snow, F: no snow + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zqin , & ! ice layer enthalpy (J m-3) + zSin , & ! ice layer bulk salinity (ppt) + km ! ice conductivity (W m-1 K-1) + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zqsn , & ! snow layer enthalpy (J m-3) + ks ! snow conductivity (W m-1 K-1) + + real(kind=dbl_kind), intent(in) :: & + hilyr , & ! ice layer thickness (m) + hslyr ! snow layer thickness (m) + + real(kind=dbl_kind), dimension(nilyr), intent(out) :: & + zTin , & ! ice layer temperature (C) + Sbr , & ! ice layer brine salinity (ppt) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), dimension(nslyr), intent(out) :: & + zTsn ! snow layer temperature (C) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(out) :: & + dxp , & ! distances between grid points (m) + kcstar ! interface conductivities (W m-1 K-1) + + real(kind=dbl_kind), intent(out) :: & + einit ! 
initial total energy (J) + + integer(kind=int_kind) :: k + + ! calculate initial ice temperatures + do k = 1, nilyr + zTin(k) = temperature_mush(zqin(k), zSin(k)) + Sbr(k) = liquidus_brine_salinity_mush(zTin(k)) + phi(k) = liquid_fraction(zTin(k), zSin(k)) + enddo ! k + + if (lsnow) then + + do k = 1, nslyr + zTsn(k) = temperature_snow(zqsn(k)) + enddo ! k + + endif ! lsnow + + ! interface distances + call calc_intercell_thickness(lsnow, hilyr, hslyr, dxp) + + ! interface conductivities + call calc_intercell_conductivity(lsnow, km, ks, hilyr, hslyr, kcstar) + + ! total energy content + call total_energy_content(lsnow, & + zqin, zqsn, & + hilyr, hslyr, & + einit) + + end subroutine prep_picard + +!======================================================================= + + subroutine picard_solver(lsnow, lcold, & + Tsf, zqsn, & + zqin, zSin, & + zTin, zTsn, & + phi, dt, & + hilyr, hslyr, & + km, ks, & + Iswabs, Sswabs, & + Tbot, & + fswint, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fcondtop, fcondbot, & + fadvheat, & + flwoutn, fsensn, & + flatn, fsurfn, & + qpond, qocn, & + Spond, sss, & + q, dSdt, & + w, lstop) + + use ice_therm_shared, only: surface_heat_flux, dsurface_heat_flux_dTsf + use ice_communicate, only: my_task + use ice_calendar, only: istep1 + + logical, intent(in) :: & + lsnow , & ! snow presence: T: has snow, F: no snow + lcold ! surface cold: T: surface is cold, F: surface is melting + + real(kind=dbl_kind), intent(inout) :: & + Tsf ! snow surface temperature (C) + + real(kind=dbl_kind), intent(out) :: & + fcondtop , & ! downward cond flux at top surface (W m-2) + fcondbot , & ! downward cond flux at bottom surface (W m-2) + fadvheat ! flow of heat to ocean due to advection (W m-2) + + real(kind=dbl_kind), dimension(nilyr), intent(inout) :: & + zqin , & ! ice layer enthalpy (J m-3) + zSin , & ! ice layer bulk salinity (ppt) + zTin , & ! ice layer temperature (C) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), dimension(nslyr), intent(inout) :: & + zqsn , & ! snow layer enthalpy (J m-3) + zTsn ! snow layer temperature (C) + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + km , & ! ice conductivity (W m-1 K-1) + Iswabs , & ! SW radiation absorbed in ice layers (W m-2) + dSdt ! gravity drainage desalination rate for slow mode (ppt s-1) + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + ks , & ! snow conductivity (W m-1 K-1) + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real(kind=dbl_kind), intent(out) :: & + flwoutn , & ! upward LW at surface (W m-2) + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + fsurfn ! net flux to top surface, excluding fcondtop + + real(kind=dbl_kind), intent(in) :: & + dt , & ! time step (s) + hilyr , & ! ice layer thickness (m) + hslyr , & ! snow layer thickness (m) + Tbot , & ! ice bottom surfce temperature (deg C) + fswint , & ! SW absorbed in ice interior below surface (W m-2) + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef , & ! transfer coefficient for latent heat + qpond , & ! melt pond brine enthalpy (J m-3) + qocn , & ! ocean brine enthalpy (J m-3) + Spond , & ! 
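      ! State relations used by prep_picard above (a brief sketch; the functions
      ! referenced are defined elsewhere in this module): the layer temperature is
      ! recovered by inverting the mush enthalpy, zTin = temperature_mush(zqin, zSin);
      ! the brine salinity follows the liquidus, Sbr = liquidus_brine_salinity_mush(zTin);
      ! and, since the dissolved salt is assumed to reside entirely in the brine, the
      ! liquid fraction is essentially the ratio of bulk to brine salinity,
      ! phi = liquid_fraction(zTin, zSin) ~ zSin / Sbr (capped at one).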
melt pond salinity (ppt) + sss , & ! sea surface salinity (ppt) + w ! vertical flushing Darcy velocity (m/s) + + logical(kind=log_kind), intent(inout) :: & + lstop ! solver failure flag + + real(kind=dbl_kind), dimension(nilyr) :: & + Sbr , & ! ice layer brine salinity (ppt) + qbr , & ! ice layer brine enthalpy (J m-3) + zTin0 , & ! ice layer temperature (C) at start of timestep + zqin0 , & ! ice layer enthalpy (J m-3) at start of timestep + zSin0 , & ! ice layer bulk salinity (ppt) at start of timestep + zTin_prev ! ice layer temperature at previous iteration + + real(kind=dbl_kind), dimension(nslyr) :: & + zqsn0 , & ! snow layer enthalpy (J m-3) at start of timestep + zTsn0 , & ! snow layer temperature (C) at start of timestep + zTsn_prev ! snow layer temperature at previous iteration + + real(kind=dbl_kind), dimension(nslyr+nilyr+1) :: & + dxp , & ! distances between grid points (m) + kcstar ! interface conductivities (W m-1 K-1) + + real(kind=dbl_kind) :: & + Tsf0 , & ! snow surface temperature (C) at start of timestep + dfsurfn_dTsf , & ! derivative of net flux to top surface, excluding fcondtopn + dflwoutn_dTsf , & ! derivative of longwave flux wrt surface temperature + dfsensn_dTsf , & ! derivative of sensible heat flux wrt surface temperature + dflatn_dTsf , & ! derivative of latent heat flux wrt surface temperature + Tsf_prev , & ! snow surface temperature at previous iteration + einit , & ! initial total energy (J) + fadvheat_nit ! heat to ocean due to advection (W m-2) during iteration + + logical :: & + lconverged ! has Picard solver converged? + + integer :: & + nit ! Picard iteration count + + integer, parameter :: & + nit_max = 100 ! maximum number of Picard iterations + + lconverged = .false. + + ! prepare quantities for picard iteration + call prep_picard(lsnow, zqsn, & + zqin, zSin, & + hilyr, hslyr, & + km, ks, & + zTin, zTsn, & + Sbr, phi, & + dxp, kcstar, & + einit) + + Tsf0 = Tsf + zqin0 = zqin + zqsn0 = zqsn + zTin0 = zTin + zTsn0 = zTsn + zSin0 = zSin + + ! set prev variables + Tsf_prev = Tsf + zTsn_prev = zTsn + zTin_prev = zTin + + ! picard iteration + picard: do nit = 1, nit_max + + ! surface heat flux + call surface_heat_flux(Tsf, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + flwoutn, fsensn, & + flatn, fsurfn) + + ! derivative of heat flux with respect to surface temperature + call dsurface_heat_flux_dTsf(Tsf, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + dfsurfn_dTsf, dflwoutn_dTsf, & + dfsensn_dTsf, dflatn_dTsf) + + ! tridiagonal solve of new temperatures + call solve_heat_conduction(lsnow, lcold, & + Tsf, Tbot, & + zqin0, zqsn0, & + phi, dt, & + qpond, qocn, & + q, w, & + hilyr, hslyr, & + dxp, kcstar, & + Iswabs, Sswabs, & + fsurfn, dfsurfn_dTsf, & + zTin, zTsn,nit) + + ! update brine enthalpy + call picard_updates_enthalpy(zTin, qbr) + + ! drainage fluxes + call picard_drainage_fluxes(fadvheat_nit, q, & + qbr, qocn, & + hilyr) + + ! flushing fluxes + call picard_flushing_fluxes(fadvheat_nit, w, & + qbr, & + qocn, qpond) + + ! perform convergence check + call check_picard_convergence(lsnow, & + lconverged, nit, & + Tsf, Tsf_prev, & + zTin, zTin_prev,& + zTsn, zTsn_prev,& + phi, Tbot, & + zqin, zqsn, & + km, ks, & + hilyr, hslyr, & + fswint, & + einit, dt, & + fcondtop, fcondbot, & + fadvheat_nit) + + if (lconverged) exit + + Tsf_prev = Tsf + zTsn_prev = zTsn + zTin_prev = zTin + + enddo picard + + fadvheat = fadvheat_nit + + ! update the picard iterants + call picard_updates(zTin, & + Sbr, qbr) + + ! 
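      ! The loop above is a Picard (fixed-point) iteration: on each pass the linear
      ! heat-conduction problem is solved exactly for the new temperatures by the
      ! tridiagonal solve inside solve_heat_conduction, while the temperature-dependent
      ! quantities (brine enthalpy via picard_updates_enthalpy, and the drainage and
      ! flushing heat fluxes) are recomputed from the latest iterate. Iteration ends
      ! when check_picard_convergence is satisfied or after nit_max = 100 passes, in
      ! which case lstop is set and the two-stage caller returns immediately.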
solve for the salinity + call solve_salinity(zSin, Sbr, & + Spond, sss, & + q, dSdt, & + w, hilyr, & + dt) + + ! final surface heat flux + call surface_heat_flux(Tsf, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + flwoutn, fsensn, & + flatn, fsurfn) + + ! if not converged + if (.not. lconverged) then + + call picard_nonconvergence(Tsf0, Tsf, & + zTsn0, zTsn, & + zTin0, zTin, & + zSin0, zSin, & + zqsn0, zqsn, & + zqin0, phi) + lstop = .true. + + endif + + end subroutine picard_solver + +!======================================================================= + + subroutine picard_nonconvergence(Tsf0, Tsf, & + zTsn0, zTsn, & + zTin0, zTin, & + zSin0, zSin, & + zqsn0, zqsn, & + zqin0, phi) + + use ice_calendar, only: istep1 + use ice_fileunits, only: nu_diag + use ice_communicate, only: my_task + + real(kind=dbl_kind), intent(in) :: & + Tsf0 , & ! snow surface temperature (C) at beginning of timestep + Tsf ! snow surface temperature (C) + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zTsn0 , & ! snow layer temperature (C) at beginning of timestep + zTsn , & ! snow layer temperature (C) + zqsn0 , & + zqsn + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zTin0 , & ! ice layer temperature (C) + zTin , & ! ice layer temperature (C) + zSin0 , & ! ice layer bulk salinity (ppt) + zSin , & ! ice layer bulk salinity (ppt) + phi , & ! ice layer liquid fraction + zqin0 + + integer :: & + k ! vertical layer index + + write(nu_diag,*) "-------------------------------------" + + write(nu_diag,*) "picard convergence failed!" + write(nu_diag,*) 0, Tsf0, Tsf + + do k = 1, nslyr + write(nu_diag,*) k, zTsn0(k), zTsn(k), zqsn0(k) + enddo ! k + + do k = 1, nilyr + write(nu_diag,*) k, zTin0(k), zTin(k), zSin0(k), zSin(k), phi(k), zqin0(k) + enddo ! k + + write(nu_diag,*) "-------------------------------------" + + end subroutine picard_nonconvergence + +!======================================================================= + + subroutine check_picard_convergence(lsnow, & + lconverged, nit, & + Tsf, Tsf_prev, & + zTin, zTin_prev,& + zTsn, zTsn_prev,& + phi, Tbot, & + zqin, zqsn, & + km, ks, & + hilyr, hslyr, & + fswint, & + einit, dt, & + fcondtop, fcondbot, & + fadvheat) + + logical, intent(inout) :: & + lconverged ! has Picard solver converged? + + logical, intent(in) :: & + lsnow ! snow presence: T: has snow, F: no snow + + integer, intent(in) :: & + nit ! Picard iteration count + + real(kind=dbl_kind), intent(in) :: & + dt , & ! time step (s) + Tsf , & ! snow surface temperature (C) + Tsf_prev , & ! snow surface temperature at previous iteration + hilyr , & ! ice layer thickness (m) + hslyr , & ! snow layer thickness (m) + fswint , & ! SW absorbed in ice interior below surface (W m-2) + einit , & ! initial total energy (J) + Tbot , & ! ice bottom surfce temperature (deg C) + fadvheat ! flow of heat to ocean due to advection (W m-2) + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zTin , & ! ice layer temperature (C) + zTin_prev, & ! ice layer temperature at previous iteration + phi , & ! ice layer liquid fraction + km ! ice conductivity (W m-1 K-1) + + real(kind=dbl_kind), dimension(nilyr), intent(out) :: & + zqin ! ice layer enthalpy (J m-3) + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zTsn , & ! snow layer temperature (C) + zTsn_prev, & ! snow layer temperature at previous iteration + ks ! snow conductivity (W m-1 K-1) + + real(kind=dbl_kind), dimension(nslyr), intent(out) :: & + zqsn ! 
snow layer enthalpy (J m-3) + + real(kind=dbl_kind), intent(out) :: & + fcondtop , & ! downward cond flux at top surface (W m-2) + fcondbot ! downward cond flux at bottom surface (W m-2) + + real(kind=dbl_kind) :: & + ferr , & ! energy flux error + efinal , & ! initial total energy (J) at iteration + dzTsn , & ! change in snow temperature (C) between iterations + dzTin , & ! change in ice temperature (C) between iterations + dTsf ! change in surface temperature (C) between iterations + + call picard_final(lsnow, & + zqin, zqsn, & + zTin, zTsn, & + phi) + + call total_energy_content(lsnow, & + zqin, zqsn, & + hilyr, hslyr, & + efinal) + + call maximum_variables_changes(lsnow, & + Tsf, Tsf_prev, dTsf, & + zTsn, zTsn_prev, dzTsn, & + zTin, zTin_prev, dzTin) + + + fcondbot = c2 * km(nilyr) * ((zTin(nilyr) - Tbot) / hilyr) + + if (lsnow) then + fcondtop = c2 * ks(1) * ((Tsf - zTsn(1)) / hslyr) + else + fcondtop = c2 * km(1) * ((Tsf - zTin(1)) / hilyr) + endif + + ferr = (efinal - einit) / dt - (fcondtop - fcondbot + fswint - fadvheat) + + lconverged = (dTsf < dTemp_errmax .and. & + dzTsn < dTemp_errmax .and. & + dzTin < dTemp_errmax .and. & + abs(ferr) < 0.9_dbl_kind*ferrmax) + + end subroutine check_picard_convergence + +!======================================================================= + + subroutine picard_drainage_fluxes(fadvheat, q, & + qbr, qocn, & + hilyr) + + real(kind=dbl_kind), intent(out) :: & + fadvheat ! flow of heat to ocean due to advection (W m-2) + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + qbr ! ice layer brine enthalpy (J m-3) + + real(kind=dbl_kind), intent(in) :: & + qocn , & ! ocean brine enthalpy (J m-3) + hilyr ! ice layer thickness (m) + + integer :: & + k ! vertical layer index + + fadvheat = c0 + + ! calculate fluxes from base upwards + do k = 1, nilyr-1 + + fadvheat = fadvheat - q(k) * (qbr(k+1) - qbr(k)) + + enddo ! k + + k = nilyr + + fadvheat = fadvheat - q(k) * (qocn - qbr(k)) + + end subroutine picard_drainage_fluxes + +!======================================================================= + + subroutine picard_flushing_fluxes(fadvheat, w, & + qbr, & + qocn, qpond) + + real(kind=dbl_kind), intent(inout) :: & + fadvheat ! flow of heat to ocean due to advection (W m-2) + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + qbr ! ice layer brine enthalpy (J m-3) + + real(kind=dbl_kind), intent(in) :: & + w , & ! vertical flushing Darcy velocity (m/s) + qocn , & ! ocean brine enthalpy (J m-3) + qpond ! melt pond brine enthalpy (J m-3) + + fadvheat = fadvheat + w * (qbr(nilyr) - qpond) + + end subroutine picard_flushing_fluxes + +!======================================================================= + + subroutine maximum_variables_changes(lsnow, & + Tsf, Tsf_prev, dTsf, & + zTsn, zTsn_prev, dzTsn, & + zTin, zTin_prev, dzTin) + + logical, intent(in) :: & + lsnow ! snow presence: T: has snow, F: no snow + + real(kind=dbl_kind), intent(in) :: & + Tsf , & ! snow surface temperature (C) + Tsf_prev ! snow surface temperature at previous iteration + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zTsn , & ! snow layer temperature (C) + zTsn_prev ! snow layer temperature at previous iteration + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zTin , & ! ice layer temperature (C) + zTin_prev ! ice layer temperature at previous iteration + + real(kind=dbl_kind), intent(out) :: & + dTsf , & ! 
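      ! The convergence test in check_picard_convergence combines point-wise
      ! temperature changes with a layer-integrated energy balance. Written out,
      !
      !   ferr = (efinal - einit)/dt - (fcondtop - fcondbot + fswint - fadvheat)
      !
      ! so an iterate is accepted only when dTsf, dzTsn and dzTin are all below
      ! dTemp_errmax and |ferr| < 0.9*ferrmax, i.e. the implied change in column
      ! energy matches the net heat input over the time step to within tolerance.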
change in surface temperature (C) between iterations + dzTsn , & ! change in snow temperature (C) between iterations + dzTin ! change in surface temperature (C) between iterations + + dTsf = abs(Tsf - Tsf_prev) + + if (lsnow) then + dzTsn = maxval(abs(zTsn - zTsn_prev)) + else ! lsnow + dzTsn = c0 + endif ! lsnow + + dzTin = maxval(abs(zTin - zTin_prev)) + + end subroutine maximum_variables_changes + +!======================================================================= + + subroutine total_energy_content(lsnow, & + zqin, zqsn, & + hilyr, hslyr, & + energy) + + logical, intent(in) :: & + lsnow ! snow presence: T: has snow, F: no snow + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zqin ! ice layer enthalpy (J m-3) + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zqsn ! snow layer enthalpy (J m-3) + + real(kind=dbl_kind), intent(in) :: & + hilyr , & ! ice layer thickness (m) + hslyr ! snow layer thickness (m) + + real(kind=dbl_kind), intent(out) :: & + energy ! total energy of ice and snow + + integer :: & + k ! vertical layer index + + energy = c0 + + if (lsnow) then + + do k = 1, nslyr + + energy = energy + hslyr * zqsn(k) + + enddo ! k + + endif ! lsnow + + do k = 1, nilyr + + energy = energy + hilyr * zqin(k) + + enddo ! k + + end subroutine total_energy_content + +!======================================================================= + + subroutine picard_updates(zTin, & + Sbr, qbr) + + ! update brine salinity and liquid fraction based on new temperatures + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zTin ! ice layer temperature (C) + + real(kind=dbl_kind), dimension(nilyr), intent(inout) :: & + Sbr , & ! ice layer brine salinity (ppt) + qbr ! ice layer brine enthalpy (J m-3) + + integer :: & + k ! vertical layer index + + do k = 1, nilyr + + Sbr(k) = liquidus_brine_salinity_mush(zTin(k)) + qbr(k) = enthalpy_brine(zTin(k)) + + enddo ! k + + end subroutine picard_updates + +!======================================================================= + + subroutine picard_updates_enthalpy(zTin, qbr) + + ! update brine salinity and liquid fraction based on new temperatures + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zTin ! ice layer temperature (C) + + real(kind=dbl_kind), dimension(nilyr), intent(inout) :: & + qbr ! ice layer brine enthalpy (J m-3) + + integer :: & + k ! vertical layer index + + do k = 1, nilyr + + qbr(k) = enthalpy_brine(zTin(k)) + + enddo ! k + + end subroutine picard_updates_enthalpy + +!======================================================================= + + subroutine picard_final(lsnow, & + zqin, zqsn, & + zTin, zTsn, & + phi) + + logical, intent(in) :: & + lsnow ! snow presence: T: has snow, F: no snow + + real(kind=dbl_kind), dimension(nilyr), intent(out) :: & + zqin ! ice layer enthalpy (J m-3) + + real(kind=dbl_kind), dimension(nslyr), intent(out) :: & + zqsn ! snow layer enthalpy (J m-3) + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zTin, & ! ice layer temperature (C) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zTsn ! snow layer temperature (C) + + integer :: & + k ! vertical layer index + + do k = 1, nilyr + zqin(k) = enthalpy_mush_liquid_fraction(zTin(k), phi(k)) + enddo ! k + + if (lsnow) then + + do k = 1, nslyr + zqsn(k) = enthalpy_snow(zTsn(k)) + enddo ! k + + endif ! 
lsnow + + end subroutine picard_final + +!======================================================================= + + subroutine calc_intercell_thickness(lsnow, hilyr, hslyr, dxp) + + logical, intent(in) :: & + lsnow ! snow presence: T: has snow, F: no snow + + real(kind=dbl_kind), intent(in) :: & + hilyr , & ! ice layer thickness (m) + hslyr ! snow layer thickness (m) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(out) :: & + dxp ! distances between grid points (m) + + integer :: & + l ! vertical index + + if (lsnow) then + + dxp(1) = hslyr / c2 + + do l = 2, nslyr + + dxp(l) = hslyr + + enddo ! l + + dxp(nslyr+1) = (hilyr + hslyr) / c2 + + do l = nslyr+2, nilyr+nslyr + + dxp(l) = hilyr + + enddo ! l + + dxp(nilyr+nslyr+1) = hilyr / c2 + + else ! lsnow + + dxp(1) = hilyr / c2 + + do l = 2, nilyr + + dxp(l) = hilyr + + enddo ! l + + dxp(nilyr+1) = hilyr / c2 + + do l = nilyr+2, nilyr+nslyr+1 + + dxp(l) = c0 + + enddo ! l + + endif ! lsnow + + end subroutine calc_intercell_thickness + +!======================================================================= + + subroutine calc_intercell_conductivity(lsnow, & + km, ks, & + hilyr, hslyr, & + kcstar) + + logical, intent(in) :: & + lsnow ! snow presence: T: has snow, F: no snow + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + km ! ice conductivity (W m-1 K-1) + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + ks ! snow conductivity (W m-1 K-1) + + real(kind=dbl_kind), intent(in) :: & + hilyr , & ! ice layer thickness (m) + hslyr ! snow layer thickness (m) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(out) :: & + kcstar ! interface conductivities (W m-1 K-1) + + real(kind=dbl_kind) :: & + fe ! distance fraction at interface + + integer :: & + k, & ! vertical layer index + l ! vertical index + + if (lsnow) then + + kcstar(1) = ks(1) + + do l = 2, nslyr + + k = l + kcstar(l) = (c2 * ks(k) * ks(k-1)) / (ks(k) + ks(k-1)) + + enddo ! l + + fe = hilyr / (hilyr + hslyr) + kcstar(nslyr+1) = c1 / ((c1 - fe) / ks(nslyr) + fe / km(1)) + + do k = 2, nilyr + + l = k + nslyr + kcstar(l) = (c2 * km(k) * km(k-1)) / (km(k) + km(k-1)) + + enddo ! k + + kcstar(nilyr+nslyr+1) = km(nilyr) + + else ! lsnow + + kcstar(1) = km(1) + + do k = 2, nilyr + + l = k + kcstar(l) = (c2 * km(k) * km(k-1)) / (km(k) + km(k-1)) + + enddo ! k + + kcstar(nilyr+1) = km(nilyr) + + do l = nilyr+2, nilyr+nslyr+1 + + kcstar(l) = c0 + + enddo ! l + + endif ! lsnow + + end subroutine calc_intercell_conductivity + +!======================================================================= + + subroutine solve_heat_conduction(lsnow, lcold, & + Tsf, Tbot, & + zqin0, zqsn0, & + phi, dt, & + qpond, qocn, & + q, w, & + hilyr, hslyr, & + dxp, kcstar, & + Iswabs, Sswabs, & + fsurfn, dfsurfn_dTsf, & + zTin, zTsn,nit) + + logical, intent(in) :: & + lsnow , & ! snow presence: T: has snow, F: no snow + lcold ! surface cold: T: surface is cold, F: surface is melting + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zqin0 , & ! ice layer enthalpy (J m-3) at beggining of timestep + Iswabs , & ! SW radiation absorbed in ice layers (W m-2) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zqsn0 , & ! snow layer enthalpy (J m-3) at start of timestep + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real(kind=dbl_kind), intent(inout) :: & + Tsf ! snow surface temperature (C) + + real(kind=dbl_kind), intent(in) :: & + dt , & ! timestep (s) + hilyr , & ! ice layer thickness (m) + hslyr , & ! 
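      ! Note on the grid metrics built above: calc_intercell_thickness stores the
      ! distances between temperature points (half a layer at the top and bottom
      ! boundaries, half of each layer across the snow/ice interface, and a full
      ! layer spacing in the interior), and calc_intercell_conductivity uses the
      ! harmonic-mean interface conductivity,
      !
      !   kcstar = 2 * k_a * k_b / (k_a + k_b) ,
      !
      ! with the snow/ice interface value weighted by the distance fraction
      ! fe = hilyr / (hilyr + hslyr):
      !
      !   kcstar(nslyr+1) = 1 / ( (1 - fe)/ks(nslyr) + fe/km(1) ) ,
      !
      ! the usual choice that keeps the conductive flux continuous across layers
      ! of differing conductivity.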
snow layer thickness (m) + Tbot , & ! ice bottom surfce temperature (deg C) + qpond , & ! melt pond brine enthalpy (J m-3) + qocn , & ! ocean brine enthalpy (J m-3) + w , & ! vertical flushing Darcy velocity (m/s) + fsurfn , & ! net flux to top surface, excluding fcondtop + dfsurfn_dTsf ! derivative of net flux to top surface, excluding fcondtopn + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(in) :: & + dxp , & ! distances between grid points (m) + kcstar ! interface conductivities (W m-1 K-1) + + real(kind=dbl_kind), dimension(nilyr), intent(inout) :: & + zTin ! ice layer temperature (C) + + real(kind=dbl_kind), dimension(nslyr), intent(out) :: & + zTsn ! snow layer temperature (C) + + integer, intent(in) :: & + nit ! Picard iteration count + + real(kind=dbl_kind), dimension(nilyr+nslyr+1) :: & + Ap , & ! diagonal of tridiagonal matrix + As , & ! lower off-diagonal of tridiagonal matrix + An , & ! upper off-diagonal of tridiagonal matrix + b , & ! right hand side of matrix solve + T ! ice and snow temperatures + + integer :: & + nyn ! matrix size + + ! set up matrix and right hand side - snow + if (lsnow) then + + if (lcold) then + + call matrix_elements_snow_cold(Ap, As, An, b, nyn, & + Tsf, Tbot, & + zqin0, zqsn0, & + qpond, qocn, & + phi, q, & + w, & + hilyr, hslyr, & + dxp, kcstar, & + Iswabs, Sswabs, & + fsurfn, dfsurfn_dTsf, & + dt) + + else ! lcold + + call matrix_elements_snow_melt(Ap, As, An, b, nyn, & + Tsf, Tbot, & + zqin0, zqsn0, & + qpond, qocn, & + phi, q, & + w, & + hilyr, hslyr, & + dxp, kcstar, & + Iswabs, Sswabs, & + fsurfn, dfsurfn_dTsf, & + dt) + + endif ! lcold + + else ! lsnow + + if (lcold) then + + call matrix_elements_nosnow_cold(Ap, As, An, b, nyn, & + Tsf, Tbot, & + zqin0, & + qpond, qocn, & + phi, q, & + w, & + hilyr, & + dxp, kcstar, & + Iswabs, & + fsurfn, dfsurfn_dTsf, & + dt) + + else ! lcold + + call matrix_elements_nosnow_melt(Ap, As, An, b, nyn, & + Tsf, Tbot, & + zqin0, & + qpond, qocn, & + phi, q, & + w, & + hilyr, & + dxp, kcstar, & + Iswabs, & + fsurfn, dfsurfn_dTsf, & + dt) + + endif ! lcold + + endif ! lsnow + + ! tridiag to get new temperatures + call tdma_solve_sparse(An(1:nyn), Ap(1:nyn), As(1:nyn), b(1:nyn), T(1:nyn), nyn) + + call update_temperatures(lsnow, lcold, & + T, Tsf, & + zTin, zTsn) + + end subroutine solve_heat_conduction + +!======================================================================= + + subroutine update_temperatures(lsnow, lcold, & + T, Tsf, & + zTin, zTsn) + + logical, intent(in) :: & + lsnow , & ! snow presence: T: has snow, F: no snow + lcold ! surface cold: T: surface is cold, F: surface is melting + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(in) :: & + T ! matrix solution vector + + real(kind=dbl_kind), intent(inout) :: & + Tsf ! snow surface temperature (C) + + real(kind=dbl_kind), dimension(nilyr), intent(inout) :: & + zTin ! ice layer temperature (C) + + real(kind=dbl_kind), dimension(nslyr), intent(inout) :: & + zTsn ! snow layer temperature (C) + + integer :: & + l , & ! vertical index + k ! vertical layer index + + if (lsnow) then + + if (lcold) then + + Tsf = T(1) + + do k = 1, nslyr + l = k + 1 + zTsn(k) = T(l) + enddo ! k + + do k = 1, nilyr + l = k + nslyr + 1 + zTin(k) = T(l) + enddo ! k + + else ! lcold + + do k = 1, nslyr + l = k + zTsn(k) = T(l) + enddo ! k + + do k = 1, nilyr + l = k + nslyr + zTin(k) = T(l) + enddo ! k + + endif ! lcold + + else ! 
lsnow + + if (lcold) then + + Tsf = T(1) + + do k = 1, nilyr + l = k + 1 + zTin(k) = T(l) + enddo ! k + + else ! lcold + + do k = 1, nilyr + l = k + zTin(k) = T(l) + enddo ! k + + endif ! lcold + + endif ! lsnow + + end subroutine update_temperatures + +!======================================================================= + + subroutine matrix_elements_nosnow_melt(Ap, As, An, b, nyn, & + Tsf, Tbot, & + zqin0, & + qpond, qocn, & + phi, q, & + w, & + hilyr, & + dxp, kcstar, & + Iswabs, & + fsurfn, dfsurfn_dTsf, & + dt) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(out) :: & + Ap , & ! diagonal of tridiagonal matrix + As , & ! lower off-diagonal of tridiagonal matrix + An , & ! upper off-diagonal of tridiagonal matrix + b ! right hand side of matrix solve + + integer, intent(out) :: & + nyn ! matrix size + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zqin0 , & ! ice layer enthalpy (J m-3) at beggining of timestep + Iswabs , & ! SW radiation absorbed in ice layers (W m-2) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), intent(in) :: & + Tsf , & ! snow surface temperature (C) + dt , & ! timestep (s) + hilyr , & ! ice layer thickness (m) + Tbot , & ! ice bottom surfce temperature (deg C) + qpond , & ! melt pond brine enthalpy (J m-3) + qocn , & ! ocean brine enthalpy (J m-3) + w , & ! downwards vertical flushing Darcy velocity (m/s) + fsurfn , & ! net flux to top surface, excluding fcondtop + dfsurfn_dTsf ! derivative of net flux to top surface, excluding fcondtopn + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(in) :: & + dxp , & ! distances between grid points (m) + kcstar ! interface conductivities (W m-1 K-1) + + integer :: & + k , & ! vertical layer index + l ! vertical index + + ! surface layer + k = 1 + l = k + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(k+1) / dxp(k+1) + & + kcstar(k) / dxp(k) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = -kcstar(k+1) / dxp(k+1) - & + q(k) * cp_ocn * rhow + An(l) = c0 + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + & + (kcstar(k) / dxp(k)) * Tsf + & + w * qpond + + ! interior ice layers + do k = 2, nilyr-1 + + l = k + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(k+1) / dxp(k+1) + & + kcstar(k) / dxp(k) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = -kcstar(k+1) / dxp(k+1) - & + q(k) * cp_ocn * rhow + An(l) = -kcstar(k) / dxp(k) - & + w * cp_ocn * rhow + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + + enddo ! k + + ! 
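      ! Where the time-derivative coefficients above come from (a derivation sketch,
      ! consistent with the coefficients as coded): writing the mush enthalpy at
      ! fixed liquid fraction as
      !
      !   zqin(T) = phi*rhow*cp_ocn*T + (1 - phi)*rhoi*(cp_ice*T - Lfresh)
      !           = [ phi*(cp_ocn*rhow - cp_ice*rhoi) + rhoi*cp_ice ] * T
      !             - (1 - phi)*rhoi*Lfresh ,
      !
      ! backward-Euler differencing of hilyr*(zqin - zqin0)/dt puts the bracketed
      ! coefficient times hilyr/dt on the diagonal Ap, while the known pieces,
      ! (1 - phi)*rhoi*Lfresh and zqin0, move to the right-hand side b together
      ! with the absorbed shortwave Iswabs. The remaining entries of Ap, As and An
      ! are the conductive couplings kcstar/dxp and the advective couplings from
      ! the Darcy flows q (upward drainage) and w (downward flushing), each
      ! carrying brine heat capacity cp_ocn*rhow.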
bottom layer + k = nilyr + l = k + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(k+1) / dxp(k+1) + & + kcstar(k) / dxp(k) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = c0 + An(l) = -kcstar(k) / dxp(k) - & + w * cp_ocn * rhow + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + & + (kcstar(k+1) * Tbot) / dxp(k+1) + & + q(k) * qocn + + nyn = nilyr + + end subroutine matrix_elements_nosnow_melt + +!======================================================================= + + subroutine matrix_elements_nosnow_cold(Ap, As, An, b, nyn, & + Tsf, Tbot, & + zqin0, & + qpond, qocn, & + phi, q, & + w, & + hilyr, & + dxp, kcstar, & + Iswabs, & + fsurfn, dfsurfn_dTsf, & + dt) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(out) :: & + Ap , & ! diagonal of tridiagonal matrix + As , & ! lower off-diagonal of tridiagonal matrix + An , & ! upper off-diagonal of tridiagonal matrix + b ! right hand side of matrix solve + + integer, intent(out) :: & + nyn ! matrix size + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zqin0 , & ! ice layer enthalpy (J m-3) at beggining of timestep + Iswabs , & ! SW radiation absorbed in ice layers (W m-2) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), intent(in) :: & + Tsf , & ! snow surface temperature (C) + dt , & ! timestep (s) + hilyr , & ! ice layer thickness (m) + Tbot , & ! ice bottom surfce temperature (deg C) + qpond , & ! melt pond brine enthalpy (J m-3) + qocn , & ! ocean brine enthalpy (J m-3) + w , & ! downwards vertical flushing Darcy velocity (m/s) + fsurfn , & ! net flux to top surface, excluding fcondtop + dfsurfn_dTsf ! derivative of net flux to top surface, excluding fcondtopn + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(in) :: & + dxp , & ! distances between grid points (m) + kcstar ! interface conductivities (W m-1 K-1) + + integer :: & + k , & ! vertical layer index + l ! vertical index + + ! surface temperature + l = 1 + Ap(l) = dfsurfn_dTsf - kcstar(1) / dxp(1) + As(l) = kcstar(1) / dxp(1) + An(l) = c0 + b (l) = dfsurfn_dTsf * Tsf - fsurfn + + ! surface layer + k = 1 + l = k + 1 + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(k+1) / dxp(k+1) + & + kcstar(k) / dxp(k) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = -kcstar(k+1) / dxp(k+1) - & + q(k) * cp_ocn * rhow + An(l) = -kcstar(k) / dxp(k) + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + & + w * qpond + + ! interior ice layers + do k = 2, nilyr-1 + + l = k + 1 + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(k+1) / dxp(k+1) + & + kcstar(k) / dxp(k) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = -kcstar(k+1) / dxp(k+1) - & + q(k) * cp_ocn * rhow + An(l) = -kcstar(k) / dxp(k) - & + w * cp_ocn * rhow + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + + enddo ! k + + ! 
bottom layer + k = nilyr + l = k + 1 + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(k+1) / dxp(k+1) + & + kcstar(k) / dxp(k) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = c0 + An(l) = -kcstar(k) / dxp(k) - & + w * cp_ocn * rhow + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + & + (kcstar(k+1) * Tbot) / dxp(k+1) + & + q(k) * qocn + + nyn = nilyr + 1 + + end subroutine matrix_elements_nosnow_cold + +!======================================================================= + + subroutine matrix_elements_snow_melt(Ap, As, An, b, nyn, & + Tsf, Tbot, & + zqin0, zqsn0, & + qpond, qocn, & + phi, q, & + w, & + hilyr, hslyr, & + dxp, kcstar, & + Iswabs, Sswabs, & + fsurfn, dfsurfn_dTsf, & + dt) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(out) :: & + Ap , & ! diagonal of tridiagonal matrix + As , & ! lower off-diagonal of tridiagonal matrix + An , & ! upper off-diagonal of tridiagonal matrix + b ! right hand side of matrix solve + + integer, intent(out) :: & + nyn ! matrix size + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zqin0 , & ! ice layer enthalpy (J m-3) at beggining of timestep + Iswabs , & ! SW radiation absorbed in ice layers (W m-2) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zqsn0 , & ! snow layer enthalpy (J m-3) at start of timestep + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real(kind=dbl_kind), intent(in) :: & + Tsf , & ! snow surface temperature (C) + dt , & ! timestep (s) + hilyr , & ! ice layer thickness (m) + hslyr , & ! snow layer thickness (m) + Tbot , & ! ice bottom surfce temperature (deg C) + qpond , & ! melt pond brine enthalpy (J m-3) + qocn , & ! ocean brine enthalpy (J m-3) + w , & ! downwards vertical flushing Darcy velocity (m/s) + fsurfn , & ! net flux to top surface, excluding fcondtop + dfsurfn_dTsf ! derivative of net flux to top surface, excluding fcondtopn + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(in) :: & + dxp , & ! distances between grid points (m) + kcstar ! interface conductivities (W m-1 K-1) + + integer :: & + k , & ! vertical layer index + l ! vertical index + + ! surface layer + k = 1 + l = k + + Ap(l) = ((rhos * cp_ice) / dt) * hslyr + & + kcstar(l+1) / dxp(l+1) + & + kcstar(l) / dxp(l) + As(l) = -kcstar(l+1) / dxp(l+1) + An(l) = c0 + b (l) = ((rhos * Lfresh + zqsn0(k)) / dt) * hslyr + Sswabs(k) + & + (kcstar(l) * Tsf) / dxp(l) + + ! interior snow layers + do k = 2, nslyr + + l = k + + Ap(l) = ((rhos * cp_ice) / dt) * hslyr + & + kcstar(l+1) / dxp(l+1) + & + kcstar(l) / dxp(l) + As(l) = -kcstar(l+1) / dxp(l+1) + An(l) = -kcstar(l) / dxp(l) + b (l) = ((rhos * Lfresh + zqsn0(k)) / dt) * hslyr + Sswabs(k) + + enddo ! k + + ! top ice layer + k = 1 + l = nslyr + k + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(l+1) / dxp(l+1) + & + kcstar(l) / dxp(l) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = -kcstar(l+1) / dxp(l+1) - & + q(k) * cp_ocn * rhow + An(l) = -kcstar(l) / dxp(l) + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + & + w * qpond + + ! 
interior ice layers + do k = 2, nilyr-1 + + l = nslyr + k + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(l+1) / dxp(l+1) + & + kcstar(l) / dxp(l) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = -kcstar(l+1) / dxp(l+1) - & + q(k) * cp_ocn * rhow + An(l) = -kcstar(l) / dxp(l) - & + w * cp_ocn * rhow + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + + enddo ! k + + ! bottom layer + k = nilyr + l = nilyr + nslyr + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(l+1) / dxp(l+1) + & + kcstar(l) / dxp(l) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = c0 + An(l) = -kcstar(l) / dxp(l) - & + w * cp_ocn * rhow + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + & + (kcstar(l+1) * Tbot) / dxp(l+1) + & + q(k) * qocn + + nyn = nilyr + nslyr + + end subroutine matrix_elements_snow_melt + +!======================================================================= + + subroutine matrix_elements_snow_cold(Ap, As, An, b, nyn, & + Tsf, Tbot, & + zqin0, zqsn0, & + qpond, qocn, & + phi, q, & + w, & + hilyr, hslyr, & + dxp, kcstar, & + Iswabs, Sswabs, & + fsurfn, dfsurfn_dTsf, & + dt) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(out) :: & + Ap , & ! diagonal of tridiagonal matrix + As , & ! lower off-diagonal of tridiagonal matrix + An , & ! upper off-diagonal of tridiagonal matrix + b ! right hand side of matrix solve + + integer, intent(out) :: & + nyn ! matrix size + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zqin0 , & ! ice layer enthalpy (J m-3) at beggining of timestep + Iswabs , & ! SW radiation absorbed in ice layers (W m-2) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), dimension(nslyr), intent(in) :: & + zqsn0 , & ! snow layer enthalpy (J m-3) at start of timestep + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real(kind=dbl_kind), intent(in) :: & + Tsf , & ! snow surface temperature (C) + dt , & ! timestep (s) + hilyr , & ! ice layer thickness (m) + hslyr , & ! snow layer thickness (m) + Tbot , & ! ice bottom surfce temperature (deg C) + qpond , & ! melt pond brine enthalpy (J m-3) + qocn , & ! ocean brine enthalpy (J m-3) + w , & ! downwards vertical flushing Darcy velocity (m/s) + fsurfn , & ! net flux to top surface, excluding fcondtop + dfsurfn_dTsf ! derivative of net flux to top surface, excluding fcondtopn + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(nilyr+nslyr+1), intent(in) :: & + dxp , & ! distances between grid points (m) + kcstar ! interface conductivities (W m-1 K-1) + + integer :: & + k , & ! vertical layer index + l , & ! matrix index + m ! vertical index + + ! surface temperature + l = 1 + Ap(l) = dfsurfn_dTsf - kcstar(1) / dxp(1) + As(l) = kcstar(1) / dxp(1) + An(l) = c0 + b (l) = dfsurfn_dTsf * Tsf - fsurfn + + ! surface layer + k = 1 + l = k + 1 + m = 1 + + Ap(l) = ((rhos * cp_ice) / dt) * hslyr + & + kcstar(m+1) / dxp(m+1) + & + kcstar(m) / dxp(m) + As(l) = -kcstar(m+1) / dxp(m+1) + An(l) = -kcstar(m) / dxp(m) + b (l) = ((rhos * Lfresh + zqsn0(k)) / dt) * hslyr + Sswabs(k) + + ! 
interior snow layers + do k = 2, nslyr + + l = k + 1 + m = k + + Ap(l) = ((rhos * cp_ice) / dt) * hslyr + & + kcstar(m+1) / dxp(m+1) + & + kcstar(m) / dxp(m) + As(l) = -kcstar(m+1) / dxp(m+1) + An(l) = -kcstar(m) / dxp(m) + b (l) = ((rhos * Lfresh + zqsn0(k)) / dt) * hslyr + Sswabs(k) + + enddo ! k + + ! top ice layer + k = 1 + l = nslyr + k + 1 + m = k + nslyr + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(m+1) / dxp(m+1) + & + kcstar(m) / dxp(m) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = -kcstar(m+1) / dxp(m+1) - & + q(k) * cp_ocn * rhow + An(l) = -kcstar(m) / dxp(m) + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + & + w * qpond + + ! interior ice layers + do k = 2, nilyr-1 + + l = nslyr + k + 1 + m = k + nslyr + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(m+1) / dxp(m+1) + & + kcstar(m) / dxp(m) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = -kcstar(m+1) / dxp(m+1) - & + q(k) * cp_ocn * rhow + An(l) = -kcstar(m) / dxp(m) - & + w * cp_ocn * rhow + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + + enddo ! k + + ! bottom layer + k = nilyr + l = nilyr + nslyr + 1 + m = k + nslyr + + Ap(l) = ((phi(k) * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) / dt) * hilyr + & + kcstar(m+1) / dxp(m+1) + & + kcstar(m) / dxp(m) + & + q(k) * cp_ocn * rhow + & + w * cp_ocn * rhow + As(l) = c0 + An(l) = -kcstar(m) / dxp(m) - & + w * cp_ocn * rhow + b (l) = (((c1 - phi(k)) * rhoi * Lfresh + zqin0(k)) / dt) * hilyr + Iswabs(k) + & + (kcstar(m+1) * Tbot) / dxp(m+1) + & + q(k) * qocn + + nyn = nilyr + nslyr + 1 + + end subroutine matrix_elements_snow_cold + +!======================================================================= + + subroutine solve_salinity(zSin, Sbr, & + Spond, sss, & + q, dSdt, & + w, hilyr, & + dt) + + real(kind=dbl_kind), dimension(nilyr), intent(inout) :: & + zSin ! ice layer bulk salinity (ppt) + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + Sbr , & ! ice layer brine salinity (ppt) + dSdt ! gravity drainage desalination rate for slow mode (ppt s-1) + + real(kind=dbl_kind), dimension(0:nilyr), intent(in) :: & + q ! upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), intent(in) :: & + Spond , & ! melt pond salinity (ppt) + sss , & ! sea surface salinity (ppt) + w , & ! vertical flushing Darcy velocity (m/s) + hilyr , & ! ice layer thickness (m) + dt ! timestep (s) + + integer :: & + k ! vertical layer index + + real(kind=dbl_kind), parameter :: & + S_min = p01 + + real(kind=dbl_kind), dimension(nilyr) :: & + zSin0 + + zSin0 = zSin + + k = 1 + zSin(k) = zSin(k) + max(S_min - zSin(k), & + ((q(k) * (Sbr(k+1) - Sbr(k))) / hilyr + & + dSdt(k) + & + (w * (Spond - Sbr(k))) / hilyr) * dt) + + do k = 2, nilyr-1 + + zSin(k) = zSin(k) + max(S_min - zSin(k), & + ((q(k) * (Sbr(k+1) - Sbr(k))) / hilyr + & + dSdt(k) + & + (w * (Sbr(k-1) - Sbr(k))) / hilyr) * dt) + + enddo ! 
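      ! Layer bulk salinity is advanced explicitly in solve_salinity:
      !
      !   zSin(k) <- zSin(k) + dt * [ q(k)*(Sbr(k+1) - Sbr(k))/hilyr    (rapid-mode gravity drainage)
      !                             + dSdt(k)                           (slow-mode gravity drainage)
      !                             + w*(Sbr(k-1) - Sbr(k))/hilyr ]     (downward flushing)
      !
      ! with Spond standing in for Sbr(k-1) in the top layer and sss for Sbr(k+1)
      ! in the bottom layer, and the whole increment floored so that zSin never
      ! drops below S_min = 0.01 ppt.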
k + + k = nilyr + zSin(k) = zSin(k) + max(S_min - zSin(k), & + ((q(k) * (sss - Sbr(k))) / hilyr + & + dSdt(k) + & + (w * (Sbr(k-1) - Sbr(k))) / hilyr) * dt) + + + if (minval(zSin) < c0) then + + + write(*,*) (q(k) * (Sbr(k+1) - Sbr(k))) / hilyr, & + dSdt(k) , & + (w * (Spond - Sbr(k))) / hilyr + + do k = 1, nilyr + + write(*,*) k, zSin(k), zSin0(k) + + enddo + + stop + + endif + + end subroutine solve_salinity + +!======================================================================= + + subroutine tdma_solve_sparse(a, b, c, d, x, n) + + ! perform a tri-diagonal solve with TDMA using a sparse tridiagoinal matrix + + integer(kind=int_kind), intent(in) :: & + n ! matrix size + + real(kind=dbl_kind), dimension(1:n), intent(in) :: & + a , & ! matrix lower off-diagonal + b , & ! matrix diagonal + c , & ! matrix upper off-diagonal + d ! right hand side vector + + real(kind=dbl_kind), dimension(1:n), intent(out) :: & + x ! solution vector + + real(kind=dbl_kind), dimension(nilyr+nslyr+1) :: & + cp , & ! modified upper off-diagonal vector + dp ! modified right hand side vector + + integer(kind=int_kind) :: & + i ! vector index + + ! forward sweep + cp(1) = c(1) / b(1) + do i = 2, n-1 + cp(i) = c(i) / (b(i) - cp(i-1)*a(i)) + enddo + + dp(1) = d(1) / b(1) + do i = 2, n + dp(i) = (d(i) - dp(i-1)*a(i)) / (b(i) - cp(i-1)*a(i)) + enddo + + ! back substitution + x(n) = dp(n) + do i = n-1,1,-1 + x(i) = dp(i) - cp(i)*x(i+1) + enddo + + end subroutine tdma_solve_sparse + +!======================================================================= +! Effect of salinity +!======================================================================= + + function permeability(phi) result(perm) + + ! given the liquid fraction calculate the permeability + ! See Golden et al. 2007 + + real(kind=dbl_kind), intent(in) :: & + phi ! liquid fraction + + real(kind=dbl_kind) :: & + perm ! permeability (m2) + + real(kind=dbl_kind), parameter :: & + phic = p05 ! critical liquid fraction for impermeability + + perm = 3.0e-8_dbl_kind * max(phi - phic, c0)**3 + + end function permeability + +!======================================================================= + + subroutine explicit_flow_velocities(zSin, & + zTin, Tsf, & + Tbot, q, & + dSdt, Sbr, & + qbr, dt, & + sss, qocn, & + hilyr, hin) + + ! calculate the rapid gravity drainage mode Darcy velocity and the + ! slow mode drainage rate + + use ice_constants, only: viscosity_dyn + + real(kind=dbl_kind), dimension(1:nilyr), intent(in) :: & + zSin, & ! ice layer bulk salinity (ppt) + zTin ! ice layer temperature (C) + + real(kind=dbl_kind), intent(in) :: & + Tsf , & ! ice/snow surface temperature (C) + Tbot , & ! ice bottom temperature (C) + dt , & ! time step (s) + sss , & ! sea surface salinty (ppt) + qocn , & ! ocean enthalpy (J m-3) + hilyr , & ! ice layer thickness (m) + hin ! ice thickness (m) + + real(kind=dbl_kind), dimension(0:nilyr), intent(out) :: & + q ! rapid mode upward interface vertical Darcy flow (m s-1) + + real(kind=dbl_kind), dimension(1:nilyr), intent(out) :: & + dSdt ! slow mode drainage rate (ppt s-1) + + real(kind=dbl_kind), dimension(1:nilyr+1), intent(out) :: & + Sbr , & ! ice layer brine salinity (ppt) + qbr ! ice layer brine enthalpy (J m-3) + + real(kind=dbl_kind), parameter :: & + kappal = 8.824e-8_dbl_kind, & ! heat diffusivity of liquid + ra_constants = gravit / (viscosity_dyn * kappal), & ! for Rayleigh number + fracmax = p2 , & ! limiting advective layer fraction + zSin_min = p1 , & ! minimum bulk salinity (ppt) + safety_factor = c10 ! 
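      ! A standalone sketch (kept separate from the module sources, names here are
      ! illustrative only) of the Thomas algorithm used by tdma_solve_sparse above,
      ! applied to a small test system with known solution x = (1, 1, 1):

      program tdma_demo
         implicit none
         integer,  parameter :: dbl = selected_real_kind(13)
         integer,  parameter :: n = 3
         real(dbl) :: a(n), b(n), c(n), d(n), x(n), cprime(n), dprime(n)
         integer   :: i

         ! system:  [ 2 -1  0 ] [x1]   [1]
         !          [-1  2 -1 ] [x2] = [0]
         !          [ 0 -1  2 ] [x3]   [1]
         a = (/  0.0_dbl, -1.0_dbl, -1.0_dbl /)   ! lower off-diagonal (first entry ignored)
         b = (/  2.0_dbl,  2.0_dbl,  2.0_dbl /)   ! diagonal
         c = (/ -1.0_dbl, -1.0_dbl,  0.0_dbl /)   ! upper off-diagonal (last entry ignored)
         d = (/  1.0_dbl,  0.0_dbl,  1.0_dbl /)   ! right hand side

         ! forward sweep
         cprime(1) = c(1) / b(1)
         dprime(1) = d(1) / b(1)
         do i = 2, n
            cprime(i) = c(i) / (b(i) - cprime(i-1)*a(i))
            dprime(i) = (d(i) - dprime(i-1)*a(i)) / (b(i) - cprime(i-1)*a(i))
         enddo

         ! back substitution
         x(n) = dprime(n)
         do i = n-1, 1, -1
            x(i) = dprime(i) - cprime(i)*x(i+1)
         enddo

         print *, x   ! expected: 1.0  1.0  1.0
      end program tdma_demo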
to prevent negative salinities + + real(kind=dbl_kind), dimension(1:nilyr) :: & + phi ! ice layer liquid fraction + + real(kind=dbl_kind), dimension(0:nilyr) :: & + rho ! ice layer brine density (kg m-3) + + real(kind=dbl_kind) :: & + rho_ocn , & ! ocean density (kg m-3) + perm_min , & ! minimum permeability from layer to ocean (m2) + perm_harm , & ! harmonic mean of permeability from layer to ocean (m2) + rho_sum , & ! sum of the brine densities from layer to ocean (kg m-3) + rho_pipe , & ! density of the brine in the channel (kg m-3) + z , & ! distance to layer from top surface (m) + perm , & ! ice layer permeability (m2) + drho , & ! brine density difference between layer and ocean (kg m-3) + Ra , & ! local mush Rayleigh number + rn , & ! real value of number of layers considered + L , & ! thickness of the layers considered (m) + dx , & ! horizontal size of convective flow (m) + dx2 , & ! square of the horizontal size of convective flow (m2) + Am , & ! A parameter for mush + Bm , & ! B parameter for mush + Ap , & ! A parameter for channel + Bp , & ! B parameter for channel + qlimit , & ! limit to vertical Darcy flow for numerical stability + dS_guess , & ! expected bulk salinity without limits + alpha ! desalination limiting factor + + integer(kind=int_kind) :: & + k ! ice layer index + + ! initial downward sweep - determine derived physical quantities + do k = 1, nilyr + + Sbr(k) = liquidus_brine_salinity_mush(zTin(k)) + phi(k) = liquid_fraction(zTin(k), zSin(k)) + qbr(k) = enthalpy_brine(zTin(k)) + rho(k) = density_brine(Sbr(k)) + + enddo ! k + + rho(0) = rho(1) + + ! ocean conditions + Sbr(nilyr+1) = sss + qbr(nilyr+1) = qocn + rho_ocn = density_brine(sss) + + ! initialize accumulated quantities + perm_min = bignum + perm_harm = c0 + rho_sum = c0 + + ! limit to q for numerical stability + qlimit = (fracmax * hilyr) / dt + + ! no flow through ice top surface + q(0) = c0 + + ! first iterate over layers going up + do k = nilyr, 1, -1 + + ! vertical position from ice top surface + z = ((real(k, dbl_kind) - p5) / real(nilyr, dbl_kind)) * hin + + ! permeabilities + perm = permeability(phi(k)) + perm_min = min(perm_min,perm) + perm_harm = perm_harm + (c1 / max(perm,1.0e-30_dbl_kind)) + + ! densities + rho_sum = rho_sum + rho(k) + !rho_pipe = rho(k) + rho_pipe = p5 * (rho(k) + rho(k-1)) + drho = max(rho(k) - rho_ocn, c0) + + ! mush Rayleigh number + Ra = drho * (hin-z) * perm_min * ra_constants + + ! height of mush layer to layer k + rn = real(nilyr-k+1,dbl_kind) + L = rn * hilyr + + ! horizontal size of convection + dx = L * c2 * aspect_rapid_mode + dx2 = dx**2 + + ! determine vertical Darcy flow + Am = (dx2 * rn) / (viscosity_dyn * perm_harm) + Bm = (-gravit * rho_sum) / rn + + Ap = (pi * a_rapid_mode**4) / (c8 * viscosity_dyn) + Bp = -rho_pipe * gravit + + q(k) = max((Am / dx2) * ((-Ap*Bp - Am*Bm) / (Am + Ap) + Bm), 1.0e-30_dbl_kind) + + ! modify by Rayleigh number and advection limit + q(k) = min(q(k) * (max(Ra - Rac_rapid_mode, c0) / (Ra+puny)), qlimit) + + ! late stage drainage + dSdt(k) = dSdt_slow_mode * (max((zSin(k) - phi_c_slow_mode*Sbr(k)), c0) & + * max((Tbot - Tsf), c0)) / (hin + 0.001_dbl_kind) + + dSdt(k) = max(dSdt(k), (-zSin(k) * 0.5_dbl_kind) / dt) + + ! restrict flows to prevent too much salt loss + dS_guess = (((q(k) * (Sbr(k+1) - Sbr(k))) / hilyr + dSdt(k)) * dt) * safety_factor + + if (abs(dS_guess) < puny) then + alpha = c1 + else + alpha = (zSin_min - zSin(k)) / dS_guess + endif + + if (alpha < c0 .or. 
alpha > c1) alpha = c1 + + q(k) = q(k) * alpha + dSdt(k) = dSdt(k) * alpha + + enddo ! k + + end subroutine explicit_flow_velocities + +!======================================================================= +! Flushing +!======================================================================= + + subroutine flushing_velocity(zTin, zSin, & + phi, & + hin, hsn, & + hilyr, & + hpond, apond, & + dt, w) + + ! calculate the vertical flushing Darcy velocity (positive downward) + + use ice_constants, only: viscosity_dyn + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + zTin , & ! ice layer temperature (C) + zSin , & ! ice layer bulk salinity (ppt) + phi ! ice layer liquid fraction + + real(kind=dbl_kind), intent(in) :: & + hilyr , & ! ice layer thickness (m) + hpond , & ! melt pond thickness (m) + apond , & ! melt pond area (-) + hsn , & ! snow thickness (m) + hin , & ! ice thickness (m) + dt ! time step (s) + + real(kind=dbl_kind), intent(out) :: & + w ! vertical flushing Darcy flow rate (m s-1) + + real(kind=dbl_kind), parameter :: & + advection_limit = 0.005_dbl_kind ! limit to fraction of brine in + ! any layer that can be advected + + real(kind=dbl_kind) :: & + perm , & ! ice layer permeability (m2) + ice_mass , & ! mass of ice (kg m-2) + perm_harm , & ! harmonic mean of ice permeability (m2) + hocn , & ! ocean surface height above ice base (m) + hbrine , & ! brine surface height above ice base (m) + w_down_max , & ! maximum downward flushing Darcy flow rate (m s-1) + phi_min , & ! minimum porosity in the mush + wlimit , & ! limit to w to avoid advecting all brine in layer + dhhead ! hydraulic head (m) + + integer(kind=int_kind) :: & + k ! ice layer index + + ! initialize + w = c0 + + ice_mass = c0 + perm_harm = c0 + phi_min = c1 + + do k = 1, nilyr + + ! liquid fraction + !phi = liquid_fraction(zTin(k), zSin(k)) + phi_min = min(phi_min,phi(k)) + + ! permeability + perm = permeability(phi(k)) + + ! ice mass + ice_mass = ice_mass + phi(k) * density_brine(liquidus_brine_salinity_mush(zTin(k))) + & + (c1 - phi(k)) * rhoi + + ! permeability harmonic mean + perm_harm = perm_harm + c1 / (perm + 1e-30_dbl_kind) + + enddo ! k + + ice_mass = ice_mass * hilyr + + perm_harm = real(nilyr,dbl_kind) / perm_harm + + ! calculate ocean surface height above bottom of ice + hocn = (ice_mass + hpond * apond * rhow + hsn * rhos) / rhow + + ! calculate brine height above bottom of ice + hbrine = hin + hpond + + ! pressure head + dhhead = max(hbrine - hocn,c0) + + ! darcy flow through ice + w = (perm_harm * rhow * gravit * (dhhead / hin)) / viscosity_dyn + + ! maximum down flow to drain pond + w_down_max = (hpond * apond) / dt + + ! limit flow + w = min(w,w_down_max) + + ! limit amount of brine that can be advected out of any particular layer + wlimit = (advection_limit * phi_min * hilyr) / dt + + if (abs(w) > puny) then + w = w * max(min(abs(wlimit/w),c1),c0) + else + w = c0 + endif + + w = max(w, c0) + + end subroutine flushing_velocity + +!======================================================================= + + subroutine flush_pond(w, hin, hpond, apond, dt) + + ! given a flushing velocity drain the meltponds + + real(kind=dbl_kind), intent(in) :: & + w , & ! vertical flushing Darcy flow rate (m s-1) + hin , & ! ice thickness (m) + apond , & ! melt pond area (-) + dt ! time step (s) + + real(kind=dbl_kind), intent(inout) :: & + hpond ! 
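      ! Two closure relations used above, written out for reference (constants as
      ! coded): the permeability follows the cubic law of Golden et al. 2007,
      !
      !   perm(phi) = 3.0e-8 * max(phi - 0.05, 0)**3   [m2]
      !
      ! (e.g. phi = 0.25 gives perm = 3.0e-8 * 0.2**3 = 2.4e-10 m2), and the
      ! downward flushing velocity in flushing_velocity is a Darcy flow driven by
      ! the hydraulic head between the brine surface and the ocean surface,
      !
      !   w = perm_harm * rhow * gravit * (dhhead / hin) / viscosity_dyn ,
      !
      ! where perm_harm is the harmonic-mean permeability of the column and
      ! dhhead = max(hin + hpond - hocn, 0). The result is capped so that a single
      ! step can neither drain more water than the pond holds nor advect more than
      ! 0.5% of the brine volume of the least-porous layer.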
melt pond thickness (m) + + real(kind=dbl_kind), parameter :: & + lambda_pond = c1 / (10.0_dbl_kind * 24.0_dbl_kind * 3600.0_dbl_kind), & + hpond0 = 0.01_dbl_kind + + if (apond > c0 .and. hpond > c0) then + + ! flush pond through mush + hpond = hpond - w * dt / apond + + hpond = max(hpond, c0) + + ! exponential decay of pond + hpond = hpond - lambda_pond * dt * (hpond + hpond0) + + hpond = max(hpond, c0) + + endif + + end subroutine flush_pond + + !======================================================================= + + subroutine flood_ice(hsn, hin, & + hslyr, hilyr, & + zqsn, zqin, & + phi, dt, & + zSin, Sbr, & + sss, qocn, & + snoice, fadvheat) + + ! given upwards flushing brine flow calculate amount of snow ice and + ! convert snow to ice with appropriate properties + + real(kind=dbl_kind), intent(in) :: & + dt , & ! time step (s) + hsn , & ! snow thickness (m) + hin , & ! ice thickness (m) + sss , & ! sea surface salinity (ppt) + qocn ! ocean brine enthalpy (J m-2) + + real(kind=dbl_kind), dimension(nslyr), intent(inout) :: & + zqsn ! snow layer enthalpy (J m-2) + + real(kind=dbl_kind), dimension(nilyr), intent(inout) :: & + zqin , & ! ice layer enthalpy (J m-2) + zSin , & ! ice layer bulk salinity (ppt) + phi ! ice liquid fraction + + real(kind=dbl_kind), dimension(nilyr), intent(in) :: & + Sbr ! ice layer brine salinity (ppt) + + real(kind=dbl_kind), intent(inout) :: & + hslyr , & ! snow layer thickness (m) + hilyr ! ice layer thickness (m) + + real(kind=dbl_kind), intent(out) :: & + snoice ! snow ice formation + + real(kind=dbl_kind), intent(inout) :: & + fadvheat ! advection heat flux to ocean + + real(kind=dbl_kind) :: & + hin2 , & ! new ice thickness (m) + hsn2 , & ! new snow thickness (m) + hilyr2 , & ! new ice layer thickness (m) + hslyr2 , & ! new snow layer thickness (m) + dh , & ! thickness of snowice formation (m) + phi_snowice , & ! liquid fraction of new snow ice + rho_snowice , & ! density of snowice (kg m-3) + zSin_snowice , & ! bulk salinity of new snowice (ppt) + zqin_snowice , & ! ice enthalpy of new snowice (J m-2) + zqsn_snowice , & ! snow enthalpy of snow thats becoming snowice (J m-2) + freeboard_density , & ! negative of ice surface freeboard times the ocean density (kg m-2) + ice_mass , & ! mass of the ice (kg m-2) + rho_ocn , & ! density of the ocean (kg m-3) + ice_density , & ! density of ice layer (kg m-3) + hadded , & ! thickness rate of water used from ocean (m/s) + wadded , & ! mass rate of water used from ocean (kg/m^2/s) + eadded , & ! energy rate of water used from ocean (W/m^2) + sadded ! salt rate of water used from ocean (kg/m^2/s) + + integer :: & + k ! vertical index + + snoice = c0 + + ! check we have snow + if (hsn > puny) then + + rho_ocn = density_brine(sss) + + ! ice mass + ice_mass = c0 + do k = 1, nilyr + ice_density = min(phi(k) * density_brine(Sbr(k)) + (c1 - phi(k)) * rhoi,rho_ocn) + ice_mass = ice_mass + ice_density + enddo ! k + ice_mass = ice_mass * hilyr + + ! negative freeboard times ocean density + freeboard_density = max(ice_mass + hsn * rhos - hin * rho_ocn, c0) + + ! check if have flooded ice + if (freeboard_density > c0) then + + ! sea ice fraction of newly formed snow ice + phi_snowice = (c1 - rhos / rhoi) + + ! density of newly formed snowice + rho_snowice = phi_snowice * rho_ocn + (c1 - phi_snowice) * rhoi + + ! calculate thickness of new ice added + dh = freeboard_density / (rho_ocn - rho_snowice + rhos) + dh = max(min(dh,hsn),c0) + + ! 
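      ! The snow-ice thickness computed above follows from hydrostatic (freeboard)
      ! balance: flooding occurs when the column is too heavy to float with its ice
      ! surface at sea level, i.e. when
      !
      !   freeboard_density = ice_mass + hsn*rhos - hin*rho_ocn > 0 ,
      !
      ! and converting a thickness dh of snow into snow ice gains buoyancy
      ! dh*rho_ocn (the ice column deepens by dh) while adding mass
      ! dh*(rho_snowice - rhos) from the flooded pore space, so the thickness that
      ! restores the balance is
      !
      !   dh = freeboard_density / (rho_ocn - rho_snowice + rhos) ,
      !
      ! clipped to lie between zero and the available snow depth hsn.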
enthalpy of snow that becomes snowice + call enthalpy_snow_snowice(dh, hsn, zqsn, zqsn_snowice) + + ! change thicknesses + hin2 = hin + dh + hsn2 = hsn - dh + + hilyr2 = hin2 / real(nilyr,dbl_kind) + hslyr2 = hsn2 / real(nslyr,dbl_kind) + + ! properties of new snow ice + zSin_snowice = phi_snowice * sss + zqin_snowice = phi_snowice * qocn + zqsn_snowice + + ! change snow properties + call update_vertical_tracers_snow(zqsn, hslyr, hslyr2) + + ! change ice properties + call update_vertical_tracers_ice(zqin, hilyr, hilyr2, & + hin, hin2, zqin_snowice) + call update_vertical_tracers_ice(zSin, hilyr, hilyr2, & + hin, hin2, zSin_snowice) + call update_vertical_tracers_ice(phi, hilyr, hilyr2, & + hin, hin2, phi_snowice) + + ! change thicknesses + hilyr = hilyr2 + hslyr = hslyr2 + snoice = dh + + hadded = (dh * phi_snowice) / dt + wadded = hadded * rhoi + eadded = hadded * qocn + sadded = wadded * ice_ref_salinity * p001 + + ! conservation + fadvheat = fadvheat - eadded + + endif + + endif + + end subroutine flood_ice + +!======================================================================= + + subroutine enthalpy_snow_snowice(dh, hsn, zqsn, zqsn_snowice) + + ! determine enthalpy of the snow being converted to snow ice + + real(kind=dbl_kind), intent(in) :: & + dh , & ! thickness of new snowice formation (m) + hsn ! initial snow thickness + + real(kind=dbl_kind), dimension(1:nslyr), intent(in) :: & + zqsn ! snow layer enthalpy (J m-2) + + real(kind=dbl_kind), intent(out) :: & + zqsn_snowice ! enthalpy of snow becoming snowice (J m-2) + + real(kind=dbl_kind) :: & + rnlyr ! real value of number of snow layers turning to snowice + + integer(kind=int_kind) :: & + nlyr , & ! no of snow layers involved in snow ice formation + k ! snow layer index + + zqsn_snowice = c0 + + ! snow depth and snow layers affected by snowice formation + if (hsn > puny) then + rnlyr = (dh / hsn) * nslyr + nlyr = min(floor(rnlyr),nslyr-1) ! nlyr=0 if nslyr=1 + + ! loop over full snow layers affected + ! not executed if nlyr=0 + do k = nslyr, nslyr-nlyr+1, -1 + zqsn_snowice = zqsn_snowice + zqsn(k) / rnlyr + enddo ! k + + ! partially converted snow layer + zqsn_snowice = zqsn_snowice + & + ((rnlyr - real(nlyr,dbl_kind)) / rnlyr) * zqsn(nslyr-nlyr) + endif + + end subroutine enthalpy_snow_snowice + +!======================================================================= + + subroutine update_vertical_tracers_snow(trc, hlyr1, hlyr2) + + ! given some snow ice formation regrid snow layers + + real(kind=dbl_kind), dimension(1:nslyr), intent(inout) :: & + trc ! vertical tracer + + real(kind=dbl_kind), intent(in) :: & + hlyr1 , & ! old cell thickness + hlyr2 ! new cell thickness + + real(kind=dbl_kind), dimension(1:nslyr) :: & + trc2 ! temporary array for updated tracer + + ! vertical indexes for old and new grid + integer(kind=int_kind) :: & + k1 , & ! vertical index for old grid + k2 ! vertical index for new grid + + real(kind=dbl_kind) :: & + z1a , & ! lower boundary of old cell + z1b , & ! upper boundary of old cell + z2a , & ! lower boundary of new cell + z2b , & ! upper boundary of new cell + overlap ! overlap between old and new cell + + ! loop over new grid cells + do k2 = 1, nslyr + + ! initialize new tracer + trc2(k2) = c0 + + ! calculate upper and lower boundary of new cell + z2a = (k2 - 1) * hlyr2 + z2b = k2 * hlyr2 + + ! loop over old grid cells + do k1 = 1, nslyr + + ! calculate upper and lower boundary of old cell + z1a = (k1 - 1) * hlyr1 + z1b = k1 * hlyr1 + + ! 
calculate overlap between old and new cell + overlap = max(min(z1b, z2b) - max(z1a, z2a), c0) + + ! aggregate old grid cell contribution to new cell + trc2(k2) = trc2(k2) + overlap * trc(k1) + + enddo ! k1 + + ! renormalize new grid cell + trc2(k2) = trc2(k2) / hlyr2 + + enddo ! k2 + + ! update vertical tracer array with the adjusted tracer + trc = trc2 + + end subroutine update_vertical_tracers_snow + +!======================================================================= + + subroutine update_vertical_tracers_ice(trc, hlyr1, hlyr2, & + h1, h2, trc0) + + ! given some snow ice formation regrid ice layers + + real(kind=dbl_kind), dimension(1:nilyr), intent(inout) :: & + trc ! vertical tracer + + real(kind=dbl_kind), intent(in) :: & + hlyr1 , & ! old cell thickness + hlyr2 , & ! new cell thickness + h1 , & ! old total thickness + h2 , & ! new total thickness + trc0 ! tracer value of added snow ice on ice top + + real(kind=dbl_kind), dimension(1:nilyr) :: & + trc2 ! temporary array for updated tracer + + integer(kind=int_kind) :: & + k1 , & ! vertical indexes for old grid + k2 ! vertical indexes for new grid + + real(kind=dbl_kind) :: & + z1a , & ! lower boundary of old cell + z1b , & ! upper boundary of old cell + z2a , & ! lower boundary of new cell + z2b , & ! upper boundary of new cell + overlap ! overlap between old and new cell + + ! loop over new grid cells + do k2 = 1, nilyr + + ! initialize new tracer + trc2(k2) = c0 + + ! calculate upper and lower boundary of new cell + z2a = (k2 - 1) * hlyr2 + z2b = k2 * hlyr2 + + ! calculate upper and lower boundary of added snow ice at top + z1a = c0 + z1b = h2 - h1 + + ! calculate overlap between added ice and new cell + overlap = max(min(z1b, z2b) - max(z1a, z2a), c0) + + ! aggregate added ice contribution to new cell + trc2(k2) = trc2(k2) + overlap * trc0 + + ! loop over old grid cells + do k1 = 1, nilyr + + ! calculate upper and lower boundary of old cell + z1a = (k1 - 1) * hlyr1 + h2 - h1 + z1b = k1 * hlyr1 + h2 - h1 + + ! calculate overlap between old and new cell + overlap = max(min(z1b, z2b) - max(z1a, z2a), c0) + + ! aggregate old grid cell contribution to new cell + trc2(k2) = trc2(k2) + overlap * trc(k1) + + enddo ! k1 + + ! renormalize new grid cell + trc2(k2) = trc2(k2) / hlyr2 + + enddo ! k2 + + ! update vertical tracer array with the adjusted tracer + trc = trc2 + + end subroutine update_vertical_tracers_ice + +!======================================================================= +! Physical Quantities +!======================================================================= + + subroutine conductivity_mush_array(zqin, zSin, km) + + ! detemine the conductivity of the mush from enthalpy and salinity + + real(kind=dbl_kind), dimension(1:nilyr), intent(in) :: & + zqin, & ! ice layer enthalpy (J m-3) + zSin ! ice layer bulk salinity (ppt) + + real(kind=dbl_kind), dimension(1:nilyr), intent(out) :: & + km ! ice layer conductivity (W m-1 K-1) + + integer(kind=int_kind) :: & + k ! ice layer index + + do k = 1, nilyr + + km(k) = & + heat_conductivity(temperature_mush(zqin(k), zSin(k)), zSin(k)) + + enddo ! k + + end subroutine conductivity_mush_array + +!======================================================================= + + function density_brine(Sbr) result(rho) + + ! density of brine from brine salinity + + real(kind=dbl_kind), intent(in) :: & + Sbr ! brine salinity (ppt) + + real(kind=dbl_kind) :: & + rho ! brine density (kg m-3) + + real(kind=dbl_kind), parameter :: & + a = 1000.3_dbl_kind , & ! 
zeroth empirical coefficient + b = 0.78237_dbl_kind , & ! linear empirical coefficient + c = 2.8008e-4_dbl_kind ! quadratic empirical coefficient + + rho = a + b * Sbr + c * Sbr**2 + + end function density_brine + +!======================================================================= +! Snow +!======================================================================= + + subroutine conductivity_snow_array(ks) + + ! heat conductivity of the snow + + real(kind=dbl_kind), dimension(1:nslyr), intent(out) :: & + ks ! snow layer conductivity (W m-1 K-1) + + ks = ksno + + end subroutine conductivity_snow_array + +!======================================================================= + + function enthalpy_snow(zTsn) result(zqsn) + + ! enthalpy of snow from snow temperature + + real(kind=dbl_kind), intent(in) :: & + zTsn ! snow layer temperature (C) + + real(kind=dbl_kind) :: & + zqsn ! snow layer enthalpy (J m-3) + + zqsn = -rhos * (-cp_ice * zTsn + Lfresh) + + end function enthalpy_snow + +!======================================================================= + + function temperature_snow(zqsn) result(zTsn) + + ! temperature of snow from the snow enthalpy + + real(kind=dbl_kind), intent(in) :: & + zqsn ! snow layer enthalpy (J m-3) + + real(kind=dbl_kind) :: & + zTsn ! snow layer temperature (C) + + real(kind=dbl_kind), parameter :: & + A = c1 / (rhos * cp_ice) , & + B = Lfresh / cp_ice + + zTsn = A * zqsn + B + + end function temperature_snow + +!======================================================================= +! Mushy Layer Formulation - Assur (1958) liquidus +!======================================================================= + + function liquidus_brine_salinity_mush(zTin) result(Sbr) + + ! liquidus relation: equilibrium brine salinity as function of temperature + ! based on empirical data from Assur (1958) + + real(kind=dbl_kind), intent(in) :: & + zTin ! ice layer temperature (C) + + real(kind=dbl_kind) :: & + Sbr ! ice brine salinity (ppt) + + real(kind=dbl_kind) :: & + t_high , & ! mask for high temperature liquidus region + lsubzero ! mask for sub-zero temperatures + + t_high = merge(c1, c0, (zTin > Tb_liq)) + lsubzero = merge(c1, c0, (zTin <= c0)) + + Sbr = ((zTin + J1_liq) / (K1_liq * zTin + L1_liq)) * t_high + & + ((zTin + J2_liq) / (K2_liq * zTin + L2_liq)) * (c1 - t_high) + + Sbr = Sbr * lsubzero + + end function liquidus_brine_salinity_mush + +!======================================================================= + + function liquidus_temperature_mush(Sbr) result(zTin) + + ! liquidus relation: equilibrium temperature as function of brine salinity + ! based on empirical data from Assur (1958) + + real(kind=dbl_kind), intent(in) :: & + Sbr ! ice brine salinity (ppt) + + real(kind=dbl_kind) :: & + zTin ! ice layer temperature (C) + + real(kind=dbl_kind) :: & + t_high ! mask for high temperature liquidus region + + t_high = merge(c1, c0, (Sbr <= Sb_liq)) + + zTin = ((Sbr / (M1_liq + N1_liq * Sbr)) + O1_liq) * t_high + & + ((Sbr / (M2_liq + N2_liq * Sbr)) + O2_liq) * (c1 - t_high) + + end function liquidus_temperature_mush + +!======================================================================= + + function enthalpy_mush(zTin, zSin) result(zqin) + + ! enthalpy of mush from mush temperature and bulk salinity + + real(kind=dbl_kind), intent(in) :: & + zTin, & ! ice layer temperature (C) + zSin ! ice layer bulk salinity (ppt) + + real(kind=dbl_kind) :: & + zqin ! ice layer enthalpy (J m-3) + + real(kind=dbl_kind) :: & + phi ! 
ice liquid fraction + + phi = liquid_fraction(zTin, zSin) + + zqin = phi * (cp_ocn * rhow - cp_ice * rhoi) * zTin + & + rhoi * cp_ice * zTin - (c1 - phi) * rhoi * Lfresh + + end function enthalpy_mush + +!======================================================================= + + function enthalpy_mush_liquid_fraction(zTin, phi) result(zqin) + + ! enthalpy of mush from mush temperature and bulk salinity + + real(kind=dbl_kind), intent(in) :: & + zTin, & ! ice layer temperature (C) + phi ! liquid fraction + + real(kind=dbl_kind) :: & + zqin ! ice layer enthalpy (J m-3) + + zqin = phi * (cp_ocn * rhow - cp_ice * rhoi) * zTin + & + rhoi * cp_ice * zTin - (c1 - phi) * rhoi * Lfresh + + end function enthalpy_mush_liquid_fraction + +!======================================================================= + + function enthalpy_of_melting(zSin) result(qm) + + ! enthalpy of melting of mush + ! energy needed to fully melt mush (T < 0) + + real(kind=dbl_kind), intent(in) :: & + zSin ! ice layer bulk salinity (ppt) + + real(kind=dbl_kind) :: & + qm ! melting ice enthalpy (J m-3) + + qm = cp_ocn * rhow * liquidus_temperature_mush(zSin) + + end function enthalpy_of_melting + +!======================================================================= + + function enthalpy_brine(zTin) result(qbr) + + ! enthalpy of brine (fully liquid) + + real(kind=dbl_kind), intent(in) :: & + zTin ! ice layer temperature (C) + + real(kind=dbl_kind) :: & + qbr ! brine enthalpy (J m-3) + + qbr = cp_ocn * rhow * zTin + + end function enthalpy_brine + +!======================================================================= + + function temperature_mush(zqin, zSin) result(zTin) + + ! temperature of mush from mush enthalpy + + real(kind=dbl_kind), intent(in) :: & + zqin , & ! ice enthalpy (J m-3) + zSin ! ice layer bulk salinity (ppt) + + real(kind=dbl_kind) :: & + zTin ! ice layer temperature (C) + + real(kind=dbl_kind) :: & + qb , & ! liquidus break enthalpy + q0 , & ! fully melted enthalpy + A , & ! quadratic equation A parameter + B , & ! quadratic equation B parameter + C , & ! quadratic equation C parameter + S_low , & ! mask for salinity less than the liquidus break salinity + t_high , & ! mask for high temperature liquidus region + t_low , & ! mask for low temperature liquidus region + q_melt ! mask for all mush melted + + ! just melted enthalpy + S_low = merge(c1, c0, (zSin < Sb_liq)) + q0 = ((F1_liq * zSin) / (G1_liq + zSin) + H1_liq) * S_low + & + ((F2_liq * zSin) / (G2_liq + zSin) + H2_liq) * (c1 - S_low) + q_melt = merge(c1, c0, (zqin > q0)) + + ! break enthalpy + qb = D_liq * zSin + E_liq + t_high = merge(c1, c0, (zqin > qb)) + t_low = c1 - t_high + + ! quadratic values + A = (AS1_liq * zSin + AC1_liq) * t_high + & + (AS2_liq * zSin + AC2_liq) * t_low + + B = (BS1_liq * zSin + BQ1_liq * zqin + BC1_liq) * t_high + & + (BS2_liq * zSin + BQ2_liq * zqin + BC2_liq) * t_low + + C = (CS1_liq * zSin + CQ1_liq * zqin + CC1_liq) * t_high + & + (CS2_liq * zSin + CQ2_liq * zqin + CC2_liq) * t_low + + zTin = (-B + sqrt(max(B**2 - c4 * A * C,puny))) / (c2 * A) + + ! change T if all melted + zTin = q_melt * zqin * I_liq + (c1 - q_melt) * zTin + + end function temperature_mush + +!======================================================================= + + function temperature_mush_liquid_fraction(zqin, phi) result(zTin) + + ! temperature of mush from mush enthalpy + + real(kind=dbl_kind), intent(in) :: & + zqin , & ! ice enthalpy (J m-3) + phi ! liquid fraction + + real(kind=dbl_kind) :: & + zTin ! 
ice layer temperature (C) + + zTin = (zqin + (c1 - phi) * rhoi * Lfresh) / & + (phi * (cp_ocn * rhow - cp_ice * rhoi) + rhoi * cp_ice) + + end function temperature_mush_liquid_fraction + +!======================================================================= + + function heat_conductivity(zTin, zSin) result(km) + + ! msuh heat conductivity from mush temperature and bulk salinity + + real(kind=dbl_kind), intent(in) :: & + zTin , & ! ice layer temperature (C) + zSin ! ice layer bulk salinity (ppt) + + real(kind=dbl_kind) :: & + km ! ice layer conductivity (W m-1 K-1) + + real(kind=dbl_kind) :: & + phi ! liquid fraction + + phi = liquid_fraction(zTin, zSin) + + km = phi * (kb - ki) + ki + + end function heat_conductivity + + !======================================================================= + + function liquid_fraction(zTin, zSin) result(phi) + + ! liquid fraction of mush from mush temperature and bulk salinity + + real(kind=dbl_kind), intent(in) :: & + zTin, & ! ice layer temperature (C) + zSin ! ice layer bulk salinity (ppt) + + real(kind=dbl_kind) :: & + phi , & ! liquid fraction + Sbr ! brine salinity (ppt) + + Sbr = max(liquidus_brine_salinity_mush(zTin),puny) + phi = zSin / max(Sbr, zSin) + + end function liquid_fraction + +!======================================================================= + +end module ice_therm_mushy + +!======================================================================= diff --git a/source/ice_therm_shared.F90 b/source/ice_therm_shared.F90 new file mode 100755 index 00000000..4b231874 --- /dev/null +++ b/source/ice_therm_shared.F90 @@ -0,0 +1,282 @@ +! SVN:$Id: ice_therm_shared.F90 710 2013-09-03 22:46:53Z eclare $ +!========================================================================= +! +! Shared thermo variables, subroutines +! +! authors: Elizabeth C. Hunke, LANL +! AEW (Feb 2014): Added extra function calculate_ki_from_Tin, after Alison +! McLaren + + module ice_therm_shared + + use ice_kinds_mod + use ice_domain_size, only: ncat, nilyr, nslyr, max_ntrcr + + implicit none + save + + private + public :: calculate_Tin_from_qin, & + calculate_ki_from_Tin, & + surface_heat_flux, dsurface_heat_flux_dTsf + + integer (kind=int_kind), public :: & + ktherm ! type of thermodynamics + ! 0 = 0-layer approximation + ! 1 = Bitz and Lipscomb 1999 + ! 2 = mushy layer theory + + real (kind=dbl_kind), dimension(nilyr+1), public :: & + Tmlt ! melting temperature + ! nilyr + 1 index is for bottom surface + + real (kind=dbl_kind), parameter, public :: & + ferrmax = 1.0e-3_dbl_kind ! max allowed energy flux error (W m-2) + ! recommend ferrmax < 0.01 W m-2 + + real (kind=dbl_kind), parameter, public :: & + Tmin = -100.0_dbl_kind ! min allowed internal temperature (deg C) + + character (char_len), public :: & + conduct ! 'MU71' or 'bubbly' + + logical (kind=log_kind), public :: & + l_brine ! if true, treat brine pocket effects + + logical (kind=log_kind), public :: & + heat_capacity, &! if true, ice has nonzero heat capacity + ! if false, use zero-layer thermodynamics + calc_Tsfc , &! if true, calculate surface temperature + ! if false, Tsfc is computed elsewhere and + ! atmos-ice fluxes are provided to CICE + cap_fluxes ! AEW: Logical for capping conductive flux + + real (kind=dbl_kind), parameter, public :: & + hfrazilmin = 0.05_dbl_kind ! min thickness of new frazil ice (m) + + real (kind=dbl_kind), parameter, public :: & + betak = 0.13_dbl_kind, & ! constant in formula for k (W m-1 ppt-1) + kimin = 0.10_dbl_kind ! 
min conductivity of saline ice (W m-1 deg-1) + +!======================================================================= + + contains + +!======================================================================= +! +! Compute the internal ice temperatures from enthalpy using +! quadratic formula + + function calculate_Tin_from_qin (qin, Tmltk) & + result(Tin) + + use ice_constants + + real (kind=dbl_kind), intent(in) :: & + qin , & ! enthalpy + Tmltk ! melting temperature at one level + + real (kind=dbl_kind) :: & + Tin ! internal temperature + + ! local variables + + real (kind=dbl_kind) :: & + aa1,bb1,cc1 ! quadratic solvers + + if (l_brine) then + aa1 = cp_ice + bb1 = (cp_ocn-cp_ice)*Tmltk - qin/rhoi - Lfresh + cc1 = Lfresh * Tmltk + Tin = min((-bb1 - sqrt(bb1*bb1 - c4*aa1*cc1)) / & + (c2*aa1),Tmltk) + + else ! fresh ice + Tin = (Lfresh + qin/rhoi) / cp_ice + endif + + end function calculate_Tin_from_qin + +!======================================================================= +! Surface heat flux +!======================================================================= + +! heat flux into ice + + subroutine surface_heat_flux(Tsf, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + flwoutn, fsensn, & + flatn, fsurfn) + + use ice_constants, only: c1, Tffresh, TTTice, qqqice, & + stefan_boltzmann, emissivity + + ! input surface temperature + real(kind=dbl_kind), intent(in) :: & + Tsf ! ice/snow surface temperature (C) + + ! input variables + real(kind=dbl_kind), intent(in) :: & + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef ! transfer coefficient for latent heat + + ! output + real(kind=dbl_kind), intent(out) :: & + fsensn , & ! surface downward sensible heat (W m-2) + flatn , & ! surface downward latent heat (W m-2) + flwoutn , & ! upward LW at surface (W m-2) + fsurfn ! net flux to top surface, excluding fcondtopn + + ! local variables + real(kind=dbl_kind) :: & + TsfK , & ! ice/snow surface temperature (K) + Qsfc , & ! saturated surface specific humidity (kg/kg) + qsat , & ! the saturation humidity of air (kg/m^3) + flwdabs , & ! downward longwave absorbed heat flx (W/m^2) + tmpvar ! 1/TsfK + + ! ice surface temperature in Kelvin + TsfK = Tsf + Tffresh +! TsfK = max(Tsf + Tffresh, c1) + tmpvar = c1/TsfK + + ! saturation humidity + qsat = qqqice * exp(-TTTice*tmpvar) + Qsfc = qsat / rhoa + + ! longwave radiative flux + flwdabs = emissivity * flw + flwoutn = -emissivity * stefan_boltzmann * TsfK**4 + + ! downward latent and sensible heat fluxes + fsensn = shcoef * (potT - TsfK) + flatn = lhcoef * (Qa - Qsfc) + + ! combine fluxes + fsurfn = fswsfc + flwdabs + flwoutn + fsensn + flatn + + end subroutine surface_heat_flux + +!======================================================================= +!BOP +! +! !ROUTINE: calculate_ki_from_Tin - calculate ice thermal conductivity +! +! !DESCRIPTION: +! +! Compute the ice thermal conductivity +! +! !REVISION HISTORY: +! +! !INTERFACE: +! + function calculate_ki_from_Tin (Tink, salink) & + result(ki) + + use ice_constants +! +! !USES: +! +! !INPUT PARAMETERS: +! + real (kind=dbl_kind), intent(in) :: & + Tink , & ! ice layer temperature + salink ! salinity at one level +! +! !OUTPUT PARAMETERS +! + real (kind=dbl_kind) :: & + ki ! ice conductivity + +! +!EOP +! + if (conduct == 'MU71') then + ! 
Maykut and Untersteiner 1971 form (with Wettlaufer 1991 constants) + ki = kice + betak*salink/min(-puny,Tink) + else + ! Pringle et al JGR 2007 'bubbly brine' + ki = (2.11_dbl_kind - 0.011_dbl_kind*Tink & + + 0.09_dbl_kind*salink/min(-puny,Tink)) & + * rhoi / 917._dbl_kind + endif + + ki = max (ki, kimin) + + end function calculate_ki_from_Tin + + !======================================================================= + + subroutine dsurface_heat_flux_dTsf(Tsf, fswsfc, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + dfsurfn_dTsf, dflwoutn_dTsf, & + dfsensn_dTsf, dflatn_dTsf) + + use ice_constants, only: c1, c4, Tffresh, TTTice, qqqice, & + stefan_boltzmann, emissivity + + ! input surface temperature + real(kind=dbl_kind), intent(in) :: & + Tsf ! ice/snow surface temperature (C) + + ! input variables + real(kind=dbl_kind), intent(in) :: & + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + rhoa , & ! air density (kg/m^3) + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + shcoef , & ! transfer coefficient for sensible heat + lhcoef ! transfer coefficient for latent heat + + ! output + real(kind=dbl_kind), intent(out) :: & + dfsurfn_dTsf ! derivative of net flux to top surface, excluding fcondtopn + + real(kind=dbl_kind), intent(out) :: & + dflwoutn_dTsf , & ! derivative of longwave flux wrt surface temperature + dfsensn_dTsf , & ! derivative of sensible heat flux wrt surface temperature + dflatn_dTsf ! derivative of latent heat flux wrt surface temperature + + ! local variables + real(kind=dbl_kind) :: & + TsfK , & ! ice/snow surface temperature (K) + dQsfc_dTsf , & ! saturated surface specific humidity (kg/kg) + qsat , & ! the saturation humidity of air (kg/m^3) + tmpvar ! 1/TsfK + + ! ice surface temperature in Kelvin +! TsfK = max(Tsf + Tffresh, c1) + TsfK = Tsf + Tffresh + tmpvar = c1/TsfK + + ! saturation humidity + qsat = qqqice * exp(-TTTice*tmpvar) + dQsfc_dTsf = TTTice * tmpvar * tmpvar * (qsat / rhoa) + + ! longwave radiative flux + dflwoutn_dTsf = -emissivity * stefan_boltzmann * c4*TsfK**3 + + ! downward latent and sensible heat fluxes + dfsensn_dTsf = -shcoef + dflatn_dTsf = -lhcoef * dQsfc_dTsf + + ! combine fluxes + dfsurfn_dTsf = dflwoutn_dTsf + dfsensn_dTsf + dflatn_dTsf + + end subroutine dsurface_heat_flux_dTsf + +!======================================================================= + + end module ice_therm_shared + +!======================================================================= diff --git a/source/ice_therm_vertical.F90 b/source/ice_therm_vertical.F90 new file mode 100755 index 00000000..a356afd3 --- /dev/null +++ b/source/ice_therm_vertical.F90 @@ -0,0 +1,2696 @@ +! SVN:$Id: ice_therm_vertical.F90 825 2014-08-29 15:37:09Z eclare $ +!========================================================================= +! +! Update ice and snow internal temperatures and compute +! thermodynamic growth rates and atmospheric fluxes. +! +! NOTE: The thermodynamic calculation is split in two for load balancing. +! First ice_therm_vertical computes vertical growth rates and coupler +! fluxes. Then ice_therm_itd does thermodynamic calculations not +! needed for coupling. +! +! authors: William H. Lipscomb, LANL +! C. M. Bitz, UW +! Elizabeth C. Hunke, LANL +! +! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb +! 2004: Block structure added by William Lipscomb +! 2006: Streamlined for efficiency by Elizabeth Hunke +! 
Converted to free source form (F90) + + module ice_therm_vertical + + use ice_kinds_mod + use ice_domain_size, only: ncat, nilyr, nslyr, max_ntrcr, max_blocks + use ice_calendar, only: istep1 + use ice_constants + use ice_fileunits, only: nu_diag + use ice_state, only: tr_iage, tr_pond_topo, nt_apnd, nt_hpnd, tr_pond, & + nt_Tsfc, nt_iage, nt_sice, nt_qice, nt_qsno, & + nt_apnd, nt_hpnd + use ice_therm_shared, only: ktherm, ferrmax, heat_capacity, l_brine, & + calc_Tsfc, calculate_tin_from_qin, Tmin, & + cap_fluxes + use ice_therm_bl99, only: temperature_changes + use ice_therm_0layer, only: zerolayer_temperature + use ice_flux, only: Tf + use ice_zbgc_shared, only: min_salin + + implicit none + save + + private + public :: init_thermo_vertical, frzmlt_bottom_lateral, thermo_vertical + + real (kind=dbl_kind), public :: & + saltmax ! max salinity at ice base for BL99 (ppt) + ! Now set in namelist + + real (kind=dbl_kind), parameter, public :: & + ! phi_init and dSin0_frazil are used for mushy thermo, ktherm=2 + phi_init = 0.75_dbl_kind, & ! initial liquid fraction of frazil + dSin0_frazil = c3 ! bulk salinity reduction of newly formed frazil + + real (kind=dbl_kind), public :: & +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + ustar_min , & ! minimum friction velocity for ice-ocean heat flux + chio ! default 0.006 = unitless param for basal heat flx ala McPhee and Maykut +#else + ustar_min ! minimum friction velocity for ice-ocean heat flux +#endif + character (len=char_len), public :: & + fbot_xfer_type ! transfer coefficient type for ice-ocean heat flux + +!======================================================================= + + contains + +!======================================================================= +! +! Driver for updating ice and snow internal temperatures and +! computing thermodynamic growth rates and atmospheric fluxes. +! +! authors: William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine thermo_vertical (nx_block, ny_block, & + dt, icells, & + indxi, indxj, & + aicen, trcrn, & + vicen, vsnon, & + flw, potT, & + Qa, rhoa, & + fsnow, fpond, & + fbot, Tbot, & + sss, & + lhcoef, shcoef, & + fswsfc, fswint, & + Sswabs, Iswabs, & + fsurfn, fcondtopn, & + fsensn, flatn, & + flwoutn, evapn, & + freshn, fsaltn, & + fhocnn, meltt, & + melts, meltb, & + congel, snoice, & + mlt_onset, frz_onset, & + yday, l_stop, & + istop, jstop, & + dsnow) + + use ice_communicate, only: my_task + use ice_therm_mushy, only: temperature_changes_salinity +#ifdef CCSMCOUPLED + use ice_prescribed_mod, only: prescribed_ice +#endif + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with ice present + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with ice + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! ice state variables + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_ntrcr), & + intent(inout) :: & + trcrn + + ! input from atmosphere + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + flw , & ! incoming longwave radiation (W/m^2) + potT , & ! air potential temperature (K) + Qa , & ! specific humidity (kg/kg) + rhoa , & ! air density (kg/m^3) + fsnow , & ! snowfall rate (kg m-2 s-1) + shcoef , & ! 
transfer coefficient for sensible heat + lhcoef ! transfer coefficient for latent heat + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + fswsfc , & ! SW absorbed at ice/snow surface (W m-2) + fswint , & ! SW absorbed in ice interior, below surface (W m-2) + fpond ! fresh water flux to ponds (kg/m^2/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nslyr), & + intent(inout) :: & + Sswabs ! SW radiation absorbed in snow layers (W m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), & + intent(inout) :: & + Iswabs ! SW radiation absorbed in ice layers (W m-2) + + ! input from ocean + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + fbot , & ! ice-ocean heat flux at bottom surface (W/m^2) + Tbot , & ! ice bottom surface temperature (deg C) + sss ! ocean salinity + + ! coupler fluxes to atmosphere + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & + flwoutn , & ! outgoing longwave radiation (W/m^2) + evapn ! evaporative water flux (kg/m^2/s) + + ! Note: these are intent out if calc_Tsfc = T, otherwise intent in + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout):: & + fsensn , & ! sensible heat flux (W/m^2) + flatn , & ! latent heat flux (W/m^2) + fsurfn , & ! net flux to top surface, excluding fcondtopn + fcondtopn ! downward cond flux at top surface (W m-2) + + ! coupler fluxes to ocean + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & + freshn , & ! fresh water flux to ocean (kg/m^2/s) + fsaltn , & ! salt flux to ocean (kg/m^2/s) + fhocnn ! net heat flux to ocean (W/m^2) + + ! diagnostic fields + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout):: & + meltt , & ! top ice melt (m/step-->cm/day) + melts , & ! snow melt (m/step-->cm/day) + meltb , & ! basal ice melt (m/step-->cm/day) + congel , & ! basal ice growth (m/step-->cm/day) + snoice , & ! snow-ice formation (m/step-->cm/day) + dsnow , & ! change in snow thickness (m/step-->cm/day) + mlt_onset, & ! day of year that sfc melting begins + frz_onset ! day of year that freezing begins (congel or frazil) + + real (kind=dbl_kind), intent(in) :: & + yday ! day of year + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, print diagnostics and abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where code aborts + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k ! ice layer index + + real (kind=dbl_kind) :: & + dhi , & ! change in ice thickness + dhs ! change in snow thickness + +! 2D state variables (thickness, temperature, enthalpy) + + real (kind=dbl_kind), dimension (icells) :: & + hilyr , & ! ice layer thickness + hslyr , & ! snow layer thickness + Tsf , & ! ice/snow top surface temp, same as Tsfcn (deg C) + hin , & ! ice thickness (m) + hsn , & ! snow thickness (m) + hsn_new , & ! thickness of new snow (m) + worki , & ! local work array + works ! local work array + + real (kind=dbl_kind), dimension (icells,nilyr) :: & + zqin , & ! ice layer enthalpy, zqin < 0 (J m-3) + zTin , & ! internal ice layer temperatures + zSin ! internal ice layer salinities + + real (kind=dbl_kind), dimension (icells,nslyr) :: & + zqsn , & ! snow layer enthalpy, zqsn < 0 (J m-3) + zTsn ! internal snow layer temperatures + +! other 2D flux and energy variables + + real (kind=dbl_kind), dimension (icells) :: & + fcondbot , & ! 
downward cond flux at bottom surface (W m-2) + einit , & ! initial energy of melting (J m-2) + efinal , & ! final energy of melting (J m-2) + einter ! intermediate energy + +! echmod: reduce size to icells? + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + iage ! ice age (s) + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + fadvocn ! advective heat flux to ocean + + real (kind=dbl_kind), dimension (icells) :: & + enum ! energy not used by the temperature solver (due to + ! limiting) that should be returned to the ocean. + + real (kind=dbl_kind) :: & + fcondtopn_extra(nx_block,ny_block), & + fcondtopn_solve(nx_block,ny_block) + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + l_stop = .false. + istop = 0 + jstop = 0 + + do j=1, ny_block + do i=1, nx_block + + fcondtopn_solve(i,j) = c0 + fcondtopn_extra(i,j) = c0 + + flwoutn(i,j) = c0 + evapn (i,j) = c0 + + freshn (i,j) = c0 + fsaltn (i,j) = c0 + fhocnn (i,j) = c0 + fadvocn(i,j) = c0 + + meltt (i,j) = c0 + meltb (i,j) = c0 + melts (i,j) = c0 + congel (i,j) = c0 + snoice (i,j) = c0 + dsnow (i,j) = c0 + if (tr_iage) iage(i,j) = trcrn(i,j,nt_iage) + enddo + enddo + + if (calc_Tsfc) then + do j=1, ny_block + do i=1, nx_block + fsensn (i,j) = c0 + flatn (i,j) = c0 + fsurfn (i,j) = c0 + fcondtopn(i,j) = c0 + enddo + enddo + endif + + !----------------------------------------------------------------- + ! Compute variables needed for vertical thermo calculation + !----------------------------------------------------------------- + + call init_vertical_profile (nx_block, ny_block, & + my_task, istep1, & + icells, & + indxi, indxj, & + aicen(:,:), & + vicen(:,:), vsnon(:,:), & + trcrn(:,:,:), & + hin, hilyr, & + hsn, hslyr, & + zqin, zTin, & + zqsn, zTsn, & + zSin, & + Tsf, einit, & + Tbot, l_stop, & + istop, jstop) + if (l_stop) return + + do ij = 1, icells + ! Save initial ice and snow thickness (for fresh and fsalt) + worki(ij) = hin(ij) + works(ij) = hsn(ij) + enddo + + !----------------------------------------------------------------- + ! Compute new surface temperature and internal ice and snow + ! temperatures. + !----------------------------------------------------------------- + + if (heat_capacity) then ! usual case + + if (ktherm == 2) then + + call temperature_changes_salinity(nx_block, ny_block, & + my_task, istep1, & + dt, icells, & + indxi, indxj, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fswsfc, fswint, & + Sswabs, Iswabs, & + hilyr, hslyr, & + zqin, zTin, & + zqsn, zTsn, & + zSin, & + trcrn, & + Tsf, Tbot, & + sss, & + fsensn, flatn, & + flwoutn, fsurfn, & + fcondtopn, fcondbot, & + fadvocn, snoice, & + einit, l_stop, & + istop, jstop) + + else ! ktherm + + !------------------ Flux capping code ------------------------------- + ! To be used with the UM-style coupling formulation (calc_Tsfc=.false.), + ! in which high fluxes can occasionally cause the thermo solver to crash. + ! Reduce fluxes either when ice is too thin, or when ice is getting too + ! cold. 
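+ ! Worked example of the capping (illustrative numbers, not model output),
+ ! using the parameter values set in cap_conductive_flux
+ ! (ratio_Wm2_m = 1000 W m-2 per metre of ice, cold_temp_flag = -60 C):
+ ! hin = 0.05 m and fcondtopn = -80 W m-2 give a cap of 50 W m-2, so
+ ! fcondtopn_solve = -50 W m-2 and fcondtopn_extra = -30 W m-2.
+ ! If the top layer temperature is -70 C and fcondtopn_solve < 0, a further
+ ! fraction reduce_ratio = (-60 - (-70))/(100 - 60) = 0.25 of fcondtopn_solve
+ ! (-12.5 W m-2) is moved into fcondtopn_extra, leaving -37.5 and -42.5 W m-2.
+ ! The two parts always sum to the original fcondtopn; the extra part is
+ ! applied at the ice base in thickness_changes, so no energy is lost.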
+ + if (cap_fluxes) then + call cap_conductive_flux(nx_block,ny_block,my_task,icells,indxi,indxj,& + fcondtopn,fcondtopn_solve,fcondtopn_extra,hin,zTsn,zTin,hslyr) + + else + do i = 1,nx_block + do j = 1,ny_block + fcondtopn_solve(i,j) = fcondtopn(i,j) + fcondtopn_extra(i,j) = c0 + enddo + enddo + endif + + + !------------------ End of new code------------------------------- + + call temperature_changes(nx_block, ny_block, & + my_task, istep1, & + dt, icells, & + indxi, indxj, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fswsfc, fswint, & + Sswabs, Iswabs, & + hilyr, hslyr, & + zqin, zTin, & + zqsn, zTsn, & + zSin, & + Tsf, Tbot, & + fsensn, flatn, & + flwoutn, fsurfn, & + fcondtopn_solve,fcondbot, & + einit, l_stop, & + istop, jstop, & + enum) + + if (calc_Tsfc) then + ! Need to read fcondtopn_solve BACK INTO fcondtopn + ! during forced runs or we'll get nonsensical top melt... + do i = 1,nx_block + do j = 1,ny_block + fcondtopn(i,j) = fcondtopn_solve(i,j) + end do + end do + end if + + endif ! ktherm + + else + + if (calc_Tsfc) then + + call zerolayer_temperature(nx_block, ny_block, & + my_task, istep1, & + dt, icells, & + indxi, indxj, & + rhoa, flw, & + potT, Qa, & + shcoef, lhcoef, & + fswsfc, & + hilyr, hslyr, & + Tsf, Tbot, & + fsensn, flatn, & + flwoutn, fsurfn, & + fcondtopn, fcondbot, & + l_stop, & + istop, jstop) + + else + + !------------------------------------------------------------ + ! Set fcondbot = fcondtop for zero layer thermodynamics + ! fcondtop is set in call to set_sfcflux in step_therm1 + !------------------------------------------------------------ + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + fcondbot(ij) = fcondtopn(i,j) ! zero layer + enddo + + endif ! calc_Tsfc + + endif ! heat_capacity + + ! intermediate energy for error check + do ij = 1, icells + einter(ij) = c0 + do k = 1, nslyr + einter(ij) = einter(ij) + hslyr(ij) * zqsn(ij,k) + enddo ! k + do k = 1, nilyr + einter(ij) = einter(ij) + hilyr(ij) * zqin(ij,k) + enddo ! k + enddo ! ij + + if (l_stop) return + + !----------------------------------------------------------------- + ! Compute growth and/or melting at the top and bottom surfaces. + ! Add new snowfall. + ! Repartition ice into equal-thickness layers, conserving energy. + !----------------------------------------------------------------- + + call thickness_changes(nx_block, ny_block, & + dt, & + yday, icells, & + indxi, indxj, & + efinal, & + hin, hilyr, & + hsn, hslyr, & + zqin, zqsn, & + fbot, Tbot, & + flatn, fsurfn, & + fcondtopn, fcondbot, & + fsnow, hsn_new, & + fhocnn, evapn, & + meltt, melts, & + meltb, iage, & + congel, snoice, & + mlt_onset, frz_onset,& + zSin, sss, & + dsnow, enum, & + fcondtopn_extra) + + !----------------------------------------------------------------- + ! Check for energy conservation by comparing the change in energy + ! to the net energy input + !----------------------------------------------------------------- + + call conservation_check_vthermo(nx_block, ny_block, & + my_task, istep1, & + dt, icells, & + indxi, indxj, & + fsurfn, flatn, & + fhocnn, fswint, & + fsnow, einit, & + einter, efinal, & + fcondtopn,fcondbot, & + fadvocn, & + fbot, l_stop, & + istop, jstop, & + fcondtopn_solve,fcondtopn_extra, & + enum) + + if (l_stop) return + + !----------------------------------------------------------------- + ! 
If prescribed ice, set hi back to old values + !----------------------------------------------------------------- + +#ifdef CCSMCOUPLED + if (prescribed_ice) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + hin(ij) = worki(ij) + fhocnn(i,j) = c0 ! for diagnostics + enddo ! ij + endif +#endif + + !----------------------------------------------------------------- + ! Compute fluxes of water and salt from ice to ocean. + ! evapn < 0 => sublimation, evapn > 0 => condensation + ! aerosol flux is accounted for in ice_aerosol.F90 + !----------------------------------------------------------------- + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + dhi = hin(ij) - worki(ij) + dhs = hsn(ij) - works(ij) - hsn_new(ij) + + freshn(i,j) = freshn(i,j) + & + evapn(i,j) - & + (rhoi*dhi + rhos*dhs) / dt + fsaltn(i,j) = fsaltn(i,j) - & + rhoi*dhi*ice_ref_salinity*p001/dt + + fhocnn(i,j) = fhocnn(i,j) + fadvocn(i,j) ! for ktherm=2 + + if (hin(ij) == c0) then + if (tr_pond_topo) & + fpond(i,j) = fpond(i,j) - aicen(i,j) & + * trcrn(i,j,nt_apnd) * trcrn(i,j,nt_hpnd) + endif + + enddo ! ij + + !----------------------------------------------------------------- + ! Given the vertical thermo state variables, compute the new ice + ! state variables. + !----------------------------------------------------------------- + + call update_state_vthermo(nx_block, ny_block, & + icells, & + indxi, indxj, & + Tbot, Tsf, & + hin, hsn, & + zqin, zSin, & + zqsn, & + aicen(:,:), & + vicen(:,:), vsnon(:,:), & + trcrn(:,:,:)) + + !----------------------------------------------------------------- + ! Reload passive tracer array + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + if (tr_iage) trcrn(i,j,nt_iage) = iage(i,j) + enddo + enddo + + end subroutine thermo_vertical + +!======================================================================= +! +! Initialize the vertical profile of ice salinity and melting temperature. +! +! authors: C. M. Bitz, UW +! William H. Lipscomb, LANL + + subroutine init_thermo_vertical + + use ice_blocks, only: nx_block, ny_block + use ice_flux, only: salinz, Tmltz, sss + + integer (kind=int_kind) :: & + i, j, iblk ! horizontal indices + + real (kind=dbl_kind), parameter :: & + nsal = 0.407_dbl_kind, & + msal = 0.573_dbl_kind + + integer (kind=int_kind) :: k ! ice layer index + real (kind=dbl_kind) :: zn ! normalized ice thickness + + !----------------------------------------------------------------- + ! Determine l_brine based on saltmax. + ! Thermodynamic solver will not converge if l_brine is true and + ! saltmax is close to zero. + ! Set l_brine to false for zero layer thermodynamics + !----------------------------------------------------------------- + + heat_capacity = .true. + if (ktherm == 0) heat_capacity = .false. ! 0-layer thermodynamics + + l_brine = .false. + if (saltmax > min_salin .and. heat_capacity) l_brine = .true. + + !----------------------------------------------------------------- + ! Prescibe vertical profile of salinity and melting temperature. + ! Note this profile is only used for BL99 thermodynamics. 
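+ ! For reference, the profile coded below is, with normalised layer depth
+ ! zn = (k - 1/2)/nilyr,
+ !   salinz = (saltmax/2) * (1 - cos(pi * zn**(nsal/(msal + zn)))),
+ ! with nsal = 0.407 and msal = 0.573, floored at min_salin, and
+ ! Tmltz = -depressT * salinz. The profile increases monotonically from
+ ! near-fresh values at the top of the ice toward saltmax; the nilyr+1
+ ! (basal surface) entry is set to saltmax exactly.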
+ !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,k,zn) + do iblk = 1,max_blocks + do j = 1, ny_block + do i = 1, nx_block + if (l_brine) then + do k = 1, nilyr + zn = (real(k,kind=dbl_kind)-p5) / & + real(nilyr,kind=dbl_kind) + salinz(i,j,k,iblk)=(saltmax/c2)*(c1-cos(pi*zn**(nsal/(msal+zn)))) + salinz(i,j,k,iblk) = max(salinz(i,j,k,iblk), min_salin) + Tmltz (i,j,k,iblk) = -salinz(i,j,k,iblk)*depressT + enddo ! k + salinz(i,j,nilyr+1,iblk) = saltmax + Tmltz(i,j,nilyr+1,iblk) = -salinz(i,j,nilyr+1,iblk)*depressT + + else ! .not. l_brine + do k = 1, nilyr+1 + salinz(i,j,k,iblk) = c0 + Tmltz(i,j,k,iblk) = c0 + enddo + endif ! l_brine + + enddo !i + enddo !j + enddo !iblk + !$OMP END PARALLEL DO + + end subroutine init_thermo_vertical + +!======================================================================= +! +! Compute heat flux to bottom surface. +! Compute fraction of ice that melts laterally. +! +! authors C. M. Bitz, UW +! William H. Lipscomb, LANL +! Elizabeth C. Hunke, LANL + + subroutine frzmlt_bottom_lateral (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + ntrcr, dt, & + aice, frzmlt, & + vicen, vsnon, & + trcrn, & + sst, Tf, & + strocnxT, strocnyT, & + Tbot, fbot, & + rside, Cdn_ocn) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + ntrcr ! number of tracers + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + aice , & ! ice concentration + frzmlt , & ! freezing/melting potential (W/m^2) + sst , & ! sea surface temperature (C) + Tf , & ! freezing temperature (C) + Cdn_ocn , & ! ocean-ice neutral drag coefficient + strocnxT, & ! ice-ocean stress, x-direction + strocnyT ! ice-ocean stress, y-direction + + real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & + intent(in) :: & + vicen , & ! ice volume (m) + vsnon ! snow volume (m) + + real (kind=dbl_kind), dimension(nx_block,ny_block,ntrcr,ncat), & + intent(in) :: & + trcrn ! tracer array + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(out) :: & + Tbot , & ! ice bottom surface temperature (deg C) + fbot , & ! heat flux to ice bottom (W/m^2) + rside ! fraction of ice that melts laterally + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + n , & ! thickness category index + k , & ! layer index + ij , & ! horizontal index, combines i and j loops + imelt ! number of cells with ice melting + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, indxj ! compressed indices for cells with ice melting + + real (kind=dbl_kind), dimension (:), allocatable :: & + etot , & ! total energy in column + fside ! lateral heat flux (W/m^2) + + real (kind=dbl_kind) :: & + deltaT , & ! SST - Tbot >= 0 + ustar , & ! skin friction velocity for fbot (m/s) + wlat , & ! lateral melt rate (m/s) + xtmp ! temporary variable + + ! Parameters for bottom melting + + real (kind=dbl_kind) :: & + cpchr ! -cp_ocn*rhow*exchange coefficient + + ! Parameters for lateral melting + + real (kind=dbl_kind), parameter :: & + floediam = 300.0_dbl_kind, & ! effective floe diameter (m) + floeshape = 0.66_dbl_kind , & ! constant from Steele (unitless) + m1 = 1.6e-6_dbl_kind , & ! constant from Maykut & Perovich + ! (m/s/deg^(-m2)) + m2 = 1.36_dbl_kind ! constant from Maykut & Perovich + ! 
(unitless) +!#if defined(AusCOM) || defined(ACCICE) +#ifdef AusCOM + cpchr = -cp_ocn*rhow*chio +#endif + + do j = 1, ny_block + do i = 1, nx_block + rside(i,j) = c0 + Tbot (i,j) = Tf(i,j) + fbot (i,j) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! Identify grid cells where ice can melt. + !----------------------------------------------------------------- + + imelt = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j) > puny .and. frzmlt(i,j) < c0) then ! ice can melt + imelt = imelt + 1 + indxi(imelt) = i + indxj(imelt) = j + endif + enddo ! i + enddo ! j + + allocate(etot (imelt)) + allocate(fside(imelt)) + + do ij = 1, imelt ! cells where ice can melt + i = indxi(ij) + j = indxj(ij) + + fside(ij) = c0 + + !----------------------------------------------------------------- + ! Use boundary layer theory for fbot. + ! See Maykut and McPhee (1995): JGR, 100, 24,691-24,703. + !----------------------------------------------------------------- + + deltaT = max((sst(i,j)-Tbot(i,j)),c0) + + ! strocnx has units N/m^2 so strocnx/rho has units m^2/s^2 + ustar = sqrt (sqrt(strocnxT(i,j)**2+strocnyT(i,j)**2)/rhow) + ustar = max (ustar,ustar_min) + + if (trim(fbot_xfer_type) == 'Cdn_ocn') then + ! Note: Cdn_ocn has already been used for calculating ustar + ! (formdrag only) --- David Schroeder (CPOM) + cpchr = -cp_ocn*rhow*Cdn_ocn(i,j) + else ! fbot_xfer_type == 'constant' + ! 0.006 = unitless param for basal heat flx ala McPhee and Maykut + cpchr = -cp_ocn*rhow*0.006_dbl_kind + endif + + fbot(i,j) = cpchr * deltaT * ustar ! < 0 + + fbot(i,j) = max (fbot(i,j), frzmlt(i,j)) ! frzmlt < fbot < 0 + +!!! uncomment to use all frzmlt for standalone runs + ! fbot(i,j) = min (c0, frzmlt(i,j)) + + !----------------------------------------------------------------- + ! Compute rside. See these references: + ! Maykut and Perovich (1987): JGR, 92, 7032-7044 + ! Steele (1992): JGR, 97, 17,729-17,738 + !----------------------------------------------------------------- + + wlat = m1 * deltaT**m2 ! Maykut & Perovich + rside(i,j) = wlat*dt*pi/(floeshape*floediam) ! Steele + rside(i,j) = max(c0,min(rside(i,j),c1)) + + enddo ! ij + + !----------------------------------------------------------------- + ! Compute heat flux associated with this value of rside. + !----------------------------------------------------------------- + + do n = 1, ncat + + do ij = 1, imelt + etot(ij) = c0 + enddo + + ! melting energy/unit area in each column, etot < 0 + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, imelt + i = indxi(ij) + j = indxj(ij) + etot(ij) = etot(ij) + trcrn(i,j,nt_qsno+k-1,n) & + * vsnon(i,j,n)/real(nslyr,kind=dbl_kind) + enddo ! ij + enddo + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, imelt + i = indxi(ij) + j = indxj(ij) + etot(ij) = etot(ij) + trcrn(i,j,nt_qice+k-1,n) & + * vicen(i,j,n)/real(nilyr,kind=dbl_kind) + enddo ! ij + enddo ! nilyr + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, imelt + i = indxi(ij) + j = indxj(ij) + ! lateral heat flux + fside(ij) = fside(ij) + rside(i,j)*etot(ij)/dt ! fside < 0 + enddo ! ij + + enddo ! n + + !----------------------------------------------------------------- + ! Limit bottom and lateral heat fluxes if necessary. 
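+ ! Illustrative example (assumed values, not model output): if
+ ! frzmlt = -40 W m-2 while fbot = -30 W m-2 and fside = -30 W m-2, then
+ ! xtmp = -40/(-60) = 2/3, so fbot is scaled to -20 W m-2 and rside is
+ ! reduced by the same factor (implying fside of -20 W m-2), and the
+ ! combined bottom plus lateral melt heat matches frzmlt. If instead
+ ! |fbot + fside| is already less than |frzmlt|, xtmp is capped at c1 and
+ ! the fluxes are left unchanged.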
+ !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, imelt + i = indxi(ij) + j = indxj(ij) + + xtmp = frzmlt(i,j)/(fbot(i,j) + fside(ij) + puny) + xtmp = min(xtmp, c1) + fbot (i,j) = fbot (i,j) * xtmp + rside(i,j) = rside(i,j) * xtmp + enddo ! ij + + deallocate(etot) + deallocate(fside) + + end subroutine frzmlt_bottom_lateral + +!======================================================================= +! +! Given the state variables (vicen, vsnon, trcrn) +! compute variables needed for the vertical thermodynamics +! (hin, hsn, zqin, zqsn, zTin, zTsn, Tsf). +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine init_vertical_profile(nx_block, ny_block, & + my_task, istep1, & + icells, & + indxi, indxj, & + aicen, vicen, & + vsnon, trcrn, & + hin, hilyr, & + hsn, hslyr, & + zqin, zTin, & + zqsn, zTsn, & + zSin, & + Tsf, einit, & + Tbot, l_stop, & + istop, jstop) + + use ice_itd, only: hs_min + use ice_therm_mushy, only: temperature_mush, & + liquidus_temperature_mush, & + enthalpy_of_melting + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + my_task , & ! task number (diagnostic only) + istep1 , & ! time step index (diagnostic only) + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_ntrcr), & + intent(in) :: & + trcrn ! tracer array + + real (kind=dbl_kind), dimension(icells), intent(out):: & + hilyr , & ! ice layer thickness + hslyr , & ! snow layer thickness + Tsf , & ! ice/snow surface temperature, Tsfcn + einit ! initial energy of melting (J m-2) + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in):: & + Tbot ! bottom ice temp (C) + + real (kind=dbl_kind), dimension(icells), intent(out):: & + hin , & ! ice thickness (m) + hsn ! snow thickness (m) + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(out) :: & + zqin , & ! ice layer enthalpy (J m-3) + zTin , & ! internal ice layer temperatures + zSin ! internal ice layer salinities + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(out) :: & + zqsn , & ! snow enthalpy + zTsn ! snow temperature + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, print diagnostics and abort model + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! i and j indices of cell where model fails + + ! local variables + real (kind=dbl_kind), dimension(icells,nilyr) :: & + Tmlts ! melting temperature + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k ! ice layer index + + real (kind=dbl_kind) :: & + rnslyr, & ! real(nslyr) + Tmax ! maximum allowed snow/ice temperature (deg C) + + logical (kind=log_kind) :: & ! for vector-friendly error checks + tsno_high , & ! flag for zTsn > Tmax + tice_high , & ! flag for zTin > Tmlt + tsno_low , & ! flag for zTsn < Tmin + tice_low ! flag for zTin < Tmin + + !----------------------------------------------------------------- + ! 
Initialize + !----------------------------------------------------------------- + + rnslyr = real(nslyr,kind=dbl_kind) + + do ij = 1, icells + einit(ij) = c0 + enddo + + tsno_high = .false. + tice_high = .false. + tsno_low = .false. + tice_low = .false. + + !----------------------------------------------------------------- + ! Load arrays for vertical thermo calculation. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !----------------------------------------------------------------- + ! Surface temperature, ice and snow thickness + ! Initialize internal energy + !----------------------------------------------------------------- + + Tsf(ij) = trcrn(i,j,nt_Tsfc) + hin(ij) = vicen(i,j) / aicen(i,j) + hsn(ij) = vsnon(i,j) / aicen(i,j) + hilyr(ij) = hin(ij) / real(nilyr,kind=dbl_kind) + hslyr(ij) = hsn(ij) / rnslyr + + enddo ! ij + + !----------------------------------------------------------------- + ! Snow enthalpy and maximum allowed snow temperature + ! If heat_capacity = F, zqsn and zTsn are used only for checking + ! conservation. + !----------------------------------------------------------------- + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !----------------------------------------------------------------- + ! Tmax based on the idea that dT ~ dq / (rhos*cp_ice) + ! dq ~ q dv / v + ! dv ~ puny = eps11 + ! where 'd' denotes an error due to roundoff. + !----------------------------------------------------------------- + + if (hslyr(ij) > hs_min/rnslyr .and. heat_capacity) then + ! zqsn < 0 + zqsn(ij,k) = trcrn(i,j,nt_qsno+k-1) + Tmax = -zqsn(ij,k)*puny*rnslyr / & + (rhos*cp_ice*vsnon(i,j)) + else + zqsn (ij,k) = -rhos * Lfresh + Tmax = puny + endif + + !----------------------------------------------------------------- + ! Compute snow temperatures from enthalpies. + ! Note: zqsn <= -rhos*Lfresh, so zTsn <= 0. + !----------------------------------------------------------------- + zTsn(ij,k) = (Lfresh + zqsn(ij,k)/rhos)/cp_ice + + !----------------------------------------------------------------- + ! Check for zTsn > Tmax (allowing for roundoff error) and zTsn < Tmin. + !----------------------------------------------------------------- + if (zTsn(ij,k) > Tmax) then + tsno_high = .true. + elseif (zTsn(ij,k) < Tmin) then + tsno_low = .true. + endif + + enddo ! ij + enddo ! nslyr + + !----------------------------------------------------------------- + ! If zTsn is out of bounds, print diagnostics and exit. + !----------------------------------------------------------------- + + if (tsno_high .and. heat_capacity) then + do k = 1, nslyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (hslyr(ij) > hs_min/rnslyr) then + Tmax = -zqsn(ij,k)*puny*rnslyr / & + (rhos*cp_ice*vsnon(i,j)) + else + Tmax = puny + endif + + if (zTsn(ij,k) > Tmax) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Starting thermo, zTsn > Tmax' + write(nu_diag,*) 'zTsn=',zTsn(ij,k) + write(nu_diag,*) 'Tmax=',Tmax + write(nu_diag,*) 'istep1, my_task, i, j:', & + istep1, my_task, i, j + write(nu_diag,*) 'zqsn',zqsn(ij,k),-Lfresh*rhos,zqsn(ij,k)+Lfresh*rhos + write(nu_diag,*) 'XX=>zTsn=',zTsn(ij,k),hslyr(ij),hin(ij),aicen(i,j) +!BX: drag zTsn back ------ + zTsn(ij,k) = Tmax +! +!BX l_stop = .true. + l_stop = .false. + istop = i + jstop = j + return + endif + + enddo ! ij + enddo ! 
nslyr + endif ! tsno_high + + if (tsno_low .and. heat_capacity) then + do k = 1, nslyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (zTsn(ij,k) < Tmin) then ! allowing for roundoff error + write(nu_diag,*) ' ' + write(nu_diag,*) 'Starting thermo, zTsn < Tmin' + write(nu_diag,*) 'zTsn=', zTsn(ij,k) + write(nu_diag,*) 'Tmin=', Tmin + write(nu_diag,*) 'istep1, my_task, i, j:', & + istep1, my_task, i, j + write(nu_diag,*) 'zqsn', zqsn(ij,k) + write(nu_diag,*) hin(ij) + write(nu_diag,*) hsn(ij) + write(nu_diag,*) 0, Tsf(ij) +!BX: grad zTsn back ------ + zTsn(ij,k) = Tmin +! +!BX: l_stop = .true. + l_stop = .false. + istop = i + jstop = j + return + endif + + enddo ! ij + enddo ! nslyr + endif ! tsno_low + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + + if (zTsn(ij,k) > c0) then ! correct roundoff error + zTsn(ij,k) = c0 + zqsn(ij,k) = -rhos*Lfresh + endif + + !----------------------------------------------------------------- + ! initial energy per unit area of ice/snow, relative to 0 C + !----------------------------------------------------------------- + einit(ij) = einit(ij) + hslyr(ij)*zqsn(ij,k) + + enddo ! ij + enddo ! nslyr + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !--------------------------------------------------------------------- + ! Use initial salinity profile for thin ice + !--------------------------------------------------------------------- + + zSin(ij,k) = trcrn(i,j,nt_sice+k-1) + if (ktherm == 1 .and. zSin(ij,k) < min_salin-puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Starting zSin < min_salin, layer', k + write(nu_diag,*) 'zSin =', zSin(ij,k) + write(nu_diag,*) 'min_salin =', min_salin + write(nu_diag,*) 'istep1, my_task, i, j:', & + istep1, my_task, i, j + l_stop = .true. + istop = i + jstop = j + return + endif + + if (ktherm == 2) then + Tmlts(ij,k) = liquidus_temperature_mush(zSin(ij,k)) + else + Tmlts(ij,k) = -zSin(ij,k) * depressT + endif + + !----------------------------------------------------------------- + ! Compute ice enthalpy + ! If heat_capacity = F, zqin and zTin are used only for checking + ! conservation. + !----------------------------------------------------------------- + ! zqin < 0 + zqin(ij,k) = trcrn(i,j,nt_qice+k-1) + + !----------------------------------------------------------------- + ! Compute ice temperatures from enthalpies using quadratic formula + !----------------------------------------------------------------- + + if (ktherm == 2) then + zTin(ij,k) = temperature_mush(zqin(ij,k),zSin(ij,k)) + else + zTin(ij,k) = calculate_Tin_from_qin(zqin(ij,k),Tmlts(ij,k)) + endif + + if (l_brine) then + Tmax = Tmlts(ij,k) + else ! fresh ice + Tmax = -zqin(ij,k)*puny/(rhos*cp_ice*vicen(i,j)) + endif + + !----------------------------------------------------------------- + ! Check for zTin > Tmax and zTin < Tmin + !----------------------------------------------------------------- + if (zTin(ij,k) > Tmax) then + tice_high = .true. + elseif (zTin(ij,k) < Tmin) then + tice_low = .true. + endif + + enddo ! ij + + !----------------------------------------------------------------- + ! If zTin is out of bounds, print diagnostics and exit. + !----------------------------------------------------------------- + + if (tice_high .and. heat_capacity) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (l_brine) then + Tmax = Tmlts(ij,k) + else ! 
fresh ice + Tmax = -zqin(ij,k)*puny/(rhos*cp_ice*vicen(i,j)) + endif + + if (zTin(ij,k) > Tmax) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Starting thermo, T > Tmax, layer', k + write(nu_diag,*) 'istep1, my_task, i, j, k:', & + istep1, my_task, i, j, k + write(nu_diag,*) 'zTin =',zTin(ij,k),', Tmax=',Tmax + write(nu_diag,*) 'zSin =',zSin(ij,k) + write(nu_diag,*) 'hin =',hin(ij) + write(nu_diag,*) 'zqin =',zqin(ij,k) + write(nu_diag,*) 'qmlt=',enthalpy_of_melting(zSin(ij,k)) + write(nu_diag,*) 'Tmlt=',Tmlts(ij,k) + + if (ktherm == 2) then + zqin(ij,k) = enthalpy_of_melting(zSin(ij,k)) - c1 + zTin(ij,k) = temperature_mush(zqin(ij,k),zSin(ij,k)) + write(nu_diag,*) 'Corrected quantities' + write(nu_diag,*) 'zqin=',zqin(ij,k) + write(nu_diag,*) 'zTin=',zTin(ij,k) + else + l_stop = .true. + istop = i + jstop = j + return + endif + endif + enddo ! ij + endif ! tice_high + + if (tice_low .and. heat_capacity) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (zTin(ij,k) < Tmin) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Starting thermo T < Tmin, layer', k + write(nu_diag,*) 'zTin =', zTin(ij,k) + write(nu_diag,*) 'Tmin =', Tmin + write(nu_diag,*) 'istep1, my_task, i, j:', & + istep1, my_task, i, j + l_stop = .true. + istop = i + jstop = j + return + endif + enddo ! ij + endif ! tice_low + + !----------------------------------------------------------------- + ! correct roundoff error + !----------------------------------------------------------------- + + if (ktherm /= 2) then +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + if (zTin(ij,k) > c0) then + zTin(ij,k) = c0 + zqin(ij,k) = -rhoi*Lfresh + endif + enddo ! ij + endif + +! echmod: is this necessary? +! if (ktherm == 1) then +!!DIR$ CONCURRENT !Cray +!!cdir nodep !NEC +!!ocl novrec !Fujitsu +! do ij = 1, icells +! if (zTin(ij,k)>= -zSin(ij,k)*depressT) then +! zTin(ij,k) = -zSin(ij,k)*depressT - puny +! zqin(ij,k) = -rhoi*cp_ocn*zSin(ij,k)*depressT +! endif +! enddo ! ij +! endif + + !----------------------------------------------------------------- + ! initial energy per unit area of ice/snow, relative to 0 C + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + einit(ij) = einit(ij) + hilyr(ij)*zqin(ij,k) + enddo ! ij + + enddo ! nilyr + + end subroutine init_vertical_profile + + +!=============================================== +! +! This routine is only called if UM-style coupling is being used, with top conductive flux as forcing. +! Check top conductive flux, ice thickness and top layer temperature. +! If the ratio of flux to thickness is too high, remove some of the flux and put it into fcondtopn_extra. +! If the top layer temperature is getting too low, and the flux is negative, also put some into fcondtopn_extra. +! The remainder, fcondtopn_solve, goes to the thermodynamic solver. fcondtopn_extra is added to the energy balance +! at the bottom of the ice in thickness_changes, and is thus used to grow / melt ice at the bottom. +! +! 
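A worked example of the capping described above, using the limits defined in cap_conductive_flux below (1000 W m-2 per metre of ice thickness and a -60 degC cold-layer threshold); the ice state and flux values are hypothetical:

      program cap_flux_example
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: &
         ratio_Wm2_m    = 1000.0_dbl_kind, & ! max |fcondtop| per metre of ice (W m-2 per m)
         cold_temp_flag = -60.0_dbl_kind     ! top-layer temperature below which flux is trimmed
      real (kind=dbl_kind) :: hin, fcondtop, fsolve, fextra, ttop, ratio
      hin      = 0.05_dbl_kind   ! hypothetical thin ice (m)
      fcondtop = -80.0_dbl_kind  ! hypothetical downward conductive flux (W m-2)
      ttop     = -70.0_dbl_kind  ! hypothetical top layer temperature (deg C)
      ! thickness cap: |flux| may not exceed 1000*hin = 50 W m-2 here
      if (abs(fcondtop) > ratio_Wm2_m*hin) then
         fsolve = sign(ratio_Wm2_m*hin, fcondtop)  ! -50 W m-2 goes to the solver
         fextra = fcondtop - fsolve                ! -30 W m-2 goes to the bottom energy balance
      else
         fsolve = fcondtop
         fextra = 0.0_dbl_kind
      endif
      ! cold-layer trim: at -70 degC a further (-60-(-70))/(100-60) = 25% of fsolve is diverted
      if (ttop < cold_temp_flag .and. fsolve < 0.0_dbl_kind) then
         ratio  = (cold_temp_flag - ttop)/(100.0_dbl_kind + cold_temp_flag)
         fextra = fextra + ratio*fsolve            ! fextra becomes -42.5 W m-2
         fsolve = fsolve - ratio*fsolve            ! fsolve becomes -37.5 W m-2
      endif
      print *, 'fcondtopn_solve =', fsolve, '  fcondtopn_extra =', fextra
      end program cap_flux_example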
author Alex West, MOHC + + subroutine cap_conductive_flux(nx_block,ny_block,my_task,icells,indxi,indxj,fcondtopn,fcondtopn_solve,fcondtopn_extra,hin,zTsn,zTin,hslyr) + + use ice_itd, only: hs_min + + integer (kind=int_kind), intent(in) :: nx_block, ny_block, my_task + integer (kind=int_kind), intent(in) :: icells + integer (kind=int_kind), intent(in) :: indxi(nx_block*ny_block), indxj(nx_block*ny_block) + real (kind=dbl_kind), intent(in) :: fcondtopn(nx_block,ny_block) + real (kind=dbl_kind) :: fcondtopn_solve(nx_block,ny_block), fcondtopn_extra(nx_block,ny_block) + real (kind=dbl_kind), intent(in) :: hin(icells) + real (kind=dbl_kind), intent(in) :: zTin(icells,nilyr) + real (kind=dbl_kind), intent(in) :: zTsn(icells,nslyr) + real (kind=dbl_kind), intent(in) :: hslyr(icells) + + real (kind=dbl_kind), parameter :: ratio_Wm2_m = c1000, cold_temp_flag = c0 - c60 + + ! AEW: New variables for cold-ice flux capping + real (kind=dbl_kind) :: top_layer_temp, & + reduce_ratio, & + reduce_amount + + integer (kind=int_kind) :: i, j, ij + + + do ij = 1,icells + i = indxi(ij) + j = indxj(ij) + if (abs(fcondtopn(i,j)) > ratio_Wm2_m * hin(ij)) then + fcondtopn_solve(i,j) = sign(ratio_Wm2_m * hin(ij),fcondtopn(i,j)) + fcondtopn_extra(i,j) = fcondtopn(i,j) - fcondtopn_solve(i,j) + + else + fcondtopn_solve(i,j) = fcondtopn(i,j) + fcondtopn_extra(i,j) = c0 + endif + + if (hslyr(ij)>hs_min) then + top_layer_temp = zTsn(ij,1) + else + top_layer_temp = zTin(ij,1) + endif + + if ((top_layer_temp < cold_temp_flag) .and. (fcondtopn_solve(i,j) < c0)) then + reduce_ratio = (cold_temp_flag - top_layer_temp) / (c100 + cold_temp_flag) + reduce_amount = reduce_ratio * fcondtopn_solve(i,j) + fcondtopn_solve(i,j) = fcondtopn_solve(i,j) - reduce_amount + fcondtopn_extra(i,j) = fcondtopn_extra(i,j) + reduce_amount + + endif + + + enddo + + end subroutine cap_conductive_flux + +!======================================================================= +! +! Compute growth and/or melting at the top and bottom surfaces. +! Convert snow to ice if necessary. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine thickness_changes (nx_block, ny_block, & + dt, & + yday, icells, & + indxi, indxj, & + efinal, & + hin, hilyr, & + hsn, hslyr, & + zqin, zqsn, & + fbot, Tbot, & + flatn, fsurfn, & + fcondtopn, fcondbot, & + fsnow, hsn_new, & + fhocnn, evapn, & + meltt, melts, & + meltb, iage, & + congel, snoice, & + mlt_onset, frz_onset,& + zSin, sss, & + dsnow, enum, & + fcondtopn_extra) + + use ice_therm_mushy, only: enthalpy_mush, enthalpy_of_melting, & + phi_i_mushy, temperature_mush, & + liquidus_temperature_mush, liquid_fraction + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), intent(in) :: & + dt , & ! time step + yday ! day of the year + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + fbot , & ! ice-ocean heat flux at bottom surface (W/m^2) + Tbot , & ! ice bottom surface temperature (deg C) + fsnow , & ! snowfall rate (kg m-2 s-1) + flatn , & ! surface downward latent heat (W m-2) + fsurfn , & ! net flux to top surface, excluding fcondtopn + fcondtopn ! downward cond flux at top surface (W m-2) + + real (kind=dbl_kind), dimension (icells), intent(inout) :: & + fcondbot ! 
downward cond flux at bottom surface (W m-2) + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(inout) :: & + zqin ! ice layer enthalpy (J m-3) + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(inout) :: & + zqsn ! snow layer enthalpy (J m-3) + + real (kind=dbl_kind), dimension (icells), & + intent(inout) :: & + hilyr , & ! ice layer thickness (m) + hslyr ! snow layer thickness (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + meltt , & ! top ice melt (m/step-->cm/day) + melts , & ! snow melt (m/step-->cm/day) + meltb , & ! basal ice melt (m/step-->cm/day) + congel , & ! basal ice growth (m/step-->cm/day) + snoice , & ! snow-ice formation (m/step-->cm/day) + dsnow , & ! snow formation (m/step-->cm/day) + iage , & ! ice age (s) + mlt_onset , & ! day of year that sfc melting begins + frz_onset ! day of year that freezing begins (congel or frazil) + + real (kind=dbl_kind), dimension (icells), & + intent(inout) :: & + hin , & ! total ice thickness (m) + hsn ! total snow thickness (m) + + real (kind=dbl_kind), dimension (icells), intent(out):: & + efinal ! final energy of melting (J m-2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & + fhocnn , & ! fbot, corrected for any surplus energy (W m-2) + evapn ! ice/snow mass sublimated/condensed (kg m-2 s-1) + + real (kind=dbl_kind), dimension (icells), intent(out):: & + hsn_new ! thickness of new snow (m) + + ! changes to zSin in this subroutine are not reloaded into the + ! trcrn array for ktherm /= 2, so we could remove ktherm=2 conditionals + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(inout) :: & + zSin ! ice layer salinity (ppt) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + sss ! ocean salinity (PSU) + + ! local variables + + real (kind=dbl_kind), parameter :: & + qbotmax = -p5*rhoi*Lfresh ! max enthalpy of ice growing at bottom + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k ! vertical index + + real (kind=dbl_kind), dimension (icells) :: & + esub , & ! energy for sublimation, > 0 (J m-2) + econ , & ! energy for condensation, < 0 (J m-2) + etop_mlt , & ! energy for top melting, > 0 (J m-2) + ebot_mlt , & ! energy for bottom melting, > 0 (J m-2) + ebot_gro , & ! energy for bottom growth, < 0 (J m-2) + emlt_atm , & ! total energy of brine, from atmosphere (J m-2) + emlt_ocn ! total energy of brine, to ocean (J m-2) + + real (kind=dbl_kind) :: & + dhi , & ! change in ice thickness + dhs , & ! change in snow thickness + Ti , & ! ice temperature + Ts , & ! snow temperature + qbot , & ! enthalpy of ice growing at bottom surface (J m-3) + qsub , & ! energy/unit volume to sublimate ice/snow (J m-3) + hqtot , & ! sum of h*q for two layers + wk1 , & ! temporary variable + zqsnew , & ! enthalpy of new snow (J m-3) + hstot , & ! snow thickness including new snow (m) + Tmlts ! melting temperature + + real (kind=dbl_kind), dimension (icells,nilyr+1) :: & + zi1 , & ! depth of ice layer boundaries (m) + zi2 ! adjusted depths, with equal hilyr (m) + + real (kind=dbl_kind), dimension (icells,nslyr+1) :: & + zs1 , & ! depth of snow layer boundaries (m) + zs2 ! adjusted depths, with equal hslyr (m) + + real (kind=dbl_kind), dimension (icells,nilyr) :: & + dzi ! ice layer thickness after growth/melting + + real (kind=dbl_kind), dimension (icells,nslyr) :: & + dzs ! 
snow layer thickness after growth/melting + + real (kind=dbl_kind), dimension (icells,nilyr) :: & + qm , & ! energy of melting (J m-3) = zqin in BL99 formulation + qmlt ! enthalpy of melted ice (J m-3) = zero in BL99 formulation + + real (kind=dbl_kind) :: & + qbotm , & + qbotp , & + qbot0 + +! Alex West: Extra conductive flux, that didn't go into the thermo solver. + real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block) :: & + fcondtopn_extra + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + enum ! energy not used by the temperature solver (due to + ! limiting) that should be returned to the ocean. + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + hsn_new (:) = c0 + + do k = 1, nilyr + do ij = 1, icells + dzi(ij,k) = hilyr(ij) + enddo + enddo + + do k = 1, nslyr + do ij = 1, icells + dzs(ij,k) = hslyr(ij) + enddo + enddo + + do k = 1, nilyr + do ij = 1, icells + if (ktherm == 2) then + qmlt(ij,k) = enthalpy_of_melting(zSin(ij,k)) + else + qmlt(ij,k) = c0 + endif + qm(ij,k) = zqin(ij,k) - qmlt(ij,k) + emlt_atm(ij) = c0 + emlt_ocn(ij) = c0 + enddo + enddo + + !----------------------------------------------------------------- + ! For l_brine = false (fresh ice), check for temperatures > 0. + ! Melt ice or snow as needed to bring temperatures back to 0. + ! For l_brine = true, this should not be necessary. + !----------------------------------------------------------------- + + if (.not. l_brine) then + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + Ts = (Lfresh + zqsn(ij,k)/rhos) / cp_ice + if (Ts > c0) then + dhs = cp_ice*Ts*dzs(ij,k) / Lfresh + dzs(ij,k) = dzs(ij,k) - dhs + zqsn(ij,k) = -rhos*Lfresh + endif + enddo + enddo + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + Ti = (Lfresh + zqin(ij,k)/rhoi) / cp_ice + if (Ti > c0) then + dhi = cp_ice*Ti*dzi(ij,k) / Lfresh + dzi(ij,k) = dzi(ij,k) - dhi + zqin(ij,k) = -rhoi*Lfresh + endif + enddo ! ij + enddo ! k + + endif ! .not. l_brine + + !----------------------------------------------------------------- + ! Compute energy available for sublimation/condensation, top melt, + ! and bottom growth/melt. + !----------------------------------------------------------------- + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + wk1 = -flatn(i,j) * dt + esub(ij) = max(wk1, c0) ! energy for sublimation, > 0 + econ(ij) = min(wk1, c0) ! energy for condensation, < 0 + + wk1 = (fsurfn(i,j) - fcondtopn(i,j)) * dt + etop_mlt(ij) = max(wk1, c0) ! etop_mlt > 0 + + ! AEW: Add negative energy, thrown away, to the energy available for bottom growth + wk1 = (fcondbot(ij) - fbot(i,j) + fcondtopn_extra(i,j)) * dt + ebot_mlt(ij) = max(wk1, c0) ! ebot_mlt > 0 + ebot_gro(ij) = min(wk1, c0) ! ebot_gro < 0 + + !-------------------------------------------------------------- + ! Condensation (evapn > 0) + ! Note: evapn here has unit of kg/m^2. Divide by dt later. + ! This is the only case in which energy from the atmosphere + ! is used for changes in the brine energy (emlt_atm). + !-------------------------------------------------------------- + + evapn (i,j) = c0 ! initialize + + if (hsn(ij) > puny) then ! add snow with enthalpy zqsn(ij,1) + dhs = econ(ij) / (zqsn(ij,1) - rhos*Lvap) ! econ < 0, dhs > 0 + dzs(ij,1) = dzs(ij,1) + dhs + evapn(i,j) = evapn(i,j) + dhs*rhos + else ! 
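The fresh-ice (l_brine = false) adjustment above follows from a simple heat balance: a layer that has drifted to Ts > 0 holds excess sensible heat rho*cp_ice*Ts*dz, and melting a thickness dh at 0 degC absorbs rho*Lfresh*dh, so dh = cp_ice*Ts*dz/Lfresh with the density cancelling. A tiny check with assumed constants and hypothetical values:

      program warm_layer_melt
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: &
         cp_ice = 2106.0_dbl_kind, & ! specific heat of ice (J/kg/K), assumed value
         Lfresh = 3.34e5_dbl_kind    ! latent heat of fusion (J/kg), assumed value
      real (kind=dbl_kind) :: Ts, dzs, dhs
      Ts  = 0.5_dbl_kind    ! hypothetical layer temperature above 0 degC
      dzs = 0.02_dbl_kind   ! hypothetical layer thickness (m)
      dhs = cp_ice*Ts*dzs/Lfresh   ! thickness melted to absorb the excess heat (~6.3e-5 m)
      print *, 'dhs =', dhs
      end program warm_layer_melt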
add ice with enthalpy zqin(ij,1) + dhi = econ(ij) / (qm(ij,1) - rhoi*Lvap) ! econ < 0, dhi > 0 + dzi(ij,1) = dzi(ij,1) + dhi + evapn(i,j) = evapn(i,j) + dhi*rhoi + ! enthalpy of melt water + emlt_atm(ij) = emlt_atm(ij) - qmlt(ij,1) * dhi + endif + + !-------------------------------------------------------------- + ! Grow ice (bottom) + !-------------------------------------------------------------- + + if (ktherm == 2) then + + qbotm = enthalpy_mush(Tbot(i,j), sss(i,j)) + qbotp = -Lfresh * rhoi * (c1 - phi_i_mushy) + qbot0 = qbotm - qbotp + + dhi = ebot_gro(ij) / qbotp ! dhi > 0 + + hqtot = dzi(ij,nilyr)*zqin(ij,nilyr) + dhi*qbotm + hstot = dzi(ij,nilyr)*zSin(ij,nilyr) + dhi*sss(i,j) + emlt_ocn(ij) = emlt_ocn(ij) - qbot0 * dhi + + else + + Tmlts = -zSin(ij,nilyr) * depressT + + ! enthalpy of new ice growing at bottom surface + if (heat_capacity) then + if (l_brine) then + qbot = -rhoi * (cp_ice * (Tmlts-Tbot(i,j)) & + + Lfresh * (c1-Tmlts/Tbot(i,j)) & + - cp_ocn * Tmlts) + qbot = min (qbot, qbotmax) ! in case Tbot is close to Tmlt + else + qbot = -rhoi * (-cp_ice * Tbot(i,j) + Lfresh) + endif + else ! zero layer + qbot = -rhoi * Lfresh + endif + + dhi = ebot_gro(ij) / qbot ! dhi > 0 + + hqtot = dzi(ij,nilyr)*zqin(ij,nilyr) + dhi*qbot + hstot = c0 + + endif ! ktherm + + dzi(ij,nilyr) = dzi(ij,nilyr) + dhi + if (dzi(ij,nilyr) > puny) then + zqin(ij,nilyr) = hqtot / dzi(ij,nilyr) + if (ktherm == 2) then + zSin(ij,nilyr) = hstot / dzi(ij,nilyr) + qmlt(ij,nilyr) = enthalpy_of_melting(zSin(ij,nilyr)) + else + qmlt(ij,nilyr) = c0 + endif + qm(ij,nilyr) = zqin(ij,nilyr) - qmlt(ij,nilyr) + endif + + ! update ice age due to freezing (new ice age = dt) +! if (tr_iage) & +! iage(i,j) = (iage(i,j)*hin(ij) + dt*dhi) / (hin(ij) + dhi) + + ! history diagnostics + congel(i,j) = congel(i,j) + dhi + if (dhi > puny .and. frz_onset(i,j) < puny) & + frz_onset(i,j) = yday + + enddo ! ij + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !-------------------------------------------------------------- + ! Remove internal snow melt + !-------------------------------------------------------------- + + if (ktherm == 2 .and. zqsn(ij,k) > -rhos * Lfresh) then + + dhs = max(-dzs(ij,k), & + -((zqsn(ij,k) + rhos*Lfresh) / (rhos*Lfresh)) * dzs(ij,k)) + dzs(ij,k) = dzs(ij,k) + dhs + zqsn(ij,k) = -rhos * Lfresh + melts(i,j) = melts(i,j) - dhs + ! delta E = zqsn(ij,k) + rhos * Lfresh + + endif + + !-------------------------------------------------------------- + ! Sublimation of snow (evapn < 0) + !-------------------------------------------------------------- + + qsub = zqsn(ij,k) - rhos*Lvap ! qsub < 0 + dhs = max (-dzs(ij,k), esub(ij)/qsub) ! esub > 0, dhs < 0 + dzs(ij,k) = dzs(ij,k) + dhs + esub(ij) = esub(ij) - dhs*qsub + esub(ij) = max(esub(ij), c0) ! in case of roundoff error + evapn(i,j) = evapn(i,j) + dhs*rhos + + !-------------------------------------------------------------- + ! Melt snow (top) + !-------------------------------------------------------------- + + dhs = max(-dzs(ij,k), etop_mlt(ij)/zqsn(ij,k)) + dzs(ij,k) = dzs(ij,k) + dhs ! zqsn < 0, dhs < 0 + etop_mlt(ij) = etop_mlt(ij) - dhs*zqsn(ij,k) + etop_mlt(ij) = max(etop_mlt(ij), c0) ! in case of roundoff error + + ! history diagnostics + if (dhs < -puny .and. mlt_onset(i,j) < puny) & + mlt_onset(i,j) = yday + melts(i,j) = melts(i,j) - dhs + + enddo ! ij + enddo ! 
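The BL99 bottom-growth branch above is easiest to follow with numbers. A standalone sketch of the new-ice enthalpy qbot and the growth increment dhi, using representative CICE-like constants and hypothetical forcing (bottom salinity, Tbot and ebot_gro are illustrative only):

      program bottom_growth_check
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: &
         rhoi     = 917.0_dbl_kind,  & ! ice density (kg/m^3), assumed value
         cp_ice   = 2106.0_dbl_kind, & ! specific heat of ice (J/kg/K), assumed value
         cp_ocn   = 4218.0_dbl_kind, & ! specific heat of seawater (J/kg/K), assumed value
         Lfresh   = 3.34e5_dbl_kind, & ! latent heat of fusion (J/kg), assumed value
         depressT = 0.054_dbl_kind     ! freezing point depression (deg C per ppt), assumed value
      real (kind=dbl_kind) :: Sbot, Tmlt, Tbot, qbot, qbotmax, ebot_gro, dhi
      Sbot     = 5.0_dbl_kind       ! hypothetical bottom-layer salinity (ppt)
      Tmlt     = -Sbot*depressT     ! melting temperature of that layer
      Tbot     = -1.8_dbl_kind      ! hypothetical ice bottom temperature (deg C)
      ebot_gro = -2.0e6_dbl_kind    ! hypothetical energy available for growth (J/m^2), < 0
      qbot = -rhoi*(cp_ice*(Tmlt - Tbot) + Lfresh*(1.0_dbl_kind - Tmlt/Tbot) - cp_ocn*Tmlt)
      qbotmax = -0.5_dbl_kind*rhoi*Lfresh   ! cap used when Tbot is close to Tmlt
      qbot = min(qbot, qbotmax)
      dhi  = ebot_gro/qbot          ! new ice at the bottom, > 0 (about 7.6 mm here)
      print *, 'qbot =', qbot, '  dhi =', dhi
      end program bottom_growth_check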
nslyr + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !-------------------------------------------------------------- + ! Sublimation of ice (evapn < 0) + !-------------------------------------------------------------- + + qsub = qm(ij,k) - rhoi*Lvap ! qsub < 0 + dhi = max (-dzi(ij,k), esub(ij)/qsub) ! esub < 0, dhi < 0 + dzi(ij,k) = dzi(ij,k) + dhi + esub(ij) = esub(ij) - dhi*qsub + esub(ij) = max(esub(ij), c0) + evapn(i,j) = evapn(i,j) + dhi*rhoi + emlt_ocn(ij) = emlt_ocn(ij) - qmlt(ij,k) * dhi + + !-------------------------------------------------------------- + ! Melt ice (top) + !-------------------------------------------------------------- + + if (qm(ij,k) < c0) then + dhi = max(-dzi(ij,k), etop_mlt(ij)/qm(ij,k)) + else + qm(ij,k) = c0 + dhi = -dzi(ij,k) + endif + emlt_ocn(ij) = emlt_ocn(ij) - max(zqin(ij,k),qmlt(ij,k)) * dhi + + dzi(ij,k) = dzi(ij,k) + dhi ! zqin < 0, dhi < 0 + etop_mlt(ij) = max(etop_mlt(ij) - dhi*qm(ij,k), c0) + + ! history diagnostics + if (dhi < -puny .and. mlt_onset(i,j) < puny) & + mlt_onset(i,j) = yday + meltt(i,j) = meltt(i,j) - dhi + + enddo ! ij + enddo ! nilyr + + do k = nilyr, 1, -1 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !-------------------------------------------------------------- + ! Melt ice (bottom) + !-------------------------------------------------------------- + + if (qm(ij,k) < c0) then + dhi = max(-dzi(ij,k), ebot_mlt(ij)/qm(ij,k)) + else + qm(ij,k) = c0 + dhi = -dzi(ij,k) + endif + emlt_ocn(ij) = emlt_ocn(ij) - max(zqin(ij,k),qmlt(ij,k)) * dhi + + dzi(ij,k) = dzi(ij,k) + dhi ! zqin < 0, dhi < 0 + ebot_mlt(ij) = max(ebot_mlt(ij) - dhi*qm(ij,k), c0) + + ! history diagnostics + meltb(i,j) = meltb(i,j) -dhi + + enddo ! ij + enddo ! nilyr + + do k = nslyr, 1, -1 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + + !-------------------------------------------------------------- + ! Melt snow (only if all the ice has melted) + !-------------------------------------------------------------- + + dhs = max(-dzs(ij,k), ebot_mlt(ij)/zqsn(ij,k)) + dzs(ij,k) = dzs(ij,k) + dhs ! zqsn < 0, dhs < 0 + ebot_mlt(ij) = ebot_mlt(ij) - dhs*zqsn(ij,k) + ebot_mlt(ij) = max(ebot_mlt(ij), c0) + + enddo ! ij + enddo ! nslyr + + + !----------------------------------------------------------------- + ! Compute heat flux used by the ice (<=0). + ! fhocn is the available ocean heat that is left after use by ice + !----------------------------------------------------------------- + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + fhocnn(i,j) = fbot(i,j) & + + (esub(ij) + etop_mlt(ij) + ebot_mlt(ij) + enum(ij))/dt + enddo + +!---!----------------------------------------------------------------- +!---! Add new snowfall at top surface. +!---!----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !---------------------------------------------------------------- + ! NOTE: If heat flux diagnostics are to work, new snow should + ! have T = 0 (i.e. q = -rhos*Lfresh) and should not be + ! converted to rain. 
+ !---------------------------------------------------------------- + + if (fsnow(i,j) > c0) then + + hsn_new(ij) = fsnow(i,j)/rhos * dt + zqsnew = -rhos*Lfresh + hstot = dzs(ij,1) + hsn_new(ij) + + if (hstot > c0) then + zqsn(ij,1) = (dzs(ij,1) * zqsn(ij,1) & + + hsn_new(ij) * zqsnew) / hstot + ! avoid roundoff errors + zqsn(ij,1) = min(zqsn(ij,1), -rhos*Lfresh) + + dzs(ij,1) = hstot + endif + endif + + !----------------------------------------------------------------- + ! Find the new ice and snow thicknesses. + !----------------------------------------------------------------- + + hin(ij) = c0 + hsn(ij) = c0 + enddo + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + hin(ij) = hin(ij) + dzi(ij,k) + enddo ! ij + enddo ! k + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + hsn(ij) = hsn(ij) + dzs(ij,k) + dsnow(i,j) = dsnow(i,j) + dzs(ij,k) - hslyr(ij) + enddo ! ij + enddo ! k + + !------------------------------------------------------------------- + ! Convert snow to ice if snow lies below freeboard. + !------------------------------------------------------------------- + + if (ktherm /= 2) & + call freeboard (nx_block, ny_block, & + icells, & + indxi, indxj, & + dt, & + snoice, & + iage, & + hin, hsn, & + zqin, zqsn, & + dzi, dzs, & + dsnow) + +!---!------------------------------------------------------------------- +!---! Repartition the ice and snow into equal-thickness layers, +!---! conserving energy. +!---!------------------------------------------------------------------- + + !----------------------------------------------------------------- + ! Compute desired layer thicknesses. + !----------------------------------------------------------------- + + do ij = 1, icells + + if (hin(ij) > c0) then + hilyr(ij) = hin(ij) / real(nilyr,kind=dbl_kind) + else + hin(ij) = c0 + hilyr(ij) = c0 + endif + if (hsn(ij) > c0) then + hslyr(ij) = hsn(ij) / real(nslyr,kind=dbl_kind) + else + hsn(ij) = c0 + hslyr(ij) = c0 + endif + + !----------------------------------------------------------------- + ! Compute depths zi1 of old layers (unequal thickness). + ! Compute depths zi2 of new layers (equal thickness). + !----------------------------------------------------------------- + + zi1(ij,1) = c0 + zi1(ij,1+nilyr) = hin(ij) + + zi2(ij,1) = c0 + zi2(ij,1+nilyr) = hin(ij) + + enddo ! ij + + if (heat_capacity) then + + do k = 1, nilyr-1 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + zi1(ij,k+1) = zi1(ij,k) + dzi(ij,k) + zi2(ij,k+1) = zi2(ij,k) + hilyr(ij) + end do + enddo + + !----------------------------------------------------------------- + ! Conserving energy, compute the enthalpy of the new equal layers. + !----------------------------------------------------------------- + + call adjust_enthalpy (nx_block, ny_block, & + nilyr, icells, & + indxi, indxj, & + zi1, zi2, & + hilyr, hin, & + zqin) + + if (ktherm == 2) & + call adjust_enthalpy (nx_block, ny_block, & + nilyr, icells, & + indxi, indxj, & + zi1, zi2, & + hilyr, hin, & + zSin) + + else ! zero layer (nilyr=1) + + do ij = 1, icells + zqin(ij,1) = -rhoi * Lfresh + zqsn(ij,1) = -rhos * Lfresh + end do + + endif + + if (nslyr > 1) then + + !----------------------------------------------------------------- + ! Compute depths zs1 of old layers (unequal thickness). + ! Compute depths zs2 of new layers (equal thickness). 
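New snow is added above as a thickness-weighted blend of enthalpies, then clipped so the layer cannot end up warmer than 0 degC. A self-contained sketch with assumed constants and a hypothetical snowfall and layer state:

      program new_snow_blend
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: &
         rhos   = 330.0_dbl_kind, & ! snow density (kg/m^3), assumed value
         Lfresh = 3.34e5_dbl_kind   ! latent heat of fusion (J/kg), assumed value
      real (kind=dbl_kind) :: dt, fsnow, dzs1, zqsn1, hsn_new, zqsnew, hstot
      dt      = 3600.0_dbl_kind     ! hypothetical time step (s)
      fsnow   = 1.0e-4_dbl_kind     ! hypothetical snowfall rate (kg m-2 s-1)
      dzs1    = 0.05_dbl_kind       ! existing top snow layer thickness (m), hypothetical
      zqsn1   = -1.14e8_dbl_kind    ! existing layer enthalpy (J/m^3), hypothetical
      hsn_new = fsnow/rhos * dt     ! about 1.1 mm of new snow
      zqsnew  = -rhos*Lfresh        ! new snow assumed to fall at 0 degC
      hstot   = dzs1 + hsn_new
      ! thickness-weighted enthalpy, clipped at -rhos*Lfresh to guard against roundoff
      zqsn1   = min((dzs1*zqsn1 + hsn_new*zqsnew)/hstot, -rhos*Lfresh)
      print *, 'hsn_new =', hsn_new, '  blended zqsn =', zqsn1
      end program new_snow_blend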
+ !----------------------------------------------------------------- + + do ij = 1, icells + zs1(ij,1) = c0 + zs1(ij,1+nslyr) = hsn(ij) + + zs2(ij,1) = c0 + zs2(ij,1+nslyr) = hsn(ij) + enddo + + do k = 1, nslyr-1 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + zs1(ij,k+1) = zs1(ij,k) + dzs(ij,k) + zs2(ij,k+1) = zs2(ij,k) + hslyr(ij) + end do + enddo + + !----------------------------------------------------------------- + ! Conserving energy, compute the enthalpy of the new equal layers. + !----------------------------------------------------------------- + + call adjust_enthalpy (nx_block, ny_block, & + nslyr, icells, & + indxi, indxj, & + zs1, zs2, & + hslyr, hsn, & + zqsn) + + endif ! nslyr > 1 + + !----------------------------------------------------------------- + ! Remove very thin snow layers (ktherm = 2) + !----------------------------------------------------------------- + + if (ktherm == 2) then + do k = 1, nslyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (hsn(ij) <= puny) then + fhocnn(i,j) = fhocnn(i,j) & + + zqsn(ij,k)*hsn(ij)/(real(nslyr,kind=dbl_kind)*dt) + zqsn(ij,k) = -rhos*Lfresh + hslyr(ij) = c0 + endif + enddo + enddo + endif + + !----------------------------------------------------------------- + ! Compute final ice-snow energy, including the energy of + ! sublimated/condensed ice. + !----------------------------------------------------------------- + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + efinal(ij) = -evapn(i,j)*Lvap + evapn(i,j) = evapn(i,j)/dt + enddo + + do k = 1, nslyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + efinal(ij) = efinal(ij) + hslyr(ij)*zqsn(ij,k) + enddo ! ij + enddo + + do k = 1, nilyr +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + efinal(ij) = efinal(ij) + hilyr(ij)*zqin(ij,k) + enddo ! ij + enddo ! k + + if (ktherm < 2) then + do ij = 1, icells + emlt_atm(ij) = c0 + emlt_ocn(ij) = c0 + enddo + endif + + ! melt water is no longer zero enthalpy with ktherm=2 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + fhocnn(i,j) = fhocnn(i,j) + emlt_ocn(ij)/dt + efinal(ij) = efinal(ij) + emlt_atm(ij) ! for conservation check + enddo ! ij + + end subroutine thickness_changes + +!======================================================================= +! +! If there is enough snow to lower the ice/snow interface below +! sea level, convert enough snow to ice to bring the interface back +! to sea level. +! +! authors William H. Lipscomb, LANL +! Elizabeth C. Hunke, LANL + + subroutine freeboard (nx_block, ny_block, & + icells, & + indxi, indxj, & + dt, & + snoice, & + iage, & + hin, hsn, & + zqin, zqsn, & + dzi, dzs, & + dsnow) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + snoice , & ! snow-ice formation (m/step-->cm/day) + dsnow , & ! change in snow thickness after snow-ice formation (m) + iage ! snow thickness (m) + + real (kind=dbl_kind), dimension (icells), & + intent(inout) :: & + hin , & ! ice thickness (m) + hsn ! snow thickness (m) + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(inout) :: & + zqin ! 
ice layer enthalpy (J m-3) + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(inout) :: & + dzi ! ice layer thicknesses (m) + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(in) :: & + zqsn ! snow layer enthalpy (J m-3) + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(inout) :: & + dzs ! snow layer thicknesses (m) + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k ! vertical index + + real (kind=dbl_kind), dimension (icells) :: & + dhin , & ! change in ice thickness (m) + dhsn , & ! change in snow thickness (m) + hqs ! sum of h*q for snow (J m-2) + + real (kind=dbl_kind) :: & + wk1 , & ! temporary variable + dhs ! snow to remove from layer (m) + + !----------------------------------------------------------------- + ! Determine whether snow lies below freeboard. + !----------------------------------------------------------------- + + do ij = 1, icells + + i = indxi(ij) + j = indxj(ij) + + dhin(ij) = c0 + dhsn(ij) = c0 + hqs (ij) = c0 + + wk1 = hsn(ij) - hin(ij)*(rhow-rhoi)/rhos + + if (wk1 > puny .and. hsn(ij) > puny) then ! snow below freeboard + dhsn(ij) = min(wk1*rhoi/rhow, hsn(ij)) ! snow to remove + dhin(ij) = dhsn(ij) * rhos/rhoi ! ice to add + endif + enddo + + !----------------------------------------------------------------- + ! Adjust snow layer thickness. + ! Compute energy to transfer from snow to ice. + !----------------------------------------------------------------- + + do k = nslyr, 1, -1 +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + if (dhin(ij) > puny) then + dhs = min(dhsn(ij), dzs(ij,k)) ! snow to remove from layer + hsn(ij) = hsn(ij) - dhs + dsnow(i,j) = dsnow(i,j) -dhs !new snow addition term + dzs(ij,k) = dzs(ij,k) - dhs + dhsn(ij) = dhsn(ij) - dhs + dhsn(ij) = max(dhsn(ij),c0) + hqs(ij) = hqs(ij) + dhs * zqsn(ij,k) + endif ! dhin > puny + enddo + enddo + + !----------------------------------------------------------------- + ! Transfer volume and energy from snow to top ice layer. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (dhin(ij) > puny) then + ! update ice age due to freezing (new ice age = dt) +! if (tr_iage) & +! iage(i,j) = (iage(i,j)*hin(ij)+dt*dhin(ij))/(hin(ij)+dhin(ij)) + + wk1 = dzi(ij,1) + dhin(ij) + hin(ij) = hin(ij) + dhin(ij) + zqin(ij,1) = (dzi(ij,1)*zqin(ij,1) + hqs(ij)) / wk1 + dzi(ij,1) = wk1 + + ! history diagnostic + snoice(i,j) = snoice(i,j) + dhin(ij) + endif ! dhin > puny + enddo ! ij + + end subroutine freeboard + +!======================================================================= +! +! Conserving energy, compute the new enthalpy of equal-thickness ice +! or snow layers. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine adjust_enthalpy (nx_block, ny_block, & + nlyr, icells, & + indxi, indxj, & + z1, z2, & + hlyr, hn, & + qn) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + nlyr , & ! number of layers (nilyr or nslyr) + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), dimension (icells,nlyr+1), & + intent(in) :: & + z1 , & ! 
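The freeboard test above is Archimedes' principle: flotation puts the draft at (rhoi*hin + rhos*hsn)/rhow, which exceeds hin (interface below sea level) exactly when hsn > hin*(rhow-rhoi)/rhos, i.e. wk1 > 0. A small sketch with assumed densities and hypothetical thicknesses:

      program freeboard_check
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: &
         rhoi = 917.0_dbl_kind,  & ! ice density (kg/m^3), assumed value
         rhos = 330.0_dbl_kind,  & ! snow density (kg/m^3), assumed value
         rhow = 1026.0_dbl_kind    ! seawater density (kg/m^3), assumed value
      real (kind=dbl_kind) :: hin, hsn, wk1, dhsn, dhin
      hin = 0.5_dbl_kind    ! hypothetical ice thickness (m)
      hsn = 0.3_dbl_kind    ! hypothetical snow thickness (m)
      wk1 = hsn - hin*(rhow - rhoi)/rhos   ! > 0 means the snow/ice interface is submerged
      if (wk1 > 0.0_dbl_kind) then
         dhsn = min(wk1*rhoi/rhow, hsn)    ! snow thickness to remove
         dhin = dhsn*rhos/rhoi             ! ice thickness to add; the snow mass reappears as ice
      else
         dhsn = 0.0_dbl_kind
         dhin = 0.0_dbl_kind
      endif
      print *, 'snow converted =', dhsn, '  ice added =', dhin
      end program freeboard_check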
interface depth for old, unequal layers (m) + z2 ! interface depth for new, equal layers (m) + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + hlyr ! new layer thickness (m) + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + hn ! total thickness (m) + + real (kind=dbl_kind), dimension (icells,nlyr), & + intent(inout) :: & + qn ! layer quantity (enthalpy, salinity...) + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k, k1, k2 ! vertical indices + + real (kind=dbl_kind) :: & + hovlp ! overlap between old and new layers (m) + + real (kind=dbl_kind), dimension (icells) :: & + rhlyr ! 1./hlyr + + real (kind=dbl_kind), dimension (icells,nlyr) :: & + hq ! h * q for a layer + + !----------------------------------------------------------------- + ! Compute reciprocal layer thickness. + !----------------------------------------------------------------- + + do ij = 1, icells + rhlyr(ij) = c0 + if (hn(ij) > puny) rhlyr(ij) = c1 / hlyr(ij) + enddo ! ij + + !----------------------------------------------------------------- + ! Compute h*q for new layers (k2) given overlap with old layers (k1) + !----------------------------------------------------------------- + + do k2 = 1, nlyr + do ij = 1, icells + hq(ij,k2) = c0 + enddo + enddo ! k + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + k1 = 1 + k2 = 1 + do while (k1 <= nlyr .and. k2 <= nlyr) + hovlp = min (z1(ij,k1+1), z2(ij,k2+1)) & + - max (z1(ij,k1), z2(ij,k2)) + hovlp = max (hovlp, c0) + hq(ij,k2) = hq(ij,k2) + hovlp*qn(ij,k1) + if (z1(ij,k1+1) > z2(ij,k2+1)) then + k2 = k2 + 1 + else + k1 = k1 + 1 + endif + enddo ! while + enddo ! ij + + !----------------------------------------------------------------- + ! Compute new enthalpies. + !----------------------------------------------------------------- + + do k = 1, nlyr + do ij = 1, icells + qn(ij,k) = hq(ij,k) * rhlyr(ij) + enddo ! ij + enddo ! k + + end subroutine adjust_enthalpy + +!======================================================================= +! +! Check for energy conservation by comparing the change in energy +! to the net energy input. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW +! Adrian K. Turner, LANL + + subroutine conservation_check_vthermo(nx_block, ny_block, & + my_task, istep1, & + dt, icells, & + indxi, indxj, & + fsurfn, flatn, & + fhocnn, fswint, & + fsnow, & + einit, einter, & + efinal, & + fcondtopn,fcondbot, & + fadvocn, & + fbot, l_stop, & + istop, jstop, & + fcondtopn_solve,fcondtopn_extra, & + enum) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + my_task , & ! task number (diagnostic only) + istep1 , & ! time step index (diagnostic only) + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + fsurfn , & ! net flux to top surface, excluding fcondtopn + flatn , & ! surface downward latent heat (W m-2) + fhocnn , & ! fbot, corrected for any surplus energy + fswint , & ! SW absorbed in ice interior, below surface (W m-2) + fsnow , & ! snowfall rate (kg m-2 s-1) + fcondtopn , & + fadvocn , & + fbot + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + einit , & ! 
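The do-while loop in adjust_enthalpy above is the whole remapping: each new layer k2 accumulates h*q from every old layer k1 it overlaps, and dividing by the new layer thickness recovers the layer value while conserving the column integral. A self-contained two-layer check with hypothetical thicknesses and enthalpies:

      program remap_layers_check
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      integer, parameter :: nlyr = 2
      real (kind=dbl_kind) :: z1(nlyr+1), z2(nlyr+1), qold(nlyr), qnew(nlyr), hq(nlyr)
      real (kind=dbl_kind) :: hlyr, hovlp
      integer :: k1, k2
      ! old, unequal layers: 0.3 m at -2.0e8 J/m^3 over 0.1 m at -1.0e8 J/m^3 (hypothetical)
      z1   = (/ 0.0_dbl_kind, 0.3_dbl_kind, 0.4_dbl_kind /)
      qold = (/ -2.0e8_dbl_kind, -1.0e8_dbl_kind /)
      hlyr = z1(nlyr+1)/real(nlyr,kind=dbl_kind)        ! new, equal layer thickness
      z2   = (/ 0.0_dbl_kind, hlyr, z1(nlyr+1) /)
      hq   = 0.0_dbl_kind
      k1 = 1
      k2 = 1
      do while (k1 <= nlyr .and. k2 <= nlyr)            ! accumulate h*q over layer overlaps
         hovlp  = max(min(z1(k1+1), z2(k2+1)) - max(z1(k1), z2(k2)), 0.0_dbl_kind)
         hq(k2) = hq(k2) + hovlp*qold(k1)
         if (z1(k1+1) > z2(k2+1)) then
            k2 = k2 + 1
         else
            k1 = k1 + 1
         endif
      enddo
      qnew = hq/hlyr
      print *, 'qnew =', qnew                           ! (-2.0e8, -1.5e8) expected
      print *, 'energy before =', 0.3_dbl_kind*qold(1) + 0.1_dbl_kind*qold(2), &
               '  after =', hlyr*sum(qnew)              ! both -7.0e7 J/m^2
      end program remap_layers_check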
initial energy of melting (J m-2) + einter , & ! intermediate energy of melting (J m-2) + efinal , & ! final energy of melting (J m-2) + fcondbot + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, print diagnostics and abort model + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! i and j indices of cell where model fails + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij ! horizontal index, combines i and j loops + + real (kind=dbl_kind) :: & + einp , & ! energy input during timestep (J m-2) + ferr ! energy conservation error (W m-2) + + real (kind=dbl_kind), intent(in) :: & + fcondtopn_extra(nx_block,ny_block), & + fcondtopn_solve(nx_block,ny_block) + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + enum ! energy not used by the temperature solver (due to + ! limiting) that should be returned to the ocean. + + !---------------------------------------------------------------- + ! If energy is not conserved, print diagnostics and exit. + !---------------------------------------------------------------- +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + !----------------------------------------------------------------- + ! Note that fsurf - flat = fsw + flw + fsens; i.e., the latent + ! heat is not included in the energy input, since (efinal - einit) + ! is the energy change in the system ice + vapor, and the latent + ! heat lost by the ice is equal to that gained by the vapor. + !----------------------------------------------------------------- + + einp = (fsurfn(i,j) - flatn(i,j) + fswint(i,j) - fhocnn(i,j) & + - fsnow(i,j)*Lfresh - fadvocn(i,j)) * dt + ferr = abs(efinal(ij)-einit(ij)-einp) / dt + + if (ferr > ferrmax) then + l_stop = .true. + istop = i + jstop = j + + write(nu_diag,*) 'Thermo energy conservation error' + write(nu_diag,*) 'istep1, my_task, i, j:', & + istep1, my_task, i, j + write(nu_diag,*) 'Flux error (W/m^2) =', ferr + write(nu_diag,*) 'Energy error (J) =', ferr*dt + write(nu_diag,*) 'Initial energy =', einit(ij) + write(nu_diag,*) 'Final energy =', efinal(ij) + write(nu_diag,*) 'efinal - einit =', & + efinal(ij)-einit(ij) + write(nu_diag,*) 'fsurfn,flatn,fswint,fhocn, fsnow*Lfresh:' + write(nu_diag,*) fsurfn(i,j),flatn(i,j),fswint(i,j),fhocnn(i,j), fsnow(i,j)*Lfresh + write(nu_diag,*) 'Input energy =', einp + write(nu_diag,*) 'fbot(i,j),fcondbot(ij):' + write(nu_diag,*) fbot(i,j),fcondbot(ij) + write(nu_diag,*) 'fcondtop_solve(i,j), fcondtopn_extra(i,j):' + write(nu_diag,*) fcondtopn_solve(i,j), fcondtopn_extra(i,j) + write(nu_diag,*) 'enum(ij):' + write(nu_diag,*) enum(ij) + +! if (ktherm == 2) then + write(nu_diag,*) 'Intermediate energy =', einter(ij) + write(nu_diag,*) 'efinal - einter =', & + efinal(ij)-einter(ij) + write(nu_diag,*) 'einter - einit =', & + einter(ij)-einit(ij) + write(nu_diag,*) 'Conduction Error =', (einter(ij)-einit(ij)) & + - (fcondtopn(i,j)*dt - fcondbot(ij)*dt + fswint(i,j)*dt) + write(nu_diag,*) 'Melt/Growth Error =', (einter(ij)-einit(ij)) & + + ferr*dt - (fcondtopn(i,j)*dt - fcondbot(ij)*dt + fswint(i,j)*dt) + write(nu_diag,*) 'Advection Error =', fadvocn(i,j)*dt +! endif + +! 
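Read the check above as a budget: einp collects everything that crossed the column boundary during the step (latent heat excluded because efinal already carries the -evapn*Lvap term from thickness_changes), and ferr is the residual per unit time. A sketch with hypothetical fluxes, constructed so the budget closes exactly:

      program energy_budget_check
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: Lfresh = 3.34e5_dbl_kind  ! assumed value (J/kg)
      real (kind=dbl_kind) :: dt, fsurfn, flatn, fswint, fhocnn, fsnow, fadvocn
      real (kind=dbl_kind) :: einit, efinal, einp, ferr
      dt      = 3600.0_dbl_kind     ! hypothetical time step (s)
      fsurfn  = -20.0_dbl_kind      ! hypothetical fluxes (W m-2)
      flatn   =  -5.0_dbl_kind
      fswint  =   2.0_dbl_kind
      fhocnn  = -12.0_dbl_kind
      fsnow   =   0.0_dbl_kind      ! kg m-2 s-1
      fadvocn =   0.0_dbl_kind
      einit   = -5.0e8_dbl_kind     ! hypothetical initial energy of melting (J m-2)
      efinal  = einit + (fsurfn - flatn + fswint - fhocnn)*dt   ! conserving by construction
      einp    = (fsurfn - flatn + fswint - fhocnn - fsnow*Lfresh - fadvocn)*dt
      ferr    = abs(efinal - einit - einp)/dt                   ! zero here; compared to ferrmax above
      print *, 'einp =', einp, '  ferr =', ferr
      end program energy_budget_check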
write(nu_diag,*) fsurfn(i,j),flatn(i,j),fswint(i,j),fhocnn(i,j) + + write(nu_diag,*) 'dt*(fsurfn, flatn, fswint, fhocn, fsnow*Lfresh, fadvocn):' + write(nu_diag,*) fsurfn(i,j)*dt, flatn(i,j)*dt, & + fswint(i,j)*dt, fhocnn(i,j)*dt, & + fsnow(i,j)*Lfresh*dt, fadvocn(i,j)*dt + return + endif + enddo + + end subroutine conservation_check_vthermo + +!======================================================================= +! +! Given the vertical thermo state variables (hin, hsn, zqin, +! zqsn, Tsf), compute the new ice state variables (vicen, vsnon, trcrn). +! Zero out state variables if ice has melted entirely. +! +! authors William H. Lipscomb, LANL +! C. M. Bitz, UW + + subroutine update_state_vthermo(nx_block, ny_block, & + icells, & + indxi, indxj, & + Tf, Tsf, & + hin, hsn, & + zqin, zSin, & + zqsn, & + aicen, vicen, & + vsnon, trcrn) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + Tf ! freezing temperature (C) + + real (kind=dbl_kind), dimension(icells), intent(in) :: & + Tsf ! ice/snow surface temperature, Tsfcn + + real (kind=dbl_kind), dimension(icells), intent(in) :: & + hin , & ! ice thickness (m) + hsn ! snow thickness (m) + + real (kind=dbl_kind), dimension (icells,nilyr), & + intent(in) :: & + zqin , & ! ice layer enthalpy (J m-3) + zSin ! ice salinity (ppt) + + real (kind=dbl_kind), dimension (icells,nslyr), & + intent(in) :: & + zqsn ! snow layer enthalpy (J m-3) + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_ntrcr), & + intent(inout) :: & + trcrn + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + ij , & ! horizontal index, combines i and j loops + k ! ice layer index + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (hin(ij) > c0) then + ! aicen is already up to date + vicen(i,j) = aicen(i,j) * hin(ij) + vsnon(i,j) = aicen(i,j) * hsn(ij) + trcrn(i,j,nt_Tsfc) = Tsf(ij) + else ! (hin(ij) == c0) + aicen(i,j) = c0 + vicen(i,j) = c0 + vsnon(i,j) = c0 + trcrn(i,j,nt_Tsfc) = Tf(i,j) + endif + enddo ! ij + + do k = 1, nilyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (hin(ij) > c0) then + trcrn(i,j,nt_qice+k-1) = zqin(ij,k) + ! trcrn(i,j,nt_sice+k-1) = zSin(ij,k) + else + trcrn(i,j,nt_qice+k-1) = c0 + ! trcrn(i,j,nt_sice+k-1) = c0 + endif + + enddo ! ij + enddo ! nilyr + + if (ktherm == 2) then + do k = 1, nilyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (hin(ij) > c0) then + trcrn(i,j,nt_sice+k-1) = zSin(ij,k) + else + trcrn(i,j,nt_sice+k-1) = c0 + endif + + enddo ! ij + enddo ! nilyr + endif + + do k = 1, nslyr + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (hin(ij) > c0) then + trcrn(i,j,nt_qsno+k-1) = zqsn(ij,k) + else + trcrn(i,j,nt_qsno+k-1) = c0 + endif + + enddo ! ij + enddo ! 
nslyr + + end subroutine update_state_vthermo + +!======================================================================= + + end module ice_therm_vertical + +!======================================================================= diff --git a/source/ice_transport_driver.F90 b/source/ice_transport_driver.F90 new file mode 100755 index 00000000..934a3a67 --- /dev/null +++ b/source/ice_transport_driver.F90 @@ -0,0 +1,1694 @@ +! SVN:$Id: ice_transport_driver.F90 825 2014-08-29 15:37:09Z eclare $ +!======================================================================= +! +! Drivers for remapping and upwind ice transport +! +! authors: Elizabeth C. Hunke and William H. Lipscomb, LANL +! +! 2004: Revised by William Lipscomb from ice_transport_mpdata. +! Stripped out mpdata, retained upwind, and added block structure. +! 2006: Incorporated remap transport driver and renamed from +! ice_transport_upwind. +! 2011: ECH moved edgearea arrays into ice_transport_remap.F90 + + module ice_transport_driver + + use ice_kinds_mod + use ice_communicate, only: my_task, master_task + use ice_fileunits, only: nu_diag + + implicit none + private + public :: init_transport, transport_remap, transport_upwind + save + + character (len=char_len), public :: & + advection ! type of advection scheme used + ! 'upwind' => 1st order donor cell scheme + ! 'remap' => remapping scheme + + logical, parameter :: & ! if true, prescribe area flux across each edge + l_fixed_area = .false. + +! NOTE: For remapping, hice and hsno are considered tracers. +! ntrace is not equal to ntrcr! + + integer (kind=int_kind) :: & + ntrace ! number of tracers in use + + integer (kind=int_kind), dimension(:), allocatable :: & + tracer_type ,&! = 1, 2, or 3 (depends on 0, 1 or 2 other tracers) + depend ! tracer dependencies (see below) + + logical (kind=log_kind), dimension (:), allocatable :: & + has_dependents ! true if a tracer has dependent tracers + + integer (kind=int_kind), parameter :: & + integral_order = 3 ! polynomial order of quadrature integrals + ! linear=1, quadratic=2, cubic=3 + + logical (kind=log_kind), parameter :: & + l_dp_midpt = .true. ! if true, find departure points using + ! corrected midpoint velocity + +!======================================================================= + + contains + +!======================================================================= +! +! This subroutine is a wrapper for init_remap, which initializes the +! remapping transport scheme. If the model is run with upwind +! transport, no initializations are necessary. +! +! authors William H. Lipscomb, LANL + + subroutine init_transport + + use ice_state, only: ntrcr, trcr_depend, nt_Tsfc, nt_qice, nt_qsno, & + nt_sice, nt_fbri, nt_iage, nt_FY, nt_alvl, nt_vlvl, & + nt_apnd, nt_hpnd, nt_ipnd, nt_bgc_n_sk + use ice_exit, only: abort_ice + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_advect + use ice_transport_remap, only: init_remap + + integer (kind=int_kind) :: & + k, nt, nt1 ! tracer indices + + call ice_timer_start(timer_advect) ! advection + + ntrace = 2 + ntrcr ! hice,hsno,trcr + + if (allocated(tracer_type)) deallocate(tracer_type) + if (allocated(depend)) deallocate(depend) + if (allocated(has_dependents)) deallocate(has_dependents) + + allocate (tracer_type (ntrace), & + depend (ntrace), & + has_dependents(ntrace)) + + ! define tracer dependency arrays + ! see comments in remapping routine + + depend(1:2) = 0 ! hice, hsno + tracer_type(1:2) = 1 ! 
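The depend / tracer_type / has_dependents bookkeeping is easiest to see on a concrete case. A self-contained toy configuration (hypothetical tracer ordering) run through the same assignment logic as the loop that follows; remap slots 1 and 2 hold hice and hsno, and slot 2+nt holds tracer nt:

      program tracer_dependency_demo
      implicit none
      integer, parameter :: ntrcr = 3, ntrace = 2 + ntrcr
      ! toy trcr_depend: tracer 1 is an ice-area tracer (0), tracer 2 depends on
      ! tracer 1 (2+1), tracer 3 depends on tracer 2 (2+2); values are hypothetical
      integer :: trcr_depend(ntrcr) = (/ 0, 2+1, 2+2 /)
      integer :: depend(ntrace), tracer_type(ntrace), nt, k
      logical :: has_dependents(ntrace)
      depend(1:2)      = 0      ! slots 1,2 = hice, hsno
      tracer_type(1:2) = 1
      k = 2
      do nt = 1, ntrcr          ! same logic as in init_transport
         depend(k+nt)      = trcr_depend(nt)
         tracer_type(k+nt) = 2
         if (trcr_depend(nt) == 0) then
            tracer_type(k+nt) = 1
         elseif (trcr_depend(nt) > 2) then
            if (trcr_depend(trcr_depend(nt)-2) > 0) tracer_type(k+nt) = 3
         endif
      enddo
      has_dependents = .false.
      do nt = 1, ntrace
         if (depend(nt) > 0) has_dependents(depend(nt)) = .true.
      enddo
      ! expected: slot 3 type 1; slot 4 type 2 (depends on slot 3);
      !           slot 5 type 3 (depends on slot 4, which depends on slot 3)
      print *, 'depend         =', depend
      print *, 'tracer_type    =', tracer_type
      print *, 'has_dependents =', has_dependents
      end program tracer_dependency_demo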
no dependency + + k = 2 + + do nt = 1, ntrcr + depend(k+nt) = trcr_depend(nt) ! 0 for ice area tracers + ! 1 for ice volume tracers + ! 2 for snow volume tracers + tracer_type(k+nt) = 2 ! depends on 1 other tracer + if (trcr_depend(nt) == 0) then + tracer_type(k+nt) = 1 ! depends on no other tracers + elseif (trcr_depend(nt) > 2) then + if (trcr_depend(trcr_depend(nt)-2) > 0) then + tracer_type(k+nt) = 3 ! depends on 2 other tracers + endif + endif + enddo + + has_dependents = .false. + do nt = 1, ntrace + if (depend(nt) > 0) then + nt1 = depend(nt) + has_dependents(nt1) = .true. + if (nt1 > nt) then + write(nu_diag,*) & + 'Tracer nt2 =',nt,' depends on tracer nt1 =',nt1 + call abort_ice & + ('ice: remap transport: Must have nt2 > nt1') + endif + endif + enddo ! ntrace + + ! diagnostic output + if (my_task == master_task) then + write (nu_diag, *) 'tracer index depend type has_dependents' + nt = 1 + write(nu_diag,*) ' hi ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + nt = 2 + write(nu_diag,*) ' hs ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + k=2 + do nt = k+1, k+ntrcr + if (nt-k==nt_Tsfc) & + write(nu_diag,*) 'nt_Tsfc',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_qice) & + write(nu_diag,*) 'nt_qice',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_qsno) & + write(nu_diag,*) 'nt_qsno',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_sice) & + write(nu_diag,*) 'nt_sice',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_fbri) & + write(nu_diag,*) 'nt_fbri',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_iage) & + write(nu_diag,*) 'nt_iage',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_FY) & + write(nu_diag,*) 'nt_FY ', nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_alvl) & + write(nu_diag,*) 'nt_alvl',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_vlvl) & + write(nu_diag,*) 'nt_vlvl',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_apnd) & + write(nu_diag,*) 'nt_apnd',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_hpnd) & + write(nu_diag,*) 'nt_hpnd',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_ipnd) & + write(nu_diag,*) 'nt_ipnd',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_bgc_N_sk) & + write(nu_diag,*) 'nt_bgc_sk',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + enddo + endif ! master_task + + if (trim(advection)=='remap') call init_remap ! grid quantities + + call ice_timer_stop(timer_advect) ! advection + + end subroutine init_transport + +!======================================================================= +! +! This subroutine solves the transport equations for one timestep +! using the conservative remapping scheme developed by John Dukowicz +! and John Baumgardner (DB) and modified for sea ice by William +! Lipscomb and Elizabeth Hunke. +! +! This scheme preserves monotonicity of ice area and tracers. That is, +! it does not produce new extrema. It is second-order accurate in space, +! except where gradients are limited to preserve monotonicity. +! +! authors William H. 
Lipscomb, LANL + + subroutine transport_remap (dt) + + use ice_blocks, only: nx_block, ny_block + use ice_boundary, only: ice_HaloUpdate + use ice_constants, only: c0, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_vector + use ice_global_reductions, only: global_sum, global_sum_prod + use ice_domain, only: nblocks, distrb_info, blocks_ice, halo_info + use ice_domain_size, only: ncat, max_blocks + use ice_blocks, only: nx_block, ny_block, block, get_block, nghost + use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, ntrcr, & + uvel, vvel, bound_state + use ice_grid, only: tarea, HTE, HTN + use ice_exit, only: abort_ice + use ice_calendar, only: istep1 + use ice_timers, only: ice_timer_start, ice_timer_stop, & + timer_advect, timer_bound + use ice_transport_remap, only: horizontal_remap, make_masks + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! local variables + + integer (kind=int_kind) :: & + iblk ,&! block index + ilo,ihi,jlo,jhi,&! beginning and end of physical domain + n ,&! ice category index + nt, nt1, nt2 ! tracer indices + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,0:ncat,max_blocks) :: & + aim ,&! mean ice category areas in each grid cell + aimask ! = 1. if ice is present, = 0. otherwise + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & + trm ,&! mean tracer values in each grid cell + trmask ! = 1. if tracer is present, = 0. otherwise + + logical (kind=log_kind) :: & + l_stop ! if true, abort the model + + integer (kind=int_kind) :: & + istop, jstop ! indices of grid cell where model aborts + + integer (kind=int_kind), dimension(0:ncat,max_blocks) :: & + icellsnc ! number of cells with ice + + integer (kind=int_kind), & + dimension(nx_block*ny_block,0:ncat,max_blocks) :: & + indxinc, indxjnc ! compressed i/j indices + + type (block) :: & + this_block ! block information for current block + + ! variables related to optional bug checks + + logical (kind=log_kind), parameter :: & + l_conservation_check = .false. ,&! if true, check conservation + l_monotonicity_check = .false. ! if true, check monotonicity + + real (kind=dbl_kind), dimension(0:ncat) :: & + asum_init ,&! initial global ice area + asum_final ! final global ice area + + real (kind=dbl_kind), dimension(ntrace,ncat) :: & + atsum_init ,&! initial global ice area*tracer + atsum_final ! final global ice area*tracer + + real (kind=dbl_kind), dimension (:,:,:,:,:), allocatable :: & + tmin ,&! local min tracer + tmax ! local max tracer + + integer (kind=int_kind) :: alloc_error + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + call ice_timer_start(timer_advect) ! advection + +!---!------------------------------------------------------------------- +!---! Prepare for remapping. +!---! Initialize, update ghost cells, fill tracer arrays. +!---!------------------------------------------------------------------- + + l_stop = .false. + istop = 0 + jstop = 0 + + !------------------------------------------------------------------- + ! Compute open water area in each grid cell. + ! Note: An aggregate_area call is needed only if the open + ! water area has changed since the previous call. + ! Here we assume that aice0 is up to date. + !------------------------------------------------------------------- + +! !$OMP PARALLEL DO PRIVATE(iblk) +! do iblk = 1, nblocks +! call aggregate_area (nx_block, ny_block, +! iblk, & +! aicen(:,:,:,iblk), & +! aice (:,:, iblk), & +! aice0(:,:, iblk)) +! enddo +! 
!$OMP END PARALLEL DO + + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + ! Commented out because ghost cells are updated after cleanup_itd. + !------------------------------------------------------------------- +! call ice_timer_start(timer_bound) + +! call ice_HaloUpdate (aice0, halo_info, & +! field_loc_center, field_type_scalar) + +! call bound_state (aicen, trcrn, & +! vicen, vsnon) + +! call ice_timer_stop(timer_bound) + + !------------------------------------------------------------------- + ! Ghost cell updates for ice velocity. + ! Commented out because ghost cell velocities are computed + ! in ice_dyn_evp. + !------------------------------------------------------------------- + +! call ice_timer_start(timer_bound) +! call ice_HaloUpdate (uvel, halo_info, & +! field_loc_NEcorner, field_type_vector) +! call ice_HaloUpdate (vvel, halo_info, & +! field_loc_NEcorner, field_type_vector) +! call ice_timer_stop(timer_bound) + + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + !------------------------------------------------------------------- + ! Fill arrays with fields to be remapped. + !------------------------------------------------------------------- + + call state_to_tracers(nx_block, ny_block, & + ntrcr, ntrace, & + aice0(:,:, iblk), aicen(:,:,:,iblk), & + trcrn(:,:,1:ntrcr,:,iblk), & + vicen(:,:,:,iblk), vsnon(:,:, :,iblk), & + aim (:,:,:,iblk), trm (:,:,:,:,iblk)) + + enddo + !$OMP END PARALLEL DO + +!---!------------------------------------------------------------------- +!---! Optional conservation and monotonicity checks. +!---!------------------------------------------------------------------- + + if (l_conservation_check) then + + !------------------------------------------------------------------- + ! Compute initial values of globally conserved quantities. + !------------------------------------------------------------------- + + do n = 0, ncat + asum_init(n) = global_sum(aim(:,:,n,:), distrb_info, & + field_loc_center, tarea) + enddo + + do n = 1, ncat + do nt = 1, ntrace + if (tracer_type(nt)==1) then ! does not depend on another tracer + atsum_init(nt,n) = & + global_sum_prod(trm(:,:,nt,n,:), aim(:,:,n,:), & + distrb_info, field_loc_center, & + tarea) + elseif (tracer_type(nt)==2) then ! depends on another tracer + nt1 = depend(nt) + work1(:,:,:) = trm(:,:,nt,n,:)*trm(:,:,nt1,n,:) + atsum_init(nt,n) = & + global_sum_prod(work1(:,:,:), aim(:,:,n,:), & + distrb_info, field_loc_center, & + tarea) + elseif (tracer_type(nt)==3) then ! depends on two tracers + nt1 = depend(nt) + nt2 = depend(nt1) + work1(:,:,:) = trm(:,:,nt,n,:)*trm(:,:,nt1,n,:) & + *trm(:,:,nt2,n,:) + atsum_init(nt,n) = & + global_sum_prod(work1(:,:,:), aim(:,:,n,:), & + distrb_info, field_loc_center, & + tarea) + endif ! tracer_type + enddo ! nt + enddo ! n + + endif ! l_conservation_check + + if (l_monotonicity_check) then + + allocate(tmin(nx_block,ny_block,ntrace,ncat,max_blocks), & + tmax(nx_block,ny_block,ntrace,ncat,max_blocks), & + STAT=alloc_error) + + if (alloc_error /= 0) & + call abort_ice ('ice: allocation error') + + tmin(:,:,:,:,:) = c0 + tmax(:,:,:,:,:) = c0 + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !------------------------------------------------------------------- + ! Compute masks. + ! 
Masks are used to prevent tracer values in cells without ice + ! from being used in the monotonicity check. + !------------------------------------------------------------------- + + call make_masks (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + has_dependents, & + icellsnc(:,iblk), & + indxinc(:,:,iblk), indxjnc(:,:,iblk), & + aim(:,:,:,iblk), aimask(:,:,:,iblk), & + trm(:,:,:,:,iblk), trmask(:,:,:,:,iblk)) + + !------------------------------------------------------------------- + ! Compute local max and min of tracer fields. + !------------------------------------------------------------------- + + do n = 1, ncat + call local_max_min & + (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + trm (:,:,:,n,iblk), & + tmin(:,:,:,n,iblk), tmax (:,:,:,n,iblk), & + aimask(:,:,n,iblk), trmask(:,:,:,n,iblk)) + enddo + enddo + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (tmin, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (tmax, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do n = 1, ncat + call quasilocal_max_min (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmin(:,:,:,n,iblk), & + tmax(:,:,:,n,iblk)) + enddo + enddo + !$OMP END PARALLEL DO + + endif ! l_monotonicity_check + + !------------------------------------------------------------------- + ! Main remapping routine: Step ice area and tracers forward in time. + !------------------------------------------------------------------- + + call horizontal_remap (dt, ntrace, & + uvel (:,:,:), vvel (:,:,:), & + aim (:,:,:,:), trm (:,:,:,:,:), & + l_fixed_area, & + tracer_type, depend, & + has_dependents, integral_order, & + l_dp_midpt) + + !------------------------------------------------------------------- + ! Given new fields, recompute state variables. + !------------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call tracers_to_state (nx_block, ny_block, & + ntrcr, ntrace, & + aim (:,:,:,iblk), trm (:,:,:,:,iblk), & + aice0(:,:, iblk), aicen(:,:,:,iblk), & + trcrn(:,:,1:ntrcr,:,iblk), & + vicen(:,:,:,iblk), vsnon(:,:, :,iblk)) + + enddo ! iblk + !$OMP END PARALLEL DO + + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + !------------------------------------------------------------------- + + call ice_timer_start(timer_bound) + + call bound_state (aicen, trcrn, & + vicen, vsnon) + + call ice_timer_stop(timer_bound) + +!---!------------------------------------------------------------------- +!---! Optional conservation and monotonicity checks +!---!------------------------------------------------------------------- + + !------------------------------------------------------------------- + ! Compute final values of globally conserved quantities. + ! Check global conservation of area and area*tracers. (Optional) + !------------------------------------------------------------------- + + if (l_conservation_check) then + + do n = 0, ncat + asum_final(n) = global_sum(aim(:,:,n,:), distrb_info, & + field_loc_center, tarea) + enddo + + do n = 1, ncat + do nt = 1, ntrace + if (tracer_type(nt)==1) then ! 
does not depend on another tracer + atsum_final(nt,n) = & + global_sum_prod(trm(:,:,nt,n,:), aim(:,:,n,:), & + distrb_info, field_loc_center, & + tarea) + elseif (tracer_type(nt)==2) then ! depends on another tracer + nt1 = depend(nt) + work1(:,:,:) = trm(:,:,nt,n,:)*trm(:,:,nt1,n,:) + atsum_final(nt,n) = & + global_sum_prod(work1(:,:,:), aim(:,:,n,:), & + distrb_info, field_loc_center, & + tarea) + elseif (tracer_type(nt)==3) then ! depends on two tracers + nt1 = depend(nt) + nt2 = depend(nt1) + work1(:,:,:) = trm(:,:,nt,n,:)*trm(:,:,nt1,n,:) & + *trm(:,:,nt2,n,:) + atsum_final(nt,n) = & + global_sum_prod(work1(:,:,:), aim(:,:,n,:), & + distrb_info, field_loc_center, & + tarea) + endif ! tracer_type + enddo ! nt + enddo ! n + + if (my_task == master_task) then + call global_conservation (l_stop, & + asum_init(0), asum_final(0)) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) 'transport: conservation error, cat 0' + call abort_ice('ice remap transport: conservation error') + endif + + do n = 1, ncat + call global_conservation & + (l_stop, & + asum_init(n), asum_final(n), & + atsum_init(:,n), atsum_final(:,n)) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk, cat =', & + istep1, my_task, iblk, n + write (nu_diag,*) 'transport: conservation error, cat ',n + call abort_ice & + ('ice remap transport: conservation error') + endif + enddo ! n + + endif ! my_task = master_task + + endif ! l_conservation_check + + !------------------------------------------------------------------- + ! Check tracer monotonicity. (Optional) + !------------------------------------------------------------------- + + if (l_monotonicity_check) then + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n,l_stop,istop,jstop) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + l_stop = .false. + istop = 0 + jstop = 0 + + do n = 1, ncat + call check_monotonicity & + (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmin(:,:,:,n,iblk), tmax(:,:,:,n,iblk), & + aim (:,:, n,iblk), trm (:,:,:,n,iblk), & + l_stop, & + istop, jstop) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk, cat =', & + istep1, my_task, iblk, n + call abort_ice('ice remap transport: monotonicity error') + endif + enddo ! n + + enddo ! iblk + !$OMP END PARALLEL DO + + deallocate(tmin, tmax, STAT=alloc_error) + if (alloc_error /= 0) call abort_ice ('deallocation error') + + endif ! l_monotonicity_check + + call ice_timer_stop(timer_advect) ! advection + + end subroutine transport_remap + +!======================================================================= +! +! Computes the transport equations for one timestep using upwind. Sets +! several fields into a work array and passes it to upwind routine. + + subroutine transport_upwind (dt) + + use ice_boundary, only: ice_HaloUpdate + use ice_blocks, only: nx_block, ny_block, block, get_block, nx_block, ny_block + use ice_constants, only: p5, & + field_loc_Nface, field_loc_Eface, field_type_vector + use ice_domain, only: blocks_ice, halo_info, nblocks + use ice_domain_size, only: ncat, max_blocks + use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, ntrcr, & + uvel, vvel, trcr_depend, bound_state + use ice_grid, only: HTE, HTN, tarea + use ice_timers, only: ice_timer_start, ice_timer_stop, & + timer_bound, timer_advect + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + ! 
local variables + + integer (kind=int_kind) :: & + narr ! max number of state variable arrays + + integer (kind=int_kind) :: & + i, j, iblk ,&! horizontal indices + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension (nx_block,ny_block,nblocks) :: & + uee, vnn ! cell edge velocities + + real (kind=dbl_kind), & + dimension (:,:,:,:), allocatable :: & + works ! work array + + type (block) :: & + this_block ! block information for current block + + call ice_timer_start(timer_advect) ! advection + + narr = 1 + ncat*(3+ntrcr) ! max number of state variable arrays + + allocate (works(nx_block,ny_block,narr,max_blocks)) + + !------------------------------------------------------------------- + ! Get ghost cell values of state variables. + ! (Assume velocities are already known for ghost cells, also.) + !------------------------------------------------------------------- +! call bound_state (aicen, trcrn, & +! vicen, vsnon) + + !------------------------------------------------------------------- + ! Average corner velocities to edges. + !------------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i,j-1,iblk)) + vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j,iblk)) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uee, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (vnn, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + + !----------------------------------------------------------------- + ! fill work arrays with fields to be advected + !----------------------------------------------------------------- + + call state_to_work (nx_block, ny_block, & + ntrcr, & + narr, trcr_depend, & + aicen (:,:, :,iblk), trcrn (:,:,1:ntrcr,:,iblk), & + vicen (:,:, :,iblk), vsnon (:,:, :,iblk), & + aice0 (:,:, iblk), works (:,:, :,iblk)) + + !----------------------------------------------------------------- + ! advect + !----------------------------------------------------------------- + + call upwind_field (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, & + narr, works(:,:,:,iblk), & + uee(:,:,iblk), vnn (:,:,iblk), & + HTE(:,:,iblk), HTN (:,:,iblk), & + tarea(:,:,iblk)) + + !----------------------------------------------------------------- + ! convert work arrays back to state variables + !----------------------------------------------------------------- + + call work_to_state (nx_block, ny_block, & + ntrcr, & + narr, trcr_depend, & + aicen(:,:, :,iblk), trcrn (:,:,1:ntrcr,:,iblk), & + vicen(:,:, :,iblk), vsnon (:,:, :,iblk), & + aice0(:,:, iblk), works (:,:, :,iblk)) + + enddo ! iblk + !$OMP END PARALLEL DO + + deallocate (works) + + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. 
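+      ! (The upwind loop above updates only the physical cells of each
+      !  block, so the halo of the freshly advected state is refreshed
+      !  here before it is used again.)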
+ !------------------------------------------------------------------- + + call ice_timer_start(timer_bound) + + call bound_state (aicen, trcrn, & + vicen, vsnon) + + call ice_timer_stop(timer_bound) + + call ice_timer_stop(timer_advect) ! advection + + end subroutine transport_upwind + +!======================================================================= +! The next few subroutines (through check_monotonicity) are called +! by transport_remap. +!======================================================================= +! +! Fill ice area and tracer arrays. +! Assume that the advected tracers are hicen, hsnon, trcrn, +! qicen(1:nilyr), and qsnon(1:nslyr). +! This subroutine must be modified if a different set of tracers +! is to be transported. The rule for ordering tracers +! is that a dependent tracer (such as qice) must have a larger +! tracer index than the tracer it depends on (i.e., hice). +! +! author William H. Lipscomb, LANL + + subroutine state_to_tracers (nx_block, ny_block, & + ntrcr, ntrace, & + aice0, aicen, & + trcrn, & + vicen, vsnon, & + aim, trm) + + use ice_constants, only: c0, c1, rhos, Lfresh, puny + use ice_domain_size, only: ncat, nslyr + use ice_state, only: nt_qsno + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + ntrace ! number of tracers in use incl. hi, hs + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aice0 ! fractional open water area + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen ,&! fractional ice area + vicen ,&! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(in) :: & + trcrn ! ice area tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), & + intent(out):: & + aim ! mean ice area in each grid cell + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat), & + intent(out) :: & + trm ! mean tracer values in each grid cell + + ! local variables + + integer (kind=int_kind) :: & + i, j, n ,&! standard indices + it, kt ,&! tracer indices + ij ! combined i/j index + + real (kind=dbl_kind) :: & + w1 ! work variable + + integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat) :: & + indxi ,&! compressed i/j indices + indxj + + integer (kind=int_kind), dimension(0:ncat) :: & + icells ! number of cells with ice + + aim(:,:,0) = aice0(:,:) + + do n = 1, ncat + + trm(:,:,:,n) = c0 + + !------------------------------------------------------------------- + ! Find grid cells where ice is present and fill area array. + !------------------------------------------------------------------- + + icells(n) = 0 + do j = 1, ny_block + do i = 1, nx_block + aim(i,j,n) = aicen(i,j,n) + if (aim(i,j,n) > puny) then + icells(n) = icells(n) + 1 + ij = icells(n) + indxi(ij,n) = i + indxj(ij,n) = j + endif ! aim > puny + enddo + enddo + + !------------------------------------------------------------------- + ! Fill tracer array + ! Note: If aice > 0, then hice > 0, but we can have hsno = 0. + ! Alse note: We transport qice*nilyr rather than qice, so as to + ! avoid extra operations here and in tracers_to_state. + !------------------------------------------------------------------- + + do ij = 1, icells(n) + i = indxi(ij,n) + j = indxj(ij,n) + w1 = c1 / aim(i,j,n) + trm(i,j,1,n) = vicen(i,j,n) * w1 ! hice + trm(i,j,2,n) = vsnon(i,j,n) * w1 ! 
hsno + enddo + kt = 2 + + do it = 1, ntrcr + if (it >= nt_qsno .and. it < nt_qsno+nslyr) then + do ij = 1, icells(n) + i = indxi(ij,n) + j = indxj(ij,n) + trm(i,j,kt+it,n) = trcrn(i,j,it,n) + rhos*Lfresh ! snow enthalpy + enddo + else + do ij = 1, icells(n) + i = indxi(ij,n) + j = indxj(ij,n) + trm(i,j,kt+it,n) = trcrn(i,j,it,n) ! other tracers + enddo + endif + enddo + enddo ! ncat + + end subroutine state_to_tracers + +!======================================================================= +! +! Convert area and tracer arrays back to state variables. +! +! author William H. Lipscomb, LANL + + subroutine tracers_to_state (nx_block, ny_block, & + ntrcr, ntrace, & + aim, trm, & + aice0, aicen, & + trcrn, & + vicen, vsnon) + + use ice_constants, only: c0, rhos, Lfresh + use ice_domain_size, only: ncat, nslyr + use ice_state, only: nt_qsno + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + ntrace ! number of tracers in use incl. hi, hs + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), & + intent(in) :: & + aim ! fractional ice area + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat), & + intent(in) :: & + trm ! mean tracer values in each grid cell + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(inout) :: & + aice0 ! fractional ice area + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(inout) :: & + aicen ,&! fractional ice area + vicen ,&! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! tracers + + ! local variables + + integer (kind=int_kind) :: & + i, j, k, n ,&! standard indices + it, kt ,&! tracer indices + icells ,&! number of cells with ice + ij + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, indxj ! compressed indices + + aice0(:,:) = aim(:,:,0) + + do n = 1, ncat + + icells = 0 + do j = 1, ny_block + do i = 1, nx_block + if (aim(i,j,n) > c0) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + !------------------------------------------------------------------- + ! Compute state variables. + !------------------------------------------------------------------- + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + aicen(i,j,n) = aim(i,j,n) + vicen(i,j,n) = aim(i,j,n)*trm(i,j,1,n) ! aice*hice + vsnon(i,j,n) = aim(i,j,n)*trm(i,j,2,n) ! aice*hsno + enddo ! ij + kt = 2 + + do it = 1, ntrcr + if (it >= nt_qsno .and. it < nt_qsno+nslyr) then + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,it,n) = trm(i,j,kt+it,n) - rhos*Lfresh ! snow enthalpy + enddo + else + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + trcrn(i,j,it,n) = trm(i,j,kt+it,n) ! other tracers + enddo + endif + enddo + enddo ! ncat + + end subroutine tracers_to_state + +!======================================================================= +! +! Check whether values of conserved quantities have changed. +! An error probably means that ghost cells are treated incorrectly. +! +! author William H. Lipscomb, LANL + + subroutine global_conservation (l_stop, & + asum_init, asum_final, & + atsum_init, atsum_final) + + use ice_constants, only: puny + + real (kind=dbl_kind), intent(in) :: & + asum_init ,&! initial global ice area + asum_final ! final global ice area + + real (kind=dbl_kind), dimension(ntrace), intent(in), optional :: & + atsum_init ,&! 
initial global ice area*tracer + atsum_final ! final global ice area*tracer + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, abort on return + + ! local variables + + integer (kind=int_kind) :: & + nt ! tracer index + + real (kind=dbl_kind) :: & + diff ! difference between initial and final values + + + if (asum_init > puny) then + diff = asum_final - asum_init + if (abs(diff/asum_init) > puny) then + l_stop = .true. + write (nu_diag,*) + write (nu_diag,*) 'Ice area conserv error' + write (nu_diag,*) 'Initial global area =', asum_init + write (nu_diag,*) 'Final global area =', asum_final + write (nu_diag,*) 'Fractional error =', abs(diff)/asum_init + write (nu_diag,*) 'asum_final-asum_init =', diff + endif + endif + + if (present(atsum_init)) then + do nt = 1, ntrace + if (abs(atsum_init(nt)) > puny) then + diff = atsum_final(nt) - atsum_init(nt) + if (abs(diff/atsum_init(nt)) > puny) then + l_stop = .true. + write (nu_diag,*) + write (nu_diag,*) 'area*tracer conserv error' + write (nu_diag,*) 'tracer index =', nt + write (nu_diag,*) 'Initial global area*tracer =', & + atsum_init(nt) + write (nu_diag,*) 'Final global area*tracer =', & + atsum_final(nt) + write (nu_diag,*) 'Fractional error =', & + abs(diff)/atsum_init(nt) + write (nu_diag,*) 'atsum_final-atsum_init =', diff + endif + endif + enddo + endif ! present(atsum_init) + + end subroutine global_conservation + +!======================================================================= +! +! At each grid point, compute the local max and min of a scalar +! field phi: i.e., the max and min values in the nine-cell region +! consisting of the home cell and its eight neighbors. +! +! To extend to the neighbors of the neighbors (25 cells in all), +! follow this call with a call to quasilocal_max_min. +! +! author William H. Lipscomb, LANL + + subroutine local_max_min (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + trm, & + tmin, tmax, & + aimask, trmask) + + use ice_constants, only: c1 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block,&! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), intent(in), & + dimension(nx_block,ny_block) :: & + aimask ! ice area mask + + real (kind=dbl_kind), intent(in), & + dimension (nx_block,ny_block,ntrace) :: & + trm ,&! tracer fields + trmask ! tracer mask + + real (kind=dbl_kind), intent(out), & + dimension (nx_block,ny_block,ntrace) :: & + tmin ,&! local min tracer + tmax ! local max tracer + + ! local variables + + integer (kind=int_kind) :: & + i, j ,&! horizontal indices + nt, nt1 ! tracer indices + + real (kind=dbl_kind), dimension(nx_block,ny_block) :: & + phimask ! aimask or trmask, as appropriate + + real (kind=dbl_kind) :: & + phi_nw, phi_n, phi_ne ,&! field values in 8 neighbor cells + phi_w, phi_e ,& + phi_sw, phi_s, phi_se + + do nt = 1, ntrace + + if (tracer_type(nt)==1) then ! does not depend on another tracer + + do j = 1, ny_block + do i = 1, nx_block + phimask(i,j) = aimask(i,j) + enddo + enddo + + else ! depends on another tracer + + nt1 = depend(nt) + do j = 1, ny_block + do i = 1, nx_block + phimask(i,j) = trmask(i,j,nt1) + enddo + enddo + + endif + +!----------------------------------------------------------------------- +! Store values of trm in the 8 neighbor cells. +! If aimask = 1, use the true value; otherwise use the home cell value +! so that non-physical values of phi do not contribute to the gradient. 
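+! In other words, each neighbour value is blended with the home-cell value
+! according to the mask; for example (matching the west neighbour in the
+! loop below):
+!
+!    phi_w = phimask(i-1,j)*trm(i-1,j,nt) + (c1-phimask(i-1,j))*trm(i,j,nt)
+!
+! so a masked-out neighbour (phimask = 0) simply reproduces trm(i,j,nt)
+! and cannot widen the local [tmin, tmax] bounds.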
+!----------------------------------------------------------------------- + + do j = jlo, jhi + do i = ilo, ihi + + phi_nw = phimask(i-1,j+1) * trm(i-1,j+1,nt) & + + (c1-phimask(i-1,j+1))* trm(i, j, nt) + phi_n = phimask(i, j+1) * trm(i, j+1,nt) & + + (c1-phimask(i, j+1))* trm(i, j, nt) + phi_ne = phimask(i+1,j+1) * trm(i+1,j+1,nt) & + + (c1-phimask(i+1,j+1))* trm(i, j, nt) + phi_w = phimask(i-1,j) * trm(i-1,j, nt) & + + (c1-phimask(i-1,j)) * trm(i, j, nt) + phi_e = phimask(i+1,j) * trm(i+1,j, nt) & + + (c1-phimask(i+1,j)) * trm(i, j, nt) + phi_sw = phimask(i-1,j-1) * trm(i-1,j-1,nt) & + + (c1-phimask(i-1,j-1))* trm(i, j, nt) + phi_s = phimask(i, j-1) * trm(i, j-1,nt) & + + (c1-phimask(i, j-1))* trm(i, j, nt) + phi_se = phimask(i+1,j-1) * trm(i+1,j-1,nt) & + + (c1-phimask(i+1,j-1))* trm(i, j, nt) + +!----------------------------------------------------------------------- +! Compute the minimum and maximum among the nine local cells. +!----------------------------------------------------------------------- + + tmax(i,j,nt) = max (phi_nw, phi_n, phi_ne, phi_w, & + trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) + + tmin(i,j,nt) = min (phi_nw, phi_n, phi_ne, phi_w, & + trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) + + enddo ! i + enddo ! j + + enddo ! nt + + end subroutine local_max_min + +!======================================================================= +! +! Extend the local max and min by one grid cell in each direction. +! Incremental remapping is monotone for the "quasilocal" max and min, +! but in rare cases may violate monotonicity for the local max and min. +! +! author William H. Lipscomb, LANL + + subroutine quasilocal_max_min (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmin, tmax) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block,&! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), intent(inout), & + dimension (nx_block,ny_block,ntrace) :: & + tmin ,&! local min tracer + tmax ! local max tracer + + ! local variables + + integer (kind=int_kind) :: & + i, j ,&! horizontal indices + nt ! tracer index + + do nt = 1, ntrace + + do j = jlo, jhi + do i = ilo, ihi + + tmax(i,j,nt) = & + max (tmax(i-1,j+1,nt), tmax(i,j+1,nt), tmax(i+1,j+1,nt), & + tmax(i-1,j, nt), tmax(i,j, nt), tmax(i+1,j, nt), & + tmax(i-1,j-1,nt), tmax(i,j-1,nt), tmax(i+1,j-1,nt)) + + tmin(i,j,nt) = & + min (tmin(i-1,j+1,nt), tmin(i,j+1,nt), tmin(i+1,j+1,nt), & + tmin(i-1,j, nt), tmin(i,j, nt), tmin(i+1,j, nt), & + tmin(i-1,j-1,nt), tmin(i,j-1,nt), tmin(i+1,j-1,nt)) + + enddo ! i + enddo ! j + + enddo + + end subroutine quasilocal_max_min + +!====================================================================== +! +! At each grid point, make sure that the new tracer values +! fall between the local max and min values before transport. +! +! author William H. Lipscomb, LANL + + subroutine check_monotonicity (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmin, tmax, & + aim, trm, & + l_stop, & + istop, jstop) + + use ice_constants, only: c1, puny + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block,&! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), intent(in), & + dimension (nx_block,ny_block) :: & + aim ! new ice area + + real (kind=dbl_kind), intent(in), & + dimension (nx_block,ny_block,ntrace) :: & + trm ! new tracers + + real (kind=dbl_kind), intent(in), & + dimension (nx_block,ny_block,ntrace) :: & + tmin ,&! local min tracer + tmax ! 
local max tracer + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i, j ,&! horizontal indices + nt, nt1, nt2 ! tracer indices + + real (kind=dbl_kind) :: & + w1, w2 ! work variables + + logical (kind=log_kind), dimension (nx_block, ny_block) :: & + l_check ! if true, check monotonicity + + do nt = 1, ntrace + + !------------------------------------------------------------------- + ! Load logical array to identify tracers that need checking. + !------------------------------------------------------------------- + + if (tracer_type(nt)==1) then ! does not depend on another tracer + + do j = jlo, jhi + do i = ilo, ihi + if (aim(i,j) > puny) then + l_check(i,j) = .true. + else + l_check(i,j) = .false. + endif + enddo + enddo + + elseif (tracer_type(nt)==2) then ! depends on another tracer + + nt1 = depend(nt) + do j = jlo, jhi + do i = ilo, ihi + if (abs(trm(i,j,nt1)) > puny) then + l_check(i,j) = .true. + else + l_check(i,j) = .false. + endif + enddo + enddo + + elseif (tracer_type(nt)==3) then ! depends on two tracers + + nt1 = depend(nt) + nt2 = depend(nt1) + do j = jlo, jhi + do i = ilo, ihi + if (abs(trm(i,j,nt1)) > puny .and. & + abs(trm(i,j,nt2)) > puny) then + l_check(i,j) = .true. + else + l_check(i,j) = .false. + endif + enddo + enddo + endif + + !------------------------------------------------------------------- + ! Make sure new values lie between tmin and tmax + !------------------------------------------------------------------- + + do j = jlo, jhi + do i = ilo, ihi + + if (l_check(i,j)) then + ! w1 and w2 allow for roundoff error when abs(trm) is big + w1 = max(c1, abs(tmin(i,j,nt))) + w2 = max(c1, abs(tmax(i,j,nt))) + if (trm(i,j,nt) < tmin(i,j,nt)-w1*puny) then + l_stop = .true. + istop = i + jstop = j + write (nu_diag,*) ' ' + write (nu_diag,*) 'new tracer < tmin' + write (nu_diag,*) 'i, j, nt =', i, j, nt + write (nu_diag,*) 'new tracer =', trm (i,j,nt) + write (nu_diag,*) 'tmin =' , tmin(i,j,nt) + write (nu_diag,*) 'ice area =' , aim(i,j) + elseif (trm(i,j,nt) > tmax(i,j,nt)+w2*puny) then + l_stop = .true. + istop = i + jstop = j + write (nu_diag,*) ' ' + write (nu_diag,*) 'new tracer > tmax' + write (nu_diag,*) 'i, j, nt =', i, j, nt + write (nu_diag,*) 'new tracer =', trm (i,j,nt) + write (nu_diag,*) 'tmax =' , tmax(i,j,nt) + write (nu_diag,*) 'ice area =' , aim(i,j) + endif + endif + + enddo ! i + enddo ! j + + enddo ! nt + + end subroutine check_monotonicity + +!======================================================================= +! The remaining subroutines are called by transport_upwind. +!======================================================================= +! +! Fill work array with state variables in preparation for upwind transport + + subroutine state_to_work (nx_block, ny_block, & + ntrcr, & + narr, trcr_depend, & + aicen, trcrn, & + vicen, vsnon, & + aice0, works) + + use ice_domain_size, only: ncat + use ice_state, only: nt_alvl, nt_apnd, nt_fbri, & + tr_pond_cesm, tr_pond_lvl, tr_pond_topo + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + narr ! number of 2D state variable arrays in works array + + integer (kind=int_kind), dimension (ntrcr), intent(in) :: & + trcr_depend ! 
= 0 for aicen tracers, 1 for vicen, 2 for vsnon + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen ,&! concentration of ice + vicen ,&! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(in) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + aice0 ! concentration of open water + + real (kind=dbl_kind), dimension(nx_block,ny_block,narr), & + intent (out) :: & + works ! work array + + ! local variables + + integer (kind=int_kind) :: & + i, j, n, it ,&! counting indices + narrays ! counter for number of state variable arrays + + !----------------------------------------------------------------- + ! This array is used for performance (balance memory/cache vs + ! number of bound calls); a different number of arrays may perform + ! better depending on the machine used, number of processors, etc. + ! --tested on SGI R2000, using 4 pes for the ice model under MPI + !----------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + works(i,j,1) = aice0(i,j) + enddo + enddo + narrays = 1 + + do n=1, ncat + + do j = 1, ny_block + do i = 1, nx_block + works(i,j,narrays+1) = aicen(i,j,n) + works(i,j,narrays+2) = vicen(i,j,n) + works(i,j,narrays+3) = vsnon(i,j,n) + enddo ! i + enddo ! j + narrays = narrays + 3 + + do it = 1, ntrcr + if (trcr_depend(it) == 0) then + do j = 1, ny_block + do i = 1, nx_block + works(i,j,narrays+it) = aicen(i,j,n)*trcrn(i,j,it,n) + enddo + enddo + elseif (trcr_depend(it) == 1) then + do j = 1, ny_block + do i = 1, nx_block + works(i,j,narrays+it) = vicen(i,j,n)*trcrn(i,j,it,n) + enddo + enddo + elseif (trcr_depend(it) == 2) then + do j = 1, ny_block + do i = 1, nx_block + works(i,j,narrays+it) = vsnon(i,j,n)*trcrn(i,j,it,n) + enddo + enddo + elseif (trcr_depend(it) == 2+nt_alvl) then + do j = 1, ny_block + do i = 1, nx_block + works(i,j,narrays+it) = aicen(i,j,n) & + * trcrn(i,j,nt_alvl,n) & + * trcrn(i,j,it,n) + enddo + enddo + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_cesm .or. tr_pond_topo) then + do j = 1, ny_block + do i = 1, nx_block + works(i,j,narrays+it) = aicen(i,j,n) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,it,n) + enddo + enddo + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_lvl) then + do j = 1, ny_block + do i = 1, nx_block + works(i,j,narrays+it) = aicen(i,j,n) & + * trcrn(i,j,nt_alvl,n) & + * trcrn(i,j,nt_apnd,n) & + * trcrn(i,j,it,n) + enddo + enddo + elseif (trcr_depend(it) == 2+nt_fbri) then + do j = 1, ny_block + do i = 1, nx_block + works(i,j,narrays+it) = vicen(i,j,n) & + * trcrn(i,j,nt_fbri,n) & + * trcrn(i,j,it,n) + enddo + enddo + endif + enddo + narrays = narrays + ntrcr + + enddo ! n + + if (narr /= narrays) write(nu_diag,*) & + "Wrong number of arrays in transport bound call" + + end subroutine state_to_work + +!======================================================================= +! +! Convert work array back to state variables + + subroutine work_to_state (nx_block, ny_block, & + ntrcr, & + narr, trcr_depend, & + aicen, trcrn, & + vicen, vsnon, & + aice0, works) + + use ice_domain_size, only: ncat + use ice_blocks, only: + use ice_itd, only: compute_tracers + + integer (kind=int_kind), intent (in) :: & + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + narr ! 
number of 2D state variable arrays in works array + + integer (kind=int_kind), dimension (ntrcr), intent(in) :: & + trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon + + real (kind=dbl_kind), intent (in) :: & + works (nx_block,ny_block,narr) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(out) :: & + aicen ,&! concentration of ice + vicen ,&! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(out) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + aice0 ! concentration of open water + + ! local variables + + integer (kind=int_kind) :: & + i, j, n ,&! counting indices + narrays ,&! counter for number of state variable arrays + icells ! number of ocean/ice cells + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxi, indxj + + real (kind=dbl_kind), dimension (nx_block*ny_block,narr) :: & + work + + ! for call to compute_tracers + icells = 0 + do j = 1, ny_block + do i = 1, nx_block + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + work (icells,:) = works(i,j,:) + enddo + enddo + + do j=1,ny_block + do i=1,nx_block + aice0(i,j) = works(i,j,1) + enddo + enddo + narrays = 1 ! aice0 is first array + + do n=1,ncat + + do j=1,ny_block + do i=1,nx_block + aicen(i,j,n) = works(i,j,narrays+1) + vicen(i,j,n) = works(i,j,narrays+2) + vsnon(i,j,n) = works(i,j,narrays+3) + enddo + enddo + narrays = narrays + 3 + + call compute_tracers (nx_block, ny_block, & + icells, indxi, indxj, & + ntrcr, trcr_depend, & + work (:,narrays+1:narrays+ntrcr), & + aicen(:,:,n), & + vicen(:,:,n), vsnon(:,:,n), & + trcrn(:,:,:,n)) + + narrays = narrays + ntrcr + + enddo ! ncat + + end subroutine work_to_state + +!======================================================================= +! +! upwind transport algorithm + + subroutine upwind_field (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + dt, & + narrays, phi, & + uee, vnn, & + HTE, HTN, & + tarea) + + use ice_constants, only: p5 + + integer (kind=int_kind), intent (in) :: & + nx_block, ny_block ,&! block dimensions + ilo,ihi,jlo,jhi ,&! beginning and end of physical domain + narrays ! number of 2D arrays to be transported + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + real (kind=dbl_kind), dimension(nx_block,ny_block,narrays), & + intent(inout) :: & + phi ! scalar field + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in):: & + uee, vnn ! cell edge velocities + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + HTE ,&! length of east cell edge + HTN ,&! length of north cell edge + tarea ! grid cell area + + ! local variables + + integer (kind=int_kind) :: & + i, j, n ! standard indices + + real (kind=dbl_kind) :: & + upwind, y1, y2, a, h ! function + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + worka, workb + + !------------------------------------------------------------------- + ! Define upwind function + !------------------------------------------------------------------- + + upwind(y1,y2,a,h) = p5*dt*h*((a+abs(a))*y1+(a-abs(a))*y2) + + !------------------------------------------------------------------- + ! 
upwind transport + !------------------------------------------------------------------- + + do n = 1, narrays + + do j = 1, jhi + do i = 1, ihi + worka(i,j)= & + upwind(phi(i,j,n),phi(i+1,j,n),uee(i,j),HTE(i,j)) + workb(i,j)= & + upwind(phi(i,j,n),phi(i,j+1,n),vnn(i,j),HTN(i,j)) + enddo + enddo + + do j = jlo, jhi + do i = ilo, ihi + phi(i,j,n) = phi(i,j,n) - ( worka(i,j)-worka(i-1,j) & + + workb(i,j)-workb(i,j-1) ) & + / tarea(i,j) + enddo + enddo + + enddo ! narrays + + end subroutine upwind_field + +!======================================================================= + + end module ice_transport_driver + +!======================================================================= diff --git a/source/ice_transport_remap.F90 b/source/ice_transport_remap.F90 new file mode 100755 index 00000000..02aa3645 --- /dev/null +++ b/source/ice_transport_remap.F90 @@ -0,0 +1,3735 @@ +! SVN:$Id: ice_transport_remap.F90 825 2014-08-29 15:37:09Z eclare $ +!======================================================================= +! +! Transports quantities using the second-order conservative remapping +! scheme developed by John Dukowicz and John Baumgardner (DB) and modified +! for sea ice by William Lipscomb and Elizabeth Hunke. +! +! References: +! +! Dukowicz, J. K., and J. R. Baumgardner, 2000: Incremental +! remapping as a transport/advection algorithm, J. Comput. Phys., +! 160, 318-335. +! +! Lipscomb, W. H., and E. C. Hunke, 2004: Modeling sea ice +! transport using incremental remapping, Mon. Wea. Rev., 132, +! 1341-1354. +! +! authors William H. Lipscomb, LANL +! John Baumgardner, LANL +! +! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb +! 2004-05: Block structure added (WHL) +! 2006: Moved remap driver to ice_transport_driver +! Geometry changes: +! (1) Reconstruct fields in stretched logically rectangular coordinates +! (2) Modify geometry so that the area flux across each edge +! can be specified (following an idea of Mats Bentsen) +! 2010: ECH removed unnecessary grid arrays and optional arguments from +! horizontal_remap + + module ice_transport_remap + + use ice_kinds_mod + use ice_communicate, only: my_task, master_task + use ice_domain_size, only: max_blocks, ncat + use ice_fileunits, only: nu_diag + + implicit none + save + private + public :: init_remap, horizontal_remap, make_masks + + integer (kind=int_kind), parameter :: & + ngroups = 6 ,&! number of groups of triangles that + ! contribute transports across each edge + nvert = 3 ! number of vertices in a triangle + + ! for triangle integral formulas + real (kind=dbl_kind), parameter :: & + p5625m = -9._dbl_kind/16._dbl_kind ,& + p52083 = 25._dbl_kind/48._dbl_kind + + logical (kind=log_kind), parameter :: bugcheck = .false. + +!======================================================================= +! Here is some information about how the incremental remapping scheme +! works in CICE and how it can be adapted for use in other models. +! +! The remapping routine is designed to transport a generic mass-like +! field (in CICE, the ice fractional area) along with an arbitrary number +! of tracers in two dimensions. The velocity components are assumed +! to lie at grid cell corners and the transported scalars at cell centers. +! Incremental remapping has the following desirable properties: +! +! (1) Tracer monotonicity is preserved. That is, no new local +! extrema are produced in fields like ice thickness or internal +! energy. +! (2) The reconstucted mass and tracer fields vary linearly in x and y. +! 
This means that remapping is 2nd-order accurate in space, +! except where horizontal gradients are limited to preserve +! monotonicity. +! (3) There are economies of scale. Transporting a single field +! is rather expensive, but additional fields have a relatively +! low marginal cost. +! +! The following generic conservation equations may be solved: +! +! dm/dt = del*(u*m) (0) +! d(m*T1)/dt = del*(u*m*T1) (1) +! d(m*T1*T2)/dt = del*(u*m*T1*T2) (2) +! d(m*T1*T2*T3)/dt = del*(u*m*T1*T2*T3) (3) +! +! where d is a partial derivative, del is the 2D divergence operator, +! u is the horizontal velocity, m is the mass density field, and +! T1, T2, and T3 are tracers. +! +! In CICE, these equations have the form +! +! da/dt = del*(u*a) (4) +! dv/dt = d(a*h)/dt = del*(u*a*h) (5) +! de/dt = d(a*h*q)/dt = del*(u*a*h*q) (6) +! d(aT)/dt = del*(u*a*t) (7) +! +! where a = fractional ice area, v = ice/snow volume, h = v/a = thickness, +! e = ice/snow internal energy (J/m^2), q = e/v = internal energy per +! unit volume (J/m^3), and T is a tracer. These equations express +! conservation of ice area, volume, internal energy, and area-weighted +! tracer, respectively. +! +! (Note: In CICE, a, v and e are prognostic quantities from which +! h and q are diagnosed. The remapping routine works with tracers, +! which means that h and q must be derived from a, v, and e before +! calling the remapping routine.) +! +! Earlier versions of CICE assumed fixed ice and snow density. +! Beginning with CICE 4.0, the ice and snow density can be variable. +! In this case, equations (5) and (6) are replaced by +! +! dv/dt = d(a*h)/dt = del*(u*a*h) (8) +! dm/dt = d(a*h*rho)/dt = del*(u*a*h*rho) (9) +! de/dt = d(a*h*rho*qm)/dt = del*(u*a*h*rho*qm) (10) +! +! where rho = density and qm = internal energy per unit mass (J/kg). +! Eq. (9) expresses mass conservation, which in the variable-density +! case is no longer equivalent to volume conservation (8). +! +! Tracers satisfying equations of the form (1) are called "type 1." +! In CICE the paradigmatic type 1 tracers are hi and hs. +! +! Tracers satisfying equations of the form (2) are called "type 2". +! The paradigmatic type 2 tracers are qi and qs (or rhoi and rhos +! in the variable-density case). +! +! Tracers satisfying equations of the form (3) are called "type 3." +! The paradigmatic type 3 tracers are qmi and qms in the variable-density +! case. There are no such tracers in the constant-density case. +! +! The fields a, T1, and T2 are reconstructed in each grid cell with +! 2nd-order accuracy. T3 is reconstructed with 1st-order accuracy +! (i.e., it is transported in upwind fashion) in order to avoid +! additional mathematical complexity. +! +! The mass-like field lives in the array "mm" (shorthand for mean +! mass) and the tracers fields in the array "tm" (mean tracers). +! In order to transport tracers correctly, the remapping routine +! needs to know the tracers types and relationships. This is done +! as follows: +! +! Each field in the "tm" array is assigned an index, 1:ntrace. +! (Note: ntrace is not the same as ntrcr, the number of tracers +! in the trcrn state variable array. For remapping purposes we +! have additional tracers hi and hs.) +! +! The tracer types (1,2,3) are contained in the "tracer_type" array. +! For standard CICE: +! +! tracer_type = (1 1 1 2 2 2 2 2) +! +! Type 2 and type 3 tracers are said to depend on type 1 tracers. +! For instance, qi depends on hi, which is to say that +! there is a conservation equation of the form (2) or (6). +! 
Thus we define a "depend" array. For standard CICE: +! +! depend = (0 0 0 1 1 1 1 2) +! +! which implies that elements 1-3 (hi, hs, Ts) are type 1, +! elements 4-7 (qi) depend on element 1 (hi), and element 8 (qs) +! depends on element 2 (hs). +! +! We also define a logical array "has_dependents". In standard CICE: +! +! has_dependents = (T T F F F F F F), +! +! which means that only elements 1 and 2 (hi and hs) have dependent +! tracers. +! +! For the variable-density case, things are a bit more complicated. +! Suppose we have 4 variable-density ice layers and one variable- +! density snow layer. Then the indexing is as follows: +! 1 = hi +! 2 = hs +! 3 = Ts +! 4-7 = rhoi +! 8 = rhos +! 9-12 = qmi +! 13 = qms +! +! The key arrays are: +! +! tracer_type = (1 1 1 2 2 2 2 2 3 3 3 3 3) +! +! depend = (0 0 0 1 1 1 1 2 4 5 6 7 8) +! +! has_dependents = (T T F T T T T T F F F F F) +! +! which imply that hi and hs are type 1 with dependents rhoi and rhos, +! while rhoi and rhos are type 2 with dependents qmi and qms. +! +! Tracers added to the ntrcr array are handled automatically +! by the remapping with little extra coding. It is necessary +! only to provide the correct type and dependency information. +! +! When using this routine in other models, most of the tracer dependency +! apparatus may be irrelevant. In a layered ocean model, for example, +! the transported fields are the layer thickness h (the mass density +! field) and two or more tracers (T, S, and various trace species). +! Suppose there are just two tracers, T and S. Then the tracer arrays +! have the values: +! +! tracer_type = (1 1) +! depend = (0 0) +! has_dependents = (F F) +! +! which is to say that all tracer transport equations are of the form (1). +! +! The tracer dependency arrays are optional input arguments for the +! main remapping subroutine. If these arrays are not passed in, they +! take on the default values tracer_type(:) = 1, depend(:) = 0, and +! has_dependents(:) = F, which are appropriate for most purposes. +! +! Another optional argument is integral_order. If integral_order = 1, +! then the triangle integrals are exact for linear functions of x and y. +! If integral_order = 2, these integrals are exact for both linear and +! quadratic functions. If integral_order = 3, integrals are exact for +! cubic functions as well. If all tracers are of type 1, then the +! integrals of mass*tracer are quadratic, and integral_order = 2 is +! sufficient. In CICE, where there are type 2 tracers, we integrate +! functions of the form mass*tracer1*tracer2. Thus integral_order = 3 +! is required for exactness, though integral_order = 2 may be good enough +! in practice. +! +! Finally, a few words about the edgearea fields: +! +! In earlier versions of this scheme, the divergence of the velocity +! field implied by the remapping was, in general, different from the +! value of del*u computed in the dynamics. For energetic consistency +! (in CICE as well as in layered ocean models such as HYPOP), +! these two values should agree. This can be ensured by setting +! l_fixed_area = T and specifying the area transported across each grid +! cell edge in the arrays edgearea_e and edgearea_n. The departure +! regions are then tweaked, following an idea by Mats Bentsen, such +! that they have the desired area. If l_fixed_area = F, these regions +! are not tweaked, and the edgearea arrays are output variables. +! 
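+! As an illustrative sketch only (the two-tracer layered-ocean example
+! above; the array values below are hypothetical and not part of CICE),
+! a host model would fill the dependency arrays and call the remapping
+! roughly as follows:
+!
+!    tracer_type(1:2)    = 1        ! both tracers obey an equation of form (1)
+!    depend(1:2)         = 0        ! no tracer depends on another
+!    has_dependents(1:2) = .false.
+!
+!    call horizontal_remap (dt, 2,                            &
+!                           uvel, vvel,                       &
+!                           mm, tm,                           &
+!                           l_fixed_area = .false.,           &
+!                           tracer_type = tracer_type,        &
+!                           depend = depend,                  &
+!                           has_dependents = has_dependents,  &
+!                           integral_order = 2,               &
+!                           l_dp_midpt = .false.)
+!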
+!======================================================================= + + contains + +!======================================================================= +! +! Grid quantities used by the remapping transport scheme +! +! Note: the arrays xyav, xxxav, etc are not needed for rectangular grids +! but may be needed in the future for other nonuniform grids. They have +! been commented out here to save memory and flops. +! +! author William H. Lipscomb, LANL + + subroutine init_remap + + use ice_constants, only: c0, c1, c12 + use ice_domain, only: nblocks + use ice_blocks, only: nx_block, ny_block + use ice_grid, only: xav, yav, xxav, yyav +! dxt, dyt, xyav, & +! xxxav, xxyav, xyyav, yyyav + + integer (kind=int_kind) :: & + i, j, iblk ! standard indices + + ! Compute grid cell average geometric quantities on the scaled + ! rectangular grid with dx = 1, dy = 1. + ! + ! Note: On a rectangular grid, the integral of any odd function + ! of x or y = 0. + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + xav(i,j,iblk) = c0 + yav(i,j,iblk) = c0 +!!! These formulas would be used on a rectangular grid +!!! with dimensions (dxt, dyt): +!!! xxav(i,j,iblk) = dxt(i,j,iblk)**2 / c12 +!!! yyav(i,j,iblk) = dyt(i,j,iblk)**2 / c12 + xxav(i,j,iblk) = c1/c12 + yyav(i,j,iblk) = c1/c12 +! xyav(i,j,iblk) = c0 +! xxxav(i,j,iblk) = c0 +! xxyav(i,j,iblk) = c0 +! xyyav(i,j,iblk) = c0 +! yyyav(i,j,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO + + end subroutine init_remap + +!======================================================================= +! +! Solve the transport equations for one timestep using the incremental +! remapping scheme developed by John Dukowicz and John Baumgardner (DB) +! and modified for sea ice by William Lipscomb and Elizabeth Hunke. +! +! This scheme preserves monotonicity of ice area and tracers. That is, +! it does not produce new extrema. It is second-order accurate in space, +! except where gradients are limited to preserve monotonicity. +! +! This version of the remapping allows the user to specify the areal +! flux across each edge, based on an idea developed by Mats Bentsen. +! +! author William H. Lipscomb, LANL +! 2006: Moved driver (subroutine transport_remap) into separate module. +! Geometry changes (logically rectangular coordinates, fixed +! area fluxes) + + subroutine horizontal_remap (dt, ntrace, & + uvel, vvel, & + mm, tm, & + l_fixed_area, & + tracer_type, depend, & + has_dependents, & + integral_order, & + l_dp_midpt) + + use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, & + ice_HaloDestroy + use ice_constants, only: c0, p5, & + field_loc_center, field_type_scalar, & + field_loc_NEcorner, field_type_vector + use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_remap + use ice_blocks, only: block, get_block, nghost, nx_block, ny_block + use ice_grid, only: HTE, HTN, dxu, dyu, & + tarea, tarear, hm, & + xav, yav, xxav, yyav +! xyav, xxxav, xxyav, xyyav, yyyav + use ice_exit, only: abort_ice + use ice_calendar, only: istep1 + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + ntrace ! number of tracers in use + + real (kind=dbl_kind), intent(in), & + dimension(nx_block,ny_block,max_blocks) :: & + uvel ,&! x-component of velocity (m/s) + vvel ! 
y-component of velocity (m/s) + + real (kind=dbl_kind), intent(inout), & + dimension (nx_block,ny_block,0:ncat,max_blocks) :: & + mm ! mean mass values in each grid cell + + real (kind=dbl_kind), intent(inout), & + dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & + tm ! mean tracer values in each grid cell + + !------------------------------------------------------------------- + ! If l_fixed_area is true, the area of each departure region is + ! computed in advance (e.g., by taking the divergence of the + ! velocity field and passed to locate_triangles. The departure + ! regions are adjusted to obtain the desired area. + ! If false, edgearea is computed in locate_triangles and passed out. + !------------------------------------------------------------------- + + logical, intent(in) :: & + l_fixed_area ! if true, edgearea_e and edgearea_n are prescribed + ! if false, edgearea is computed here and passed out + + integer (kind=int_kind), dimension (ntrace), intent(in) :: & + tracer_type ,&! = 1, 2, or 3 (see comments above) + depend ! tracer dependencies (see above) + + logical (kind=log_kind), dimension (ntrace), intent(in) :: & + has_dependents ! true if a tracer has dependent tracers + + integer (kind=int_kind), intent(in) :: & + integral_order ! polynomial order for triangle integrals + + logical (kind=log_kind), intent(in) :: & + l_dp_midpt ! if true, find departure points using + ! corrected midpoint velocity + ! local variables + + integer (kind=int_kind) :: & + i, j ,&! horizontal indices + iblk ,&! block index + ilo,ihi,jlo,jhi,&! beginning and end of physical domain + n, m ! ice category, tracer indices + + integer (kind=int_kind), dimension(0:ncat,max_blocks) :: & + icellsnc ! number of cells with ice + + integer (kind=int_kind), & + dimension(nx_block*ny_block,0:ncat) :: & + indxinc, indxjnc ! compressed i/j indices + + real (kind=dbl_kind), dimension(nx_block,ny_block) :: & + edgearea_e ,&! area of departure regions for east edges + edgearea_n ! area of departure regions for north edges + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + dpx ,&! x coordinates of departure points at cell corners + dpy ! y coordinates of departure points at cell corners + + real (kind=dbl_kind), dimension(nx_block,ny_block,0:ncat,max_blocks) :: & + mc ,&! mass at geometric center of cell + mx, my ! limited derivative of mass wrt x and y + + real (kind=dbl_kind), dimension(nx_block,ny_block,0:ncat) :: & + mmask ! = 1. if mass is present, = 0. otherwise + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & + tc ,&! tracer values at geometric center of cell + tx, ty ! limited derivative of tracer wrt x and y + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ntrace,ncat) :: & + tmask ! = 1. if tracer is present, = 0. otherwise + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat) :: & + mflxe, mflxn ! mass transports across E and N cell edges + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat) :: & + mtflxe, mtflxn ! mass*tracer transports across E and N cell edges + + real (kind=dbl_kind), dimension (nx_block,ny_block,ngroups) :: & + triarea ! area of east-edge departure triangle + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:nvert,ngroups) :: & + xp, yp ! x and y coordinates of special triangle points + ! (need 4 points for triangle integrals) + + integer (kind=int_kind), & + dimension (nx_block,ny_block,ngroups) :: & + iflux ,&! i index of cell contributing transport + jflux ! 
j index of cell contributing transport + + integer (kind=int_kind), dimension(ngroups,max_blocks) :: & + icellsng ! number of cells with ice + + integer (kind=int_kind), & + dimension(nx_block*ny_block,ngroups) :: & + indxing, indxjng ! compressed i/j indices + + logical (kind=log_kind) :: & + l_stop ! if true, abort the model + + integer (kind=int_kind) :: & + istop, jstop ! indices of grid cell where model aborts + + character (len=char_len) :: & + edge ! 'north' or 'east' + + integer (kind=int_kind), & + dimension(nx_block,ny_block,max_blocks) :: halomask + + type (ice_halo) :: halo_info_tracer + + type (block) :: & + this_block ! block information for current block + +!---!------------------------------------------------------------------- +!---! Remap the ice area and associated tracers. +!---! Remap the open water area (without tracers). +!---!------------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n,m, & + !$OMP indxinc,indxjnc,mmask,tmask,istop,jstop,l_stop) + do iblk = 1, nblocks + + l_stop = .false. + istop = 0 + jstop = 0 + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !------------------------------------------------------------------- + ! Compute masks and count ice cells. + ! Masks are used to prevent tracer values in cells without ice from + ! being used to compute tracer gradients. + !------------------------------------------------------------------- + + call make_masks (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + has_dependents, icellsnc(:,iblk), & + indxinc(:,:), indxjnc(:,:), & + mm(:,:,:,iblk), mmask(:,:,:), & + tm(:,:,:,:,iblk), tmask(:,:,:,:)) + + !------------------------------------------------------------------- + ! Construct linear fields, limiting gradients to preserve monotonicity. + ! Note: Pass in unit arrays instead of true distances HTE, HTN, etc. + ! The resulting gradients are in scaled coordinates. + !------------------------------------------------------------------- + + ! open water + call construct_fields(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + tracer_type, depend, & + has_dependents, icellsnc (0,iblk), & + indxinc (:,0), indxjnc(:,0), & + hm (:,:,iblk), xav (:,:,iblk), & + yav (:,:,iblk), xxav (:,:,iblk), & + yyav (:,:,iblk), & +! xyav (:,:,iblk), & +! xxxav (:,:,iblk), xxyav(:,:,iblk), & +! xyyav (:,:,iblk), yyyav(:,:,iblk), & + mm (:,:,0,iblk), mc(:,:,0,iblk), & + mx (:,:,0,iblk), my(:,:,0,iblk), & + mmask (:,:,0) ) + + ! ice categories + do n = 1, ncat + + call construct_fields(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + tracer_type, depend, & + has_dependents, icellsnc (n,iblk), & + indxinc (:,n), indxjnc(:,n), & + hm (:,:,iblk), xav (:,:,iblk), & + yav (:,:,iblk), xxav (:,:,iblk), & + yyav (:,:,iblk), & +! xyav (:,:,iblk), & +! xxxav (:,:,iblk), xxyav (:,:,iblk), & +! xyyav (:,:,iblk), yyyav (:,:,iblk), & + mm (:,:,n,iblk), mc (:,:,n,iblk), & + mx (:,:,n,iblk), my (:,:,n,iblk), & + mmask (:,:,n), & + tm (:,:,:,n,iblk), tc(:,:,:,n,iblk), & + tx (:,:,:,n,iblk), ty(:,:,:,n,iblk), & + tmask(:,:,:,n) ) + + enddo ! n + + !------------------------------------------------------------------- + ! Given velocity field at cell corners, compute departure points + ! of trajectories. 
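+      ! The idea (sketch only; see departure_points for the exact code) is
+      ! to trace each cell corner backward over the time step, roughly
+      !
+      !    dpx ~ -uvel*dt ,  dpy ~ -vvel*dt
+      !
+      ! with an optional midpoint-velocity correction when l_dp_midpt is
+      ! true; the trajectory is flagged as bad (l_stop) if the departure
+      ! point leaves the neighbourhood of its home cell.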
+ !------------------------------------------------------------------- + + call departure_points(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, dt, & + uvel (:,:,iblk), vvel(:,:,iblk), & + dxu (:,:,iblk), dyu (:,:,iblk), & + HTN (:,:,iblk), HTE (:,:,iblk), & + dpx (:,:,iblk), dpy (:,:,iblk), & + l_dp_midpt, l_stop, & + istop, jstop) + + if (l_stop) then + write(nu_diag,*) 'istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + call abort_ice('remap transport: bad departure points') + endif + + enddo ! iblk + !$OMP END PARALLEL DO + + !------------------------------------------------------------------- + ! Ghost cell updates + ! If nghost >= 2, these calls are not needed + !------------------------------------------------------------------- + + if (nghost==1) then + + call ice_timer_start(timer_bound) + + ! departure points + call ice_HaloUpdate (dpx, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_HaloUpdate (dpy, halo_info, & + field_loc_NEcorner, field_type_vector) + + ! mass field + call ice_HaloUpdate (mc, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (mx, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (my, halo_info, & + field_loc_center, field_type_vector) + + ! tracer fields + if (maskhalo_remap) then + halomask(:,:,:) = 0 + !$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,n,m,j,i) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do n = 1, ncat + do m = 1, ntrace + do j = jlo, jhi + do i = ilo, ihi + if (tc(i,j,m,n,iblk) /= c0) halomask(i,j,iblk) = 1 + if (tx(i,j,m,n,iblk) /= c0) halomask(i,j,iblk) = 1 + if (ty(i,j,m,n,iblk) /= c0) halomask(i,j,iblk) = 1 + enddo + enddo + enddo + enddo + enddo + !$OMP END PARALLEL DO + call ice_HaloUpdate(halomask, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloMask(halo_info_tracer, halo_info, halomask) + + call ice_HaloUpdate (tc, halo_info_tracer, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (tx, halo_info_tracer, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (ty, halo_info_tracer, & + field_loc_center, field_type_vector) + call ice_HaloDestroy(halo_info_tracer) + else + call ice_HaloUpdate (tc, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (tx, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (ty, halo_info, & + field_loc_center, field_type_vector) + endif + call ice_timer_stop(timer_bound) + + endif ! nghost + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,n,m, & + !$OMP edgearea_e,edgearea_n,edge,iflux,jflux, & + !$OMP xp,yp,indxing,indxjng,mflxe,mflxn, & + !$OMP mtflxe,mtflxn,triarea,istop,jstop,l_stop) + do iblk = 1, nblocks + + l_stop = .false. + istop = 0 + jstop = 0 + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + !------------------------------------------------------------------- + ! If l_fixed_area is true, compute edgearea by taking the divergence + ! of the velocity field. Otherwise, initialize edgearea. 
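+      ! The prescribed area flux across an edge is the edge length times
+      ! the mean normal velocity on that edge times dt; e.g. for an east
+      ! edge (matching the loop below):
+      !
+      !    edgearea_e(i,j) = p5*(uvel(i,j)+uvel(i,j-1)) * HTE(i,j) * dt
+      !
+      ! which keeps the area transport consistent with the divergence of
+      ! the velocity field computed in the dynamics.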
+ !------------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + edgearea_e(i,j) = c0 + edgearea_n(i,j) = c0 + enddo + enddo + + if (l_fixed_area) then + do j = jlo, jhi + do i = ilo-1, ihi + edgearea_e(i,j) = (uvel(i,j,iblk) + uvel(i,j-1,iblk)) & + * p5 * HTE(i,j,iblk) * dt + enddo + enddo + + do j = jlo-1, jhi + do i = ilo, ihi + edgearea_n(i,j) = (vvel(i,j,iblk) + vvel(i-1,j,iblk)) & + * p5 * HTN(i,j,iblk) * dt + enddo + enddo + endif + + !------------------------------------------------------------------- + ! Transports for east cell edges. + !------------------------------------------------------------------- + + !------------------------------------------------------------------- + ! Compute areas and vertices of departure triangles. + !------------------------------------------------------------------- + + edge = 'east' + call locate_triangles(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, edge, & + icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + dpx (:,:,iblk), dpy (:,:,iblk), & + dxu (:,:,iblk), dyu (:,:,iblk), & + xp(:,:,:,:), yp(:,:,:,:), & + iflux, jflux, & + triarea, & + l_fixed_area, edgearea_e(:,:)) + + !------------------------------------------------------------------- + ! Given triangle vertices, compute coordinates of triangle points + ! needed for transport integrals. + !------------------------------------------------------------------- + + call triangle_coordinates (nx_block, ny_block, & + integral_order, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + xp, yp) + + !------------------------------------------------------------------- + ! Compute the transport across east cell edges by summing contributions + ! from each triangle. + !------------------------------------------------------------------- + + ! open water + call transport_integrals(nx_block, ny_block, & + ntrace, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc(:,:,0,iblk), mx (:,:,0,iblk), & + my(:,:,0,iblk), mflxe(:,:,0)) + + ! ice categories + do n = 1, ncat + call transport_integrals & + (nx_block, ny_block, & + ntrace, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc(:,:, n,iblk), mx (:,:, n,iblk), & + my(:,:, n,iblk), mflxe(:,:, n), & + tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & + ty(:,:,:,n,iblk), mtflxe(:,:,:,n)) + + enddo + + !------------------------------------------------------------------- + ! Repeat for north edges + !------------------------------------------------------------------- + + edge = 'north' + call locate_triangles(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, edge, & + icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + dpx (:,:,iblk), dpy (:,:,iblk), & + dxu (:,:,iblk), dyu (:,:,iblk), & + xp(:,:,:,:), yp(:,:,:,:), & + iflux, jflux, & + triarea, & + l_fixed_area, edgearea_n(:,:)) + + call triangle_coordinates (nx_block, ny_block, & + integral_order, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + xp, yp) + + ! open water + call transport_integrals(nx_block, ny_block, & + ntrace, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc(:,:,0,iblk), mx(:,:,0,iblk), & + my(:,:,0,iblk), mflxn(:,:,0)) + + ! 
ice categories + do n = 1, ncat + call transport_integrals & + (nx_block, ny_block, & + ntrace, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc(:,:, n,iblk), mx (:,:, n,iblk), & + my(:,:, n,iblk), mflxn(:,:, n), & + tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & + ty(:,:,:,n,iblk), mtflxn(:,:,:,n)) + + enddo ! n + + !------------------------------------------------------------------- + ! Update the ice area and tracers. + !------------------------------------------------------------------- + + ! open water + call update_fields (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + ntrace, & + tracer_type, depend, & + tarear(:,:,iblk), l_stop, & + istop, jstop, & + mflxe(:,:,0), mflxn(:,:,0), & + mm (:,:,0,iblk)) + + if (l_stop) then + this_block = get_block(blocks_ice(iblk),iblk) + write (nu_diag,*) 'istep1, my_task, iblk, cat =', & + istep1, my_task, iblk, '0' + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + call abort_ice ('ice remap_transport: negative area (open water)') + endif + + ! ice categories + do n = 1, ncat + + call update_fields(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + ntrace, & + tracer_type, depend, & + tarear(:,:,iblk), l_stop, & + istop, jstop, & + mflxe(:,:, n), mflxn(:,:, n), & + mm (:,:, n,iblk), & + mtflxe(:,:,:,n), mtflxn(:,:,:,n), & + tm (:,:,:,n,iblk)) + + if (l_stop) then + write (nu_diag,*) 'istep1, my_task, iblk, cat =', & + istep1, my_task, iblk, n + write (nu_diag,*) 'Global block:', this_block%block_id + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) + call abort_ice ('ice remap_transport: negative area (ice)') + endif + enddo ! n + + enddo ! iblk + !$OMP END PARALLEL DO + + end subroutine horizontal_remap + +!======================================================================= +! +! Make area and tracer masks. +! +! If an area is masked out (mm < puny), then the values of tracers +! in that grid cell are assumed to have no physical meaning. +! +! Similarly, if a tracer with dependents is masked out +! (abs(tm) < puny), then the values of its dependent tracers in that +! grid cell are assumed to have no physical meaning. +! For example, the enthalpy value has no meaning if the thickness +! is zero. +! +! author William H. Lipscomb, LANL + + subroutine make_masks (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + has_dependents, & + icells, & + indxi, indxj, & + mm, mmask, & + tm, tmask) + + use ice_constants, only: c0, c1, puny + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ,&! block dimensions + ilo,ihi,jlo,jhi ,&! beginning and end of physical domain + nghost ,&! number of ghost cells + ntrace ! number of tracers in use + + + logical (kind=log_kind), dimension (ntrace), intent(in) :: & + has_dependents ! true if a tracer has dependent tracers + + integer (kind=int_kind), dimension(0:ncat), intent(out) :: & + icells ! number of cells with ice + + integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat), & + intent(out) :: & + indxi ,&! compressed i/j indices + indxj + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), & + intent(in) :: & + mm ! mean ice area in each grid cell + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), & + intent(out) :: & + mmask ! = 1. 
if ice is present, else = 0. + + real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace, ncat), & + intent(in), optional :: & + tm ! mean tracer values in each grid cell + + real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace, ncat), & + intent(out), optional :: & + tmask ! = 1. if tracer is present, else = 0. + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij ,&! horizontal indices + n ,&! ice category index + nt ! tracer index + + do n = 0, ncat + do ij = 1, nx_block*ny_block + indxi(ij,n) = 0 + indxj(ij,n) = 0 + enddo + enddo + + !------------------------------------------------------------------- + ! open water mask + !------------------------------------------------------------------- + + icells(0) = 0 + do j = 1, ny_block + do i = 1, nx_block + if (mm(i,j,0) > puny) then + mmask(i,j,0) = c1 + icells(0) = icells(0) + 1 + ij = icells(0) + indxi(ij,0) = i + indxj(ij,0) = j + else + mmask(i,j,0) = c0 + endif + enddo + enddo + + do n = 1, ncat + + !------------------------------------------------------------------- + ! Find grid cells where ice is present. + !------------------------------------------------------------------- + + icells(n) = 0 + do j = 1, ny_block + do i = 1, nx_block + if (mm(i,j,n) > puny) then + icells(n) = icells(n) + 1 + ij = icells(n) + indxi(ij,n) = i + indxj(ij,n) = j + endif ! mm > puny + enddo + enddo + + !------------------------------------------------------------------- + ! ice area mask + !------------------------------------------------------------------- + + mmask(:,:,n) = c0 + do ij = 1, icells(n) + i = indxi(ij,n) + j = indxj(ij,n) + mmask(i,j,n) = c1 + enddo + + !------------------------------------------------------------------- + ! tracer masks + !------------------------------------------------------------------- + + if (present(tm)) then + + tmask(:,:,:,n) = c0 + do nt = 1, ntrace + if (has_dependents(nt)) then + do ij = 1, icells(n) + i = indxi(ij,n) + j = indxj(ij,n) + if (abs(tm(i,j,nt,n)) > puny) then + tmask(i,j,nt,n) = c1 + endif + enddo + endif + enddo + + endif ! present(tm) + + !------------------------------------------------------------------- + ! Redefine icells + ! For nghost = 1, exclude ghost cells + ! For nghost = 2, include one layer of ghost cells + !------------------------------------------------------------------- + + icells(n) = 0 + do j = jlo-nghost+1, jhi+nghost-1 + do i = ilo-nghost+1, ihi+nghost-1 + if (mm(i,j,n) > puny) then + icells(n) = icells(n) + 1 + ij = icells(n) + indxi(ij,n) = i + indxj(ij,n) = j + endif ! mm > puny + enddo + enddo + + enddo ! n + + end subroutine make_masks + +!======================================================================= +! +! Construct fields of ice area and tracers. +! +! authors William H. Lipscomb, LANL +! John R. Baumgardner, LANL + + subroutine construct_fields (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + tracer_type, depend, & + has_dependents, icells, & + indxi, indxj, & + hm, xav, & + yav, xxav, & + yyav, & +! xyav, & +! xxxav, xxyav, & +! xyyav, yyyav, & + mm, mc, & + mx, my, & + mmask, & + tm, tc, & + tx, ty, & + tmask) + + use ice_constants, only: c0, c1, puny + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ,&! block dimensions + ilo,ihi,jlo,jhi ,&! beginning and end of physical domain + nghost ,&! number of ghost cells + ntrace ,&! number of tracers in use + icells ! number of cells with mass + + integer (kind=int_kind), dimension (ntrace), intent(in) :: & + tracer_type ,&! 
= 1, 2, or 3 (see comments above) + depend ! tracer dependencies (see above) + + logical (kind=log_kind), dimension (ntrace), intent(in) :: & + has_dependents ! true if a tracer has dependent tracers + + integer (kind=int_kind), dimension(nx_block*ny_block), intent(in) :: & + indxi ,&! compressed i/j indices + indxj + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + hm ,&! land/boundary mask, thickness (T-cell) + xav, yav ,&! mean T-cell values of x, y + xxav, yyav ! mean T-cell values of xx, yy +! xyav, ,&! mean T-cell values of xy +! xxxav,xxyav,xyyav,yyyav ! mean T-cell values of xxx, xxy, xyy, yyy + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + mm ,&! mean value of mass field + mmask ! = 1. if ice is present, = 0. otherwise + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace), & + intent(in), optional :: & + tm ,&! mean tracer + tmask ! = 1. if tracer is present, = 0. otherwise + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + mc ,&! mass value at geometric center of cell + mx, my ! limited derivative of mass wrt x and y + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace), & + intent(out), optional :: & + tc ,&! tracer at geometric center of cell + tx, ty ! limited derivative of tracer wrt x and y + + ! local variables + + integer (kind=int_kind) :: & + i, j ,&! horizontal indices + nt, nt1 ,&! tracer indices + ij ! combined i/j horizontal index + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + mxav ,&! x coordinate of center of mass + myav ! y coordinate of center of mass + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace) :: & + mtxav ,&! x coordinate of center of mass*tracer + mtyav ! y coordinate of center of mass*tracer + + real (kind=dbl_kind) :: & + w1, w2, w3, w4, w5, w6, w7 ! work variables + + !------------------------------------------------------------------- + ! Compute field values at the geometric center of each grid cell, + ! and compute limited gradients in the x and y directions. + ! + ! For second order accuracy, each state variable is approximated as + ! a field varying linearly over x and y within each cell. For each + ! category, the integrated value of m(x,y) over the cell must + ! equal mm(i,j,n)*tarea(i,j), where tarea(i,j) is the cell area. + ! Similarly, the integrated value of m(x,y)*t(x,y) must equal + ! the total mass*tracer, mm(i,j,n)*tm(i,j,n)*tarea(i,j). + ! + ! These integral conditions are satisfied for linear fields if we + ! stipulate the following: + ! (1) The mean mass, mm, is equal to the mass at the cell centroid. + ! (2) The mean value tm1 of type 1 tracers is equal to the value + ! at the center of mass. + ! (3) The mean value tm2 of type 2 tracers is equal to the value + ! at the center of mass*tm1, where tm2 depends on tm1. + ! (See comments at the top of the module.) + ! + ! We want to find the value of each state variable at a standard + ! reference point, which we choose to be the geometric center of + ! the cell. The geometric center is located at the intersection + ! of the line joining the midpoints of the north and south edges + ! with the line joining the midpoints of the east and west edges. + ! To find the value at the geometric center, we must know the + ! location of the cell centroid/center of mass, along with the + ! mean value and the gradients with respect to x and y. + ! + ! The cell gradients are first computed from the difference between + ! 
values in the neighboring cells, then limited by requiring that + ! no new extrema are created within the cell. + ! + ! For rectangular coordinates the centroid and the geometric + ! center coincide, which means that some of the equations in this + ! subroutine could be simplified. However, the full equations + ! are retained for generality. + !------------------------------------------------------------------- + + !------------------------------------------------------------------- + ! Initialize + !------------------------------------------------------------------- + + do j = 1, ny_block + do i = 1, nx_block + mc(i,j) = c0 + mx(i,j) = c0 + my(i,j) = c0 + mxav(i,j) = c0 + myav(i,j) = c0 + enddo + enddo + + if (present(tm)) then + do nt = 1, ntrace + do j = 1, ny_block + do i = 1, nx_block + tc(i,j,nt) = c0 + tx(i,j,nt) = c0 + ty(i,j,nt) = c0 + enddo + enddo + enddo + endif + + ! limited gradient of mass field in each cell (except masked cells) + ! Note: The gradient is computed in scaled coordinates with + ! dxt = dyt = hte = htn = 1. + + call limited_gradient (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + mm, hm, & + xav, yav, & + mx, my) + + do ij = 1,icells ! ice is present + i = indxi(ij) + j = indxj(ij) + + ! mass field at geometric center +!echmod: xav = yav = 0 + mc(i,j) = mm(i,j) + +! mc(i,j) = mm(i,j) - xav(i,j)*mx(i,j) & +! - yav(i,j)*my(i,j) + + enddo ! ij + + ! tracers + + if (present(tm)) then + + do ij = 1,icells ! cells with mass + i = indxi(ij) + j = indxj(ij) + + ! center of mass (mxav,myav) for each cell +!echmod: xyav = 0 + mxav(i,j) = (mx(i,j)*xxav(i,j) & + + mc(i,j)*xav (i,j)) / mm(i,j) + myav(i,j) = (my(i,j)*yyav(i,j) & + + mc(i,j)*yav(i,j)) / mm(i,j) + +! mxav(i,j) = (mx(i,j)*xxav(i,j) & +! + my(i,j)*xyav(i,j) & +! + mc(i,j)*xav (i,j)) / mm(i,j) +! myav(i,j) = (mx(i,j)*xyav(i,j) & +! + my(i,j)*yyav(i,j) & +! + mc(i,j)*yav(i,j)) / mm(i,j) + enddo + + do nt = 1, ntrace + + if (tracer_type(nt)==1) then ! independent of other tracers + + call limited_gradient(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + tm(:,:,nt), mmask, & + mxav, myav, & + tx(:,:,nt), ty(:,:,nt)) + + if (has_dependents(nt)) then ! need center of area*tracer + + do j = 1, ny_block + do i = 1, nx_block + mtxav(i,j,nt) = c0 + mtyav(i,j,nt) = c0 + enddo + enddo + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells ! Note: no tx or ty in ghost cells + ! (bound calls are later) + i = indxi(ij) + j = indxj(ij) + + ! tracer value at geometric center + tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & + - ty(i,j,nt)*myav(i,j) + + if (tmask(i,j,nt) > puny) then + + ! center of area*tracer + w1 = mc(i,j)*tc(i,j,nt) + w2 = mc(i,j)*tx(i,j,nt) & + + mx(i,j)*tc(i,j,nt) + w3 = mc(i,j)*ty(i,j,nt) & + + my(i,j)*tc(i,j,nt) + w4 = mx(i,j)*tx(i,j,nt) + w5 = mx(i,j)*ty(i,j,nt) & + + my(i,j)*tx(i,j,nt) + w6 = my(i,j)*ty(i,j,nt) + w7 = c1 / (mm(i,j)*tm(i,j,nt)) +!echmod: grid arrays = 0 + mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) & + * w7 + mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) & + * w7 + +! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) & +! + w3*xyav (i,j) + w4*xxxav(i,j) & +! + w5*xxyav(i,j) + w6*xyyav(i,j)) & +! * w7 +! mtyav(i,j,nt) = (w1*yav(i,j) + w2*xyav (i,j) & +! + w3*yyav(i,j) + w4*xxyav(i,j) & +! + w5*xyyav(i,j) + w6*yyyav(i,j)) & +! * w7 + endif ! tmask + + enddo ! ij + + else ! no dependents + + do ij = 1, icells ! mass is present + i = indxi(ij) + j = indxj(ij) + + ! 
tracer value at geometric center + tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & + - ty(i,j,nt)*myav(i,j) + enddo ! ij + + endif ! has_dependents + + elseif (tracer_type(nt)==2) then ! tracer nt depends on nt1 + nt1 = depend(nt) + + call limited_gradient(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + tm(:,:,nt), tmask(:,:,nt1), & + mtxav(:,:,nt1), mtyav(:,:,nt1), & + tx(:,:,nt), ty(:,:,nt)) + + do ij = 1, icells ! ice is present + i = indxi(ij) + j = indxj(ij) + tc(i,j,nt) = tm(i,j,nt) & + - tx(i,j,nt) * mtxav(i,j,nt1) & + - ty(i,j,nt) * mtyav(i,j,nt1) + enddo ! ij + + elseif (tracer_type(nt)==3) then ! upwind approx; gradient = 0 + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + tc(i,j,nt) = tm(i,j,nt) +! tx(i,j,nt) = c0 ! already initialized to 0. +! ty(i,j,nt) = c0 + enddo ! ij + + endif ! tracer_type + enddo ! ntrace + + endif ! present (tm) + + end subroutine construct_fields + +!======================================================================= +! +! Compute a limited gradient of the scalar field phi in scaled coordinates. +! "Limited" means that we do not create new extrema in phi. For +! instance, field values at the cell corners can neither exceed the +! maximum of phi(i,j) in the cell and its eight neighbors, nor fall +! below the minimum. +! +! authors William H. Lipscomb, LANL +! John R. Baumgardner, LANL + + subroutine limited_gradient (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + phi, phimask, & + cnx, cny, & + gx, gy) + + use ice_constants, only: c0, c1, p5, puny + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block,&! block dimensions + ilo,ihi,jlo,jhi ,&! beginning and end of physical domain + nghost ! number of ghost cells + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent (in) :: & + phi ,&! input tracer field (mean values in each grid cell) + cnx ,&! x-coordinate of phi relative to geometric center of cell + cny ,&! y-coordinate of phi relative to geometric center of cell + phimask + ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise. + ! For instance, aice has no physical meaning in land cells, + ! and hice no physical meaning where aice = 0. + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(out) :: & + gx ,&! limited x-direction gradient + gy ! limited y-direction gradient + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij ,&! standard indices + icells ! number of cells to limit + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! combined i/j horizontal indices + + real (kind=dbl_kind) :: & + phi_nw, phi_n, phi_ne ,&! values of phi in 8 neighbor cells + phi_w, phi_e ,& + phi_sw, phi_s, phi_se ,& + qmn, qmx ,&! min and max value of phi within grid cell + pmn, pmx ,&! min and max value of phi among neighbor cells + w1, w2, w3, w4 ! work variables + + real (kind=dbl_kind) :: & + gxtmp, gytmp ! temporary term for x- and y- limited gradient + + gx(:,:) = c0 + gy(:,:) = c0 + + ! For nghost = 1, loop over physical cells and update ghost cells later + ! For nghost = 2, loop over a layer of ghost cells and skip the update + + icells = 0 + do j = jlo-nghost+1, jhi+nghost-1 + do i = ilo-nghost+1, ihi+nghost-1 + if (phimask(i,j) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! phimask > puny + enddo + enddo + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ! Store values of phi in the 8 neighbor cells. + ! Note: phimask = 1. or 0. If phimask = 1., use the true value; + ! 
if phimask = 0., use the home cell value so that non-physical + ! values of phi do not contribute to the gradient. + phi_nw = phimask(i-1,j+1) * phi(i-1,j+1) & + + (c1-phimask(i-1,j+1))* phi(i,j) + phi_n = phimask(i,j+1) * phi(i,j+1) & + + (c1-phimask(i,j+1)) * phi(i,j) + phi_ne = phimask(i+1,j+1) * phi(i+1,j+1) & + + (c1-phimask(i+1,j+1))* phi(i,j) + phi_w = phimask(i-1,j) * phi(i-1,j) & + + (c1-phimask(i-1,j)) * phi(i,j) + phi_e = phimask(i+1,j) * phi(i+1,j) & + + (c1-phimask(i+1,j)) * phi(i,j) + phi_sw = phimask(i-1,j-1) * phi(i-1,j-1) & + + (c1-phimask(i-1,j-1))* phi(i,j) + phi_s = phimask(i,j-1) * phi(i,j-1) & + + (c1-phimask(i,j-1)) * phi(i,j) + phi_se = phimask(i+1,j-1) * phi(i+1,j-1) & + + (c1-phimask(i+1,j-1))* phi(i,j) + + ! unlimited gradient components + ! (factors of two cancel out) + + gxtmp = (phi_e - phi_w) * p5 + gytmp = (phi_n - phi_s) * p5 + + ! minimum and maximum among the nine local cells + pmn = min (phi_nw, phi_n, phi_ne, phi_w, phi(i,j), & + phi_e, phi_sw, phi_s, phi_se) + pmx = max (phi_nw, phi_n, phi_ne, phi_w, phi(i,j), & + phi_e, phi_sw, phi_s, phi_se) + + pmn = pmn - phi(i,j) + pmx = pmx - phi(i,j) + + ! minimum and maximum deviation of phi within the cell + w1 = (p5 - cnx(i,j)) * gxtmp & + + (p5 - cny(i,j)) * gytmp + w2 = (p5 - cnx(i,j)) * gxtmp & + - (p5 + cny(i,j)) * gytmp + w3 = -(p5 + cnx(i,j)) * gxtmp & + - (p5 + cny(i,j)) * gytmp + w4 = (p5 - cny(i,j)) * gytmp & + - (p5 + cnx(i,j)) * gxtmp + + qmn = min (w1, w2, w3, w4) + qmx = max (w1, w2, w3, w4) + + ! the limiting coefficient + if ( abs(qmn) > abs(pmn) ) then ! 'abs(qmn) > puny' not sufficient + w1 = max(c0, pmn/qmn) + else + w1 = c1 + endif + + if ( abs(qmx) > abs(pmx) ) then + w2 = max(c0, pmx/qmx) + else + w2 = c1 + endif + + w1 = min(w1, w2) + + ! Limit the gradient components + gx(i,j) = w1 * gxtmp + gy(i,j) = w1 * gytmp + + enddo ! ij + + end subroutine limited_gradient + +!======================================================================= +! +! Given velocity fields on cell corners, compute departure points +! of back trajectories in nondimensional coordinates. +! +! author William H. Lipscomb, LANL + + subroutine departure_points (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, dt, & + uvel, vvel, & + dxu, dyu, & + HTN, HTE, & + dpx, dpy, & + l_dp_midpt, l_stop, & + istop, jstop) + + use ice_constants, only: c0, p5 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block,&! block dimensions + ilo,ihi,jlo,jhi, &! beginning and end of physical domain + nghost ! number of ghost cells + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvel ,&! x-component of velocity (m/s) + vvel ,&! y-component of velocity (m/s) + dxu ,&! E-W dimensions of U-cell (m) + dyu ,&! N-S dimensions of U-cell (m) + HTN ,&! length of north face of T-cell (m) + HTE ! length of east face of T-cell (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + dpx ,&! coordinates of departure points (m) + dpy ! coordinates of departure points (m) + + logical (kind=log_kind), intent(in) :: & + l_dp_midpt ! if true, find departure points using + ! corrected midpoint velocity + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i, j, i2, j2 ! horizontal indices + + real (kind=dbl_kind) :: & + mpx, mpy ,&! 
coordinates of midpoint of back trajectory, + ! relative to cell corner + mpxt, mpyt ,&! midpoint coordinates relative to cell center + ump, vmp ! corrected velocity at midpoint + + !------------------------------------------------------------------- + ! Estimate departure points. + ! This estimate is 1st-order accurate in time; improve accuracy by + ! using midpoint approximation (to add later). + ! For nghost = 1, loop over physical cells and update ghost cells later. + ! For nghost = 2, loop over a layer of ghost cells and skip update. + !------------------------------------------------------------------- + + dpx(:,:) = c0 + dpy(:,:) = c0 + + do j = jlo-nghost+1, jhi+nghost-1 + do i = ilo-nghost+1, ihi+nghost-1 + + dpx(i,j) = -dt*uvel(i,j) + dpy(i,j) = -dt*vvel(i,j) + + ! Check for values out of bounds (more than one grid cell away) + if (dpx(i,j) < -HTN(i,j) .or. dpx(i,j) > HTN(i+1,j) .or. & + dpy(i,j) < -HTE(i,j) .or. dpy(i,j) > HTE(i,j+1)) then + l_stop = .true. + istop = i + jstop = j + endif + + enddo + enddo + + if (l_stop) then + i = istop + j = jstop + write (nu_diag,*) ' ' + write (nu_diag,*) & + 'Warning: Departure points out of bounds in remap' + write (nu_diag,*) 'my_task, i, j =', my_task, i, j +#ifdef AusCOM + write (nu_diag,*) 'dt, uvel, vvel = ', dt, uvel(i,j), vvel(i,j) +#endif + write (nu_diag,*) 'dpx, dpy =', dpx(i,j), dpy(i,j) + write (nu_diag,*) 'HTN(i,j), HTN(i+1,j) =', HTN(i,j), HTN(i+1,j) + write (nu_diag,*) 'HTE(i,j), HTE(i,j+1) =', HTE(i,j), HTE(i,j+1) + return + endif + + if (l_dp_midpt) then ! find dep pts using corrected midpt velocity + + do j = jlo-nghost+1, jhi+nghost-1 + do i = ilo-nghost+1, ihi+nghost-1 + if (uvel(i,j)/=c0 .or. vvel(i,j)/=c0) then + + !------------------------------------------------------------------- + ! Scale departure points to coordinate system in which grid cells + ! have sides of unit length. + !------------------------------------------------------------------- + + dpx(i,j) = dpx(i,j) / dxu(i,j) + dpy(i,j) = dpy(i,j) / dyu(i,j) + + !------------------------------------------------------------------- + ! Estimate midpoint of backward trajectory relative to corner (i,j). + !------------------------------------------------------------------- + + mpx = p5 * dpx(i,j) + mpy = p5 * dpy(i,j) + + !------------------------------------------------------------------- + ! Determine the indices (i2,j2) of the cell where the trajectory lies. + ! Compute the coordinates of the midpoint of the backward trajectory + ! relative to the cell center in a stretch coordinate system + ! with vertices at (1/2, 1/2), (1/2, -1/2), etc. + !------------------------------------------------------------------- + + if (mpx >= c0 .and. mpy >= c0) then ! cell (i+1,j+1) + i2 = i+1 + j2 = j+1 + mpxt = mpx - p5 + mpyt = mpy - p5 + elseif (mpx < c0 .and. mpy < c0) then ! cell (i,j) + i2 = i + j2 = j + mpxt = mpx + p5 + mpyt = mpy + p5 + elseif (mpx >= c0 .and. mpy < c0) then ! cell (i+1,j) + i2 = i+1 + j2 = j + mpxt = mpx - p5 + mpyt = mpy + p5 + elseif (mpx < c0 .and. mpy >= c0) then ! cell (i,j+1) + i2 = i + j2 = j+1 + mpxt = mpx + p5 + mpyt = mpy - p5 + endif + + !------------------------------------------------------------------- + ! Using a bilinear approximation, estimate the velocity at the + ! trajectory midpoint in the (i2,j2) reference frame. 
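+ ! In these scaled coordinates the four corners of cell (i2,j2) lie at
+ ! (+/-p5, +/-p5), so the standard bilinear weights reduce to signed
+ ! products of (mpxt-p5), (mpxt+p5), (mpyt-p5) and (mpyt+p5); for
+ ! example, the weight applied to uvel(i2,j2) below is
+ ! (mpxt+p5)*(mpyt+p5), and the four weights sum to one.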
+ !------------------------------------------------------------------- + + ump = uvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & + - uvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & + + uvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & + - uvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) + + vmp = vvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & + - vvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & + + vvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & + - vvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) + + !------------------------------------------------------------------- + ! Use the midpoint velocity to estimate the coordinates of the + ! departure point relative to corner (i,j). + !------------------------------------------------------------------- + + dpx(i,j) = -dt * ump + dpy(i,j) = -dt * vmp + + endif ! nonzero velocity + + enddo ! i + enddo ! j + + endif ! l_dp_midpt + + end subroutine departure_points + +!======================================================================= +! +! Compute areas and vertices of transport triangles for north or +! east cell edges. +! +! authors William H. Lipscomb, LANL +! John R. Baumgardner, LANL + + subroutine locate_triangles (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, edge, & + icells, & + indxi, indxj, & + dpx, dpy, & + dxu, dyu, & + xp, yp, & + iflux, jflux, & + triarea, & + l_fixed_area, edgearea) + + use ice_constants, only: c0, c1, c2, p5, puny, eps13, eps16 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block,&! block dimensions + ilo,ihi,jlo,jhi ,&! beginning and end of physical domain + nghost ! number of ghost cells + + character (len=char_len), intent(in) :: & + edge ! 'north' or 'east' + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & + dpx ,&! x coordinates of departure points at cell corners + dpy ,&! y coordinates of departure points at cell corners + dxu ,&! E-W dimension of U-cell (m) + dyu ! N-S dimension of U-cell (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:nvert,ngroups), & + intent(out) :: & + xp, yp ! coordinates of triangle vertices + + real (kind=dbl_kind), dimension (nx_block,ny_block,ngroups), & + intent(out) :: & + triarea ! area of departure triangle + + integer (kind=int_kind), dimension (nx_block,ny_block,ngroups), & + intent(out) :: & + iflux ,&! i index of cell contributing transport + jflux ! j index of cell contributing transport + + integer (kind=int_kind), dimension (ngroups), intent(out) :: & + icells ! number of cells where triarea > puny + + integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), & + intent(out) :: & + indxi ,&! compressed index in i-direction + indxj ! compressed index in j-direction + + logical, intent(in) :: & + l_fixed_area ! if true, the area of each departure region is + ! passed in as edgearea + ! if false, edgearea is determined internally + ! and is passed out + + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(inout) :: & + edgearea ! area of departure region for each edge + ! edgearea > 0 for eastward/northward flow + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij, ic ,&! horizontal indices + ib, ie, jb, je ,&! limits for loops over edges + ng, nv ,&! triangle indices + ishift, jshift ,&! differences between neighbor cells + ishift_tl, jshift_tl ,&! i,j indices of TL cell relative to edge + ishift_bl, jshift_bl ,&! i,j indices of BL cell relative to edge + ishift_tr, jshift_tr ,&! i,j indices of TR cell relative to edge + ishift_br, jshift_br ,&! i,j indices of BR cell relative to edge + ishift_tc, jshift_tc ,&! i,j indices of TC cell relative to edge + ishift_bc, jshift_bc !
i,j indices of BC cell relative to edge + + integer (kind=int_kind) :: & + icellsd ! number of cells where departure area > 0. + + integer (kind=int_kind), dimension (nx_block*ny_block) :: & + indxid ,&! compressed index in i-direction + indxjd ! compressed index in j-direction + + real (kind=dbl_kind), dimension(nx_block,ny_block) :: & + dx, dy ,&! scaled departure points + areafac_c ,&! area scale factor at center of edge + areafac_l ,&! area scale factor at left corner + areafac_r ! area scale factor at right corner + + real (kind=dbl_kind) :: & + xcl, ycl ,&! coordinates of left corner point + ! (relative to midpoint of edge) + xdl, ydl ,&! left departure point + xil, yil ,&! left intersection point + xcr, ycr ,&! right corner point + xdr, ydr ,&! right departure point + xir, yir ,&! right intersection point + xic, yic ,&! x-axis intersection point + xicl, yicl ,&! left-hand x-axis intersection point + xicr, yicr ,&! right-hand x-axis intersection point + xdm, ydm ,&! midpoint of segment connecting DL and DR; + ! shifted if l_fixed_area = T + md ,&! slope of line connecting DL and DR + mdl ,&! slope of line connecting DL and DM + mdr ,&! slope of line connecting DR and DM + area1, area2 ,&! temporary triangle areas + area3, area4 ,&! + area_c ,&! center polygon area + w1, w2 ! work variables + + real (kind=dbl_kind), dimension (nx_block,ny_block,ngroups) :: & + areafact ! = 1 for positive flux, -1 for negative + + real (kind=dbl_kind), dimension(nx_block,ny_block) :: & + areasum ! sum of triangle areas for a given edge + + !------------------------------------------------------------------- + ! Triangle notation: + ! For each edge, there are 20 triangles that can contribute, + ! but many of these are mutually exclusive. It turns out that + ! at most 5 triangles can contribute to transport integrals at once. + ! + ! See Figure 3 in DB for pictures of these triangles. + ! See Table 1 in DB for logical conditions. + ! + ! For the north edge, DB refer to these triangles as: + ! (1) NW, NW1, W, W2 + ! (2) NE, NE1, E, E2 + ! (3) NW2, W1, NE2, E1 + ! (4) H1a, H1b, N1a, N1b + ! (5) H2a, H2b, N2a, N2b + ! + ! For the east edge, DB refer to these triangles as: + ! (1) NE, NE1, N, N2 + ! (2) SE, SE1, S, S2 + ! (3) NE2, N1, SE2, S1 + ! (4) H1a, H1b, E1a, E1b + ! (5) H2a, H2b, E2a, E2b + ! + ! The code below works for either north or east edges. + ! The respective triangle labels are: + ! (1) TL, TL1, BL, BL2 + ! (2) TR, TR1, BR, BR2 + ! (3) TL2, BL1, TR2, BR1 + ! (4) BC1a, BC1b, TC1a, TC1b + ! (5) BC2a, BC2b, TC2a, TC2b + ! + ! where the cell labels are: + ! + ! | | + ! TL | TC | TR (top left, center, right) + ! | | + ! ------------------------ + ! | | + ! BL | BC | BR (bottom left, center, right) + ! | | + ! + ! and the transport is across the edge between cells TC and BC. + ! + ! Departure points are scaled to a local coordinate system + ! whose origin is at the midpoint of the edge. + ! In this coordinate system, the lefthand corner CL = (-0.5,0) + ! and the righthand corner CR = (0.5, 0). + !------------------------------------------------------------------- + + !------------------------------------------------------------------- + !
Initialize + !------------------------------------------------------------------- + + areafac_c(:,:) = c0 + areafac_l(:,:) = c0 + areafac_r(:,:) = c0 + do ng = 1, ngroups + do j = 1, ny_block + do i = 1, nx_block + triarea (i,j,ng) = c0 + areafact(i,j,ng) = c0 + iflux (i,j,ng) = i + jflux (i,j,ng) = j + enddo + enddo + do nv = 0, nvert + do j = 1, ny_block + do i = 1, nx_block + xp(i,j,nv,ng) = c0 + yp(i,j,nv,ng) = c0 + enddo + enddo + enddo + enddo + + if (trim(edge) == 'north') then + + ! loop size + + ib = ilo + ie = ihi + jb = jlo - nghost ! lowest j index is a ghost cell + je = jhi + + ! index shifts for neighbor cells + + ishift_tl = -1 + jshift_tl = 1 + ishift_bl = -1 + jshift_bl = 0 + ishift_tr = 1 + jshift_tr = 1 + ishift_br = 1 + jshift_br = 0 + ishift_tc = 0 + jshift_tc = 1 + ishift_bc = 0 + jshift_bc = 0 + + ! area scale factor + + do j = jb, je + do i = ib, ie + areafac_l(i,j) = dxu(i-1,j)*dyu(i-1,j) + areafac_r(i,j) = dxu(i,j)*dyu(i,j) + areafac_c(i,j) = p5*(areafac_l(i,j) + areafac_r(i,j)) + enddo + enddo + + else ! east edge + + ! loop size + + ib = ilo - nghost ! lowest i index is a ghost cell + ie = ihi + jb = jlo + je = jhi + + ! index shifts for neighbor cells + + ishift_tl = 1 + jshift_tl = 1 + ishift_bl = 0 + jshift_bl = 1 + ishift_tr = 1 + jshift_tr = -1 + ishift_br = 0 + jshift_br = -1 + ishift_tc = 1 + jshift_tc = 0 + ishift_bc = 0 + jshift_bc = 0 + + ! area scale factors + + do j = jb, je + do i = ib, ie + areafac_l(i,j) = dxu(i,j)*dyu(i,j) + areafac_r(i,j) = dxu(i,j-1)*dyu(i,j-1) + areafac_c(i,j) = p5 * (areafac_l(i,j) + areafac_r(i,j)) + enddo + enddo + + endif + + !------------------------------------------------------------------- + ! Compute mask for edges with nonzero departure areas + !------------------------------------------------------------------- + + if (l_fixed_area) then + icellsd = 0 + do j = jb, je + do i = ib, ie + if (edgearea(i,j) /= c0) then + icellsd = icellsd + 1 + indxid(icellsd) = i + indxjd(icellsd) = j + endif + enddo + enddo + else + icellsd = 0 + if (trim(edge) == 'north') then + do j = jb, je + do i = ib, ie + if (dpx(i-1,j)/=c0 .or. dpy(i-1,j)/=c0 & + .or. & + dpx(i,j)/=c0 .or. dpy(i,j)/=c0) then + icellsd = icellsd + 1 + indxid(icellsd) = i + indxjd(icellsd) = j + endif + enddo + enddo + else ! east edge + do j = jb, je + do i = ib, ie + if (dpx(i,j-1)/=c0 .or. dpy(i,j-1)/=c0 & + .or. & + dpx(i,j)/=c0 .or. dpy(i,j)/=c0) then + icellsd = icellsd + 1 + indxid(icellsd) = i + indxjd(icellsd) = j + endif + enddo + enddo + endif ! edge = north/east + endif ! l_fixed_area + + !------------------------------------------------------------------- + ! Scale the departure points + !------------------------------------------------------------------- + + do j = 1, je + do i = 1, ie + dx(i,j) = dpx(i,j) / dxu(i,j) + dy(i,j) = dpy(i,j) / dyu(i,j) + enddo + enddo + + !------------------------------------------------------------------- + ! Compute departure regions, divide into triangles, and locate + ! vertices of each triangle. + ! Work in a nondimensional coordinate system in which lengths are + ! scaled by the local metric coefficients (dxu and dyu). + ! Note: The do loop includes north faces of the j = 1 ghost cells + ! when edge = 'north'. The loop includes east faces of i = 1 + ! ghost cells when edge = 'east'. + !------------------------------------------------------------------- + + do ij = 1, icellsd + i = indxid(ij) + j = indxjd(ij) + + xcl = -p5 + ycl = c0 + + xcr = p5 + ycr = c0 + + ! 
Departure points + + if (trim(edge) == 'north') then ! north edge + xdl = xcl + dx(i-1,j) + ydl = ycl + dy(i-1,j) + xdr = xcr + dx(i,j) + ydr = ycr + dy(i,j) + else ! east edge; rotate trajectory by pi/2 + xdl = xcl - dy(i,j) + ydl = ycl + dx(i,j) + xdr = xcr - dy(i,j-1) + ydr = ycr + dx(i,j-1) + endif + + xdm = p5 * (xdr + xdl) + ydm = p5 * (ydr + ydl) + + ! Intersection points + + xil = xcl + yil = (xcl*(ydm-ydl) + xdm*ydl - xdl*ydm) / (xdm - xdl) + + xir = xcr + yir = (xcr*(ydr-ydm) - xdm*ydr + xdr*ydm) / (xdr - xdm) + + md = (ydr - ydl) / (xdr - xdl) + + if (abs(md) > puny) then + xic = xdl - ydl/md + else + xic = c0 + endif + yic = c0 + + xicl = xic + yicl = yic + xicr = xic + yicr = yic + + !------------------------------------------------------------------- + ! Locate triangles in TL cell (NW for north edge, NE for east edge) + ! and BL cell (W for north edge, N for east edge). + !------------------------------------------------------------------- + + if (yil > c0 .and. xdl < xcl .and. ydl >= c0) then + + ! TL (group 1) + + ng = 1 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xil + yp (i,j,2,ng) = yil + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_tl + jflux (i,j,ng) = j + jshift_tl + areafact(i,j,ng) = -areafac_l(i,j) + + elseif (yil < c0 .and. xdl < xcl .and. ydl < c0) then + + ! BL (group 1) + + ng = 1 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xil + yp (i,j,3,ng) = yil + iflux (i,j,ng) = i + ishift_bl + jflux (i,j,ng) = j + jshift_bl + areafact(i,j,ng) = areafac_l(i,j) + + elseif (yil < c0 .and. xdl < xcl .and. ydl >= c0) then + + ! TL1 (group 1) + + ng = 1 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xic + yp (i,j,3,ng) = yic + iflux (i,j,ng) = i + ishift_tl + jflux (i,j,ng) = j + jshift_tl + areafact(i,j,ng) = areafac_l(i,j) + + ! BL1 (group 3) + + ng = 3 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xic + yp (i,j,2,ng) = yic + xp (i,j,3,ng) = xil + yp (i,j,3,ng) = yil + iflux (i,j,ng) = i + ishift_bl + jflux (i,j,ng) = j + jshift_bl + areafact(i,j,ng) = areafac_l(i,j) + + elseif (yil > c0 .and. xdl < xcl .and. ydl < c0) then + + ! TL2 (group 3) + + ng = 3 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xil + yp (i,j,2,ng) = yil + xp (i,j,3,ng) = xic + yp (i,j,3,ng) = yic + iflux (i,j,ng) = i + ishift_tl + jflux (i,j,ng) = j + jshift_tl + areafact(i,j,ng) = -areafac_l(i,j) + + ! BL2 (group 1) + + ng = 1 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xic + yp (i,j,2,ng) = yic + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_bl + jflux (i,j,ng) = j + jshift_bl + areafact(i,j,ng) = -areafac_l(i,j) + + endif ! TL and BL triangles + + !------------------------------------------------------------------- + ! Locate triangles in TR cell (NE for north edge, SE for east edge) + ! and in BR cell (E for north edge, S for east edge). + !------------------------------------------------------------------- + + if (yir > c0 .and. xdr >= xcr .and. ydr >= c0) then + + ! TR (group 2) + + ng = 2 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xir + yp (i,j,3,ng) = yir + iflux (i,j,ng) = i + ishift_tr + jflux (i,j,ng) = j + jshift_tr + areafact(i,j,ng) = -areafac_r(i,j) + + elseif (yir < c0 .and. xdr >= xcr .and. ydr < c0) then + + ! 
BR (group 2) + + ng = 2 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xir + yp (i,j,2,ng) = yir + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_br + jflux (i,j,ng) = j + jshift_br + areafact(i,j,ng) = areafac_r(i,j) + + elseif (yir < c0 .and. xdr >= xcr .and. ydr >= c0) then + + ! TR1 (group 2) + + ng = 2 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xic + yp (i,j,2,ng) = yic + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_tr + jflux (i,j,ng) = j + jshift_tr + areafact(i,j,ng) = areafac_r(i,j) + + ! BR1 (group 3) + + ng = 3 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xir + yp (i,j,2,ng) = yir + xp (i,j,3,ng) = xic + yp (i,j,3,ng) = yic + iflux (i,j,ng) = i + ishift_br + jflux (i,j,ng) = j + jshift_br + areafact(i,j,ng) = areafac_r(i,j) + + elseif (yir > c0 .and. xdr >= xcr .and. ydr < c0) then + + ! TR2 (group 3) + + ng = 3 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xic + yp (i,j,2,ng) = yic + xp (i,j,3,ng) = xir + yp (i,j,3,ng) = yir + iflux (i,j,ng) = i + ishift_tr + jflux (i,j,ng) = j + jshift_tr + areafact(i,j,ng) = -areafac_r(i,j) + + ! BR2 (group 2) + + ng = 2 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xic + yp (i,j,3,ng) = yic + iflux (i,j,ng) = i + ishift_br + jflux (i,j,ng) = j + jshift_br + areafact(i,j,ng) = -areafac_r(i,j) + + endif ! TR and BR triangles + + !------------------------------------------------------------------- + ! Redefine departure points if not located in central cells (TC or BC) + !------------------------------------------------------------------- + + if (xdl < xcl) then + xdl = xil + ydl = yil + endif + + if (xdr > xcr) then + xdr = xir + ydr = yir + endif + + !------------------------------------------------------------------- + ! For l_fixed_area = T, shift the midpoint so that the departure + ! region has the prescribed area + !------------------------------------------------------------------- + + if (l_fixed_area) then + + ! Sum the areas of the left and right triangles. + ! Note that yp(i,j,1,ng) = 0 for all triangles, so we can + ! drop those terms from the area formula. + + ng = 1 + area1 = p5 * ( (xp(i,j,2,ng)-xp(i,j,1,ng)) * & + yp(i,j,3,ng) & + - yp(i,j,2,ng) * & + (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & + * areafact(i,j,ng) + + ng = 2 + area2 = p5 * ( (xp(i,j,2,ng)-xp(i,j,1,ng)) * & + yp(i,j,3,ng) & + - yp(i,j,2,ng) * & + (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & + * areafact(i,j,ng) + + ng = 3 + area3 = p5 * ( (xp(i,j,2,ng)-xp(i,j,1,ng)) * & + yp(i,j,3,ng) & + - yp(i,j,2,ng) * & + (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & + * areafact(i,j,ng) + + !----------------------------------------------------------- + ! Check whether the central triangles lie in one grid cell or two. + ! If all are in one grid cell, then adjust the area of the central + ! region so that the sum of all triangle areas is equal to the + ! prescribed value. + ! If two triangles are in one grid cell and one is in the other, + ! then compute the area of the lone triangle using an area factor + ! corresponding to the adjacent corner. This is necessary to prevent + ! negative masses in some rare cases on curved grids. Then adjust + ! the area of the remaining two-triangle region so that the sum of + ! all triangle areas has the prescribed value. + !----------------------------------------------------------- + + if (ydl*ydr >= c0) then ! Both DPs lie on same side of x-axis + + ! 
compute required area of central departure region + area_c = edgearea(i,j) - area1 - area2 - area3 + + ! shift midpoint so that the area of remaining triangles = area_c + w1 = c2*area_c/areafac_c(i,j) & + + (xdr-xcl)*ydl + (xcr-xdl)*ydr + w2 = (xdr-xdl)**2 + (ydr-ydl)**2 + w1 = w1/w2 + xdm = xdm + (ydr - ydl) * w1 + ydm = ydm - (xdr - xdl) * w1 + + ! compute left and right intersection points + mdl = (ydm - ydl) / (xdm - xdl) + mdr = (ydr - ydm) / (xdr - xdm) + + if (abs(mdl) > puny) then + xicl = xdl - ydl/mdl + else + xicl = c0 + endif + yicl = c0 + + if (abs(mdr) > puny) then + xicr = xdr - ydr/mdr + else + xicr = c0 + endif + yicr = c0 + + elseif (xic < c0) then ! fix ICL = IC + + xicl = xic + yicl = yic + + ! compute midpoint between ICL and DR + xdm = p5 * (xdr + xicl) + ydm = p5 * ydr + + ! compute area of triangle adjacent to left corner + area4 = p5 * (xcl - xic) * ydl * areafac_l(i,j) + area_c = edgearea(i,j) - area1 - area2 - area3 - area4 + + ! shift midpoint so that area of remaining triangles = area_c + w1 = c2*area_c/areafac_c(i,j) + (xcr-xic)*ydr + w2 = (xdr-xic)**2 + ydr**2 + w1 = w1/w2 + xdm = xdm + ydr*w1 + ydm = ydm - (xdr - xic) * w1 + + ! compute ICR + mdr = (ydr - ydm) / (xdr - xdm) + if (abs(mdr) > puny) then + xicr = xdr - ydr/mdr + else + xicr = c0 + endif + yicr = c0 + + elseif (xic >= c0) then ! fix ICR = IR + + xicr = xic + yicr = yic + + ! compute midpoint between ICR and DL + xdm = p5 * (xicr + xdl) + ydm = p5 * ydl + + area4 = p5 * (xic - xcr) * ydr * areafac_r(i,j) + area_c = edgearea(i,j) - area1 - area2 - area3 - area4 + + ! shift midpoint so that area of remaining triangles = area_c + w1 = c2*area_c/areafac_c(i,j) + (xic-xcl)*ydl + w2 = (xic-xdl)**2 + ydl**2 + w1 = w1/w2 + xdm = xdm - ydl*w1 + ydm = ydm - (xic - xdl) * w1 + + ! compute ICL + + mdl = (ydm - ydl) / (xdm - xdl) + if (abs(mdl) > puny) then + xicl = xdl - ydl/mdl + else + xicl = c0 + endif + yicl = c0 + + endif ! ydl*ydr >= c0 + + endif ! l_fixed_area + + !------------------------------------------------------------------- + ! Locate triangles in BC cell (H for both north and east edges) + ! and TC cell (N for north edge and E for east edge). + !------------------------------------------------------------------- + + ! Start with cases where both DPs lie in the same grid cell + + if (ydl >= c0 .and. ydr >= c0 .and. ydm >= c0) then + + ! TC1a (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xcr + yp (i,j,2,ng) = ycr + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! TC2a (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! TC3a (group 6) + ng = 6 + xp (i,j,1,ng) = xdl + yp (i,j,1,ng) = ydl + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + elseif (ydl >= c0 .and. ydr >= c0 .and. ydm < c0) then ! rare + + ! TC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! 
TC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xicr + yp (i,j,3,ng) = yicr + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! BC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xicr + yp (i,j,1,ng) = yicr + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + elseif (ydl < c0 .and. ydr < c0 .and. ydm < c0) then + + ! BC1a (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xcr + yp (i,j,3,ng) = ycr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + ! BC2a (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + ! BC3a (group 6) + + ng = 6 + xp (i,j,1,ng) = xdl + yp (i,j,1,ng) = ydl + xp (i,j,2,ng) = xdm + yp (i,j,2,ng) = ydm + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + elseif (ydl < c0 .and. ydr < c0 .and. ydm >= c0) then ! rare + + ! BC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xicl + yp (i,j,3,ng) = yicl + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + ! BC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + ! TC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xicl + yp (i,j,1,ng) = yicl + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! Now consider cases where the two DPs lie in different grid cells + ! For these cases, one triangle is given the area factor associated + ! with the adjacent corner, to avoid rare negative masses on curved grids. + + elseif (ydl >= c0 .and. ydr < c0 .and. xic >= c0 & + .and. ydm >= c0) then + + ! TC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! BC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_r(i,j) + + ! TC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xdl + yp (i,j,1,ng) = ydl + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + elseif (ydl >= c0 .and. ydr < c0 .and. xic >= c0 & + .and. ydm < c0 ) then ! less common + + ! 
TC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! BC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_r(i,j) + + ! BC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xicr + yp (i,j,1,ng) = yicr + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + elseif (ydl >= c0 .and. ydr < c0 .and. xic < c0 & + .and. ydm < c0) then + + ! TC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_l(i,j) + + ! BC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + ! BC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xdr + yp (i,j,1,ng) = ydr + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + elseif (ydl >= c0 .and. ydr < c0 .and. xic < c0 & + .and. ydm >= c0) then ! less common + + ! TC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdl + yp (i,j,3,ng) = ydl + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_l(i,j) + + ! BC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdr + yp (i,j,3,ng) = ydr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + ! TC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xicl + yp (i,j,1,ng) = yicl + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + elseif (ydl < c0 .and. ydr >= c0 .and. xic < c0 & + .and. ydm >= c0) then + + ! BC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xicl + yp (i,j,3,ng) = yicl + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_l(i,j) + + ! TC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xicl + yp (i,j,3,ng) = yicl + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! TC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xicl + yp (i,j,1,ng) = yicl + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + elseif (ydl < c0 .and. ydr >= c0 .and. xic < c0 & + .and. ydm < c0) then ! 
less common + + ! BC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xicl + yp (i,j,3,ng) = yicl + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_l(i,j) + + ! TC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xicr + yp (i,j,3,ng) = yicr + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + ! BC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xicr + yp (i,j,1,ng) = yicr + xp (i,j,2,ng) = xicl + yp (i,j,2,ng) = yicl + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + elseif (ydl < c0 .and. ydr >= c0 .and. xic >= c0 & + .and. ydm < c0) then + + ! BC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xicr + yp (i,j,3,ng) = yicr + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + ! TC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xicr + yp (i,j,3,ng) = yicr + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_r(i,j) + + ! BC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xicr + yp (i,j,1,ng) = yicr + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + elseif (ydl < c0 .and. ydr >= c0 .and. xic >= c0 & + .and. ydm >= c0) then ! less common + + ! BC1b (group 4) + + ng = 4 + xp (i,j,1,ng) = xcl + yp (i,j,1,ng) = ycl + xp (i,j,2,ng) = xdl + yp (i,j,2,ng) = ydl + xp (i,j,3,ng) = xicl + yp (i,j,3,ng) = yicl + iflux (i,j,ng) = i + ishift_bc + jflux (i,j,ng) = j + jshift_bc + areafact(i,j,ng) = areafac_c(i,j) + + ! TC2b (group 5) + + ng = 5 + xp (i,j,1,ng) = xcr + yp (i,j,1,ng) = ycr + xp (i,j,2,ng) = xdr + yp (i,j,2,ng) = ydr + xp (i,j,3,ng) = xicr + yp (i,j,3,ng) = yicr + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_r(i,j) + + ! TC3b (group 6) + + ng = 6 + xp (i,j,1,ng) = xicl + yp (i,j,1,ng) = yicl + xp (i,j,2,ng) = xicr + yp (i,j,2,ng) = yicr + xp (i,j,3,ng) = xdm + yp (i,j,3,ng) = ydm + iflux (i,j,ng) = i + ishift_tc + jflux (i,j,ng) = j + jshift_tc + areafact(i,j,ng) = -areafac_c(i,j) + + endif ! TC and BC triangles + + enddo ! ij + + !------------------------------------------------------------------- + ! Compute triangle areas with appropriate sign. + ! These are found by computing the area in scaled coordinates and + ! multiplying by a scale factor (areafact). + ! Note that the scale factor is positive for fluxes out of the cell + ! and negative for fluxes into the cell. + ! + ! Note: The triangle area formula below gives A >=0 iff the triangle + ! points x1, x2, and x3 are taken in counterclockwise order. + ! These points are defined above in such a way that the + ! order is nearly always CCW. + ! In rare cases, we may compute A < 0. In this case, + ! the quadrilateral departure area is equal to the + ! difference of two triangle areas instead of the sum. + ! The fluxes work out correctly in the end. + ! + ! Also compute the cumulative area transported across each edge. + ! 
If l_fixed_area = T, this area is compared to edgearea as a bug check. + ! If l_fixed_area = F, this area is passed as an output array. + !------------------------------------------------------------------- + + areasum(:,:) = c0 + + do ng = 1, ngroups + icells(ng) = 0 + + do ij = 1, icellsd + i = indxid(ij) + j = indxjd(ij) + + triarea(i,j,ng) = p5 * ( (xp(i,j,2,ng)-xp(i,j,1,ng)) * & + (yp(i,j,3,ng)-yp(i,j,1,ng)) & + - (yp(i,j,2,ng)-yp(i,j,1,ng)) * & + (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & + * areafact(i,j,ng) + + if (abs(triarea(i,j,ng)) < eps16*areafac_c(i,j)) then + triarea(i,j,ng) = c0 + else + icells(ng) = icells(ng) + 1 + ic = icells(ng) + indxi(ic,ng) = i + indxj(ic,ng) = j + endif + + areasum(i,j) = areasum(i,j) + triarea(i,j,ng) + + enddo ! ij + enddo ! ng + + if (l_fixed_area) then + if (bugcheck) then ! set bugcheck = F to speed up code + do ij = 1, icellsd + i = indxid(ij) + j = indxjd(ij) + if (abs(areasum(i,j) - edgearea(i,j)) > eps13*areafac_c(i,j)) then + print*, '' + print*, 'Areas do not add up: m, i, j, edge =', & + my_task, i, j, trim(edge) + print*, 'edgearea =', edgearea(i,j) + print*, 'areasum =', areasum(i,j) + print*, 'areafac_c =', areafac_c(i,j) + print*, '' + print*, 'Triangle areas:' + do ng = 1, ngroups ! not vector friendly + if (abs(triarea(i,j,ng)) > eps16*abs(areafact(i,j,ng))) then + print*, ng, triarea(i,j,ng) + endif + enddo + endif + enddo + endif ! bugcheck + + else ! l_fixed_area = F + do ij = 1, icellsd + i = indxid(ij) + j = indxjd(ij) + edgearea(i,j) = areasum(i,j) + enddo + endif ! l_fixed_area + + !------------------------------------------------------------------- + ! Transform triangle vertices to a scaled coordinate system centered + ! in the cell containing the triangle. + !------------------------------------------------------------------- + + if (trim(edge) == 'north') then + do ng = 1, ngroups + do nv = 1, nvert + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + ishift = iflux(i,j,ng) - i + jshift = jflux(i,j,ng) - j + xp(i,j,nv,ng) = xp(i,j,nv,ng) - c1*ishift + yp(i,j,nv,ng) = yp(i,j,nv,ng) + p5 - c1*jshift + enddo ! ij + enddo ! nv + enddo ! ng + else ! east edge + do ng = 1, ngroups + do nv = 1, nvert + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + ishift = iflux(i,j,ng) - i + jshift = jflux(i,j,ng) - j + ! Note rotation of pi/2 here + w1 = xp(i,j,nv,ng) + xp(i,j,nv,ng) = yp(i,j,nv,ng) + p5 - c1*ishift + yp(i,j,nv,ng) = -w1 - c1*jshift + enddo ! ij + enddo ! nv + enddo ! ng + endif + + if (bugcheck) then + do ng = 1, ngroups + do nv = 1, nvert + do j = jb, je + do i = ib, ie + if (abs(triarea(i,j,ng)) > puny) then + if (abs(xp(i,j,nv,ng)) > p5+puny) then + print*, '' + print*, 'WARNING: xp =', xp(i,j,nv,ng) + print*, 'm, i, j, ng, nv =', my_task, i, j, ng, nv +! print*, 'yil,xdl,xcl,ydl=',yil,xdl,xcl,ydl +! print*, 'yir,xdr,xcr,ydr=',yir,xdr,xcr,ydr +! print*, 'ydm=',ydm +! stop + endif + if (abs(yp(i,j,nv,ng)) > p5+puny) then + print*, '' + print*, 'WARNING: yp =', yp(i,j,nv,ng) + print*, 'm, i, j, ng, nv =', my_task, i, j, ng, nv + endif + endif ! triarea + enddo + enddo + enddo + enddo + endif ! bugcheck + + end subroutine locate_triangles + +!======================================================================= +! +! For each triangle, find the coordinates of the quadrature points needed +! to compute integrals of linear, quadratic, or cubic polynomials, +! using formulas from A.H. Stroud, Approximate Calculation of Multiple +! Integrals, Prentice-Hall, 1971. (Section 8.8, formula 3.1.) +! 
Linear functions can be integrated exactly by evaluating the function +! at just one point (the midpoint). Quadratic functions require +! 3 points, and cubics require 4 points. +! The default is cubic, but the code can be sped up slightly using +! linear or quadratic integrals, usually with little loss of accuracy. +! +! The formulas are as follows: +! +! I1 = integral of f(x,y)*dA +! = A * f(x0,y0) +! where A is the triangle area and (x0,y0) is the midpoint. +! +! I2 = A/3 * (f(x1,y1) + f(x2,y2) + f(x3,y3)) +! where these three points are located halfway between the midpoint +! and the three vertices of the triangle. +! +! I3 = A * [ -9/16 * f(x0,y0) +! + 25/48 * (f(x1,y1) + f(x2,y2) + f(x3,y3))] +! where (x0,y0) is the midpoint, and the other three points are +! located 2/5 of the way from the midpoint to the three vertices. +! +! author William H. Lipscomb, LANL + + subroutine triangle_coordinates (nx_block, ny_block, & + integral_order, icells, & + indxi, indxj, & + xp, yp) + + use ice_constants, only: p333, p4, p5, p6 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block,&! block dimensions + integral_order ! polynomial order for quadrature integrals + + integer (kind=int_kind), dimension (ngroups), intent(in) :: & + icells ! number of cells where triarea > puny + + integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), & + intent(in) :: & + indxi ,&! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), intent(inout), & + dimension (nx_block, ny_block, 0:nvert, ngroups) :: & + xp, yp ! coordinates of triangle points + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij ,&! horizontal indices + ng ! triangle index + + + if (integral_order == 1) then ! linear (1-point formula) + + do ng = 1, ngroups + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + ! coordinates of midpoint + xp(i,j,0,ng) = p333 & + * (xp(i,j,1,ng) + xp(i,j,2,ng) + xp(i,j,3,ng)) + yp(i,j,0,ng) = p333 & + * (yp(i,j,1,ng) + yp(i,j,2,ng) + yp(i,j,3,ng)) + + enddo ! ij + enddo ! ng + + elseif (integral_order == 2) then ! quadratic (3-point formula) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + + do ng = 1, ngroups + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + ! coordinates of midpoint + xp(i,j,0,ng) = p333 & + * (xp(i,j,1,ng) + xp(i,j,2,ng) + xp(i,j,3,ng)) + yp(i,j,0,ng) = p333 & + * (yp(i,j,1,ng) + yp(i,j,2,ng) + yp(i,j,3,ng)) + + ! coordinates of the 3 points needed for integrals + + xp(i,j,1,ng) = p5*xp(i,j,1,ng) + p5*xp(i,j,0,ng) + yp(i,j,1,ng) = p5*yp(i,j,1,ng) + p5*yp(i,j,0,ng) + + xp(i,j,2,ng) = p5*xp(i,j,2,ng) + p5*xp(i,j,0,ng) + yp(i,j,2,ng) = p5*yp(i,j,2,ng) + p5*yp(i,j,0,ng) + + xp(i,j,3,ng) = p5*xp(i,j,3,ng) + p5*xp(i,j,0,ng) + yp(i,j,3,ng) = p5*yp(i,j,3,ng) + p5*yp(i,j,0,ng) + + enddo ! ij + enddo ! ng + + else ! cubic (4-point formula) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ng = 1, ngroups + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + ! coordinates of midpoint + xp(i,j,0,ng) = p333 & + * (xp(i,j,1,ng) + xp(i,j,2,ng) + xp(i,j,3,ng)) + yp(i,j,0,ng) = p333 & + * (yp(i,j,1,ng) + yp(i,j,2,ng) + yp(i,j,3,ng)) + + ! 
coordinates of the other 3 points needed for integrals + + xp(i,j,1,ng) = p4*xp(i,j,1,ng) + p6*xp(i,j,0,ng) + yp(i,j,1,ng) = p4*yp(i,j,1,ng) + p6*yp(i,j,0,ng) + + xp(i,j,2,ng) = p4*xp(i,j,2,ng) + p6*xp(i,j,0,ng) + yp(i,j,2,ng) = p4*yp(i,j,2,ng) + p6*yp(i,j,0,ng) + + xp(i,j,3,ng) = p4*xp(i,j,3,ng) + p6*xp(i,j,0,ng) + yp(i,j,3,ng) = p4*yp(i,j,3,ng) + p6*yp(i,j,0,ng) + + enddo ! ij + enddo ! ng + + endif + + end subroutine triangle_coordinates + +!======================================================================= +! +! Compute the transports across each edge by integrating the mass +! and tracers over each departure triangle. +! Input variables have the same meanings as in the main subroutine. +! Repeated use of certain sums makes the calculation more efficient. +! Integral formulas are described in triangle_coordinates subroutine. +! +! author William H. Lipscomb, LANL + + subroutine transport_integrals (nx_block, ny_block, & + ntrace, icells, & + indxi, indxj, & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc, mx, & + my, mflx, & + tc, tx, & + ty, mtflx) + + use ice_constants, only: c0, p333 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ,&! block dimensions + ntrace ,&! number of tracers in use + integral_order ! polynomial order for quadrature integrals + + integer (kind=int_kind), dimension (ntrace), intent(in) :: & + tracer_type ,&! = 1, 2, or 3 (see comments above) + depend ! tracer dependencies (see above) + + integer (kind=int_kind), dimension (ngroups), intent(in) :: & + icells ! number of cells where triarea > puny + + integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), & + intent(in) :: & + indxi ,&! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), intent(in), & + dimension (nx_block, ny_block, 0:nvert, ngroups) :: & + xp, yp ! coordinates of triangle points + + real (kind=dbl_kind), intent(in), & + dimension (nx_block, ny_block, ngroups) :: & + triarea ! triangle area + + integer (kind=int_kind), intent(in), & + dimension (nx_block, ny_block, ngroups) :: & + iflux ,& + jflux + + real (kind=dbl_kind), intent(in), & + dimension (nx_block, ny_block) :: & + mc, mx, my + + real (kind=dbl_kind), intent(out), & + dimension (nx_block, ny_block) :: & + mflx + + real (kind=dbl_kind), intent(in), & + dimension (nx_block, ny_block, ntrace), optional :: & + tc, tx, ty + + real (kind=dbl_kind), intent(out), & + dimension (nx_block, ny_block, ntrace), optional :: & + mtflx + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij ,&! horizontal indices of edge + i2, j2 ,&! horizontal indices of cell contributing transport + ng ,&! triangle index + nt, nt1 ! tracer indices + + real (kind=dbl_kind) :: & + m0, m1, m2, m3 ,&! mass field at internal points + w0, w1, w2, w3 ! work variables + + real (kind=dbl_kind), dimension (nx_block, ny_block) :: & + msum, mxsum, mysum ,&! sum of mass, mass*x, and mass*y + mxxsum, mxysum, myysum ! sum of mass*x*x, mass*x*y, mass*y*y + + real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace) :: & + mtsum ,&! sum of mass*tracer + mtxsum ,&! sum of mass*tracer*x + mtysum ! sum of mass*tracer*y + + !------------------------------------------------------------------- + ! Initialize + !------------------------------------------------------------------- + + mflx(:,:) = c0 + if (present(mtflx)) then + do nt = 1, ntrace + mtflx(:,:,nt) = c0 + enddo + endif + + !------------------------------------------------------------------- + ! 
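The point constructions above and the weights applied below (1/3 per point for the 3-point rule, -9/16 and 25/48 for the 4-point rule) can be sanity-checked on the reference triangle (0,0)-(1,0)-(0,1), area 0.5. The following stand-alone program is an illustrative check only, not CICE code, and its names are invented for the example.

      program quadrature_demo
        implicit none
        integer, parameter :: dp = selected_real_kind(13)
        real(dp), parameter :: a = 0.5_dp                  ! area of the reference triangle
        real(dp) :: xc, yc, s2, s3, i2, i3
        xc = 1.0_dp/3.0_dp                                 ! centroid coordinates
        yc = 1.0_dp/3.0_dp
        ! 3-point rule: points halfway between the centroid and each vertex, weight A/3
        s2 = f2(0.5_dp*xc, 0.5_dp*yc) + f2(0.5_dp*(xc+1.0_dp), 0.5_dp*yc) &
           + f2(0.5_dp*xc, 0.5_dp*(yc+1.0_dp))
        i2 = a/3.0_dp * s2
        ! 4-point rule: centroid with weight -9/16, plus points 2/5 of the way
        ! from the centroid to each vertex, weight 25/48 each
        s3 = f3(0.6_dp*xc, 0.6_dp*yc) + f3(0.6_dp*xc+0.4_dp, 0.6_dp*yc) &
           + f3(0.6_dp*xc, 0.6_dp*yc+0.4_dp)
        i3 = a * (-(9.0_dp/16.0_dp)*f3(xc,yc) + (25.0_dp/48.0_dp)*s3)
        print *, 'I2 =', i2, ' exact =', 1.0_dp/12.0_dp    ! quadratic integrated exactly
        print *, 'I3 =', i3, ' exact =', 1.0_dp/20.0_dp    ! cubic integrated exactly
      contains
        real(dp) function f2 (x, y)                        ! f(x,y) = x**2
          real(dp), intent(in) :: x, y
          f2 = x*x
        end function f2
        real(dp) function f3 (x, y)                        ! f(x,y) = x**3
          real(dp), intent(in) :: x, y
          f3 = x**3
        end function f3
      end program quadrature_demo

Both results match the exact integrals (1/12 for x**2, 1/20 for x**3), consistent with the quadrature orders quoted above.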
Main loop + !------------------------------------------------------------------- + + do ng = 1, ngroups + + if (integral_order == 1) then ! linear (1-point formula) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + i2 = iflux(i,j,ng) + j2 = jflux(i,j,ng) + + ! mass transports + + m0 = mc(i2,j2) + xp(i,j,0,ng)*mx(i2,j2) & + + yp(i,j,0,ng)*my(i2,j2) + msum(i,j) = m0 + + mflx(i,j) = mflx(i,j) + triarea(i,j,ng)*msum(i,j) + + ! quantities needed for tracer transports + mxsum(i,j) = m0*xp(i,j,0,ng) + mxxsum(i,j) = mxsum(i,j)*xp(i,j,0,ng) + mxysum(i,j) = mxsum(i,j)*yp(i,j,0,ng) + mysum(i,j) = m0*yp(i,j,0,ng) + myysum(i,j) = mysum(i,j)*yp(i,j,0,ng) + enddo ! ij + + elseif (integral_order == 2) then ! quadratic (3-point formula) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + i2 = iflux(i,j,ng) + j2 = jflux(i,j,ng) + + ! mass transports + ! Weighting factor of 1/3 is incorporated into the ice + ! area terms m1, m2, and m3. + m1 = p333 * (mc(i2,j2) + xp(i,j,1,ng)*mx(i2,j2) & + + yp(i,j,1,ng)*my(i2,j2)) + m2 = p333 * (mc(i2,j2) + xp(i,j,2,ng)*mx(i2,j2) & + + yp(i,j,2,ng)*my(i2,j2)) + m3 = p333 * (mc(i2,j2) + xp(i,j,3,ng)*mx(i2,j2) & + + yp(i,j,3,ng)*my(i2,j2)) + msum(i,j) = m1 + m2 + m3 + mflx(i,j) = mflx(i,j) + triarea(i,j,ng)*msum(i,j) + + ! quantities needed for mass_tracer transports + w1 = m1 * xp(i,j,1,ng) + w2 = m2 * xp(i,j,2,ng) + w3 = m3 * xp(i,j,3,ng) + + mxsum(i,j) = w1 + w2 + w3 + + mxxsum(i,j) = w1*xp(i,j,1,ng) + w2*xp(i,j,2,ng) & + + w3*xp(i,j,3,ng) + + mxysum(i,j) = w1*yp(i,j,1,ng) + w2*yp(i,j,2,ng) & + + w3*yp(i,j,3,ng) + + w1 = m1 * yp(i,j,1,ng) + w2 = m2 * yp(i,j,2,ng) + w3 = m3 * yp(i,j,3,ng) + + mysum(i,j) = w1 + w2 + w3 + + myysum(i,j) = w1*yp(i,j,1,ng) + w2*yp(i,j,2,ng) & + + w3*yp(i,j,3,ng) + enddo ! ij + + else ! cubic (4-point formula) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + i2 = iflux(i,j,ng) + j2 = jflux(i,j,ng) + + ! mass transports + + ! Weighting factors are incorporated into the + ! terms m0, m1, m2, and m3. + m0 = p5625m * (mc(i2,j2) + xp(i,j,0,ng)*mx(i2,j2) & + + yp(i,j,0,ng)*my(i2,j2)) + m1 = p52083 * (mc(i2,j2) + xp(i,j,1,ng)*mx(i2,j2) & + + yp(i,j,1,ng)*my(i2,j2)) + m2 = p52083 * (mc(i2,j2) + xp(i,j,2,ng)*mx(i2,j2) & + + yp(i,j,2,ng)*my(i2,j2)) + m3 = p52083 * (mc(i2,j2) + xp(i,j,3,ng)*mx(i2,j2) & + + yp(i,j,3,ng)*my(i2,j2)) + msum(i,j) = m0 + m1 + m2 + m3 + mflx(i,j) = mflx(i,j) + triarea(i,j,ng)*msum(i,j) + + ! quantities needed for tracer transports + w0 = m0 * xp(i,j,0,ng) + w1 = m1 * xp(i,j,1,ng) + w2 = m2 * xp(i,j,2,ng) + w3 = m3 * xp(i,j,3,ng) + + mxsum(i,j) = w0 + w1 + w2 + w3 + + mxxsum(i,j) = w0*xp(i,j,0,ng) + w1*xp(i,j,1,ng) & + + w2*xp(i,j,2,ng) + w3*xp(i,j,3,ng) + + mxysum(i,j) = w0*yp(i,j,0,ng) + w1*yp(i,j,1,ng) & + + w2*yp(i,j,2,ng) + w3*yp(i,j,3,ng) + + w0 = m0 * yp(i,j,0,ng) + w1 = m1 * yp(i,j,1,ng) + w2 = m2 * yp(i,j,2,ng) + w3 = m3 * yp(i,j,3,ng) + + mysum(i,j) = w0 + w1 + w2 + w3 + + myysum(i,j) = w0*yp(i,j,0,ng) + w1*yp(i,j,1,ng) & + + w2*yp(i,j,2,ng) + w3*yp(i,j,3,ng) + + enddo ! ij + + endif ! integral_order + + ! mass * tracer transports + + if (present(mtflx)) then + + do nt = 1, ntrace + if (tracer_type(nt)==1) then ! 
does not depend on another tracer + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + i2 = iflux(i,j,ng) + j2 = jflux(i,j,ng) + + mtsum(i,j,nt) = msum(i,j) * tc(i2,j2,nt) & + + mxsum(i,j) * tx(i2,j2,nt) & + + mysum(i,j) * ty(i2,j2,nt) + + mtflx(i,j,nt) = mtflx(i,j,nt) & + + triarea(i,j,ng) * mtsum(i,j,nt) + + ! quantities needed for dependent tracers + + mtxsum(i,j,nt) = mxsum(i,j) * tc(i2,j2,nt) & + + mxxsum(i,j) * tx(i2,j2,nt) & + + mxysum(i,j) * ty(i2,j2,nt) + + mtysum(i,j,nt) = mysum(i,j) * tc(i2,j2,nt) & + + mxysum(i,j) * tx(i2,j2,nt) & + + myysum(i,j) * ty(i2,j2,nt) + enddo ! ij + + elseif (tracer_type(nt)==2) then ! depends on another tracer + nt1 = depend(nt) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + i2 = iflux(i,j,ng) + j2 = jflux(i,j,ng) + + mtsum(i,j,nt) = mtsum(i,j,nt1) * tc(i2,j2,nt) & + + mtxsum(i,j,nt1) * tx(i2,j2,nt) & + + mtysum(i,j,nt1) * ty(i2,j2,nt) + + mtflx(i,j,nt) = mtflx(i,j,nt) & + + triarea(i,j,ng) * mtsum(i,j,nt) + enddo ! ij + + + elseif (tracer_type(nt)==3) then ! depends on two tracers + nt1 = depend(nt) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells(ng) + i = indxi(ij,ng) + j = indxj(ij,ng) + + i2 = iflux(i,j,ng) + j2 = jflux(i,j,ng) + + ! upwind approx (tx=ty=0) for type 3 tracers + mtsum(i,j,nt) = mtsum(i,j,nt1) * tc(i2,j2,nt) + + mtflx(i,j,nt) = mtflx(i,j,nt) & + + triarea(i,j,ng) * mtsum(i,j,nt) + enddo ! ij + + endif ! tracer type + enddo ! ntrace + endif ! present(mtflx) + enddo ! ng + + end subroutine transport_integrals + +!======================================================================= +! +! Given transports through cell edges, compute new area and tracers. +! +! author William H. Lipscomb, LANL + + subroutine update_fields (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + ntrace, & + tracer_type, depend, & + tarear, l_stop, & + istop, jstop, & + mflxe, mflxn, & + mm, & + mtflxe, mtflxn, & + tm) + + use ice_constants, only: c0, puny + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block,&! block dimensions + ilo,ihi,jlo,jhi ,&! beginning and end of physical domain + ntrace ! number of tracers in use + + integer (kind=int_kind), dimension (ntrace), intent(in) :: & + tracer_type ,&! = 1, 2, or 3 (see comments above) + depend ! tracer dependencies (see above) + + real (kind=dbl_kind), dimension (nx_block, ny_block), & + intent(in) :: & + mflxe, mflxn ,&! mass transport across east and north cell edges + tarear ! 1/tarea + + real (kind=dbl_kind), dimension (nx_block, ny_block), & + intent(inout) :: & + mm ! mass field (mean) + + real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace), & + intent(in), optional :: & + mtflxe, mtflxn ! mass*tracer transport across E and N cell edges + + real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace), & + intent(inout), optional :: & + tm ! tracer fields + + logical (kind=log_kind), intent(inout) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(inout) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i, j ,&! horizontal indices + nt, nt1, nt2 ! tracer indices + + real (kind=dbl_kind), dimension(nx_block,ny_block,ntrace) :: & + mtold ! old mass*tracer + + real (kind=dbl_kind) :: & + w1 ! work variable + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi ,&! 
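The type-1/2/3 branches above mirror the tracer hierarchy used throughout the remap: a type-1 tracer rides on the mass field, a type-2 tracer rides on mass*tracer1, and a type-3 tracer rides on mass*tracer1*tracer2 (with an upwind approximation for its gradients). The stand-alone sketch below, with invented numbers and not part of CICE, shows the bookkeeping that update_fields later inverts.

      program tracer_dependency_demo
        implicit none
        integer, parameter :: dp = selected_real_kind(13)
        real(dp) :: m, t1, t2, t3, q1, q2, q3
        m  = 0.8_dp          ! mass-like field (e.g. an area fraction)
        t1 = 2.0_dp          ! type-1 tracer, carried on m
        t2 = 0.5_dp          ! type-2 tracer, carried on m*t1
        t3 = 0.1_dp          ! type-3 tracer, carried on m*t1*t2
        q1 = m*t1            ! conserved quantity for t1
        q2 = m*t1*t2         ! conserved quantity for t2
        q3 = m*t1*t2*t3      ! conserved quantity for t3
        print *, 't1 recovered:', q1/m
        print *, 't2 recovered:', q2/(m*t1)
        print *, 't3 recovered:', q3/(m*t1*t2)
      end program tracer_dependency_demo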
compressed indices in i and j directions + indxj + + integer (kind=int_kind) :: & + icells ,&! number of cells with mm > 0. + ij ! combined i/j horizontal index + + !------------------------------------------------------------------- + ! Save starting values of mass*tracer + !------------------------------------------------------------------- + + if (present(tm)) then + do nt = 1, ntrace + if (tracer_type(nt)==1) then ! does not depend on other tracers + do j = jlo, jhi + do i = ilo, ihi + mtold(i,j,nt) = mm(i,j) * tm(i,j,nt) + enddo ! i + enddo ! j + elseif (tracer_type(nt)==2) then ! depends on another tracer + nt1 = depend(nt) + do j = jlo, jhi + do i = ilo, ihi + mtold(i,j,nt) = mm(i,j) * tm(i,j,nt1) * tm(i,j,nt) + enddo ! i + enddo ! j + elseif (tracer_type(nt)==3) then ! depends on two tracers + nt1 = depend(nt) + nt2 = depend(nt1) + do j = jlo, jhi + do i = ilo, ihi + mtold(i,j,nt) = mm(i,j) & + * tm(i,j,nt2) * tm(i,j,nt1) * tm(i,j,nt) + enddo ! i + enddo ! j + endif ! depend(nt) = 0 + enddo ! nt + endif ! present(tm) + + !------------------------------------------------------------------- + ! Update mass field + !------------------------------------------------------------------- + + do j = jlo, jhi + do i = ilo, ihi + + w1 = mflxe(i,j) - mflxe(i-1,j) & + + mflxn(i,j) - mflxn(i,j-1) + mm(i,j) = mm(i,j) - w1*tarear(i,j) + + if (mm(i,j) < -puny) then ! abort with negative value + l_stop = .true. + istop = i + jstop = j + elseif (mm(i,j) < c0) then ! set to zero + mm(i,j) = c0 + endif + + enddo + enddo + + if (l_stop) then + i = istop + j = jstop + w1 = mflxe(i,j) - mflxe(i-1,j) & + + mflxn(i,j) - mflxn(i,j-1) + write (nu_diag,*) ' ' + write (nu_diag,*) 'New mass < 0, i, j =', i, j + write (nu_diag,*) 'Old mass =', mm(i,j) + w1*tarear(i,j) + write (nu_diag,*) 'New mass =', mm(i,j) + write (nu_diag,*) 'Net transport =', -w1*tarear(i,j) + return + endif + + !------------------------------------------------------------------- + ! Update tracers + !------------------------------------------------------------------- + + if (present(tm)) then + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (mm(i,j) > c0) then ! grid cells with positive areas + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + do nt = 1, ntrace + + do j = jlo, jhi + do i = ilo, ihi + tm(i,j,nt) = c0 + enddo + enddo + + if (tracer_type(nt)==1) then ! does not depend on other tracers + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & + + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & + / mm(i,j) + enddo ! ij + + elseif (tracer_type(nt)==2) then ! depends on another tracer + nt1 = depend(nt) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (abs(tm(i,j,nt1)) > c0) then + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & + + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & + / (mm(i,j) * tm(i,j,nt1)) + endif + + enddo ! ij + + elseif (tracer_type(nt)==3) then ! depends on two tracers + nt1 = depend(nt) + nt2 = depend(nt1) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + if (abs(tm(i,j,nt1)) > c0 .and. & + abs(tm(i,j,nt2)) > c0) then + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & + + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & + / (mm(i,j) * tm(i,j,nt2) * tm(i,j,nt1)) + endif + enddo ! 
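The mass update above is a plain finite-volume balance: each cell loses w1 = (east-edge flux minus west-edge flux) + (north minus south), scaled by 1/tarea. Because every edge flux appears with opposite signs in the two cells it separates, transport leaves the domain total unchanged. The small stand-alone sketch below (illustrative names, a periodic 1-D row of cells, not CICE code) makes that explicit.

      program flux_update_demo
        implicit none
        integer, parameter :: dp = selected_real_kind(13)
        integer, parameter :: n = 5
        integer :: i, im1
        real(dp) :: mm(n), area(n), flxe(n), w1
        mm   = (/ 1.0_dp, 0.8_dp, 0.5_dp, 0.9_dp, 0.2_dp /)     ! mass per unit area
        area = 1.0_dp                                            ! cell areas
        flxe = (/ 0.1_dp, -0.05_dp, 0.2_dp, 0.0_dp, 0.05_dp /)   ! mass crossing each east edge
        print *, 'total before:', sum(mm*area)
        do i = 1, n
           im1 = i - 1
           if (im1 == 0) im1 = n                ! periodic wrap for the western edge
           w1 = flxe(i) - flxe(im1)             ! outflow east minus inflow west
           mm(i) = mm(i) - w1/area(i)
        enddo
        print *, 'total after: ', sum(mm*area)  ! unchanged: edge fluxes cancel in pairs
      end program flux_update_demo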
ij + + endif ! tracer_type + enddo ! nt + endif ! present(tm) + + end subroutine update_fields + +!======================================================================= + + end module ice_transport_remap + +!======================================================================= diff --git a/source/ice_zbgc.F90 b/source/ice_zbgc.F90 new file mode 100755 index 00000000..9ff08287 --- /dev/null +++ b/source/ice_zbgc.F90 @@ -0,0 +1,1078 @@ +! SVN:$Id: ice_zbgc.F90 820 2014-08-26 19:08:29Z eclare $ +!======================================================================= +! +! Biogeochemistry driver +! +! authors: Nicole Jeffery, LANL +! Scott Elliot, LANL +! Elizabeth C. Hunke, LANL +! + module ice_zbgc + + use ice_kinds_mod + use ice_zbgc_shared ! everything + + implicit none + + private + public :: add_new_ice_bgc, init_zbgc, init_bgc, & + init_history_bgc, biogeochemistry + +!======================================================================= + + contains + +!======================================================================= + +! Namelist variables, set to default values; may be altered at run time +! +! author Elizabeth C. Hunke, LANL +! Nicole Jeffery, LANL + + subroutine init_zbgc + + use ice_broadcast, only: broadcast_scalar + use ice_communicate, only: my_task, master_task + use ice_constants, only: c1, p5, c0, rhos, rhoi + use ice_domain_size, only: max_ntrcr, max_nbtrcr, nblyr + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, nml_filename, get_fileunit, & + release_fileunit, nu_diag + use ice_restart_shared, only: runtype + use ice_state, only: tr_brine, nt_fbri, ntrcr, nbtrcr, trcr_depend, & + nt_bgc_N_sk, nt_bgc_Nit_sk, nt_bgc_chl_sk, nt_bgc_Am_sk, & + nt_bgc_Sil_sk, nt_bgc_DMSPp_sk, nt_bgc_DMSPd_sk, & + nt_bgc_DMS_sk, nt_bgc_C_sk + + integer (kind=int_kind) :: & + nml_error ! namelist i/o error flag + + !----------------------------------------------------------------- + ! namelist variables + !----------------------------------------------------------------- + + namelist /zbgc_nml/ & + tr_brine, bgc_data_dir, sil_data_type, nit_data_type, & + restore_bgc, skl_bgc, & + tr_bgc_C_sk, tr_bgc_chl_sk, tr_bgc_Am_sk, tr_bgc_Sil_sk, & + tr_bgc_DMSPp_sk, tr_bgc_DMSPd_sk, tr_bgc_DMS_sk, & + restart_bgc, restart_hbrine, phi_snow, bgc_flux_type + + !----------------------------------------------------------------- + ! default values + !----------------------------------------------------------------- + + tr_brine = .false. ! brine height differs from ice height + restore_bgc = .false. ! restore bgc if true + skl_bgc = .false. ! solve skeletal biochemistry in diffuse bio + bgc_data_dir = 'unknown_bgc_data_dir' + sil_data_type = 'default' + nit_data_type = 'default' + tr_bgc_C_sk = .false. ! biogeochemistry, + tr_bgc_chl_sk = .false. ! biogeochemistry, + tr_bgc_Am_sk = .false. ! biogeochemistry, + tr_bgc_Sil_sk = .false. ! biogeochemistry, + tr_bgc_DMSPp_sk = .false. ! biogeochemistry, trace gases (skeletal) + tr_bgc_DMSPd_sk = .false. ! biogeochemistry, trace gases (skeletal) + tr_bgc_DMS_sk = .false. ! biogeochemistry, trace gases (skeletal) + restart_bgc = .false. ! biogeochemistry restart + restart_hbrine = .false. ! hbrine restart + phi_snow = p5 ! snow porosity + bgc_flux_type = 'Jin2006'! type of ocean-ice poston velocity ('constant') + + !----------------------------------------------------------------- + ! 
read from input file + !----------------------------------------------------------------- + + call get_fileunit(nu_nml) + + if (my_task == master_task) then + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) + if (nml_error /= 0) then + nml_error = -1 + else + nml_error = 1 + endif + + print*,'Reading zbgc_nml' + do while (nml_error > 0) + read(nu_nml, nml=zbgc_nml,iostat=nml_error) + end do + if (nml_error == 0) close(nu_nml) + endif + call broadcast_scalar(nml_error, master_task) + if (nml_error /= 0) then + call abort_ice('ice: error reading zbgc namelist') + endif + call release_fileunit(nu_nml) + + !----------------------------------------------------------------- + ! brine + !----------------------------------------------------------------- + + if (trim(runtype) == 'continue') restart_hbrine = .true. + + call broadcast_scalar(tr_brine, master_task) + call broadcast_scalar(restart_hbrine, master_task) + call broadcast_scalar(phi_snow, master_task) + + nt_fbri = c0 + if (tr_brine) then + nt_fbri = ntrcr + 1 ! ice volume fraction with salt + ntrcr = ntrcr + 1 + trcr_depend(nt_fbri) = 1 ! volume-weighted + endif + + if (phi_snow .le. c0) phi_snow = c1-rhos/rhoi + + if (my_task == master_task) then + write(nu_diag,1010) ' tr_brine = ', tr_brine + if (tr_brine) then + write(nu_diag,1010) ' restart_hbrine = ', restart_hbrine + write(nu_diag,1005) ' phi_snow = ', phi_snow + endif + write(nu_diag,1010) ' skl_bgc = ', skl_bgc + endif + + !----------------------------------------------------------------- + ! skeletal layer biogeochemistry + !----------------------------------------------------------------- + + if (TRBGCS == 0 .and. skl_bgc) then + write(nu_diag,*) & + 'WARNING: skl_bgc=T but 0 bgc tracers compiled' + write(nu_diag,*) & + 'WARNING: setting skl_bgc = F' + skl_bgc = .false. + endif + + if (trim(runtype) == 'continue') restart_bgc = .true. + + call broadcast_scalar(skl_bgc, master_task) + call broadcast_scalar(restart_bgc, master_task) + + if (skl_bgc) then + tr_bgc_N_sk = .true. ! minimum NP biogeochemistry + tr_bgc_Nit_sk = .true. + else + tr_bgc_N_sk = .false. + tr_bgc_C_sk = .false. + tr_bgc_chl_sk = .false. + tr_bgc_Nit_sk = .false. + tr_bgc_Am_sk = .false. + tr_bgc_Sil_sk = .false. + tr_bgc_DMSPp_sk = .false. + tr_bgc_DMSPd_sk = .false. + tr_bgc_DMS_sk = .false. 
+ endif + + call broadcast_scalar(bgc_flux_type, master_task) + call broadcast_scalar(restore_bgc, master_task) + call broadcast_scalar(bgc_data_dir, master_task) + call broadcast_scalar(sil_data_type, master_task) + call broadcast_scalar(nit_data_type, master_task) + call broadcast_scalar(tr_bgc_N_sk, master_task) + call broadcast_scalar(tr_bgc_C_sk, master_task) + call broadcast_scalar(tr_bgc_chl_sk, master_task) + call broadcast_scalar(tr_bgc_Nit_sk, master_task) + call broadcast_scalar(tr_bgc_Am_sk, master_task) + call broadcast_scalar(tr_bgc_Sil_sk, master_task) + call broadcast_scalar(tr_bgc_DMSPp_sk, master_task) + call broadcast_scalar(tr_bgc_DMSPd_sk, master_task) + call broadcast_scalar(tr_bgc_DMS_sk, master_task) + + if (skl_bgc) then + + if (my_task == master_task) then + + write(nu_diag,1030) ' bgc_flux_type = ', bgc_flux_type + write(nu_diag,1010) ' restart_bgc = ', restart_bgc + write(nu_diag,1010) ' restore_bgc = ', restore_bgc + write(nu_diag,*) ' bgc_data_dir = ', & + trim(bgc_data_dir) + write(nu_diag,*) ' sil_data_type = ', & + trim(sil_data_type) + write(nu_diag,*) ' nit_data_type = ', & + trim(nit_data_type) + write(nu_diag,1010) ' tr_bgc_N_sk = ', tr_bgc_N_sk + write(nu_diag,1010) ' tr_bgc_C_sk = ', tr_bgc_C_sk + write(nu_diag,1010) ' tr_bgc_chl_sk = ', tr_bgc_chl_sk + write(nu_diag,1010) ' tr_bgc_Nit_sk = ', tr_bgc_Nit_sk + write(nu_diag,1010) ' tr_bgc_Am_sk = ', tr_bgc_Am_sk + write(nu_diag,1010) ' tr_bgc_Sil_sk = ', tr_bgc_Sil_sk + write(nu_diag,1010) ' tr_bgc_DMSPp_sk = ', tr_bgc_DMSPp_sk + write(nu_diag,1010) ' tr_bgc_DMSPd_sk = ', tr_bgc_DMSPd_sk + write(nu_diag,1010) ' tr_bgc_DMS_sk = ', tr_bgc_DMS_sk + + endif ! master_task + + !----------------------------------------------------------------- + ! assign tracer indices and dependencies + !----------------------------------------------------------------- + + nbtrcr = 0 + nlt_bgc_NO = 0 + nlt_bgc_N = 0 + nlt_bgc_C = 0 + nlt_bgc_chl = 0 + nlt_bgc_NH = 0 + nlt_bgc_Sil = 0 + nlt_bgc_DMSPp = 0 + nlt_bgc_DMSPd = 0 + nlt_bgc_DMS = 0 + + ntrcr = ntrcr + 1 ! algalN, required tracer + nt_bgc_N_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_N = nbtrcr + + ntrcr = ntrcr + 1 ! nitrate, required tracer + nt_bgc_Nit_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_NO = nbtrcr + + if (tr_bgc_C_sk) then + ntrcr = ntrcr + 1 + nt_bgc_C_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_C = nbtrcr + endif + if (tr_bgc_chl_sk)then + ntrcr = ntrcr + 1 + nt_bgc_chl_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_chl = nbtrcr + endif + if (tr_bgc_Am_sk)then + ntrcr = ntrcr + 1 + nt_bgc_Am_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_NH = nbtrcr + endif + if (tr_bgc_Sil_sk)then + ntrcr = ntrcr + 1 + nt_bgc_Sil_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_Sil = nbtrcr + endif + if (tr_bgc_DMSPp_sk)then + ntrcr = ntrcr + 1 + nt_bgc_DMSPp_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_DMSPp = nbtrcr + endif + if (tr_bgc_DMSPd_sk)then + ntrcr = ntrcr + 1 + nt_bgc_DMSPd_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_DMSPd = nbtrcr + endif + if (tr_bgc_DMS_sk)then + ntrcr = ntrcr + 1 + nt_bgc_DMS_sk = ntrcr + nbtrcr = nbtrcr + 1 + nlt_bgc_DMS = nbtrcr + endif + endif ! 
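The zbgc_nml group read above comes from the model's namelist input file (nml_filename). Purely as an illustration of its shape, with made-up values rather than recommended settings, an entry could look like the following; omitted members simply keep the defaults assigned above.

 &zbgc_nml
    tr_brine        = .true.
    skl_bgc         = .true.
    restart_bgc     = .false.
    restore_bgc     = .false.
    tr_bgc_C_sk     = .true.
    tr_bgc_chl_sk   = .true.
    tr_bgc_Am_sk    = .false.
    tr_bgc_Sil_sk   = .false.
    tr_bgc_DMSPp_sk = .false.
    tr_bgc_DMSPd_sk = .false.
    tr_bgc_DMS_sk   = .false.
    phi_snow        = 0.5
    bgc_flux_type   = 'Jin2006'
    sil_data_type   = 'default'
    nit_data_type   = 'default'
    bgc_data_dir    = '/path/to/bgc/data/'
 /

The directory path shown is a placeholder, not an actual ACCESS or NCI location.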
skl_bgc + + if (nbtrcr > max_nbtrcr) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'nbtrcr > max_nbtrcr' + write (nu_diag,*) 'nbtrcr, max_nbtrcr:',nbtrcr, max_nbtrcr + call abort_ice ('ice: ice_zbgc error') + endif + + if (ntrcr > max_ntrcr) then + write(nu_diag,*) 'max_ntrcr < number of namelist tracers' + write(nu_diag,*) 'max_ntrcr = ',max_ntrcr,' ntrcr = ',ntrcr + call abort_ice('max_ntrcr < number of namelist tracers') + endif + + if (skl_bgc .and. TRBGCS < 2) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'comp_ice must have number of bgc tracers >= 2' + write (nu_diag,*) 'number of bgc tracers compiled:',TRBGCS + call abort_ice ('ice: ice_zbgc error') + endif + + if (my_task == master_task) then + if (skl_bgc) then + write(nu_diag,1020)'nt_bgc_N_sk = ', nt_bgc_N_sk + write(nu_diag,1020)'nt_bgc_Nit_sk = ', nt_bgc_Nit_sk + endif + if (tr_brine .or. skl_bgc) then + write(nu_diag,1020)'nblyr = ', nblyr + write(nu_diag,1020) 'ntrcr (w/ bgc) = ', ntrcr + endif + endif + + ! BGC layer model (on bottom "skeletal" layer) + if (tr_bgc_N_sk) trcr_depend(nt_bgc_N_sk) = 0 ! algae (skeletal) + if (tr_bgc_C_sk) trcr_depend(nt_bgc_C_sk) = 0 ! + if (tr_bgc_chl_sk) trcr_depend(nt_bgc_chl_sk) = 0 ! + if (tr_bgc_Nit_sk) trcr_depend(nt_bgc_Nit_sk) = 0 ! nutrients + if (tr_bgc_Am_sk) trcr_depend(nt_bgc_Am_sk) = 0 ! + if (tr_bgc_Sil_sk) trcr_depend(nt_bgc_Sil_sk) = 0 ! + if (tr_bgc_DMSPp_sk) trcr_depend(nt_bgc_DMSPp_sk) = 0 ! trace gases + if (tr_bgc_DMSPd_sk) trcr_depend(nt_bgc_DMSPd_sk) = 0 ! + if (tr_bgc_DMS_sk) trcr_depend(nt_bgc_DMS_sk) = 0 ! + + if (tr_bgc_N_sk) bgc_tracer_type(nlt_bgc_N) = c0 ! algae + if (tr_bgc_C_sk) bgc_tracer_type(nlt_bgc_C) = c0 ! + if (tr_bgc_chl_sk) bgc_tracer_type(nlt_bgc_chl) = c0 ! + if (tr_bgc_Nit_sk) bgc_tracer_type(nlt_bgc_NO) = c1 ! nutrients + if (tr_bgc_Am_sk) bgc_tracer_type(nlt_bgc_NH) = c1 ! + if (tr_bgc_Sil_sk) bgc_tracer_type(nlt_bgc_Sil) = c1 ! + if (tr_bgc_DMSPp_sk) bgc_tracer_type(nlt_bgc_DMSPp) = c0 ! trace gases + if (tr_bgc_DMSPd_sk) bgc_tracer_type(nlt_bgc_DMSPd) = c1 ! + if (tr_bgc_DMS_sk) bgc_tracer_type(nlt_bgc_DMS) = c1 ! + + 1000 format (a30,2x,f9.2) ! a30 to align formatted, unformatted statements + 1005 format (a30,2x,f9.6) ! float + 1010 format (a30,2x,l6) ! logical + 1020 format (a30,2x,i6) ! integer + 1030 format (a30, a8) ! character + + end subroutine init_zbgc + +!======================================================================= + +! Initialize vertical profile of biogeochemistry + + subroutine init_bgc + + use ice_algae, only: read_restart_bgc + use ice_blocks, only: nx_block, ny_block + use ice_communicate, only: my_task, master_task + use ice_constants, only: c1, c0, c10, c5, p15, & + field_type_scalar, field_loc_center + use ice_domain, only: nblocks + use ice_domain_size, only: max_blocks + use ice_fileunits, only: nu_diag, nu_forcing + use ice_flux, only: sss + use ice_calendar, only: month + use ice_read_write, only: ice_read, ice_open + use ice_state, only: trcrn, aicen, & + nt_bgc_N_sk, nt_bgc_Nit_sk, nt_bgc_chl_sk, nt_bgc_Am_sk, & + nt_bgc_Sil_sk, nt_bgc_DMSPp_sk, nt_bgc_DMSPd_sk, & + nt_bgc_DMS_sk, nt_bgc_C_sk + + integer (kind=int_kind) :: & + i, j, iblk , & ! horizontal indices + nbits + + logical (kind=log_kind) :: & + dbug ! prints debugging output if true + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + dbug = .true. + + if (.not. skl_bgc) return + + if (restart_bgc) then + + call read_restart_bgc + + else ! 
not restarting + + !----------------------------------------------------------------- + ! default ocean values + !----------------------------------------------------------------- + + sil (:,:,:) = c10 ! ocean silicate (mmol/m^3) + nit (:,:,:) = c5 ! ocean nitrate (mmol/m^3) + amm (:,:,:) = c1 ! ocean ammonia (mmol/m^3) + dmsp (:,:,:) = R_S2N*p15 ! sulfur cycle product (mmol/m^3) + dms (:,:,:) = c0 ! sulfur cycle product (mmol/m^3) + algalN(:,:,:) = p15 ! algal concentration (mmol/m^3) + + !----------------------------------------------------------------- + ! skeletal layer model + !----------------------------------------------------------------- + + if (tr_bgc_N_sk) trcrn(:,:,nt_bgc_N_sk, :,:) = & + p15/phi_sk*sk_l + if (tr_bgc_C_sk) trcrn(:,:,nt_bgc_C_sk, :,:) = & + R_C2N*p15/phi_sk*sk_l + if (tr_bgc_chl_sk) trcrn(:,:,nt_bgc_chl_sk, :,:) = & + R_chl2N*p15/phi_sk*sk_l + if (tr_bgc_Nit_sk) trcrn(:,:,nt_bgc_Nit_sk, :,:) = & + c5/phi_sk*sk_l + if (tr_bgc_Am_sk) trcrn(:,:,nt_bgc_Am_sk, :,:) = & + c1/phi_sk*sk_l + if (tr_bgc_Sil_sk) trcrn(:,:,nt_bgc_Sil_sk, :,:) = & + c10/phi_sk*sk_l + if (tr_bgc_DMSPp_sk) trcrn(:,:,nt_bgc_DMSPp_sk,:,:) = & + R_S2N*p15/phi_sk*sk_l + if (tr_bgc_DMSPd_sk) trcrn(:,:,nt_bgc_DMSPd_sk,:,:) = c0 + if (tr_bgc_DMS_sk) trcrn(:,:,nt_bgc_DMS_sk, :,:) = c0 + + !----------------------------------------------------------------- + ! silicate + !----------------------------------------------------------------- + + nbits = 64 ! double precision data + + if (tr_bgc_Sil_sk) then + if (trim(sil_data_type) == 'clim') then + ! gx1 only + sil_file = trim(bgc_data_dir)//'silicate_WOA2005_surface_monthly' + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'silicate initialized from:' + write (nu_diag,*) trim(sil_file) + endif + + if (my_task == master_task) & + call ice_open (nu_forcing, sil_file, nbits) + + call ice_read (nu_forcing, month, work1, 'rda8', dbug, & + field_loc_center, field_type_scalar) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sil(i,j,iblk) = work1(i,j,iblk) + enddo + enddo + enddo + + if (my_task == master_task) close(nu_forcing) + + else ! default + + ! use WOA2005_surface (winter or spring) for a specific location + ! Bering (60, 180), Okhotsk (55, 150E), Chukchi (70, 170W) + ! Labrador Sea (56, 50W), central(0,86) + ! March: (25, 50, 30, 2.5, 20) + ! mmol/m^3 Apr, May, Jun spring range: (20, 40, 10, 2.5, 20) + ! Jan, Feb, Mar winter range: (20, 60, 25, 2.5, 20) + + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + sil(i,j,iblk) = 30.0_dbl_kind !chukchi, march + enddo + enddo + enddo + + endif ! sil_data_type + endif ! tr_bgc_Sil_sk + + !----------------------------------------------------------------- + ! nitrate + !----------------------------------------------------------------- + + if (tr_bgc_Nit_sk) then + if (trim(nit_data_type) == 'clim') then + ! 
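To put numbers on the skeletal-layer scaling used above: with the constants declared later in ice_zbgc_shared (sk_l = 0.03 m, phi_sk = 0.30), the default algal value of 0.15 mmol/m^3 gives an initial tracer value of

   0.15 / 0.30 * 0.03 = 0.015

and the nitrate default of 5 mmol/m^3 gives 5 / 0.30 * 0.03 = 0.5. (Worked arithmetic only, using the parameter values that appear in ice_zbgc_shared below.)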
gx1 only + nit_file = trim(bgc_data_dir)//'nitrate_WOA2005_surface_monthly' + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'nitrate initialized from:' + write (nu_diag,*) trim(nit_file) + endif + + if (my_task == master_task) & + call ice_open (nu_forcing, nit_file, nbits) + + call ice_read (nu_forcing, month, work1, 'rda8', dbug, & + field_loc_center, field_type_scalar) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + nit(i,j,iblk) = work1(i,j,iblk) + enddo + enddo + enddo + + if (my_task == master_task) close(nu_forcing) + + elseif (trim(nit_data_type) == 'sss') then + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'nitrate initialized from salinity' + endif + + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + nit(i,j,iblk) = sss(i,j,iblk) + enddo + enddo + enddo + + else ! default + + if (my_task == master_task) then + write (nu_diag,*) ' ' + write (nu_diag,*) 'nitrate initialized from March, Chukchi Sea' + endif + + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + nit(i,j,iblk) = c10 + enddo + enddo + enddo + + endif ! nit_data_type + endif ! tr_bgc_Nit_sk + + endif ! restart_bgc + + end subroutine init_bgc + +!======================================================================= + + subroutine biogeochemistry (dt, iblk) + + use ice_algae, only: skl_biogeochemistry + use ice_blocks, only: nx_block, ny_block, block, get_block + use ice_brine, only: preflushing_changes, compute_microS_mushy, & + update_hbrine + use ice_constants, only: c0, c1, puny + use ice_domain, only: blocks_ice + use ice_domain_size, only: ncat, nblyr + use ice_flux, only: hin_old, meltbn, melttn, congeln, snoicen, & + sss, sst, meltsn, hmix + use ice_shortwave, only: fswthrun + use ice_state, only: aicen_init, vicen_init, aicen, vicen, vsnon, & + trcrn, nt_fbri, tr_brine, ntrcr, nbtrcr, & + nt_bgc_N_sk, nt_bgc_Nit_sk, nt_bgc_chl_sk, nt_bgc_Am_sk, & + nt_bgc_Sil_sk, nt_bgc_DMSPp_sk, nt_bgc_DMSPd_sk, & + nt_bgc_DMS_sk, nt_bgc_C_sk + use ice_timers, only: timer_bgc, ice_timer_start, ice_timer_stop + + real (kind=dbl_kind), intent(in) :: & + dt ! time step + + integer (kind=int_kind), intent(in) :: & + iblk ! block index + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij , & ! horizontal indices + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + n ! thickness category index + + integer (kind=int_kind) :: & + icells ! number of cells with aicen > puny + + integer (kind=int_kind), dimension(nx_block*ny_block) :: & + indxi, indxj ! indirect indices for cells with aicen > puny + + real (kind=dbl_kind), dimension (nx_block*ny_block) :: & + hin , & ! new ice thickness + hsn , & ! snow thickness (m) + hbr_old , & ! old brine thickness before growh/melt + kavg , & ! average ice permeability (m^2) + zphi_o , & ! surface ice porosity + hbrin ! brine height + + real (kind=dbl_kind), dimension (nx_block*ny_block,nblyr+2) :: & + ! Defined on Bio Grid points + bSin , & ! salinity on the bio grid (ppt) + brine_sal , & ! brine salinity (ppt) + brine_rho , & ! brine_density (kg/m^3) + ! Defined on Bio Grid interfaces + iphin , & ! porosity + ibrine_sal , & ! brine salinity (ppt) + ibrine_rho ! brine_density (kg/m^3) + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + grow_Cn ! C growth + + real (kind=dbl_kind), dimension (nx_block,ny_block,nbtrcr) :: & + flux_bion ! tracer flux to ocean + + type (block) :: & + this_block ! block information for current block + + if (tr_brine .or. 
skl_bgc) then + + call ice_timer_start(timer_bgc) ! biogeochemistry + + !----------------------------------------------------------------- + ! initialize + !----------------------------------------------------------------- + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + ! Define ocean tracer concentration + do j = 1, ny_block + do i = 1, nx_block + if (tr_bgc_Nit_sk) ocean_bio(i,j,nlt_bgc_NO ,iblk) = nit (i,j,iblk) + if (tr_bgc_chl_sk) ocean_bio(i,j,nlt_bgc_chl ,iblk) = algalN(i,j,iblk)*R_chl2N + if (tr_bgc_Am_sk) ocean_bio(i,j,nlt_bgc_NH ,iblk) = amm (i,j,iblk) + if (tr_bgc_C_sk) ocean_bio(i,j,nlt_bgc_C ,iblk) = algalN(i,j,iblk)*R_C2N + if (tr_bgc_Sil_sk) ocean_bio(i,j,nlt_bgc_Sil ,iblk) = sil (i,j,iblk) + if (tr_bgc_DMSPp_sk) ocean_bio(i,j,nlt_bgc_DMSPp,iblk) = dmsp (i,j,iblk) + if (tr_bgc_DMSPd_sk) ocean_bio(i,j,nlt_bgc_DMSPd,iblk) = dmsp (i,j,iblk) + if (tr_bgc_DMS_sk) ocean_bio(i,j,nlt_bgc_DMS ,iblk) = dms (i,j,iblk) + if (tr_bgc_N_sk) ocean_bio(i,j,nlt_bgc_N ,iblk) = algalN(i,j,iblk) + enddo + enddo + + do n = 1, ncat + + hin_old(:,:,n,iblk) = c0 + flux_bion(:,:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen_init(i,j,n,iblk) > puny) then + hin_old(i,j,n,iblk) = vicen_init(i,j,n,iblk) & + / aicen_init(i,j,n,iblk) + else + first_ice(i,j,n,iblk) = .true. + if (tr_brine) trcrn(i,j,nt_fbri,n,iblk) = c1 + endif + enddo + enddo + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (aicen(i,j,n,iblk) > puny) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo ! i + enddo ! j + + if (icells > 0) then + + !----------------------------------------------------------------- + ! brine dynamics + !----------------------------------------------------------------- + + if (tr_brine) then + + call preflushing_changes (nx_block, ny_block, & + icells, n, & + indxi, indxj, & + aicen (:,:,n,iblk), & + vicen (:,:,n,iblk), vsnon (:,:,n,iblk), & + meltbn (:,:,n,iblk), melttn (:,:,n,iblk), & + congeln(:,:,n,iblk), snoicen(:,:,n,iblk), & + hin_old(:,:,n,iblk), & + trcrn (:,:,nt_fbri,n,iblk), & + dhbr_top(:,:,n,iblk),dhbr_bot(:,:,n,iblk),& + hbr_old, hin, & + hsn, first_ice(:,:,n,iblk)) + + ! Requires the average ice permeability = kavg(:) + ! and the surface ice porosity = zphi_o(:) + ! computed in "compute_microS" or from "thermosaline_vertical" + + call compute_microS_mushy (nx_block, ny_block, & + icells, n, & + indxi, indxj, & + trcrn(:,:,:,n,iblk), hin_old(:,:,n,iblk), & + hbr_old, & + sss (:,:,iblk), sst(:,:,iblk), & + bTiz (:,:,:,n,iblk), bphi(:,:,:,n,iblk), & + kavg, zphi_o, & + bSin, brine_sal, & + brine_rho, iphin, & + ibrine_rho, ibrine_sal) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + call update_hbrine (meltbn (i,j,n,iblk), melttn(i,j,n,iblk), & + meltsn (i,j,n,iblk), dt, & + hin (ij), hsn (ij), & + hin_old (i,j,n,iblk), hbrin (ij), & + hbr_old (ij), & + trcrn (i,j,nt_fbri,n,iblk), & + dhbr_top(i,j,n,iblk), dhbr_bot(i,j,n,iblk),& + kavg (ij), zphi_o(ij), & + darcy_V (i,j,n,iblk)) + + hbri(i,j,iblk) = hbri(i,j,iblk) + hbrin(ij)*aicen_init(i,j,n,iblk) + enddo ! ij + + endif ! tr_brine + + !----------------------------------------------------------------- + ! 
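The icells/indxi/indxj bookkeeping above (and throughout these sources) is a gather of the ice-covered points, so the per-category physics loops only over cells with aicen > puny. The stand-alone sketch below uses illustrative names and sizes and is not part of CICE.

      program gather_demo
        implicit none
        integer, parameter :: nx = 4, ny = 3
        real, parameter :: puny = 1.0e-11
        real :: aicen(nx,ny)
        integer :: indxi(nx*ny), indxj(nx*ny)
        integer :: i, j, ij, icells
        call random_number(aicen)        ! stand-in for the ice concentration field
        aicen(1,:) = 0.0                 ! make one column ice free
        icells = 0                       ! gather indices of ice-covered cells
        do j = 1, ny
           do i = 1, nx
              if (aicen(i,j) > puny) then
                 icells = icells + 1
                 indxi(icells) = i
                 indxj(icells) = j
              endif
           enddo
        enddo
        do ij = 1, icells                ! expensive physics loops only over these
           i = indxi(ij)
           j = indxj(ij)
           ! ... per-cell physics for (i,j) would go here ...
        enddo
        print *, 'cells with ice:', icells, 'of', nx*ny
      end program gather_demo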
biogeochemistry + !----------------------------------------------------------------- + + if (skl_bgc) then + call skl_biogeochemistry (nx_block, ny_block, & + icells, dt, & + indxi, indxj, & + nbtrcr, & + flux_bion(:,:,1:nbtrcr), & + ocean_bio(:,:,1:nbtrcr, iblk), & + hmix (:,:, iblk), & + aicen (:,:, n,iblk), & + meltbn (:,:, n,iblk), & + congeln (:,:, n,iblk), & + fswthrun (:,:, n,iblk), & + first_ice(:,:, n,iblk), & + trcrn (:,:,1:ntrcr,n,iblk), & + grow_Cn) + + call merge_bgc_fluxes_skl(nx_block, ny_block, & + icells, & + indxi, indxj, & + nbtrcr, & + aicen_init(:,:, n,iblk), & + trcrn (:,:,nt_bgc_N_sk,n,iblk), & + flux_bion (:,:,1:nbtrcr), & + flux_bio (:,:,1:nbtrcr, iblk), & + PP_net (:,:, iblk), & + grow_net (:,:, iblk), & + grow_Cn) + endif + + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + first_ice(i,j,n,iblk) = .false. + enddo + + endif ! icells + enddo ! ncat + + call ice_timer_stop(timer_bgc) ! biogeochemistry + + endif ! tr_brine .or. skl_bgc + + end subroutine biogeochemistry + +!======================================================================= + +! Aggregate flux information from all ice thickness categories +! for skeletal layer biogeochemistry +! +! author: Elizabeth C. Hunke and William H. Lipscomb, LANL + + subroutine merge_bgc_fluxes_skl (nx_block, ny_block, & + icells, & + indxi, indxj, & + nbtrcr, & + aicen, algal_N, & + flux_bion, flux_bio, & + PP_net, grow_net, & + grow_Cn) + + use ice_constants, only: c1 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icells , & ! number of cells with aicen > puny + nbtrcr ! number of bgc tracers + + integer (kind=int_kind), dimension(nx_block*ny_block), & + intent(in) :: & + indxi, indxj ! compressed indices for cells with aicen > puny + + ! single category fluxes + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in):: & + aicen ! category ice area fraction + + real (kind=dbl_kind), dimension (nx_block,ny_block), & + intent(in) :: & + algal_N ! (mmol N/m^2) + + real (kind=dbl_kind), dimension(nx_block,ny_block,nbtrcr), & + intent(in):: & + flux_bion ! all bio fluxes to ocean, on categories + + real (kind=dbl_kind), dimension(nx_block,ny_block,nbtrcr), & + intent(inout):: & + flux_bio ! all bio fluxes to ocean, aggregated + + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(in):: & + grow_Cn ! specific growth (/s) + + ! history output + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout):: & + PP_net , & ! Bulk net PP (mg C/m^2/s) + grow_net ! net specific growth (/s) + + ! local variables + + integer (kind=int_kind) :: & + ij, i, j, & ! horizontal indices + k ! tracer indice + + !----------------------------------------------------------------- + ! Merge fluxes + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + do k = 1,nbtrcr + flux_bio (i,j,k) = flux_bio(i,j,k) + flux_bion(i,j,k)*aicen(i,j) + enddo + + PP_net (i,j) = PP_net (i,j) & + + algal_N(i,j)*phi_sk*grow_Cn(i,j)*(c1-fr_resp) & + * R_C2N*R_gC2molC * aicen(i,j) + grow_net (i,j) = grow_net(i,j) + grow_Cn(i,j) * phi_sk*aicen(i,j) + enddo ! ij + + end subroutine merge_bgc_fluxes_skl + +!======================================================================= + +! Initialize bgc fields written to history files +! +! authors: Nicole Jeffery, LANL +! Elizabeth C. 
Hunke, LANL + + subroutine init_history_bgc + + use ice_constants, only: c0 + + PP_net (:,:,:) = c0 + grow_net (:,:,:) = c0 + hbri (:,:,:) = c0 + flux_bio (:,:,:,:) = c0 + flux_bio_ai(:,:,:,:) = c0 + + end subroutine init_history_bgc + +!======================================================================= + +! Adjust biogeochemical tracers when new frazil ice forms + + subroutine add_new_ice_bgc (nx_block, ny_block, dt, & + icells, jcells, kcells, & + indxi, indxj, & + indxi2, indxj2, indxij2, & + indxi3, indxj3, indxij3, & + aicen_init, vicen_init, vi0_init, & + aicen, vicen, vi0new, & + ntrcr, trcrn, nbtrcr, & + sss, ocean_bio, flux_bio, & + hsurp, & + l_stop, istop, jstop) + + use ice_constants, only: c0, c1, puny + use ice_domain_size, only: ncat + use ice_itd, only: column_sum, & + column_conservation_check + use ice_state, only: tr_brine, nt_fbri + use ice_timers, only: timer_bgc, ice_timer_start, ice_timer_stop + + integer (kind=int_kind), intent(in) :: & + nx_block, & ! block dimensions + ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + icells , & ! number of ice/ocean grid cells + jcells , & ! grid cell counter + kcells ! grid cell counter + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in) :: & + indxi, indxj, & ! compressed i/j indices + indxi2, indxj2, indxij2, & ! compressed i/j indices + indxi3, indxj3, indxij3 ! compressed i/j indices + + real (kind=dbl_kind), intent(in) :: & + dt ! time step (s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & + intent(in) :: & + aicen_init , & ! initial concentration of ice + vicen_init , & ! intiial volume per unit area of ice (m) + aicen , & ! concentration of ice + vicen ! volume per unit area of ice (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), & + intent(inout) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + sss ! sea surface salinity (ppt) + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + vi0_init , & ! volume of new ice added to cat 1 (initial) + vi0new ! volume of new ice added to cat 1 + + real (kind=dbl_kind), dimension (icells), intent(in) :: & + hsurp ! thickness of new ice added to each cat + + integer (kind=int_kind), intent(in) :: & + nbtrcr ! number of biology tracers + + real (kind=dbl_kind), dimension (nx_block,ny_block,nbtrcr), & + intent(inout) :: & + flux_bio ! tracer flux to ocean from biology (mmol/m^2/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block,nbtrcr), & + intent(in) :: & + ocean_bio ! ocean concentration of biological tracer + + logical (kind=log_kind), intent(out) :: & + l_stop ! if true, abort on return + + integer (kind=int_kind), intent(out) :: & + istop, jstop ! indices of grid cell where model aborts + + ! local variables + + integer (kind=int_kind) :: & + i, j , & ! horizontal indices + n , & ! ice category index + k , & ! ice layer index + ij, m ! combined i/j horizontal indices + + real (kind=dbl_kind), dimension (icells) :: & + vbri1 , & ! starting volume of existing brine + vbri_init , & ! brine volume summed over categories + vbri_final ! brine volume summed over categories + + real (kind=dbl_kind), dimension(icells) :: & + vsurp , & ! volume of new ice added to each cat + vtmp ! total volume of new and old ice + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat) :: & + vbrin ! trcrn(i,j,nt_fbri,n)*vicen(i,j,n) + + character (len=char_len) :: & + fieldid ! field identifier + + call ice_timer_start(timer_bgc) ! 
biogeochemistry + + !----------------------------------------------------------------- + ! brine + !----------------------------------------------------------------- + do n = 1, ncat + do ij = 1,icells + i = indxi(ij) + j = indxj(ij) + vbrin(i,j,n) = vicen_init(i,j,n) + if (tr_brine) vbrin(i,j,n) = trcrn(i,j,nt_fbri,n)*vicen_init(i,j,n) + enddo + enddo + + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vbrin, vbri_init) + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + vbri_init(ij) = vbri_init(ij) + vi0_init(ij) + + !----------------------------------------------------------------- + ! ocean flux + !----------------------------------------------------------------- + do k = 1, nbtrcr ! only correct for dissolved tracers + flux_bio(i,j,k) = flux_bio(i,j,k) & + - vi0_init(ij)/dt*ocean_bio(i,j,k) & + * (bgc_tracer_type(k)*initbio_frac & + + (c1-bgc_tracer_type(k))) + enddo + enddo + + !----------------------------------------------------------------- + ! kcells: + ! Distribute bgc in new ice volume among all ice categories by + ! increasing ice thickness, leaving ice area unchanged. + !----------------------------------------------------------------- + + do n = 1,ncat + + ! Diffuse_bio handles concentration changes from ice growth/melt + ! ice area does not change for kcells + ! add salt to the bottom + + do ij = 1, kcells + i = indxi3(ij) + j = indxj3(ij) + m = indxij3(ij) + + vtmp(m) = vbrin(i,j,n) + vsurp(m) = hsurp(m) * aicen_init(i,j,n) + vbrin(i,j,n) = vbrin(i,j,n) + vsurp(m) + if (tr_brine) then + trcrn(i,j,nt_fbri,n) = c1 + if (vicen(i,j,n) > c0) trcrn(i,j,nt_fbri,n) = vbrin(i,j,n)/vicen(i,j,n) + endif + enddo + + enddo ! n + + !----------------------------------------------------------------- + ! jcells: + ! Combine bgc in new ice grown in open water with category 1 ice. + !----------------------------------------------------------------- + +!DIR$ CONCURRENT !Cray +!cdir nodep !NEC +!ocl novrec !Fujitsu + do ij = 1, jcells + i = indxi2(ij) + j = indxj2(ij) + m = indxij2(ij) + + vbri1(m) = vbrin(i,j,1) + vbrin(i,j,1) = vbrin(i,j,1) + vi0new(m) + if (tr_brine) then + trcrn(i,j,nt_fbri,1) = c1 + if (vicen(i,j,1) > c0) trcrn(i,j,nt_fbri,1) = vbrin(i,j,1)/vicen(i,j,1) + endif + enddo + + ! Diffuse_bio handles concentration changes from ice growth/melt + ! ice area changes for jcells + ! add salt throughout + + if (tr_brine) then + call column_sum (nx_block, ny_block, & + icells, indxi, indxj, & + ncat, & + vbrin, vbri_final) + + fieldid = 'vbrin, add_new_ice' + call column_conservation_check (nx_block, ny_block, & + icells, indxi, indxj, & + fieldid, & + vbri_init, vbri_final, & + puny, l_stop, & + istop, jstop) + if (l_stop) return + endif + + call ice_timer_stop(timer_bgc) ! biogeochemistry + + end subroutine add_new_ice_bgc + +!======================================================================= + + end module ice_zbgc + +!======================================================================= diff --git a/source/ice_zbgc_shared.F90 b/source/ice_zbgc_shared.F90 new file mode 100755 index 00000000..b3c94bd5 --- /dev/null +++ b/source/ice_zbgc_shared.F90 @@ -0,0 +1,335 @@ +! SVN:$Id: ice_zbgc_shared.F90 745 2013-09-28 18:22:36Z eclare $ +!======================================================================= +! +! Biogeochemistry variables +! +! authors: Nicole Jeffery, LANL +! Scott Elliot, LANL +! Elizabeth C. Hunke, LANL +! 
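In the ocean-flux loop of add_new_ice_bgc above, the weighting applied to each tracer reduces to

   bgc_tracer_type(k)*initbio_frac + (1 - bgc_tracer_type(k)) = initbio_frac  for dissolved tracers (type 1)
                                                              = 1             for clinging tracers (type 0)

and since initbio_frac is currently set to c1 in ice_zbgc_shared, both classes remove the full ocean concentration carried by the new ice volume, i.e. flux_bio is decremented by vi0_init/dt * ocean_bio. As a purely illustrative number, 0.01 m of new ice formed over a 3600 s step in water holding 5 mmol/m^3 of a tracer gives a decrement of 0.01/3600*5, about 1.4e-5 mmol/m^2/s, consistent with the flux_bio units noted above.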
+ module ice_zbgc_shared + + use ice_kinds_mod + use ice_constants, only: p01, p1, p5, c0, c1 + use ice_domain_size, only: ncat, max_blocks, max_nbtrcr, & + nblyr, nilyr + use ice_blocks, only: nx_block, ny_block + + implicit none + + private + public :: remap_layers_bgc + + logical (kind=log_kind), public :: & + restart_hbrine ! if true, read hbrine from restart file + + character(char_len_long), public :: & + bgc_data_dir ! directory for biogeochemistry data + + logical (kind=log_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public :: & + first_ice ! distinguishes ice that disappears (e.g. melts) + ! and reappears (e.g. transport) in a grid cell + ! during a single time step from ice that was + ! there the entire time step (true until ice forms) + + ! coupling fluxes + real (kind=dbl_kind), & + dimension (nx_block,ny_block,max_nbtrcr,max_blocks), public :: & + flux_bio , & ! all bio fluxes to ocean + ocean_bio , & ! contains all the ocean bgc tracer concentrations + flux_bio_ai ! all bio fluxes to ocean, averaged over grid cell + + !----------------------------------------------------------------- + ! general biogeochemistry + !----------------------------------------------------------------- + + real (kind=int_kind), dimension(max_nbtrcr), public :: & + bgc_tracer_type ! 1 dissolved tracers: mix like salinity + ! 0 tracers that cling: resist brine motion (algae) + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & + nit , & ! ocean nitrate (mmol/m^3) + amm , & ! ammonia/um (mmol/m^3) + sil , & ! silicate (mmol/m^3) + dmsp , & ! dmsp (mmol/m^3) + dms , & ! dms (mmol/m^3) + algalN ! ocean algal nitrogen (mmol/m^3) + + character (char_len_long), public :: & ! input data file names + nit_file , & ! nitrate input file + sil_file ! silicate input file + + character(char_len), public :: & + sil_data_type , & ! 'default', 'clim' + nit_data_type , & ! 'default', 'clim' + bgc_flux_type ! type of ocean-ice piston velocity + ! 'constant', 'Jin2006' + + ! ocean sources/sinks + integer (kind=int_kind), public :: & + nlt_bgc_N , & ! algae + nlt_bgc_C , & ! + nlt_bgc_chl , & ! + nlt_bgc_NO , & ! nutrients + nlt_bgc_NH , & ! + nlt_bgc_Sil , & ! + nlt_bgc_DMSPp , & ! trace gases + nlt_bgc_DMSPd , & ! + nlt_bgc_DMS + + ! bio parameters for algal_dyn + real (kind=dbl_kind), parameter, public :: & + initbio_frac = c1 , & ! fraction of ocean tracer used for initialization + R_C2N = 7.0_dbl_kind , & ! algal C to N (mole/mole) + ! Kristiansen 1991 (Barents) 9.0 + R_gC2molC = 12.01_dbl_kind, & ! mg/mmol C + R_chl2N = 3.0_dbl_kind , & ! algal chlorophyll to N (mg/mmol) + R_S2N = 0.03_dbl_kind , & ! algal S to N (mole/mole) + fr_resp = 0.05_dbl_kind ! respiration fraction + + !----------------------------------------------------------------- + ! skeletal layer biogeochemistry + !----------------------------------------------------------------- + + logical (kind=log_kind), public :: & + tr_bgc_N_sk, & ! if true, nitrogen as algal tracer on ice + tr_bgc_C_sk, & ! if true, carbon as algal tracer on ice + tr_bgc_chl_sk, & ! if true, chlorophyll as algal tracer on ice + tr_bgc_Nit_sk, & ! if true, nitrate as nutrient tracer on ice + tr_bgc_Am_sk, & ! if true, ammonia/um as nutrient tracer on ice + tr_bgc_Sil_sk, & ! if true, silicon as nutrient tracer on ice + tr_bgc_DMSPp_sk, & ! if true, DMSPp as algal content tracer on ice + tr_bgc_DMSPd_sk, & ! if true, DMSPd as precursor tracer on ice + tr_bgc_DMS_sk, & ! if true, DMS as product tracer on ice + restart_bgc, & ! 
if true, read bgc restart file + restore_bgc, & ! if true, restore nitrate + skl_bgc ! if true, solve skeletal biochemistry + + real (kind=dbl_kind), parameter, public :: & + sk_l = 0.03_dbl_kind, & ! skeletal layer thickness (m) + phi_sk = 0.30_dbl_kind ! skeletal layer porosity + + !----------------------------------------------------------------- + ! brine + !----------------------------------------------------------------- + + integer (kind=int_kind), parameter, public :: & + exp_h = 3 ! power law for hierarchical model + + real (kind=dbl_kind), parameter, public :: & + k_o = 3.e-8_dbl_kind, & ! permeability scaling factor (m^2) + rhosi = 940.0_dbl_kind, & ! average sea ice density + ! Cox and Weeks, 1982: 919-974 kg/m^2 + min_salin = p1 , & ! threshold for brine pocket treatment + hbr_min = p01 , & ! minimum hbrine thickness + thinS = 0.05_dbl_kind ! minimum ice thickness for brine + + real (kind=dbl_kind), public :: & + phi_snow , & ! porosity of snow + flood_frac ! fraction of ocean/meltwater that floods + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public :: & + dhbr_top , & ! brine top change + dhbr_bot ! brine bottom change + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,max_blocks), public :: & + grow_net , & ! Specific growth rate (/s) per grid cell + PP_net , & ! Total production (mg C/m^2/s) per grid cell + hbri ! brine height, area-averaged for comparison with hi (m) + + real (kind=dbl_kind), dimension (nblyr+2), public :: & + bgrid ! biology nondimensional vertical grid points + + real (kind=dbl_kind), dimension (nblyr+1), public :: & + igrid ! biology vertical interface points + + real (kind=dbl_kind), dimension (nilyr+1), public :: & + cgrid ! CICE vertical coordinate + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,nblyr+2,ncat,max_blocks), public :: & + bphi , & ! porosity of layers + bTiz ! layer temperatures interpolated on bio grid (C) + + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public :: & + darcy_V ! darcy velocity positive up (m/s) + +!======================================================================= + + contains + +!======================================================================= +! +! Remaps tracer fields in a given category from one set of layers to another. +! Grids can be very different and so can vertical spaces. + + subroutine remap_layers_bgc (ntrcr, nlyrn, & + it, & + trcrn, trtmp, & + nr0, nblyr, & + hice, hinS, & + ice_grid, bio_grid, & + S_min) + + use ice_fileunits, only: nu_diag + use ice_exit, only: abort_ice + + integer (kind=int_kind), intent(in) :: & + ntrcr , & ! number of tracers in use + it , & ! tracer index in top layer + nr0 , & ! receiver category + nlyrn , & ! number of ice layers + nblyr ! number of biology layers + + real (kind=dbl_kind), dimension (ntrcr), & + intent(in) :: & + trcrn ! ice tracers + + real (kind=dbl_kind), dimension (nblyr+2), & + intent(inout) :: & + trtmp ! temporary, remapped ice tracers + + real (kind=dbl_kind), dimension (nlyrn), intent(in) :: & + ice_grid ! CICE grid cgrid(2:nilyr+1) + + real (kind=dbl_kind), dimension (nblyr), intent(in) :: & + bio_grid ! CICE grid grid(2:nblyr+1) + + real(kind=dbl_kind), intent(in) :: & + hice , & ! CICE ice thickness + hinS , & ! brine height + S_min ! for salinity on CICE grid + + ! local variables + + integer (kind=int_kind) :: & + kd, kr, kdr , & ! more indices + kdi , & ! more indices + n_nd , & ! number of layers in donor + n_nr, n_plus ! 
number of layers in receiver + + real (kind=dbl_kind), dimension (nblyr+3+nlyrn) :: & + trdr , & ! combined tracer + trgrid ! combined grid + + real (kind=dbl_kind), dimension (nblyr+nilyr+3) :: & + tracer , & ! temporary, ice tracers values + dgrid , & ! temporary, donor grid dimensional + rgrid ! temporary, receiver grid dimensional + + if ((hinS < c0) .OR. (hice < c0)) then + write(nu_diag, *)'Problem in remap_layers_bgc' + write(nu_diag, *) '(hinS < c0) .OR. (hice < c0)' + write(nu_diag, *) 'hinS,hice',hinS,hice + call abort_ice ('ice: remap_layers_bgc error') + endif + + if (nr0 == 0) then ! cice to bio + + n_nd = nlyrn + n_nr = nblyr + n_plus = 2 + dgrid (1) = min(-hice+hinS, -hinS+hice, c0) + dgrid (nlyrn+2) = min(hinS, hice) + tracer(1) = trcrn(it) + tracer(nlyrn+2) = trcrn(it+nlyrn-1) + rgrid (nblyr+2) = min(hinS, hice) + if (hice > hinS) then + rgrid(1) = c0 + do kr = 1,n_nr + rgrid(kr+1) = bio_grid(kr)*hinS + enddo + do kd = 1,n_nd + dgrid(kd+1) = (ice_grid(kd)-c1)*hice+hinS + tracer(kd+1) = trcrn(it+kd-1) + enddo + else + rgrid(1) = -hinS + hice + do kr = 1,n_nr + rgrid(kr+1) = (bio_grid(kr)-c1)*hinS + hice + enddo + do kd = 1,n_nd + dgrid(kd+1) = ice_grid(kd)*hice + tracer(kd+1) = trcrn(it+kd-1) + enddo + endif + + else ! bio to cice + + n_nd = nblyr + n_nr = nlyrn + if (hice > hinS) then + n_plus = 3 + tracer(1) = S_min + tracer(2) = S_min + dgrid (1) = -hice+hinS + dgrid (2) = p5*(hinS-hice) + dgrid (nblyr+3) = hinS + tracer(nblyr+3) = trcrn(it+nblyr-1) + rgrid (1) = -hice + hinS + rgrid (nlyrn+2) = hinS + do kd = 1,n_nd + dgrid(kd+2) = bio_grid(kd)*hinS + tracer(kd+2) = trcrn(it+kd-1) + enddo + do kr = 1,n_nr + rgrid(kr+1) = (ice_grid(kr)-c1)*hice+ hinS + enddo + else + n_plus = 2 + tracer(1) = trcrn(it) + tracer(nblyr+2) = trcrn(it+nblyr-1) + dgrid (1) = hice-hinS + dgrid (nblyr+2) = hice + rgrid (nlyrn+2) = hice + rgrid (1) = c0 + do kd = 1,n_nd + dgrid(kd+1) = (bio_grid(kd)-c1)*hinS + hice + tracer(kd+1) = trcrn(it+kd-1) + enddo + do kr = 1,n_nr + rgrid(kr+1) = ice_grid(kr)*hice + enddo + endif + + endif + + kdr = 0 !combined indices + kdi = 1 + + do kr = 1, n_nr + do kd = kdi, n_nd+n_plus + if (dgrid(kd) < rgrid(kr+1)) then + kdr = kdr+1 + trgrid(kdr) = dgrid(kd) + trdr (kdr) = tracer(kd) + elseif (dgrid(kd) > rgrid(kr+1)) then + kdr = kdr + 1 + kdi = kd + trgrid(kdr) = rgrid(kr+1) + trtmp (kr) = trdr(kdr-1) & + + (rgrid(kr+1) - trgrid(kdr-1)) & + * (tracer(kd) - trdr(kdr-1)) & + / (dgrid(kd) - trgrid(kdr-1)) + trdr(kdr) = trtmp(kr) + else + kdr = kdr+1 + kdi = kd+1 + trgrid(kdr) = rgrid(kr+1) + trtmp (kr) = tracer(kd) + trdr (kdr) = tracer(kd) + endif + enddo + enddo + + end subroutine remap_layers_bgc + +!======================================================================= + + end module ice_zbgc_shared + +!======================================================================= From 51af00a7aa9eeae11047a9e3ac0ae3c420578e9e Mon Sep 17 00:00:00 2001 From: pbd562 Date: Wed, 31 Aug 2016 04:18:53 +0000 Subject: [PATCH 02/52] Updated cpl_forcing_handler.F90 from /short/p66/dhb599/ACCESS-CM2/submodels/cice_GC3_GA7_hxyo/ git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_GC3_GA7@358 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/cpl_forcing_handler.F90 | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index 64b1fd27..1f11bb92 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ 
-686,20 +686,23 @@ subroutine get_sbc_ice do j = 1, ny_block do i = 1, nx_block do k = 1, nblocks - if (aice(i,j,k)==0.0) then - do cat = 1, ncat - flatn_f(i,j,cat,k) = 0.0 - enddo - ! This will then be conserved in CICE (done in sfcflux_to_ocn) - flatn_f(i,j,1,k) = um_lhflx(i,j,k) - else + !BX 20160826: as in NEMO sbccpl.F90, there is no "open water field" um_lhflx involved: + ! qla_ice(:,:,1:jpl) = - frcv(jpr_ievp)%z3(:,:,1:jpl) * lsub + !------------------------------------------------------------------------------------- + !if (aice(i,j,k)==0.0) then + ! do cat = 1, ncat + ! flatn_f(i,j,cat,k) = 0.0 + ! enddo + ! ! This will then be conserved in CICE (done in sfcflux_to_ocn) + ! flatn_f(i,j,1,k) = um_lhflx(i,j,k) + !else do cat = 1, ncat !!!BX: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * aicen(i,j,cat,k)/aice(i,j,k) !!! Double check "Lsub" used here !!! !?! flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub enddo - endif + !endif enddo enddo enddo From 6f30219cbab48b18107f6b97064dd695bf1b2c9e Mon Sep 17 00:00:00 2001 From: dhb599 Date: Thu, 10 Nov 2016 00:23:37 +0000 Subject: [PATCH 03/52] I2A coupling bug fix git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_GC3_GA7@359 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/cpl_forcing_handler.F90 | 37 +++++++++++++++++++++++--- drivers/access/ice_coupling.F90 | 6 +++-- source/ice_flux.F90 | 1 + 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index 1f11bb92..51a9559c 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ -14,7 +14,8 @@ MODULE cpl_forcing_handler use ice_domain, only : distrb_info, nblocks use ice_flux !forcing data definition (Tair, Qa, uocn, etc.) !Tn_top, keffn_top ...(for multilayer configuration) -use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd !ice concentration and tracers +!use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd !ice concentration and tracers +use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd, nt_Tsfc !ice concentration and tracers use ice_state, only: uvel, vvel, vsnon, vicen use ice_gather_scatter !ars599: 11042014: use all ice_constants @@ -726,6 +727,27 @@ subroutine get_sbc_ice !!! the original weighting is CORRECT! so back to *aice: fsnow = max(aice * um_snow,0.0) frain = max(aice * um_rain,0.0) +! +!!! XXXXXX: ice surface skin temperature (from UM)-------------------------- +!see: tsfc_ice definition in sbccpl.F90 at +!/short/p66/hxy599/fcm_make_ocean_GC3/extract/nemo/NEMOGCM/NEMO/OPA_SRC/SBC +!--------------------------------------------------------------------------- +do cat = 1, ncat + !!! trcrn(:,:,nt_Tsfc,cat,:) = um_tsfice(:,:,cat,:) + do k = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (um_tsfice(i,j,cat,k) > 0.0) then + trcrn(i,j,nt_Tsfc,cat,k) = 0.0 + else if (um_tsfice(i,j,cat,k) < -60.0) then + trcrn(i,j,nt_Tsfc,cat,k) = -60.0 + else + trcrn(i,j,nt_Tsfc,cat,k) = um_tsfice(i,j,cat,k) + endif + enddo + enddo + enddo +enddo !!!------------------------------------------------------------------------------------------ ! Fields from MOM4 (SSU/V and sslx/y are on U points): @@ -1013,11 +1035,16 @@ subroutine get_i2a_fields !BX: save it for use in atm_icefluxes_back2GBM --- maicen_saved = maicen +!XXX -- As per Alex West, only two of the ice vaiables below need to be scaled down +! by "* aice": ice top layer "temperature" and "effective conductivity"! 
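!------------------------------------------------------------------------------
! Editorial sketch, not part of the patch above: it only illustrates the two
! conversions used in the get_sbc_ice hunks earlier in this patch series --
! turning a per-category sublimation rate into a latent heat flux with
! flatn = -evap*Lsub, and clamping the incoming UM skin temperature to
! [-60 C, 0 C] before it is stored as the CICE surface-temperature tracer.
! The names evap_cat, tsf_cat and the numerical values are illustrative
! assumptions; only the formulas mirror the hunks above.
      program sketch_sbc_clamp
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real (kind=dbl), parameter :: lsub_val = 2.835e6_dbl ! J/kg, approximate latent heat of sublimation
        real (kind=dbl) :: evap_cat, flat_cat, tsf_cat

        evap_cat = 1.0e-8_dbl            ! sublimation rate for one thickness category (kg/m^2/s)
        flat_cat = -evap_cat*lsub_val    ! latent heat flux (W/m^2), negative = out of the ice

        tsf_cat = 5.0_dbl                                ! incoming skin temperature (deg C)
        tsf_cat = max(-60.0_dbl, min(0.0_dbl, tsf_cat))  ! clamp, equivalent to the if/else if block above

        print *, 'flatn_f =', flat_cat, ' W/m^2'
        print *, 'Tsfc    =', tsf_cat, ' degC'
      end program sketch_sbc_clamp
!------------------------------------------------------------------------------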
+ !(9-13) ice thickness ia_thikn(:,:,:,:) = mthikn(:,:,:,:) +!ia_thikn(:,:,:,:) = mthikn(:,:,:,:) * mfoifr(:,:,:,:) !X !(14-18) snow thickness ia_snown(:,:,:,:) = msnown(:,:,:,:) +!ia_snown(:,:,:,:) = msnown(:,:,:,:) * mfoifr(:,:,:,:) !X !(19-20) co2 flux stuff ia_co2 = mco2 @@ -1030,16 +1057,20 @@ subroutine get_i2a_fields ia_foifr(:,:,:,:) = mfoifr(:,:,:,:) !(27-31) ice top layer temperature -ia_itopt(:,:,:,:) = mitopt(:,:,:,:) + 273.15 +!XXX ia_itopt(:,:,:,:) = mitopt(:,:,:,:) + 273.15 +ia_itopt(:,:,:,:) = (mitopt(:,:,:,:) + 273.15) * mfoifr(:,:,:,:) !Y !(32-36) ice top layer effective conductivity -ia_itopk(:,:,:,:) = mitopk(:,:,:,:) +!XXX ia_itopk(:,:,:,:) = mitopk(:,:,:,:) +ia_itopk(:,:,:,:) = mitopk(:,:,:,:) * mfoifr(:,:,:,:) !Y !(37-41) ice melt pond concentration ia_pndfn(:,:,:,:) = mpndfn(:,:,:,:) +!ia_pndfn(:,:,:,:) = mpndfn(:,:,:,:) * mfoifr(:,:,:,:) !X !(42-46) ice melt pond thickness ia_pndtn(:,:,:,:) = mpndtn(:,:,:,:) +!ia_pndtn(:,:,:,:) = mpndtn(:,:,:,:) * mfoifr(:,:,:,:) !X return end subroutine get_i2a_fields diff --git a/drivers/access/ice_coupling.F90 b/drivers/access/ice_coupling.F90 index e50bec57..732e8e78 100644 --- a/drivers/access/ice_coupling.F90 +++ b/drivers/access/ice_coupling.F90 @@ -91,7 +91,8 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! !EOP ! -#ifdef CICE_IN_NEMO +!#ifdef CICE_IN_NEMO +#ifdef ACCESS integer (kind=int_kind) :: & i, j, n ! horizontal indices @@ -182,7 +183,8 @@ subroutine set_sfcflux (nx_block, ny_block, & i = indxi(ij) j = indxj(ij) -#ifdef CICE_IN_NEMO +!#ifdef CICE_IN_NEMO +#ifdef ACCESS !---------------------------------------------------------------------- ! Convert fluxes from GBM values to per ice area values when ! running in NEMO environment. (When in standalone mode, fluxes diff --git a/source/ice_flux.F90 b/source/ice_flux.F90 index d4dd93f7..a4adbe54 100755 --- a/source/ice_flux.F90 +++ b/source/ice_flux.F90 @@ -430,6 +430,7 @@ subroutine init_coupler_flux sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) !ars599: 04042016: should we change to AusCOM or ACCESS? #ifndef CICE_IN_NEMO +!#ifndef ACCESS !XXX -- won't run! if (ktherm == 2) then ! freezing temp (C) ! liquidus_temperature_mush(sss) Tf (:,:,:) = sss(:,:,:) / (-18.48_dbl_kind & From 8d76453e1e88c98608d874468f827d230306c3c4 Mon Sep 17 00:00:00 2001 From: dhb599 Date: Mon, 28 Nov 2016 03:54:35 +0000 Subject: [PATCH 04/52] ice errmax tolorance x 10 git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_GC3_GA7@360 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- source/ice_step_mod.F90 | 23 +++++++++++++++++++++++ source/ice_therm_vertical.F90 | 21 ++++++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/source/ice_step_mod.F90 b/source/ice_step_mod.F90 index de5e6471..65d78e7a 100755 --- a/source/ice_step_mod.F90 +++ b/source/ice_step_mod.F90 @@ -43,6 +43,10 @@ subroutine prep_radiation (dt, iblk) Sswabsn, Iswabsn use ice_state, only: aice, aicen use ice_timers, only: ice_timer_start, ice_timer_stop, timer_sw + !BBB: + use ice_grid, only: tmask + use ice_calendar, only: istep1 + !b. real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -106,9 +110,16 @@ subroutine prep_radiation (dt, iblk) do j = jlo, jhi do i = ilo, ihi if (aicen(i,j,n,iblk) > puny) then + if (.not. tmask(i,j,iblk)) then !BBB + write(nu_diag,*) 'BBB1 ??? land point found! 
at istep1, my_task: ',& + istep1, my_task + write(nu_diag,'(a,4i5,e12.6)') 'BBB1 i,j, n,iblk, aicen = ', & + i,j, n,iblk, aicen(i,j,n,iblk) + else !B icells = icells + 1 indxi(icells) = i indxj(icells) = j + endif !B endif enddo ! i enddo ! j @@ -194,6 +205,11 @@ subroutine step_therm1 (dt, iblk) use ice_therm_shared, only: calc_Tsfc use ice_therm_vertical, only: frzmlt_bottom_lateral, thermo_vertical use ice_timers, only: ice_timer_start, ice_timer_stop, timer_ponds + !BBB: + use ice_grid, only: tmask + use ice_calendar, only: istep1 + !b. + !#ifdef ACCESS ! use cpl_arrays_setup, only: maice_saved !#endif @@ -391,9 +407,16 @@ subroutine step_therm1 (dt, iblk) do j = jlo, jhi do i = ilo, ihi if (aicen(i,j,n,iblk) > puny) then + if (.not. tmask(i,j,iblk)) then !BBB + write(nu_diag,*) 'BBB2 ??? land point found! at istep1, my_task: ',& + istep1, my_task + write(nu_diag,'(a,4i5,e12.6)') 'BBB2 i,j, n,iblk, aicen = ', & + i,j, n,iblk, aicen(i,j,n,iblk) + else !B icells = icells + 1 indxi(icells) = i indxj(icells) = j + endif !B endif enddo ! i enddo ! j diff --git a/source/ice_therm_vertical.F90 b/source/ice_therm_vertical.F90 index a356afd3..abf2cb50 100755 --- a/source/ice_therm_vertical.F90 +++ b/source/ice_therm_vertical.F90 @@ -2510,7 +2510,14 @@ subroutine conservation_check_vthermo(nx_block, ny_block, & ferr = abs(efinal(ij)-einit(ij)-einp) / dt if (ferr > ferrmax) then - l_stop = .true. + + if (ferr > 10.0 * ferrmax) then + l_stop = .true. + write(nu_diag,*) 'BBB: TOO BAD --- CICE is to stop!' + else + write(nu_diag,*) 'BBB: WARNING -- too big flux error --' + endif + istop = i jstop = j @@ -2532,6 +2539,18 @@ subroutine conservation_check_vthermo(nx_block, ny_block, & write(nu_diag,*) fcondtopn_solve(i,j), fcondtopn_extra(i,j) write(nu_diag,*) 'enum(ij):' write(nu_diag,*) enum(ij) +!B: +! write(nu_diag,*) 'Global i and j:', & +! this_block%i_glob(istop), & +! this_block%j_glob(jstop) +! write(nu_diag,*) 'Lat, Lon:', & +! TLAT(istop,jstop,iblk)*rad_to_deg, & +! TLON(istop,jstop,iblk)*rad_to_deg +! write(nu_diag,*) 'aice:', & +! aice(istop,jstop,iblk) +! write(nu_diag,*) 'n: ',n, 'aicen: ', & +! aicen(istop,jstop,n,iblk) +!b ! if (ktherm == 2) then write(nu_diag,*) 'Intermediate energy =', einter(ij) From 4520c26544d8259bdfaead05e1ab4c56799d0fed Mon Sep 17 00:00:00 2001 From: ars599 Date: Mon, 28 Aug 2017 05:24:22 +0000 Subject: [PATCH 05/52] dhb599 mod driver files git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_GC3_GA7@366 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/CICE_InitMod.F90 | 15 +- drivers/access/CICE_RunMod.F90 | 321 +++++++---------- drivers/access/cpl_forcing_handler.F90 | 443 ++++++++++++++++-------- drivers/access/cpl_interface.F90_uphalo | 20 +- drivers/access/cpl_parameters.F90 | 29 +- drivers/access/ice_constants.F90 | 8 +- 6 files changed, 468 insertions(+), 368 deletions(-) diff --git a/drivers/access/CICE_InitMod.F90 b/drivers/access/CICE_InitMod.F90 index f481f796..a4aa850a 100644 --- a/drivers/access/CICE_InitMod.F90 +++ b/drivers/access/CICE_InitMod.F90 @@ -257,7 +257,7 @@ subroutine cice_init ! for continue runs, need restart o2i forcing fields and time-averaged ice ! variables ('mice')saved at the end of last run from ice models; ! for initial run, pre-processed o2i (and maybe mice) fields are required. -!hxy599 call get_restart_o2i(trim(restartdir)//'/o2i.nc') + call get_restart_o2i(trim(restartdir)//'/o2i.nc') !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
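!------------------------------------------------------------------------------
! Editorial sketch of the energy-conservation check pattern changed in the
! ice_therm_vertical hunk above: the flux error is still computed every step,
! but the run is only aborted when it exceeds ten times the nominal tolerance;
! smaller violations just produce a warning.  All numbers below are toy values
! chosen for illustration.
      program sketch_conservation_check
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real (kind=dbl), parameter :: ferrmax = 1.0e-3_dbl  ! nominal flux-error tolerance (W/m^2)
        real (kind=dbl) :: einit, efinal, einp, dt, ferr

        dt     = 3600.0_dbl             ! thermodynamic time step (s)
        einit  = 1.0e7_dbl              ! initial column energy (J/m^2)
        einp   = 5.0e4_dbl              ! net energy input over the step (J/m^2)
        efinal = einit + einp + 7.0_dbl ! final energy, with a small imbalance

        ferr = abs(efinal - einit - einp) / dt
        if (ferr > ferrmax) then
           if (ferr > 10.0_dbl*ferrmax) then
              print *, 'flux error', ferr, 'exceeds 10*ferrmax -- would stop the model'
           else
              print *, 'flux error', ferr, 'exceeds ferrmax -- warning only'
           end if
        end if
      end program sketch_conservation_check
!------------------------------------------------------------------------------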
!if no lag for ice to atm coupling, then cice has to read restart file i2a.nc and @@ -266,24 +266,21 @@ subroutine cice_init ! call put_restart_i2a('i2a.nc', 0) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! if ( file_exist('CICE_restart/mice.nc') ) then if ( file_exist(trim(restartdir)//'/mice.nc') ) then !for continue runs, mice data MUST be available. -! call get_restart_mice('CICE_restart/mice.nc') call get_restart_mice(trim(restartdir)//'/mice.nc') else -write(6,*)'*** CICE WARNING: No initial mice.nc data available here! **' -write(6,*)'*** CICE WARNING: ALL mice variables will be set to ZERO! **' -write(6,*)'*** CICE WARNING: This is allowed for the init run ONLY ! **' + write(6,*)'*** CICE WARNING: No initial mice.nc data available here! **' + write(6,*)'*** CICE WARNING: ALL mice variables will be set to ZERO! **' + write(6,*)'*** CICE WARNING: This is allowed for the init run ONLY ! **' endif if (use_core_runoff) then -! call get_core_runoff('CICE_input/core_runoff_regrid.nc',& call get_core_runoff(trim(inputdir)//'/core_runoff_regrid.nc',& 'runoff',1) endif -!hxy599 write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',0 !time_sec -!hxy599 call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling + write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',0 !time_sec + call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling !get a2i fields and then set up initial SBC for ice !call from_atm(0) diff --git a/drivers/access/CICE_RunMod.F90 b/drivers/access/CICE_RunMod.F90 index c286bd81..6fd8fd2f 100644 --- a/drivers/access/CICE_RunMod.F90 +++ b/drivers/access/CICE_RunMod.F90 @@ -17,12 +17,7 @@ module CICE_RunMod use ice_kinds_mod -#ifdef AusCOM - !For stuff in this AusCOM's own driver the "#ifdef AusCOM" is NOT needed! - !but for consistency with the code in other places, we keep it anyway ... - !...to "indentify" the modification to the original code, easier - !...to locate future code update Aug. 2008 - +#ifdef ACCESS use cpl_parameters use cpl_arrays_setup use cpl_interface @@ -52,11 +47,9 @@ subroutine CICE_Run use ice_algae, only: get_forcing_bgc use ice_calendar, only: istep, istep1, time, dt, stop_now, calendar use ice_forcing, only: get_forcing_atmo, get_forcing_ocn -#ifdef AusCOM -!ars599: 27032014 add in - use ice_calendar, only: month, mday, istep, istep1, & - time, dt, stop_now, calendar - use ice_forcing, only: get_forcing_atmo, get_forcing_ocn +#ifdef ACCESS + use ice_calendar, only: month, mday, istep, istep1, time, dt, stop_now, calendar + use ice_restart_driver, only: dumpfile !temporary debug #endif use ice_flux, only: init_flux_atm, init_flux_ocn use ice_state, only: tr_aero @@ -64,21 +57,17 @@ subroutine CICE_Run timer_couple, timer_step use ice_zbgc_shared, only: skl_bgc -#ifdef AusCOM -!ars599: 27032014 add in +#ifdef ACCESS use ice_timers, only: ice_timer_start, & ice_timer_stop, timer_couple, timer_step use ice_grid, only: t2ugrid_vector, u2tgrid_vector - - - integer (kind=int_kind) :: time_sec, itap, icpl_ai, icpl_io, tmp_time + integer (kind=int_kind) :: time_sec, itap, icpl_ai, tmp_time integer (kind=int_kind) :: rtimestamp_ai, stimestamp_ai integer (kind=int_kind) :: rtimestamp_io, stimestamp_io !receive and send timestamps (seconds) integer (kind=int_kind) :: imon -!ars: 08052014 according to dhb599 fm changed, and mark out the one from OM -! logical :: first_step = .true. !1st time step of experiment or not - logical :: need_i2o = .true. + + logical :: write_tmp_dump = .true. 
#endif !-------------------------------------------------------------------- @@ -91,184 +80,142 @@ subroutine CICE_Run ! timestep loop !-------------------------------------------------------------------- -#ifdef AusCOM - write(il_out,*)'A <==> I coupling num_cpl_ai= ',num_cpl_ai - write(il_out,*)' to/from ocean num_cpl_io= ',num_cpl_io - write(il_out,*)' ice steps num_ice_io = ', num_ice_io - write(il_out,*)'runtime, runtime0=', runtime, runtime0 +#ifdef ACCESS + write(il_out,*)'A <==> I coupling num_cpl_ai = ',num_cpl_ai + write(il_out,*)' ice steps per ai interval num_ice_ai = ',num_ice_ai + write(il_out,*)' runtime, runtime0 = ',runtime, runtime0 time_sec = 0 - ! get from atm once at time 0 -! rtimestamp_ai = time_sec -! call ice_timer_start(timer_from_atm) ! atm/ocn coupling -! call from_atm(rtimestamp_ai) -! call ice_timer_stop(timer_from_atm) ! atm/ocn coupling -! -! !set time averaged ice and ocn variables back to 0 -! call initialize_mice_fields_4_i2a - call initialize_mocn_fields_4_i2a DO icpl_ai = 1, num_cpl_ai !begin A <==> I coupling iterations - !!!B: from_atm should be called here, like - !!! if (icpl_ai /= numcpl_ai) then !avoid the last step(?) - !!! rtimestamp_ai = time_sec !(?) - !!! call from_atm(rtimestamp_ai) - !!! endif - !!! call atm_icefluxes_back2GBM - - Do icpl_io = 1, num_cpl_io !begin I <==> O coupling iterations - - rtimestamp_io = time_sec - if (rtimestamp_io < runtime) then !get coupling from ocean except the last time step - write(il_out,*)' calling from_ocn at time_sec = ',time_sec - !call ice_timer_start(timer_from_ocn) ! atm/ocn coupling - call from_ocn(rtimestamp_io) !get o2i fields for next IO cpl int - !call ice_timer_stop(timer_from_ocn) ! atm/ocn coupling - - write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',time_sec - call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling - end if - - - ! atm ice coupling time except last step: - if(icpl_ai <= num_cpl_ai .and. mod(time_sec, dt_cpl_ai ) == 0) then - rtimestamp_ai = time_sec - !call ice_timer_start(timer_from_atm) ! atm/ice coupling - call from_atm(rtimestamp_ai) - !call ice_timer_stop(timer_from_atm) ! atm/ice coupling - - !"TTI" approach ice fluxes converted to GBM units - !(topmelt, bototmmelt and surface sublimation) - call atm_icefluxes_back2GBM - -!! !set time averaged ice and ocn variables back to 0 - write(il_out,*)' calling init_mice_fields_4_i2a at time_sec = ',time_sec - call initialize_mice_fields_4_i2a -! call initialize_mocn_fields_4_i2a - end if - - - stimestamp_io = time_sec - - !at the beginning of the run, cice (CICE_init) reads in the required ice fields - !(time averaged over the last coupling interval of previous run), which include - !strocnx/yT, aice, fresh_gbm, fsalt_gbm, fhocn_gbm, fswthru_gbm, sicemass etc. - !(named as mstrocnx/yT, maice, mfresh, mfsalt, mfhocn, mfswthru, msicemass ...) - - !together with the a2i fields (sent from um at the end of previous run) received - !above, the time0 i2o fields can be obtained here - - !if (runtime0 == 0 .and. need_i2o) then - ! write(6,*)'*** CICE: initial run calls get_restart_i2o *** ' - ! write(6,*)'*** CICE: time_sec = ', time_sec - ! write(il_out,*)' calling get_restart_i2o at time_sec = ',time_sec - ! call get_restart_i2o('i2o.nc') - ! need_i2o = .false. - !else - ! write(6,*)'*** CICE: calling get_i2o_fields... ' - ! write(6,*)'*** CICE: time_sec = ', time_sec - ! write(6,*)'*** CICE: calling get_i2o_fields... ' - ! 
write(il_out,*)' calling get_i2o_fields at time_sec = ',time_sec - call get_i2o_fields - !endif - - !shift stresses from T onto U grid before sending into ocn - write(il_out,*)' calling t2ugrid_vector - u/v at time_sec = ', time_sec - call t2ugrid_vector(io_strsu) - call t2ugrid_vector(io_strsv) - - write(il_out,*)' calling into_ocn at time_sec = ', time_sec - !call ice_timer_start(timer_into_ocn) ! atm/ocn coupling - call into_ocn(stimestamp_io) - !call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling - - !at the beginning of the run, cice (CICE_init) reads in the required o2i fields - !(saved from the last timestep of ocean). - - !together with the a2i fields (sent from um at the end of previous run) received - !above, the time0 boundary condition for ice 'internal time loop' is set here - - !-------------------------------------------------------------------------------- - !* This needs be investigated: calling set_sbc_ice outside the itap loop causes - ! cice to crash ('departure error') due probably to "aice" "mismatch?" for each - ! time step in the set_sbc_ice calculation.... (?) - ! We therefore still call "get_sbc_ice" inside the ice time loop (below) - ! - !write(il_out,*)' calling set_sbc_ice at time_sec = ',time_sec - !call set_sbc_ice - !-------------------------------------------------------------------------------- - - !set time averaged ice variables back to 0 - write(il_out,*)' calling init_mice_fields_4_i2o at time_sec = ',time_sec - call initialize_mice_fields_4_i2o - - do itap = 1, num_ice_io ! cice time loop within each i<=>o cpl interval - - !------------------------------------------------------------------------------ - !* see comments above - call get_sbc_ice - !set boundary condition (forcing) for ice time step - !------------------------------------------------------------------------------ - - call ice_step - write(il_out,*)' calling ave_ice_fields_4_i2a at time_sec = ',time_sec - -!======================================= - tmp_time = time_sec + dt - if ( mod(tmp_time, dt_cpl_ai) == 0 ) then !put to atm i step before coupling - write(il_out,*)' calling get_i2a_fields at time_sec = ',time_sec - !call ice_timer_start(timer_into_atm) ! atm/ocn coupling - call get_i2a_fields ! i2a fields ready to be sent for next IA cpl int in atm. - -! if(tmp_time < runtime ) then - ! * because of using lag=+dt_ice, we must take one step off the time_sec - ! * to make the sending happen at right time: - stimestamp_ai = time_sec ! - dt - write(il_out,*)' calling into_atm at time_sec = ',time_sec - call into_atm(stimestamp_ai) - -! !set time averaged ice and ocn variables back to 0 - write(il_out,*)' calling init_mocn_fields_4_i2a at time_sec = ',time_sec - !call initialize_mice_fields_4_i2a - call initialize_mocn_fields_4_i2a -! end if - !call ice_timer_stop(timer_into_atm) ! atm/ocn coupling - end if -!====================================== - - ! note ice_step makes call to time_average_fields_4_i2o - ! and time_average_fields_4_i2a - ! to get time-averaged ice variables required for setting up i2o and i2a cpl fields - - istep = istep + 1 ! update time step counters - istep1 = istep1 + 1 - time = time + dt ! determine the time and date - - time_sec = time_sec + dt - - call calendar(time-runtime0) + !receive a2i fields + rtimestamp_ai = time_sec + !call ice_timer_start(timer_from_atm) ! 
atm/ice coupling + write(il_out,*)' calling from_atm at icpl_ai, time_sec = ', icpl_ai, time_sec + !=========================== + call from_atm(rtimestamp_ai) + !=========================== + !call ice_timer_stop(timer_from_atm) ! atm/ice coupling - !initialize fluxes sent to coupler (WHY should still need do this? CH: NOT needed!) - call init_flux_atm - call init_flux_ocn + !"TTI" approach ice fluxes converted to GBM units + call atm_icefluxes_back2GBM - !CH: should be doing things here - !get_i2o_fields - !get_i2a_fields + do itap = 1, num_ice_ai ! cice time loop + ! Note I <==> O coupling happens at each time step + + stimestamp_io = time_sec - end do !itap + !"combine" a2i fields and ice fields to get i2o fields + call get_i2o_fields - End Do !icpl_io + !shift stresses from T onto U grid before sending into ocn + call t2ugrid_vector(io_strsu) + call t2ugrid_vector(io_strsv) + + write(il_out,'(a,3i10)') & + ' calling into_ocn at icpl_ai, itap, time_sec = ', icpl_ai, itap, time_sec + !call ice_timer_start(timer_into_ocn) ! atm/ocn coupling + !=========================== + call into_ocn(stimestamp_io) + !=========================== + !call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling + + !set boundary condition (forcing) + call get_sbc_ice + + !Debug: 20170825 -- check sbc_ice variables from "get_sbc_ice" + call check_ice_sbc_fields('chk_ice_sbc.nc') + + !Debug: 20170927 -- check the restart fields at the beginning of day 3 + !if (idate == 10103 .and. write_tmp_dump ) then + if (icpl_ai == 17 .and. itap == 1) then + write(il_out,'(a,4i10)') & + ' calling dumpfile at icpl_ai, itap, time_sec, idate = ', icpl_ai, itap, time_sec, idate + call dumpfile + !write_tmp_dump = .false. + endif + + !*** ice "update" ***! + call ice_step + + !Debug: 20170827 -- check updated ice varables after ice_step + call check_ice_fields('chk_ice_fields.nc') + + !time-average ice variables required for setting up i2o and i2a cpl fields + call time_average_fields_4_i2o !actually "instant" ice vairables + call time_average_fields_4_i2a !time averaging over ia cpl interval + + tmp_time = time_sec + dt + if ( mod(tmp_time, dt_cpl_ai) == 0 ) then !this happens at itap = num_ice_ai + !call ice_timer_start(timer_into_atm) + !i2a fields ready to be sent for next IA cpl int in atm. + call get_i2a_fields + + stimestamp_ai = time_sec + + write(il_out,'(a,3i10)') & + ' calling into_atm at icpl_ai, itap, time_sec = ',icpl_ai, itap, time_sec + !=========================== + call into_atm(stimestamp_ai) + !=========================== + + !set time averaged ice and ocn variables back to 0 + call initialize_mocn_fields_4_i2a + call initialize_mice_fields_4_i2a + !call ice_timer_stop(timer_into_atm) ! atm/ocn coupling + endif + + istep = istep + 1 ! update time step counters + istep1 = istep1 + 1 + time = time + dt ! determine the time and date + + time_sec = time_sec + dt + call calendar(time-runtime0) + + !initialize fluxes sent to coupler + !WHY should still need this? CH: NOT needed! ==> but model crashes if NOT! 
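!------------------------------------------------------------------------------
! Editorial sketch of the coupling clock implemented by the loop above: one
! from_atm per atmosphere-ice interval, an inner loop of num_ice_ai ice/ocean
! steps with into_ocn at the start and from_ocn at the end of each step
! (skipped on the very last step of the run), and into_atm fired when
! mod(time_sec+dt, dt_cpl_ai) == 0, i.e. on the last inner step.  The loop
! counts and the step length below are assumed values, not the model's
! configuration.
      program sketch_coupling_clock
        implicit none
        integer, parameter :: dt         = 3600            ! ice step (s), assumed
        integer, parameter :: num_ice_ai = 3               ! ice steps per a2i interval, assumed
        integer, parameter :: dt_cpl_ai  = num_ice_ai*dt   ! a2i coupling interval (s)
        integer, parameter :: num_cpl_ai = 2               ! a2i intervals in the run, assumed
        integer, parameter :: runtime    = num_cpl_ai*dt_cpl_ai
        integer :: icpl_ai, itap, time_sec

        time_sec = 0
        do icpl_ai = 1, num_cpl_ai
           print *, 'from_atm at  ', time_sec
           do itap = 1, num_ice_ai
              print *, '  into_ocn / ice_step at', time_sec
              if (mod(time_sec+dt, dt_cpl_ai) == 0) print *, '  into_atm at ', time_sec
              time_sec = time_sec + dt
              if (time_sec < runtime) print *, '  from_ocn at ', time_sec
           end do
        end do
      end program sketch_coupling_clock
!------------------------------------------------------------------------------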
+ call init_flux_atm + call init_flux_ocn + + rtimestamp_io = time_sec + if (rtimestamp_io < runtime) then + !get o2i fields for next time step ice update + write(il_out,'(a,3i10)') & + ' calling from_ocn at icpl_ai, itap, time_sec = ',icpl_ai, itap, time_sec + !call ice_timer_start(timer_from_ocn) + !=========================== + call from_ocn(rtimestamp_io) + !=========================== + !call ice_timer_stop(timer_from_ocn) + !accumulate/average ocn fields needed for IA coupling + call time_average_ocn_fields_4_i2a + end if + + end do !itap +! +!B: the following part is done in the above itap loop......logically "unnatural"! +! write(il_out,*)' calling get_i2a_fields at time_sec = ',time_sec +! !call ice_timer_start(timer_into_atm) +! !i2a fields ready to be sent for next IA cpl int in atm. +! call get_i2a_fields +! +! !stimestamp_ai = time_sec +! stimestamp_ai = time_sec - dt !??? test new order???! +! +! write(il_out,*)' calling into_atm at icpl_ai, itap, time_sec = ',icpl_ai, itap, time_sec +! call into_atm(stimestamp_ai) +! +! !set time averaged ice and ocn variables back to 0 +! write(il_out,*)' calling init_mocn_fields_4_i2a at time_sec = ',time_sec +! call initialize_mocn_fields_4_i2a +! !call ice_timer_stop(timer_into_atm) ! atm/ocn coupling END DO !icpl_ai ! final update of the stimestamp_io, ie., put back the last dt_cice: stimestamp_io = stimestamp_io + dt - ! *** need save o2i fields here instead of in mom4 *** - !call save_restart_o2i('o2i.nc', stimestamp_io) !it is done in mom4 - ! *** need save the last IO cpl int (time-averaged) ice variables used to get i2o fields *** ! *** at the beginning of next run *** call save_restart_mice('mice.nc',stimestamp_io) @@ -440,16 +387,6 @@ subroutine ice_step call ice_timer_stop(timer_thermo) ! thermodynamics call ice_timer_stop(timer_column) ! column physics -!ars599: 04092014: add in -! not sure should add inside the loop or not? -!ars599: 09052014: move from after line 458 "enddo ! iblk" to here -#ifdef AusCOM - !need some time-mean ice fields - !(so as to get i2o and i2a fields for next coupling interval) - call time_average_fields_4_i2o - call time_average_fields_4_i2a -#endif - !----------------------------------------------------------------- ! write data !----------------------------------------------------------------- diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index 51a9559c..3e66a8de 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ -1,11 +1,6 @@ MODULE cpl_forcing_handler ! -! It contains subroutines handling coupling fields. They are -! -! nullify_i2o_fluxes: -! tavg_i2o_fluxes: -! ............... -! ............... +! It contains subroutines handling coupling fields. ! use ice_blocks use ice_forcing @@ -18,8 +13,6 @@ MODULE cpl_forcing_handler use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd, nt_Tsfc !ice concentration and tracers use ice_state, only: uvel, vvel, vsnon, vicen use ice_gather_scatter -!ars599: 11042014: use all ice_constants -!use ice_constants, only : gravit, Lvap, Lsub use ice_constants use ice_grid, only : tmask, to_ugrid use ice_communicate, only : my_task, master_task @@ -38,7 +31,7 @@ MODULE cpl_forcing_handler contains -!=============================================================================== +!================================================= subroutine get_core_runoff(fname, vname, nrec) ! read in the remapped core runoff data (S.Marsland) which will be used to replace ! 
the ncep2 runoff sent from matm via coupler @@ -66,7 +59,7 @@ subroutine get_core_runoff(fname, vname, nrec) return end subroutine get_core_runoff -!=============================================================================== +!================================================= subroutine get_time0_sstsss(fname, nmonth) ! This routine is to be used only once at the beginning at an exp. @@ -106,7 +99,7 @@ subroutine get_time0_sstsss(fname, nmonth) return end subroutine get_time0_sstsss -!=============================================================================== +!================================================= ! temporary use ... subroutine read_access_a2i_data(fname,nrec,istep) @@ -178,28 +171,12 @@ subroutine read_access_a2i_data(fname,nrec,istep) end subroutine read_access_a2i_data -!============================================================================= +!================================================= subroutine atm_icefluxes_back2GBM !convert the a2i fluxes into GBM units for those that are scaled up in UM !by "/maicen" before being sent to cice [needed for GSI8 TTI approach]. implicit none -!integer :: cat,i,j,k -!do j = 1, ny_block -!do i = 1, nx_block -! do k = 1, nblocks -! do cat = 1, ncat -! um_tmlt(i,j,cat,k) = um_tmlt(i,j,cat,k) * maicen_ia(i,j,cat,k) -! um_bmlt(i,j,cat,k) = um_bmlt(i,j,cat,k) * maicen_ia(i,j,cat,k) -! um_iceevp(i,j,cat,k) = um_iceevp(i,j,cat,k) * maicen_ia(i,j,cat,k) -! enddo -! enddo -!enddo -!enddo - -!um_tmlt(:,:,:,:) = um_tmlt(:,:,:,:) * maicen(:,:,:,:) -!um_bmlt(:,:,:,:) = um_bmlt(:,:,:,:) * maicen(:,:,:,:) -!um_iceevp(:,:,:,:) = um_iceevp(:,:,:,:) * maicen(:,:,:,:) um_tmlt(:,:,:,:) = um_tmlt(:,:,:,:) * maicen_saved(:,:,:,:) um_bmlt(:,:,:,:) = um_bmlt(:,:,:,:) * maicen_saved(:,:,:,:) @@ -207,7 +184,7 @@ subroutine atm_icefluxes_back2GBM end subroutine atm_icefluxes_back2GBM -!============================================================================= +!================================================= subroutine read_restart_i2a(fname, sec) !'i2a.nc', 0) ! read ice to atm coupling fields from restart file, and send to atm module @@ -286,8 +263,7 @@ subroutine read_restart_i2a(fname, sec) !'i2a.nc', 0) endif end subroutine read_restart_i2a - -!============================================================================= +!================================================= subroutine read_restart_i2asum(fname, sec) !'i2a.nc', 0) ! read ice to atm coupling fields from restart file, and send to atm module @@ -368,7 +344,7 @@ subroutine read_restart_i2asum(fname, sec) !'i2a.nc', 0) endif end subroutine read_restart_i2asum -!============================================================================== +!================================================= subroutine put_restart_i2a(fname, sec) ! call this subroutine after called get_restart_oi2 ! it uses ocn_sst etc to calculate average ocn fields which will be used to send @@ -396,7 +372,7 @@ subroutine put_restart_i2a(fname, sec) end subroutine put_restart_i2a -!=============================================================================== +!================================================= subroutine get_restart_o2i(fname) ! To be called at beginning of each run trunk to read in restart o2i fields @@ -438,7 +414,7 @@ subroutine get_restart_o2i(fname) return end subroutine get_restart_o2i -!=============================================================================== +!================================================= subroutine get_restart_mice(fname) ! 
Called at beginning of the run to get 'last' IO cpl int T-M ice variables @@ -458,6 +434,13 @@ subroutine get_restart_mice(fname) write(il_out,*) '(get_restart_mice) reading in mice variables......' endif call ice_open_nc(fname, ncid_o2i) +!B: 20170825 ==> need maicen_saved variables + call ice_read_nc(ncid_o2i, 1, 'maicen1', maicen_saved(:,:,1,:), dbug) + call ice_read_nc(ncid_o2i, 1, 'maicen2', maicen_saved(:,:,2,:), dbug) + call ice_read_nc(ncid_o2i, 1, 'maicen3', maicen_saved(:,:,3,:), dbug) + call ice_read_nc(ncid_o2i, 1, 'maicen4', maicen_saved(:,:,4,:), dbug) + call ice_read_nc(ncid_o2i, 1, 'maicen5', maicen_saved(:,:,5,:), dbug) +!b. call ice_read_nc(ncid_o2i, 1, 'maice', maice, dbug) call ice_read_nc(ncid_o2i, 1, 'mstrocnxT', mstrocnxT, dbug) call ice_read_nc(ncid_o2i, 1, 'mstrocnyT', mstrocnyT, dbug) @@ -482,7 +465,7 @@ subroutine get_restart_mice(fname) return end subroutine get_restart_mice -!=============================================================================== +!================================================= subroutine get_restart_i2o(fname) ! To be called at beginning of each run trunk to read in restart i2o fields @@ -537,7 +520,7 @@ subroutine get_restart_i2o(fname) return end subroutine get_restart_i2o -!=============================================================================== +!================================================= subroutine set_sbc_ice !!NOTE: This routine is NOT used!! ! ! Set coupling fields (in units of GMB, from UM and MOM4) needed for CICE @@ -632,7 +615,7 @@ subroutine set_sbc_ice !!NOTE: This routine is NOT used!! ss_tlty = ocn_ssly !(as per S.O.) make sure Tf if properly initialized -!Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) +Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) ! !B: May use different formula for Tf such as TEOS-10 formulation: ! @@ -643,8 +626,6 @@ subroutine set_sbc_ice !!NOTE: This routine is NOT used!! ! 2.07679e-02) * zzs(:,:,:) - 5.87701e-02 !Tf(:,:,:) = Tf(:,:,:) * sss(:,:,:) ! - 7.53e-4 * 5.0 !!!5.0 is depth in meters -!=============================================================== - end subroutine set_sbc_ice !=============================================================================== @@ -656,15 +637,6 @@ subroutine get_sbc_ice ! for the "nsbc = 5" case. ! ! It should be called after calling "from_atm" and "from_ocn". -! -! *** This routine is used/called within ice time loop (itap) -! *** in case "set_sbc_ice" call (outside the itap loop) fails -! *** which is unfortunately the case the moment (Jan2010) ! -!B: possible reason: "maice" used in set_sbc_ice may be (quite) different from -! the real time aice (used here). E.g., in the case of aice << maice, taux/y -! calculated in set_sbc_ice (i.e., um_taux/y * maice) should be too large for -! a (possibly very) small aice grid, causing huge ice velocity and thus ice -! "departure point error". (June 2016) !------------------------------------------------------------------------------- implicit none @@ -728,7 +700,7 @@ subroutine get_sbc_ice fsnow = max(aice * um_snow,0.0) frain = max(aice * um_rain,0.0) ! -!!! 
XXXXXX: ice surface skin temperature (from UM)-------------------------- +!ice surface skin temperature (from UM)------------------------------------- !see: tsfc_ice definition in sbccpl.F90 at !/short/p66/hxy599/fcm_make_ocean_GC3/extract/nemo/NEMOGCM/NEMO/OPA_SRC/SBC !--------------------------------------------------------------------------- @@ -748,7 +720,6 @@ subroutine get_sbc_ice enddo enddo enddo -!!!------------------------------------------------------------------------------------------ ! Fields from MOM4 (SSU/V and sslx/y are on U points): @@ -794,7 +765,7 @@ subroutine get_sbc_ice ! end subroutine get_sbc_ice -!=============================================================================== +!================================================= subroutine save_restart_o2i(fname, nstep) ! output the last o2i forcing data received in cice by the end of the run, @@ -838,7 +809,7 @@ subroutine save_restart_o2i(fname, nstep) return end subroutine save_restart_o2i -!============================================================================== +!================================================= subroutine save_restart_i2asum(fname, nstep) ! output the last i2a forcing data in cice at the end of the run, ! to be read in at the beginning of next run by cice and sent to atm @@ -966,7 +937,7 @@ subroutine save_restart_i2asum(fname, nstep) end subroutine save_restart_i2asum -!=============================================================================== +!================================================= subroutine save_restart_mice(fname, nstep) ! output ice variable averaged over the last IO cpl int of this run, @@ -985,6 +956,27 @@ subroutine save_restart_mice(fname, nstep) call write_nc_1Dtime(real(nstep), 1, 'time', ncid) endif +!B: 20170825 ==> add maicen_saved for atm_icefluxes_back2GBM calculation! +! note maicen_saved is the last ia interval mean. +vwork(:,:,:) = maicen_saved(:,:,1,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen1', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork(:,:,:) = maicen_saved(:,:,2,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen2', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork(:,:,:) = maicen_saved(:,:,3,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen3', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork(:,:,:) = maicen_saved(:,:,4,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen4', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork(:,:,:) = maicen_saved(:,:,5,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen5', gwork, 2, il_im, il_jm, 1, ilout=il_out) +!b. 
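!------------------------------------------------------------------------------
! Editorial sketch only: the maicen1 ... maicen5 reads and writes above follow
! one gather/write pair per thickness category.  A compact, equivalent way to
! generate the per-category variable names is shown below; ncat_loc and the
! print statement stand in for the real gather_global/write_nc2D calls and are
! assumptions, not the patch's code.
      program sketch_percat_names
        implicit none
        integer, parameter :: ncat_loc = 5
        character (len=8) :: vname
        integer :: n

        do n = 1, ncat_loc
           write(vname, '(a,i1)') 'maicen', n   ! 'maicen1' ... 'maicen5'
           print *, 'gather and write variable ', trim(vname)
        end do
      end program sketch_percat_names
!------------------------------------------------------------------------------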
+ +!The following fields are actually the ice state of last timestep +!(no time-averaging is required in each timestep io coupling, see time_average_fields_4_i2o) vwork = maice call gather_global(gwork, vwork, master_task, distrb_info) if (my_task == 0) call write_nc2D(ncid, 'maice', gwork, 2, il_im, il_jm, 1, ilout=il_out) @@ -1015,7 +1007,7 @@ subroutine save_restart_mice(fname, nstep) return end subroutine save_restart_mice -!=============================================================================== +!================================================= subroutine get_i2a_fields implicit none @@ -1075,7 +1067,7 @@ subroutine get_i2a_fields return end subroutine get_i2a_fields -!=============================================================================== +!================================================= subroutine get_i2o_fields ! All fluxes should be in GBM units before passing into coupler. @@ -1122,18 +1114,11 @@ subroutine get_i2o_fields !(8) latent heat flux (positive out of ocean as required by MOM4) io_qflux = um_evap * Lvap !Note it's already weighted in UM for open sea. -!20101210: NOT sure about the ice weghting in UM, 'cos the ice area does see -! non-zero (positive) evap. -!if (imsk_evap) then -! io_qflux = um_evap * Lvap * (1. - maice) -!endif !(9) sensible heat flux (positive out of ocean as required by MOM4) -!io_shflx = um_shflx * (1. - maice) io_shflx = um_shflx !(10) net long wave radiation positive down -!io_lwflx = um_lwflx * (1. - maice) io_lwflx = um_lwflx !(11) runoff (!check the incoming field! pattern? remapping ok? conserved? ...) @@ -1174,7 +1159,7 @@ subroutine get_i2o_fields return end subroutine get_i2o_fields -!=============================================================================== +!================================================= subroutine initialize_mice_fields_4_i2o implicit none @@ -1191,7 +1176,7 @@ subroutine initialize_mice_fields_4_i2o return end subroutine initialize_mice_fields_4_i2o -!=============================================================================== +!================================================= subroutine initialize_mice_fields_4_i2a implicit none @@ -1213,7 +1198,7 @@ subroutine initialize_mice_fields_4_i2a return end subroutine initialize_mice_fields_4_i2a -!=============================================================================== +!================================================= subroutine initialize_mocn_fields_4_i2a implicit none @@ -1228,93 +1213,51 @@ subroutine initialize_mocn_fields_4_i2a return end subroutine initialize_mocn_fields_4_i2a -!=============================================================================== +!================================================= subroutine time_average_ocn_fields_4_i2a implicit none -msst(:,:,:) = msst(:,:,:) + ocn_sst(:,:,:) * coef_cpl -mssu(:,:,:) = mssu(:,:,:) + ocn_ssu(:,:,:) * coef_cpl -mssv(:,:,:) = mssv(:,:,:) + ocn_ssv(:,:,:) * coef_cpl -mco2(:,:,:) = mco2(:,:,:) + ocn_co2(:,:,:) * coef_cpl -mco2fx(:,:,:) = mco2fx(:,:,:) + ocn_co2fx(:,:,:) * coef_cpl -msstfz(:,:,:) = msstfz(:,:,:) + Tf(:,:,:) * coef_cpl +msst(:,:,:) = msst(:,:,:) + ocn_sst(:,:,:) * coef_ai +mssu(:,:,:) = mssu(:,:,:) + ocn_ssu(:,:,:) * coef_ai +mssv(:,:,:) = mssv(:,:,:) + ocn_ssv(:,:,:) * coef_ai +mco2(:,:,:) = mco2(:,:,:) + ocn_co2(:,:,:) * coef_ai +mco2fx(:,:,:) = mco2fx(:,:,:) + ocn_co2fx(:,:,:) * coef_ai +msstfz(:,:,:) = msstfz(:,:,:) + Tf(:,:,:) * coef_ai return end subroutine time_average_ocn_fields_4_i2a 
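!------------------------------------------------------------------------------
! Editorial sketch of the accumulation used in time_average_ocn_fields_4_i2a
! above: each ice step adds x * coef_ai, and with coef_ai = dt/dt_cpl_ai the
! accumulator equals the time mean of x over one atmosphere-ice coupling
! interval once all num_ice_ai steps have been added.  The interval length and
! sample values below are assumptions for illustration.
      program sketch_running_mean
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        integer, parameter :: num_ice_ai = 4                 ! ice steps per a2i interval, assumed
        real (kind=dbl), parameter :: coef_ai = 0.25_dbl     ! = dt/dt_cpl_ai for 4 steps per interval
        real (kind=dbl) :: x(num_ice_ai) = (/ 1.0_dbl, 2.0_dbl, 3.0_dbl, 4.0_dbl /)
        real (kind=dbl) :: m
        integer :: itap

        m = 0.0_dbl                      ! reset at the start of each interval
        do itap = 1, num_ice_ai
           m = m + x(itap)*coef_ai       ! accumulate, as in the m* fields above
        end do
        print *, 'interval mean =', m    ! 2.5, the arithmetic mean of x
      end program sketch_running_mean
!------------------------------------------------------------------------------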
-!=============================================================================== -!dhb599-20131002: resuming the old 'approach' (used before 20130420) which sets -!do_scale_fluxes = .t. and thus the "flatn_f/Lsub" terms is NOT used as part of -!'fresh' and passed into ocean...since the 'evaporation out of ice surface' is -!going into atmosphere, not supposed to change the ocean water volume! -!------------------------------------------------------------------------------- - subroutine time_average_fields_4_i2o - +!now for each timestep io coupling, so no time-averaging is required. implicit none -maice(:,:,:) = maice(:,:,:) + aice(:,:,:) * coef_io -mstrocnxT(:,:,:) = mstrocnxT(:,:,:) + strocnxT(:,:,:) * coef_io -mstrocnyT(:,:,:) = mstrocnyT(:,:,:) + strocnyT(:,:,:) * coef_io -!20130420: possible bug due to missing term "flatn_f/Lsub" in the last update for fresh -! use scale_fluxes=.f. to avoid flux scaling by /aice -! meaning fluxes are all grid-box-mean by the end of ice_step. -!mfresh(:,:,:) = mfresh(:,:,:) + fresh_gbm(:,:,:) * coef_io -!mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt_gbm(:,:,:) * coef_io -!mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn_gbm(:,:,:) * coef_io -!mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru_gbm(:,:,:) * coef_io -mfresh(:,:,:) = mfresh(:,:,:) + fresh(:,:,:) * coef_io -mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt(:,:,:) * coef_io -mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn(:,:,:) * coef_io -mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru(:,:,:) * coef_io -!--------------------------------------------------------------------------------------- -!--------------------------------------------------------------------------------------- -msicemass(:,:,:) = msicemass(:,:,:) + sicemass(:,:,:) * coef_io +maice(:,:,:) = aice(:,:,:) +mstrocnxT(:,:,:) = strocnxT(:,:,:) +mstrocnyT(:,:,:) = strocnyT(:,:,:) +mfresh(:,:,:) = fresh(:,:,:) +mfsalt(:,:,:) = fsalt(:,:,:) +mfhocn(:,:,:) = fhocn(:,:,:) +mfswthru(:,:,:) = fswthru(:,:,:) +msicemass(:,:,:) = sicemass(:,:,:) return end subroutine time_average_fields_4_i2o -!=============================================================================== -subroutine time_average_fields_4_i2o_20130420 - -implicit none - -maice(:,:,:) = maice(:,:,:) + aice(:,:,:) * coef_io -mstrocnxT(:,:,:) = mstrocnxT(:,:,:) + strocnxT(:,:,:) * coef_io -mstrocnyT(:,:,:) = mstrocnyT(:,:,:) + strocnyT(:,:,:) * coef_io -!20130420: possible bug due to missing term "flatn_f/Lsub" in the last update -!for fresh -! use scale_fluxes=.f. to avoid flux scaling by /aice -! meaning fluxes are all grid-box-mean by the end of ice_step. -!mfresh(:,:,:) = mfresh(:,:,:) + fresh_gbm(:,:,:) * coef_io -!mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt_gbm(:,:,:) * coef_io -!mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn_gbm(:,:,:) * coef_io -!mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru_gbm(:,:,:) * coef_io -mfresh(:,:,:) = mfresh(:,:,:) + fresh(:,:,:) * coef_io -mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt(:,:,:) * coef_io -mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn(:,:,:) * coef_io -mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru(:,:,:) * coef_io -!--------------------------------------------------------------------------------------- -msicemass(:,:,:) = msicemass(:,:,:) + sicemass(:,:,:) * coef_io - -return -end subroutine time_average_fields_4_i2o_20130420 - -!=============================================================================== +!================================================= subroutine time_average_fields_4_i2a implicit none ! 
ice fields: -muvel(:,:,:) = muvel(:,:,:) + uvel(:,:,:) * coef_ia -mvvel(:,:,:) = mvvel(:,:,:) + vvel(:,:,:) * coef_ia -maicen(:,:,:,:) = maicen(:,:,:,:) + aicen(:,:,:,:) * coef_ia !T cat. ice concentration -mthikn(:,:,:,:) = mthikn(:,:,:,:) + vicen(:,:,:,:) * coef_ia !T cat. ice thickness -msnown(:,:,:,:) = msnown(:,:,:,:) + vsnon(:,:,:,:) * coef_ia !T cat. snow thickness +muvel(:,:,:) = muvel(:,:,:) + uvel(:,:,:) * coef_ai +mvvel(:,:,:) = mvvel(:,:,:) + vvel(:,:,:) * coef_ai +maicen(:,:,:,:) = maicen(:,:,:,:) + aicen(:,:,:,:) * coef_ai !T cat. ice concentration +mthikn(:,:,:,:) = mthikn(:,:,:,:) + vicen(:,:,:,:) * coef_ai !T cat. ice thickness +msnown(:,:,:,:) = msnown(:,:,:,:) + vsnon(:,:,:,:) * coef_ai !T cat. snow thickness call to_ugrid(aice, aiiu) -maiu(:,:,:) = maiu(:,:,:) + aiiu(:,:,:) * coef_ia !U cell ice concentraction +maiu(:,:,:) = maiu(:,:,:) + aiiu(:,:,:) * coef_ai !U cell ice concentraction !BX: "First order" ice fraction (mfoifr, below) is required for GSI8 "Time-Travelling Ice" (TTI) ! coupling approach. It may be different than the "normal" ice fraction (maicen, above) if @@ -1323,16 +1266,16 @@ subroutine time_average_fields_4_i2a ! In ACCESS practice, no second order remapping has been appllied to any coupling field, and ! maicen and mfoifr are ALWAYS the same thing. ! We pass both of them to UM for "concictency" (thus keeping UM coupling code intact)! -mfoifr(:,:,:,:) = mfoifr(:,:,:,:) + aicen(:,:,:,:)* coef_ia !==maicen -mitopt(:,:,:,:) = mitopt(:,:,:,:) + Tn_top(:,:,:,:) * coef_ia -mitopk(:,:,:,:) = mitopk(:,:,:,:) + keffn_top(:,:,:,:) * coef_ia -mpndfn(:,:,:,:) = mpndfn(:,:,:,:) + apeffn(:,:,:,:) * coef_ia -mpndtn(:,:,:,:) = mpndtn(:,:,:,:) + trcrn(:,:,nt_hpnd,:,:) * coef_ia +mfoifr(:,:,:,:) = mfoifr(:,:,:,:) + aicen(:,:,:,:)* coef_ai !==maicen +mitopt(:,:,:,:) = mitopt(:,:,:,:) + Tn_top(:,:,:,:) * coef_ai +mitopk(:,:,:,:) = mitopk(:,:,:,:) + keffn_top(:,:,:,:) * coef_ai +mpndfn(:,:,:,:) = mpndfn(:,:,:,:) + apeffn(:,:,:,:) * coef_ai +mpndtn(:,:,:,:) = mpndtn(:,:,:,:) + trcrn(:,:,nt_hpnd,:,:) * coef_ai !add one more a-i interval mean field (integrated ice concentration), which, togthere with maicen, !should be saved at the end of current run for use at the beginning of the continue run (e.g., !converting ice fluxes into GBM. see routines "atm_icefluxes_back2GBM", and "get_sbc_ice")...... 
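!------------------------------------------------------------------------------
! Editorial sketch of the grid-box-mean (GBM) conversion referred to in the
! comment above: a flux defined per unit ice area is weighted by the category
! ice concentration that was valid when the flux was computed (maicen_saved in
! atm_icefluxes_back2GBM) to give a GBM flux, and divided back by the
! concentration where a per-ice-area value is needed.  The numbers and the
! zero guard below are illustrative assumptions.
      program sketch_gbm_conversion
        implicit none
        integer, parameter :: dbl = selected_real_kind(13)
        real (kind=dbl) :: flux_per_ice, aicen_cat, flux_gbm

        flux_per_ice = 20.0_dbl   ! W/m^2 over the ice-covered part of one category
        aicen_cat    = 0.3_dbl    ! category ice concentration seen by the atmosphere

        flux_gbm = flux_per_ice*aicen_cat   ! grid-box-mean flux
        print *, 'GBM flux          =', flux_gbm
        print *, 'per-ice-area flux =', flux_gbm/max(aicen_cat, 1.0e-11_dbl)
      end program sketch_gbm_conversion
!------------------------------------------------------------------------------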
-!maice_ia(:,:,:) = maice_ia(:,:,:) + aice(:,:,:) * coef_ia +!maice_ia(:,:,:) = maice_ia(:,:,:) + aice(:,:,:) * coef_ai !ocn fields: !must be done after calling from_ocn so as to get the most recently updated ocn fields, @@ -1341,7 +1284,7 @@ subroutine time_average_fields_4_i2a return end subroutine time_average_fields_4_i2a -!=============================================================================== +!================================================= subroutine check_i2a_fields(nstep) implicit none @@ -1427,7 +1370,7 @@ subroutine check_i2a_fields(nstep) return end subroutine check_i2a_fields -!============================================================================ +!================================================= subroutine check_a2i_fields(nstep) implicit none @@ -1506,7 +1449,7 @@ subroutine check_a2i_fields(nstep) return end subroutine check_a2i_fields -!============================================================================ +!================================================= subroutine check_i2o_fields(nstep, scale) implicit none @@ -1581,7 +1524,7 @@ subroutine check_i2o_fields(nstep, scale) return end subroutine check_i2o_fields -!============================================================================ +!================================================= subroutine check_o2i_fields(nstep) implicit none @@ -1665,7 +1608,220 @@ subroutine check_frzmlt_sst(ncfilenm) return end subroutine check_frzmlt_sst -!============================================================================ +!================================================= +subroutine check_i2o_uvfluxes(ncfilenm) + +!this is temporarily used to check i2o fields (uflux, vflux and maice) +!for debug purpose + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, maice, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maice', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, um_taux, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'um_taux', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, um_tauy, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'um_tauy', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, mstrocnxT, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnxT', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, mstrocnyT, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnyT', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_i2o_uvfluxes + +!================================================= +subroutine check_ice_fields(ncfilenm) +!this is temporarily used to check ice fields immediately after ice_step +!for debug purpose + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, aice, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'aice', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, aicen(:,:,1,:), master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'aicen1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, aicen(:,:,2,:), master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'aicen2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, aicen(:,:,3,:), master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'aicen3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, aicen(:,:,4,:), master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'aicen4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, aicen(:,:,5,:), master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'aicen5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_ice_fields + +!================================================= +subroutine check_ice_sbc_fields(ncfilenm) + +!this is temporarily used to check ice_sbc fields got from get_ice_sbc +!for debug purpose + +implicit none + +real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: v3d + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +v3d = 0.0 + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, aice, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'aice', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, strax, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'strax', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, stray, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'stray', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +v3d(:,:,:) = flatn_f(:,:,1,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = flatn_f(:,:,2,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = flatn_f(:,:,3,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = flatn_f(:,:,4,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = flatn_f(:,:,5,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +v3d(:,:,:) = fcondtopn_f(:,:,1,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fcondtopn_f(:,:,2,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fcondtopn_f(:,:,3,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fcondtopn_f(:,:,4,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fcondtopn_f(:,:,5,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +v3d(:,:,:) = fsurfn_f(:,:,1,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fsurfn_f(:,:,2,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fsurfn_f(:,:,3,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fsurfn_f(:,:,4,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fsurfn_f(:,:,5,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 
0) call write_nc2D(ncid, 'fsurfn_f5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +call gather_global(gwork, fsnow, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsnow', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, frain, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'frain', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,1,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,2,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,3,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,4,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,5,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +!from ocen: +call gather_global(gwork, frzmlt, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'frzmlt', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, sst, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sst', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, sss, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sss', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, uocn, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'uocn', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, vocn, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'vocn', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +call gather_global(gwork, ss_tltx, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'ss_tltx', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, ss_tlty, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'ss_tlty', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +call gather_global(gwork, Tf, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'Tf', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_ice_sbc_fields + +!================================================= subroutine check_sstsss(ncfilenm) !this is used to check cice sst/sss : temporary use (20091019) @@ -1699,8 +1855,7 @@ subroutine check_sstsss(ncfilenm) return end subroutine check_sstsss - -!============================================================================ +!================================================= function file_exist (file_name) ! 
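! Editorial sketch (not part of the original module): every diagnostic dump in the
! check_* routines above repeats the same two-line pattern -- gather a distributed
! field onto the master task, then write it from task 0.  Using only calls that
! already appear in this file, the pair could be wrapped once, e.g.
!
!   subroutine gather_and_write(ncid, vname, fld, currstep)
!     integer(kind=int_kind), intent(in) :: ncid, currstep
!     character(len=*), intent(in) :: vname
!     real(kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(in) :: fld
!     call gather_global(gwork, fld, master_task, distrb_info)
!     if (my_task == 0) &
!         call write_nc2D(ncid, vname, gwork, 1, il_im, il_jm, currstep, ilout=il_out)
!   end subroutine gather_and_write
!
! so each field dump becomes a single call, e.g.
!   call gather_and_write(ncid, 'aice', aice, currstep)
! The helper name gather_and_write is hypothetical; no such routine exists in this file.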
character(len=*), intent(in) :: file_name @@ -1714,6 +1869,6 @@ function file_exist (file_name) end function file_exist -!============================================================================ +!================================================= end module cpl_forcing_handler diff --git a/drivers/access/cpl_interface.F90_uphalo b/drivers/access/cpl_interface.F90_uphalo index 714ef999..00a8c226 100644 --- a/drivers/access/cpl_interface.F90_uphalo +++ b/drivers/access/cpl_interface.F90_uphalo @@ -919,13 +919,19 @@ endif enddo - !BX: 20160623...... avoid initial "remap transport: bad departure points" (e.g.@(332,776))? - if (isteps == 0) then - um_taux = um_taux * 0.1 - um_tauy = um_tauy * 0.1 - endif - !BX. - +!=========================B: The operation below is ridiculous"====================================== +! it definitely destroys the "reproducibility" when model is run +! with different time segments (e.g., 1 month vs 3 months +! IF really required (see comments below), should do it at the very beginning of the exp. or +! --actually better-- reducing windstress in the preprecessed data "a2i.nc" !!! +! +! !BX: 20160623...... avoid initial "remap transport: bad departure points" (e.g.@(332,776))? +! if (isteps == 0) then +! um_taux = um_taux * 0.1 +! um_tauy = um_tauy * 0.1 +! endif +! !BX. +!---------------------------------------------------------------------------------------------------- call ice_HaloUpdate(um_thflx, halo_info,field_loc_center,field_type_scalar) call ice_HaloUpdate(um_pswflx, halo_info,field_loc_center,field_type_scalar) call ice_HaloUpdate(um_runoff, halo_info,field_loc_center,field_type_scalar) diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index 1ffcf12a..a859da38 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -32,8 +32,7 @@ module cpl_parameters !integer(kind=int_kind) :: il_master=0 ! master_task id integer(kind=int_kind) :: num_cpl_ai ! num of (a2i) cpl periods -integer(kind=int_kind) :: num_cpl_io ! num of (i2o) cpl periods -integer(kind=int_kind) :: num_ice_io ! ice time loop iter num per i2o cpl interval +integer(kind=int_kind) :: num_ice_ai ! ice time loop iter num per a2i cpl interval real(kind=dbl_kind) :: meltlimit = -200. !12/03/2008: set max melt real(kind=dbl_kind) :: ocn_albedo = 0.06 ! for compability with AusCOM @@ -112,9 +111,7 @@ module cpl_parameters chk_o2i_fields integer(kind=int_kind) :: iniday, inimon, iniyear !from inidate -real(kind=dbl_kind) :: coef_io !dt_ice/dt_cpl_io, for i2o fields tavg -real(kind=dbl_kind) :: coef_ia !dt_ice/dt_cpl_ai, for i2a fields tavg -real(kind=dbl_kind) :: coef_cpl !dt_cpl_io/dt_cpl_ai, for ocn fields tavg +real(kind=dbl_kind) :: coef_ai !dt_ice/dt_cpl_ai, for i2a fields tavg real(kind=dbl_kind) :: frazil_factor = 0.5 !frazil_factor is associated with the difference between ocean @@ -137,14 +134,16 @@ subroutine get_cpl_timecontrol_simple open(unit=99,file="input_ice.nml",form="formatted",status="old") read (99, coupling) close(unit=99) + ! 
*** make sure dt_cpl_ai is multiple of dt_cpl_io, and dt_cpl_io if multiple of dt_ice *** + +!hardrwire dt_cpl_io == dt_cice +dt_cpl_io = dt_cice + num_cpl_ai = runtime/dt_cpl_ai -num_cpl_io = dt_cpl_ai/dt_cpl_io -num_ice_io = dt_cpl_io/dt_cice +num_ice_ai = dt_cpl_ai/dt_cice -coef_io = float(dt_cice)/float(dt_cpl_io) -coef_ia = float(dt_cice)/float(dt_cpl_ai) -coef_cpl = float(dt_cpl_io)/float(dt_cpl_ai) +coef_ai = float(dt_cice)/float(dt_cpl_ai) iniday = mod(inidate, 100) inimon = mod( (inidate - iniday)/100, 100) @@ -192,15 +191,15 @@ subroutine get_cpl_timecontrol write(6, *) endif +!hardrwire dt_cpl_io == dt_cice +dt_cpl_io = dt_cice + ! * make sure runtime is mutliple of dt_cpl_ai, dt_cpl_ai is mutliple of dt_cpl_io, ! * and dt_cpl_io is mutliple of dt_cice! num_cpl_ai = runtime/dt_cpl_ai -num_cpl_io = dt_cpl_ai/dt_cpl_io -num_ice_io = dt_cpl_io/dt_cice +num_ice_ai = dt_cpl_ai/dt_cice -coef_io = float(dt_cice)/float(dt_cpl_io) -coef_ia = float(dt_cice)/float(dt_cpl_ai) -coef_cpl = float(dt_cpl_io)/float(dt_cpl_ai) +coef_ai = float(dt_cice)/float(dt_cpl_ai) iniday = mod(inidate, 100) inimon = mod( (inidate - iniday)/100, 100) diff --git a/drivers/access/ice_constants.F90 b/drivers/access/ice_constants.F90 index 4ded4cc6..8699d60d 100644 --- a/drivers/access/ice_constants.F90 +++ b/drivers/access/ice_constants.F90 @@ -21,14 +21,20 @@ module ice_constants real (kind=dbl_kind), parameter, public :: & rhos = 330.0_dbl_kind ,&! density of snow (kg/m^3) rhoi = 917.0_dbl_kind ,&! density of ice (kg/m^3) +!#ifdef AusCOM +! rhow = 1035.0_dbl_kind ,&! density of seawater (kg/m^3) +! !mom uses this value---arguable for sea ice--- +!#else rhow = 1026.0_dbl_kind ,&! density of seawater (kg/m^3) +!#endif cp_air = 1005.0_dbl_kind ,&! specific heat of air (J/kg/K) ! (Briegleb JGR 97 11475-11485 July 1992) emissivity= 0.95_dbl_kind ,&! emissivity of snow and ice cp_ice = 2106._dbl_kind ,&! specific heat of fresh ice (J/kg/K) !ars599: 11042014: add AusCOM #ifdef AusCOM - cp_ocn = 3989._dbl_kind ,&! specific heat of ocn (J/kg/K) +! cp_ocn = 3989._dbl_kind ,&! specific heat of ocn (J/kg/K) + cp_ocn = 3992.10322329649_dbl_kind ! freshwater value needed for enthalpy #else cp_ocn = 4218._dbl_kind ,&! specific heat of ocn (J/kg/K) From 2b434c7f9b2646afa23652ca581ac59f950cab6f Mon Sep 17 00:00:00 2001 From: ars599 Date: Mon, 28 Aug 2017 06:19:50 +0000 Subject: [PATCH 06/52] ukmo:changeset 145,146,163 & 134 git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_GC3_GA7@367 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- mpi/ice_boundary.F90 | 2 +- mpi/ice_gather_scatter.F90 | 4 ++++ source/ice_dyn_evp.F90 | 12 ++++++++---- source/ice_itd.F90 | 2 +- source/ice_shortwave.F90 | 14 ++++++++++++++ 5 files changed, 28 insertions(+), 6 deletions(-) diff --git a/mpi/ice_boundary.F90 b/mpi/ice_boundary.F90 index 7fde84c7..a776197c 100644 --- a/mpi/ice_boundary.F90 +++ b/mpi/ice_boundary.F90 @@ -5415,7 +5415,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & !*** out of range and skipped !*** otherwise do the copy - if (jSrc <= nghost+1) then + if (jSrc <= nghost+1 .AND. jDst /= -1 ) then array1(iDst,jDst,dstBlock) = isign*bufTripoleR8(iSrc,jSrc) endif diff --git a/mpi/ice_gather_scatter.F90 b/mpi/ice_gather_scatter.F90 index baea56a3..cef68f35 100644 --- a/mpi/ice_gather_scatter.F90 +++ b/mpi/ice_gather_scatter.F90 @@ -2201,8 +2201,10 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) ! 
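! Worked example (editorial sketch with assumed numbers) for the simplified time
! control in the cpl_parameters.F90 hunks further up: with dt_cpl_io hardwired to
! dt_cice, only one sub-cycling count and one averaging weight remain.  Taking
! dt_cice = 3600 s, dt_cpl_ai = 10800 s and runtime = 86400 s:
!
!   num_cpl_ai = runtime/dt_cpl_ai                 ! = 8   a2i coupling periods per run
!   num_ice_ai = dt_cpl_ai/dt_cice                 ! = 3   ice steps per a2i period
!   coef_ai    = float(dt_cice)/float(dt_cpl_ai)   ! = 1/3 weight for i2a time averages
!
! The integer divisions are exact only if runtime is a multiple of dt_cpl_ai and
! dt_cpl_ai is a multiple of dt_cice, as the comments in those hunks require.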
interior do j = 1, ny_block do i = 1, nx_block + if(this_block%j_glob(j) > 0) then msg_buffer(i,j) = ARRAY_G(this_block%i_glob(i)+nghost,& this_block%j_glob(j)+nghost) + endif end do end do @@ -2312,8 +2314,10 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) ! interior do j = 1, ny_block do i = 1, nx_block + if(this_block%j_glob(j) > 0) then ARRAY(i,j,dst_block) = ARRAY_G(this_block%i_glob(i)+nghost,& this_block%j_glob(j)+nghost) + endif end do end do diff --git a/source/ice_dyn_evp.F90 b/source/ice_dyn_evp.F90 index 916a074c..c7c4cdee 100755 --- a/source/ice_dyn_evp.F90 +++ b/source/ice_dyn_evp.F90 @@ -407,7 +407,7 @@ subroutine evp (dt) ! Force symmetry across the tripole seam if (trim(grid_type) == 'tripole') then - if (maskhalo_dyn) then + if (maskhalo_dyn) then !------------------------------------------------------- ! set halomask to zero because ice_HaloMask always keeps ! local copies AND tripole zipper communication @@ -443,7 +443,7 @@ subroutine evp (dt) field_loc_center, field_type_scalar) call ice_HaloDestroy(halo_info_mask) - else + else call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info, & @@ -470,7 +470,7 @@ subroutine evp (dt) field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & field_loc_center, field_type_scalar) - endif ! maskhalo + endif ! maskhalo endif ! tripole !----------------------------------------------------------------- @@ -498,8 +498,12 @@ subroutine evp (dt) call u2tgrid_vector(strocnxT) ! shift call u2tgrid_vector(strocnyT) - call ice_timer_stop(timer_dynamics) ! dynamics + call ice_HaloUpdate(strocnxT, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate(strocnyT, halo_info, & + field_loc_center, field_type_vector) + call ice_timer_stop(timer_dynamics) ! dynamics end subroutine evp !======================================================================= diff --git a/source/ice_itd.F90 b/source/ice_itd.F90 index f1b7205c..16d36f43 100755 --- a/source/ice_itd.F90 +++ b/source/ice_itd.F90 @@ -1846,7 +1846,7 @@ subroutine zap_small_areas (nx_block, ny_block, & trcrn ! ice tracers real (kind=dbl_kind), dimension (nx_block,ny_block), & - intent(out) :: & + intent(inout) :: & dfpond , & ! zapped pond water flux (kg/m^2/s) dfresh , & ! zapped fresh water flux (kg/m^2/s) dfsalt , & ! zapped salt flux (kg/m^2/s) diff --git a/source/ice_shortwave.F90 b/source/ice_shortwave.F90 index 52e51597..21e4208c 100755 --- a/source/ice_shortwave.F90 +++ b/source/ice_shortwave.F90 @@ -169,6 +169,7 @@ subroutine init_shortwave use ice_blocks, only: block, get_block use ice_grid, only: tmask, tlat, tlon use ice_meltpond_lvl, only: dhsn, ffracn + use ice_therm_shared, only: calc_Tsfc integer (kind=int_kind) :: & icells ! number of cells with aicen > puny @@ -224,6 +225,17 @@ subroutine init_shortwave enddo ! iblk !$OMP END PARALLEL DO + ! Alex West, March 2017: Because we do not model SW radiation penetrating + ! into ice in the coupled model yet, furthur SW calculations in the + ! initialisation after setting everything to 0 are unnecessary, and + ! may be introducing spurious values. Hence everything from here + ! onwards will be enclosed in a 'if calc_Tsfc' statement (which only + ! evaluates to .true. in the forced model). + ! + ! In the case that penetrating SW radiation is implemented in the coupled + ! model, this control structure may need to be removed. 
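! Net effect of this init_shortwave hunk (illustrative reconstruction only): the
! remaining shortwave initialisation is enclosed in a guard on calc_Tsfc, which is
! imported from ice_therm_shared and evaluates to .true. only in the forced
! (stand-alone) model, so the coupled model skips the block entirely:
!
!   use ice_therm_shared, only: calc_Tsfc
!   ...
!   if (calc_Tsfc) then
!      ! dEdd and default shortwave initialisation, unchanged
!   endif ! calc_Tsfc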
+ + if (calc_Tsfc) then if (trim(shortwave) == 'dEdd') then ! delta Eddington #ifndef CCSMCOUPLED @@ -397,6 +409,8 @@ subroutine init_shortwave enddo ! nblocks !$OMP END PARALLEL DO + endif ! calc_Tsfc + end subroutine init_shortwave !======================================================================= From 2b9eba74cf1690efc9f5d8c29d8aa4ad5b517498 Mon Sep 17 00:00:00 2001 From: pbd562 Date: Thu, 31 Aug 2017 00:18:33 +0000 Subject: [PATCH 07/52] Bug fix in ice constants git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_GC3_GA7@368 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/ice_constants.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/access/ice_constants.F90 b/drivers/access/ice_constants.F90 index 8699d60d..05762746 100644 --- a/drivers/access/ice_constants.F90 +++ b/drivers/access/ice_constants.F90 @@ -34,7 +34,7 @@ module ice_constants !ars599: 11042014: add AusCOM #ifdef AusCOM ! cp_ocn = 3989._dbl_kind ,&! specific heat of ocn (J/kg/K) - cp_ocn = 3992.10322329649_dbl_kind + cp_ocn = 3992.10322329649_dbl_kind ,& ! freshwater value needed for enthalpy #else cp_ocn = 4218._dbl_kind ,&! specific heat of ocn (J/kg/K) From 823a987c7497fff92a76cbadbe4170986693a8c0 Mon Sep 17 00:00:00 2001 From: ars599 Date: Tue, 16 Jan 2018 03:20:41 +0000 Subject: [PATCH 08/52] add dhb599 iceberg scheme git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_GC3_GA7@372 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/CICE_InitMod.F90 | 39 +- drivers/access/CICE_RunMod.F90 | 43 +- drivers/access/cpl_arrays_setup.F90 | 16 +- drivers/access/cpl_forcing_handler.F90 | 355 ++- .../cpl_forcing_handler.F90-save-20170826 | 1919 +++++++++++++++++ drivers/access/cpl_interface.F90_uphalo | 49 +- drivers/access/cpl_parameters.F90 | 10 +- drivers/access/ice_constants.F90 | 13 +- source/ice_read_write.F90 | 4 +- 9 files changed, 2369 insertions(+), 79 deletions(-) create mode 100644 drivers/access/cpl_forcing_handler.F90-save-20170826 diff --git a/drivers/access/CICE_InitMod.F90 b/drivers/access/CICE_InitMod.F90 index a4aa850a..e4169b8b 100644 --- a/drivers/access/CICE_InitMod.F90 +++ b/drivers/access/CICE_InitMod.F90 @@ -270,28 +270,41 @@ subroutine cice_init !for continue runs, mice data MUST be available. call get_restart_mice(trim(restartdir)//'/mice.nc') else - write(6,*)'*** CICE WARNING: No initial mice.nc data available here! **' - write(6,*)'*** CICE WARNING: ALL mice variables will be set to ZERO! **' - write(6,*)'*** CICE WARNING: This is allowed for the init run ONLY ! **' + write(6,*)'* WARNING: No initial mice.nc data available here! *' + write(6,*)'* WARNING: ALL mice variables will be set to ZERO! *' + write(6,*)'* WARNING: This is allowed for the init run ONLY ! *' endif if (use_core_runoff) then call get_core_runoff(trim(inputdir)//'/core_runoff_regrid.nc',& 'runoff',1) endif - write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',0 !time_sec - call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling + write(il_out,*)' calling ave_ocn_fields_4_i2a time_sec = ',0 !time_sec + call time_average_ocn_fields_4_i2a + !accumulate/average ocn fields needed for IA coupling - !get a2i fields and then set up initial SBC for ice - !call from_atm(0) - !call get_sbc_ice - !now "most of" the IC of this run are 'proper' for "call ice_write_hist" #endif -!ars599: 11042014: ice_write_hist is no longer there now change to accum_hist -! 
so wrapup this line n markout -!dhb599: 20111128: the following call is moved 'upstair' -! if (write_ic) call accum_hist(dt) ! write initial conditions +!20171024: read in mask for land ice discharge into ocean off Antarctica and Greenland. +#ifdef ACCESS + !!! options for land ice discharged as iceberg melting around AA and Gnld + ! 0: "even" distribution as for u-ar676; + ! 1: use AC2 data but GC3.1 iceberg climatological pattern, each month takes the + ! total discharge as that diagnosed in u-ar676 (yrs2-101); + ! 2: use GC3 iceberg climatological pattern, each month enhanced by ac2/gc3 annual ratio + ! of land ice discharge to make sure the annual total discharge is same as case 1; + ! 3: as case 1 but use annual mean + ! 4: as case 2 but use annual mean + !!! Note 3 and 4 are similar but NOT the same; 1-4 cases should have idential annual + !!! discharge of land ice (as iceberg) into ocean. + + if ( file_exist(trim(inputdir)//'/lice_discharge_masks_iceberg.nc') ) then + call get_lice_discharge_masks_or_iceberg(trim(inputdir)//'/lice_discharge_masks_iceberg.nc') + else + write(6,*)'* CICE stopped -- land ice discharge masks and iceberg datafile missing.*' + call abort_ice ('ice: land ice discharge masks and iceberg datafile missing!') + endif +#endif end subroutine cice_init diff --git a/drivers/access/CICE_RunMod.F90 b/drivers/access/CICE_RunMod.F90 index 6fd8fd2f..69323408 100644 --- a/drivers/access/CICE_RunMod.F90 +++ b/drivers/access/CICE_RunMod.F90 @@ -117,6 +117,7 @@ subroutine CICE_Run ' calling into_ocn at icpl_ai, itap, time_sec = ', icpl_ai, itap, time_sec !call ice_timer_start(timer_into_ocn) ! atm/ocn coupling !=========================== + !call check_iceberg_fields('chk_iceberg_i2o.nc') call into_ocn(stimestamp_io) !=========================== !call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling @@ -125,22 +126,20 @@ subroutine CICE_Run call get_sbc_ice !Debug: 20170825 -- check sbc_ice variables from "get_sbc_ice" - call check_ice_sbc_fields('chk_ice_sbc.nc') + !call check_ice_sbc_fields('chk_ice_sbc.nc') !Debug: 20170927 -- check the restart fields at the beginning of day 3 - !if (idate == 10103 .and. write_tmp_dump ) then - if (icpl_ai == 17 .and. itap == 1) then - write(il_out,'(a,4i10)') & - ' calling dumpfile at icpl_ai, itap, time_sec, idate = ', icpl_ai, itap, time_sec, idate - call dumpfile - !write_tmp_dump = .false. - endif + !if (icpl_ai == 17 .and. itap == 1) then + ! write(il_out,'(a,4i10)') & + ! ' calling dumpfile at icpl_ai, itap, time_sec, idate = ', icpl_ai, itap, time_sec, idate + ! call dumpfile + !endif !*** ice "update" ***! call ice_step !Debug: 20170827 -- check updated ice varables after ice_step - call check_ice_fields('chk_ice_fields.nc') + !call check_ice_fields('chk_ice_fields.nc') !time-average ice variables required for setting up i2o and i2a cpl fields call time_average_fields_4_i2o !actually "instant" ice vairables @@ -193,23 +192,15 @@ subroutine CICE_Run end if end do !itap -! -!B: the following part is done in the above itap loop......logically "unnatural"! -! write(il_out,*)' calling get_i2a_fields at time_sec = ',time_sec -! !call ice_timer_start(timer_into_atm) -! !i2a fields ready to be sent for next IA cpl int in atm. -! call get_i2a_fields -! -! !stimestamp_ai = time_sec -! stimestamp_ai = time_sec - dt !??? test new order???! -! -! write(il_out,*)' calling into_atm at icpl_ai, itap, time_sec = ',icpl_ai, itap, time_sec -! call into_atm(stimestamp_ai) -! -! !set time averaged ice and ocn variables back to 0 -! 
write(il_out,*)' calling init_mocn_fields_4_i2a at time_sec = ',time_sec -! call initialize_mocn_fields_4_i2a -! !call ice_timer_stop(timer_into_atm) ! atm/ocn coupling + + !reset land ice amount lice_nth and lice_sth for "previous" a2i step: + + !debug: check landice fields---- + !call check_landice_fields_1('chk_lice_fields_ai.nc') + + lice_nth = um_icenth + lice_sth = um_icesth + newstep_ai = .true. END DO !icpl_ai diff --git a/drivers/access/cpl_arrays_setup.F90 b/drivers/access/cpl_arrays_setup.F90 index 1597d57f..1294ecb3 100644 --- a/drivers/access/cpl_arrays_setup.F90 +++ b/drivers/access/cpl_arrays_setup.F90 @@ -92,10 +92,16 @@ module cpl_arrays_setup !(16) co2 io_co2 !(17) wind speed io_wnd ! +! 2 more added for "iceberg melt" (induced from land ice change): +! +!(18) iceberg melt waterflux io_licefw +!(19) iceberg melt heatflux io_liceht +! ! Therefore, currently we have ! ! *for ACCESS1.x, 31 in, 33 out => thus jpfldout=33, jpfldin=31 in cpl_parameters. ! for ACCESS-CM2, 47 in, 63 out => thus jpfldout=63, jpfldin=47 in cpl_parameters. +! now (20171024) 47 in, 65 out 65 47 !---------------------------------------------------------------------------------- ! This module will be largely modified/'simplifed' after ACCESS works ! !============================================================================ @@ -111,7 +117,9 @@ module cpl_arrays_setup um_thflx, um_pswflx, um_runoff, um_wme, um_snow, um_rain, & um_evap, um_lhflx, um_taux, um_tauy, & um_swflx, um_lwflx, um_shflx, um_press,um_co2, um_wnd, & - um_icenth, um_icesth + um_icenth, um_icesth, & + !!20171024 added for calculation of land ice increment + lice_nth, lice_sth, msk_nth, msk_sth, amsk_nth, amsk_sth real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & um_tmlt, um_bmlt, um_tsfice, um_iceevp @@ -140,7 +148,8 @@ module cpl_arrays_setup real(kind=dbl_kind),dimension(:,:,:), allocatable :: & !to ocn (time averaged) io_strsu, io_strsv, io_rain, io_snow, io_stflx, io_htflx, io_swflx, & io_qflux, io_shflx, io_lwflx, io_runof, io_press, io_aice, & - io_melt, io_form, io_co2, io_wnd + io_melt, io_form, io_co2, io_wnd, & + io_licefw, io_liceht !2 more added 20171024 ! 
Temporary arrays !================== @@ -171,6 +180,9 @@ module cpl_arrays_setup real(kind=dbl_kind),dimension(:,:,:), allocatable :: & sicemass !ice mass +real(kind=dbl_kind),dimension(:,:,:,:), allocatable :: & + icebergfw !land ice discharge into ocean as monthly iceberg melt waterflux ==>io_licefw + !=========================================================================== end module cpl_arrays_setup diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index 3e66a8de..8569e583 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ -24,6 +24,8 @@ MODULE cpl_forcing_handler use cpl_netcdf_setup use cpl_arrays_setup +use ice_calendar, only: month + implicit none real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & @@ -449,6 +451,9 @@ subroutine get_restart_mice(fname) call ice_read_nc(ncid_o2i, 1, 'mfhocn', mfhocn, dbug) call ice_read_nc(ncid_o2i, 1, 'mfswthru', mfswthru, dbug) call ice_read_nc(ncid_o2i, 1, 'msicemass', msicemass, dbug) +!20171024: added 2 more: + call ice_read_nc(ncid_o2i, 1, 'lice_sth', lice_sth, dbug) + call ice_read_nc(ncid_o2i, 1, 'lice_nth', lice_nth, dbug) if (my_task == master_task) then call ice_close_nc(ncid_o2i) @@ -465,6 +470,134 @@ subroutine get_restart_mice(fname) return end subroutine get_restart_mice +!================================================= +subroutine get_lice_discharge_masks_or_iceberg(fname) + +! Called at beginning of each run trunk to read in land ice discharge mask or iceberg +! (off Antarctica and Greenland). + +implicit none + +character(len=*), intent(in) :: fname +character*80 :: myvar = 'ficeberg' +integer(kind=int_kind) :: ncid_i2o, im, k +logical :: dbug +!!! +!character(:), allocatable :: fname_trim +!!! + +dbug = .true. + +!!! +!fname_trim = trim(fname) +!!! +if (my_task == 0) write(*,'(a,a)'),'BBB1: opening file ',fname +if (my_task == 0) write(*,'(a,a)'),'BBB2: opening file ',trim(fname) + +call ice_open_nc(trim(fname), ncid_i2o) +!call ice_open_nc(fname_trim, ncid_i2o) +!deallocate(fname_trim) + +if (iceberg == 0) then + if (my_task==0) then + write(il_out,*) '(get_lice_discharge_masks_or_iceberg) reading in lice_mask and total areas......' + endif + call ice_read_nc(ncid_i2o, 1, 'msk_nth', msk_nth, dbug) + call ice_read_nc(ncid_i2o, 1, 'msk_sth', msk_sth, dbug) + call ice_read_nc(ncid_i2o, 1, 'amsk_nth', amsk_nth, dbug) + call ice_read_nc(ncid_i2o, 1, 'amsk_sth', amsk_sth, dbug) +else + select case (iceberg) + case (1); myvar = 'FICEBERG_AC2' + case (2); myvar = 'FICEBERG_GC3' + case (3); myvar = 'FICEBERG_AC2_AVE' + case (4); myvar = 'FICEBERG_GC3_AVE' + end select + if (my_task==0) then + write(il_out,'(a,a)') '(get_lice_discharge_masks_or_iceberg) reading in iceberg data, myvar= ',trim(myvar) + endif + do im = 1, 12 + write(il_out,*) '(get_lice_discharge_masks_or_iceberg) reading in data, month= ',im + !call ice_read_nc(ncid_i2o, im, trim(myvar), icebergfw(:,:,im,:), dbug) + call ice_read_nc(ncid_i2o, im, trim(myvar), vwork, dbug) + icebergfw(:,:,im,:) = vwork(:,:,:) + enddo + +!call check_iceberg_reading('chk_iceberg_readin.nc') +!above call results segmentation fault !?! + +endif +if (my_task == master_task) then + call ice_close_nc(ncid_i2o) + write(il_out,*) '(get_lice_discharge_masks_or_iceberg) reading completed!' +endif + +return +end subroutine get_lice_discharge_masks_or_iceberg + +!================================================= +subroutine get_iceberg_distribution(fname) !, mychoice) + +! 
This routine is called at beginning of each job. + +implicit none + +character*(*), intent(in) :: fname +!integer(kind=int_kind), intent(in) :: mychoice !iceberg distribution option (1,2,3,4) +logical :: dbug +integer(kind=int_kind) :: ncid, im + +dbug = .true. +!dbug = .false. + +IF (file_exist(fname)) THEN + +if (my_task==0) then + write(*,*) '(get_iceberg_distribution) opening ncfile: ',fname + write(il_out,*) '(get_iceberg_distribution) opening ncfile: ',fname +endif + +call ice_open_nc(trim(fname), ncid) +if (my_task==0) then + write(*,*) '(get_iceberg_distribution) reading in iceberg data, option: ',iceberg !mychoice + write(il_out,'(a,a)') '(get_iceberg_distribution) reading in iceberg data, option: ',iceberg !mychoice +endif +!!if (mychoice == 1) then +if (iceberg == 1) then + do im = 1, 12 + call ice_read_nc(ncid, im, 'FICEBERG_AC2', icebergfw(:,:,im,:), dbug) + enddo +!!else if (mychoice == 2) then +else if (iceberg == 2) then + do im = 1, 12 + call ice_read_nc(ncid, im, 'FICEBERG_GC3', icebergfw(:,:,im,:), dbug) + enddo +!!else if (mychoice == 3) then +else if (iceberg == 3) then + do im = 1, 12 + !set monthly to be annual mean: + call ice_read_nc(ncid, 1, 'FICEBERG_AC2_AVE', icebergfw(:,:,im,:), dbug) + enddo +else if (iceberg == 4) then + do im = 1, 12 + !set monthly to be annual mean: + call ice_read_nc(ncid, 1, 'FICEBERG_GC3_AVE', icebergfw(:,:,im,:), dbug) + enddo +endif + +if (my_task == master_task) call ice_close_nc(ncid) + +ELSE + +write(6,'(a,a)')'CICE stopped -- iceberg data missing ----> ', fname +write(il_out,'(a,a)')'CICE stopped -- iceberg data missing ----> ', fname +call abort_ice ('ice: iceberg data missing!') + +ENDIF + +return +end subroutine get_iceberg_distribution + !================================================= subroutine get_restart_i2o(fname) @@ -503,6 +636,9 @@ subroutine get_restart_i2o(fname) case ('form_io'); io_form = vwork case ('co2_i1'); io_co2 = vwork case ('wnd_i1'); io_wnd = vwork +!2 more added 20171024: + case ('lice_fw'); io_licefw = vwork + case ('lice_ht'); io_liceht = vwork end select enddo if (my_task == master_task) then @@ -1001,6 +1137,13 @@ subroutine save_restart_mice(fname, nstep) vwork = msicemass call gather_global(gwork, vwork, master_task, distrb_info) if (my_task == 0) call write_nc2D(ncid, 'msicemass', gwork, 2, il_im, il_jm, 1, ilout=il_out) +!2 more added 20171024 for calculation of N/S land ice increase between a2i cpl interval: +vwork = um_icenth +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'lice_nth', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = um_icesth +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'lice_sth', gwork, 2, il_im, il_jm, 1, ilout=il_out) if (my_task == 0) call ncheck( nf_close(ncid) ) @@ -1010,8 +1153,6 @@ end subroutine save_restart_mice !================================================= subroutine get_i2a_fields -implicit none - ! all fields (except for vector) obtained here are all on T cell center !(1) ocean surface temperature @@ -1074,8 +1215,8 @@ subroutine get_i2o_fields ! e.g., io_htflx(:,:,:) = fhocn_gbm(:,:,:) implicit none - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: pice +integer (kind=int_kind) :: i,j,k ! Fields obtained here are all at T cell center. before being sent to MOM4, vector ! 
(Taux, Tauy) should be shifted on to U point as required @@ -1153,9 +1294,27 @@ subroutine get_i2o_fields !(15) ice form fwflux io_form = min(0.0,mfresh(:,:,:)) +!(16) CO2 io_co2 = um_co2 +!(17) 10m winnspeed io_wnd = um_wnd +!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +!2 more flux items induced by land ice "increment" discharged into ocean +!(18) water flux due to land ice melt off Antarctica and Greenland (kg/m^2/s) +!(19) heat flux due to land ice melt off Antarctica and Greenland +! +!Note there are 5 options for the land ice discharge as icebgerg melt waterflux! + +IF (iceberg == 0) THEN + io_licefw = (max(0.0, um_icenth - lice_nth) * msk_nth/amsk_nth + & + max(0.0, um_icesth - lice_sth) * msk_sth/amsk_sth)/dt_cpl_ai +ELSE !case 1, 2, 3 ,4: + io_licefw(:,:,:) = icebergfw(:,:,month,:) +ENDIF + +io_liceht = - io_licefw * Lfresh !?! (W/m^2) + return end subroutine get_i2o_fields @@ -1855,6 +2014,196 @@ subroutine check_sstsss(ncfilenm) return end subroutine check_sstsss +!================================================= +subroutine check_iceberg_fields(ncfilenm) + +!this is used to check land ice fields + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, io_licefw, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'io_licefw', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, io_liceht, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'io_liceht', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return + +end subroutine check_iceberg_fields +!================================================= +subroutine check_iceberg_reading(ncfilenm) + +!this is used to check land ice fields read in + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +vwork(:,:,:) = icebergfw(:,:,1,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm01', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,2,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm02', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,3,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm03', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,4,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm04', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,5,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm05', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,6,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm06', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,7,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm07', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,8,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm08', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,9,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm09', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,10,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm10', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,11,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm11', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +vwork(:,:,:) = icebergfw(:,:,12,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'icebergfm12', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return + +end subroutine check_iceberg_reading + +!================================================= +subroutine check_landice_fields_1(ncfilenm) + +!this is used to check land ice fields + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, lice_sth, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'lice_sth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, lice_nth, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'lice_nth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, um_icesth, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'um_icesth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, um_icenth, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'um_icenth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return + +end subroutine check_landice_fields_1 + +!================================================= +subroutine check_landice_fields_2(ncfilenm) + +!this is used to check land ice fields + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, lice_sth, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'lice_sth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, lice_nth, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'lice_nth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, um_icesth, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'um_icesth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, um_icenth, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'um_icenth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return + +end subroutine check_landice_fields_2 + !================================================= function file_exist (file_name) ! diff --git a/drivers/access/cpl_forcing_handler.F90-save-20170826 b/drivers/access/cpl_forcing_handler.F90-save-20170826 new file mode 100644 index 00000000..fbc2a9d5 --- /dev/null +++ b/drivers/access/cpl_forcing_handler.F90-save-20170826 @@ -0,0 +1,1919 @@ +MODULE cpl_forcing_handler +! +! It contains subroutines handling coupling fields. They are +! +! nullify_i2o_fluxes: +! tavg_i2o_fluxes: +! ............... +! ............... +! +use ice_blocks +use ice_forcing +use ice_read_write +use ice_domain_size +use ice_domain, only : distrb_info, nblocks +use ice_flux !forcing data definition (Tair, Qa, uocn, etc.) 
+ !Tn_top, keffn_top ...(for multilayer configuration) +!use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd !ice concentration and tracers +use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd, nt_Tsfc !ice concentration and tracers +use ice_state, only: uvel, vvel, vsnon, vicen +use ice_gather_scatter +!ars599: 11042014: use all ice_constants +!use ice_constants, only : gravit, Lvap, Lsub +use ice_constants +use ice_grid, only : tmask, to_ugrid +use ice_communicate, only : my_task, master_task +!use ice_ocean, only : cprho +use ice_exit, only : abort_ice +use ice_shortwave, only : apeffn + +use cpl_parameters +use cpl_netcdf_setup +use cpl_arrays_setup + +implicit none + +real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + aiiu ! ice fraction on u-grid + +contains + +!=============================================================================== +subroutine get_core_runoff(fname, vname, nrec) +! read in the remapped core runoff data (S.Marsland) which will be used to replace +! the ncep2 runoff sent from matm via coupler + +implicit none + +character*(*), intent(in) :: fname, vname +integer(kind=int_kind), intent(in) :: nrec +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. + +if ( file_exist(fname) ) then + call ice_open_nc(fname, ncid) + call ice_read_global_nc(ncid, nrec, vname, gwork, dbug) + call scatter_global(core_runoff, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task == 0) write(il_out,*) '(get_core_runoff) file doesnt exist: ', fname + stop 'CICE stopped: core runoff (remapped) file not found.' +endif + +return +end subroutine get_core_runoff + +!=============================================================================== +subroutine get_time0_sstsss(fname, nmonth) + +! This routine is to be used only once at the beginning at an exp. + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nmonth +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. +!dbug = .false. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) opening ncfile: ',fname + endif + call ice_open_nc(fname, ncid) + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) reading in initial SST...' + endif + call ice_read_nc(ncid, nmonth, 'TEMP', sst, dbug) + call gather_global(gwork, sst, master_task, distrb_info) + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) reading in initial SSS...' + endif + call ice_read_nc(ncid, nmonth, 'SALT', sss, dbug) + call gather_global(gwork, sss, master_task, distrb_info) + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task==0) then + write(il_out,*) '(get_time0_sstsss) file doesnt exist: ', fname + endif + call abort_ice('CICE stopped--initial SST and SSS ncfile not found.') +endif + +return +end subroutine get_time0_sstsss + +!=============================================================================== +! temporary use ... +subroutine read_access_a2i_data(fname,nrec,istep) + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nrec,istep +logical :: dbug +integer(kind=int_kind) :: ncid + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_access_a2i_data) opening ncfile: ',fname + endif + call ice_open_nc(fname, ncid) + if (my_task==0) then + write(il_out,*) '(read_access_a2i_data) reading a2i forcing data...' 
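! Sketch only (assumption, not present in this file): the category-by-category
! reads that follow (tmlt01_i ... tmlt05_i, bmlt01_i ... bmlt05_i, tsfice01 ...,
! iceevp01 ...) repeat one call per thickness category.  The same reads could be
! generated with a constructed variable name, using the ice_read_nc interface
! exactly as it is called below:
!
!   character(len=16) :: vname
!   integer(kind=int_kind) :: n
!   do n = 1, ncat
!      write(vname,'(a,i2.2,a)') 'tmlt', n, '_i'
!      call ice_read_nc(ncid, nrec, trim(vname), um_tmlt(:,:,n,:), dbug)
!   enddo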
+ endif + call ice_read_nc(ncid, nrec, 'thflx_i', um_thflx, dbug) + call ice_read_nc(ncid, nrec, 'pswflx_i', um_pswflx, dbug) + call ice_read_nc(ncid, nrec, 'runoff_i', um_runoff, dbug) + call ice_read_nc(ncid, nrec, 'wme_i', um_wme, dbug) + call ice_read_nc(ncid, nrec, 'rain_i', um_rain, dbug) + call ice_read_nc(ncid, nrec, 'snow_i', um_snow, dbug) + call ice_read_nc(ncid, nrec, 'evap_i', um_evap, dbug) + call ice_read_nc(ncid, nrec, 'lhflx_i', um_lhflx, dbug) + call ice_read_nc(ncid, nrec, 'tmlt01_i', um_tmlt(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt02_i', um_tmlt(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt03_i', um_tmlt(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt04_i', um_tmlt(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'tmlt05_i', um_tmlt(:,:,5,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt01_i', um_bmlt(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt02_i', um_bmlt(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt03_i', um_bmlt(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt04_i', um_bmlt(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'bmlt05_i', um_bmlt(:,:,5,:), dbug) + call ice_read_nc(ncid, nrec, 'taux_i', um_taux, dbug) + call ice_read_nc(ncid, nrec, 'tauy_i', um_tauy, dbug) + call ice_read_nc(ncid, nrec, 'swflx_i', um_swflx, dbug) + call ice_read_nc(ncid, nrec, 'lwflx_i', um_lwflx, dbug) + call ice_read_nc(ncid, nrec, 'shflx_i', um_shflx, dbug) + call ice_read_nc(ncid, nrec, 'press_i', um_press, dbug) + call ice_read_nc(ncid, nrec, 'co2_ai', um_co2, dbug) + call ice_read_nc(ncid, nrec, 'wnd_ai', um_wnd, dbug) +! + call ice_read_nc(ncid, nrec, 'icenth_i', um_icenth, dbug) + call ice_read_nc(ncid, nrec, 'icesth_i', um_icesth, dbug) + call ice_read_nc(ncid, nrec, 'tsfice01', um_tsfice(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'tsfice02', um_tsfice(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'tsfice03', um_tsfice(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'tsfice04', um_tsfice(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'tsfice05', um_tsfice(:,:,5,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp01', um_iceevp(:,:,1,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp02', um_iceevp(:,:,2,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp03', um_iceevp(:,:,3,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp04', um_iceevp(:,:,4,:), dbug) + call ice_read_nc(ncid, nrec, 'iceevp05', um_iceevp(:,:,5,:), dbug) + + if (my_task == master_task) call ice_close_nc(ncid) +else + if (my_task==0) then + write(il_out,*) '(ed_access_a2i_data file doesnt exist: ', fname + endif + call abort_ice('CICE stopped--ACCESS fields_a2i ncfile not found.') +endif + +call check_a2i_fields(istep) + +end subroutine read_access_a2i_data + +!============================================================================= +subroutine atm_icefluxes_back2GBM +!convert the a2i fluxes into GBM units for those that are scaled up in UM +!by "/maicen" before being sent to cice [needed for GSI8 TTI approach]. + +implicit none +!integer :: cat,i,j,k +!do j = 1, ny_block +!do i = 1, nx_block +! do k = 1, nblocks +! do cat = 1, ncat +! um_tmlt(i,j,cat,k) = um_tmlt(i,j,cat,k) * maicen_ia(i,j,cat,k) +! um_bmlt(i,j,cat,k) = um_bmlt(i,j,cat,k) * maicen_ia(i,j,cat,k) +! um_iceevp(i,j,cat,k) = um_iceevp(i,j,cat,k) * maicen_ia(i,j,cat,k) +! enddo +! 
enddo +!enddo +!enddo + +!um_tmlt(:,:,:,:) = um_tmlt(:,:,:,:) * maicen(:,:,:,:) +!um_bmlt(:,:,:,:) = um_bmlt(:,:,:,:) * maicen(:,:,:,:) +!um_iceevp(:,:,:,:) = um_iceevp(:,:,:,:) * maicen(:,:,:,:) + +um_tmlt(:,:,:,:) = um_tmlt(:,:,:,:) * maicen_saved(:,:,:,:) +um_bmlt(:,:,:,:) = um_bmlt(:,:,:,:) * maicen_saved(:,:,:,:) +um_iceevp(:,:,:,:) = um_iceevp(:,:,:,:) * maicen_saved(:,:,:,:) + +end subroutine atm_icefluxes_back2GBM + +!============================================================================= +subroutine read_restart_i2a(fname, sec) !'i2a.nc', 0) + +! read ice to atm coupling fields from restart file, and send to atm module + +implicit none +character*(*), intent(in) :: fname +integer :: sec + +integer(kind=int_kind) :: ncid +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_restart_i2a) reading in i2a fields......' + endif + call ice_open_nc(fname, ncid) + call ice_read_nc(ncid, 1, 'icecon01', ia_aicen(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'icecon02', ia_aicen(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'icecon03', ia_aicen(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'icecon04', ia_aicen(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'icecon05', ia_aicen(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk01', ia_snown(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk02', ia_snown(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk03', ia_snown(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk04', ia_snown(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'snwthk05', ia_snown(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'icethk01', ia_thikn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'icethk02', ia_thikn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'icethk03', ia_thikn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'icethk04', ia_thikn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'icethk05', ia_thikn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'isst_ia', ia_sst, dbug) + call ice_read_nc(ncid, 1, 'uvel_ia', ia_uvel, dbug) + call ice_read_nc(ncid, 1, 'vvel_ia', ia_vvel, dbug) + call ice_read_nc(ncid, 1, 'co2_i2', ia_co2, dbug) + call ice_read_nc(ncid, 1, 'co2fx_i2', ia_co2fx, dbug) + call ice_read_nc(ncid, 1, 'sstfz_ia', ia_sstfz, dbug) + call ice_read_nc(ncid, 1, 'foifr01', ia_foifr(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'foifr02', ia_foifr(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'foifr03', ia_foifr(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'foifr04', ia_foifr(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'foifr05', ia_foifr(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'itopt01', ia_itopt(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'itopt02', ia_itopt(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'itopt03', ia_itopt(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'itopt04', ia_itopt(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'itopt05', ia_itopt(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'itopk01', ia_itopk(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'itopk02', ia_itopk(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'itopk03', ia_itopk(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'itopk04', ia_itopk(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'itopk05', ia_itopk(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn01', ia_pndfn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn02', ia_pndfn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn03', ia_pndfn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn04', ia_pndfn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'pndfn05', ia_pndfn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 
'pndtn01', ia_pndtn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'pndtn02', ia_pndtn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'pndtn03', ia_pndtn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'pndtn04', ia_pndtn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'pndtn05', ia_pndtn(:,:,5,:), dbug) + + if (my_task == master_task) then + call ice_close_nc(ncid) + write(il_out,*) '(read_restart_i2a) has read in 18 i2a fields.' + endif + +else + if (my_task==0) then + write(il_out,*) 'ERROR: (read_restart_i2a) not found file *** ',fname + endif + print *, 'CICE: (read_restart_i2a) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2a data file.') +endif +end subroutine read_restart_i2a + + +!============================================================================= +subroutine read_restart_i2asum(fname, sec) !'i2a.nc', 0) + +! read ice to atm coupling fields from restart file, and send to atm module + +implicit none +character*(*), intent(in) :: fname +integer :: sec + +integer(kind=int_kind) :: ncid +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(read_restart_i2asum) reading in i2a fields......' + endif + call ice_open_nc(fname, ncid) + call ice_read_nc(ncid, 1, 'maicen1', maicen(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'maicen2', maicen(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'maicen3', maicen(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'maicen4', maicen(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'maicen5', maicen(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'msnown1', msnown(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'msnown2', msnown(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'msnown3', msnown(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'msnown4', msnown(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'msnown5', msnown(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn1', mthikn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn2', mthikn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn3', mthikn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn4', mthikn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mthikn5', mthikn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'msst', msst, dbug) + call ice_read_nc(ncid, 1, 'mssu', mssu, dbug) + call ice_read_nc(ncid, 1, 'mssv', mssv, dbug) + call ice_read_nc(ncid, 1, 'muvel', muvel, dbug) + call ice_read_nc(ncid, 1, 'mvvel', mvvel, dbug) + call ice_read_nc(ncid, 1, 'maiu', maiu, dbug) + ! 
+ !call ice_read_nc(ncid, 1, 'maice_ia', maice_ia, dbug) + call ice_read_nc(ncid, 1, 'mfoifr01', mfoifr(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mfoifr02', mfoifr(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mfoifr03', mfoifr(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mfoifr04', mfoifr(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mfoifr05', mfoifr(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt01', mitopt(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt02', mitopt(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt03', mitopt(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt04', mitopt(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mitopt05', mitopt(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk01', mitopk(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk02', mitopk(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk03', mitopk(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk04', mitopk(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mitopk05', mitopk(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn01', mpndfn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn02', mpndfn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn03', mpndfn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn04', mpndfn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mpndfn05', mpndfn(:,:,5,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn01', mpndtn(:,:,1,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn02', mpndtn(:,:,2,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn03', mpndtn(:,:,3,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn04', mpndtn(:,:,4,:), dbug) + call ice_read_nc(ncid, 1, 'mpndtn05', mpndtn(:,:,5,:), dbug) + + if (my_task == master_task) then + call ice_close_nc(ncid) + write(il_out,*) '(read_restart_i2asum) has read in 21 i2a fields.' + endif + +else + if (my_task==0) then + write(il_out,*) 'ERROR: (read_restart_i2asum) not found file *** ',fname + endif + print *, 'CICE: (read_restart_i2asum) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2a data file.') +endif +end subroutine read_restart_i2asum + +!============================================================================== +subroutine put_restart_i2a(fname, sec) +! call this subroutine after called get_restart_oi2 +! it uses ocn_sst etc to calculate average ocn fields which will be used to send +!to atm at the 1st step of continue run, because the ocn_sst cannot be sent to ice at the end of last run. +! average ice fields (done at end of last run) are ready by calling read_restart_i2asum() +! +implicit none + +character*(*), intent(in) :: fname +integer :: sec + + if ( file_exist('i2a.nc') ) then + write(il_out,*)' calling read_restart_i2a at time_sec = ',sec + call read_restart_i2a('i2a.nc', sec) + endif + if ( file_exist('i2asum.nc') ) then + write(il_out,*)' calling read_restart_i2asum at time_sec = ',sec + call read_restart_i2asum('i2asum.nc', sec) + + write(il_out,*)' calling ave_ocn_fields_4_i2a at time_sec = ',sec + call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling + write(il_out,*) ' calling get_i2a_fields at time_sec =', sec + call get_i2a_fields + endif + +end subroutine put_restart_i2a + +!=============================================================================== +subroutine get_restart_o2i(fname) + +! To be called at beginning of each run trunk to read in restart o2i fields + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_o2i +logical :: dbug + +dbug = .true. 
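! Editorial sketch of the continue-run restart sequence implied by put_restart_i2a
! above (the o2i file name is assumed; 'i2a.nc' and 'i2asum.nc' are the names used
! inside put_restart_i2a itself):
!
!   call get_restart_o2i('o2i.nc')        ! previous run's ocean fields
!   call put_restart_i2a('i2a.nc', 0)     ! -> read_restart_i2a / read_restart_i2asum,
!                                         !    then time_average_ocn_fields_4_i2a and
!                                         !    get_i2a_fields rebuild the i2a averages
!
! so the first i2a exchange of a continue run can be assembled before any new ocean
! fields have been received in this run.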
+if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_restart_o2i) reading in o2i fields......' + endif + call ice_open_nc(fname, ncid_o2i) + call ice_read_nc(ncid_o2i, 1, 'sst_i', ocn_sst, dbug) + call ice_read_nc(ncid_o2i, 1, 'sss_i', ocn_sss, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssu_i', ocn_ssu, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssv_i', ocn_ssv, dbug) + call ice_read_nc(ncid_o2i, 1, 'sslx_i', ocn_sslx, dbug) + call ice_read_nc(ncid_o2i, 1, 'ssly_i', ocn_ssly, dbug) + call ice_read_nc(ncid_o2i, 1, 'pfmice_i', ocn_pfmice, dbug) + call ice_read_nc(ncid_o2i, 1, 'co2_oi', ocn_co2, dbug) + call ice_read_nc(ncid_o2i, 1, 'co2fx_oi', ocn_co2fx, dbug) + if (my_task == master_task) then + call ice_close_nc(ncid_o2i) + write(il_out,*) '(get_restart_o2i) has read in 7 o2i fields.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_restart_o2i) not found file *** ',fname + endif + print *, 'CICE: (get_restart_o2i) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 o2i data file.') +endif + +return +end subroutine get_restart_o2i + +!=============================================================================== +subroutine get_restart_mice(fname) + +! Called at beginning of the run to get 'last' IO cpl int T-M ice variables +! which are used together with the first received i2a fields to obtain the first +! i2o fields sent to ocn immediately as the 1st io cpl int forcing there. + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_o2i +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_restart_mice) reading in mice variables......' + endif + call ice_open_nc(fname, ncid_o2i) +!B: 20170825 ==> need maicen_saved variables + call ice_read_nc(ncid_o2i, 1, 'maicen1', maicen_saved(:,:,1,:), dbug) + call ice_read_nc(ncid_o2i, 1, 'maicen2', maicen_saved(:,:,2,:), dbug) + call ice_read_nc(ncid_o2i, 1, 'maicen3', maicen_saved(:,:,3,:), dbug) + call ice_read_nc(ncid_o2i, 1, 'maicen4', maicen_saved(:,:,4,:), dbug) + call ice_read_nc(ncid_o2i, 1, 'maicen5', maicen_saved(:,:,5,:), dbug) +!b. + call ice_read_nc(ncid_o2i, 1, 'maice', maice, dbug) + call ice_read_nc(ncid_o2i, 1, 'mstrocnxT', mstrocnxT, dbug) + call ice_read_nc(ncid_o2i, 1, 'mstrocnyT', mstrocnyT, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfresh', mfresh, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfsalt', mfsalt, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfhocn', mfhocn, dbug) + call ice_read_nc(ncid_o2i, 1, 'mfswthru', mfswthru, dbug) + call ice_read_nc(ncid_o2i, 1, 'msicemass', msicemass, dbug) + + if (my_task == master_task) then + call ice_close_nc(ncid_o2i) + write(il_out,*) '(get_restart_mice) has read in 8 T-M variables.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_restart_mice) not found file *** ',fname + endif + print *, 'CICE: (get_restart_mice) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 mice data file.') +endif + +return +end subroutine get_restart_mice + +!=============================================================================== +subroutine get_restart_i2o(fname) + +! To be called at beginning of each run trunk to read in restart i2o fields + +implicit none + +character*(*), intent(in) :: fname + +integer(kind=int_kind) :: ncid_i2o, jf, jfs +logical :: dbug + +dbug = .true. +if ( file_exist(fname) ) then + if (my_task==0) then + write(il_out,*) '(get_time0_i2o_fields) reading in i2o fields......' 
+ endif + call ice_open_nc(fname, ncid_i2o) + do jf = nsend_i2a + 1, jpfldout + call ice_read_nc(ncid_i2o, 1, cl_writ(jf) , vwork, dbug) + select case(trim(cl_writ(jf))) + case ('strsu_io'); io_strsu = vwork + case ('strsv_io'); io_strsv = vwork + case ('rain_io'); io_rain = vwork + case ('snow_io'); io_snow = vwork + case ('stflx_io'); io_stflx = vwork + case ('htflx_io'); io_htflx = vwork + case ('swflx_io'); io_swflx = vwork + case ('qflux_io'); io_qflux = vwork + case ('shflx_io'); io_shflx = vwork + case ('lwflx_io'); io_lwflx = vwork + case ('runof_io'); io_runof = vwork + case ('press_io'); io_press = vwork + case ('aice_io'); io_aice = vwork + case ('melt_io'); io_melt = vwork + case ('form_io'); io_form = vwork + case ('co2_i1'); io_co2 = vwork + case ('wnd_i1'); io_wnd = vwork + end select + enddo + if (my_task == master_task) then + call ice_close_nc(ncid_i2o) + write(il_out,*) '(get_time0_i2o_fields) has read in 11 i2o fields.' + endif +else + if (my_task==0) then + write(il_out,*) 'ERROR: (get_time0_i2o_fields) not found file *** ',fname + endif + print *, 'CICE: (get_time0_i2o_fields_old) not found file *** ',fname + call abort_ice('CICE stopped -- Need time0 i2o data file.') +endif + +return +end subroutine get_restart_i2o + +!=============================================================================== +subroutine set_sbc_ice !!NOTE: This routine is NOT used!! +! +! Set coupling fields (in units of GMB, from UM and MOM4) needed for CICE +! +! Adapted from "subroutine cice_sbc_in" of HadGem3 Nemo "MODULE sbcice_cice" +! for the "nsbc = 5" case. +! +! It should be called after calling "from_atm" and "from_ocn". +!------------------------------------------------------------------------------- + +implicit none + +real :: r1_S0 +real, dimension(nx_block,ny_block,nblocks) :: zzs + +integer :: i,j,k,cat + +!*** Fields from UM (all on T cell center): + +!(1) windstress taux: +strax = um_taux * maice !*tmask + +!(2) windstress tauy: +stray = um_tauy * maice !*tmask + +!(3) surface downward latent heat flux (==> multi-category) +do j = 1, ny_block +do i = 1, nx_block + do k = 1, nblocks + if (maice(i,j,k)==0.0) then + do cat = 1, ncat + flatn_f(i,j,cat,k) = 0.0 + enddo + ! This will then be conserved in CICE (done in sfcflux_to_ocn) + flatn_f(i,j,1,k) = um_lhflx(i,j,k) + else + do cat = 1, ncat + !!!B: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * maicen(i,j,cat,k)/maice(i,j,k) + !???: flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub + flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub + enddo + endif + enddo +enddo +enddo + +! 
GBM conductive flux through ice: +!(4-8) top melting; (9-13) bottom melting ==> surface heatflux +do cat = 1, ncat + fcondtopn_f(:,:,cat,:) = um_bmlt(:,:,cat,:) + fsurfn_f (:,:,cat,:) = um_tmlt(:,:,cat,:) + um_bmlt(:,:,cat,:) +enddo + +!(14) snowfall +fsnow = max(maice * um_snow, 0.0) + +!(15) rainfall +frain = max(maice * um_rain, 0.0) + +!BX-20160718: "save" the ice concentration "maice" used here for scaling-up frain etc in +!ice_step for "consistency"-- +!maice_saved = maice + +!*** Fields from MOM4 (SSU/V and sslx/y are on U points): + +!(1) freezing/melting potential +frzmlt = ocn_pfmice +if (limit_icemelt) then + frzmlt(:,:,:) = max(frzmlt(:,:,:), meltlimit) +endif + +!(2) SST +!make sure SST is 'all right' K==>C +sst = ocn_sst +if (maxval(sst).gt.200) then + sst = sst -273.15 +endif + +!(3) SSS +sss = ocn_sss + +!(4) SSU +uocn = ocn_ssu + +!(5) SSV +vocn = ocn_ssv + +!(6) surface slope sslx +ss_tltx = ocn_sslx + +!(7) surface slope ssly +ss_tlty = ocn_ssly + +!(as per S.O.) make sure Tf is properly initialized +Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) +! +!B: May use a different formula for Tf such as the TEOS-10 formulation: +! +!r1_S0 = 0.875/35.16504 +!zzs(:,:,:) = sqrt(abs(sss(:,:,:)) * r1_S0) +!Tf(:,:,:) = ((((1.46873e-03 * zzs(:,:,:) - 9.64972e-03) * zzs(:,:,:) + & +! 2.28348e-02) * zzs(:,:,:) - 3.12775e-02) * zzs(:,:,:) + & +! 2.07679e-02) * zzs(:,:,:) - 5.87701e-02 +!Tf(:,:,:) = Tf(:,:,:) * sss(:,:,:) ! - 7.53e-4 * 5.0 !!!5.0 is depth in meters + +!=============================================================== + +end subroutine set_sbc_ice + +!=============================================================================== +subroutine get_sbc_ice +! +! ** Purpose: set GBM coupling fields (from UM and MOM4) needed for CICE +! +! Adapted from "subroutine cice_sbc_in" of HadGem3 Nemo "MODULE sbcice_cice" +! for the "nsbc = 5" case. +! +! It should be called after calling "from_atm" and "from_ocn". +! +! *** This routine is used/called within the ice time loop (itap) +! *** in case the "set_sbc_ice" call (outside the itap loop) fails, +! *** which is unfortunately the case at the moment (Jan2010)! +!B: possible reason: "maice" used in set_sbc_ice may be (quite) different from +! the real time aice (used here). E.g., in the case of aice << maice, taux/y +! calculated in set_sbc_ice (i.e., um_taux/y * maice) would be too large for +! a (possibly very) small aice grid, causing huge ice velocity and thus ice +! "departure point error". (June 2016) +!------------------------------------------------------------------------------- + +implicit none + +real :: r1_S0 +real, dimension(nx_block,ny_block,nblocks) :: zzs + +integer :: i,j,k,cat + +! Fields from UM (all on T cell center): + +!(1) windstress taux: +strax = um_taux * aice !*tmask ? + +!(2) windstress tauy: +stray = um_tauy * aice !*tmask ? + +!(3) surface downward latent heat flux (==> multi_category) +!BX: where is flatn_f "used" in CICE? +do j = 1, ny_block +do i = 1, nx_block + do k = 1, nblocks + !BX 20160826: as in NEMO sbccpl.F90, there is no "open water field" um_lhflx involved: + ! qla_ice(:,:,1:jpl) = - frcv(jpr_ievp)%z3(:,:,1:jpl) * lsub + !------------------------------------------------------------------------------------- + !if (aice(i,j,k)==0.0) then + ! do cat = 1, ncat + ! flatn_f(i,j,cat,k) = 0.0 + ! enddo + ! ! This will then be conserved in CICE (done in sfcflux_to_ocn) + ! 
flatn_f(i,j,1,k) = um_lhflx(i,j,k) + !else + do cat = 1, ncat + !!!BX: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * aicen(i,j,cat,k)/aice(i,j,k) + !!! Double check "Lsub" used here !!! + !?! flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub + flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub + enddo + !endif + enddo +enddo +enddo + +! GBM conductive flux through ice: +!(4-8) top melting; (9-13) bottom melting ==> surface heatflux +do cat = 1, ncat + fcondtopn_f(:,:,cat,:) = um_bmlt(:,:,cat,:) + fsurfn_f (:,:,cat,:) = um_tmlt(:,:,cat,:) + um_bmlt(:,:,cat,:) +enddo + +!!! 20130419: Martin Dix's investigation suggests that frain and fsnow should NOT be scaled by +!!! aice here. This scaling would cause double-scaling in the "fresh" calculation. +!(14) snowfall +!!!fsnow = max(aice * um_snow,0.0) +!fsnow = max(um_snow,0.0) !no more scaling as per M.D.! +!(15) rainfall +!!!frain = max(aice * um_rain,0.0) +!frain = max(um_rain,0.0) !no more scaling as per M.D.! +!!! 20130420: I dug deeper and checked all the associated steps of the "fresh" calculation and found +!!! the original weighting is CORRECT, so back to *aice: +fsnow = max(aice * um_snow,0.0) +frain = max(aice * um_rain,0.0) +! +!!! XXXXXX: ice surface skin temperature (from UM)-------------------------- +!see: tsfc_ice definition in sbccpl.F90 at +!/short/p66/hxy599/fcm_make_ocean_GC3/extract/nemo/NEMOGCM/NEMO/OPA_SRC/SBC +!--------------------------------------------------------------------------- +do cat = 1, ncat + !!! trcrn(:,:,nt_Tsfc,cat,:) = um_tsfice(:,:,cat,:) + do k = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (um_tsfice(i,j,cat,k) > 0.0) then + trcrn(i,j,nt_Tsfc,cat,k) = 0.0 + else if (um_tsfice(i,j,cat,k) < -60.0) then + trcrn(i,j,nt_Tsfc,cat,k) = -60.0 + else + trcrn(i,j,nt_Tsfc,cat,k) = um_tsfice(i,j,cat,k) + endif + enddo + enddo + enddo +enddo +!!!------------------------------------------------------------------------------------------ + +! Fields from MOM4 (SSU/V and sslx/y are on U points): + +!(1) freezing/melting potential +frzmlt = ocn_pfmice +!20080312: set maximum melting htflux allowed from ocn, (eg, -200 W/m^2) +! the artificial "meltlimit = -200 " is read in from input_ice.nml +!20090320: set option 'limit_icemelt' in case no limit needed if cice behaves! +if (limit_icemelt) then + frzmlt(:,:,:) = max(frzmlt(:,:,:), meltlimit) +endif + +!(2) SST +sst = ocn_sst -273.15 + +!(3) SSS +sss = ocn_sss + +!(4) SSU +uocn = ocn_ssu + +!(5) SSV +vocn = ocn_ssv + +!(6) surface slope sslx +ss_tltx = ocn_sslx + +!(7) surface slope ssly +ss_tlty = ocn_ssly + +! * (as per S. O'Farrell) make sure Tf is properly initialized +!----- should use eos formula to calculate Tf for "consistency" with GCx ----! +!Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) +! +!B: May use a different formula for Tf such as the TEOS-10 formulation: +! +!r1_S0 = 0.875/35.16504 +!zzs(:,:,:) = sqrt(abs(sss(:,:,:)) * r1_S0) +!Tf(:,:,:) = ((((1.46873e-03 * zzs(:,:,:) - 9.64972e-03) * zzs(:,:,:) + & +! 2.28348e-02) * zzs(:,:,:) - 3.12775e-02) * zzs(:,:,:) + & +! 2.07679e-02) * zzs(:,:,:) - 5.87701e-02 +!Tf(:,:,:) = Tf(:,:,:) * sss(:,:,:) ! - 7.53e-4 * 5.0 !!!5.0 is depth in meters +! +end subroutine get_sbc_ice + +!=============================================================================== +subroutine save_restart_o2i(fname, nstep) + +! output the last o2i forcing data received in cice at the end of the run, +! 
to be read in at the beginning of next run by cice + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) + call write_nc_1Dtime(real(nstep), 1, 'time', ncid) +endif + +do jf = nrecv_a2i + 1, jpfldin + + select case (trim(cl_read(jf))) + case('sst_i'); vwork = ocn_sst + case('sss_i'); vwork = ocn_sss + case('ssu_i'); vwork = ocn_ssu + case('ssv_i'); vwork = ocn_ssv + case('sslx_i'); vwork = ocn_sslx + case('ssly_i'); vwork = ocn_ssly + case('pfmice_i'); vwork = ocn_pfmice + case('co2_oi'); vwork = ocn_co2 + case('co2fx_oi'); vwork = ocn_co2fx + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + if (my_task == 0) then + call write_nc2D(ncid, cl_read(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +return +end subroutine save_restart_o2i + +!============================================================================== +subroutine save_restart_i2asum(fname, nstep) +! output the last i2a forcing data in cice at the end of the run, +! to be read in at the beginning of next run by cice and sent to atm + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +integer(kind=int_kind), parameter :: sumfldin = 46 !21 +character(len=8), dimension(sumfldin) :: sumfld + +sumfld(1)='msst' +sumfld(2)='mssu' +sumfld(3)='mssv' +sumfld(4)='muvel' +sumfld(5)='mvvel' +sumfld(6)='maiu' +sumfld(7)='maicen1' +sumfld(8)='maicen2' +sumfld(9)='maicen3' +sumfld(10)='maicen4' +sumfld(11)='maicen5' +sumfld(12)='mthikn1' +sumfld(13)='mthikn2' +sumfld(14)='mthikn3' +sumfld(15)='mthikn4' +sumfld(16)='mthikn5' +sumfld(17)='msnown1' +sumfld(18)='msnown2' +sumfld(19)='msnown3' +sumfld(20)='msnown4' +sumfld(21)='msnown5' +! 
+sumfld(22)='mfoifr1' +sumfld(23)='mfoifr2' +sumfld(24)='mfoifr3' +sumfld(25)='mfoifr4' +sumfld(26)='mfoifr5' +sumfld(27)='mitopt1' +sumfld(28)='mitopt2' +sumfld(29)='mitopt3' +sumfld(30)='mitopt4' +sumfld(31)='mitopt5' +sumfld(32)='mitopk1' +sumfld(33)='mitopk2' +sumfld(34)='mitopk3' +sumfld(35)='mitopk4' +sumfld(36)='mitopk5' +sumfld(37)='mpndfn1' +sumfld(38)='mpndfn2' +sumfld(39)='mpndfn3' +sumfld(40)='mpndfn4' +sumfld(41)='mpndfn5' +sumfld(42)='mpndtn1' +sumfld(43)='mpndtn2' +sumfld(44)='mpndtn3' +sumfld(45)='mpndtn4' +sumfld(46)='mpndtn5' + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) +endif + +do jf = 1, sumfldin + select case (trim(sumfld(jf))) + case('msst'); vwork = msst + case('mssu'); vwork = mssu + case('mssv'); vwork = mssv + case('muvel'); vwork = muvel + case('mvvel'); vwork = mvvel + case('maiu'); vwork = maiu + case('maicen1'); vwork = maicen(:,:,1,:) + case('maicen2'); vwork = maicen(:,:,2,:) + case('maicen3'); vwork = maicen(:,:,3,:) + case('maicen4'); vwork = maicen(:,:,4,:) + case('maicen5'); vwork = maicen(:,:,5,:) + case('mthikn1'); vwork = mthikn(:,:,1,:) + case('mthikn2'); vwork = mthikn(:,:,2,:) + case('mthikn3'); vwork = mthikn(:,:,3,:) + case('mthikn4'); vwork = mthikn(:,:,4,:) + case('mthikn5'); vwork = mthikn(:,:,5,:) + case('msnown1'); vwork = msnown(:,:,1,:) + case('msnown2'); vwork = msnown(:,:,2,:) + case('msnown3'); vwork = msnown(:,:,3,:) + case('msnown4'); vwork = msnown(:,:,4,:) + case('msnown5'); vwork = msnown(:,:,5,:) + case('mfoifr1'); vwork = mfoifr(:,:,1,:) + case('mfoifr2'); vwork = mfoifr(:,:,2,:) + case('mfoifr3'); vwork = mfoifr(:,:,3,:) + case('mfoifr4'); vwork = mfoifr(:,:,4,:) + case('mfoifr5'); vwork = mfoifr(:,:,5,:) + case('mitopt1'); vwork = mitopt(:,:,1,:) + case('mitopt2'); vwork = mitopt(:,:,2,:) + case('mitopt3'); vwork = mitopt(:,:,3,:) + case('mitopt4'); vwork = mitopt(:,:,4,:) + case('mitopt5'); vwork = mitopt(:,:,5,:) + case('mitopk1'); vwork = mitopk(:,:,1,:) + case('mitopk2'); vwork = mitopk(:,:,2,:) + case('mitopk3'); vwork = mitopk(:,:,3,:) + case('mitopk4'); vwork = mitopk(:,:,4,:) + case('mitopk5'); vwork = mitopk(:,:,5,:) + case('mpndfn1'); vwork = mpndfn(:,:,1,:) + case('mpndfn2'); vwork = mpndfn(:,:,2,:) + case('mpndfn3'); vwork = mpndfn(:,:,3,:) + case('mpndfn4'); vwork = mpndfn(:,:,4,:) + case('mpndfn5'); vwork = mpndfn(:,:,5,:) + case('mpndtn1'); vwork = mpndtn(:,:,1,:) + case('mpndtn2'); vwork = mpndtn(:,:,2,:) + case('mpndtn3'); vwork = mpndtn(:,:,3,:) + case('mpndtn4'); vwork = mpndtn(:,:,4,:) + case('mpndtn5'); vwork = mpndtn(:,:,5,:) + + end select + call gather_global(gwork, vwork, master_task, distrb_info) + if (my_task == 0) then + call write_nc2D(ncid, sumfld(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +end subroutine save_restart_i2asum + +!=============================================================================== +subroutine save_restart_mice(fname, nstep) + +! output ice variable averaged over the last IO cpl int of this run, +! cice reads in these vars at the beginning of next run, uses them with the first +! 
received a2i fields to obtain the first i2o fields to be sent to ocn + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) + call write_nc_1Dtime(real(nstep), 1, 'time', ncid) +endif + +!B: 20170825 ==> add maicen_saved for atm_icefluxes_back2GBM calculation! +vwork(:,:,:) = maicen_saved(:,:,1,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen1', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork(:,:,:) = maicen_saved(:,:,2,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen2', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork(:,:,:) = maicen_saved(:,:,3,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen3', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork(:,:,:) = maicen_saved(:,:,4,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen4', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork(:,:,:) = maicen_saved(:,:,5,:) +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maicen5', gwork, 2, il_im, il_jm, 1, ilout=il_out) +!b. + +vwork = maice +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maice', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mstrocnxT +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnxT', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mstrocnyT +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnyT', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfresh +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfresh', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfsalt +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfsalt', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfhocn +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfhocn', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = mfswthru +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mfswthru', gwork, 2, il_im, il_jm, 1, ilout=il_out) +vwork = msicemass +call gather_global(gwork, vwork, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'msicemass', gwork, 2, il_im, il_jm, 1, ilout=il_out) + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +return +end subroutine save_restart_mice + +!=============================================================================== +subroutine get_i2a_fields + +implicit none + +! all fields (except for vector) obtained here are all on T cell center + +!(1) ocean surface temperature +ia_sst(:,:,:) = msst(:,:,:) + +!(2-3) ice/ocn combined surface velocity +!CH: should use "aiu", not aice! +ia_uvel(:,:,:) = mssu(:,:,:) * (1. - maiu(:,:,:)) + muvel(:,:,:) * maiu(:,:,:) +ia_vvel(:,:,:) = mssv(:,:,:) * (1. 
- maiu(:,:,:)) + mvvel(:,:,:) * maiu(:,:,:) + +!(4-8) ice concentration +ia_aicen(:,:,:,:) = maicen(:,:,:,:) +!BX: save it for use in atm_icefluxes_back2GBM --- +maicen_saved = maicen + +!XXX -- As per Alex West, only two of the ice variables below need to be scaled down +! by "* aice": ice top layer "temperature" and "effective conductivity"! + +!(9-13) ice thickness +ia_thikn(:,:,:,:) = mthikn(:,:,:,:) +!ia_thikn(:,:,:,:) = mthikn(:,:,:,:) * mfoifr(:,:,:,:) !X + +!(14-18) snow thickness +ia_snown(:,:,:,:) = msnown(:,:,:,:) +!ia_snown(:,:,:,:) = msnown(:,:,:,:) * mfoifr(:,:,:,:) !X + +!(19-20) co2 flux stuff +ia_co2 = mco2 +ia_co2fx = mco2fx + +!(21) ocean surface freezing temperature +ia_sstfz(:,:,:) = msstfz(:,:,:) + 273.15 + +!(22-26) first order ice concentration +ia_foifr(:,:,:,:) = mfoifr(:,:,:,:) + +!(27-31) ice top layer temperature +!XXX ia_itopt(:,:,:,:) = mitopt(:,:,:,:) + 273.15 +ia_itopt(:,:,:,:) = (mitopt(:,:,:,:) + 273.15) * mfoifr(:,:,:,:) !Y + +!(32-36) ice top layer effective conductivity +!XXX ia_itopk(:,:,:,:) = mitopk(:,:,:,:) +ia_itopk(:,:,:,:) = mitopk(:,:,:,:) * mfoifr(:,:,:,:) !Y + +!(37-41) ice melt pond concentration +ia_pndfn(:,:,:,:) = mpndfn(:,:,:,:) +!ia_pndfn(:,:,:,:) = mpndfn(:,:,:,:) * mfoifr(:,:,:,:) !X + +!(42-46) ice melt pond thickness +ia_pndtn(:,:,:,:) = mpndtn(:,:,:,:) +!ia_pndtn(:,:,:,:) = mpndtn(:,:,:,:) * mfoifr(:,:,:,:) !X + +return +end subroutine get_i2a_fields + +!=============================================================================== +subroutine get_i2o_fields + +! All fluxes should be in GBM units before passing into the coupler. +! e.g., io_htflx(:,:,:) = fhocn_gbm(:,:,:) + +implicit none + +real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: pice + +! Fields obtained here are all at T cell center. Before being sent to MOM4, vectors +! (Taux, Tauy) should be shifted onto U points as required +!------------------------------------------------------------------------------- + +!(1-2) air/ice - sea stress TAUX/TAUY +! Caution: in nemo, "strocnx/y" are NOT weighted by aice here, 'cos strocnx/y +! have already been 'weighted' using aice (when calculated in "evp_finish"). +! But, this weight has been removed in strocnx/yT (see "evp_finish"), therefore +! we need to put it on again here. +io_strsu = um_taux * (1. - maice) - mstrocnxT * maice +io_strsv = um_tauy * (1. - maice) - mstrocnyT * maice + +!(3) freshwater flux to ocean: rainfall (+ ice melting water flux ?) +io_rain = um_rain * (1. - maice) +!!CH: confirmed: +!!if (ice_fwflux) io_rain = io_rain + mfresh !always .t. +!!NOTE mfresh is now split into melt (14) and form (15) and passed into ocn separately. + +!(4) freshwater flux to ocean: snowfall +io_snow = um_snow * (1. - maice) + +!(5) salt flux to ocean +io_stflx = mfsalt + +!(6) ice/snow melting heatflux into ocean +io_htflx = mfhocn + +!(7) short wave radiation +!(CH: the (1-aice) weight should not be here 'cos all fluxes passed in from +! UM have already been aice-weighted when they are calculated there!!!) +!io_swflx = um_swflx * (1. - maice) + mfswthru +io_swflx = um_swflx + mfswthru +!!!20100616: test for more swflx +!!!io_swflx = 1.064 * um_swflx + mfswthru + +!(8) latent heat flux (positive out of ocean as required by MOM4) +io_qflux = um_evap * Lvap !Note it's already weighted in UM for open sea. +!20101210: NOT sure about the ice weighting in UM, 'cos the ice area does see +! non-zero (positive) evap. +!if (imsk_evap) then +! io_qflux = um_evap * Lvap * (1. 
- maice) +!endif + +!(9) sensible heat flux (positive out of ocean as required by MOM4) +!io_shflx = um_shflx * (1. - maice) +io_shflx = um_shflx + +!(10) net long wave radiation positive down +!io_lwflx = um_lwflx * (1. - maice) +io_lwflx = um_lwflx + +!(11) runoff (!check the incoming field! pattern? remapping ok? conserved? ...) +io_runof = um_runoff +! CHECK with SM about the annual cycle of core-runoff! (we only have annual mean) + +!(12) pressure +pice = gravit * msicemass +!---------------------------------------------------------------------------- +!sicemass = rho_ice x hi + rho_snow x hs (in m) +! +! Should we set limit to the ovelying ice pressure as suggested in MOM4 code? +!(see ocean_sbc.F90) if yes, we may use following +!pice(i,j) = min(pice(i,j), gravit*rhow*max_ice_thickness) +! (note rhow = 1026 kg/m^3 here, but mom4 instead uses rho0 = 1035 kg/m^3) +! No, let mom4 handle it (see ocean_sbc.F90) +! +!as GFDL SIS, we use patm 'anormaly' and then add in the ice/snow pressure! +!29/11/2007 +!---------------------------------------------------------------------------- +if (ice_pressure_on) then + io_press = pice * maice +endif +if (air_pressure_on) then + !as GFDL SIS, we use patm anormaly, i.e., taking off 1.e5 Pa ! + io_press(:,:,:) = io_press(:,:,:) + um_press(:,:,:) - 1.0e5 +endif +!(13) ice concentration +io_aice = maice +!(14) ice melt fwflux +io_melt = max(0.0,mfresh(:,:,:)) +!(15) ice form fwflux +io_form = min(0.0,mfresh(:,:,:)) + +io_co2 = um_co2 +io_wnd = um_wnd + +return +end subroutine get_i2o_fields + +!=============================================================================== +subroutine initialize_mice_fields_4_i2o + +implicit none + +maice = 0. +mstrocnxT = 0. +mstrocnyT = 0. +mfresh = 0. +mfsalt = 0. +mfhocn = 0. +mfswthru = 0. +msicemass = 0. + +return +end subroutine initialize_mice_fields_4_i2o + +!=============================================================================== +subroutine initialize_mice_fields_4_i2a + +implicit none + +muvel = 0. +mvvel = 0. + +maiu = 0. +maicen = 0. +mthikn = 0. +msnown = 0. + +mfoifr = 0. +mitopt = 0. +mitopk = 0. +mpndfn = 0. +mpndtn = 0. + +return +end subroutine initialize_mice_fields_4_i2a + +!=============================================================================== +subroutine initialize_mocn_fields_4_i2a + +implicit none + +msst = 0. +mssu = 0. +mssv = 0. +mco2 = 0. +mco2fx = 0. +msstfz = 0. + +return +end subroutine initialize_mocn_fields_4_i2a + +!=============================================================================== +subroutine time_average_ocn_fields_4_i2a + +implicit none + +msst(:,:,:) = msst(:,:,:) + ocn_sst(:,:,:) * coef_cpl +mssu(:,:,:) = mssu(:,:,:) + ocn_ssu(:,:,:) * coef_cpl +mssv(:,:,:) = mssv(:,:,:) + ocn_ssv(:,:,:) * coef_cpl +mco2(:,:,:) = mco2(:,:,:) + ocn_co2(:,:,:) * coef_cpl +mco2fx(:,:,:) = mco2fx(:,:,:) + ocn_co2fx(:,:,:) * coef_cpl +msstfz(:,:,:) = msstfz(:,:,:) + Tf(:,:,:) * coef_cpl + +return +end subroutine time_average_ocn_fields_4_i2a + +!=============================================================================== +!dhb599-20131002: resuming the old 'approach' (used before 20130420) which sets +!do_scale_fluxes = .t. and thus the "flatn_f/Lsub" terms is NOT used as part of +!'fresh' and passed into ocean...since the 'evaporation out of ice surface' is +!going into atmosphere, not supposed to change the ocean water volume! 
+!------------------------------------------------------------------------------- + +subroutine time_average_fields_4_i2o + +implicit none + +maice(:,:,:) = maice(:,:,:) + aice(:,:,:) * coef_io +mstrocnxT(:,:,:) = mstrocnxT(:,:,:) + strocnxT(:,:,:) * coef_io +mstrocnyT(:,:,:) = mstrocnyT(:,:,:) + strocnyT(:,:,:) * coef_io +!20130420: possible bug due to missing term "flatn_f/Lsub" in the last update for fresh +! use scale_fluxes=.f. to avoid flux scaling by /aice +! meaning fluxes are all grid-box-mean by the end of ice_step. +!mfresh(:,:,:) = mfresh(:,:,:) + fresh_gbm(:,:,:) * coef_io +!mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt_gbm(:,:,:) * coef_io +!mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn_gbm(:,:,:) * coef_io +!mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru_gbm(:,:,:) * coef_io +mfresh(:,:,:) = mfresh(:,:,:) + fresh(:,:,:) * coef_io +mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt(:,:,:) * coef_io +mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn(:,:,:) * coef_io +mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru(:,:,:) * coef_io +!--------------------------------------------------------------------------------------- +msicemass(:,:,:) = msicemass(:,:,:) + sicemass(:,:,:) * coef_io + +return +end subroutine time_average_fields_4_i2o + +!=============================================================================== +subroutine time_average_fields_4_i2o_20130420 + +implicit none + +maice(:,:,:) = maice(:,:,:) + aice(:,:,:) * coef_io +mstrocnxT(:,:,:) = mstrocnxT(:,:,:) + strocnxT(:,:,:) * coef_io +mstrocnyT(:,:,:) = mstrocnyT(:,:,:) + strocnyT(:,:,:) * coef_io +!20130420: possible bug due to missing term "flatn_f/Lsub" in the last update +!for fresh +! use scale_fluxes=.f. to avoid flux scaling by /aice +! meaning fluxes are all grid-box-mean by the end of ice_step. +!mfresh(:,:,:) = mfresh(:,:,:) + fresh_gbm(:,:,:) * coef_io +!mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt_gbm(:,:,:) * coef_io +!mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn_gbm(:,:,:) * coef_io +!mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru_gbm(:,:,:) * coef_io +mfresh(:,:,:) = mfresh(:,:,:) + fresh(:,:,:) * coef_io +mfsalt(:,:,:) = mfsalt(:,:,:) + fsalt(:,:,:) * coef_io +mfhocn(:,:,:) = mfhocn(:,:,:) + fhocn(:,:,:) * coef_io +mfswthru(:,:,:) = mfswthru(:,:,:) + fswthru(:,:,:) * coef_io +!--------------------------------------------------------------------------------------- +msicemass(:,:,:) = msicemass(:,:,:) + sicemass(:,:,:) * coef_io + +return +end subroutine time_average_fields_4_i2o_20130420 + +!=============================================================================== +subroutine time_average_fields_4_i2a + +implicit none + +! ice fields: +muvel(:,:,:) = muvel(:,:,:) + uvel(:,:,:) * coef_ia +mvvel(:,:,:) = mvvel(:,:,:) + vvel(:,:,:) * coef_ia +maicen(:,:,:,:) = maicen(:,:,:,:) + aicen(:,:,:,:) * coef_ia !T cat. ice concentration +mthikn(:,:,:,:) = mthikn(:,:,:,:) + vicen(:,:,:,:) * coef_ia !T cat. ice thickness +msnown(:,:,:,:) = msnown(:,:,:,:) + vsnon(:,:,:,:) * coef_ia !T cat. snow thickness + +call to_ugrid(aice, aiiu) +maiu(:,:,:) = maiu(:,:,:) + aiiu(:,:,:) * coef_ia !U cell ice concentration + +!BX: "First order" ice fraction (mfoifr, below) is required for the GSI8 "Time-Travelling Ice" (TTI) +! coupling approach. It may be different from the "normal" ice fraction (maicen, above) if +! maicen is regridded with a second order conservation scheme (as "proposed" in GC3). +! BUT, GC3 actually uses 1st order remapping for both of them, so they are identical! +! 
In ACCESS practice, no second order remapping has been applied to any coupling field, and +! maicen and mfoifr are ALWAYS the same thing. +! We pass both of them to UM for "consistency" (thus keeping the UM coupling code intact)! +mfoifr(:,:,:,:) = mfoifr(:,:,:,:) + aicen(:,:,:,:)* coef_ia !==maicen +mitopt(:,:,:,:) = mitopt(:,:,:,:) + Tn_top(:,:,:,:) * coef_ia +mitopk(:,:,:,:) = mitopk(:,:,:,:) + keffn_top(:,:,:,:) * coef_ia +mpndfn(:,:,:,:) = mpndfn(:,:,:,:) + apeffn(:,:,:,:) * coef_ia +mpndtn(:,:,:,:) = mpndtn(:,:,:,:) + trcrn(:,:,nt_hpnd,:,:) * coef_ia + +!add one more a-i interval mean field (integrated ice concentration), which, together with maicen, +!should be saved at the end of the current run for use at the beginning of the continuation run (e.g., +!converting ice fluxes into GBM; see routines "atm_icefluxes_back2GBM" and "get_sbc_ice")...... +!maice_ia(:,:,:) = maice_ia(:,:,:) + aice(:,:,:) * coef_ia + +!ocn fields: +!must be done after calling from_ocn so as to get the most recently updated ocn fields, +!therefore a separate call to "time_average_ocn_fields_4_i2a" is done for this purpose. + +return +end subroutine time_average_fields_4_i2a + +!=============================================================================== +subroutine check_i2a_fields(nstep) + +implicit none + +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ilout, ll, jf +integer(kind=int_kind), save :: ncid,currstep +data currstep/0/ + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist('fields_i2a_in_ice.nc') ) then + call create_ncfile('fields_i2a_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', nstep + call ncheck( nf_open('fields_i2a_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = 1, nsend_i2a + + select case(trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + case('uvel_ia'); vwork = ia_uvel + case('vvel_ia'); vwork = ia_vvel + case('co2_i2'); vwork = ia_co2 + case('co2fx_i2'); vwork = ia_co2fx + case('sstfz_ia'); vwork = ia_sstfz + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + 
case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0 ) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_i2a_fields + +!============================================================================ +subroutine check_a2i_fields(nstep) + +implicit none + +integer(kind=int_kind), intent(in) :: nstep +character*80 :: ncfile='fields_a2i_in_ice_2.nc' +integer(kind=int_kind) :: ncid, currstep, ll, ilout, jf +data currstep/0/ +save currstep + +currstep=currstep+1 + +if ( my_task == 0 .and. .not. file_exist(trim(ncfile)) ) then + call create_ncfile(trim(ncfile),ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file ', trim(ncfile), ' at nstep = ', nstep + call ncheck( nf_open(trim(ncfile),nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = 1, nrecv_a2i + + select case (trim(cl_read(jf))) + case ('thflx_i'); vwork = um_thflx + case ('pswflx_i'); vwork = um_pswflx + case ('runoff_i'); vwork = um_runoff + case ('wme_i'); vwork = um_wme + case ('rain_i'); vwork = um_rain + case ('snow_i'); vwork = um_snow + case ('evap_i'); vwork = um_evap + case ('lhflx_i'); vwork = um_lhflx + case ('tmlt01'); vwork(:,:,:) = um_tmlt(:,:,1,:) + case ('tmlt02'); vwork(:,:,:) = um_tmlt(:,:,2,:) + case ('tmlt03'); vwork(:,:,:) = um_tmlt(:,:,3,:) + case ('tmlt04'); vwork(:,:,:) = um_tmlt(:,:,4,:) + case ('tmlt05'); vwork(:,:,:) = um_tmlt(:,:,5,:) + case ('bmlt01'); vwork(:,:,:) = um_bmlt(:,:,1,:) + case ('bmlt02'); vwork(:,:,:) = um_bmlt(:,:,2,:) + case ('bmlt03'); vwork(:,:,:) = um_bmlt(:,:,3,:) + case ('bmlt04'); vwork(:,:,:) = um_bmlt(:,:,4,:) + case ('bmlt05'); vwork(:,:,:) = um_bmlt(:,:,5,:) + case ('taux_i'); vwork = um_taux + case ('tauy_i'); vwork = um_tauy + case ('swflx_i'); vwork = um_swflx + case ('lwflx_i'); vwork = um_lwflx + case ('shflx_i'); vwork = um_shflx + case ('press_i'); vwork = um_press + case ('co2_ai'); vwork = um_co2 + case ('wnd_ai'); vwork = um_wnd + case ('icenth_i'); vwork = um_icenth + case ('icesth_i'); vwork = um_icesth + case ('tsfice01'); vwork = um_tsfice(:,:,1,:) + case ('tsfice02'); vwork = um_tsfice(:,:,2,:) + case ('tsfice03'); vwork = um_tsfice(:,:,3,:) + case ('tsfice04'); vwork = um_tsfice(:,:,4,:) + case ('tsfice05'); vwork = um_tsfice(:,:,5,:) + case ('iceevp01'); vwork = um_iceevp(:,:,1,:) + case ('iceevp02'); vwork = um_iceevp(:,:,2,:) + case ('iceevp03'); vwork = um_iceevp(:,:,3,:) + case ('iceevp04'); vwork = um_iceevp(:,:,4,:) + case ('iceevp05'); vwork = um_iceevp(:,:,5,:) + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, 
il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_a2i_fields + +!============================================================================ +subroutine check_i2o_fields(nstep, scale) + +implicit none + +integer(kind=int_kind), intent(in) :: nstep +real, intent(in) :: scale +integer(kind=int_kind) :: ncid, currstep, ll, ilout, jf +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist('fields_i2o_in_ice.nc') ) then + call create_ncfile('fields_i2o_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file fields_i2o_in_ice.nc at nstep = ', nstep + call ncheck( nf_open('fields_i2o_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = nsend_i2a + 1, jpfldout + + select case(trim(cl_writ(jf))) + case('strsu_io') + vwork = scale * io_strsu + case('strsv_io') + vwork = scale * io_strsv + case('rain_io') + vwork = scale * io_rain + case('snow_io') + vwork = scale * io_snow + case('stflx_io') + vwork = scale * io_stflx + case('htflx_io') + vwork = scale * io_htflx + case('swflx_io') + vwork = scale * io_swflx + case('qflux_io') + vwork = scale * io_qflux + case('shflx_io') + vwork = scale * io_shflx + case('lwflx_io') + vwork = scale * io_lwflx + case('runof_io') + vwork = scale * io_runof + case('press_io') + vwork = scale * io_press + case('aice_io') + vwork = scale * io_aice + case('form_io') + vwork = scale * io_form + case('melt_io') + vwork = scale * io_melt + case('co2_i1') + vwork = scale * io_co2 + case('wnd_i1') + vwork = scale * io_wnd + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0 ) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_i2o_fields + +!============================================================================ +subroutine check_o2i_fields(nstep) + +implicit none + +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid, currstep, ilout, ll, jf +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist('fields_o2i_in_ice.nc') ) then + call create_ncfile('fields_o2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening file fields_o2i_in_ice.nc at nstep = ', nstep + call ncheck( nf_open('fields_o2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(nstep),currstep,'time',ncid) +end if + +do jf = nrecv_a2i + 1, jpfldin + + select case (trim(cl_read(jf))) + case ('sst_i'); vwork = ocn_sst + case ('sss_i'); vwork = ocn_sss + case ('ssu_i'); vwork = ocn_ssu + case ('ssv_i'); vwork = ocn_ssv + case ('sslx_i'); vwork = ocn_sslx + case ('ssly_i'); vwork = ocn_ssly + case ('pfmice_i'); vwork = ocn_pfmice + case ('co2_oi'); vwork = ocn_co2 + case ('co2fx_oi'); vwork = ocn_co2fx + end select + + call gather_global(gwork, vwork, master_task, distrb_info) + + if (my_task == 0) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm,currstep,ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_o2i_fields + +!============================================================================ +subroutine check_frzmlt_sst(ncfilenm) + +!this is (mainly) used to check cice solo run frzmlt and sst ! 
+! (for comparison against a coupled run forcing into cice) + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, sst, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sst', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, frzmlt, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'frzmlt', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_frzmlt_sst + +!================================================= +subroutine check_i2o_uvfluxes(ncfilenm) + +!this is temporarily used to check i2o fields (uflux, vflux and maice) +!for debug purpose + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, maice, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'maice', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, um_taux, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'um_taux', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, um_tauy, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'um_tauy', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, mstrocnxT, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnxT', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, mstrocnyT, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'mstrocnyT', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_i2o_uvfluxes + + +subroutine check_ice_sbc_fields(ncfilenm) + +!this is temporarily used to check ice_sbc fields got from get_ice_sbc +!for debug purpose + +implicit none + +real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: v3d + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +v3d = 0.0 + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, aice, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'aice', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, strax, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'strax', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, stray, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'stray', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +! +!ignore flatn_f, fcondtopn_f, fsurfn_f, and trcrn here...... +! too many, and they are not associated with aice...... +! +v3d(:,:,:) = flatn_f(:,:,1,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = flatn_f(:,:,2,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = flatn_f(:,:,3,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = flatn_f(:,:,4,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = flatn_f(:,:,5,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'flatn_f5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +v3d(:,:,:) = fcondtopn_f(:,:,1,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fcondtopn_f(:,:,2,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fcondtopn_f(:,:,3,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fcondtopn_f(:,:,4,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fcondtopn_f(:,:,5,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fcondtopn_f5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +v3d(:,:,:) = fsurfn_f(:,:,1,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fsurfn_f(:,:,2,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fsurfn_f(:,:,3,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fsurfn_f(:,:,4,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f4', gwork, 1, 
il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = fsurfn_f(:,:,5,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsurfn_f5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +call gather_global(gwork, fsnow, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'fsnow', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, frain, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'frain', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,1,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,2,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,3,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,4,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +v3d(:,:,:) = trcrn(:,:,nt_Tsfc,5,:) +call gather_global(gwork, v3d, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'trcrn5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +!from ocen: +call gather_global(gwork, frzmlt, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'frzmlt', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, sst, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sst', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, sss, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sss', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, uocn, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'uocn', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, vocn, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'vocn', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +call gather_global(gwork, ss_tltx, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'ss_tltx', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, ss_tlty, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'ss_tlty', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +call gather_global(gwork, Tf, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'Tf', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_ice_sbc_fields + + +!============================================================================ +subroutine check_sstsss(ncfilenm) + +!this is used to check cice sst/sss : temporary use (20091019) + +implicit none + +character*(*), intent(in) :: ncfilenm +integer(kind=int_kind) :: ncid,currstep, ilout, ll +data currstep/0/ +save currstep + +currstep=currstep+1 + +if (my_task == 0 .and. .not. 
file_exist(ncfilenm) ) then + call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) +endif + +if (my_task == 0) then + write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep + call ncheck( nf_open(ncfilenm, nf_write,ncid) ) + call write_nc_1Dtime(real(currstep),currstep,'time',ncid) +end if + +call gather_global(gwork, sst, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sst', gwork, 1, il_im,il_jm,currstep,ilout=il_out) +call gather_global(gwork, sss, master_task, distrb_info) +if (my_task == 0) call write_nc2D(ncid, 'sss', gwork, 1, il_im,il_jm,currstep,ilout=il_out) + +if (my_task == 0) call ncheck(nf_close(ncid)) + +return +end subroutine check_sstsss + + +!============================================================================ +function file_exist (file_name) +! +character(len=*), intent(in) :: file_name +logical file_exist + +file_exist = .false. +if (len_trim(file_name) == 0) return +if (file_name(1:1) == ' ') return + +inquire (file=trim(file_name), exist=file_exist) + +end function file_exist + +!============================================================================ + +end module cpl_forcing_handler diff --git a/drivers/access/cpl_interface.F90_uphalo b/drivers/access/cpl_interface.F90_uphalo index 00a8c226..dc71f43c 100644 --- a/drivers/access/cpl_interface.F90_uphalo +++ b/drivers/access/cpl_interface.F90_uphalo @@ -515,6 +515,11 @@ cl_writ(nsend_i2o)='co2_i1' nsend_i2o = nsend_i2o + 1 cl_writ(nsend_i2o)='wnd_i1' +!2 more added 20171024: + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='lice_fw' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='lice_ht' if (my_task == 0 .or. ll_comparal) then @@ -686,6 +691,13 @@ allocate (um_icesth(nx_block,ny_block,max_blocks)); um_icesth(:,:,:) = 0 allocate (um_tsfice(nx_block,ny_block,ncat,max_blocks)); um_tsfice(:,:,:,:) = 0 allocate (um_iceevp(nx_block,ny_block,ncat,max_blocks)); um_iceevp(:,:,:,:) = 0 + !20171024: 6 more arrays added (for land ice discharge into ocean) + allocate (lice_nth(nx_block,ny_block,max_blocks)); lice_nth(:,:,:) = 0 + allocate (lice_sth(nx_block,ny_block,max_blocks)); lice_sth(:,:,:) = 0 + allocate (msk_nth(nx_block,ny_block,max_blocks)); msk_nth(:,:,:) = 0 + allocate (msk_sth(nx_block,ny_block,max_blocks)); msk_sth(:,:,:) = 0 + allocate (amsk_nth(nx_block,ny_block,max_blocks)); amsk_nth(:,:,:) = 0 + allocate (amsk_sth(nx_block,ny_block,max_blocks)); amsk_sth(:,:,:) = 0 ! allocate ( core_runoff(nx_block,ny_block,max_blocks)); core_runoff(:,:,:) = 0. @@ -738,6 +750,9 @@ allocate (io_form(nx_block,ny_block,max_blocks)); io_form(:,:,:) = 0 allocate (io_co2(nx_block,ny_block,max_blocks)); io_co2(:,:,:) = 0 allocate (io_wnd(nx_block,ny_block,max_blocks)); io_wnd(:,:,:) = 0 +!20171024: 2 more added + allocate (io_licefw(nx_block,ny_block,max_blocks)); io_licefw(:,:,:) = 0 + allocate (io_liceht(nx_block,ny_block,max_blocks)); io_liceht(:,:,:) = 0 ! temporary arrays: ! IO cpl int time-average @@ -770,11 +785,14 @@ allocate (mpndtn(nx_block,ny_block,ncat,max_blocks)); mpndtn(:,:,:,:) = 0 !BX: allocate (maicen_saved(nx_block,ny_block,ncat,max_blocks)); maicen_saved(:,:,:,:) = 0 +! + allocate (icebergfw(nx_block,ny_block,12,max_blocks)); icebergfw(:,:,:,:) = 0 allocate (vwork(nx_block,ny_block,max_blocks)); vwork(:,:,:) = 0 allocate (gwork(nx_global,ny_global)); gwork(:,:) = 0 allocate (sicemass(nx_block,ny_block,max_blocks)); sicemass(:,:,:) = 0. allocate (vwork2d(l_ilo:l_ihi, l_jlo:l_jhi)); vwork2d(:,:) = 0. 
!l_ihi-l_ilo+1, l_jhi-l_jlo+1 + end subroutine init_cpl !======================================================================= @@ -919,18 +937,6 @@ endif enddo -!=========================B: The operation below is ridiculous"====================================== -! it definitely destroys the "reproducibility" when model is run -! with different time segments (e.g., 1 month vs 3 months -! IF really required (see comments below), should do it at the very beginning of the exp. or -! --actually better-- reducing windstress in the preprecessed data "a2i.nc" !!! -! -! !BX: 20160623...... avoid initial "remap transport: bad departure points" (e.g.@(332,776))? -! if (isteps == 0) then -! um_taux = um_taux * 0.1 -! um_tauy = um_tauy * 0.1 -! endif -! !BX. !---------------------------------------------------------------------------------------------------- call ice_HaloUpdate(um_thflx, halo_info,field_loc_center,field_type_scalar) call ice_HaloUpdate(um_pswflx, halo_info,field_loc_center,field_type_scalar) @@ -1197,6 +1203,9 @@ case('form_io'); vwork = io_form case('co2_i1'); vwork = io_co2 case('wnd_i1'); vwork = io_wnd + !2 more added 20171024: + case('lice_fw'); vwork = io_licefw + case('lice_ht'); vwork = io_liceht end select if(.not. ll_comparal) then @@ -1204,22 +1213,6 @@ else call pack_global_dbl(gwork, vwork, master_task, distrb_info) vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) -! do iblk=1,nblocks_tot -! -! if (distrb_info%blockLocation(iblk) == my_task+1) then -! -! this_block = get_block(iblk,iblk) -! ilo = this_block%ilo -! ihi = this_block%ihi -! jlo = this_block%jlo -! jhi = this_block%jhi -! -! vwork2d(this_block%i_glob(ilo):this_block%i_glob(ihi), & -! this_block%j_glob(jlo):this_block%j_glob(jhi)) = & -! vwork(ilo:ihi,jlo:jhi,distrb_info%blockLocalID(iblk)) -! endif -! end do - endif if (my_task == 0 .or. ll_comparal) then diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index a859da38..19273d50 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -15,7 +15,7 @@ module cpl_parameters !integer(kind=int_kind), parameter :: nrecv = 50 ! maxium no of flds rcvd allowed integer(kind=int_kind) :: nsend_i2a, nsend_i2o integer(kind=int_kind) :: nrecv_a2i, nrecv_o2i -integer(kind=int_kind), parameter :: jpfldout = 63 ! actual number of fields sent +integer(kind=int_kind), parameter :: jpfldout = 65 ! actual number of fields sent integer(kind=int_kind), parameter :: jpfldin = 47 ! actual number of fields rcvd character(len=8), dimension(jpfldout) :: cl_writ ! Symb names fields sent @@ -75,7 +75,10 @@ module cpl_parameters ! tropical cooling bias: real(kind=dbl_kind) :: ocn_ssuv_factor = 1.0 ! 0.0 -- turn off the ocn_current into UM. real(kind=dbl_kind) :: iostress_factor = 1.0 ! 0.0 -- turn off stresses into MOM4. - +! +!20171227: Adding options for land ice discharge as iceberg melt (0,1,2,3,4) +integer(kind=int_kind) :: iceberg = 0 +! namelist/coupling/ & caltype, & jobnum, & @@ -103,6 +106,7 @@ module cpl_parameters do_scale_fluxes, & extreme_test, & imsk_evap, & + iceberg, & ocn_ssuv_factor,& iostress_factor,& chk_a2i_fields, & @@ -123,6 +127,8 @@ module cpl_parameters ! the received frazil energy by multiplying 0.5... !--------------------------------------------------------------------------------------- +logical :: newstep_ai = .false. 
!20171024: for land ice availiblity control + contains !======================================================================================= diff --git a/drivers/access/ice_constants.F90 b/drivers/access/ice_constants.F90 index 05762746..79a56a9a 100644 --- a/drivers/access/ice_constants.F90 +++ b/drivers/access/ice_constants.F90 @@ -34,7 +34,7 @@ module ice_constants !ars599: 11042014: add AusCOM #ifdef AusCOM ! cp_ocn = 3989._dbl_kind ,&! specific heat of ocn (J/kg/K) - cp_ocn = 3992.10322329649_dbl_kind ,& + cp_ocn = 3992.10322329649_dbl_kind,& ! freshwater value needed for enthalpy #else cp_ocn = 4218._dbl_kind ,&! specific heat of ocn (J/kg/K) @@ -45,7 +45,8 @@ module ice_constants !ars599: 26032014 new code (CODE: dragio) ! use new code, mark out #ifndef AusCOM #ifndef AusCOM - dragio = 0.00536_dbl_kind ,&! ice-ocn drag coefficient + !!! dragio = 0.00536_dbl_kind ,&! ice-ocn drag coefficient + dragio = 0.01_dbl_kind ,&!!! 20170922 test new value as per spo #endif albocn = 0.06_dbl_kind ! ocean albedo @@ -88,12 +89,16 @@ module ice_constants ! kice is not used for mushy thermo kice = 2.03_dbl_kind ,&! thermal conductivity of fresh ice(W/m/deg) + !!!kice = 2.63_dbl_kind ,&!!! !20170922: spo suggests to test new kice and ksno ! kseaice is used only for zero-layer thermo kseaice= 2.00_dbl_kind ,&! thermal conductivity of sea ice (W/m/deg) ! (used in zero layer thermodynamics option) - ksno = 0.31_dbl_kind ,&! thermal conductivity of snow (W/m/deg) + ksno = 0.2_dbl_kind ,&! thermal conductivity of snow (W/m/deg) + !!!ksno = 0.31_dbl_kind ,&! thermal conductivity of snow (W/m/deg) + !!!ksno = 0.50_dbl_kind ,&!!! test new value as per spo zref = 10._dbl_kind ,&! reference height for stability (m) -#ifndef AusCOM ! multilayers with the UM coupling +#ifndef AusCOM + ! multilayers with the UM coupling aicenmin_ml = 0.00001_dbl_kind, &! AEW: min aice we want to allow when using snowpatch = 0.02_dbl_kind ! parameter for fractional snow area (m) #else diff --git a/source/ice_read_write.F90 b/source/ice_read_write.F90 index a78f1f1b..8e973873 100755 --- a/source/ice_read_write.F90 +++ b/source/ice_read_write.F90 @@ -998,7 +998,9 @@ end subroutine ice_write_ext subroutine ice_open_nc(filename, fid) - character (char_len_long), intent(in) :: & + !!!character (char_len_long), intent(in) :: & + !PBD 20180104 version: + character (len=*), intent(in) :: & filename ! 
netCDF filename integer (kind=int_kind), intent(out) :: & From 407ade8e298c8d5649a41ce97989ddf32038a2ca Mon Sep 17 00:00:00 2001 From: martindix Date: Mon, 17 Sep 2018 06:01:52 +0000 Subject: [PATCH 09/52] New branch for GSI 8.1 git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@393 f6bd92a4-46cf-401b-8a38-b7f7993d28bf From a24447b93d2f42271fcaf475149c9216c7a28b9b Mon Sep 17 00:00:00 2001 From: martindix Date: Mon, 17 Sep 2018 06:42:30 +0000 Subject: [PATCH 10/52] Remove symliink before adding back as file git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@394 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/cpl_interface.F90 | 1 - 1 file changed, 1 deletion(-) delete mode 120000 drivers/access/cpl_interface.F90 diff --git a/drivers/access/cpl_interface.F90 b/drivers/access/cpl_interface.F90 deleted file mode 120000 index fef8a283..00000000 --- a/drivers/access/cpl_interface.F90 +++ /dev/null @@ -1 +0,0 @@ -cpl_interface.F90_uphalo \ No newline at end of file From 6e363ca22f35ef934d0a2acab2ec5881017aeb6c Mon Sep 17 00:00:00 2001 From: martindix Date: Mon, 17 Sep 2018 06:43:04 +0000 Subject: [PATCH 11/52] Add code changes from Arnold's cice512_av853 git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@395 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/CICE_RunMod.F90 | 75 +- drivers/access/cpl_interface.F90 | 1818 ++++++++++++++++++ drivers/access/cpl_interface.F90_uphalo | 2 - source/ice_flux.F90 | 62 +- source/ice_history.F90 | 2229 ++++++++++++++++++++--- source/ice_history_pond.F90 | 2 +- source/ice_history_shared.F90 | 181 +- source/ice_meltpond_topo.F90 | 23 +- source/ice_shortwave.F90 | 4 +- source/ice_step_mod.F90 | 28 +- source/ice_therm_bl99.F90 | 118 +- source/ice_therm_shared.F90 | 8 +- source/ice_therm_vertical.F90 | 76 +- 13 files changed, 4176 insertions(+), 450 deletions(-) create mode 100644 drivers/access/cpl_interface.F90 diff --git a/drivers/access/CICE_RunMod.F90 b/drivers/access/CICE_RunMod.F90 index 69323408..19855ffe 100644 --- a/drivers/access/CICE_RunMod.F90 +++ b/drivers/access/CICE_RunMod.F90 @@ -434,7 +434,8 @@ subroutine coupling_prep (iblk) fswthru_ai, fhocn, fswthru, scale_factor, & swvdr, swidr, swvdf, swidf, Tf, Tair, Qa, strairxT, strairyt, & fsens, flat, fswabs, flwout, evap, Tref, Qref, faero_ocn, & - fsurfn_f, flatn_f, scale_fluxes, frzmlt_init, frzmlt + fsurfn_f, flatn_f, scale_fluxes, frzmlt_init, frzmlt, & + snowfrac, snowfracn, evap_ice, evap_snow use ice_grid, only: tmask use ice_ocean, only: oceanmixed_ice, ocean_mixed_layer use ice_shortwave, only: alvdfn, alidfn, alvdrn, alidrn, & @@ -492,6 +493,7 @@ subroutine coupling_prep (iblk) albsno(i,j,iblk) = c0 albpnd(i,j,iblk) = c0 apeff_ai(i,j,iblk) = c0 + snowfrac(i,j,iblk) = c0 ! for history averaging cszn = c0 @@ -524,6 +526,8 @@ subroutine coupling_prep (iblk) apeff_ai(i,j,iblk) = apeff_ai(i,j,iblk) & ! for history + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) + snowfrac(i,j,iblk) = snowfrac(i,j,iblk) & ! 
for history + + snowfracn(i,j,n,iblk)*aicen(i,j,n,iblk) enddo enddo enddo @@ -587,6 +591,7 @@ subroutine coupling_prep (iblk) fsens (:,:,iblk), flat (:,:,iblk), & fswabs (:,:,iblk), flwout (:,:,iblk), & evap (:,:,iblk), & + evap_ice (:,:,iblk), evap_snow(:,:,iblk),& Tref (:,:,iblk), Qref (:,:,iblk), & fresh (:,:,iblk), fsalt (:,:,iblk), & fhocn (:,:,iblk), fswthru (:,:,iblk), & @@ -628,74 +633,6 @@ subroutine coupling_prep (iblk) end subroutine coupling_prep -!======================================================================= -! -! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can -! be provided at points which do not have ice. (This is could be due to -! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, -! conserve energy and water by passing these fluxes to the ocean. -! -! author: A. McLaren, Met Office - - subroutine sfcflux_to_ocn(nx_block, ny_block, & - tmask, aice, & - fsurfn_f, flatn_f, & - fresh, fhocn) - - use ice_domain_size, only: ncat - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ! block dimensions - - logical (kind=log_kind), dimension (nx_block,ny_block), & - intent(in) :: & - tmask ! land/boundary mask, thickness (T-cell) - - real (kind=dbl_kind), dimension(nx_block,ny_block), & - intent(in):: & - aice ! initial ice concentration - - real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), & - intent(in) :: & - fsurfn_f, & ! net surface heat flux (provided as forcing) - flatn_f ! latent heat flux (provided as forcing) - - real (kind=dbl_kind), dimension(nx_block,ny_block), & - intent(inout):: & - fresh , & ! fresh water flux to ocean (kg/m2/s) - fhocn ! actual ocn/ice heat flx (W/m**2) - -!ars599: 08052014 not sure but add auscom to try, copy from dhb599 fm -!#ifdef CICE_IN_NEMO -#ifdef AusCOM - - ! local variables - integer (kind=int_kind) :: & - i, j, n ! horizontal indices - - real (kind=dbl_kind) :: & - rLsub ! 1/Lsub - - rLsub = c1 / Lsub - - do n = 1, ncat - do j = 1, ny_block - do i = 1, nx_block - if (tmask(i,j) .and. aice(i,j) <= puny) then - fhocn(i,j) = fhocn(i,j) & - + fsurfn_f(i,j,n) + flatn_f(i,j,n) - fresh(i,j) = fresh(i,j) & - + flatn_f(i,j,n) * rLsub - endif - enddo ! i - enddo ! j - enddo ! n - -#endif - - end subroutine sfcflux_to_ocn !======================================================================= diff --git a/drivers/access/cpl_interface.F90 b/drivers/access/cpl_interface.F90 new file mode 100644 index 00000000..dc71f43c --- /dev/null +++ b/drivers/access/cpl_interface.F90 @@ -0,0 +1,1818 @@ +!============================================================================ + module cpl_interface +!============================================================================ +! coupling interface between CICE and the oasis3_25 coupler (via MPI2) using +! the PRISM System Model Interface (PSMILe). 
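For orientation, the PSMILe handshake this module implements reduces to the outline below. It is an editorial sketch with abbreviated control flow, not code from the patch; only the prism_*_proto entry points that actually appear in this file are used, and incoming_field / outgoing_field stand for the gwork / vwork2d buffers defined further down.

    ! definition phase (once, at start-up)
    call prism_init_comp_proto (il_comp_id, cp_modnam, ierror)     ! register CICE with the coupler
    call prism_get_localcomm_proto (il_commlocal, ierror)          ! communicator holding the CICE ranks
    ! ... MPI rank/size queries and the domain decomposition ...
    call prism_def_partition_proto (il_part_id, il_paral, ierror)  ! describe this rank's share of the grid
    call prism_def_var_proto (il_var_id_out(jf), cl_writ(jf), il_part_id, &
                              il_var_nodims, PRISM_Out, il_var_shape, PRISM_Real, ierror)
    call prism_def_var_proto (il_var_id_in(jf),  cl_read(jf), il_part_id, &
                              il_var_nodims, PRISM_In,  il_var_shape, PRISM_Real, ierror)
    call prism_enddef_proto (ierror)                                ! end of the definition phase
    ! every coupling time step
    call prism_get_proto (il_var_id_in(jf),  isteps, incoming_field, ierror)
    call prism_put_proto (il_var_id_out(jf), isteps, outgoing_field, ierror)
    ! shutdown
    call prism_terminate_proto (ierror)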
+!---------------------------------------------------------------------------- + + !prism stuff + use mod_prism + + !cice stuff + use ice_kinds_mod + use ice_communicate !, only : my_task, master_task + use ice_broadcast + use ice_blocks !, only : nx_block, ny_block, nghost + use ice_domain_size !, only : max_blocks, nx_global, ny_global, ncat + use ice_distribution, only : distrb, nprocsX, nprocsY + use ice_gather_scatter + use ice_constants + use ice_boundary, only : ice_HaloUpdate + use ice_domain !, only : distrb_info + use ice_grid, only : u2tgrid_vector + use ice_grid, only : ANGLE, ANGLET + use ice_exit, only : abort_ice + + !cpl stuff + use cpl_parameters + use cpl_netcdf_setup + use cpl_arrays_setup + use cpl_forcing_handler + + implicit none + + public :: prism_init, init_cpl, coupler_termination, get_time0_sstsss, & + from_atm, into_ocn, from_ocn, into_atm, save_restart_i2a + + private + + logical :: mpiflag + integer(kind=int_kind) :: ierror, ibou + character(len=9) :: chiceout + character(len=3) :: chout + logical :: ll_comparal ! paralell or mono-cpl coupling + integer(kind=int_kind) :: il_comp_id ! Component ID + integer(kind=int_kind) :: il_nbtotproc ! Total number of processes + integer(kind=int_kind) :: il_nbcplproc ! No of processes involved in coupling + integer(kind=int_kind) :: il_part_id ! Local partition ID + integer(kind=int_kind) :: il_length ! Size of partial field for each process + integer(kind=int_kind), dimension(2) :: il_var_nodims + integer(kind=int_kind), dimension(4) :: il_var_shape + + integer(kind=int_kind) :: l_ilo, l_ihi, l_jlo, l_jhi !local partition + integer(kind=int_kind) :: gh_ilo, gh_ihi, gh_jlo, gh_jhi !local ghost outline + integer :: sendsubarray, recvsubarray , resizedrecvsubarray + integer, dimension(:), allocatable :: counts, disps + + integer(kind=int_kind) :: il_flag ! Flag for grid writing + integer(kind=int_kind) :: il_status, il_fileid, il_varid + integer(kind=int_kind) :: io_size, ii, il_bufsize, il_real, il_bufsizebyt + integer(kind=int_kind) :: integer_byte_size, integer_io_size + real(kind=dbl_kind), dimension(:,:), allocatable :: rla_array + real(kind=dbl_kind), dimension(:), allocatable :: rla_bufsend + real(kind=dbl_kind), dimension(:,:), allocatable :: vwork2d + !local domain work array, 4 coupling data passing + contains + +!====================================================================== + subroutine prism_init +!-----------------------! + + include 'mpif.h' + + !----------------------------------- + ! 'define' the model global domain: + !----------------------------------- + il_im = nx_global + il_jm = ny_global + il_imjm = il_im * il_jm + + !allocate rla_array to be used below + allocate (rla_array(il_im,il_jm) ) + + !print *, 'CICE: (prism_init) dbl_kind, ip_realwp_p= ',dbl_kind, ip_realwp_p + + !------------------- + ! Initialize PSMILe. + !------------------- + + ! Initialise MPI + mpiflag = .FALSE. + call MPI_Initialized (mpiflag, ierror) + print *,'CICE: (prism_init) BF MPI_INIT, mpiflag = ',mpiflag + + if ( .not. mpiflag ) then + call MPI_INIT(ierror) + endif + + call MPI_Initialized (mpiflag, ierror) + print *, 'CICE: (prism_init) AF MPI_INIT, mpiflag = ',mpiflag + + print * + print *, 'CICE: (prism_init) calling prism_init_comp_proto...' + + call prism_init_comp_proto (il_comp_id, cp_modnam, ierror) + + if (ierror /= PRISM_Ok) then + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 1') + else + print *, 'CICE: (prism_init) called prism_init_comp_proto !' 
+ endif + + !B: the following part may not be really needed(?) + ! + ! Let's suppose the model attaches to a MPI buffer for its own use + ! + ! ! Sophisticated way to determine buffer size needed (without "kind") + ! ! Here one message containing rla_array + + integer_byte_size = BIT_SIZE(ii)/8 + inquire (iolength=io_size) ii + integer_io_size = io_size + inquire (iolength=io_size) rla_array(1,1) + il_real = io_size/integer_io_size*integer_byte_size + il_bufsize = il_imjm + MPI_BSEND_OVERHEAD/il_real + 1 + allocate (rla_bufsend(il_bufsize), stat = ierror) + il_bufsizebyt = il_bufsize * il_real + call MPI_Buffer_Attach(rla_bufsend, il_bufsizebyt, ierror) + + if (ierror /= PRISM_Ok) then + print *, 'CICE: (prism_init) Error in MPI_Buffer_Attach.' + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 2') + else + print *, 'CICE: (prism_init) MPI_Buffer_Attach ok!' + endif + ! + ! PSMILe attribution of local communicator. + ! + ! Either MPI_COMM_WORLD if MPI2 is used, + ! or a local communicator created by Oasis if MPI1 is used. + ! + call prism_get_localcomm_proto(il_commlocal, ierror) + ! + if (ierror /= PRISM_Ok) then + print *, 'CICE: Error in prism_get_localcomm_proto' + call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 3') + else + print *, 'CICE: _get_localcomm_ OK! il_commlocal= ',il_commlocal + endif + + ! + ! Inquire if model is parallel or not and open the process log file + ! + ! print *, '* CICE: Entering init_cpl.....' + + print *, '* CICE (prism_init) calling MPI_Comm_Size ...' + call MPI_Comm_Size(il_commlocal, il_nbtotproc, ierror) + print *, '* CICE (prism_init) calling MPI_Comm_Rank ...' + call MPI_Comm_Rank(il_commlocal, my_task, ierror) + + print *, '* CICE (prism_init) il_commlocal, il_nbtotproc, my_task = ' + print *, '* CICE (prism_init) ', il_commlocal, il_nbtotproc, my_task + ! + il_nbcplproc = il_nbtotproc !multi-process coupling (real parallel cpl)! + !il_nbcplproc = 1 !mono process coupling + + if (il_nbtotproc /= 1 .and. il_nbcplproc == il_nbtotproc ) then + ll_comparal = .TRUE. ! multi-cpl coupling! + else + ll_comparal = .FALSE. !mono-cpl coupling! + endif + + print *, '* CICE: prism_init called OK!' + + end subroutine prism_init + +!======================================================================= + subroutine init_cpl + + use mpi + use ice_communicate +!--------------------! + integer(kind=int_kind) :: jf, jfs + integer(kind=int_kind), dimension(2) :: il_var_nodims ! see below + integer(kind=int_kind), dimension(4) :: il_var_shape ! see below + + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j, n + type (block) :: this_block ! block information for current block + + integer, dimension(2) :: starts,sizes,subsizes + integer(kind=mpi_address_kind) :: start, extent +! integer, dimension(:), allocatable :: counts, disps + real(kind=dbl_kind) :: realvalue + integer (int_kind) :: nprocs + integer (int_kind),dimension(:), allocatable :: vilo, vjlo + + nprocs = get_num_procs() + allocate(vilo(nprocs)) + allocate(vjlo(nprocs)) +!initialise partition to inexisting region + l_ilo=nx_global + l_ihi=0 + l_jlo=ny_global + l_jhi=0 + gh_ilo=nx_global + gh_ihi=0 + gh_jlo=ny_global + gh_jhi=0 + ! Open the process log file +!20100406 if (my_task == 0 .or. 
ll_comparal) then + il_out = 85 + my_task + write(chout,'(I3.3)')il_out + chiceout='iceout'//chout + open(il_out,file=chiceout,form='formatted') + + write(il_out,*) 'Number of processes:', il_nbtotproc + write(il_out,*) 'Local process number:', my_task + write(il_out,*) 'Local communicator is : ',il_commlocal + write(il_out,*) 'Grid layout: nx_global,ny_global= ',nx_global,ny_global + write(il_out,*) 'Grid decomposition: nx_block,ny_block,max_blocks= ',& + nx_block,ny_block,max_blocks +!20100406 endif + +! write(il_out,*) 'Number of blocks :', nblocks +! do iblk = 1, nblocks +! +! this_block = get_block(blocks_ice(iblk),iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +!! do j=this_block%jlo,this_block%jhi +!! do i=this_block%ilo,this_block%ihi +!! ARRAY_G(this_block%i_glob(i), & +!! this_block%j_glob(j)) = & +!! ARRAY(i,j,src_dist%blockLocalID(n)) +! +! write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi +! +! end do +!find out partition of this processor, which is done by init_domain_blocks + write(il_out,*) 'nblocks_x, nblocks_y, Number of tot blocks :', nblocks_x, nblocks_y, nblocks_tot +!!!!!!!!!!!! +! do iblk=1,nblocks_tot +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! !print local to global mapping +! write(il_out,*) 'block, local ilo ihi jlo jhi:', distrb_info%blockLocalID(iblk), ilo,ihi,jlo,jhi +! write(il_out,*) 'block global:', this_block%i_glob(ilo),this_block%i_glob(ihi), & +! this_block%j_glob(jlo),this_block%j_glob(jhi) +! endif +! end do + +!!debug + if (my_task == 0) then + write(il_out,*) "all block info:" + do iblk=1,nblocks_tot + this_block = get_block(iblk,iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + write(il_out,*) ' this block: cpu, iblock, jblock=', distrb_info%blockLocation(iblk)-1, this_block%iblock, this_block%jblock + write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + end do + end if + + do iblk=1,nblocks_tot + + if (distrb_info%blockLocation(iblk) == my_task+1) then + + this_block = get_block(iblk,iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + write(il_out,*) ' this block: iblock, jblock=', this_block%iblock, this_block%jblock +! write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi + write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + if (this_block%i_glob(ilo) < l_ilo) then + l_ilo = this_block%i_glob(ilo) + gh_ilo = this_block%i_glob(ilo-nghost) + endif + if (this_block%j_glob(jlo) < l_jlo) then + l_jlo = this_block%j_glob(jlo) + gh_jlo = this_block%j_glob(jlo-nghost) + endif + if (this_block%i_glob(ihi) > l_ihi) then + l_ihi = this_block%i_glob(ihi) + gh_ihi = this_block%i_glob(ihi+nghost) + endif + if (this_block%j_glob(jhi) > l_jhi) then + l_jhi = this_block%j_glob(jhi) + gh_jhi = this_block%j_glob(jhi+nghost) + endif +! l_ilo = min(l_ilo, this_block%i_glob(ilo)) +! l_ihi = max(l_ihi, this_block%i_glob(ihi)) +! l_jlo = min(l_jlo, this_block%j_glob(jlo)) +! l_jhi = max(l_jhi, this_block%j_glob(jhi)) +! else if (distrb_info%blockLocation(n) == 0) then +! 
write(il_out,*) ' land block:', n + + endif + end do + write(il_out,*) ' local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + write(il_out,*) ' partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 +!print ghost info + write(il_out,*) ' ghost global:',gh_ilo, gh_ihi, gh_jlo, gh_jhi + +!calculate partition using nprocsX and nprocsX + l_ilo=mod(my_task,nprocsX)*nx_global/nprocsX+1 + l_ihi=l_ilo + nx_global/nprocsX -1 + l_jlo=int(my_task/nprocsX) * ny_global/nprocsY+1 + l_jhi=l_jlo+ny_global/nprocsY - 1 + + write(il_out,*) ' 2local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + write(il_out,*) ' 2partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 + + call mpi_gather(l_ilo, 1, mpi_integer, vilo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) + call broadcast_array(vilo, 0) + call mpi_gather(l_jlo, 1, mpi_integer, vjlo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) + call broadcast_array(vjlo, 0) + +!create subarray of this rank + sizes(1)=l_ihi-l_ilo+1; sizes(2)=l_jhi-l_jlo+1 + subsizes(1)=l_ihi-l_ilo+1; subsizes(2)=l_jhi-l_jlo+1 + starts(1)=0; starts(2)=0 + call mpi_type_create_subarray(2, sizes, subsizes, starts, mpi_order_fortran, & + MPI_REAL8, sendsubarray, ierror) + call mpi_type_commit(sendsubarray,ierror) + if (my_task == 0) then ! create recv buffer in main cpu + sizes(1)=nx_global; sizes(2)=ny_global + subsizes(1)=l_ihi-l_ilo+1; subsizes(2)=l_jhi-l_jlo+1 + starts(1)=0; starts(2)=0 + call mpi_type_create_subarray(2, sizes, subsizes, starts, mpi_order_fortran, & + MPI_REAL8, recvsubarray, ierror) + call mpi_type_commit(recvsubarray, ierror) + extent = sizeof(realvalue) + start = 0 + call mpi_type_create_resized(recvsubarray, start, extent, resizedrecvsubarray, ierror) + call mpi_type_commit(resizedrecvsubarray,ierror) + end if + allocate(counts(nprocs),disps(nprocs)) + forall (n=1:nprocs) counts(n) = 1 + do n=1, nprocs + disps(n) = ((vjlo(n)-1)*nx_global + (vilo(n)-1)) + !disps(n) = ((vilo(n)-1)*ny_global + (vjlo(n)-1)) + end do + + write(il_out,*) ' vilo ', vilo + write(il_out,*) ' vjlo ', vjlo + write(il_out,*) ' counts ', counts + write(il_out,*) ' disps ', disps + +! if ( ll_comparal ) then +! il_im = l_ihi-l_ilo+1 !nx_global +! il_jm = l_jhi-l_jlo+1 !ny_global +! il_imjm = il_im * il_jm +! endif + if (ll_comparal) then + xdim=l_ihi-l_ilo+1 + ydim=l_jhi-l_jlo+1 + else + xdim=il_im + ydim=il_jm + endif + + +!----------------------------------------------------------------------- + if (my_task == 0 .or. ll_comparal) then + ! + ! The following steps need to be done: + ! -> by the process if cice is monoprocess; + ! -> only by the master process, if cice is parallel and only + ! master process is involved in the coupling; + ! -> by all processes, if cice is parallel and all processes + ! are involved in the coupling. + + call decomp_def (il_part_id, il_length, il_imjm, & + my_task, il_nbcplproc, ll_comparal, il_out) + + write(il_out,*)'(init_cpl) called decomp_def, my_task, ierror = ',my_task, ierror + + ! + ! PSMILe coupling fields declaration + ! + + il_var_nodims(1) = 2 ! rank of coupling field + il_var_nodims(2) = 1 ! number of bundles in coupling field (always 1) + !il_var_shape(1)= 1 ! min index for the coupling field local dim + !il_var_shape(2)= xdim !il_im ! max index for the coupling field local dim + !il_var_shape(3)= 1 + !il_var_shape(4)= ydim !il_jm + if (ll_comparal) then + il_var_shape(1)= 1 !l_ilo ! min index for the coupling field local dimension + il_var_shape(2)= l_ihi-l_ilo+1 ! max index for the coupling field local dim + il_var_shape(3)= 1 !l_jlo ! 
min index for the coupling field local dim + il_var_shape(4)= l_jhi-l_jlo+1 ! max index for the coupling field local dim + else + il_var_shape(1)= 1 ! min index for the coupling field local dimension + il_var_shape(2)= il_im ! max index for the coupling field local dim + il_var_shape(3)= 1 ! min index for the coupling field local dim + il_var_shape(4)= il_jm ! max index for the coupling field local dim + endif + + ! ?Does this help? + !il_var_shape(1)= 2 ! min index for the coupling field local dim + !il_var_shape(2)= il_im+1 ! max index for the coupling field local dim + !il_var_shape(3)= 2 + !il_var_shape(4)= il_jm+1 + + endif !my_task==0 .or. ll_comparal + + !*** ***! + !***B: we now define cl_writ/cl_read on all ranks! (20090403) ***! + !*** ***! + + ! + ! Define name (as in namcouple) and declare each field sent by ice + ! + + ! + ! ice ==> atm + ! + nsend_i2a = 0 + + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='isst_ia' + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'icecon',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'snwthk',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a6,i2.2)')'icethk',jf + enddo + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='uvel_ia' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='vvel_ia' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='co2_i2' + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='co2fx_i2' + ! new fields sending to UM GA7 + nsend_i2a = nsend_i2a + 1 + cl_writ(nsend_i2a)='sstfz_ia' + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'foifr',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopt',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopk',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndfn',jf + enddo + do jf = 1, ncat + nsend_i2a = nsend_i2a + 1 + write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndtn',jf + enddo + + if (my_task == 0) then + write(il_out,*) 'init_cpl: Number of fields sent to atm: ',nsend_i2a + endif + ! + ! ice ==> ocn + ! + nsend_i2o = nsend_i2a + + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='strsu_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='strsv_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='rain_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='snow_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='stflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='htflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='swflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='qflux_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='shflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='lwflx_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='runof_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='press_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='aice_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='melt_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='form_io' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='co2_i1' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='wnd_i1' +!2 more added 20171024: + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='lice_fw' + nsend_i2o = nsend_i2o + 1 + cl_writ(nsend_i2o)='lice_ht' + + if (my_task == 0 .or. 
ll_comparal) then + + write(il_out,*) 'init_cpl: Number of fields sent to ocn: ',nsend_i2o - nsend_i2a + + if (nsend_i2o /= jpfldout) then + write(il_out,*) + write(il_out,*)'!!! Fatal Error: (init_cpl) nsend = ',nsend_i2o + write(il_out,*)'!!! It should be nsend = ',jpfldout + call abort_ice('CICE: Number of outgoing coupling fields incorrect!') + endif + + write(il_out,*) 'init_cpl: Total number of fields sent from ice: ',jpfldout + + !jpfldout == nsend_i2o! + !---------------------! + + do jf=1, jpfldout + call prism_def_var_proto (il_var_id_out(jf),cl_writ(jf), il_part_id, & + il_var_nodims, PRISM_Out, il_var_shape, PRISM_Real, ierror) + enddo + + endif + + ! + ! Define name (as in namcouple) and declare each field received by ice + ! + + ! + ! atm ==> ice + ! + nrecv_a2i = 0 + + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'thflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'pswflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'runoff_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'wme_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'rain_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'snow_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'evap_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'lhflx_i' + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a4,i2.2,a2)')'tmlt',jf,'_i' + enddo + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a4,i2.2,a2)')'bmlt',jf,'_i' + enddo + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'taux_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'tauy_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'swflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'lwflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'shflx_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'press_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'co2_ai' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'wnd_ai' + ! new fields recving from UM GA7 + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'icenth_i' + nrecv_a2i = nrecv_a2i + 1 + cl_read(nrecv_a2i) = 'icesth_i' + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a6,i2.2)')'tsfice',jf + enddo + do jf = 1, ncat + nrecv_a2i = nrecv_a2i + 1 + write(cl_read(nrecv_a2i), '(a6,i2.2)')'iceevp',jf + enddo + + if (my_task==0 .or. ll_comparal) then + write(il_out,*) 'init_cpl: Number of fields rcvd from atm: ',nrecv_a2i + endif + + ! + ! ocn ==> ice + ! + nrecv_o2i = nrecv_a2i + + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sst_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sss_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssu_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssv_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'sslx_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'ssly_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'pfmice_i' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'co2_oi' + nrecv_o2i = nrecv_o2i + 1 + cl_read(nrecv_o2i) = 'co2fx_oi' + + if (my_task==0 .or. ll_comparal) then + + write(il_out,*) 'init_cpl: Number of fields rcvd from ocn: ',nrecv_o2i-nrecv_a2i + + if (nrecv_o2i /= jpfldin) then + write(il_out,*) + write(il_out,*)'!!! Fatal Error: (init_cpl) nrecv = ',nrecv_o2i + write(il_out,*)'!!! It should be nrecv = ',jpfldin + call abort_ice('CICE: Number of incoming coupling fields incorrect!') + endif + !jpfldin == nrecv_o2i! + !--------------------! 
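The two consistency checks in this routine (nsend_i2o against jpfldout above, nrecv_o2i against jpfldin here) can be verified by hand. Counting the names as they are pushed onto cl_writ and cl_read, and taking ncat = 5 (as the hard-coded tmlt01..tmlt05 and tsfice01..tsfice05 names in this file imply), gives the following tally; it is an editorial check, not part of the patch:

    ice -> atm : 1 (isst_ia) + 3*ncat (icecon,snwthk,icethk) + 2 (uvel_ia,vvel_ia)
                 + 2 (co2_i2,co2fx_i2) + 1 (sstfz_ia) + 5*ncat (foifr,itopt,itopk,pndfn,pndtn) = 46
    ice -> ocn : 17 pre-existing fields + 2 new fields (lice_fw, lice_ht)                      = 19
    jpfldout   : 46 + 19                                                                       = 65  (63 before the land-ice fields)

    atm -> ice : 8 (thflx_i..lhflx_i) + 2*ncat (tmlt,bmlt) + 8 (taux_i..wnd_ai)
                 + 2 (icenth_i,icesth_i) + 2*ncat (tsfice,iceevp)                              = 38
    ocn -> ice : sst_i..ssly_i, pfmice_i, co2_oi, co2fx_oi                                     = 9
    jpfldin    : 38 + 9                                                                        = 47

so the parameter values set in cpl_parameters.F90 earlier in this patch series (jpfldout = 65, jpfldin = 47) agree with the lists built here.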
+ + write(il_out,*) 'init_cpl: Total number of fields rcvd by ice: ',jpfldin + + do jf=1, jpfldin + call prism_def_var_proto (il_var_id_in(jf), cl_read(jf), il_part_id, & + il_var_nodims, PRISM_In, il_var_shape, PRISM_Real, ierror) + enddo + + ! + ! PSMILe end of declaration phase + ! + call prism_enddef_proto (ierror) + + endif !my_task==0 + + ! + ! Allocate the 'coupling' fields (to be used) for EACH PROCESS:! + ! + + ! fields in: (local domain) + ! + ! from atm: + allocate (um_thflx(nx_block,ny_block,max_blocks)); um_thflx(:,:,:) = 0 + allocate (um_pswflx(nx_block,ny_block,max_blocks)); um_pswflx(:,:,:) = 0 + allocate (um_runoff(nx_block,ny_block,max_blocks)); um_runoff(:,:,:) = 0 + allocate (um_wme(nx_block,ny_block,max_blocks)); um_wme(:,:,:) = 0 + allocate (um_snow(nx_block,ny_block,max_blocks)); um_snow(:,:,:) = 0 + allocate (um_rain(nx_block,ny_block,max_blocks)); um_rain(:,:,:) = 0 + allocate (um_evap(nx_block,ny_block,max_blocks)); um_evap(:,:,:) = 0 + allocate (um_lhflx(nx_block,ny_block,max_blocks)); um_lhflx(:,:,:) = 0 + allocate (um_taux(nx_block,ny_block,max_blocks)); um_taux(:,:,:) = 0 + allocate (um_tauy(nx_block,ny_block,max_blocks)); um_tauy(:,:,:) = 0 + allocate (um_swflx(nx_block,ny_block,max_blocks)); um_swflx(:,:,:) = 0 + allocate (um_lwflx(nx_block,ny_block,max_blocks)); um_lwflx(:,:,:) = 0 + allocate (um_shflx(nx_block,ny_block,max_blocks)); um_shflx(:,:,:) = 0 + allocate (um_press(nx_block,ny_block,max_blocks)); um_press(:,:,:) = 0 + allocate (um_tmlt(nx_block,ny_block,ncat,max_blocks)); um_tmlt(:,:,:,:) = 0 + allocate (um_bmlt(nx_block,ny_block,ncat,max_blocks)); um_bmlt(:,:,:,:) = 0 + allocate (um_co2(nx_block,ny_block,max_blocks)); um_co2(:,:,:) = 0 + allocate (um_wnd(nx_block,ny_block,max_blocks)); um_wnd(:,:,:) = 0 + allocate (um_icenth(nx_block,ny_block,max_blocks)); um_icenth(:,:,:) = 0 + allocate (um_icesth(nx_block,ny_block,max_blocks)); um_icesth(:,:,:) = 0 + allocate (um_tsfice(nx_block,ny_block,ncat,max_blocks)); um_tsfice(:,:,:,:) = 0 + allocate (um_iceevp(nx_block,ny_block,ncat,max_blocks)); um_iceevp(:,:,:,:) = 0 + !20171024: 6 more arrays added (for land ice discharge into ocean) + allocate (lice_nth(nx_block,ny_block,max_blocks)); lice_nth(:,:,:) = 0 + allocate (lice_sth(nx_block,ny_block,max_blocks)); lice_sth(:,:,:) = 0 + allocate (msk_nth(nx_block,ny_block,max_blocks)); msk_nth(:,:,:) = 0 + allocate (msk_sth(nx_block,ny_block,max_blocks)); msk_sth(:,:,:) = 0 + allocate (amsk_nth(nx_block,ny_block,max_blocks)); amsk_nth(:,:,:) = 0 + allocate (amsk_sth(nx_block,ny_block,max_blocks)); amsk_sth(:,:,:) = 0 + + ! + allocate ( core_runoff(nx_block,ny_block,max_blocks)); core_runoff(:,:,:) = 0. + ! + + ! from ocn: + allocate (ocn_sst(nx_block,ny_block,max_blocks)); ocn_sst(:,:,:) = 0 + allocate (ocn_sss(nx_block,ny_block,max_blocks)); ocn_sss(:,:,:) = 0 + allocate (ocn_ssu(nx_block,ny_block,max_blocks)); ocn_ssu(:,:,:) = 0 + allocate (ocn_ssv(nx_block,ny_block,max_blocks)); ocn_ssv(:,:,:) = 0 + allocate (ocn_sslx(nx_block,ny_block,max_blocks)); ocn_sslx(:,:,:) = 0 + allocate (ocn_ssly(nx_block,ny_block,max_blocks)); ocn_ssly(:,:,:) = 0 + allocate (ocn_pfmice(nx_block,ny_block,max_blocks)); ocn_pfmice(:,:,:) = 0 + allocate (ocn_co2(nx_block,ny_block,max_blocks)); ocn_co2(:,:,:) = 0 + allocate (ocn_co2fx(nx_block,ny_block,max_blocks)); ocn_co2fx(:,:,:) = 0 + + ! fields out: (local domain) + ! + ! 
to atm: + allocate (ia_sst(nx_block,ny_block,max_blocks)); ia_sst(:,:,:) = 0 + allocate (ia_uvel(nx_block,ny_block,max_blocks)); ia_uvel(:,:,:) = 0 + allocate (ia_vvel(nx_block,ny_block,max_blocks)); ia_vvel(:,:,:) = 0 + allocate (ia_aicen(nx_block,ny_block,ncat,max_blocks)); ia_aicen(:,:,:,:) = 0 + allocate (ia_snown(nx_block,ny_block,ncat,max_blocks)); ia_snown(:,:,:,:) = 0 + allocate (ia_thikn(nx_block,ny_block,ncat,max_blocks)); ia_thikn(:,:,:,:) = 0 + allocate (ia_co2(nx_block,ny_block,max_blocks)); ia_co2(:,:,:) = 0 + allocate (ia_co2fx(nx_block,ny_block,max_blocks)); ia_co2fx(:,:,:) = 0 + allocate (ia_sstfz(nx_block,ny_block,max_blocks)); ia_sstfz(:,:,:) = 0 + allocate (ia_foifr(nx_block,ny_block,ncat,max_blocks)); ia_foifr(:,:,:,:) = 0 + allocate (ia_itopt(nx_block,ny_block,ncat,max_blocks)); ia_itopt(:,:,:,:) = 0 + allocate (ia_itopk(nx_block,ny_block,ncat,max_blocks)); ia_itopk(:,:,:,:) = 0 + allocate (ia_pndfn(nx_block,ny_block,ncat,max_blocks)); ia_pndfn(:,:,:,:) = 0 + allocate (ia_pndtn(nx_block,ny_block,ncat,max_blocks)); ia_pndtn(:,:,:,:) = 0 + ! + ! to ocn: + allocate (io_strsu(nx_block,ny_block,max_blocks)); io_strsu(:,:,:) = 0 + allocate (io_strsv(nx_block,ny_block,max_blocks)); io_strsv(:,:,:) = 0 + allocate (io_rain (nx_block,ny_block,max_blocks)); io_rain (:,:,:) = 0 + allocate (io_snow (nx_block,ny_block,max_blocks)); io_snow (:,:,:) = 0 + allocate (io_stflx(nx_block,ny_block,max_blocks)); io_stflx(:,:,:) = 0 + allocate (io_htflx(nx_block,ny_block,max_blocks)); io_htflx(:,:,:) = 0 + allocate (io_swflx(nx_block,ny_block,max_blocks)); io_swflx(:,:,:) = 0 + allocate (io_qflux(nx_block,ny_block,max_blocks)); io_qflux(:,:,:) = 0 + allocate (io_lwflx(nx_block,ny_block,max_blocks)); io_lwflx(:,:,:) = 0 + allocate (io_shflx(nx_block,ny_block,max_blocks)); io_shflx(:,:,:) = 0 + allocate (io_runof(nx_block,ny_block,max_blocks)); io_runof(:,:,:) = 0 + allocate (io_press(nx_block,ny_block,max_blocks)); io_press(:,:,:) = 0 + allocate (io_aice(nx_block,ny_block,max_blocks)); io_aice(:,:,:) = 0 + allocate (io_melt(nx_block,ny_block,max_blocks)); io_melt(:,:,:) = 0 + allocate (io_form(nx_block,ny_block,max_blocks)); io_form(:,:,:) = 0 + allocate (io_co2(nx_block,ny_block,max_blocks)); io_co2(:,:,:) = 0 + allocate (io_wnd(nx_block,ny_block,max_blocks)); io_wnd(:,:,:) = 0 +!20171024: 2 more added + allocate (io_licefw(nx_block,ny_block,max_blocks)); io_licefw(:,:,:) = 0 + allocate (io_liceht(nx_block,ny_block,max_blocks)); io_liceht(:,:,:) = 0 + + ! temporary arrays: + ! IO cpl int time-average + allocate (maice(nx_block,ny_block,max_blocks)); maice(:,:,:) = 0 + allocate (mstrocnxT(nx_block,ny_block,max_blocks)); mstrocnxT(:,:,:) = 0 + allocate (mstrocnyT(nx_block,ny_block,max_blocks)); mstrocnyT(:,:,:) = 0 + allocate (mfresh(nx_block,ny_block,max_blocks)); mfresh(:,:,:) = 0 + allocate (mfsalt(nx_block,ny_block,max_blocks)); mfsalt(:,:,:) = 0 + allocate (mfhocn(nx_block,ny_block,max_blocks)); mfhocn(:,:,:) = 0 + allocate (mfswthru(nx_block,ny_block,max_blocks)); mfswthru(:,:,:) = 0 + allocate (msicemass(nx_block,ny_block,max_blocks)); msicemass(:,:,:) = 0 + ! 
IA cpl int time-average (3D) + allocate (maiu(nx_block,ny_block,max_blocks)); maiu(:,:,:) = 0 + allocate (muvel(nx_block,ny_block,max_blocks)); muvel(:,:,:) = 0 + allocate (mvvel(nx_block,ny_block,max_blocks)); mvvel(:,:,:) = 0 + allocate (msst(nx_block,ny_block,max_blocks)); msst(:,:,:) = 0 + allocate (mssu(nx_block,ny_block,max_blocks)); mssu(:,:,:) = 0 + allocate (mssv(nx_block,ny_block,max_blocks)); mssv(:,:,:) = 0 + allocate (mco2(nx_block,ny_block,max_blocks)); mco2(:,:,:) = 0 + allocate (mco2fx(nx_block,ny_block,max_blocks)); mco2fx(:,:,:) = 0 + allocate (msstfz(nx_block,ny_block,max_blocks)); msstfz(:,:,:) = 0 + ! IA cpl int time-average (4D) + allocate (maicen(nx_block,ny_block,ncat,max_blocks)); maicen(:,:,:,:) = 0 + allocate (msnown(nx_block,ny_block,ncat,max_blocks)); msnown(:,:,:,:) = 0 + allocate (mthikn(nx_block,ny_block,ncat,max_blocks)); mthikn(:,:,:,:) = 0 + allocate (mfoifr(nx_block,ny_block,ncat,max_blocks)); mfoifr(:,:,:,:) = 0 + allocate (mitopt(nx_block,ny_block,ncat,max_blocks)); mitopt(:,:,:,:) = 0 + allocate (mitopk(nx_block,ny_block,ncat,max_blocks)); mitopk(:,:,:,:) = 0 + allocate (mpndfn(nx_block,ny_block,ncat,max_blocks)); mpndfn(:,:,:,:) = 0 + allocate (mpndtn(nx_block,ny_block,ncat,max_blocks)); mpndtn(:,:,:,:) = 0 +!BX: + allocate (maicen_saved(nx_block,ny_block,ncat,max_blocks)); maicen_saved(:,:,:,:) = 0 +! + allocate (icebergfw(nx_block,ny_block,12,max_blocks)); icebergfw(:,:,:,:) = 0 + + allocate (vwork(nx_block,ny_block,max_blocks)); vwork(:,:,:) = 0 + allocate (gwork(nx_global,ny_global)); gwork(:,:) = 0 + allocate (sicemass(nx_block,ny_block,max_blocks)); sicemass(:,:,:) = 0. + allocate (vwork2d(l_ilo:l_ihi, l_jlo:l_jhi)); vwork2d(:,:) = 0. !l_ihi-l_ilo+1, l_jhi-l_jlo+1 + + end subroutine init_cpl + +!======================================================================= + subroutine from_atm(isteps) +!----------------------------! + + implicit none + + integer(kind=int_kind), intent(in) :: isteps + + real(kind=dbl_kind) :: tmpu, tmpv + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ncid,currstep,ll,ilout + + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(from_atm) receiving coupling fields at rtime= ', isteps + if (chk_a2i_fields) then + if ( .not. file_exist('fields_a2i_in_ice.nc') ) then + call create_ncfile('fields_a2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_a2i_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_a2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + write(il_out,*) + write(il_out,*) '(from_atm) Total number of fields to be rcvd: ', nrecv_a2i + endif + + write(il_out,*) "prism_get from_atm at sec: ", isteps + do jf = 1, nrecv_a2i + + if (my_task==0 .or. ll_comparal ) then + + !jf-th field in + write(il_out,*) + write(il_out,*) '*** receiving coupling field No. ', jf, cl_read(jf) + !call flush(il_out) + + if (ll_comparal) then + call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_get_proto (il_var_id_in(jf), isteps, gwork, ierror) + endif + + if ( ierror /= PRISM_Ok .and. 
ierror < PRISM_Recvd) then + write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror + call prism_abort_proto(il_comp_id, 'cice from_atm','stop 1') + else + write(il_out,*) + write(il_out,*)'(from_atm) rcvd at time with err: ',cl_read(jf),isteps,ierror + + if (ll_comparal .and. chk_a2i_fields) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps, resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_a2i_fields) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif + + if (.not. ll_comparal ) then + call scatter_global(vwork,gwork,master_task,distrb_info, & + field_loc_center, field_type_scalar) +! else +! call unpack_global_dbl(vwork,gwork,master_task,distrb_info, & +! field_loc_center, field_type_scalar) + endif ! not ll_comparal + +#if (MXBLCKS != 1) +#error The following code assumes that max_blocks == 1 +#endif + + !***Note following "select case" works only if cl_read(:) is defined at ALL ranks***! + !-----------------------------------------------------------------------------------! + select case (trim(cl_read(jf))) + case ('thflx_i'); + um_thflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1)=vwork2d(:,:) + case ('pswflx_i'); + um_pswflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =vwork2d(:,:) + case ('runoff_i'); + um_runoff(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =vwork2d(:,:) + case ('wme_i'); + um_wme(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) +! case ('rain_i'); um_rain(:,:,:) = vwork(:,:,:) +! case ('snow_i'); um_snow(:,:,:) = vwork(:,:,:) +!---20100825 -- just be cauious: ------------------------- + case ('rain_i'); + um_rain(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =max(0.0,vwork2d(:,:)) + case ('snow_i'); + um_snow(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =max(0.0,vwork2d(:,:)) +!--------------------------------------------------------- + case ('evap_i');um_evap(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('lhflx_i');um_lhflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('tmlt01_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) + case ('tmlt02_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) + case ('tmlt03_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) + case ('tmlt04_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) + case ('tmlt05_i');um_tmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) + case ('bmlt01_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) + case ('bmlt02_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) + case ('bmlt03_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) + case ('bmlt04_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) + case ('bmlt05_i');um_bmlt(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) + case ('taux_i');um_taux(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('tauy_i');um_tauy(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('swflx_i');um_swflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case 
('lwflx_i');um_lwflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('shflx_i');um_shflx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('press_i');um_press(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('co2_ai');um_co2(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('wnd_ai');um_wnd(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('icenth_i');um_icenth(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('icesth_i');um_icesth(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) + case ('tsfice01');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) + case ('tsfice02');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) + case ('tsfice03');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) + case ('tsfice04');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) + case ('tsfice05');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) + case ('iceevp01');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) + case ('iceevp02');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) + case ('iceevp03');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) + case ('iceevp04');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) + case ('iceevp05');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) + end select + + if (my_task == 0 .or. ll_comparal) then + write(il_out,*) + write(il_out,*)'(from_atm) done: ', jf, trim(cl_read(jf)) + endif + + enddo +!---------------------------------------------------------------------------------------------------- + call ice_HaloUpdate(um_thflx, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_pswflx, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_runoff, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_wme, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(um_rain, halo_info, field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_snow, halo_info, field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_evap, halo_info, field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_lhflx, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(um_tmlt, halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_bmlt, halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_taux, halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_tauy, halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_swflx, halo_info,field_loc_center,field_type_vector) + call ice_HaloUpdate(um_lwflx, halo_info,field_loc_center,field_type_vector) + call ice_HaloUpdate(um_shflx, halo_info,field_loc_center,field_type_vector) + call ice_HaloUpdate(um_press, halo_info,field_loc_center,field_type_vector) + call ice_HaloUpdate(um_co2, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(um_wnd, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(um_icenth,halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_icesth,halo_info, field_loc_center,field_type_vector) + call ice_HaloUpdate(um_tsfice,halo_info, 
field_loc_center,field_type_vector) + call ice_HaloUpdate(um_iceevp,halo_info, field_loc_center,field_type_vector) + + IF (rotate_winds) THEN !rotate_winds=.t. means oasis does not do the vector rotation. + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + tmpu = um_taux(i,j,iblk) ! on geographical coord. (T cell) + tmpv = um_tauy(i,j,iblk) + um_taux(i,j,iblk) = tmpu*cos(ANGLET(i,j,iblk)) & ! converted onto model curvelear + + tmpv*sin(ANGLET(i,j,iblk)) ! coord. (T cell) + um_tauy(i,j,iblk) = tmpv*cos(ANGLET(i,j,iblk)) & ! + - tmpu*sin(ANGLET(i,j,iblk)) + enddo + enddo + + enddo + + ENDIF !rotate_winds + + ! need do t-grid to u-grid shift for vectors since all coupling occur on + ! t-grid points: <==No! actually CICE requires the input wind on T grid! + ! (see comment in code ice_flux.F) + !call t2ugrid(uwnd1) + !call t2ugrid(vwnd1) + + !------------------------------- + !if ( chk_a2i_fields ) then + ! call check_a2i_fields(isteps) + !endif + !------------------------------- + + if ( chk_a2i_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine from_atm + +!======================================================================= + subroutine from_ocn(isteps) +!----------------------------! + + integer(kind=int_kind), intent(in) :: isteps + + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) '(from_ocn) receiving coupling fields at rtime: ', isteps + if (chk_o2i_fields) then + if ( .not. file_exist('fields_o2i_in_ice.nc') ) then + call create_ncfile('fields_o2i_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_o2i_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_o2i_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + write(il_out,*) "prism_get from_ocn at sec: ", isteps + do jf = nrecv_a2i + 1, jpfldin + + if (my_task==0 .or. ll_comparal) then + + !jf-th field in + write(il_out,*) + write(il_out,*) '*** receiving coupling fields No. ', jf, cl_read(jf) + if(ll_comparal) then + call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_get_proto (il_var_id_in(jf), isteps, gwork, ierror) + endif + + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Recvd) then + write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror + call prism_abort_proto(il_comp_id, 'cice from_ocn','stop 1') + else + write(il_out,*) + write(il_out,*)'(from_ocn) rcvd at time with err: ',cl_read(jf),isteps,ierror + if(ll_comparal .and. chk_o2i_fields) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_o2i_fields) then + call write_nc2D(ncid, trim(cl_read(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif + + if (.not. ll_comparal) then + call scatter_global(vwork, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) +! else +! call unpack_global_dbl(vwork, gwork, master_task, distrb_info, & +! 
field_loc_center, field_type_scalar) + endif + + !Q: 'field_type_scalar' all right for 'vector' (ssu/ssv, sslx/ssly))?! + select case (trim(cl_read(jf))) + case ('sst_i'); + ocn_sst(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('sss_i'); + ocn_sss(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('ssu_i'); + ocn_ssu(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('ssv_i'); + ocn_ssv(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('sslx_i'); + ocn_sslx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('ssly_i'); + ocn_ssly(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('pfmice_i'); + ocn_pfmice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost, 1) =vwork2d + case ('co2_oi'); + ocn_co2(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d + case ('co2fx_oi'); + ocn_co2fx(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost, 1) =vwork2d + end select + + enddo + + call ice_HaloUpdate(ocn_sst, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ocn_sss, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ocn_ssu, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(ocn_ssv, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(ocn_sslx, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(ocn_ssly, halo_info, field_loc_center, field_type_vector) + call ice_HaloUpdate(ocn_pfmice, halo_info,field_loc_center,field_type_scalar) + call ice_HaloUpdate(ocn_co2, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ocn_co2fx, halo_info, field_loc_center,field_type_scalar) + + !------------------------------- + !if (chk_o2i_fields) then + ! call check_o2i_fields(isteps) + !endif + !------------------------------- + + !------------------------------------------------------------------------------- + !B: calculate freezing point here (before "time_average_ocn_fields_4_i2a") ! + !----- should use eos formula to calculate Tf for "consistency" with GCx ----! + Tf (:,:,:) = -depressT*ocn_sss(:,:,:) ! (deg C) + ! + !May use different formula for Tf such as TEOS-10 formulation: + ! + !r1_S0 = 0.875/35.16504 + !zzs(:,:,:) = sqrt(abs(ocn_sss(:,:,:)) * r1_S0) + !Tf(:,:,:) = ((((1.46873e-03 * zzs(:,:,:) - 9.64972e-03) * zzs(:,:,:) + & + ! 2.28348e-02) * zzs(:,:,:) - 3.12775e-02) * zzs(:,:,:) + & + ! 2.07679e-02) * zzs(:,:,:) - 5.87701e-02 + !Tf(:,:,:) = Tf(:,:,:) * sss(:,:,:) ! - 7.53e-4 * 5.0 !!!5.0 is depth in meters + !--------------------------------------------------------------------------------- + + if ( chk_o2i_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine from_ocn + +!======================================================================= + subroutine into_ocn(isteps) +!-----------------------------------! + + implicit none + + integer(kind=int_kind), intent(in) :: isteps + integer(kind=int_kind) :: jf + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(into_ocn) sending coupling fields at stime= ', isteps + if (chk_i2o_fields) then + if ( .not. 
file_exist('fields_i2o_in_ice.nc') ) then + call create_ncfile('fields_i2o_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + endif + write(il_out,*) 'opening file fields_i2o_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_i2o_in_ice.nc',nf_write,ncid) ) + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + write(il_out,*) "prism_put into_ocn at sec: ", isteps + do jf = nsend_i2a + 1, jpfldout + +!CH: make sure the 'LIMITS' are to be released! + + select case(trim(cl_writ(jf))) +!20100531 for MYM's test (iostress_factor) ............. + case('strsu_io'); vwork = io_strsu * iostress_factor + case('strsv_io'); vwork = io_strsv * iostress_factor +!....................................................... + case('rain_io'); vwork = io_rain + case('snow_io'); vwork = io_snow + !case('stflx_io'); vwork = io_stflx + case('stflx_io') + if (limit_stflx) then + vwork = max(-5.e-6, min(io_stflx, 5.e-6)) + else + vwork = io_stflx + endif + !case('htflx_io'); vwork = io_htflx + !case('htflx_io'); vwork = max(io_htflx, -450.0) + !Jan2010: + case('htflx_io'); vwork = min(io_htflx,0.0) + case('swflx_io'); vwork = io_swflx + case('qflux_io'); vwork = io_qflux + case('shflx_io'); vwork = io_shflx + case('lwflx_io'); vwork = io_lwflx + case('runof_io') + if (use_core_runoff) then + vwork = core_runoff + else + vwork = io_runof + endif + case('press_io'); vwork = io_press + case('aice_io'); vwork = io_aice + case('melt_io'); vwork = io_melt + case('form_io'); vwork = io_form + case('co2_i1'); vwork = io_co2 + case('wnd_i1'); vwork = io_wnd + !2 more added 20171024: + case('lice_fw'); vwork = io_licefw + case('lice_ht'); vwork = io_liceht + end select + + if(.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) + else + call pack_global_dbl(gwork, vwork, master_task, distrb_info) + vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) + endif + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) + write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + if(ll_comparal) then + call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_put_proto(il_var_id_out(jf), isteps, gwork, ierror) + endif + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Sent) then + write(il_out,*) + write(il_out,*) '(into_ocn) Err in _put_ ', cl_writ(jf), isteps, ierror + call prism_abort_proto(il_comp_id, 'cice into_ocn','STOP 1') + else + write(il_out,*) + write(il_out,*)'(into_ocn) sent: ', cl_writ(jf), isteps, ierror + if(chk_i2o_fields .and. ll_comparal) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_i2o_fields) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif !my_task == 0 + + enddo !jf + + !-------------------------------------- + !if (chk_i2o_fields) then + ! call check_i2o_fields(isteps) + !endif + !-------------------------------------- + + if ( chk_i2o_fields .and. my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine into_ocn + +!======================================================================= + subroutine into_atm(isteps) +!----------------------------! 
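The diagnostic gathers in from_atm, from_ocn and into_ocn above (and in into_atm below) all rely on the sendsubarray / resizedrecvsubarray derived types built in init_cpl: every rank sends its whole owned patch as one element, and the root receives each patch as an (nxl x nyl) window of the global array whose extent has been shrunk to a single real, so the gatherv displacements can be given in units of one grid point. A minimal self-contained sketch of that pattern follows; it is not code from the patch, the routine and argument names are invented for illustration, and it assumes every rank owns a patch of identical size nxl x nyl, which the nprocsX x nprocsY box split above guarantees.

    subroutine gather_patches(local, global, nxl, nyl, nxg, nyg, ilo, jlo, comm)
       ! gather each rank's (nxl x nyl) patch into a (nxg x nyg) array on rank 0
       use mpi
       implicit none
       integer, intent(in) :: nxl, nyl, nxg, nyg, ilo, jlo, comm
       real(kind=8), intent(in)    :: local(nxl, nyl)
       real(kind=8), intent(inout) :: global(nxg, nyg)   ! only filled on rank 0
       integer :: sendtype, recvtype, resizedtype, nprocs, rank, ierr, n
       integer :: sizes(2), subsizes(2), starts(2)
       integer, allocatable :: counts(:), disps(:), allilo(:), alljlo(:)
       integer(kind=mpi_address_kind) :: lb, extent
       real(kind=8) :: one_real

       call mpi_comm_size(comm, nprocs, ierr)
       call mpi_comm_rank(comm, rank, ierr)

       ! send side: the whole local patch as a (trivially contiguous) subarray type
       sizes = (/ nxl, nyl /);  subsizes = sizes;  starts = (/ 0, 0 /)
       call mpi_type_create_subarray(2, sizes, subsizes, starts, &
            mpi_order_fortran, mpi_real8, sendtype, ierr)
       call mpi_type_commit(sendtype, ierr)

       ! receive side (root only): an (nxl x nyl) window of the global array, with
       ! its extent resized to one real so gatherv displacements count grid points
       resizedtype = mpi_datatype_null
       if (rank == 0) then
          sizes = (/ nxg, nyg /);  subsizes = (/ nxl, nyl /);  starts = (/ 0, 0 /)
          call mpi_type_create_subarray(2, sizes, subsizes, starts, &
               mpi_order_fortran, mpi_real8, recvtype, ierr)
          lb = 0
          extent = storage_size(one_real) / 8     ! bytes in one real(kind=8)
          call mpi_type_create_resized(recvtype, lb, extent, resizedtype, ierr)
          call mpi_type_commit(resizedtype, ierr)
       end if

       ! the root needs every rank's global (ilo, jlo) corner to place its patch
       allocate (counts(nprocs), disps(nprocs), allilo(nprocs), alljlo(nprocs))
       call mpi_gather(ilo, 1, mpi_integer, allilo, 1, mpi_integer, 0, comm, ierr)
       call mpi_gather(jlo, 1, mpi_integer, alljlo, 1, mpi_integer, 0, comm, ierr)
       counts(:) = 1
       do n = 1, nprocs
          disps(n) = (alljlo(n) - 1) * nxg + (allilo(n) - 1)
       end do

       ! one subarray element from each rank lands at its own offset in global
       call mpi_gatherv(local, 1, sendtype, global, counts, disps, &
            resizedtype, 0, comm, ierr)

       deallocate (counts, disps, allilo, alljlo)
    end subroutine gather_patches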
+ + integer(kind=int_kind), intent(in) :: isteps + integer(kind=int_kind) :: jf + + real(kind=dbl_kind) :: tmpu, tmpv + integer(kind=int_kind) :: ilo,ihi,jlo,jhi,iblk,i,j + type (block) :: this_block ! block information for current block + + integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ + save currstep + + currstep=currstep+1 + +!debug hxy599 +!if (isteps==runtime-3600) then +! chk_i2a_fields=.true. !save the last step +! currstep = 1 +! write (il_out,*) 'hxy599 debug: save i2a fields at time ', isteps +!end if + + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '(into_atm) sending coupling fields at stime= ', isteps + if (chk_i2a_fields) then + if ( .not. file_exist('fields_i2a_in_ice.nc') ) then + call create_ncfile('fields_i2a_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) + else + write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', isteps + call ncheck( nf_open('fields_i2a_in_ice.nc',nf_write,ncid) ) + end if + call write_nc_1Dtime(real(isteps),currstep,'time',ncid) + endif + endif + + IF (rotate_winds) THEN + + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + !note note uvel/vvel are on the U-cell here. + tmpu = ia_uvel(i,j,iblk); tmpv = ia_vvel(i,j,iblk) ! ice/ocn velocity, m/s + ia_uvel(i,j,iblk) = tmpu*cos(ANGLE(i,j,iblk)) & ! remapped on to geographical + - tmpv*sin(ANGLE(i,j,iblk)) ! grid. + ia_vvel(i,j,iblk) = tmpv*cos(ANGLE(i,j,iblk)) & ! they also need be shifted + + tmpu*sin(ANGLE(i,j,iblk)) ! on to T-cell (below). + enddo + enddo + + enddo + + ENDIF !rotate_winds + + !shift ia_uvel/ia_vvel onto T points before passing into coupler + call u2tgrid_vector(ia_uvel) + call u2tgrid_vector(ia_vvel) + + !hxy599 debug + !call read_restart_i2a("i2a.nc", 0) + + write(il_out,*) "prism_put into_atm at sec: ", isteps + do jf = 1, nsend_i2a + + select case (trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) + case('uvel_ia'); vwork = ia_uvel * ocn_ssuv_factor !note ice u/v are also + case('vvel_ia'); vwork = ia_vvel * ocn_ssuv_factor ! included here. 
+ case('sstfz_ia'); vwork = ia_sstfz + case('co2_i2'); vwork = ia_co2 + case('co2fx_i2'); vwork = ia_co2fx + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + + if (.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) + else + call pack_global_dbl(gwork, vwork, master_task, distrb_info) + vwork2d(l_ilo:l_ihi, l_jlo:l_jhi) = gwork(l_ilo:l_ihi, l_jlo:l_jhi) +! do iblk=1,nblocks_tot +! +! if (distrb_info%blockLocation(iblk) == my_task+1) then +! +! this_block = get_block(iblk,iblk) +! ilo = this_block%ilo +! ihi = this_block%ihi +! jlo = this_block%jlo +! jhi = this_block%jhi +! +! vwork2d(this_block%i_glob(ilo):this_block%i_glob(ihi), & +! this_block%j_glob(jlo):this_block%j_glob(jhi)) = & +! vwork(ilo:ihi,jlo:jhi,distrb_info%blockLocalID(iblk)) +! endif +! end do + + end if + if (my_task == 0 .or. ll_comparal) then + + write(il_out,*) + write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + + !call prism_put_inquire_proto(il_var_id_out(jf),isteps,ierror) + + write(il_out,*) + write(il_out,*) '(into_atm) what to do with this var==> Err= ',ierror + if(ll_comparal) then + call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) + else + call prism_put_proto(il_var_id_out(jf), isteps, gwork, ierror) + endif + if ( ierror /= PRISM_Ok .and. ierror < PRISM_Sent) then + write(il_out,*) + write(il_out,*) '(into_atm) Err in _put_ ', cl_writ(jf), isteps, ierror + call prism_abort_proto(il_comp_id, 'cice into_atm','STOP 1') + else + write(il_out,*) + write(il_out,*)'(into_atm) sent: ', cl_writ(jf), isteps, ierror + if(chk_i2a_fields .and. ll_comparal) then + call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & + counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) + call MPI_Barrier(MPI_COMM_ICE, ierror) + endif + if (my_task==0 .and. chk_i2a_fields) then + call write_nc2D(ncid, trim(cl_writ(jf)), gwork, 1, il_im,il_jm, & + currstep,ilout=il_out) + endif + endif + + endif !my_task == 0 + + enddo !jf = 1, jpfldout + + !------------------------------- + !if (chk_i2a_fields) then + ! call check_i2a_fields(isteps) + !endif + !------------------------------- + + if ( chk_i2a_fields .and. 
my_task == 0 ) then + call ncheck(nf_close(ncid)) + endif + + end subroutine into_atm + +!======================================================================= + subroutine coupler_termination +!-------------------------------! + ! + ! Detach from MPI buffer + ! + call MPI_Buffer_Detach(rla_bufsend, il_bufsize, ierror) + deallocate (rla_bufsend) + !deallocate all the coupling associated arrays... (no bother...) + ! + ! 9- PSMILe termination + ! + + call MPI_Barrier(MPI_COMM_ICE, ierror) + call prism_terminate_proto (ierror) + if (ierror /= PRISM_Ok) then + if (my_task == 0) then + write (il_out,*) 'An error occured in prism_terminate = ', ierror + endif + else + if (my_task == 0) then + write(il_out,*) + write(il_out,*) '==================*** END ***=================' + close(il_out) + endif + endif + ! + print * + print *, '********** End of CICE **********' + print * + + call MPI_Finalize (ierror) + + end subroutine coupler_termination + +!======================================================================= + subroutine decomp_def(id_part_id, id_length, id_imjm, & + id_rank, id_nbcplproc, ld_comparal, ld_mparout) +!-------------------------------------------------------! + ! + !use mod_prism_proto + !use mod_prism_def_partition_proto + + implicit none + + integer(kind=int_kind), dimension(:), allocatable :: il_paral ! Decomposition for each proc + integer(kind=int_kind) :: ig_nsegments ! Number of segments of process decomposition + integer(kind=int_kind) :: ig_parsize ! Size of array decomposition + integer(kind=int_kind) :: id_nbcplproc ! Number of processes involved in the coupling + integer(kind=int_kind) :: id_part_id ! Local partition ID + integer(kind=int_kind) :: id_imjm ! Total grid dimension, ib, ierror, my_task + integer(kind=int_kind) :: id_length ! Size of partial field for each process + integer(kind=int_kind) :: id_rank ! Rank of process + integer(kind=int_kind) :: ld_mparout ! Unit of log file + logical :: ld_comparal + integer(kind=int_kind) :: ib, ierror + character(len=80), parameter :: cdec='BOX' + ! + integer(kind=int_kind) :: ilo, ihi, jlo, jhi + ! + ! + ! Refer to oasis/psmile/prism/modules/mod_prism_proto.F90 for integer(kind=int_kind) value + ! of clim_xxxx parameters + ! + if ( .not. ld_comparal .and. id_rank == 0) then + ! Monoprocess model, or parallel model with only master process involved + ! in coupling: the entire field will be exchanged by the process. + ig_nsegments = 1 + ig_parsize = 3 + allocate(il_paral(ig_parsize)) + ! + il_paral ( clim_strategy ) = clim_serial + il_paral ( clim_offset ) = 0 + il_paral ( clim_length ) = id_imjm + id_length = id_imjm + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else + ! Parallel atm with all process involved in the coupling + ! + if (cdec == 'APPLE') then + ! Each process is responsible for a part of field defined by + ! the number of grid points and the offset of the first point + ! + write (ld_mparout,*) 'APPLE partitioning' + ig_nsegments = 1 + ig_parsize = 3 + allocate(il_paral(ig_parsize)) + write(ld_mparout,*)'ig_parsize',ig_parsize + ! + if (id_rank .LT. (id_nbcplproc-1)) then + il_paral ( clim_strategy ) = clim_apple + il_paral ( clim_length ) = id_imjm/id_nbcplproc + il_paral ( clim_offset ) = id_rank*(id_imjm/id_nbcplproc) + else + il_paral ( clim_strategy ) = clim_apple + il_paral ( clim_length ) = id_imjm-(id_rank*(id_imjm/id_nbcplproc)) + il_paral ( clim_offset ) = id_rank*(id_imjm/id_nbcplproc) + endif + id_length = il_paral(clim_length) + ! 
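+        ! Register this APPLE segment with the OASIS PSMILe layer;
+        ! id_length is the number of grid points handled by this
+        ! process and is passed back to the caller.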
+ call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else if (cdec == 'BOX') then + !B: CICE uses a kind of Cartisian decomposition which actually may NOT + ! be simply taken as "BOX" decomposition described here !!! + ! (there is an issue associated with the 'halo' boundary for each + ! segment and may NOT be treated as what we do below! + ! It needs further consideration to make this work correctly + ! for 'paralell coupling' if really needed in the future ...) + ! + ! Each process is responsible for a rectangular box + ! + write (ld_mparout,*) 'BOX partitioning' + ig_parsize = 5 + allocate(il_paral(ig_parsize)) + write(ld_mparout,*)'ig_parsize',ig_parsize + + !ilo = 1 + nghost + !ihi = nx_block - nghost + !jlo = 1 + nghost + !jhi = ny_block - nghost + + il_paral ( clim_strategy ) = clim_Box + il_paral ( clim_offset ) = nx_global * (l_jlo-1) + (l_ilo-1) + !il_paral ( clim_offset ) = (l_ilo-1) + il_paral ( clim_SizeX ) = l_ihi-l_ilo+1 + il_paral ( clim_SizeY ) = l_jhi-l_jlo+1 + il_paral ( clim_LdX ) = nx_global + + write(ld_mparout,*)'il_paral=',il_paral + + id_length = il_paral(clim_sizeX) * il_paral(clim_sizeY) + + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else if (cdec == 'ORANGE') then + !B: NOT FOR COMMON USE! + ! Each process is responsible for arbitrarily distributed + ! pieces of the field (here two segments by process) + ! + write (ld_mparout,*) 'ORANGE partitioning' + ig_nsegments = 2 + ig_parsize = 2 * ig_nsegments + 2 + write(ld_mparout,*)'ig_parsize',ig_parsize + allocate(il_paral(ig_parsize)) + ! + il_paral ( clim_strategy ) = clim_orange + il_paral ( clim_segments ) = 2 + il_paral ( clim_segments+1 ) = id_rank*768 + il_paral ( clim_segments+2 ) = 768 + il_paral ( clim_segments+3 ) = (id_rank+3)*768 + il_paral ( clim_segments+4 ) = 768 + id_length = 0 + do ib=1,2*il_paral(clim_segments) + if (mod(ib,2).eq.0) then + id_length = id_length + il_paral(clim_segments+ib) + endif + enddo + ! + call prism_def_partition_proto (id_part_id, il_paral, ierror) + deallocate(il_paral) + ! + else + write (ld_mparout,*) 'incorrect decomposition ' + endif + endif + + end subroutine decomp_def + +!============================================================================ + +!============================================================================ + subroutine pack_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist) + +! !DESCRIPTION: +! This subroutine gathers a distributed array to a global-sized +! array on the processor dst_task. +! +! !REVISION HISTORY: +! same as module +! +! !REMARKS: +! This is the specific inteface for double precision arrays +! corresponding to the generic interface gather_global. It is shown +! to provide information on the generic interface (the generic +! interface is identical, but chooses a specific inteface based +! on the data type of the input argument). + + +! !USES: + + include 'mpif.h' + +! !INPUT PARAMETERS: + + integer (int_kind), intent(in) :: & + dst_task ! task to which array should be gathered + + type (distrb), intent(in) :: & + src_dist ! distribution of blocks in the source array + + real (dbl_kind), dimension(:,:,:), intent(in) :: & + ARRAY ! array containing horizontal slab of distributed field + +! !OUTPUT PARAMETERS: + + + real (dbl_kind), dimension(:,:), intent(inout) :: & + ARRAY_G ! array containing global horizontal field on dst_task + +!EOP +!BOC +!----------------------------------------------------------------------- +! +! local variables +! 
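+! Note: unlike gather_global, no MPI communication takes place in
+! this routine.  Each task copies only the blocks it owns into
+! ARRAY_G and fills land blocks with spval_dbl, leaving a partially
+! filled global array on every task.
+!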
+!----------------------------------------------------------------------- + + integer (int_kind) :: & + i,j,n ,&! dummy loop counters + nsends ,&! number of actual sends + src_block ,&! block locator for send + ierr ! MPI error flag + + integer (int_kind), dimension(MPI_STATUS_SIZE) :: & + status + + integer (int_kind), dimension(:), allocatable :: & + snd_request + + integer (int_kind), dimension(:,:), allocatable :: & + snd_status + + real (dbl_kind), dimension(:,:), allocatable :: & + msg_buffer + + type (block) :: & + this_block ! block info for current block + + do n=1,nblocks_tot + + !*** copy local blocks + + if (src_dist%blockLocation(n) == my_task+1) then + + this_block = get_block(n,n) + +! do j=this_block%jlo,this_block%jhi +! do i=this_block%ilo,this_block%ihi +! ARRAY_G(this_block%i_glob(i), & +! this_block%j_glob(j)) = & +! ARRAY(i,j,src_dist%blockLocalID(n)) +! end do +! end do + ARRAY_G(this_block%i_glob(this_block%ilo):this_block%i_glob(this_block%ihi), & + this_block%j_glob(this_block%jlo):this_block%j_glob(this_block%jhi)) = & + ARRAY(this_block%ilo:this_block%ihi,this_block%jlo:this_block%jhi,src_dist%blockLocalID(n)) + + !*** fill land blocks with special values + + else if (src_dist%blockLocation(n) == 0) then + + this_block = get_block(n,n) + +! do j=this_block%jlo,this_block%jhi +! do i=this_block%ilo,this_block%ihi +! ARRAY_G(this_block%i_glob(i), & +! this_block%j_glob(j)) = spval_dbl +! end do +! end do + ARRAY_G(this_block%i_glob(this_block%ilo):this_block%i_glob(this_block%ihi), & + this_block%j_glob(this_block%jlo):this_block%j_glob(this_block%jhi)) = spval_dbl + endif + + end do + + end subroutine pack_global_dbl +!============================================================================ + +!============================================================================== +subroutine save_restart_i2a(fname, nstep) +! output the last i2a forcing data in cice by the end of the run, +! to be read in at the beginning of next run by cice and sent to atm + +implicit none + +character*(*), intent(in) :: fname +integer(kind=int_kind), intent(in) :: nstep +integer(kind=int_kind) :: ncid +integer(kind=int_kind) :: jf, jfs, ll, ilout + +if (my_task == 0) then + call open_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) +endif + +do jf = 1, nsend_i2a + select case (trim(cl_writ(jf))) + case('isst_ia'); vwork = ia_sst + case('icecon01'); vwork(:,:,:) = ia_aicen(:,:,1,:) + case('icecon02'); vwork(:,:,:) = ia_aicen(:,:,2,:) + case('icecon03'); vwork(:,:,:) = ia_aicen(:,:,3,:) + case('icecon04'); vwork(:,:,:) = ia_aicen(:,:,4,:) + case('icecon05'); vwork(:,:,:) = ia_aicen(:,:,5,:) + case('snwthk01'); vwork(:,:,:) = ia_snown(:,:,1,:) + case('snwthk02'); vwork(:,:,:) = ia_snown(:,:,2,:) + case('snwthk03'); vwork(:,:,:) = ia_snown(:,:,3,:) + case('snwthk04'); vwork(:,:,:) = ia_snown(:,:,4,:) + case('snwthk05'); vwork(:,:,:) = ia_snown(:,:,5,:) + case('icethk01'); vwork(:,:,:) = ia_thikn(:,:,1,:) + case('icethk02'); vwork(:,:,:) = ia_thikn(:,:,2,:) + case('icethk03'); vwork(:,:,:) = ia_thikn(:,:,3,:) + case('icethk04'); vwork(:,:,:) = ia_thikn(:,:,4,:) + case('icethk05'); vwork(:,:,:) = ia_thikn(:,:,5,:) + !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) + case('uvel_ia'); vwork = ia_uvel !* ocn_ssuv_factor !note ice u/v are also + case('vvel_ia'); vwork = ia_vvel !* ocn_ssuv_factor ! included here. 
+ case('sstfz_ia'); vwork = ia_sstfz + case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) + case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) + case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) + case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) + case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) + case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) + case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) + case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) + case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) + case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) + case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) + case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) + case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) + case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) + case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) + case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) + case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) + case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) + case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) + case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) + case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) + case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) + case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) + case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) + case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) + end select + +! if (.not. ll_comparal) then + call gather_global(gwork, vwork, master_task, distrb_info) +! else +! call pack_global_dbl(gwork, vwork, master_task, distrb_info) +! end if + if (my_task == 0) then + call modify_nc2D(ncid, cl_writ(jf), gwork, 2, il_im, il_jm, 1, ilout=il_out) + endif + +enddo + +if (my_task == 0) call ncheck( nf_close(ncid) ) + +end subroutine save_restart_i2a +!========================================================== + + end module cpl_interface diff --git a/drivers/access/cpl_interface.F90_uphalo b/drivers/access/cpl_interface.F90_uphalo index dc71f43c..dd17e6f8 100644 --- a/drivers/access/cpl_interface.F90_uphalo +++ b/drivers/access/cpl_interface.F90_uphalo @@ -785,8 +785,6 @@ allocate (mpndtn(nx_block,ny_block,ncat,max_blocks)); mpndtn(:,:,:,:) = 0 !BX: allocate (maicen_saved(nx_block,ny_block,ncat,max_blocks)); maicen_saved(:,:,:,:) = 0 -! - allocate (icebergfw(nx_block,ny_block,12,max_blocks)); icebergfw(:,:,:,:) = 0 allocate (vwork(nx_block,ny_block,max_blocks)); vwork(:,:,:) = 0 allocate (gwork(nx_global,ny_global)); gwork(:,:) = 0 diff --git a/source/ice_flux.F90 b/source/ice_flux.F90 index a4adbe54..5f92570b 100755 --- a/source/ice_flux.F90 +++ b/source/ice_flux.F90 @@ -74,7 +74,10 @@ module ice_flux dardg1dt, & ! rate of area loss by ridging ice (1/s) dardg2dt, & ! rate of area gain by new ridges (1/s) dvirdgdt, & ! rate of ice volume ridged (m/s) - opening ! rate of opening due to divergence/shear (1/s) + opening , & ! rate of opening due to divergence/shear (1/s) + ice_freeboard ! height of ice surface (i.e. not snow surface) + ! above sea level (m) + real (kind=dbl_kind), & dimension (nx_block,ny_block,ncat,max_blocks), public :: & @@ -89,7 +92,10 @@ module ice_flux araftn, & ! rafting ice area vraftn, & ! rafting ice volume aredistn, & ! redistribution function: fraction of new ridge area - vredistn ! redistribution function: fraction of new ridge volume + vredistn , & ! redistribution function: fraction of new ridge volume + ice_freeboardn ! category height of ice surface (i.e. not snow + ! surface) above sea level (m) + ! restart @@ -177,7 +183,10 @@ module ice_flux Tref , & ! 2m atm reference temperature (K) Qref , & ! 
2m atm reference spec humidity (kg/kg) Uref , & ! 10m atm reference wind speed (m/s) - evap ! evaporative water flux (kg/m^2/s) + evap , & ! evaporative water flux (kg/m^2/s) + evap_ice, & ! evaporative water flux over ice only (kg/m^2/s) + evap_snow ! evaporative water flux over snow only (kg/m^2/s) + ! albedos aggregated over categories (if calc_Tsfc) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), public :: & @@ -194,7 +203,10 @@ module ice_flux albice , & ! bare ice albedo albsno , & ! snow albedo albpnd , & ! melt pond albedo - apeff_ai ! effective pond area used for radiation calculation + apeff_ai , & ! effective pond area used for radiation calculation + snowfrac ! snow fraction used in radiation + + real (kind=dbl_kind), & dimension(nx_block,ny_block,max_blocks,max_nstrm), public :: & @@ -268,6 +280,7 @@ module ice_flux real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & fsurf , & ! net surface heat flux (excluding fcondtop)(W/m^2) fcondtop,&! top surface conductive flux (W/m^2) + fcondbot,&! bottom surface conductive flux (W/m^2) congel, & ! basal ice growth (m/step-->cm/day) frazil, & ! frazil ice growth (m/step-->cm/day) snoice, & ! snow-ice formation (m/step-->cm/day) @@ -287,9 +300,14 @@ module ice_flux dimension (nx_block,ny_block,ncat,max_blocks), public :: & fsurfn, & ! category fsurf fcondtopn,& ! category fcondtop + fcondbotn,& ! category fcondbot fsensn, & ! category sensible heat flux flatn ! category latent heat flux + real (kind=dbl_kind), & + dimension (nx_block,ny_block,ncat,max_blocks), public :: & + snowfracn + ! As above but these remain grid box mean values i.e. they are not ! divided by aice at end of ice_dynamics. These are used in ! CICE_IN_NEMO for coupling and also for generating @@ -570,6 +588,7 @@ subroutine init_history_therm fsurf (:,:,:) = c0 fcondtop(:,:,:)= c0 + fcondbot(:,:,:)= c0 congel (:,:,:) = c0 frazil (:,:,:) = c0 snoice (:,:,:) = c0 @@ -578,6 +597,7 @@ subroutine init_history_therm melts (:,:,:) = c0 meltb (:,:,:) = c0 meltl (:,:,:) = c0 + ice_freeboard (:,:,:) = c0 daidtt (:,:,:) = aice(:,:,:) ! temporary initial area dvidtt (:,:,:) = vice(:,:,:) ! temporary initial volume dvsdtt (:,:,:) = vsno(:,:,:) ! temporary initial volume @@ -588,6 +608,7 @@ subroutine init_history_therm endif fsurfn (:,:,:,:) = c0 fcondtopn (:,:,:,:) = c0 + fcondbotn (:,:,:,:) = c0 flatn (:,:,:,:) = c0 fsensn (:,:,:,:) = c0 fpond (:,:,:) = c0 @@ -598,6 +619,8 @@ subroutine init_history_therm albice (:,:,:) = c0 albsno (:,:,:) = c0 albpnd (:,:,:) = c0 + snowfracn (:,:,:,:) = c0 + snowfrac (:,:,:) = c0 ! drag coefficients are computed prior to the atmo_boundary call, ! during the thermodynamics section @@ -684,18 +707,24 @@ subroutine merge_fluxes (nx_block, ny_block, & strairxn, strairyn, & Cdn_atm_ratio_n, & fsurfn, fcondtopn, & + fcondbotn, & fsensn, flatn, & fswabsn, flwoutn, & evapn, & + evapn_ice,evapn_snow, & + ice_freeboardn, & Trefn, Qrefn, & freshn, fsaltn, & fhocnn, fswthrun, & strairxT, strairyT, & Cdn_atm_ratio, & fsurf, fcondtop, & + fcondbot, & fsens, flat, & fswabs, flwout, & evap, & + evap_ice, evap_snow, & + ice_freeboard, & Tref, Qref, & fresh, fsalt, & fhocn, fswthru, & @@ -723,6 +752,7 @@ subroutine merge_fluxes (nx_block, ny_block, & Cdn_atm_ratio_n, & ! ratio of total drag over neutral drag (atm) fsurfn , & ! net heat flux to top surface (W/m**2) fcondtopn,& ! downward cond flux at top sfc (W/m**2) + fcondbotn,& ! downward cond flux at bottom sfc (W/m**2) fsensn , & ! sensible heat flx (W/m**2) flatn , & ! 
latent heat flx (W/m**2) fswabsn , & ! shortwave absorbed heat flx (W/m**2) @@ -738,7 +768,12 @@ subroutine merge_fluxes (nx_block, ny_block, & meltbn , & ! bottom ice melt (m) meltsn , & ! snow melt (m) congeln , & ! congelation ice growth (m) - snoicen ! snow-ice growth (m) + snoicen , & ! snow-ice growth (m) + ice_freeboardn , & ! ice freeboard (m) + evapn_ice, & ! evaporation over ice only (kg/m2/s) + evapn_snow ! evaporation over snow only (kg/m2/s) + + real (kind=dbl_kind), dimension(nx_block,ny_block), optional, intent(in):: & Urefn ! air speed reference level (m/s) @@ -751,6 +786,7 @@ subroutine merge_fluxes (nx_block, ny_block, & Cdn_atm_ratio, & ! ratio of total drag over neutral drag (atm) fsurf , & ! net heat flux to top surface (W/m**2) fcondtop, & ! downward cond flux at top sfc (W/m**2) + fcondbot, & ! downward cond flux at bottom sfc (W/m**2) fsens , & ! sensible heat flx (W/m**2) flat , & ! latent heat flx (W/m**2) fswabs , & ! shortwave absorbed heat flx (W/m**2) @@ -766,7 +802,11 @@ subroutine merge_fluxes (nx_block, ny_block, & meltb , & ! bottom ice melt (m) melts , & ! snow melt (m) congel , & ! congelation ice growth (m) - snoice ! snow-ice growth (m) + snoice , & ! snow-ice growth (m) + ice_freeboard, & ! ice freeboard + evap_ice, & ! evaporation over ice only + evap_snow ! evaporation over snow only + real (kind=dbl_kind), dimension(nx_block,ny_block), optional, & intent(inout):: & @@ -797,12 +837,17 @@ subroutine merge_fluxes (nx_block, ny_block, & Cdn_atm_ratio_n (i,j)*aicen(i,j) fsurf (i,j) = fsurf (i,j) + fsurfn (i,j)*aicen(i,j) fcondtop (i,j) = fcondtop(i,j) + fcondtopn(i,j)*aicen(i,j) + fcondbot (i,j) = fcondbot(i,j) + fcondbotn(i,j)*aicen(i,j) fsens (i,j) = fsens (i,j) + fsensn (i,j)*aicen(i,j) flat (i,j) = flat (i,j) + flatn (i,j)*aicen(i,j) fswabs (i,j) = fswabs (i,j) + fswabsn (i,j)*aicen(i,j) flwout (i,j) = flwout (i,j) & + (flwoutn(i,j) - (c1-emissivity)*flw(i,j))*aicen(i,j) evap (i,j) = evap (i,j) + evapn (i,j)*aicen(i,j) + evap_ice (i,j) = evap_ice(i,j) + evapn_ice(i,j)*aicen(i,j) + evap_snow (i,j) = evap_snow(i,j) + evapn_snow(i,j)*aicen(i,j) + ice_freeboard (i,j) = ice_freeboard(i,j) + & + ice_freeboardn(i,j)*aicen(i,j) Tref (i,j) = Tref (i,j) + Trefn (i,j)*aicen(i,j) Qref (i,j) = Qref (i,j) + Qrefn (i,j)*aicen(i,j) if (present(Urefn) .and. present(Uref)) then @@ -843,6 +888,7 @@ subroutine scale_fluxes (nx_block, ny_block, & fsens, flat, & fswabs, flwout, & evap, & + evap_ice, evap_snow,& Tref, Qref, & fresh, fsalt, & fhocn, fswthru, & @@ -882,6 +928,8 @@ subroutine scale_fluxes (nx_block, ny_block, & fswabs , & ! shortwave absorbed heat flx (W/m**2) flwout , & ! upwd lw emitted heat flx (W/m**2) evap , & ! evaporation (kg/m2/s) + evap_ice, & ! evaporation over ice only (kg/m2/s) + evap_snow,& ! evaporation over snow only (kg/m2/s) Tref , & ! air tmp reference level (K) Qref , & ! air sp hum reference level (kg/kg) fresh , & ! fresh water flux to ocean (kg/m2/s) @@ -932,6 +980,8 @@ subroutine scale_fluxes (nx_block, ny_block, & fswabs (i,j) = fswabs (i,j) * ar flwout (i,j) = flwout (i,j) * ar evap (i,j) = evap (i,j) * ar + evap_ice(i,j) = evap_ice(i,j) * ar + evap_snow(i,j) = evap_snow(i,j) * ar Tref (i,j) = Tref (i,j) * ar Qref (i,j) = Qref (i,j) * ar if (present(Uref)) then diff --git a/source/ice_history.F90 b/source/ice_history.F90 index cd712630..f37be927 100755 --- a/source/ice_history.F90 +++ b/source/ice_history.F90 @@ -214,6 +214,8 @@ subroutine init_hist (dt) ! 
call broadcast_scalar (f_example, master_task) call broadcast_scalar (f_hi, master_task) call broadcast_scalar (f_hs, master_task) + call broadcast_scalar (f_snowfrac, master_task) + call broadcast_scalar (f_snowfracn, master_task) call broadcast_scalar (f_Tsfc, master_task) call broadcast_scalar (f_aice, master_task) call broadcast_scalar (f_uvel, master_task) @@ -222,6 +224,7 @@ subroutine init_hist (dt) call broadcast_scalar (f_vatm, master_task) call broadcast_scalar (f_sice, master_task) call broadcast_scalar (f_fswdn, master_task) + call broadcast_scalar (f_fswup, master_task) call broadcast_scalar (f_flwdn, master_task) call broadcast_scalar (f_snow, master_task) call broadcast_scalar (f_snow_ai, master_task) @@ -241,6 +244,10 @@ subroutine init_hist (dt) call broadcast_scalar (f_alidr, master_task) call broadcast_scalar (f_alvdf, master_task) call broadcast_scalar (f_alidf, master_task) + call broadcast_scalar (f_alvdr_ai, master_task) + call broadcast_scalar (f_alidr_ai, master_task) + call broadcast_scalar (f_alvdf_ai, master_task) + call broadcast_scalar (f_alidf_ai, master_task) call broadcast_scalar (f_albice, master_task) call broadcast_scalar (f_albsno, master_task) call broadcast_scalar (f_albpnd, master_task) @@ -253,6 +260,8 @@ subroutine init_hist (dt) call broadcast_scalar (f_flwup_ai, master_task) call broadcast_scalar (f_evap, master_task) call broadcast_scalar (f_evap_ai, master_task) + call broadcast_scalar (f_evap_ice_ai, master_task) + call broadcast_scalar (f_evap_snow_ai, master_task) call broadcast_scalar (f_Tair, master_task) call broadcast_scalar (f_Tref, master_task) call broadcast_scalar (f_Qref, master_task) @@ -299,6 +308,61 @@ subroutine init_hist (dt) call broadcast_scalar (f_frz_onset, master_task) call broadcast_scalar (f_aisnap, master_task) call broadcast_scalar (f_hisnap, master_task) + call broadcast_scalar (f_sithick, master_task) + call broadcast_scalar (f_siage, master_task) + call broadcast_scalar (f_sisnconc, master_task) + call broadcast_scalar (f_sisnthick, master_task) + call broadcast_scalar (f_sitemptop, master_task) + call broadcast_scalar (f_sitempsnic, master_task) + call broadcast_scalar (f_sitempbot, master_task) + call broadcast_scalar (f_siu, master_task) + call broadcast_scalar (f_siv, master_task) + call broadcast_scalar (f_sidmasstranx, master_task) + call broadcast_scalar (f_sidmasstrany, master_task) + call broadcast_scalar (f_sifb, master_task) + call broadcast_scalar (f_sistrxdtop, master_task) + call broadcast_scalar (f_sistrydtop, master_task) + call broadcast_scalar (f_sistrxubot, master_task) + call broadcast_scalar (f_sistryubot, master_task) + call broadcast_scalar (f_siforcetiltx, master_task) + call broadcast_scalar (f_siforcetilty, master_task) + call broadcast_scalar (f_siforcecoriolx, master_task) + call broadcast_scalar (f_siforcecorioly, master_task) + call broadcast_scalar (f_siforceintstrx, master_task) + call broadcast_scalar (f_siforceintstry, master_task) + call broadcast_scalar (f_sicompstren, master_task) + call broadcast_scalar (f_sispeed, master_task) + call broadcast_scalar (f_sialb, master_task) + call broadcast_scalar (f_sidivvel, master_task) + call broadcast_scalar (f_sihc, master_task) + call broadcast_scalar (f_sisnhc, master_task) + call broadcast_scalar (f_sidconcth, master_task) + call broadcast_scalar (f_sidconcdyn, master_task) + call broadcast_scalar (f_sidmassth, master_task) + call broadcast_scalar (f_sidmassdyn, master_task) + call broadcast_scalar (f_sidmassgrowthwat, master_task) + 
call broadcast_scalar (f_sidmassgrowthbot, master_task) + call broadcast_scalar (f_sidmasssi, master_task) + call broadcast_scalar (f_sidmassevapsubl, master_task) + call broadcast_scalar (f_sidmassmelttop, master_task) + call broadcast_scalar (f_sidmassmeltbot, master_task) + call broadcast_scalar (f_sidmasslat, master_task) + call broadcast_scalar (f_sndmasssnf, master_task) + call broadcast_scalar (f_sndmassmelt, master_task) + call broadcast_scalar (f_siflswdtop, master_task) + call broadcast_scalar (f_siflswutop, master_task) + call broadcast_scalar (f_siflswdbot, master_task) + call broadcast_scalar (f_sifllwdtop, master_task) + call broadcast_scalar (f_sifllwutop, master_task) + call broadcast_scalar (f_siflsenstop, master_task) + call broadcast_scalar (f_siflsensupbot, master_task) + call broadcast_scalar (f_sifllatstop, master_task) + call broadcast_scalar (f_siflcondtop, master_task) + call broadcast_scalar (f_siflcondbot, master_task) + call broadcast_scalar (f_sipr, master_task) + call broadcast_scalar (f_siflsaltbot, master_task) + call broadcast_scalar (f_siflfwbot, master_task) + call broadcast_scalar (f_sisaltmass, master_task) call broadcast_scalar (f_aicen, master_task) call broadcast_scalar (f_vicen, master_task) call broadcast_scalar (f_vsnon, master_task) @@ -355,6 +419,11 @@ subroutine init_hist (dt) "grid cell mean snow thickness", & "snow volume per unit grid cell area", c1, c0, & ns1, f_hs) + call define_hist_field(n_snowfrac,"snowfrac","1",tstr2D, tcstr, & + "grid cell mean snow fraction", & + "snow fraction per unit grid cell area", c1, c0, & + ns1, f_snowfrac) + call define_hist_field(n_Tsfc,"Tsfc","C",tstr2D, tcstr, & "snow/ice surface temperature", & @@ -396,6 +465,11 @@ subroutine init_hist (dt) "positive downward", c1, c0, & ns1, f_fswdn) + call define_hist_field(n_fswup,"fswup","W/m^2",tstr2D, tcstr, & + "upward solar flux", & + "positive upward", c1, c0, & + ns1, f_fswup) + call define_hist_field(n_flwdn,"flwdn","W/m^2",tstr2D, tcstr, & "down longwave flux", & "positive downward", c1, c0, & @@ -421,7 +495,7 @@ subroutine init_hist (dt) "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_rain_ai) - call define_hist_field(n_sst,"sst","C",tstr2D, tcstr, & + call define_hist_field(n_sst,"sst","degC",tstr2D, tcstr, & "sea surface temperature", & "none", c1, c0, & ns1, f_sst) @@ -466,11 +540,6 @@ subroutine init_hist (dt) "weighted by ice area", c1, c0, & ns1, f_fswabs_ai) -! call define_hist_field(n_albsni,"albsni","%",tstr2D, tcstr, & -! "snow/ice broad band albedo", & -! "scaled (divided) by aice", c100, c0, & -! 
ns1, f_albsni) - call define_hist_field(n_albsni,"albsni","%",tstr2D, tcstr, & "snow/ice broad band albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & @@ -496,6 +565,26 @@ subroutine init_hist (dt) "scaled (divided) by aice", c100, c0, & ns1, f_alidf) + call define_hist_field(n_alvdr_ai,"alvdr_ai","%",tstr2D, tcstr, & + "visible direct albedo", & + " ", c100, c0, & + ns1, f_alvdr_ai) + + call define_hist_field(n_alidr_ai,"alidr_ai","%",tstr2D, tcstr, & + "near IR direct albedo", & + " ", c100, c0, & + ns1, f_alidr_ai) + + call define_hist_field(n_alvdf_ai,"alvdf_ai","%",tstr2D, tcstr, & + "visible diffuse albedo", & + " ", c100, c0, & + ns1, f_alvdf_ai) + + call define_hist_field(n_alidf_ai,"alidf_ai","%",tstr2D, tcstr, & + "near IR diffuse albedo", & + " ", c100, c0, & + ns1, f_alidf_ai) + call define_hist_field(n_albice,"albice","%",tstr2D, tcstr, & "bare ice albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & @@ -556,12 +645,22 @@ subroutine init_hist (dt) "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_evap_ai) - call define_hist_field(n_Tair,"Tair","C",tstr2D, tcstr, & + call define_hist_field(n_evap_ice_ai,"evap_ice_ai","cm/day",tstr2D, tcstr, & + "evaporative water flux over ice only", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_evap_ice_ai) + + call define_hist_field(n_evap_snow_ai,"evap_snow_ai","cm/day",tstr2D, tcstr, & + "evaporative water flux over snow only", & + "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & + ns1, f_evap_snow_ai) + + call define_hist_field(n_Tair,"Tair","degC",tstr2D, tcstr, & "air temperature", & "none", c1, -Tffresh, & ns1, f_Tair) - call define_hist_field(n_Tref,"Tref","C",tstr2D, tcstr, & + call define_hist_field(n_Tref,"Tref","degC",tstr2D, tcstr, & "2m reference temperature", & "none", c1, -Tffresh, & ns1, f_Tref) @@ -881,12 +980,287 @@ subroutine init_hist (dt) "first-year ice area", & "weighted by ice area", c1, c0, & ns1, f_FY) + ! 
CMIP6 2D variables + + call define_hist_field(n_sithick,"sithick","m",tstr2D, tcstr, & + "sea ice thickness", & + "volume divided by area", c1, c0, & + ns1, f_sithick) + + call define_hist_field(n_siage,"siage","s",tstr2D, tcstr, & + "sea ice age", & + "none", c1, c0, & + ns1, f_siage) + call define_hist_field(n_sifb,"sifb","m",tstr2D, tcstr, & + "sea ice freeboard", & + "none", c1, c0, & + ns1, f_sifb) + call define_hist_field(n_sisnconc,"sisnconc","1",tstr2D, tcstr, & + "snow area fraction", & + "none", c1, c0, & + ns1, f_sisnconc) + call define_hist_field(n_sisnthick,"sisnthick","m",tstr2D, tcstr, & + "sea ice snow thickness", & + "snow volume divided by area", c1, c0, & + ns1, f_sisnthick) + call define_hist_field(n_sitemptop,"sitemptop","degC",tstr2D, tcstr, & + "sea ice surface temperature", & + "none", c1, c0, & + ns1, f_sitemptop) + call define_hist_field(n_sitempsnic,"sitempsnic","degC",tstr2D, tcstr, & + "snow ice interface temperature", & + "surface temperature when no snow present", c1, c0, & + ns1, f_sitempsnic) + call define_hist_field(n_sitempbot,"sitempbot","degK",tstr2D, tcstr, & + "sea ice bottom temperature", & + "none", c1, c0, & + ns1, f_sitempbot) + call define_hist_field(n_siu,"siu","m/s",ustr2D, ucstr, & + "ice x velocity component", & + "none", c1, c0, & + ns1, f_siu) + call define_hist_field(n_siv,"siv","m/s",ustr2D, ucstr, & + "ice y velocity component", & + "none", c1, c0, & + ns1, f_siv) + + call define_hist_field(n_sidmasstranx,"sidmasstranx","kg/s",ustr2D, ucstr, & + "x component of snow and sea ice mass transport", & + "none", c1, c0, & + ns1, f_sidmasstranx) + + call define_hist_field(n_sidmasstrany,"sidmasstrany","kg/s",ustr2D, ucstr, & + "y component of snow and sea ice mass transport", & + "none", c1, c0, & + ns1, f_sidmasstrany) + + call define_hist_field(n_sistrxdtop,"sistrxdtop","N m^-2",ustr2D, ucstr, & + "x component of atmospheric stress on sea ice", & + "none", c1, c0, & + ns1, f_sistrxdtop) + + call define_hist_field(n_sistrydtop,"sistrydtop","N m^-2",ustr2D, ucstr, & + "y component of atmospheric stress on sea ice", & + "none", c1, c0, & + ns1, f_sistrydtop) + + + call define_hist_field(n_sistrxubot,"sistrxubot","N m^-2",ustr2D, ucstr, & + "x component of ocean stress on sea ice", & + "none", c1, c0, & + ns1, f_sistrxubot) + + call define_hist_field(n_sistryubot,"sistryubot","N m^-2",ustr2D, ucstr, & + "y component of ocean stress on sea ice", & + "none", c1, c0, & + ns1, f_sistryubot) + + call define_hist_field(n_siforcetiltx,"siforcetiltx","N m^-2",ustr2D, ucstr, & + "x component of sea surface tilt force", & + "none", c1, c0, & + ns1, f_siforcetiltx) + + call define_hist_field(n_siforcetilty,"siforcetilty","N m^-2",ustr2D, ucstr, & + "y component of sea surface tilt force", & + "none", c1, c0, & + ns1, f_siforcetilty) + + call define_hist_field(n_siforcecoriolx,"siforcecoriolx","N m^-2",ustr2D, ucstr, & + "x component of Coriolis force", & + "none", c1, c0, & + ns1, f_siforcecoriolx) + + call define_hist_field(n_siforcecorioly,"siforcecorioly","N m^-2",ustr2D, ucstr, & + "y component of Coriolis force", & + "none", c1, c0, & + ns1, f_siforcecorioly) + + call define_hist_field(n_siforceintstrx,"siforceintstrx","N m^-2",ustr2D, ucstr, & + "x component of internal ice stress force", & + "none", c1, c0, & + ns1, f_siforceintstrx) + + call define_hist_field(n_siforceintstry,"siforceintstry","N m^-2",ustr2D, ucstr, & + "y component of internal ice stress force", & + "none", c1, c0, & + ns1, f_siforceintstry) + + call 
define_hist_field(n_sicompstren,"sicompstren","N/m",ustr2D, ucstr, & + "compressive sea ice strength", & + "none", c1, c0, & + ns1, f_sicompstren) + + call define_hist_field(n_sidivvel,"sidivvel","1/s",ustr2D, ucstr, & + "divergence of the sea ice velocity field (ice area weighted)", & + "none", c1, c0, & + ns1, f_sidivvel) + + call define_hist_field(n_sispeed,"sispeed","m/s",ustr2D, ucstr, & + "ice speed", & + "none", c1, c0, & + ns1, f_sispeed) + + call define_hist_field(n_sialb,"sialb","1",tstr2D, tcstr, & + "sea ice albedo", & + "none", c1, c0, & + ns1, f_sialb) + + call define_hist_field(n_sihc,"sihc","J m^-2",tstr2D, tcstr, & + "sea ice heat content", & + "none", c1, c0, & + ns1, f_sihc) + + call define_hist_field(n_sisnhc,"sisnhc","J m^-2",tstr2D, tcstr, & + "snow heat content", & + "none", c1, c0, & + ns1, f_sisnhc) + + + + call define_hist_field(n_sidconcth,"sidconcth","1/s",tstr2D, tcstr, & + "sea ice area change from thermodynamics", & + "none", c1, c0, & + ns1, f_sidconcth) + call define_hist_field(n_sidconcdyn,"sidconcdyn","1/s",tstr2D, tcstr, & + "sea ice area change from dynamics", & + "none", c1, c0, & + ns1, f_sidconcdyn) + + call define_hist_field(n_sidmassth,"sidmassth","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from thermodynamics", & + "none", c1, c0, & + ns1, f_sidmassth) + + call define_hist_field(n_sidmassdyn,"sidmassdyn","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from dynamics", & + "none", c1, c0, & + ns1, f_sidmassdyn) + + call define_hist_field(n_sidmassgrowthwat,"sidmassgrowthwat","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from frazil growth", & + "none", c1, c0, & + ns1, f_sidmassgrowthwat) + + call define_hist_field(n_sidmassgrowthbot,"sidmassgrowthbot","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from bottom growth", & + "none", c1, c0, & + ns1, f_sidmassgrowthbot) + + call define_hist_field(n_sidmasssi,"sidmasssi","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from snow ice conversion", & + "none", c1, c0, & + ns1, f_sidmasssi) + + call define_hist_field(n_sidmassevapsubl,"sidmassevapsubl","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from evaporation and sublimation", & + "none", c1, c0, & + ns1, f_sidmassevapsubl) + + call define_hist_field(n_sidmassmelttop,"sidmassmelttop","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from top ice melt", & + "none", c1, c0, & + ns1, f_sidmassmelttop) + + call define_hist_field(n_sidmassmeltbot,"sidmassmeltbot","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from bottom ice melt", & + "none", c1, c0, & + ns1, f_sidmassmeltbot) + + call define_hist_field(n_sidmasslat,"sidmasslat","kg m^-2 s^-1",tstr2D, tcstr, & + "sea ice mass change from lateral ice melt", & + "none", c1, c0, & + ns1, f_sidmasslat) + + call define_hist_field(n_sndmasssnf,"sndmasssnf","kg m^-2 s^-1",tstr2D, tcstr, & + "snow mass change from snow fall", & + "none", c1, c0, & + ns1, f_sndmasssnf) + + call define_hist_field(n_sndmassmelt,"sndmassmelt","kg m^-2 s^-1",tstr2D, tcstr, & + "snow mass change from melt", & + "none", c1, c0, & + ns1, f_sndmassmelt) + + call define_hist_field(n_siflswdtop,"siflswdtop","W/m^2",tstr2D, tcstr, & + "down shortwave flux over sea ice", & + "positive downward", c1, c0, & + ns1, f_siflswdtop) + + call define_hist_field(n_siflswutop,"siflswutop","W/m^2",tstr2D, tcstr, & + "upward shortwave flux over sea ice", & + "positive downward", c1, c0, & + ns1, f_siflswutop) + + call define_hist_field(n_siflswdbot,"siflswdbot","W/m^2",tstr2D, tcstr, & + "down 
shortwave flux at bottom of ice", & + "positive downward", c1, c0, & + ns1, f_siflswdbot) + + call define_hist_field(n_sifllwdtop,"sifllwdtop","W/m^2",tstr2D, tcstr, & + "down longwave flux over sea ice", & + "positive downward", c1, c0, & + ns1, f_sifllwdtop) + + call define_hist_field(n_sifllwutop,"sifllwutop","W/m^2",tstr2D, tcstr, & + "upward longwave flux over sea ice", & + "positive downward", c1, c0, & + ns1, f_sifllwutop) + + call define_hist_field(n_siflsenstop,"siflsenstop","W/m^2",tstr2D, tcstr, & + "sensible heat flux over sea ice", & + "positive downward", c1, c0, & + ns1, f_siflsenstop) + + call define_hist_field(n_siflsensupbot,"siflsensupbot","W/m^2",tstr2D, tcstr, & + "sensible heat flux at bottom of sea ice", & + "positive downward", c1, c0, & + ns1, f_siflsensupbot) + + + call define_hist_field(n_sifllatstop,"sifllatstop","W/m^2",tstr2D, tcstr, & + "latent heat flux over sea ice", & + "positive downward", c1, c0, & + ns1, f_sifllatstop) + + call define_hist_field(n_siflcondtop,"siflcondtop","W/m^2",tstr2D, tcstr, & + "conductive heat flux at top of sea ice", & + "positive downward", c1, c0, & + ns1, f_siflcondtop) + + call define_hist_field(n_siflcondbot,"siflcondbot","W/m^2",tstr2D, tcstr, & + "conductive heat flux at bottom of sea ice", & + "positive downward", c1, c0, & + ns1, f_siflcondbot) + + call define_hist_field(n_sipr,"sipr","kg m^-2 s^-1",tstr2D, tcstr, & + "rainfall over sea ice", & + "none", c1, c0, & + ns1, f_sipr) + + + call define_hist_field(n_siflsaltbot,"siflsaltbot","kg m^-2 s^-1",tstr2D, tcstr, & + "salt flux from sea ice", & + "positive downward", c1, c0, & + ns1, f_siflsaltbot) + + call define_hist_field(n_siflfwbot,"siflfwbot","kg m^-2 s^-1",tstr2D, tcstr, & + "fresh water flux from sea ice", & + "positive downward", c1, c0, & + ns1, f_siflfwbot) + + + call define_hist_field(n_sisaltmass,"sisaltmass","kg m^-2",tstr2D,& + tcstr, "mass of salt in sea ice (for ocean fluxes)",& + + "none", c1, c0, & + ns1, f_sisaltmass) endif ! if (histfreq(ns1) /= 'x') then enddo ! ns1 ! other 2D history variables + ! 
mechanical redistribution call init_hist_mechred_2D @@ -916,6 +1290,12 @@ subroutine init_hist (dt) "snow depth on ice, categories","volume per unit area of snow", c1, c0, & ns1, f_vsnon) + call define_hist_field(n_snowfracn,"snowfracn","1",tstr3Dc, tcstr, & + "category mean snow fraction", & + "snow fraction per unit grid cell area", c1, c0, & + ns1, f_snowfracn) + + call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & "net surface heat flux, categories","weighted by ice area", c1, c0, & ns1, f_fsurfn_ai) @@ -937,7 +1317,7 @@ subroutine init_hist (dt) ns1, f_fsensn_ai) call define_hist_field(n_Tn_top,"Tn_top","K",tstr3Dc, tcstr, & - "temperature of the top layer (snow or ice), categories","multilayer scheme", c1, c0, & + "temperature of the top layer (snow or ice), categories","multilayer scheme", c1, c0, & ns1, f_Tn_top) call define_hist_field(n_keffn_top,"keffn_top","W/m^2/K",tstr3Dc, tcstr, & @@ -981,7 +1361,7 @@ subroutine init_hist (dt) do ns1 = 1, nstreams if (histfreq(ns1) /= 'x') then - call define_hist_field(n_Tinz,"Tinz","C",tstr4Di, tcstr, & + call define_hist_field(n_Tinz,"Tinz","degC",tstr4Di, tcstr, & "ice internal temperatures on CICE grid", & "vertical profile", c1, c0, & ns1, f_Tinz) @@ -997,7 +1377,7 @@ subroutine init_hist (dt) do ns1 = 1, nstreams if (histfreq(ns1) /= 'x') then - call define_hist_field(n_Tsnz,"Tsnz","C",tstr4Ds, tcstr, & + call define_hist_field(n_Tsnz,"Tsnz","degC",tstr4Ds, tcstr, & "snow internal temperatures", & "vertical profile", c1, c0, & ns1, f_Tsnz) @@ -1009,10 +1389,6 @@ subroutine init_hist (dt) if (allocated(Tinz4d)) deallocate(Tinz4d) allocate(Tinz4d(nx_block,ny_block,nzilyr,ncat_hist)) endif - if (f_Sinz (1:1) /= 'x') then - if (allocated(Sinz4d)) deallocate(Sinz4d) - allocate(Sinz4d(nx_block,ny_block,nzilyr,ncat_hist)) - endif if (f_Tsnz (1:1) /= 'x') then if (allocated(Tsnz4d)) deallocate(Tsnz4d) allocate(Tsnz4d(nx_block,ny_block,nzslyr,ncat_hist)) @@ -1144,10 +1520,14 @@ end subroutine init_hist subroutine accum_hist (dt) use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_domain_size, only: nilyr, nslyr + use ice_fileunits, only: nu_diag + use ice_constants, only: c0, c1, p25, puny, secday, depressT, & - awtvdr, awtidr, awtvdf, awtidf, Lfresh, rhos, cp_ice, spval + awtvdr, awtidr, awtvdf, awtidf, Lfresh, rhoi, rhos, cp_ice, & + spval_dbl, Tffresh, ice_ref_salinity, c1000 use ice_domain, only: blocks_ice, nblocks - use ice_grid, only: tmask, lmask_n, lmask_s + use ice_grid, only: tmask, lmask_n, lmask_s, tarea, HTE, HTN #ifdef AusCOM use ice_grid, only: umask !ars599: 27032014 @@ -1163,7 +1543,7 @@ subroutine accum_hist (dt) new_month use ice_dyn_eap, only: a11, a12, e11, e12, e22, s11, s12, s22, & yieldstress11, yieldstress12, yieldstress22 - use ice_dyn_shared, only: kdyn, principal_stress + use ice_dyn_shared, only: kdyn, principal_stress,a_min use ice_flux, only: fsw, flw, fsnow, frain, sst, sss, uocn, vocn, & frzmlt_init, fswfac, fswabs, fswthru, alvdr, alvdf, alidr, alidf, & albice, albsno, albpnd, coszen, flat, fsens, flwout, evap, & @@ -1172,13 +1552,16 @@ subroutine accum_hist (dt) fhocn, fhocn_ai, uatm, vatm, & fswthru_ai, strairx, strairy, strtltx, strtlty, strintx, strinty, & strocnx, strocny, fm, daidtt, dvidtt, dvsdtt, daidtd, dvidtd, dvsdtd, fsurf, & - fcondtop, fsurfn, fcondtopn, flatn, fsensn, albcnt, prs_sig, & + fcondtop, fsurfn, fcondtopn, & + fcondbot, fcondbotn, ice_freeboard, & + flatn, fsensn, albcnt, prs_sig, & stressp_1, stressm_1, stress12_1, & stressp_2, stressm_2, 
stress12_2, & stressp_3, stressm_3, stress12_3, & stressp_4, stressm_4, stress12_4, sig1, sig2, & mlt_onset, frz_onset, dagedtt, dagedtd, fswint_ai, Tn_top, & - keffn_top + keffn_top, snowfrac, snowfracn, alvdr_ai, alvdf_ai, alidr_ai, & + alidf_ai, evap_snow, evap_ice use ice_atmo, only: formdrag use ice_history_shared ! almost everything use ice_history_write, only: ice_write_hist @@ -1186,8 +1569,11 @@ subroutine accum_hist (dt) use ice_history_mechred, only: accum_hist_mechred use ice_history_pond, only: accum_hist_pond use ice_history_drag, only: accum_hist_drag + use ice_itd, only: hs_min, aicenmin + use ice_meltpond_cesm, only: hs0 use ice_state ! almost everything - use ice_therm_shared, only: calculate_Tin_from_qin, Tmlt, ktherm + use ice_therm_shared, only: calculate_Tin_from_qin, Tmlt, ktherm, & + Ti_bot, Tsnic use ice_therm_mushy, only: temperature_mush, temperature_snow use ice_timers, only: ice_timer_start, ice_timer_stop, timer_readwrite use ice_zbgc_shared, only: skl_bgc @@ -1209,10 +1595,19 @@ subroutine accum_hist (dt) real (kind=dbl_kind) :: & qn , & ! temporary variable for enthalpy + hs , & ! temporary variable for snow depth Tmlts ! temporary variable for melting temperature + real (kind=dbl_kind) :: & + area_threshold ! min time mean ice area allowed for dividing + ! (maximum of a_min and aicenmin - + ! dynamic + ! and thermodynamic ice areas) + + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - worka, workb + worka, workb, ravgip type (block) :: & this_block ! block information for current block @@ -1281,7 +1676,9 @@ subroutine accum_hist (dt) !--------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & - !$OMP k,n,qn,ns,worka,workb) + !$OMP k,n,qn,ns,hs,worka,workb,Tinz4d,Sinz4d,Tsnz4d) + + do iblk = 1, nblocks this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo @@ -1289,7 +1686,6 @@ subroutine accum_hist (dt) jlo = this_block%jlo jhi = this_block%jhi - workb(:,:) = aice_init(:,:,iblk) ! if (f_example(1:1) /= 'x') & ! 
call accum_hist_field(n_example,iblk, vice(:,:,iblk), a2D) @@ -1297,6 +1693,10 @@ subroutine accum_hist (dt) call accum_hist_field(n_hi, iblk, vice(:,:,iblk), a2D) if (f_hs (1:1) /= 'x') & call accum_hist_field(n_hs, iblk, vsno(:,:,iblk), a2D) + if (f_sifb (1:1) /= 'x') & + call accum_hist_field(n_sifb, iblk, ice_freeboard(:,:,iblk), a2D) + if (f_snowfrac(1:1) /= 'x') & + call accum_hist_field(n_snowfrac, iblk, snowfrac(:,:,iblk), a2D) if (f_Tsfc (1:1) /= 'x') & call accum_hist_field(n_Tsfc, iblk, trcr(:,:,nt_Tsfc,iblk), a2D) if (f_aice (1:1) /= 'x') & @@ -1325,6 +1725,9 @@ subroutine accum_hist (dt) if (f_fswdn (1:1) /= 'x') & call accum_hist_field(n_fswdn, iblk, fsw(:,:,iblk), a2D) + + workb(:,:) = aice_init(:,:,iblk) + if (f_flwdn (1:1) /= 'x') & call accum_hist_field(n_flwdn, iblk, flw(:,:,iblk), a2D) if (f_snow (1:1) /= 'x') & @@ -1355,19 +1758,30 @@ subroutine accum_hist (dt) if (f_fswint_ai (1:1) /= 'x') & call accum_hist_field(n_fswint_ai, iblk, fswint_ai(:,:,iblk), a2D) + workb(:,:) = aice(:,:,iblk) + if (f_fswabs_ai(1:1)/= 'x') & call accum_hist_field(n_fswabs_ai, iblk, fswabs(:,:,iblk)*workb(:,:), a2D) + if (f_fswup(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = (fsw(i,j,iblk)-fswabs(i,j,iblk) & + * aice(i,j,iblk)/aice_init(i,j,iblk)) + endif + enddo + enddo + call accum_hist_field(n_fswup, iblk, worka(:,:), a2D) + endif + if (f_albsni (1:1) /= 'x') & call accum_hist_field(n_albsni, iblk, & (awtvdr*alvdr(:,:,iblk) & + awtidr*alidr(:,:,iblk) & + awtvdf*alvdf(:,:,iblk) & - + awtidf*alidf(:,:,iblk))*aice(:,:,iblk), a2D) -! awtvdr*alvdr(:,:,iblk) & -! + awtidr*alidr(:,:,iblk) & -! + awtvdf*alvdf(:,:,iblk) & -! + awtidf*alidf(:,:,iblk), a2D) + + awtidf*alidf(:,:,iblk))*workb(:,:), a2D) if (f_alvdr (1:1) /= 'x') & call accum_hist_field(n_alvdr, iblk, alvdr(:,:,iblk), a2D) if (f_alidr (1:1) /= 'x') & @@ -1377,6 +1791,15 @@ subroutine accum_hist (dt) if (f_alidf (1:1) /= 'x') & call accum_hist_field(n_alidf, iblk, alidf(:,:,iblk), a2D) + if (f_alvdr_ai (1:1) /= 'x') & + call accum_hist_field(n_alvdr_ai, iblk, alvdr_ai(:,:,iblk), a2D) + if (f_alidr_ai (1:1) /= 'x') & + call accum_hist_field(n_alidr_ai, iblk, alidr_ai(:,:,iblk), a2D) + if (f_alvdf_ai (1:1) /= 'x') & + call accum_hist_field(n_alvdf_ai, iblk, alvdf_ai(:,:,iblk), a2D) + if (f_alidf_ai (1:1) /= 'x') & + call accum_hist_field(n_alidf_ai, iblk, alidf_ai(:,:,iblk), a2D) + if (f_albice (1:1) /= 'x') & call accum_hist_field(n_albice, iblk, albice(:,:,iblk), a2D) if (f_albsno (1:1) /= 'x') & @@ -1402,13 +1825,19 @@ subroutine accum_hist (dt) call accum_hist_field(n_evap, iblk, evap(:,:,iblk), a2D) if (f_evap_ai(1:1) /= 'x') & call accum_hist_field(n_evap_ai,iblk, evap(:,:,iblk)*workb(:,:), a2D) + if (f_evap_ice_ai(1:1) /= 'x') & + call accum_hist_field(n_evap_ice_ai,iblk, evap_ice(:,:,iblk)*workb(:,:), a2D) + if (f_evap_snow_ai(1:1) /= 'x') & + call accum_hist_field(n_evap_snow_ai,iblk, evap_snow(:,:,iblk)*workb(:,:), a2D) if (f_Tair (1:1) /= 'x') & call accum_hist_field(n_Tair, iblk, Tair(:,:,iblk), a2D) if (f_Tref (1:1) /= 'x') & - call accum_hist_field(n_Tref, iblk, Tref(:,:,iblk), a2D) + call accum_hist_field(n_Tref, iblk, & + Tref(:,:,iblk)*workb(:,:), a2D) if (f_Qref (1:1) /= 'x') & - call accum_hist_field(n_Qref, iblk, Qref(:,:,iblk), a2D) + call accum_hist_field(n_Qref, iblk, & + Qref(:,:,iblk)*workb(:,:), a2D) if (f_congel (1:1) /= 'x') & call accum_hist_field(n_congel, iblk, congel(:,:,iblk), a2D) if (f_frazil (1:1) /= 'x') & @@ -1507,191 
+1936,864 @@ subroutine accum_hist (dt) if (f_icepresent(1:1) /= 'x') then worka(:,:) = c0 + area_threshold = max(a_min,aicenmin) do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) worka(i,j) = c1 + if (aice(i,j,iblk) > area_threshold) worka(i,j) = c1 enddo enddo call accum_hist_field(n_icepresent, iblk, worka(:,:), a2D) endif - ! 3D category fields - if (f_aicen (1:1) /= 'x') & - call accum_hist_field(n_aicen-n2D, iblk, ncat_hist, & - aicen(:,:,1:ncat_hist,iblk), a3Dc) - if (f_vicen (1:1) /= 'x') & - call accum_hist_field(n_vicen-n2D, iblk, ncat_hist, & - vicen(:,:,1:ncat_hist,iblk), a3Dc) - if (f_vsnon (1:1) /= 'x') & - call accum_hist_field(n_vsnon-n2D, iblk, ncat_hist, & - vsnon(:,:,1:ncat_hist,iblk), a3Dc) - if (f_Tn_top (1:1) /= 'x') & - call accum_hist_field(n_Tn_top-n2D, iblk, ncat_hist, & - Tn_top(:,:,1:ncat_hist,iblk), a3Dc) - if (f_keffn_top (1:1) /= 'x') & - call accum_hist_field(n_keffn_top-n2D, iblk, ncat_hist, & - keffn_top(:,:,1:ncat_hist,iblk), a3Dc) - if (f_fsurfn_ai (1:1) /= 'x') & - call accum_hist_field(n_fsurfn_ai-n2D, iblk, ncat_hist, & - fsurfn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - if (f_fcondtopn_ai (1:1) /= 'x') & - call accum_hist_field(n_fcondtopn_ai-n2D, iblk, ncat_hist, & - fcondtopn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - if (f_flatn_ai (1:1) /= 'x') & - call accum_hist_field(n_flatn_ai-n2D, iblk, ncat_hist, & - flatn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - if (f_fsensn_ai (1:1) /= 'x') & - call accum_hist_field(n_fsensn_ai-n2D, iblk, ncat_hist, & - fsensn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - ! Calculate surface heat flux that causes melt (calculated by the - ! atmos in HadGEM3 so needed for checking purposes) - if (f_fmelttn_ai (1:1) /= 'x') & - call accum_hist_field(n_fmelttn_ai-n2D, iblk, ncat_hist, & - max(fsurfn(:,:,1:ncat_hist,iblk) - fcondtopn(:,:,1:ncat_hist,iblk),c0) & - *aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - -! example for 3D field (x,y,z) -! if (f_field3dz (1:1) /= 'x') & -! call accum_hist_field(n_field3dz-n3Dccum, iblk, nzilyr, & -! field3dz(:,:,1:nzilyr,iblk), a3Dz) + !2D CMIP6 fields - ! 4D category fields - if (f_Tinz (1:1) /= 'x') then - Tinz4d(:,:,:,:) = c0 - if (ktherm == 2) then - do n = 1, ncat_hist - do j = jlo, jhi - do i = ilo, ihi - do k = 1, nzilyr - Tinz4d(i,j,k,n) = temperature_mush( & - trcrn(i,j,nt_qice+k-1,n,iblk), trcrn(i,j,nt_sice+k-1,n,iblk)) - enddo - enddo - enddo - enddo - else - do n = 1, ncat_hist - do j = jlo, jhi - do i = ilo, ihi - do k = 1, nzilyr - qn = trcrn(i,j,nt_qice+k-1,n,iblk) -! 
Tinz4d(i,j,k,n) = calculate_Tin_from_qin(qn,Tmlt(k)) - Tmlts = -trcrn(i,j,nt_sice+k-1,n,iblk)*depressT - Tinz4d(i,j,k,n) = calculate_Tin_from_qin(qn,Tmlts) - enddo - enddo - enddo - enddo - endif - call accum_hist_field(n_Tinz-n3Dbcum, iblk, nzilyr, ncat_hist, & - Tinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) + if (f_sithick(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = vice(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sithick, iblk, worka(:,:), a2D) endif - if (f_Sinz (1:1) /= 'x') then - Sinz4d(:,:,:,:) = c0 - do n = 1, ncat_hist - do j = jlo, jhi - do i = ilo, ihi - if (vicen(i,j,n,iblk) > puny) then - Sinz4d(i,j,1:nzilyr,n) = trcrn(i,j,nt_sice:nt_sice+nzilyr-1,n,iblk) - endif - enddo - enddo - enddo - call accum_hist_field(n_Sinz-n3Dbcum, iblk, nzilyr, ncat_hist, & - Sinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) + + if (f_siage(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = aice(i,j,iblk)*trcr(i,j,nt_iage,iblk) + enddo + enddo + call accum_hist_field(n_siage, iblk, worka(:,:), a2D) endif - - if (f_Tsnz (1:1) /= 'x') then - Tsnz4d(:,:,:,:) = c0 - if (ktherm == 2) then - do n = 1, ncat_hist - do j = jlo, jhi - do i = ilo, ihi - do k = 1, nzslyr - qn = trcrn(i,j,nt_qsno+k-1,n,iblk) - Tsnz4d(i,j,k,n) = temperature_snow(trcrn(i,j,nt_qsno+k-1,n,iblk)) - enddo - enddo - enddo - enddo - else - do n = 1, ncat_hist - do j = jlo, jhi - do i = ilo, ihi - do k = 1, nzslyr - qn = trcrn(i,j,nt_qsno+k-1,n,iblk) - Tsnz4d(i,j,k,n) = (Lfresh + qn/rhos)/cp_ice - enddo - enddo - enddo - enddo - endif - call accum_hist_field(n_Tsnz-n4Dicum, iblk, nzslyr, ncat_hist, & - Tsnz4d(:,:,1:nzslyr,1:ncat_hist), a4Ds) + + + if (f_sisnconc(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = snowfrac(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sisnconc, iblk, worka(:,:), a2D) endif - - ! Calculate aggregate surface melt flux by summing category values - if (f_fmeltt_ai(1:1) /= 'x') then - do ns = 1, nstreams - if (n_fmeltt_ai(ns) /= 0) then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - do n=1,ncat_hist - worka(i,j) = worka(i,j) + a3Dc(i,j,n,n_fmelttn_ai(ns)-n2D,iblk) - enddo ! n - endif ! tmask - enddo ! i - enddo ! j - a2D(:,:,n_fmeltt_ai(ns),iblk) = worka(:,:) + + if (f_sisnthick(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny .and. snowfrac(i,j,iblk) > puny) & + worka(i,j) = vsno(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sisnthick, iblk, worka(:,:), a2D) endif - enddo - endif - !--------------------------------------------------------------- - ! accumulate other history output - !--------------------------------------------------------------- - ! mechanical redistribution - call accum_hist_mechred (iblk) + if (f_sitemptop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + worka(i,j) = aice(i,j,iblk)*trcr(i,j,nt_Tsfc,iblk) + enddo + enddo + call accum_hist_field(n_sitemptop, iblk, worka(:,:), a2D) + endif - ! melt ponds - if (tr_pond) call accum_hist_pond (iblk) - ! biogeochemistry - if (tr_aero .or. tr_brine .or. skl_bgc) call accum_hist_bgc (iblk) + if (f_sitempsnic(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (vsno(i,j,iblk) > puny .and. 
aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*Tsnic(i,j,iblk)/aice_init(i,j,iblk) + else + worka(i,j) = aice(i,j,iblk)*trcr(i,j,nt_Tsfc,iblk) + endif + enddo + enddo + call accum_hist_field(n_sitempsnic, iblk, worka(:,:), a2D) + endif - ! form drag - if (formdrag) call accum_hist_drag (iblk) + if (f_sitempbot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*(Ti_bot(i,j,iblk)+Tffresh) + enddo + enddo + call accum_hist_field(n_sitempbot, iblk, worka(:,:), a2D) + endif - enddo ! iblk - !$OMP END PARALLEL DO + if (f_siu(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = aice(i,j,iblk)*uvel(i,j,iblk) + enddo + enddo + call accum_hist_field(n_siu, iblk, worka(:,:), a2D) + endif - !--------------------------------------------------------------- - ! Write output files at prescribed intervals - !--------------------------------------------------------------- + if (f_siv(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = aice(i,j,iblk)*vvel(i,j,iblk) + enddo + enddo + call accum_hist_field(n_siv, iblk, worka(:,:), a2D) + endif - nstrm = nstreams - if (write_ic) nstrm = 1 - do ns = 1, nstrm - if (write_history(ns) .or. write_ic) then + if (f_sispeed(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) worka(i,j) = aice(i,j,iblk) & + * sqrt(uvel(i,j,iblk)*uvel(i,j,iblk)+vvel(i,j,iblk)*vvel(i,j,iblk)) + enddo + enddo + call accum_hist_field(n_sispeed, iblk, worka(:,:), a2D) + endif - !--------------------------------------------------------------- - ! Mask out land points and convert units - !--------------------------------------------------------------- - ravgct = c1/avgct(ns) - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & - !$OMP n,nn,ravgctz) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo + if (f_sidmasstranx(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = (rhoi*0.5*(vice(i+1,j,iblk)+vice(i,j,iblk))*HTE(i,j,iblk) & + + rhos*0.5*(vsno(i+1,j,iblk)+vsno(i,j,iblk))*HTE(i,j,iblk)) & + * 0.5*(uvel(i,j-1,iblk)+uvel(i,j,iblk)) + enddo + enddo + call accum_hist_field(n_sidmasstranx, iblk, worka(:,:), a2D) + endif + + if (f_sidmasstrany(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = (rhoi*0.5*(vice(i,j+1,iblk)+vice(i,j,iblk))*HTN(i,j,iblk) & + + rhos*0.5*(vsno(i,j+1,iblk)+vsno(i,j,iblk))*HTN(i,j,iblk)) & + * 0.5*(vvel(i-1,j,iblk)+vvel(i,j,iblk)) + enddo + enddo + call accum_hist_field(n_sidmasstrany, iblk, worka(:,:), a2D) + endif + + if (f_sistrxdtop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*strairx(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sistrxdtop, iblk, worka(:,:), a2D) + endif + + if (f_sistrydtop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*strairy(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sistrydtop, iblk, worka(:,:), a2D) + endif + + if (f_sistrxubot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = 
aice(i,j,iblk)*strocnx(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sistrxubot, iblk, worka(:,:), a2D) + endif + + if (f_sistryubot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*strocny(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sistryubot, iblk, worka(:,:), a2D) + endif + + if (f_siforcetiltx(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*strtltx(i,j,iblk) + enddo + enddo + call accum_hist_field(n_siforcetiltx, iblk, worka(:,:), a2D) + endif + + if (f_siforcetilty(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*strtlty(i,j,iblk) + enddo + enddo + call accum_hist_field(n_siforcetilty, iblk, worka(:,:), a2D) + endif + + if (f_siforcecoriolx(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*fm(i,j,iblk)*vvel(i,j,iblk) + enddo + enddo + call accum_hist_field(n_siforcecoriolx, iblk, worka(:,:), a2D) + endif + + if (f_siforcecorioly(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = -aice(i,j,iblk)*fm(i,j,iblk)*uvel(i,j,iblk) + enddo + enddo + call accum_hist_field(n_siforcecorioly, iblk, worka(:,:), a2D) + endif + + if (f_siforceintstrx(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*strintx(i,j,iblk) + enddo + enddo + call accum_hist_field(n_siforceintstrx, iblk, worka(:,:), a2D) + endif + + if (f_siforceintstry(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*strinty(i,j,iblk) + enddo + enddo + call accum_hist_field(n_siforceintstry, iblk, worka(:,:), a2D) + endif + + if (f_sicompstren(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*strength(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sicompstren, iblk, worka(:,:), a2D) + endif + + if (f_sidivvel(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*divu(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sidivvel, iblk, worka(:,:), a2D) + endif + + if (f_sialb(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (fsw(i,j,iblk) > puny .and. 
aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*(fsw(i,j,iblk)-fswabs(i,j,iblk) & + * aice(i,j,iblk)/aice_init(i,j,iblk)) & + * fsw(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sialb, iblk, worka(:,:), a2D) + endif + + if (f_sihc(1:1) /= 'x') then + worka(:,:) = c0 + do k = 1,nilyr + do j = jlo, jhi + do i = ilo, ihi + worka(i,j) = worka(i,j) + trcr(i,j,nt_qice+k-1,iblk)*vice(i,j,iblk)/real(nilyr,kind=dbl_kind) + enddo + enddo + enddo + call accum_hist_field(n_sihc, iblk, worka(:,:), a2D) + endif + + if (f_sisnhc(1:1) /= 'x') then + worka(:,:) = c0 + do k = 1,nslyr + do j = jlo, jhi + do i = ilo, ihi + worka(i,j) = worka(i,j) + trcr(i,j,nt_qsno+k-1,iblk)*vsno(i,j,iblk)/real(nslyr,kind=dbl_kind) + enddo + enddo + enddo + call accum_hist_field(n_sisnhc, iblk, worka(:,:), a2D) + endif + + if (f_sidconcth(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = daidtt(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sidconcth, iblk, worka(:,:), a2D) + endif + + if (f_sidconcdyn(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = daidtd(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sidconcdyn, iblk, worka(:,:), a2D) + endif + + if (f_sidmassth(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = dvidtt(i,j,iblk) * rhoi + endif + enddo + enddo + call accum_hist_field(n_sidmassth, iblk, worka(:,:), a2D) + endif + + if (f_sidmassdyn(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = dvidtd(i,j,iblk) * rhoi + endif + enddo + enddo + call accum_hist_field(n_sidmassdyn, iblk, worka(:,:), a2D) + endif + + if (f_sidmassgrowthwat(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*frazil(i,j,iblk)*rhoi / aice_init(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sidmassgrowthwat, iblk, worka(:,:), a2D) + endif + + if (f_sidmassgrowthbot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*congel(i,j,iblk)*rhoi / aice_init(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sidmassgrowthbot, iblk, worka(:,:), a2D) + endif + + if (f_sidmasssi(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*snoice(i,j,iblk)*rhoi / aice_init(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sidmasssi, iblk, worka(:,:), a2D) + endif + + if (f_sidmassevapsubl(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*evap(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sidmassevapsubl, iblk, worka(:,:), a2D) + endif + + if (f_sidmassmelttop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*meltt(i,j,iblk)*rhoi / aice_init(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sidmassmelttop, iblk, worka(:,:), a2D) + endif + + if (f_sidmassmeltbot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*meltb(i,j,iblk)*rhoi / aice_init(i,j,iblk) + 
endif + enddo + enddo + call accum_hist_field(n_sidmassmeltbot, iblk, worka(:,:), a2D) + endif + + if (f_sidmasslat(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*meltl(i,j,iblk)*rhoi / aice_init(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sidmasslat, iblk, worka(:,:), a2D) + endif + + if (f_sndmasssnf(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk) * fsnow(i,j,iblk) * dt + endif + enddo + enddo + call accum_hist_field(n_sndmasssnf, iblk, worka(:,:), a2D) + endif + + if (f_sndmassmelt(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*melts(i,j,iblk)*rhoi / aice_init(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sndmassmelt, iblk, worka(:,:), a2D) + endif + + if (f_siflswdtop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (fsw(i,j,iblk) > puny .and. aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fsw(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflswdtop, iblk, worka(:,:), a2D) + endif + + if (f_siflswutop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (fsw(i,j,iblk) > puny .and. aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*(fsw(i,j,iblk)-fswabs(i,j,iblk) & + * aice(i,j,iblk)/aice_init(i,j,iblk)) + endif + enddo + enddo + call accum_hist_field(n_siflswutop, iblk, worka(:,:), a2D) + endif + + if (f_siflswdbot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fswthru(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflswdbot, iblk, worka(:,:), a2D) + endif + + if (f_sifllwdtop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*flw(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sifllwdtop, iblk, worka(:,:), a2D) + endif + + if (f_sifllwutop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*flwout(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sifllwutop, iblk, worka(:,:), a2D) + endif + + if (f_siflsenstop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fsens(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflsenstop, iblk, worka(:,:), a2D) + endif + + if (f_siflsensupbot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fhocn(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflsensupbot, iblk, worka(:,:), a2D) + endif + + if (f_sifllatstop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*flat(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sifllatstop, iblk, worka(:,:), a2D) + endif + + if (f_siflcondtop(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fcondtop(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflcondtop, iblk, worka(:,:), a2D) + endif + + if (f_siflcondbot(1:1) /= 'x') then + worka(:,:) = c0 + do j 
= jlo, jhi + do i = ilo, ihi + if (aice_init(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fcondbot(i,j,iblk)/aice_init(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflcondbot, iblk, worka(:,:), a2D) + endif + + if (f_sipr(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*frain(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sipr, iblk, worka(:,:), a2D) + endif + + if (f_siflsaltbot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fsalt(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflsaltbot, iblk, worka(:,:), a2D) + endif + + if (f_sisaltmass(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = ice_ref_salinity * rhoi * vice(i,j,iblk) / c1000 + endif + enddo + enddo + call accum_hist_field(n_sisaltmass, iblk, worka(:,:), a2D) + endif + + if (f_siflfwbot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fresh(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflfwbot, iblk, worka(:,:), a2D) + endif + + + if (f_siflsaltbot(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + worka(i,j) = aice(i,j,iblk)*fsalt(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_siflsaltbot, iblk, worka(:,:), a2D) + endif + +!3D category fields + + if (f_aicen (1:1) /= 'x') & + call accum_hist_field(n_aicen-n2D, iblk, ncat_hist, & + aicen(:,:,1:ncat_hist,iblk), a3Dc) + if (f_vicen (1:1) /= 'x') & + call accum_hist_field(n_vicen-n2D, iblk, ncat_hist, & + vicen(:,:,1:ncat_hist,iblk), a3Dc) + if (f_vsnon (1:1) /= 'x') & + call accum_hist_field(n_vsnon-n2D, iblk, ncat_hist, & + vsnon(:,:,1:ncat_hist,iblk), a3Dc) + if (f_snowfracn(1:1) /= 'x') & + call accum_hist_field(n_snowfracn-n2D, iblk, ncat_hist, & + snowfracn(:,:,1:ncat_hist,iblk)*aicen(:,:,:,iblk), a3Dc) + + if (f_Tn_top (1:1) /= 'x') & + call accum_hist_field(n_Tn_top-n2D, iblk, ncat_hist, & + Tn_top(:,:,1:ncat_hist,iblk), a3Dc) + if (f_keffn_top (1:1) /= 'x') & + call accum_hist_field(n_keffn_top-n2D, iblk, ncat_hist, & + keffn_top(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fsurfn_ai (1:1) /= 'x') & + call accum_hist_field(n_fsurfn_ai-n2D, iblk, ncat_hist, & + fsurfn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fcondtopn_ai (1:1) /= 'x') & + call accum_hist_field(n_fcondtopn_ai-n2D, iblk, ncat_hist, & + fcondtopn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_flatn_ai (1:1) /= 'x') & + call accum_hist_field(n_flatn_ai-n2D, iblk, ncat_hist, & + flatn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + if (f_fsensn_ai (1:1) /= 'x') & + call accum_hist_field(n_fsensn_ai-n2D, iblk, ncat_hist, & + fsensn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + ! Calculate surface heat flux that causes melt (calculated by the + ! atmos in HadGEM3 so needed for checking purposes) + if (f_fmelttn_ai (1:1) /= 'x') & + call accum_hist_field(n_fmelttn_ai-n2D, iblk, ncat_hist, & + max(fsurfn(:,:,1:ncat_hist,iblk) - fcondtopn(:,:,1:ncat_hist,iblk),c0) & + *aicen_init(:,:,1:ncat_hist,iblk), a3Dc) + +! example for 3D field (x,y,z) +! if (f_field3dz (1:1) /= 'x') & +! call accum_hist_field(n_field3dz-n3Dccum, iblk, nzilyr, & +! field3dz(:,:,1:nzilyr,iblk), a3Dz) + + ! 
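The new 2D CMIP6 diagnostics above all follow one accumulation pattern: the instantaneous field is weighted by ice concentration where ice is present, stored in worka, and handed to accum_hist_field, which adds it into the a2D accumulation array. A minimal sketch of that pattern follows; it assumes the module-level names used in the surrounding hunks (worka, aice, puny, c0, a2D, accum_hist_field) and a purely hypothetical field somefld with history index n_somefld and namelist flag f_somefld.

    ! sketch only: accumulate an aice-weighted 2D CMIP6 diagnostic
    ! somefld / n_somefld / f_somefld are hypothetical stand-ins for the si* fields above
    if (f_somefld(1:1) /= 'x') then
       worka(:,:) = c0
       do j = jlo, jhi
          do i = ilo, ihi
             if (aice(i,j,iblk) > puny) &
                worka(i,j) = aice(i,j,iblk)*somefld(i,j,iblk)
          enddo
       enddo
       call accum_hist_field(n_somefld, iblk, worka(:,:), a2D)
    endif

The aice weighting is removed again at output time, where the accumulated sum is divided by the accumulated concentration (the ravgip normalisation further below).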
4D category fields + if (f_Tinz (1:1) /= 'x') then + Tinz4d(:,:,:,:) = c0 + if (ktherm == 2) then + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzilyr + Tinz4d(i,j,k,n) = temperature_mush( & + trcrn(i,j,nt_qice+k-1,n,iblk), trcrn(i,j,nt_sice+k-1,n,iblk)) + enddo + enddo + enddo + enddo + else + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzilyr + qn = trcrn(i,j,nt_qice+k-1,n,iblk) +! Tinz4d(i,j,k,n) = calculate_Tin_from_qin(qn,Tmlt(k)) + Tmlts = -trcrn(i,j,nt_sice+k-1,n,iblk)*depressT + Tinz4d(i,j,k,n) = calculate_Tin_from_qin(qn,Tmlts) + enddo + enddo + enddo + enddo + endif + call accum_hist_field(n_Tinz-n3Dbcum, iblk, nzilyr, ncat_hist, & + Tinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) + endif + if (f_Sinz (1:1) /= 'x') then + Sinz4d(:,:,:,:) = c0 + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (vicen(i,j,n,iblk) > puny) then + Sinz4d(i,j,1:nzilyr,n) = trcrn(i,j,nt_sice:nt_sice+nzilyr-1,n,iblk) + endif + enddo + enddo + enddo + call accum_hist_field(n_Sinz-n3Dbcum, iblk, nzilyr, ncat_hist, & + Sinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) + endif + + if (f_Tsnz (1:1) /= 'x') then + Tsnz4d(:,:,:,:) = c0 + if (ktherm == 2) then + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzslyr + qn = trcrn(i,j,nt_qsno+k-1,n,iblk) + Tsnz4d(i,j,k,n) = temperature_snow(qn) + enddo + enddo + enddo + enddo + else + do n = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + do k = 1, nzslyr + qn = trcrn(i,j,nt_qsno+k-1,n,iblk) + Tsnz4d(i,j,k,n) = (Lfresh + qn/rhos)/cp_ice + enddo + enddo + enddo + enddo + endif + call accum_hist_field(n_Tsnz-n4Dicum, iblk, nzslyr, ncat_hist, & + Tsnz4d(:,:,1:nzslyr,1:ncat_hist), a4Ds) + endif + + ! Calculate aggregate surface melt flux by summing category values + if (f_fmeltt_ai(1:1) /= 'x') then + do ns = 1, nstreams + if (n_fmeltt_ai(ns) /= 0) then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + do n=1,ncat_hist + worka(i,j) = worka(i,j) + a3Dc(i,j,n,n_fmelttn_ai(ns)-n2D,iblk) + enddo ! n + endif ! tmask + enddo ! i + enddo ! j + a2D(:,:,n_fmeltt_ai(ns),iblk) = worka(:,:) + endif + enddo + endif + + !--------------------------------------------------------------- + ! accumulate other history output + !--------------------------------------------------------------- + + ! mechanical redistribution + call accum_hist_mechred (iblk) + + ! melt ponds + if (tr_pond) call accum_hist_pond (iblk) + + ! biogeochemistry + if (tr_aero .or. tr_brine .or. skl_bgc) call accum_hist_bgc (iblk) + + ! form drag + if (formdrag) call accum_hist_drag (iblk) + + enddo ! iblk + !$OMP END PARALLEL DO + + !--------------------------------------------------------------- + ! Write output files at prescribed intervals + !--------------------------------------------------------------- + + nstrm = nstreams + if (write_ic) nstrm = 1 + + do ns = 1, nstrm + if (write_history(ns) .or. write_ic) then + + !--------------------------------------------------------------- + ! Mask out land points and convert units + !--------------------------------------------------------------- + + ravgct = c1/avgct(ns) + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP n,nn,ravgctz,ravgip) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + ! Alex West - enforce time mean ice area threshold based + ! on + ! 
the maximum of aicenmin (thermodynamic min ice fraction) + ! and a_min (dynamic min ice fraction) so that intensive + ! variables are reported only where both dynamics and + ! thermodynamics have been active + area_threshold = max(aicenmin,a_min) + if (a2D(i,j,n_aice(ns),iblk)*ravgct > area_threshold) then + ravgip(i,j) = c1/(a2D(i,j,n_aice(ns),iblk)) + else + ravgip(i,j) = c0 + endif + enddo ! i + enddo ! j + + + do n = 1, num_avail_hist_fields_2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) then @@ -1700,14 +2802,14 @@ subroutine accum_hist (dt) #ifdef AusCOM if (n_uocn(ns)==n.or.n_vocn(ns)==n) then if (.not. umask(i,j,iblk)) then ! mask out land points - a2D(i,j,n,iblk) = spval + a2D(i,j,n,iblk) = spval_dbl else ! convert units a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & * ravgct + avail_hist_fields(n)%conb endif else if (.not. tmask(i,j,iblk)) then ! mask out land points - a2D(i,j,n,iblk) = spval + a2D(i,j,n,iblk) = spval_dbl else ! convert units a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & * ravgct + avail_hist_fields(n)%conb @@ -1715,7 +2817,7 @@ subroutine accum_hist (dt) endif #else if (.not. tmask(i,j,iblk)) then ! mask out land points - a2D(i,j,n,iblk) = spval + a2D(i,j,n,iblk) = spval_dbl else ! convert units a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & * ravgct + avail_hist_fields(n)%conb @@ -1724,7 +2826,658 @@ subroutine accum_hist (dt) enddo ! i enddo ! j - ! back out albedo/zenith angle dependence + ! Only average for timesteps when ice present + if (index(avail_hist_fields(n)%vname,'sithick') /= 0) then + if (f_sithick(1:1) /= 'x' .and. n_sithick(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sithick(ns),iblk) = & + a2D(i,j,n_sithick(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sithick(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sifb') /= 0) then + if (f_sifb(1:1) /= 'x' .and. n_sifb(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sifb(ns),iblk) = & + a2D(i,j,n_sifb(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sifb(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siage') /= 0) then + if (f_siage(1:1) /= 'x' .and. n_siage(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siage(ns),iblk) = & + a2D(i,j,n_siage(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siage(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + + if (index(avail_hist_fields(n)%vname,'sisnconc') /= 0) then + if (f_sisnconc(1:1) /= 'x' .and. n_sisnconc(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sisnconc(ns),iblk) = & + a2D(i,j,n_sisnconc(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sisnconc(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sisnthick') /= 0) then + if (f_sisnthick(1:1) /= 'x' .and. n_sisnthick(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sisnthick(ns),iblk) = & + a2D(i,j,n_sisnthick(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sisnthick(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! 
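Each of the per-field blocks that follow applies the same ice-present normalisation, so it is sketched once here. By this point a2D holds the period sum of aice*X and has normally already been divided by avgct(ns) in the generic unit-conversion loop above; multiplying by avgct(ns)*ravgip(i,j) replaces that divisor with the accumulated ice concentration, turning the entry into a mean of X over ice-covered time steps only, while cells whose time-mean concentration never exceeds area_threshold are flagged with spval_dbl. n_fld below is a hypothetical index standing in for n_sithick, n_siage, etc.

    ! sketch only: ice-presence averaging applied to each CMIP6 2D field
    do j = jlo, jhi
       do i = ilo, ihi
          if (tmask(i,j,iblk)) then
             a2D(i,j,n_fld(ns),iblk) = &
                a2D(i,j,n_fld(ns),iblk)*avgct(ns)*ravgip(i,j)
             ! ravgip = 0 where time-mean aice stayed below area_threshold
             if (ravgip(i,j) == c0) a2D(i,j,n_fld(ns),iblk) = spval_dbl
          endif
       enddo
    enddo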
j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sitemptop') /= 0) then + if (f_sitemptop(1:1) /= 'x' .and. n_sitemptop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sitemptop(ns),iblk) = & + a2D(i,j,n_sitemptop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sitemptop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sitempsnic') /= 0) then + if (f_sitempsnic(1:1) /= 'x' .and. n_sitempsnic(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sitempsnic(ns),iblk) = & + a2D(i,j,n_sitempsnic(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sitempsnic(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sitempbot') /= 0) then + if (f_sitempbot(1:1) /= 'x' .and. n_sitempbot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sitempbot(ns),iblk) = & + a2D(i,j,n_sitempbot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sitempbot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siu') /= 0) then + if (f_siu(1:1) /= 'x' .and. n_siu(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siu(ns),iblk) = & + a2D(i,j,n_siu(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siu(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siv') /= 0) then + if (f_siv(1:1) /= 'x' .and. n_siv(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siv(ns),iblk) = & + a2D(i,j,n_siv(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siv(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sistrxdtop') /= 0) then + if (f_sistrxdtop(1:1) /= 'x' .and. n_sistrxdtop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sistrxdtop(ns),iblk) = & + a2D(i,j,n_sistrxdtop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sistrxdtop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sistrydtop') /= 0) then + if (f_sistrydtop(1:1) /= 'x' .and. n_sistrydtop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sistrydtop(ns),iblk) = & + a2D(i,j,n_sistrydtop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sistrydtop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sistrxubot') /= 0) then + if (f_sistrxubot(1:1) /= 'x' .and. n_sistrxubot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sistrxubot(ns),iblk) = & + a2D(i,j,n_sistrxubot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sistrxubot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sistryubot') /= 0) then + if (f_sistryubot(1:1) /= 'x' .and. n_sistryubot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sistryubot(ns),iblk) = & + a2D(i,j,n_sistryubot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sistryubot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! 
j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siforcetiltx') /= 0) then + if (f_siforcetiltx(1:1) /= 'x' .and. n_siforcetiltx(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siforcetiltx(ns),iblk) = & + a2D(i,j,n_siforcetiltx(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siforcetiltx(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siforcetilty') /= 0) then + if (f_siforcetilty(1:1) /= 'x' .and. n_siforcetilty(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siforcetilty(ns),iblk) = & + a2D(i,j,n_siforcetilty(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siforcetilty(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siforcecoriolx') /= 0) then + if (f_siforcecoriolx(1:1) /= 'x' .and. n_siforcecoriolx(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siforcecoriolx(ns),iblk) = & + a2D(i,j,n_siforcecoriolx(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siforcecoriolx(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siforcecorioly') /= 0) then + if (f_siforcecorioly(1:1) /= 'x' .and. n_siforcecorioly(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siforcecorioly(ns),iblk) = & + a2D(i,j,n_siforcecorioly(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siforcecorioly(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siforceintstrx') /= 0) then + if (f_siforceintstrx(1:1) /= 'x' .and. n_siforceintstrx(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siforceintstrx(ns),iblk) = & + a2D(i,j,n_siforceintstrx(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siforceintstrx(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siforceintstry') /= 0) then + if (f_siforceintstry(1:1) /= 'x' .and. n_siforceintstry(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siforceintstry(ns),iblk) = & + a2D(i,j,n_siforceintstry(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siforceintstry(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sicompstren') /= 0) then + if (f_sicompstren(1:1) /= 'x' .and. n_sicompstren(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sicompstren(ns),iblk) = & + a2D(i,j,n_sicompstren(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sicompstren(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sidivvel') /= 0) then + if (f_sidivvel(1:1) /= 'x' .and. n_sidivvel(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sidivvel(ns),iblk) = & + a2D(i,j,n_sidivvel(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sidivvel(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sispeed') /= 0) then + if (f_sispeed(1:1) /= 'x' .and. 
n_sispeed(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sispeed(ns),iblk) = & + a2D(i,j,n_sispeed(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sispeed(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sialb') /= 0) then + if (f_sialb(1:1) /= 'x' .and. n_sialb(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sialb(ns),iblk) = & + a2D(i,j,n_sialb(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sialb(ns),iblk) = spval_dbl + if (albcnt(i,j,iblk,ns) <= puny) a2D(i,j,n_sialb(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sidmassgrowthwat') /= 0) then + if (f_sidmassgrowthwat(1:1) /= 'x' .and. n_sidmassgrowthwat(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sidmassgrowthwat(ns),iblk) = & + a2D(i,j,n_sidmassgrowthwat(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sidmassgrowthwat(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sidmassgrowthbot') /= 0) then + if (f_sidmassgrowthbot(1:1) /= 'x' .and. n_sidmassgrowthbot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sidmassgrowthbot(ns),iblk) = & + a2D(i,j,n_sidmassgrowthbot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sidmassgrowthbot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sidmasssi') /= 0) then + if (f_sidmasssi(1:1) /= 'x' .and. n_sidmasssi(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sidmasssi(ns),iblk) = & + a2D(i,j,n_sidmasssi(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sidmasssi(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sidmassevapsubl') /= 0) then + if (f_sidmassevapsubl(1:1) /= 'x' .and. n_sidmassevapsubl(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sidmassevapsubl(ns),iblk) = & + a2D(i,j,n_sidmassevapsubl(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sidmassevapsubl(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sidmassmelttop') /= 0) then + if (f_sidmassmelttop(1:1) /= 'x' .and. n_sidmassmelttop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sidmassmelttop(ns),iblk) = & + a2D(i,j,n_sidmassmelttop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sidmassmelttop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sidmassmeltbot') /= 0) then + if (f_sidmassmeltbot(1:1) /= 'x' .and. n_sidmassmeltbot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sidmassmeltbot(ns),iblk) = & + a2D(i,j,n_sidmassmeltbot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sidmassmeltbot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sidmasslat') /= 0) then + if (f_sidmasslat(1:1) /= 'x' .and. 
n_sidmasslat(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sidmasslat(ns),iblk) = & + a2D(i,j,n_sidmasslat(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sidmasslat(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + + + + if (index(avail_hist_fields(n)%vname,'sndmasssnf') /= 0) then + if (f_sndmasssnf(1:1) /= 'x' .and. n_sndmasssnf(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sndmasssnf(ns),iblk) = & + a2D(i,j,n_sndmasssnf(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sndmasssnf(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sndmassmelt') /= 0) then + if (f_sndmassmelt(1:1) /= 'x' .and. n_sndmassmelt(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sndmassmelt(ns),iblk) = & + a2D(i,j,n_sndmassmelt(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sndmassmelt(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siflswdtop') /= 0) then + if (f_siflswdtop(1:1) /= 'x' .and. n_siflswdtop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflswdtop(ns),iblk) = & + a2D(i,j,n_siflswdtop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflswdtop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siflswutop') /= 0) then + if (f_siflswutop(1:1) /= 'x' .and. n_siflswutop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflswutop(ns),iblk) = & + a2D(i,j,n_siflswutop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflswutop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if(index(avail_hist_fields(n)%vname,'siflswdbot') /= 0) then + if (f_siflswdbot(1:1) /= 'x' .and. n_siflswdbot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflswdbot(ns),iblk) = & + a2D(i,j,n_siflswdbot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflswdbot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sifllwdtop') /= 0) then + if (f_sifllwdtop(1:1) /= 'x' .and. n_sifllwdtop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sifllwdtop(ns),iblk) = & + a2D(i,j,n_sifllwdtop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sifllwdtop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sifllwutop') /= 0) then + if (f_sifllwutop(1:1) /= 'x' .and. n_sifllwutop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sifllwutop(ns),iblk) = & + a2D(i,j,n_sifllwutop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sifllwutop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siflsenstop') /= 0) then + if (f_siflsenstop(1:1) /= 'x' .and. n_siflsenstop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflsenstop(ns),iblk) = & + a2D(i,j,n_siflsenstop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflsenstop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! 
j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siflsensupbot') /= 0) then + if (f_siflsensupbot(1:1) /= 'x' .and. n_siflsensupbot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflsensupbot(ns),iblk) = & + a2D(i,j,n_siflsensupbot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflsensupbot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sifllatstop') /= 0) then + if (f_sifllatstop(1:1) /= 'x' .and. n_sifllatstop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sifllatstop(ns),iblk) = & + a2D(i,j,n_sifllatstop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sifllatstop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'sipr') /= 0) then + if (f_sipr(1:1) /= 'x' .and. n_sipr(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_sipr(ns),iblk) = & + a2D(i,j,n_sipr(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_sipr(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siflcondtop') /= 0) then + if (f_siflcondtop(1:1) /= 'x' .and. n_siflcondtop(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflcondtop(ns),iblk) = & + a2D(i,j,n_siflcondtop(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflcondtop(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siflcondbot') /= 0) then + if (f_siflcondbot(1:1) /= 'x' .and. n_siflcondbot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflcondbot(ns),iblk) = & + a2D(i,j,n_siflcondbot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflcondbot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siflsaltbot') /= 0) then + if (f_siflsaltbot(1:1) /= 'x' .and. n_siflsaltbot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflsaltbot(ns),iblk) = & + a2D(i,j,n_siflsaltbot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflsaltbot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + if (index(avail_hist_fields(n)%vname,'siflfwbot') /= 0) then + if (f_siflfwbot(1:1) /= 'x' .and. n_siflfwbot(ns) /= 0) then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + a2D(i,j,n_siflfwbot(ns),iblk) = & + a2D(i,j,n_siflfwbot(ns),iblk)*avgct(ns)*ravgip(i,j) + if (ravgip(i,j) == c0) a2D(i,j,n_siflfwbot(ns),iblk) = spval_dbl + endif + enddo ! i + enddo ! j + endif + endif + + + !back out albedo/zenith angle dependence if (avail_hist_fields(n)%vname(1:6) == 'albice') then do j = jlo, jhi do i = ilo, ihi @@ -1760,6 +3513,30 @@ subroutine accum_hist (dt) enddo ! j endif + if (avail_hist_fields(n)%vname(1:8) == 'alvdr_ai') then + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j,iblk)) then + ravgctz = c0 + if (albcnt(i,j,iblk,ns) > puny) & + ravgctz = c1/albcnt(i,j,iblk,ns) + if (f_alvdr_ai (1:1) /= 'x' .and. n_alvdr_ai(ns) /= 0) & + a2D(i,j,n_alvdr_ai(ns),iblk) = & + a2D(i,j,n_alvdr_ai(ns),iblk)*avgct(ns)*ravgctz + if (f_alvdf_ai (1:1) /= 'x' .and. 
n_alvdf_ai(ns) /= 0) & + a2D(i,j,n_alvdf_ai(ns),iblk) = & + a2D(i,j,n_alvdf_ai(ns),iblk)*avgct(ns)*ravgctz + if (f_alidr_ai (1:1) /= 'x' .and. n_alidr_ai(ns) /= 0) & + a2D(i,j,n_alidr_ai(ns),iblk) = & + a2D(i,j,n_alidr_ai(ns),iblk)*avgct(ns)*ravgctz + if (f_alidf_ai (1:1) /= 'x' .and. n_alidf_ai(ns) /= 0) & + a2D(i,j,n_alidf_ai(ns),iblk) = & + a2D(i,j,n_alidf_ai(ns),iblk)*avgct(ns)*ravgctz + endif + enddo ! i + enddo ! j + endif + endif enddo ! n @@ -1770,7 +3547,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! mask out land points - a3Dc(i,j,k,n,iblk) = spval + a3Dc(i,j,k,n,iblk) = spval_dbl else ! convert units a3Dc(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Dc(i,j,k,n,iblk) & * ravgct + avail_hist_fields(nn)%conb @@ -1788,7 +3565,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! mask out land points - a3Dz(i,j,k,n,iblk) = spval + a3Dz(i,j,k,n,iblk) = spval_dbl else ! convert units a3Dz(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Dz(i,j,k,n,iblk) & * ravgct + avail_hist_fields(nn)%conb @@ -1805,7 +3582,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! mask out land points - a3Db(i,j,k,n,iblk) = spval + a3Db(i,j,k,n,iblk) = spval_dbl else ! convert units a3Db(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Db(i,j,k,n,iblk) & * ravgct + avail_hist_fields(nn)%conb @@ -1824,7 +3601,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! mask out land points - a4Di(i,j,k,ic,n,iblk) = spval + a4Di(i,j,k,ic,n,iblk) = spval_dbl else ! convert units a4Di(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Di(i,j,k,ic,n,iblk) & * ravgct + avail_hist_fields(nn)%conb @@ -1844,7 +3621,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! mask out land points - a4Ds(i,j,k,ic,n,iblk) = spval + a4Ds(i,j,k,ic,n,iblk) = spval_dbl else ! convert units a4Ds(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Ds(i,j,k,ic,n,iblk) & * ravgct + avail_hist_fields(nn)%conb @@ -1863,7 +3640,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! mask out land points - a4Db(i,j,k,ic,n,iblk) = spval + a4Db(i,j,k,ic,n,iblk) = spval_dbl else ! convert units a4Db(i,j,k,ic,n,iblk) = avail_hist_fields(nn)%cona*a4Db(i,j,k,ic,n,iblk) & * ravgct + avail_hist_fields(nn)%conb @@ -1892,31 +3669,31 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! 
mask out land points - if (n_divu (ns) /= 0) a2D(i,j,n_divu(ns), iblk) = spval - if (n_shear (ns) /= 0) a2D(i,j,n_shear(ns), iblk) = spval - if (n_sig1 (ns) /= 0) a2D(i,j,n_sig1(ns), iblk) = spval - if (n_sig2 (ns) /= 0) a2D(i,j,n_sig2(ns), iblk) = spval - if (n_mlt_onset(ns) /= 0) a2D(i,j,n_mlt_onset(ns),iblk) = spval - if (n_frz_onset(ns) /= 0) a2D(i,j,n_frz_onset(ns),iblk) = spval - if (n_hisnap (ns) /= 0) a2D(i,j,n_hisnap(ns), iblk) = spval - if (n_aisnap (ns) /= 0) a2D(i,j,n_aisnap(ns), iblk) = spval - if (n_trsig (ns) /= 0) a2D(i,j,n_trsig(ns), iblk) = spval - if (n_iage (ns) /= 0) a2D(i,j,n_iage(ns), iblk) = spval - if (n_FY (ns) /= 0) a2D(i,j,n_FY(ns), iblk) = spval - if (n_Tn_top (ns) /= 0) a3Dc(i,j,:,n_Tn_top(ns)-n2D,iblk) = spval - if (n_keffn_top (ns) /= 0) a3Dc(i,j,:,n_keffn_top(ns)-n2D,iblk) = spval - - if (n_a11 (ns) /= 0) a2D(i,j,n_a11(ns), iblk) = spval - if (n_a12 (ns) /= 0) a2D(i,j,n_a12(ns), iblk) = spval - if (n_e11 (ns) /= 0) a2D(i,j,n_e11(ns), iblk) = spval - if (n_e12 (ns) /= 0) a2D(i,j,n_e12(ns), iblk) = spval - if (n_e22 (ns) /= 0) a2D(i,j,n_e22(ns), iblk) = spval - if (n_s11 (ns) /= 0) a2D(i,j,n_s11(ns), iblk) = spval - if (n_s12 (ns) /= 0) a2D(i,j,n_s12(ns), iblk) = spval - if (n_s22 (ns) /= 0) a2D(i,j,n_s22(ns), iblk) = spval - if (n_yieldstress11 (ns) /= 0) a2D(i,j,n_yieldstress11(ns),iblk) = spval - if (n_yieldstress12 (ns) /= 0) a2D(i,j,n_yieldstress12(ns),iblk) = spval - if (n_yieldstress22 (ns) /= 0) a2D(i,j,n_yieldstress22(ns),iblk) = spval + if (n_divu (ns) /= 0) a2D(i,j,n_divu(ns), iblk) = spval_dbl + if (n_shear (ns) /= 0) a2D(i,j,n_shear(ns), iblk) = spval_dbl + if (n_sig1 (ns) /= 0) a2D(i,j,n_sig1(ns), iblk) = spval_dbl + if (n_sig2 (ns) /= 0) a2D(i,j,n_sig2(ns), iblk) = spval_dbl + if (n_mlt_onset(ns) /= 0) a2D(i,j,n_mlt_onset(ns),iblk) = spval_dbl + if (n_frz_onset(ns) /= 0) a2D(i,j,n_frz_onset(ns),iblk) = spval_dbl + if (n_hisnap (ns) /= 0) a2D(i,j,n_hisnap(ns), iblk) = spval_dbl + if (n_aisnap (ns) /= 0) a2D(i,j,n_aisnap(ns), iblk) = spval_dbl + if (n_trsig (ns) /= 0) a2D(i,j,n_trsig(ns), iblk) = spval_dbl + if (n_iage (ns) /= 0) a2D(i,j,n_iage(ns), iblk) = spval_dbl + if (n_FY (ns) /= 0) a2D(i,j,n_FY(ns), iblk) = spval_dbl + if (n_Tn_top (ns) /= 0) a3Dc(i,j,:,n_Tn_top(ns)-n2D,iblk) = spval_dbl + if (n_keffn_top (ns) /= 0) a3Dc(i,j,:,n_keffn_top(ns)-n2D,iblk) = spval_dbl + + if (n_a11 (ns) /= 0) a2D(i,j,n_a11(ns), iblk) = spval_dbl + if (n_a12 (ns) /= 0) a2D(i,j,n_a12(ns), iblk) = spval_dbl + if (n_e11 (ns) /= 0) a2D(i,j,n_e11(ns), iblk) = spval_dbl + if (n_e12 (ns) /= 0) a2D(i,j,n_e12(ns), iblk) = spval_dbl + if (n_e22 (ns) /= 0) a2D(i,j,n_e22(ns), iblk) = spval_dbl + if (n_s11 (ns) /= 0) a2D(i,j,n_s11(ns), iblk) = spval_dbl + if (n_s12 (ns) /= 0) a2D(i,j,n_s12(ns), iblk) = spval_dbl + if (n_s22 (ns) /= 0) a2D(i,j,n_s22(ns), iblk) = spval_dbl + if (n_yieldstress11 (ns) /= 0) a2D(i,j,n_yieldstress11(ns),iblk) = spval_dbl + if (n_yieldstress12 (ns) /= 0) a2D(i,j,n_yieldstress12(ns),iblk) = spval_dbl + if (n_yieldstress22 (ns) /= 0) a2D(i,j,n_yieldstress22(ns),iblk) = spval_dbl else if (n_divu (ns) /= 0) a2D(i,j,n_divu(ns),iblk) = & divu (i,j,iblk)*avail_hist_fields(n_divu(ns))%cona diff --git a/source/ice_history_pond.F90 b/source/ice_history_pond.F90 index 263620d0..716581fa 100755 --- a/source/ice_history_pond.F90 +++ b/source/ice_history_pond.F90 @@ -136,7 +136,7 @@ subroutine init_hist_pond_2D call define_hist_field(n_apond_ai,"apond_ai","1",tstr2D, tcstr, & "melt pond fraction of grid cell", & "weighted by ice area", c1, c0, & - ns, f_apond) 
+ ns, f_apond_ai) if (f_hpond(1:1) /= 'x') & call define_hist_field(n_hpond,"hpond","m",tstr2D, tcstr, & diff --git a/source/ice_history_shared.F90 b/source/ice_history_shared.F90 index ffefcd5a..5af8250b 100755 --- a/source/ice_history_shared.F90 +++ b/source/ice_history_shared.F90 @@ -36,7 +36,7 @@ module ice_history_shared logical (kind=log_kind), public :: & hist_avg ! if true, write averaged data instead of snapshots - character (len=char_len), public :: & + character (len=char_len_long), public :: & history_file , & ! output file for history incond_file ! output file for snapshot initial conditions @@ -183,10 +183,12 @@ module ice_history_shared character (len=max_nstrm), public :: & ! f_example = 'md', & f_hi = 'm', f_hs = 'm', & + f_snowfrac = 'x', f_snowfracn = 'x', & f_Tsfc = 'm', f_aice = 'm', & f_uvel = 'm', f_vvel = 'm', & f_uatm = 'm', f_vatm = 'm', & f_fswdn = 'm', f_flwdn = 'm', & + f_fswup = 'm', & f_snow = 'm', f_snow_ai = 'm', & f_rain = 'm', f_rain_ai = 'm', & f_sst = 'm', f_sss = 'm', & @@ -195,14 +197,17 @@ module ice_history_shared f_fswfac = 'm', f_fswint_ai = 'x', & f_fswabs = 'm', f_fswabs_ai = 'm', & f_albsni = 'm', & - f_alvdr = 'm', f_alidr = 'm', & - f_alvdf = 'm', f_alidf = 'm', & + f_alvdr = 'x', f_alidr = 'x', & + f_alvdf = 'x', f_alidf = 'x', & + f_alvdr_ai = 'm', f_alidr_ai = 'm', & + f_alvdf_ai = 'm', f_alidf_ai = 'm', & f_albice = 'm', f_albsno = 'm', & f_albpnd = 'm', f_coszen = 'm', & f_flat = 'm', f_flat_ai = 'm', & f_fsens = 'm', f_fsens_ai = 'm', & f_flwup = 'm', f_flwup_ai = 'm', & f_evap = 'm', f_evap_ai = 'm', & + f_evap_ice_ai = 'm', f_evap_snow_ai = 'm', & f_Tair = 'm', & f_Tref = 'm', f_Qref = 'm', & f_congel = 'm', f_frazil = 'm', & @@ -228,6 +233,47 @@ module ice_history_shared f_mlt_onset = 'm', f_frz_onset = 'm', & f_iage = 'm', f_FY = 'm', & f_hisnap = 'm', f_aisnap = 'm', & + f_sithick = 'x', f_sisnthick = 'x', & + f_sisnconc = 'x', f_siage = 'x', & + f_sitemptop = 'x', f_sitempsnic = 'x', & + f_sitempbot = 'x', f_sispeed = 'x', & + f_siu = 'x', f_siv = 'x', & + f_sidmasstranx = 'x', f_sidmasstrany = 'x', & + f_sistrxdtop = 'x', f_sistrydtop = 'x', & + f_sistrxubot = 'x', f_sistryubot = 'x', & + f_siforcetiltx = 'x', f_siforcetilty = 'x', & + f_siforcecoriolx = 'x', f_siforcecorioly = 'x', & + f_siforceintstrx = 'x', f_siforceintstry = 'x', & + f_sicompstren = 'x', & + f_sialb = 'x', & + f_sihc = 'x', f_sisnhc = 'x', & + f_sidconcth = 'x', f_sidconcdyn = 'x', & + f_sifb = 'x', & + f_sidmassth = 'x', f_sidmassdyn = 'x', & + f_sidmassgrowthwat = 'x', & + f_sidmassgrowthbot = 'x', & + f_sidmasssi = 'x', & + f_sidmassevapsubl = 'x', & + f_sidmassmelttop = 'x', & + f_sidmassmeltbot = 'x', & + f_sidmasslat = 'x', & + f_sndmasssnf = 'x', & + f_sndmassmelt = 'x', & + f_sidivvel = 'x', & + f_siflswdtop = 'x', & + f_siflswutop = 'x', & + f_siflswdbot = 'x', & + f_sifllwdtop = 'x', & + f_sifllwutop = 'x', & + f_siflsenstop = 'x', & + f_siflsensupbot = 'x', & + f_sifllatstop = 'x', & + f_siflcondtop = 'x', & + f_siflcondbot = 'x', & + f_sipr = 'x', & + f_siflsaltbot = 'x', & + f_siflfwbot = 'x', & + f_sisaltmass = 'x', & f_aicen = 'x', f_vicen = 'x', & f_vsnon = 'x', & f_trsig = 'm', f_icepresent = 'm', & @@ -265,10 +311,12 @@ module ice_history_shared f_VGRDb , & ! 
f_example , & f_hi, f_hs , & + f_snowfrac, f_snowfracn, & f_Tsfc, f_aice , & f_uvel, f_vvel , & f_uatm, f_vatm , & f_fswdn, f_flwdn , & + f_fswup, & f_snow, f_snow_ai , & f_rain, f_rain_ai , & f_sst, f_sss , & @@ -279,12 +327,15 @@ module ice_history_shared f_albsni , & f_alvdr, f_alidr , & f_alvdf, f_alidf , & + f_alvdr_ai, f_alidr_ai , & + f_alvdf_ai, f_alidf_ai , & f_albice, f_albsno , & f_albpnd, f_coszen , & f_flat, f_flat_ai , & f_fsens, f_fsens_ai , & f_flwup, f_flwup_ai , & f_evap, f_evap_ai , & + f_evap_ice_ai, f_evap_snow_ai, & f_Tair , & f_Tref, f_Qref , & f_congel, f_frazil , & @@ -293,8 +344,8 @@ module ice_history_shared f_meltb, f_meltl , & f_fresh, f_fresh_ai , & f_fsalt, f_fsalt_ai , & + f_fswthru, f_fswthru_ai, & f_fhocn, f_fhocn_ai , & - f_fswthru, f_fswthru_ai,& f_strairx, f_strairy , & f_strtltx, f_strtlty , & f_strcorx, f_strcory , & @@ -310,6 +361,47 @@ module ice_history_shared f_mlt_onset, f_frz_onset, & f_iage, f_FY , & f_hisnap, f_aisnap , & + f_sithick, f_sisnthick, & + f_sisnconc, f_siage, & + f_sifb, & + f_sitemptop, f_sitempsnic,& + f_sitempbot, f_sispeed, & + f_siu, f_siv, & + f_sidmasstranx, f_sidmasstrany, & + f_sistrxdtop, f_sistrydtop, & + f_sistrxubot, f_sistryubot, & + f_siforcetiltx, f_siforcetilty, & + f_siforcecoriolx, f_siforcecorioly, & + f_siforceintstrx, f_siforceintstry, & + f_sicompstren, & + f_sialb, & + f_sidivvel, & + f_sihc, f_sisnhc, & + f_sidconcth, f_sidconcdyn,& + f_sidmassth, f_sidmassdyn,& + f_sidmassgrowthwat, & + f_sidmassgrowthbot, & + f_sidmasssi, & + f_sidmassevapsubl, & + f_sidmassmelttop, & + f_sidmassmeltbot, & + f_sidmasslat, & + f_sndmasssnf, & + f_sndmassmelt, & + f_siflswdtop, & + f_siflswutop, & + f_siflswdbot, & + f_sifllwdtop, & + f_sifllwutop, & + f_siflsenstop, & + f_siflsensupbot, & + f_sifllatstop, & + f_siflcondtop, & + f_siflcondbot, & + f_sipr, & + f_siflsaltbot, & + f_siflfwbot, & + f_sisaltmass, & f_aicen, f_vicen , & f_vsnon, & f_trsig, f_icepresent,& @@ -362,11 +454,13 @@ module ice_history_shared integer (kind=int_kind), dimension(max_nstrm), public :: & ! 
n_example , & n_hi , n_hs , & + n_snowfrac, n_snowfracn, & n_Tsfc , n_aice , & n_uvel , n_vvel , & n_uatm , n_vatm , & n_sice , & n_fswdn , n_flwdn , & + n_fswup, & n_snow , n_snow_ai , & n_rain , n_rain_ai , & n_sst , n_sss , & @@ -377,12 +471,15 @@ module ice_history_shared n_albsni , & n_alvdr , n_alidr , & n_alvdf , n_alidf , & + n_alvdr_ai , n_alidr_ai , & + n_alvdf_ai , n_alidf_ai , & n_albice , n_albsno , & n_albpnd , n_coszen , & n_flat , n_flat_ai , & n_fsens , n_fsens_ai , & n_flwup , n_flwup_ai , & n_evap , n_evap_ai , & + n_evap_ice_ai, n_evap_snow_ai , & n_Tair , & n_Tref , n_Qref , & n_congel , n_frazil , & @@ -391,6 +488,47 @@ module ice_history_shared n_meltb , n_meltl , & n_fresh , n_fresh_ai , & n_fsalt , n_fsalt_ai , & + n_sidivvel, & + n_sithick , n_sisnthick , & + n_sisnconc, n_siage, & + n_sifb, & + n_sitemptop , n_sitempsnic , & + n_sitempbot , n_sispeed, & + n_siu, n_siv, & + n_sidmasstranx, n_sidmasstrany, & + n_sistrxdtop, n_sistrydtop, & + n_sistrxubot, n_sistryubot, & + n_siforcetiltx, n_siforcetilty, & + n_siforcecoriolx, n_siforcecorioly, & + n_siforceintstrx, n_siforceintstry, & + n_sicompstren, & + n_sialb, & + n_sihc , n_sisnhc, & + n_sidconcth , n_sidconcdyn, & + n_sidmassth , n_sidmassdyn, & + n_sidmassgrowthwat, & + n_sidmassgrowthbot, & + n_sidmasssi, & + n_sidmassevapsubl, & + n_sidmassmelttop, & + n_sidmassmeltbot, & + n_sidmasslat, & + n_sndmasssnf, & + n_sndmassmelt, & + n_siflswdtop, & + n_siflswutop, & + n_siflswdbot, & + n_sifllwdtop, & + n_sifllwutop, & + n_siflsenstop, & + n_siflsensupbot, & + n_sifllatstop, & + n_siflcondtop, & + n_siflcondbot, & + n_sipr, & + n_siflsaltbot, & + n_siflfwbot, & + n_sisaltmass, & n_vsnon, & n_fhocn , n_fhocn_ai , & n_fswthru , n_fswthru_ai , & @@ -456,8 +594,9 @@ subroutine construct_filename(ncfile,suffix,ns) integer (kind=int_kind), intent(in) :: ns integer (kind=int_kind) :: iyear, imonth, iday, isec + character (len=1) :: cstream - iyear = nyr + year_init - 1 ! set year_init=1 in ice_in to get iyear=nyr + iyear = nyr + year_init - 1 ! set year_init=1 in ice_in to get iyear=nyr imonth = month iday = mday isec = sec - dt @@ -487,28 +626,35 @@ subroutine construct_filename(ncfile,suffix,ns) endif endif + cstream = '' + if (ns > 10) write(cstream,'(i1.1)') ns-1 +! ABK: Disable the addition of a stream number to the history file +! (now only occurs for more than 10 streams, which is v unlikely!) + if (histfreq(ns) == '1') then ! instantaneous, write every dt write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & - history_file(1:lenstr(history_file)),'_inst.', & - iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + history_file(1:lenstr(history_file))//trim(cstream),'_inst.', & + iyear,'-',imonth,'-',iday,'-',sec,'.',suffix elseif (hist_avg) then ! write averaged data if (histfreq(ns) == 'd'.or.histfreq(ns) == 'D') then ! daily write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,a)') & - history_file(1:lenstr(history_file)), & + history_file(1:lenstr(history_file))//trim(cstream), & '.',iyear,'-',imonth,'-',iday,'.',suffix elseif (histfreq(ns) == 'h'.or.histfreq(ns) == 'H') then ! hourly write(ncfile,'(a,a,i2.2,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & - history_file(1:lenstr(history_file)),'_',histfreq_n(ns),'h.', & - iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + history_file(1:lenstr(history_file))//trim(cstream),'_', & + histfreq_n(ns),'h.',iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + elseif (histfreq(ns) == 'm'.or.histfreq(ns) == 'M') then ! 
monthly write(ncfile,'(a,a,i4.4,a,i2.2,a,a)') & - history_file(1:lenstr(history_file)),'.', & + history_file(1:lenstr(history_file))//trim(cstream),'.', & iyear,'-',imonth,'.',suffix elseif (histfreq(ns) == 'y'.or.histfreq(ns) == 'Y') then ! yearly write(ncfile,'(a,a,i4.4,a,a)') & - history_file(1:lenstr(history_file)),'.', iyear,'.',suffix + history_file(1:lenstr(history_file))//trim(cstream),'.', & + iyear,'.',suffix endif else ! instantaneous with histfreq > dt @@ -607,8 +753,9 @@ subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & id(ns) = num_avail_hist_fields_tot stmp = vname - if (ns > 1) & - write(stmp,'(a,a1,a1)') trim(stmp),'_',vhistfreq(ns1:ns1) +! if (ns > 1) & +! write(stmp,'(a,a1,a1)') trim(stmp),'_',vhistfreq(ns1:ns1) +! This was disabled in Met office version by ABK. avail_hist_fields(id(ns))%vname = trim(stmp) avail_hist_fields(id(ns))%vunit = trim(vunit) @@ -752,7 +899,7 @@ subroutine accum_hist_field_3D(id, iblk, ndim, field_accum, field) do k = 1, ndim do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk) .and. abs(field_accum(i,j,k)) < 1.0e+10_dbl_kind) then field(i,j,k,idns,iblk) = field(i,j,k,idns,iblk) + field_accum(i,j,k) endif enddo @@ -818,9 +965,11 @@ subroutine accum_hist_field_4D(id, iblk, ndim3, ndim4, field_accum, field) do n = 1, ndim3 do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk) .and. abs(field_accum(i,j,n,k)) < 1.0e+10_dbl_kind) then field(i,j,n,k,idns,iblk) = field(i,j,n,k,idns,iblk) + field_accum(i,j,n,k) endif + endif enddo enddo enddo diff --git a/source/ice_meltpond_topo.F90 b/source/ice_meltpond_topo.F90 index b74291dd..9ff69344 100755 --- a/source/ice_meltpond_topo.F90 +++ b/source/ice_meltpond_topo.F90 @@ -68,7 +68,7 @@ end subroutine init_meltponds_topo subroutine compute_ponds_topo(nx_block,ny_block, & ilo, ihi, jlo, jhi, & - dt, & + dt,snowfracn, & aice, aicen, & vice, vicen, & vsno, vsnon, & @@ -103,7 +103,8 @@ subroutine compute_ponds_topo(nx_block,ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & intent(inout) :: & - vicen ! ice volume, per category (m) + vicen, & ! ice volume, per category (m) + snowfracn real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), & intent(in) :: & @@ -141,6 +142,7 @@ subroutine compute_ponds_topo(nx_block,ny_block, & real (kind=dbl_kind) :: & hi, & ! ice thickness (m) + hs, & ! snow thickness (m) dHui, & ! change in thickness of ice lid (m) omega, & ! conduction dTice, & ! temperature difference across ice lid (C) @@ -185,6 +187,13 @@ subroutine compute_ponds_topo(nx_block,ny_block, & hpondn(i,j,n) = c0 ! pond depth, per category apondn(i,j,n) = c0 ! pond area, per category + ! set snow fraction using JULES empirical formula based + ! on snow volume + ! this will be updated for meltpond covered areas in + ! pond_area() below + hs = vsnon(i,j,n) / aicen(i,j,n) + snowfracn(i,j,n) = c1 - exp(-p2*rhos*hs) + enddo enddo indxii(:,n) = 0 @@ -229,7 +238,8 @@ subroutine compute_ponds_topo(nx_block,ny_block, & !-------------------------------------------------------------- ! calculate pond area and depth !-------------------------------------------------------------- - call pond_area(dt,aice(i,j), vice(i,j),vsno(i,j), & + call pond_area(dt, snowfracn(i,j,:), & + aice(i,j), vice(i,j),vsno(i,j), & aicen(i,j,:), vicen(i,j,:), vsnon(i,j,:), & qicen(i,j,:,:), sicen(i,j,:,:), & volpn(i,j,:), volp(i,j), & @@ -376,7 +386,7 @@ end subroutine compute_ponds_topo ! 
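The snow cover fraction introduced in compute_ponds_topo above is the JULES-style empirical function of category snow depth, snowfracn = 1 - exp(-p2*rhos*hs), so deeper snow asymptotically covers the whole category area. A stand-alone sketch of that relation is given below; it assumes p2 and rhos carry the values used by the hunk (the usual CICE constants, nominally 0.2 and the snow density), and the function name is purely illustrative.

    ! sketch only: JULES-style snow cover fraction from category snow depth
    function snow_cover_fraction(hsn) result(sfn)
       use ice_kinds_mod, only: dbl_kind
       use ice_constants, only: c1, p2, rhos
       real (kind=dbl_kind), intent(in) :: hsn   ! category snow depth, vsnon/aicen (m)
       real (kind=dbl_kind)             :: sfn   ! snow cover fraction (0-1)
       sfn = c1 - exp(-p2*rhos*hsn)
    end function snow_cover_fraction

As the comment in the hunk notes, this value is subsequently overwritten from asnon inside pond_area, so the reported fraction also accounts for area taken up by exposed melt ponds.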
Computes melt pond area, pond depth and melting rates - subroutine pond_area(dt, & + subroutine pond_area(dt, snowfracn, & aice, vice, vsno, & aicen, vicen, vsnon, & qicen, sicen, & @@ -405,7 +415,7 @@ subroutine pond_area(dt, & volp, dvolp real (kind=dbl_kind), dimension(ncat), intent(out) :: & - apondn, hpondn + apondn, hpondn,snowfracn ! local variables @@ -482,6 +492,9 @@ subroutine pond_area(dt, & asnon(n) = reduced_aicen(n) endif + ! update snow fraction from asnon + snowfracn(n) = asnon(n) + ! This choice for alfa and beta ignores hydrostatic equilibium of categories. ! Hydrostatic equilibium of the entire ITD is accounted for below, assuming ! a surface topography implied by alfa=0.6 and beta=0.4, and rigidity across all diff --git a/source/ice_shortwave.F90 b/source/ice_shortwave.F90 index 21e4208c..cd664918 100755 --- a/source/ice_shortwave.F90 +++ b/source/ice_shortwave.F90 @@ -162,7 +162,7 @@ subroutine init_shortwave use ice_domain, only: nblocks, blocks_ice use ice_flux, only: alvdf, alidf, alvdr, alidr, & alvdr_ai, alidr_ai, alvdf_ai, alidf_ai, & - swvdr, swvdf, swidr, swidf, & + swvdr, swvdf, swidr, swidf, snowfrac,snowfracn, & albice, albsno, albpnd, apeff_ai, albcnt, coszen, fsnow use ice_orbital, only: init_orbit use ice_state, only: aicen, vicen, vsnon, trcrn, nt_Tsfc @@ -382,6 +382,8 @@ subroutine init_shortwave apeff_ai(i,j,iblk) = apeff_ai(i,j,iblk) & + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) + snowfrac(i,j,iblk) = snowfrac(i,j,iblk) & + + snowfracn(i,j,n,iblk) * aicen(i,j,n,iblk) enddo enddo ! ncat diff --git a/source/ice_step_mod.F90 b/source/ice_step_mod.F90 index 65d78e7a..4afab596 100755 --- a/source/ice_step_mod.F90 +++ b/source/ice_step_mod.F90 @@ -182,12 +182,14 @@ subroutine step_therm1 (dt, iblk) use ice_fileunits, only: nu_diag use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, & meltsn, melttn, meltbn, congeln, snoicen, dsnown, uatm, vatm, & - wind, rhoa, potT, Qa, zlvl, strax, stray, flatn, fsensn, fsurfn, fcondtopn, & + wind, rhoa, potT, Qa, zlvl, strax, stray, flatn, fsensn, fsurfn, & + fcondtopn, fcondbotn, fcondbot, & + ice_freeboardn, ice_freeboard, snowfracn, & flw, fsnow, fpond, sss, mlt_onset, frz_onset, faero_atm, faero_ocn, & frain, Tair, coszen, strairxT, strairyT, fsurf, fcondtop, fsens, & flat, fswabs, flwout, evap, Tref, Qref, Uref, fresh, fsalt, fhocn, & fswthru, meltt, melts, meltb, meltl, congel, snoice, & - merge_fluxes + merge_fluxes, evap_ice, evap_snow use ice_firstyear, only: update_FYarea use ice_grid, only: lmask_n, lmask_s, TLAT, TLON use ice_itd, only: hi_min @@ -202,7 +204,7 @@ subroutine step_therm1 (dt, iblk) nt_apnd, nt_hpnd, nt_ipnd, nt_alvl, nt_vlvl, nt_Tsfc, & tr_iage, nt_iage, tr_FY, nt_FY, tr_aero, tr_pond, tr_pond_cesm, & tr_pond_lvl, nt_qice, nt_sice, tr_pond_topo, uvel, vvel - use ice_therm_shared, only: calc_Tsfc + use ice_therm_shared, only: calc_Tsfc, Tsnic, Ti_bot use ice_therm_vertical, only: frzmlt_bottom_lateral, thermo_vertical use ice_timers, only: ice_timer_start, ice_timer_stop, timer_ponds !BBB: @@ -237,6 +239,8 @@ subroutine step_therm1 (dt, iblk) fswabsn , & ! shortwave absorbed by ice (W/m^2) flwoutn , & ! upward LW at surface (W/m^2) evapn , & ! flux of vapor, atmos to ice (kg m-2 s-1) + evapn_ice , & ! flux of vapor, atmos to ice (kg m-2 s-1) + evapn_snow , & ! flux of vapor, atmos to snow (kg m-2 s-1) freshn , & ! flux of water, ice to ocean (kg/m^2/s) fsaltn , & ! flux of salt, ice to ocean (kg/m^2/s) fhocnn , & ! 
fbot corrected for leftover energy (W/m^2) @@ -553,10 +557,13 @@ subroutine step_therm1 (dt, iblk) Sswabsn(:,:,:,n,iblk), & Iswabsn(:,:,:,n,iblk), & fsurfn(:,:,n,iblk), & - fcondtopn(:,:,n,iblk), & + fcondtopn(:,:,n,iblk), fcondbotn(:,:,n,iblk), & fsensn(:,:,n,iblk), flatn(:,:,n,iblk), & flwoutn, & - evapn, freshn, & + ice_freeboardn, & + evapn, & + evapn_ice, evapn_snow, & + freshn, & fsaltn, fhocnn, & melttn(:,:,n,iblk), meltsn(:,:,n,iblk), & meltbn(:,:,n,iblk), & @@ -710,18 +717,24 @@ subroutine step_therm1 (dt, iblk) strairxn, strairyn, & Cdn_atm_ratio_n, & fsurfn(:,:,n,iblk), fcondtopn(:,:,n,iblk),& + fcondbotn(:,:,n,iblk), & fsensn(:,:,n,iblk), flatn(:,:,n,iblk), & fswabsn, flwoutn, & evapn, & + evapn_ice, evapn_snow, & + ice_freeboardn(:,:,n,iblk), & Trefn, Qrefn, & freshn, fsaltn, & fhocnn, fswthrun(:,:,n,iblk), & strairxT(:,:,iblk), strairyT (:,:,iblk), & Cdn_atm_ratio(:,:,iblk), & fsurf (:,:,iblk), fcondtop (:,:,iblk), & + fcondbot(:,:,iblk), & fsens (:,:,iblk), flat (:,:,iblk), & fswabs (:,:,iblk), flwout (:,:,iblk), & evap (:,:,iblk), & + evap_ice(:,:,iblk), evap_snow (:,:,iblk), & + ice_freeboard(:,:,iblk), & Tref (:,:,iblk), Qref (:,:,iblk), & fresh (:,:,iblk), fsalt (:,:,iblk), & fhocn (:,:,iblk), fswthru (:,:,iblk), & @@ -735,6 +748,9 @@ subroutine step_therm1 (dt, iblk) enddo ! ncat + Ti_bot(:,:,iblk) = Tbot(:,:) * aice(:,:,iblk) + Tsnic(:,:,iblk) = c0 + !----------------------------------------------------------------- ! Calculate ponds from the topographic scheme !----------------------------------------------------------------- @@ -742,7 +758,7 @@ subroutine step_therm1 (dt, iblk) if (tr_pond_topo) then call compute_ponds_topo(nx_block, ny_block, & ilo, ihi, jlo, jhi, & - dt, & + dt,snowfracn(:,:,:,iblk), & aice (:,:,iblk), aicen(:,:,:,iblk), & vice (:,:,iblk), vicen(:,:,:,iblk), & vsno (:,:,iblk), vsnon(:,:,:,iblk), & diff --git a/source/ice_therm_bl99.F90 b/source/ice_therm_bl99.F90 index 4cdd57df..427a15a9 100755 --- a/source/ice_therm_bl99.F90 +++ b/source/ice_therm_bl99.F90 @@ -338,24 +338,14 @@ subroutine temperature_changes (nx_block, ny_block, & !mclaren: Should there be an if calc_Tsfc statement here then?? ! Alex West: Temporarily at least, yes. Here goes. - if (istep1==15552) then - do m = 1, icells - i = indxi(m) - j = indxj(m) - - if ((i==91) .AND. (j==46) .and. (my_task==24)) then - write(nu_diag,*) ' ' - write(nu_diag,*) ' ' - write(nu_diag,*) 'Printing initial temperature profile' - write(nu_diag,*) 'Tsn_init, Tin_init = ', Tsn_init(m,:), Tin_init(m,:) - endif - - enddo - endif if (calc_Tsfc) then - frac = 0.9 - dTemp = 0.02_dbl_kind +! frac = 0.9 +! dTemp = 0.02_dbl_kind +!ars599: 031418 +!MetOffice Setting + frac = 1 - puny + dTemp = 0.01_dbl_kind do k = 1, nilyr do ij = 1, icells i = indxi(ij) @@ -375,7 +365,10 @@ subroutine temperature_changes (nx_block, ny_block, & endif if (Iswabs_tmp < puny) Iswabs_tmp = c0 - dswabs = min(Iswabs(i,j,k) - Iswabs_tmp, fswint(i,j)) +! dswabs = min(Iswabs(i,j,k) - Iswabs_tmp, fswint(i,j)) +!ars599: 031418 +!MetOffice Setting + dswabs = Iswabs(i,j,k) - Iswabs_tmp fswsfc(i,j) = fswsfc(i,j) + dswabs fswint(i,j) = fswint(i,j) - dswabs @@ -415,16 +408,6 @@ subroutine temperature_changes (nx_block, ny_block, & do niter = 1, nitermax - if ((istep1==15552) .and. (my_task==24)) then - - write(nu_diag,*) ' ' - write(nu_diag,*) ' ' - - write(nu_diag,*) '--------------------' - write(nu_diag,*) 'Entering iteration', niter - write(nu_diag,*) ' ' - endif - !----------------------------------------------------------------- ! 
Identify cells, if any, where calculation has not converged. @@ -582,20 +565,7 @@ subroutine temperature_changes (nx_block, ny_block, & ! See if we need to reduce fcondtopn anywhere fcondtopn_force = fcondtopn - fcondtopn_reduction - if (istep1==15552) then - do ij = 1, isolve - i = indxii(ij) - j = indxjj(ij) - m = indxij(ij) - - if ((i==91) .AND. (j==46) .and. (my_task==24)) then - write(nu_diag,*) 'Calling solver with fcondtopn_force, fcondtopn, fcondtopn_reduction = ', & - fcondtopn_force(i,j), fcondtopn(i,j), fcondtopn_reduction(i,j) - write(nu_diag,*) 'and etai, etas, kh = ', etai(ij,:), etas(ij,:), kh(m,:) - endif - enddo - endif call get_matrix_elements_know_Tsfc & (nx_block, ny_block, & @@ -649,17 +619,6 @@ subroutine temperature_changes (nx_block, ny_block, & ! to conserve energy) in the thickness_changes subroutine. !----------------------------------------------------------------- - if (istep1==15552) then - do ij = 1, isolve - i = indxii(ij) - j = indxjj(ij) - m = indxij(ij) - - if ((i==91) .and. (j==46) .and. (my_task==24)) then - write(nu_diag,*) 'Matrix solution of temperatures Tmat = ', Tmat(ij,:) - endif - enddo - endif if (calc_Tsfc) then @@ -771,20 +730,11 @@ subroutine temperature_changes (nx_block, ny_block, & ! go into the ocean ! This is done to avoid an 'infinite loop' whereby temp continually evolves ! to the same point above zero, is reset, ad infinitum - if ((i==91) .and. (j==46) .and. (my_task==24)) then - write(nu_diag,*) - write(nu_diag,*) 'Resetting Tsn1' - write(nu_diag,*) 'zTsn, dqmat_sn = ', zTsn(m,k), dqmat_sn(m,k) - endif if (l_snow(m) .AND. k == 1) then if (Top_T_was_reset_last_time(m)) then fcondtopn_reduction(i,j) = fcondtopn_reduction(i,j) + dqmat_sn(m,k)*hslyr(m) / dt Top_T_was_reset_last_time(m) = .false. enum(m) = enum(m) + hslyr(m) * dqmat_sn(m,k) - if ((i==91) .and. (j==46) .and. (my_task==24)) then - write(nu_diag,*) 'Adjusting forcing' - write(nu_diag,*) 'fcondtopn_reduction, enum = ', fcondtopn_reduction(i,j), enum(m) - endif else Top_T_was_reset_last_time(m) = .true. endif @@ -812,18 +762,6 @@ subroutine temperature_changes (nx_block, ny_block, & enddo ! ij enddo ! nslyr - if (istep1==15552) then - do ij = 1, isolve - i = indxii(ij) - j = indxjj(ij) - m = indxij(ij) - - if ((i==91) .and. (j==46) .and. (my_task==24)) then - write(nu_diag,*) 'After numerical corrections, zTsn = ', zTsn(m,:) - write(nu_diag,*) 'zqsn, enew, enum = ', zqsn(m,k), enew(ij), enum(m) - endif - enddo - endif dTmat(:,:) = c0 @@ -854,20 +792,11 @@ subroutine temperature_changes (nx_block, ny_block, & ! go into the ocean ! This is done to avoid an 'infinite loop' whereby temp continually evolves ! to the same point above zero, is reset, ad infinitum - if ((i==91) .and. (j==46) .and. (my_task==24)) then - write(nu_diag,*) - write(nu_diag,*) 'Resetting Tin1' - write(nu_diag,*) 'zTin, dTmat, dqmat = ', zTin(m,k), dTmat(m,k), dqmat(m,k) - endif if ((.NOT. (l_snow(m))) .AND. (k == 1)) then if (Top_T_was_reset_last_time(m)) then fcondtopn_reduction(i,j) = fcondtopn_reduction(i,j) + dqmat(m,k)*hilyr(m) / dt Top_T_was_reset_last_time(m) = .false. enum(m) = enum(m) + hilyr(m) * dqmat(m,k) - if ((i==91) .and. (j==46) .and. (my_task==24)) then - write(nu_diag,*) 'Adjusting forcing' - write(nu_diag,*) 'fcondtopn_reduction, enum = ', fcondtopn_reduction(i,j), enum(m) - endif else Top_T_was_reset_last_time(m) = .true. endif @@ -926,20 +855,6 @@ subroutine temperature_changes (nx_block, ny_block, & enddo ! ij enddo ! 
nilyr - if (istep1==15552) then - do ij = 1, isolve - i = indxii(ij) - j = indxjj(ij) - m = indxij(ij) - - if ((i==91) .and. (j==46) .and. (my_task==24)) then - write(nu_diag,*) 'After numerical corrections, zTin = ', zTin(m,:) - write(nu_diag,*) 'zqin, enew, enum = ', zqin(m,k), enew(ij), enum(m) - endif - enddo - endif - - if (calc_Tsfc) then !DIR$ CONCURRENT !Cray @@ -1002,18 +917,6 @@ subroutine temperature_changes (nx_block, ny_block, & ferr(m) = abs( (enew(ij) - einit(m) + enum(m))/dt & - (fcondtopn(i,j) - fcondbot(m) + fswint(i,j)) ) - if (istep1==15552) then - if ((i==91) .and. (j==46) .and. (my_task==24)) then - write(nu_diag,*) ' ' - write(nu_diag,*) 'Testing for convergence' - write(nu_diag,*) 'enew, einit, enum = ', enew(ij), einit(m), enum(m) - write(nu_diag,*) 'enew/dt, einit/dt, enum/dt = ', enew(ij)/dt, einit(m)/dt, enum(m)/dt - write(nu_diag,*) 'fcondtop, fcondbot, fswint = ', fcondtopn(i,j), fcondbot(m), fswint(i,j) - write(nu_diag,*) '(enew(ij) - einit(m) + enum(m))/dt = ', (enew(ij) - einit(m) + enum(m))/dt - write(nu_diag,*) 'fcondtopn(i,j) - fcondbot(m) + fswint(i,j)', fcondtopn(i,j) - fcondbot(m) + fswint(i,j) - write(nu_diag,*) 'fcondtopn_force(i,j), fcondtopn_reduction(i,j) = ', fcondtopn_force(i,j), fcondtopn_reduction(i,j) - endif - endif ! factor of 0.9 allows for roundoff errors later if (ferr(m) > 0.9_dbl_kind*ferrmax) then ! condition (5) @@ -1027,7 +930,6 @@ subroutine temperature_changes (nx_block, ny_block, & do k = 1, nilyr if (reduce_kh(m,k) .and. dqmat(m,k) > c0) then frac = max(0.5*(c1-ferr(m)/abs(fcondtopn(i,j)-fcondbot(m))),p1) -! frac = p1 kh(m,k+nslyr+1) = kh(m,k+nslyr+1) * frac kh(m,k+nslyr) = kh(m,k+nslyr+1) endif diff --git a/source/ice_therm_shared.F90 b/source/ice_therm_shared.F90 index 4b231874..4d0a3f54 100755 --- a/source/ice_therm_shared.F90 +++ b/source/ice_therm_shared.F90 @@ -10,7 +10,8 @@ module ice_therm_shared use ice_kinds_mod - use ice_domain_size, only: ncat, nilyr, nslyr, max_ntrcr + use ice_domain_size, only: ncat, nilyr, nslyr, max_ntrcr, max_blocks + use ice_blocks, only: nx_block, ny_block implicit none save @@ -40,6 +41,11 @@ module ice_therm_shared character (char_len), public :: & conduct ! 'MU71' or 'bubbly' + real (kind=dbl_kind), & + dimension(nx_block,ny_block,max_blocks), & + public :: & + Tsnic, Ti_bot + logical (kind=log_kind), public :: & l_brine ! if true, treat brine pocket effects diff --git a/source/ice_therm_vertical.F90 b/source/ice_therm_vertical.F90 index abf2cb50..73491ce1 100755 --- a/source/ice_therm_vertical.F90 +++ b/source/ice_therm_vertical.F90 @@ -88,8 +88,12 @@ subroutine thermo_vertical (nx_block, ny_block, & fswsfc, fswint, & Sswabs, Iswabs, & fsurfn, fcondtopn, & + fcondbotn, & fsensn, flatn, & - flwoutn, evapn, & + flwoutn, & + ice_freeboardn, & + evapn, & + evapn_ice, evapn_snow,& freshn, fsaltn, & fhocnn, meltt, & melts, meltb, & @@ -161,14 +165,17 @@ subroutine thermo_vertical (nx_block, ny_block, & ! coupler fluxes to atmosphere real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & flwoutn , & ! outgoing longwave radiation (W/m^2) - evapn ! evaporative water flux (kg/m^2/s) + evapn , & ! evaporative water flux (kg/m^2/s) + evapn_ice, &! evaporative water flux over ice (kg/m^2/s) + evapn_snow ! evaporative water flux over snow(kg/m^2/s) ! Note: these are intent out if calc_Tsfc = T, otherwise intent in real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout):: & fsensn , & ! sensible heat flux (W/m^2) flatn , & ! latent heat flux (W/m^2) fsurfn , & ! 
net flux to top surface, excluding fcondtopn - fcondtopn ! downward cond flux at top surface (W m-2) + fcondtopn, & ! downward cond flux at top surface (W m-2) + fcondbotn ! downward cond flux at bottom surface (W m-2) ! coupler fluxes to ocean real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & @@ -186,7 +193,10 @@ subroutine thermo_vertical (nx_block, ny_block, & snoice , & ! snow-ice formation (m/step-->cm/day) dsnow , & ! change in snow thickness (m/step-->cm/day) mlt_onset, & ! day of year that sfc melting begins - frz_onset ! day of year that freezing begins (congel or frazil) + frz_onset, & ! day of year that freezing begins (congel or frazil) + ice_freeboardn ! height of ice surface (i.e. not snow surface) + ! above sea level in m + real (kind=dbl_kind), intent(in) :: & yday ! day of year @@ -273,6 +283,10 @@ subroutine thermo_vertical (nx_block, ny_block, & fsaltn (i,j) = c0 fhocnn (i,j) = c0 fadvocn(i,j) = c0 + fcondbotn(i,j) = c0 + ice_freeboardn(i,j) = c0 + + meltt (i,j) = c0 meltb (i,j) = c0 @@ -448,6 +462,15 @@ subroutine thermo_vertical (nx_block, ny_block, & endif ! heat_capacity + ! Alex West: Read 1D bottom conductive flux array into 2D array + ! for diagnostics (SIMIP)i + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + fcondbotn(i,j) = fcondbot(ij) + enddo + + ! intermediate energy for error check do ij = 1, icells einter(ij) = c0 @@ -478,8 +501,10 @@ subroutine thermo_vertical (nx_block, ny_block, & fbot, Tbot, & flatn, fsurfn, & fcondtopn, fcondbot, & + ice_freeboardn, & fsnow, hsn_new, & fhocnn, evapn, & + evapn_ice, evapn_snow,& meltt, melts, & meltb, iage, & congel, snoice, & @@ -1420,8 +1445,10 @@ subroutine thickness_changes (nx_block, ny_block, & fbot, Tbot, & flatn, fsurfn, & fcondtopn, fcondbot, & + ice_freeboardn, & fsnow, hsn_new, & fhocnn, evapn, & + evapn_ice, evapn_snow,& meltt, melts, & meltb, iage, & congel, snoice, & @@ -1480,7 +1507,10 @@ subroutine thickness_changes (nx_block, ny_block, & dsnow , & ! snow formation (m/step-->cm/day) iage , & ! ice age (s) mlt_onset , & ! day of year that sfc melting begins - frz_onset ! day of year that freezing begins (congel or frazil) + frz_onset , & ! day of year that freezing begins (congel or frazil) + ice_freeboardn ! height of ice surface (i.e. not snow surface) + ! above sea level in m + real (kind=dbl_kind), dimension (icells), & intent(inout) :: & @@ -1492,8 +1522,9 @@ subroutine thickness_changes (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & fhocnn , & ! fbot, corrected for any surplus energy (W m-2) - evapn ! ice/snow mass sublimated/condensed (kg m-2 s-1) - + evapn , & ! ice/snow mass sublimated/condensed (kg m-2 s-1) + evapn_ice , & ! ice mass sublimated/condensed (kg m-2 s-1) + evapn_snow ! snow mass sublimated/condensed (kg m-2 s-1) real (kind=dbl_kind), dimension (icells), intent(out):: & hsn_new ! thickness of new snow (m) @@ -1668,15 +1699,20 @@ subroutine thickness_changes (nx_block, ny_block, & !-------------------------------------------------------------- evapn (i,j) = c0 ! initialize + evapn_ice(i,j) = c0 + evapn_snow(i,j) = c0 if (hsn(ij) > puny) then ! add snow with enthalpy zqsn(ij,1) dhs = econ(ij) / (zqsn(ij,1) - rhos*Lvap) ! econ < 0, dhs > 0 dzs(ij,1) = dzs(ij,1) + dhs evapn(i,j) = evapn(i,j) + dhs*rhos + evapn_snow(i,j) = evapn_snow(i,j) + dhs*rhos else ! add ice with enthalpy zqin(ij,1) dhi = econ(ij) / (qm(ij,1) - rhoi*Lvap) ! 
econ < 0, dhi > 0 dzi(ij,1) = dzi(ij,1) + dhi evapn(i,j) = evapn(i,j) + dhi*rhoi + evapn_ice(i,j) = evapn_ice(i,j) + dhi*rhoi + ! enthalpy of melt water emlt_atm(ij) = emlt_atm(ij) - qmlt(ij,1) * dhi endif @@ -1778,6 +1814,8 @@ subroutine thickness_changes (nx_block, ny_block, & esub(ij) = esub(ij) - dhs*qsub esub(ij) = max(esub(ij), c0) ! in case of roundoff error evapn(i,j) = evapn(i,j) + dhs*rhos + evapn_snow(i,j) = evapn_snow(i,j) + dhs*rhos + !-------------------------------------------------------------- ! Melt snow (top) @@ -1814,6 +1852,8 @@ subroutine thickness_changes (nx_block, ny_block, & esub(ij) = esub(ij) - dhi*qsub esub(ij) = max(esub(ij), c0) evapn(i,j) = evapn(i,j) + dhi*rhoi + evapn_ice(i,j) = evapn_ice(i,j) + dhi*rhoi + emlt_ocn(ij) = emlt_ocn(ij) - qmlt(ij,k) * dhi !-------------------------------------------------------------- @@ -1975,7 +2015,7 @@ subroutine thickness_changes (nx_block, ny_block, & hin, hsn, & zqin, zqsn, & dzi, dzs, & - dsnow) + dsnow, ice_freeboardn) !---!------------------------------------------------------------------- !---! Repartition the ice and snow into equal-thickness layers, @@ -2121,6 +2161,8 @@ subroutine thickness_changes (nx_block, ny_block, & j = indxj(ij) efinal(ij) = -evapn(i,j)*Lvap evapn(i,j) = evapn(i,j)/dt + evapn_ice(i,j) = evapn_ice(i,j)/dt + evapn_snow(i,j) = evapn_snow(i,j)/dt enddo do k = 1, nslyr @@ -2176,7 +2218,7 @@ subroutine freeboard (nx_block, ny_block, & hin, hsn, & zqin, zqsn, & dzi, dzs, & - dsnow) + dsnow, ice_freeboardn) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -2195,6 +2237,12 @@ subroutine freeboard (nx_block, ny_block, & dsnow , & ! change in snow thickness after snow-ice formation (m) iage ! snow thickness (m) + real (kind=dbl_kind), dimension(nx_block,ny_block), & + intent(inout) :: & + ice_freeboardn ! height of ice surface (i.e. not snow surface) + ! above sea level in m + + real (kind=dbl_kind), dimension (icells), & intent(inout) :: & hin , & ! ice thickness (m) @@ -2303,6 +2351,16 @@ subroutine freeboard (nx_block, ny_block, & endif ! dhin > puny enddo ! ij + ! 
Calculate diagnostic sea ice freeboard after adjustments (SIMIP) + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) + + ice_freeboardn(i,j) = & + hin(ij) * (1 - rhoi / rhow) - hsn(ij) * (rhos / rhow) + enddo + + end subroutine freeboard !======================================================================= From 995340112181e3eec60bc9128175c8517acf680f Mon Sep 17 00:00:00 2001 From: martindix Date: Wed, 19 Sep 2018 00:30:28 +0000 Subject: [PATCH 12/52] Add iceberg_factor (from Dave) git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@396 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/cpl_forcing_handler.F90 | 4 +++- drivers/access/cpl_parameters.F90 | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index 8569e583..bc1da8a7 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ -520,7 +520,9 @@ subroutine get_lice_discharge_masks_or_iceberg(fname) write(il_out,*) '(get_lice_discharge_masks_or_iceberg) reading in data, month= ',im !call ice_read_nc(ncid_i2o, im, trim(myvar), icebergfw(:,:,im,:), dbug) call ice_read_nc(ncid_i2o, im, trim(myvar), vwork, dbug) - icebergfw(:,:,im,:) = vwork(:,:,:) + icebergfw(:,:,im,:) = vwork(:,:,:) * iceberg_factor + !iceberg_factor is 1.0 as default, but can be bigger/smaller for other runs + !(e.g. in CABLE runs, iceberffw needs to be enhanced for water balance.) enddo !call check_iceberg_reading('chk_iceberg_readin.nc') diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index 19273d50..0d3c4ef2 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -78,6 +78,8 @@ module cpl_parameters ! !20171227: Adding options for land ice discharge as iceberg melt (0,1,2,3,4) integer(kind=int_kind) :: iceberg = 0 +!20180528: Adding "enhancement" factor for the iceberg waterflux +real(kind=dbl_kind) :: iceberg_factor = 1.0 ! namelist/coupling/ & caltype, & @@ -107,6 +109,7 @@ module cpl_parameters extreme_test, & imsk_evap, & iceberg, & + iceberg_factor, & ocn_ssuv_factor,& iostress_factor,& chk_a2i_fields, & From 8b553d9a5d4b02ded4307651155f14acf00f90f8 Mon Sep 17 00:00:00 2001 From: martindix Date: Wed, 13 Mar 2019 23:58:48 +0000 Subject: [PATCH 13/52] Add extra call to set_calendar to fix naming of history files in leap years. git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@402 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- source/ice_calendar.F90 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/ice_calendar.F90 b/source/ice_calendar.F90 index dbd7ded9..d3e69885 100755 --- a/source/ice_calendar.F90 +++ b/source/ice_calendar.F90 @@ -288,6 +288,9 @@ subroutine calendar(ttime) idate = (nyr+year_init-1)*10000 + month*100 + mday ! date (yyyymmdd) #ifdef AusCOM + ! Need this extra call to set_calendar to handle history + ! file naming in leap years properly + call set_calendar(nyr+year_init-1) write(il_out,*) '(calendar) runtime0 = ', runtime0 write(il_out,*) '(calendar) nyr, year_init, month, mday = ', nyr, year_init, month, mday write(il_out,*) '(calendar) idate = ', idate From 575c1e7f97a8bc958d71f1f7cadf4f5716fb8e6d Mon Sep 17 00:00:00 2001 From: martindix Date: Thu, 14 Mar 2019 00:01:34 +0000 Subject: [PATCH 14/52] Write messages to stderr and call mpi_abort with correct arguments. 
git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@403 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- mpi/ice_exit.F90 | 9 +++++++-- source/ice_fileunits.F90 | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/mpi/ice_exit.F90 b/mpi/ice_exit.F90 index 70f74148..7946e4e3 100644 --- a/mpi/ice_exit.F90 +++ b/mpi/ice_exit.F90 @@ -27,7 +27,8 @@ subroutine abort_ice(error_message) use ice_fileunits, only: nu_diag, flush_fileunit use shr_sys_mod #else - use ice_fileunits, only: nu_diag, ice_stderr, flush_fileunit + use ice_fileunits, only: nu_diag, ice_stderr, ice_stdout, & + flush_fileunit include 'mpif.h' ! MPI Fortran include file #endif @@ -37,6 +38,7 @@ subroutine abort_ice(error_message) #ifndef CCSMCOUPLED integer (int_kind) :: ierr ! MPI error flag + integer (int_kind) :: error_code #endif #if (defined CCSMCOUPLED) @@ -47,10 +49,13 @@ subroutine abort_ice(error_message) #else call flush_fileunit(nu_diag) + write (ice_stdout,*) error_message + call flush_fileunit(ice_stdout) write (ice_stderr,*) error_message call flush_fileunit(ice_stderr) - call MPI_ABORT(MPI_COMM_WORLD, ierr) + error_code = 1 + call MPI_ABORT(MPI_COMM_WORLD, error_code, ierr) stop #endif diff --git a/source/ice_fileunits.F90 b/source/ice_fileunits.F90 index ad0004a7..aa2be15d 100755 --- a/source/ice_fileunits.F90 +++ b/source/ice_fileunits.F90 @@ -79,7 +79,7 @@ module ice_fileunits integer (kind=int_kind), parameter, public :: & ice_stdin = 5, & ! reserved unit for standard input ice_stdout = 6, & ! reserved unit for standard output - ice_stderr = 6 ! reserved unit for standard error + ice_stderr = 0 ! reserved unit for standard error integer (kind=int_kind), parameter :: & ice_IOUnitsMinUnit = NUMIN, & ! do not use unit numbers below From d1c40ef0b64d9e6f8640cd81a169d501d4eda833 Mon Sep 17 00:00:00 2001 From: martindix Date: Tue, 8 Sep 2020 20:59:14 +0000 Subject: [PATCH 15/52] Create netCDF files as netcdf4-classic for better detection of truncation errors. git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@404 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- io_netcdf/ice_history_write.F90 | 6 ++++-- io_netcdf/ice_restart.F90 | 3 +-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index 1174afb3..bd6d6d31 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -71,7 +71,7 @@ subroutine ice_write_hist (ns) integer (kind=int_kind) :: i,k,ic,n,nn, & ncid,status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid,varid, & - nvertexid,ivertex + nvertexid,ivertex,iflag integer (kind=int_kind), dimension(3) :: dimid integer (kind=int_kind), dimension(4) :: dimidz integer (kind=int_kind), dimension(5) :: dimidcz @@ -127,7 +127,9 @@ subroutine ice_write_hist (ns) endif ! 
create file - status = nf90_create(ncfile(ns), nf90_clobber, ncid) + iflag = ior(NF90_NETCDF4, NF90_CLASSIC_MODEL); + iflag = ior(iflag, NF90_CLOBBER); + status = nf90_create(ncfile(ns), iflag, ncid) if (status /= nf90_noerr) call abort_ice( & 'ice: Error creating history ncfile '//ncfile(ns)) diff --git a/io_netcdf/ice_restart.F90 b/io_netcdf/ice_restart.F90 index d5de5455..aaa464af 100644 --- a/io_netcdf/ice_restart.F90 +++ b/io_netcdf/ice_restart.F90 @@ -162,8 +162,7 @@ subroutine init_restart_write(filename_spec) write(nu_rst_pointer,'(a)') filename close(nu_rst_pointer) - iflag = 0 - if (lcdf64) iflag = nf90_64bit_offset + iflag = ior(NF90_NETCDF4, NF90_CLASSIC_MODEL); status = nf90_create(trim(filename), iflag, ncid) if (status /= nf90_noerr) call abort_ice( & 'ice: Error creating restart ncfile '//trim(filename)) From d49e6bc43d05b8ab3000e19a919d65ca178b152f Mon Sep 17 00:00:00 2001 From: martindix Date: Fri, 20 Aug 2021 22:06:36 +0000 Subject: [PATCH 16/52] Only write namelist info from master PE git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@405 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/CICE_InitMod.F90 | 54 ++++++++++++++++++------------- drivers/access/cpl_parameters.F90 | 9 ++++-- 2 files changed, 39 insertions(+), 24 deletions(-) diff --git a/drivers/access/CICE_InitMod.F90 b/drivers/access/CICE_InitMod.F90 index e4169b8b..ea7fc1c3 100644 --- a/drivers/access/CICE_InitMod.F90 +++ b/drivers/access/CICE_InitMod.F90 @@ -21,7 +21,7 @@ module CICE_InitMod use cpl_forcing_handler use cpl_interface !ars599: 27032014: defind my_task - use ice_communicate, only: my_task + use ice_communicate, only: my_task, master_task #endif implicit none @@ -110,13 +110,15 @@ subroutine cice_init MPI_COMM_ICE = il_commlocal ! call init_cpl ! initialize message passing call get_cpl_timecontrol - write(il_out,*)' CICE (cice_init) 1 jobnum = ',jobnum - write(il_out,*)' CICE (cice_init) 1 inidate = ',inidate - write(il_out,*)' CICE (cice_init) 1 init_date = ',init_date - write(il_out,*)' CICE (cice_init) 1 runtime0 = ',runtime0 - write(il_out,*)' CICE (cice_init) 1 runtime = ',runtime - write(il_out,*)' CICE (cice_init) 1 idate = ',my_task, idate - !write(il_out,*)' CICE (cice_init) 1 runtype = ',runtype + if (my_task == master_task) then + write(il_out,*)' CICE (cice_init) 1 jobnum = ',jobnum + write(il_out,*)' CICE (cice_init) 1 inidate = ',inidate + write(il_out,*)' CICE (cice_init) 1 init_date = ',init_date + write(il_out,*)' CICE (cice_init) 1 runtime0 = ',runtime0 + write(il_out,*)' CICE (cice_init) 1 runtime = ',runtime + write(il_out,*)' CICE (cice_init) 1 idate = ',my_task, idate + !write(il_out,*)' CICE (cice_init) 1 runtype = ',runtype + end if #endif call init_fileunits ! unit numbers @@ -177,9 +179,11 @@ subroutine cice_init call init_restart ! initialize restart variables #ifdef AusCOM - write(il_out,*) 'CICE (cice_init) 2 time = ', my_task, time - write(il_out,*) 'CICE (cice_init) 2 runtime0 = ', my_task, runtime0 - !write(il_out,*) 'CICE (cice_init) 2 idate = ', my_task, idate + if (my_task == master_task) then + write(il_out,*) 'CICE (cice_init) 2 time = ', my_task, time + write(il_out,*) 'CICE (cice_init) 2 runtime0 = ', my_task, runtime0 + !write(il_out,*) 'CICE (cice_init) 2 idate = ', my_task, idate + end if if (jobnum == 1 ) then time = 0.0 !NOTE, the first job must be set back to 0 and @@ -208,9 +212,11 @@ subroutine cice_init !ars599: 26032014 original code ! call calendar(time) ! 
at the end of the first timestep call calendar(time-runtime0) - write(il_out,*) 'CICE (cice_init) 3 time = ', my_task, time - write(il_out,*) 'CICE (cice_init) 3 runtime0 = ', my_task, runtime0 - write(il_out,*) 'CICE (cice_init) 3 idate = ', my_task, idate + if (my_task == master_task) then + write(il_out,*) 'CICE (cice_init) 3 time = ', my_task, time + write(il_out,*) 'CICE (cice_init) 3 runtime0 = ', my_task, runtime0 + write(il_out,*) 'CICE (cice_init) 3 idate = ', my_task, idate + end if #endif !-------------------------------------------------------------------- @@ -270,16 +276,20 @@ subroutine cice_init !for continue runs, mice data MUST be available. call get_restart_mice(trim(restartdir)//'/mice.nc') else - write(6,*)'* WARNING: No initial mice.nc data available here! *' - write(6,*)'* WARNING: ALL mice variables will be set to ZERO! *' - write(6,*)'* WARNING: This is allowed for the init run ONLY ! *' - endif - if (use_core_runoff) then - call get_core_runoff(trim(inputdir)//'/core_runoff_regrid.nc',& - 'runoff',1) + if (my_task == master_task) then + write(6,*)'* WARNING: No initial mice.nc data available here! *' + write(6,*)'* WARNING: ALL mice variables will be set to ZERO! *' + write(6,*)'* WARNING: This is allowed for the init run ONLY ! *' + endif endif +if (use_core_runoff) then + call get_core_runoff(trim(inputdir)//'/core_runoff_regrid.nc',& + 'runoff',1) +endif - write(il_out,*)' calling ave_ocn_fields_4_i2a time_sec = ',0 !time_sec + if (my_task == master_task) then + write(il_out,*)' calling ave_ocn_fields_4_i2a time_sec = ',0 !time_sec + endif call time_average_ocn_fields_4_i2a !accumulate/average ocn fields needed for IA coupling diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index 0d3c4ef2..76acef68 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -166,6 +166,7 @@ subroutine get_cpl_timecontrol use ice_exit use ice_fileunits +use ice_communicate, only: my_task, master_task implicit none @@ -176,7 +177,9 @@ subroutine get_cpl_timecontrol call get_fileunit(nu_nml) open(unit=nu_nml,file="input_ice.nml",form="formatted",status="old",iostat=nml_error) ! -write(6,*)'CICE: input_ice.nml opened at unit = ', nu_nml +if (my_task == master_task) then + write(6,*)'CICE: input_ice.nml opened at unit = ', nu_nml +endif ! 
if (nml_error /= 0) then nml_error = -1 @@ -189,7 +192,9 @@ subroutine get_cpl_timecontrol end do if (nml_error == 0) close(nu_nml) -write(6,coupling) +if (my_task == master_task) then + write(6,coupling) +endif call release_fileunit(nu_nml) From 61ab72a21ecb8be2c4a6d529a9b386466649183d Mon Sep 17 00:00:00 2001 From: martindix Date: Wed, 25 Aug 2021 04:41:11 +0000 Subject: [PATCH 17/52] Add coupling timers for ACCESS git-svn-id: file:///g/data/access/access-svn/cice/branches/access/cice_gsi8.1@406 f6bd92a4-46cf-401b-8a38-b7f7993d28bf --- drivers/access/CICE_RunMod.F90 | 19 ++++++++++--------- mpi/ice_timers.F90 | 12 ++++++++++++ 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/drivers/access/CICE_RunMod.F90 b/drivers/access/CICE_RunMod.F90 index 19855ffe..5e725278 100644 --- a/drivers/access/CICE_RunMod.F90 +++ b/drivers/access/CICE_RunMod.F90 @@ -59,7 +59,8 @@ subroutine CICE_Run #ifdef ACCESS use ice_timers, only: ice_timer_start, & - ice_timer_stop, timer_couple, timer_step + ice_timer_stop, timer_couple, timer_step, & + timer_from_atm, timer_into_atm, timer_from_ocn, timer_into_ocn use ice_grid, only: t2ugrid_vector, u2tgrid_vector integer (kind=int_kind) :: time_sec, itap, icpl_ai, tmp_time integer (kind=int_kind) :: rtimestamp_ai, stimestamp_ai @@ -91,12 +92,12 @@ subroutine CICE_Run !receive a2i fields rtimestamp_ai = time_sec - !call ice_timer_start(timer_from_atm) ! atm/ice coupling + call ice_timer_start(timer_from_atm) ! atm/ice coupling write(il_out,*)' calling from_atm at icpl_ai, time_sec = ', icpl_ai, time_sec !=========================== call from_atm(rtimestamp_ai) !=========================== - !call ice_timer_stop(timer_from_atm) ! atm/ice coupling + call ice_timer_stop(timer_from_atm) ! atm/ice coupling !"TTI" approach ice fluxes converted to GBM units call atm_icefluxes_back2GBM @@ -115,12 +116,12 @@ subroutine CICE_Run write(il_out,'(a,3i10)') & ' calling into_ocn at icpl_ai, itap, time_sec = ', icpl_ai, itap, time_sec - !call ice_timer_start(timer_into_ocn) ! atm/ocn coupling + call ice_timer_start(timer_into_ocn) ! atm/ocn coupling !=========================== !call check_iceberg_fields('chk_iceberg_i2o.nc') call into_ocn(stimestamp_io) !=========================== - !call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling + call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling !set boundary condition (forcing) call get_sbc_ice @@ -147,7 +148,7 @@ subroutine CICE_Run tmp_time = time_sec + dt if ( mod(tmp_time, dt_cpl_ai) == 0 ) then !this happens at itap = num_ice_ai - !call ice_timer_start(timer_into_atm) + call ice_timer_start(timer_into_atm) !i2a fields ready to be sent for next IA cpl int in atm. call get_i2a_fields @@ -162,7 +163,7 @@ subroutine CICE_Run !set time averaged ice and ocn variables back to 0 call initialize_mocn_fields_4_i2a call initialize_mice_fields_4_i2a - !call ice_timer_stop(timer_into_atm) ! atm/ocn coupling + call ice_timer_stop(timer_into_atm) ! atm/ocn coupling endif istep = istep + 1 ! 
update time step counters @@ -182,11 +183,11 @@ subroutine CICE_Run !get o2i fields for next time step ice update write(il_out,'(a,3i10)') & ' calling from_ocn at icpl_ai, itap, time_sec = ',icpl_ai, itap, time_sec - !call ice_timer_start(timer_from_ocn) + call ice_timer_start(timer_from_ocn) !=========================== call from_ocn(rtimestamp_io) !=========================== - !call ice_timer_stop(timer_from_ocn) + call ice_timer_stop(timer_from_ocn) !accumulate/average ocn fields needed for IA coupling call time_average_ocn_fields_4_i2a end if diff --git a/mpi/ice_timers.F90 b/mpi/ice_timers.F90 index 835e7675..eb06b149 100644 --- a/mpi/ice_timers.F90 +++ b/mpi/ice_timers.F90 @@ -56,6 +56,12 @@ module ice_timers timer_rcvsnd, &! time between receive to send timer_cplsend, &! send to coupled timer_sndrcv, &! time between send to receive +#endif +#ifdef ACCESS + timer_from_atm, & + timer_into_atm, & + timer_from_ocn, & + timer_into_ocn, & #endif timer_bound, &! boundary updates timer_bgc ! biogeochemistry @@ -179,6 +185,12 @@ subroutine init_ice_timers call get_ice_timer(timer_cplsend, 'Cpl-Send', nblocks,distrb_info%nprocs) call get_ice_timer(timer_sndrcv, 'Snd->Rcv', nblocks,distrb_info%nprocs) #endif +#ifdef ACCESS + call get_ice_timer(timer_from_atm, 'Cpl_fromA', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_into_atm, 'Cpl_toA', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_from_ocn, 'Cpl_fromO', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_into_ocn, 'Cpl_toO', nblocks,distrb_info%nprocs) +#endif ! call get_ice_timer(timer_tmp, ' ',nblocks,distrb_info%nprocs) !----------------------------------------------------------------------- From 0a84ab156217cc8e782affbe7f13071e59a230d8 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Fri, 29 Nov 2024 08:47:23 +1100 Subject: [PATCH 18/52] Create README.md --- README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..d60842cf --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +This branch is a record of the version of CICE5 used within CSIRO builds of ACCESS-CM2 (e.g. for CMIP6). It's available here as an archive for reference. 
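As a hedged illustration of the coupling timers added in [PATCH 17/52] above (this sketch is not part of any patch): the new timers reuse the existing ice_timers machinery, so each exchange with the atmosphere or ocean is simply bracketed by ice_timer_start/ice_timer_stop around the corresponding coupling call. The sketch below uses the names from the diff (timer_from_atm, from_atm); it assumes from_atm is provided by cpl_interface and that the ACCESS preprocessor macro is defined, as in the ACCESS driver.

      subroutine receive_a2i_sketch(rtimestamp_ai)
        ! Illustration only: time the blocking receive of atmosphere-to-ice fields
        ! so the wait shows up as 'Cpl_fromA' in the CICE timer report.
        use ice_kinds_mod, only: int_kind
        use ice_timers,    only: ice_timer_start, ice_timer_stop, timer_from_atm
        use cpl_interface, only: from_atm   ! assumed location of from_atm
        integer (kind=int_kind), intent(in) :: rtimestamp_ai

        call ice_timer_start(timer_from_atm)  ! start timing the coupling receive
        call from_atm(rtimestamp_ai)          ! blocking receive of a2i fields
        call ice_timer_stop(timer_from_atm)   ! stop as soon as the fields arrive
      end subroutine receive_a2i_sketch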
From 8be14fec14c89357c40f5dae497f05fd04ab0634 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Wed, 29 Jan 2025 09:03:08 +1100 Subject: [PATCH 19/52] Add build ci (#21) * Add build ci for access-esm1.6 branch --- .github/workflows/model-build-test-ci.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/model-build-test-ci.yml diff --git a/.github/workflows/model-build-test-ci.yml b/.github/workflows/model-build-test-ci.yml new file mode 100644 index 00000000..e894d0ab --- /dev/null +++ b/.github/workflows/model-build-test-ci.yml @@ -0,0 +1,21 @@ + +name: Test model build + +on: + workflow_dispatch: + pull_request: + branches: + - "master" + - "access-esm1.6" + push: + branches: + - "master" + - "access-esm1.6" +jobs: + build: + name: Build ${{ github.repository }} via spack + uses: access-nri/build-ci/.github/workflows/model-1-build.yml@main + with: + rhs-branch: ${{ (github.base_ref == 'access-esm1.6' || github.ref_name == 'access-esm1.6') && 'access-esm1.6' }} + permissions: + packages: read From 28986316a06d4ae86086cda26ec3d381d22f9018 Mon Sep 17 00:00:00 2001 From: Harshula Jayasuriya Date: Mon, 3 Mar 2025 09:30:53 +1100 Subject: [PATCH 20/52] bld/makdep.c: add a return type to main() (#22) * Needed to support oneapi 2025 compiler. --- bld/makdep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bld/makdep.c b/bld/makdep.c index ca9a9e78..f4786770 100755 --- a/bld/makdep.c +++ b/bld/makdep.c @@ -51,7 +51,7 @@ static struct node *suffix_list; /* List of Fortran suffixes to look for */ static void check (char *, struct node *, char *, int); static int already_found (char *, struct node *); -main (int argc, char **argv) +int main (int argc, char **argv) { int lastdot; /* points to the last . 
in fname */ int c; /* return from getopt */ From ec1e2cb531bb73c70eb73c3d5968b1de5f27abe2 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Thu, 17 Apr 2025 13:51:40 +1000 Subject: [PATCH 21/52] Set netcdf output calendar attributes correctly (#26) CICE actually uses a proleptic gregorian calendar, see https://cfconventions.org/Data/cf-conventions/cf-conventions-1.12/cf-conventions.html#calendar --- io_netcdf/ice_history_write.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index bd6d6d31..1fdb5f0a 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -203,7 +203,7 @@ subroutine ice_write_hist (ns) if (status /= nf90_noerr) call abort_ice( & 'ice Error: time calendar') elseif (use_leap_years) then - status = nf90_put_att(ncid,varid,'calendar','Gregorian') + status = nf90_put_att(ncid,varid,'calendar','proleptic_gregorian') if (status /= nf90_noerr) call abort_ice( & 'ice Error: time calendar') else From 77baf8a5b402fe3d77f8751f71cb1f98e54e8708 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Thu, 22 May 2025 09:30:04 +1000 Subject: [PATCH 22/52] Add license file (#29) Add license file from CICE-consortium --- LICENSE.pdf | Bin 0 -> 46491 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 LICENSE.pdf diff --git a/LICENSE.pdf b/LICENSE.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0fef8f73055bac72a2e27ee805728e92870523e7 GIT binary patch literal 46491 [base85-encoded binary payload of LICENSE.pdf (46 kB) not reproduced here]
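To close, a hedged sketch of the netCDF behaviour introduced in [PATCH 15/52] and [PATCH 21/52] above (not itself part of any patch): combining NF90_NETCDF4 with NF90_CLASSIC_MODEL creates an HDF5-backed file that still follows the classic data model, which is what gives the better detection of truncation errors noted in the commit message, and the time axis carries the 'proleptic_gregorian' calendar attribute when leap years are enabled. The example uses only the standard netCDF Fortran-90 API; the file and variable names are invented for the illustration.

      program netcdf4_classic_sketch
        ! Illustration only: create a history-style file as netCDF-4 classic model
        ! and record the calendar convention CICE uses when use_leap_years is set.
        use netcdf
        implicit none
        integer :: status, ncid, timid, varid, iflag

        iflag = ior(NF90_NETCDF4, NF90_CLASSIC_MODEL)  ! HDF5 storage, classic data model
        iflag = ior(iflag, NF90_CLOBBER)               ! overwrite any existing file
        status = nf90_create('iceh_example.nc', iflag, ncid)
        if (status /= nf90_noerr) stop 'error creating example file'

        status = nf90_def_dim(ncid, 'time', NF90_UNLIMITED, timid)
        status = nf90_def_var(ncid, 'time', NF90_DOUBLE, (/timid/), varid)
        status = nf90_put_att(ncid, varid, 'calendar', 'proleptic_gregorian')

        status = nf90_enddef(ncid)
        status = nf90_close(ncid)
      end program netcdf4_classic_sketch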
zV0=y!<}PF~iwh{B6&1;$>NbUn`6#1d$DMyD#*%jTM}G-r3WJE)$eJ)3U~q@qXTW{f!~ zo&|x54;k_r6ubw|seFxu-UK_04_AbTyt}>;kG!skm&p1v&*|H&BVKN?e1As9I;Xb< zF3sO>OwnEa&i`98jrsXQQo=NTj)X5MyivEiqaIU;?amXjh=$L-G=Qts&pVfPtkVIn z{eQiO7Jm>!phpcTwRY}V=}bF{@3?qjXN&F333eGCc!PNyVb@YCH-dBmwlIeFHt!i| za&i+;_jizI-S@lBV!eqeSKV{5kUc9a(cgdu-r&EuYre>j48z{&zdoFp6RVFrgoLoi z;~GQYDg7LZjM3Tp*vnFSK5#BQuhC~nyNzdwN+|W2Gc5gZ+NE4fVC=5nr1JWzJsIw- z816n%i7pT0_YR73avE9df+2eJQ46mP+`FWHidN;Q%^34W1zWD(K3xro9e2*@KKFPr z^czc4bN^60;DP>wDx;y9kjFpfbs$9djI#%K|L$GUe0hjx?!&BlHC{NrIZ)nswr>lU zFEQkN>N%B9Je^URaTzTiV}=$7sIFra5e;-}H@Yd2bKWIh zZ$a5_$25TuaFy7Yh)sGGg{&(O6fIG21EwPbcVZk27%QM>D+hrP3luuxjPQk6u@QU^ zn>46=o9vy8nXH*|u5#v`(2b`V1WdV>+n!7F>xGJ1BcoO;P6Uhm3lFN6u2C9GI)Z5s ztu*9H1i^p`&44dMJTg>6NUOq8+oLW8An^?OA%2UKr;&2eYRHK!n%dch+oFCld1<~R zT&zmo!=;&oCKNRuG@CY&k*^nUklS$iHoiV|PR8r;JHBJ4?{?GGcGBtcdvQ6xUzs6u zM;?#%r1#7kb5#yF6X$=@cyPVN@9hS6{L3*^pF^L5GY5Mwptpq23JxpaD2JH{Iv~Oo z{dXb=8)u2DWFjLc8cv*VfC}w_qtN)(Eg4wGOk#6m$rpCN!$B3x)YYG-L$bxNw$>)v z+;WXyL)j3j8sAR=og-M>;Bqv>5l=~vLQy6?Xl0iO610`O5{5$x?n0_A5qs*D{|^uw z>ElSH4`%Sem3CYieYEsf%=waABu|w3dr#Qxt9?G)!?ZQ&jn^5 z6Y^i-VLOeVn}{YMXL5wzPGf%)L50UY_iA{>D*D5LfFd+B5l39wdW35j>GW)ZGn1h%Dr-S(+0to(S{Fpz!Z{K&``)z|319jmBuzfgww;Kq}u&O{RGMe!p9%Zv3SV=6>cvANn{t8Kf;Cb-u`i#m8=efY-jF`$FVOk_U zDqlj`JVMg?picIE$Y7iY;>>RicYENYZ`wRzf;eHc=*<2~^mAUm8J%{nZO)~AkLKp9@BItv zI`4JV=jL=tcvIZ)DoH!_3h~KJZ-Vbb(Xb zz#rUz2lZxF*fIYxboM1OB<5CR{3>kmjL-?>1M0+~UkRocVWBA{Yg zDQV9FQGwV`N|qaHW&Zn>A#CoO%ptw`p5(W>{w#mcmo2#7gI~X&+tc5@@I;^Q#4}Ly zFiG+O)O;riGTmm-UwoQ8u;@O_h3vhqhmY6B!>h8wB2<42KHM5)E78pP=lN_S`cK9u zn za~GDTEAjP`Vqh-P-MhX|H8d&&a@=G>RhpExN{;egRnEazhb$1g(if{-)i#4R1IA^6 z!ryG8)P4LzBp=4_-(U8UER1Y%xm}LjkG9=igZC_#sqCNW*Z#h@yf!o)gp#9vPUGq3 zmx0&s-*Zge{YRqdO)+{Z*kckx8qwIG#Xb6|i5amDD`$T{qgq}8gFKxvbo3(Ic*cU| z^AvNJ-h44N3z+UZjpr`+Sv1S$HPw^Q_>Y*67>WXZm4csbt3E_bboKwQv9pSeE9ka# zVvL!YIc=skGsO&TW@e0;F=mL_ZZk78vmG-tGc(2P$<;jEx%xGlDe0(6I`wk2rKQrU zv%kekm1LuoI~JE~LK%~{yDJnB&2qdWGJ=mt9hCpg`WzoZHheiX!O~Gvbs?#PX^#91 zaZ^cqOBY-=ig174+ZVOxX|+Xh8!)XW3%>--%3fY(*q#>&?UVrMyE}`;8{I zFYqcP0n$*f*p`-~$N$(8sZ$dl0q*VM=v~T8 z!EA`|lVQw!QW_7#FoW^`=EaerfV)mE21S>`Gt=K#Iu?YCbJ36HSJU(n8mb1Q0 zI0Kx8KOCN)cDCN8&av&Z3WThec)ESIe`kIuR0%R(mm2#M~^@Ik@zWhUBa-zE#w zOQM-bK_9@&O6Dfk#taGr_IdWtTF1l9!g@M@@Ks^uz# z@PPW2a~7)dwNvNALOYzf@N5cd ze^Bu41}F3oIDwh`FPo!<-r9A{)*2gIo8#lOOEFmAa2)k?x$311F?QBbLF-x%?9sqZ1BF^4_tcQZs&qzeOpT%{rlMXJK$^l?Orpg4`nFH~A{ zsj-RKaZo(zJ6xEf%=L#XWE6r|ir@8zMu%=SCxnyKtcPInhHQ>dii%(3$JaJ~78b%` zsbyG{75_AOYX~i?SU~{Ymnr;#9^yVvYpDqP_Y0PnFqJmC!G-#K+FnZeQ`9-g?Y_{> z3E{k~P$mga@2b6JKtC?saMtatOOJTTd?xUi>75P1=JuvrGf>2tOCyf;14GVr;$HFo zrra4%?p6$jZ$!BL*Ry1)+U+RhXc&mnIEghRZq!6Vtx#&o9ryz%I1^@qznAK7vfMN3 zXj8`mW&&;``+heW8xg=9jjNljB~)`!%va(0=HzHvbP;p*6KY+*4g4x8 z*c*g4!z})=uhb-A69V5j(nIagL3J67H17I1$+e6rfl?TSwU)*Z*e5Cjp7p(+s^8Q; zf_ozg(g9bd32c zC+T(N$_3|WAP(9RG`6w7cr;xWf4=9{?hRZ6Y~*hSkFx4D9;bthMp)i7Vx4Boph@+> ztFiHUDY1|hO~&;AoDcXV@2yz$&gIR$6^!?{;FP?hE1g!J<;{%p`&X=!=YRIh3+b*9 ziS!Jp_eiO>jHP#v*<;9Cpmw|`P3};XtW@gk9;Xfu8u`aiZfubFno~l;m@4<$oIfv( zN(*4(EEhzBPUCDT`_)O+=0+M9Di%IcM9;92dMyXp{zKpaZaPMju zapid?rX}Ylq17%SV`RZZu3sVXu&kS&$qlzr%D0;}-&eqwyse;#b*`dC>(sY(Mo$^X z2a`#k(xfD}G~ZV*!#rR?UDn!N9RZ#Gpo!Pr|bdQ>f8X`i>V?BoeF}F;3#v2U^py25#>3>@%u?z7MqLkhBCBXJ5DwEG7rI#8 z(tu+l&+q6z#C`4`Z>F@Z=EQvkt=X+0!7cf$GJoxV0Z*#uduRx(P}e1kl3C!@sM*N4 zEBGD5M2DbS2Q65M;laz4E5fCTKh_pl*ytrCV|eyc|KxrQ!CfY|uWk$tzcXTcjn-dq zlz093d|0Hr=bDHZl!7s@$){NLd*H+3=_(4CbfOS3a_rKKQu{Us&k!iPW>5>2;aHeH z;cP|>PsJN2xGjiD%c{BZ#hL{tt=9n_VQvs7x`j88<(^Fw(xRPWtMh@jq-F^Do7%FMWNKjUvI&kI>luK~{#v zYW$JJ);d1|9Sj{RQaUQK$UN>ytwzhxRD@mk=5UGCNA_4q2{}=WU}P2=Euq{qh>0cl 
z6B`+^D=^mj38wX&0{oF^5~pJ2aJaY0S1eWQ@^;aulPN*omA1@E&9JPe6`;O#H^xI2 zB&{-qz}Dp%#oth}vVp1lI(oK57C#_v^8N&Ut`(oN%24pS~c3c4NH{+E4z-7VLTB^Q88);#A9UCn?@qy}h;{ zSr3~?1f}2P@brR5>)%pi)8HdT$GerTBz8H7tXGr4;1qcx-GD>seThyY)kdi!AEZcx zD3j)cVF#Q+5t}+YscW}{%IA;cC@3$c%+G?IrsDP`Rm93}H`kB#fV9iN8_S0k>gR9k%k-2uo{`*{2$@7VRv`onkGqzX*%fo=flM)GcwTHk3- zcDZ;!kkE_b@MZk5OQ^_;<#_`Oe|kceoeOv6az|xB!R*POwFKcVJvlz+nV}#I^5d3L zxlx|3{=ThyGO=CfbW_$D+8P1Q0bBY3Tjwdb^PV635=)6L*LQc>0@p%=d3mmGmaR-L zU^)}~33&InkB^7QMS8~4F58?;Fo925?y5Y!eVGIy_s>72Mx~}Bw?Rntbp;=J1m(z6 zR+0&9TX>%+GVhBqrfXJVzwuIA1}-6qZgSV4lTyyxJ3RDm5oEc|DFd`orK5Ph^^SLV zERO>;>$z34z$1l+Kj}HC_fMP$v|KWtw+S&gz;#cNv`I{7d14@cYU3-VxOgExM+krf{fLBf>0J;CDI6PYlttb-BQ#G*n%0wbcF;_p0&UsFZ_LXew9E z_p{W!d~{im*WUzvzu#(Yu&LsXsZv2SaOW zARHr&5t<}PogAj!2l3e}tRioYR!L2t^_~`WXGE@yB>4kTfBQST;#cXz;kA&Xf}^=5aW z+&$)tkf}=ZC#w{XjTlG6Xsq+YEc*2vz!%u4Q@f_#xNL2vsYN7{9 zf2XA?N&LrDmjDs(egUf%1F!cl=&^ezwP>W3!Qx?ZahjE-*h!;?IHslZ?#bU5yh5zj z@I0SHMT59-gqXuQ@%T~|OF7e=Q;FlK@&sV1ZF;YcUdOE#JTe&7hL_{T2|5gs-JTI=#}&&CaU$<7d!@(s~% zc6V}L6A`9ZpCro)0X%SprmX8w!7WlU=*5dYtXi!LL(---s3#z9DpqqzlM1#ugBmgz z>qbx~GFj;aL+}RKjq1E6+TQ|jRPeV>YH8-eWYi&aa${IM9>Sis5sMV)ww0?N3O>9q zy#H8&HG9Rh?^+M#Er+}s&b=6^AwO)vu5QxzUF|Kw`ux4BA=obrHTo(~d@+*|)l*LI z`q3=<7g=gbvhD^q4O1_t3(H+2UtF88j;qD!f^RFlOYu??-RW#y1=Fu41Tl9b`M>~j zCbcu8-1ed<8IA~A&C=Fi6Qj~gs>ih6CnteZ2=ViMA!y!g=V@JdV}FqSM;ivH{=l&( z4a7Vd8JW7&+N$!Unq!3eCfS6?+(KvH9kdV{yWMy@njsTA%a>QaF>9SqJo4nR-8#+L z2tI`~vC(+8&!uSdU5MPHXs*PlFjxc(m@3!;KF>%H=$xWXhygW}-7cE#u$wvp`bB>vO*I~EdnH0s$#X}74c}D$BVlY+KmzrJEV&ON6A7^G7hjP|CZ8I z=W|J6JIPrle>1%QXJnIAD9BXCsOwGEc{yIott5$<-gl5Ewb1JOTCpnkZc|M>Z>R4+ zSRw&uh}>`Pk=4yO&h=EpFUs?)j-5;&xXnrwSUpb140Y{79%vwLst~Yp6tacgcjt=3 z!GXJew3KL3BY!M)7DvMCMI?)?hU3RJDqejJg#_q-4=aptQ>u@pAGnR742#37F*ZLr z2&7CM3eq}Am>=%M&h?ymw?Vdr#oP(^9S`$5*tyw37SHu3QLV(94YSp-F6K0qXg&mC zH_BJ1tV-0Z4`Rm(n{I2t!^1mC6gY@;oPw51FtUbRam@e4*RMkz*{O9mY}T9jEbOC) zLcn!*3;15-deQv;Bc(vEQYui6qwIvMxbi ztL3*g>_0a>B+BgX2X3SX$;NMH61>;%9#tPKy^5UCUhS@$$!qKS{llX#bf(K3O|&=m zSXZ{W{J7s9kLYvG@B6K9==nNoMGvHd9}c45hy2jT=Jy9F37lePv~p=fDP2GYN+NlY z=JTw?of{Cn{xUI+IB@t5U1+p|jI(3N8W%lwpnP;x9CLgej$^YRLRc@pU)YZNhB>c+Ibz-qYCnLHMP|H1#LN^UWg4Drpynpv6-mA zvJ%NOH_H8ZOw%_q;#X#jUfg1ov!Xl6^2J*%wdAV7f5);Kt=^=K996Jt#p%~x8(EzC zT_2G)txEaXTb|6IulC@EeraR;x-cO=*@38{UjwL&oiZk!UjvO~<_0nAU&iAf23}=D zSlEXFEC5XdQNCeAbGs$X`KPlb74X=g579?=U*3^4sVSx83nxyC6#NV5ZXth={Y-3; z`m&Q2wU&eb4Mx1^$2>g_PQJz!pbJ%{q%8R!8ikm|^SLMuXe#YxS?Tul_z|#vJNxgt zCW6^MLpR+t4u_g(-^#k4(a-fd;cF1Lr_CdXOx^ErzEoZ1(NVvU#C;lpwF||>CU-`Z zp_*;=9AC|;+n{+Y+JfgZ;J^tXh~5gpzSU&Mq+)OwPje4)n|7aLu}L9TG#ZYuO1FG7 zE`E15fU+9stShZV%ic(+ z(b*v(>Jy3h?;M>5`RxQs(8x~ge9mHEOZ3weG1l$^-&FSQ*1q@%y@HALhy%y9rHB@F zx`!YBz+=?ARMuZ+3NtJZphk4Mik`xDWT0}Q4t^m<5*5DAf-{Sv5Q*r(Br2h@8Wd6s zeN_Sa`J#yfDvbm&0Zi)UzbL;IkC~)aM$uX9l)(b2LlZ25AFD@+&SwzMk9;Y>XD8R0 zCyYk2(68hWFZ6+$=>F4eRI%Nz>c!e0L6*LiG8!D^D77WDS`)tCSQ&&CECgQ4Yf6R$ zrY_{;JqrgboVQAiDrn`I9 zDUpED*&dE_4EoYd+9s=P^$U5B$@nOx%_xk#iP`Eo5Lkce-iSO^R==W!tC^fV26j!C zM1mnl$AkFBkSeYQ(+aOO&Wei-WjB@^#k&_ECfZ5rQ6LE~c@OgQBxX7`8;ZbgbV<t3|Pt#o3QZ{*~beamftO~U`>vL>0 z#V0o+aqP0QE5o~5T~c`G5A8Qp>MqVAtDpxj5WFvX=ko|Bpzd@QZ(H!sGX#X%ZUgJGJQ??M!&Wb1SeM`8Qu(MK9nNW^UY|B9N0zjRlVBi z_RXLP3Xr7U(kOjXS%5&(j|s1L2o=Q^?aMAtz5I$@zKo(FF~}GcWqN}w!P!SS*q>0q zMH1gZJSB59fAlZ)@r4R!?}Nv15atIO?=yWIm;ESI$NN5f^G4!0_}#;=Oz{XhFI_wr zNB`}Xe7viOSdd}-F|9zn?j|p|F~&r2xC-6`2WkawexJ@(|(hC9fZV?IE#=glE~&n8<8a(^DBYFgURDjV-awn95YVAANHxmt=UCR>? 
z6c+5ox6Ni*vP(7vfWX;XTWu+WrRMYt*JT8HCI6zDf2syLU-AqIY7XfE@~MNC06}s{ zs*l$kPJ8iHv#V9B_mr7*OrDTG5m_^V6kAEEoSb*kn(@6^aoNcm)5qvEXPJ}7W+@vL zq{*&bffL(-Sn?0MruF#im#v&tQI{F2lj)cVdd9tN`08b8*$XPToqtc$ngSGKlrAf> zlL$KV=~E4-sbh%Ge{!h0%*d-Hx;dvH?afp_$GMcLjJ#zR({mgt%r(g zmg!WE{tz+9ZgUr-tsI}2`E8Ps;hrE#5%k6?SJ_+7+5u;o5o!Oc=$k=AcccE;bg7mM z9&PJxobZC~A&;Z% zpk<_mB3#|ymU1OACtT1U{Qx=nm=UwiAc?r`+^fn*8tr$`h?UPn+4%9++r&0q|J6s+ zXX9}d5$7UfW-^F>y5<1;VT+0X`KE2mkLUiz^X-73yZn+Q-RUZls5@%#i7oGri+h*_ zAt_Hgn9$Jz$*=>f^;Wm$V1uS5Y7h``|AoN)i-`9eS{OQVXjEAg(Z|#3b}<2G4S9Dr zAy|2N77Z)>eWG&ex<|GuWjQTDT9G-w2mM>ZnwcFVv-&uSZR<_KZ0m&U+S)?koUD8l z;>yE;jhCpnT7;SerH1Mu=yPvMAxqo7cZ|oY{8?SUubr^od|&p3jRVExM1ntX!h)s0 zYQct-=4%y&d=fletO|qaeT52p1u0#knyn2gKqFX^Awjx+Lxa~S@~kDAo_F5^+2&@- zJvLNqpk}=Xq`YuHk{FhHYD@m5tI8^<`nzUZQ`LI=rq209hMsW%Dr1{##5=*tY7yZL zNyIz%#6#C{Gw-SU%0h%pCQw2~SRuimLvWP|w-cZWZ~`r77Z@bw$?piOE9+S9rv!@x zEiP{dPcUG&Vy0s@)8Q89N%q!A=Bj;*FcU8pp~L{p-Xft&U0@&%d85&-rc}#&%o>dV zdKAdFrzIsMgm~iFJe>XH6KbvjYr+OYbyPA&p-(bKuun1^u?!F!`#LTllrx7OuF?(_ys?9t)E%q3(^Q0u>fo3gM>F4Q%X*uYl$kC|_Cj4*es|dg@0qooR$m6fVj+jx z5iA0KD_ctFVFla@{Yx|Ulsb2y$JgBNQwud6uAA6ony}uE@;XL(`||!kCiFLIVEK_t z;MiIY24)#d=F8WFI4MwgZ6KD+FG?sf$=Pfw+Dtv_dMUBnILxb7R`N%;kr{bm8yc~F zJ+%e)kE%bA?LxP)fgRQ+H3a8r1M*p?yf)z(&`HRh-b_`|9~zn3Y3ZujahyJ0$H}&T zYx8F-%1ntqkHZ&Tk<^M;|Mj}kl6VH*^mv>1Y;KGQz`nL(iAb!*&8wX4D3*P7bw!4w z^AK{tb!p|o$@^Nt8gtDdzN;afJ8UjdR|4HH4Hm~fq&I4<4c)xid+&L0z2JFG6!r1E z_A>n>76j*ozOy-`anu*Bt9DNV)J_TQGN>1iF#SOPvUmLq)czFRnwV3)>LhNs8=J;d z{wyNdQpENOH}Mu${@!J5Z*SRhrsi!dlcX%?qQQByrGq}57UKymd^jguvO{71gbw6Z zjGx5}M2Sy}w#tkqJSIU7j|WPWe=BvCjs-UGJaRJ{FOdN}PM-Q1rXk%}bVz9N1 zK&j)kX#76N&a?vjwAoi$73}W^Qf1+Vo-~n2lI7BMs~Z10P~#^`@yJU@!KF*ZC%6?e z#|;L3mX>yw=Q@Ohi!KVEkjhh^{I%Jj-R1f(9KNI(?%2#pKkp6w8D0)9Wr~~?!!{09 z;0K$TyYAk@KdibaOqY}W1AxWKv@fFQjlWWASk=gJ3~mzq6t+Pg1swS6-1`%> z-&AC2PZH3>m7?7R)^I6s$N%mo6>D$To@5cwKfBy6E~D3R{K;eR+bo^J%Ove|43WzxKqE?he9!FDj(*4;kQ;b%7U6>;9#Z3^-}J9F665?L8{r7GD$ zGBI*4D_0y`mJnu7l*!#u-qSRxf%9=37+x@UkwbH^NCcDUhvNz22jfi+Q+299pkl8V z=dZV(AjTd?09}1nEg|6ZgBbN}%D5`%|rxHF1>bH7Y z9M;|)S!Oswg{^(rtCtLf4|b*w)BujvPn>U5NDkJ;hmj6N9hYd(VvtPSqK)z6d?#gU z)ukk8i>yVWC9>(KtM2H#)yg1e3qrzQ)JMr3>X-XGSo3~^AXOwF0x z@d#*z%m+R}QxDUJJh}>x86iGjE6Ii(q?-X#tVFD#909w%!h6bF!3S&Rg*ZX z-3s-=i=2gmj>Mzu<4eb7hz0$Gq5g+;$jaejeto}rvuC*qBYryalRn+vEY6o3|B)9O zhjai}M5(y`8cZ2wQ*q&IlC*}{94_X~b7J=9+^Z@~L0#QsF|VaTf`qsKu6GMOZltDo zhDrS$^R{%jn>TSqRLWF!{bYk2y?7`&@e`(qA)2Go9bsySBO~IdtKKS01f0ZceiQUt zAtSPj(C)^JU_8-}I92@>cvm-c7;Nh}V+$i{qlZ%sPO0j#n75ioq(>2lrb@W#XR*V1 z=HdHv)XEE0@4(_GerJC9d4H@r*EdI949&wf)HOhu*O8n<-%Bc?A@>1|1bA)^h^bpw z3Kyd;6!C?{=0MM5BK45-ICRz5B2li z_v=Z4*Q6s^D5hfB$_Pcbq@tGk!K>p5NVb!gYnhb#dtN>i6Txk8pR6 z(?A{|i8|XT5!6eGHC9j;DgweV7m?0aqQ<7c!&jwMu4GA2={{b7>bS>x1fX0F_ioSI z0(?UDhrZSZJS)|8I$F-TueO;&7pc&7TPiX77K=rRAIxRIaI+bQNG2$0PI=a401xDr z=RJO%4l*!Ts~hTRqH^NgT545}+i*zFMcG8$7?MRNghMq}2`5rb7}XOeXwhqdCCRbx zyc$*sWs+shHmz^65Zp)BJM=q_8Ms7I_X|V!i}~k2u_-rq8S}LrX@0ivzYD+EnHuS1 ze&WGKZB-)@Y1f)4<+S3AE#h*QomEtq4v&%SQsj^6gM|$eiLq!kgRWlz zzw_K}#6zVxzx}25Q~h%~QP4(nj95&=m3NWP+)H6PsP*{YGs-0 z-nly6uh{KmbK~#1eNs~lqfWDjILY;n?{GsHGUsV@-cBa# z-GfbqPA2O8(N3acGU<43A#&xZmH2+S*FzBm@*%LBc(1-AymUM$d`(tvg%W3`mvQM& z3i)hNLsb~*c7j+%0!M0L?Fm9O^xsij;afT4G%Dgo3}_z0NE0-5u*Z#h`z&=Tth*&> z95s5TXtqvn+=;yI6SVVtlCqOC!j)YP-l;to;nRJ>ElHG?VG8F z>@yc9+WuN|$3gCy^k*W6GX0(0FONRgcUsdFQ!%|g;aN$u26~1h9EY__x#Qs)xBy|;P$#P0pxv=?4!?^qDCM`*Cf#Yp~Xqe_S) z+R79|vnMARwkMtV+fVXC9ZFt8`ufk$4B=6!3~_2}P^^W_=nv%&u!GdS1Yt7-x z5E>DHbOz(efoQw4RJonL_)tx-%KD{p$+hzQOkc|Gx@xWGFA~yi5CJUA;{AgS?)cH# 
zqV0N*J>fZ7Gh*C~{jnx-`q%A^GmZxv|6I<;e##R?QGS9&WkIBeoFV_JbMQV(XHEPQU2EHlOF8&N6{_O*0)ucoh~fqJ)~(w zX#;Bhcjhf(9)k>mXYaeTEXI<+_K>!=&??ksh|9Cnt-k85Wj3z)<|;o_PtBD^GTCo> zP>J*XNI?hQSJTr%txI10NSyl#A24_AOO+20g^4FYOO+RA`n8koFg)6O7b`7AXVTW} zC=WSF78F`RZPPIGzg`qM_s^zDd*IoBjiUXw%C%$ALO$^EkBf(xmL{R?Dl#Lx7ymF1 z6KH8B(ZULlOK{~Y3qX4@h+^LM{SIh7oWy?VmPHh~fcLyYHRx27mrfR~=5Oc*I=e!V z{7SqBLREUO6V18^4dRRW(s4nawi}udUtSeg8u6nD|1R*wNURQcb{3wh<%Rh^2hvwaM8-oQZ?U{rmMP{&|Q?#B2_xl4I7D z;N%S>A5m`B7c!BtI|8=$*6!ugAvViM#l9f_ak>j#1!?B2vB9Teiy>i8cOv__;h>0% z2^N4wXSv1i;DWYdi6%X5Us808_7$z{GVTF|F!JcjG3Fu_vU8t`+O%CTw8%_{?yT zKQSb=33=$KE`C+m9n=@O%Ub&TH?!nJH%<-NaVw$86?AKy{m1@WSjR}z;w+D&-)3Ul z=X0kCa>2TcrLNRN^4q5}i*={`)+dTmt8?LhGDTSa|Cl1AZ0!FNA@YCnUM|S{I?!WL z(PPDoWkDviSG^OJHr zqGnEGUxP3FY?oNj3DfPFdOUZlNIl*S7x*WqR(h_JcZ%MO{~!p2iuf$jsj@UZt}pO< zlN-u4Z|7NkkPM#-kn&A3OkKQ@x5FdcC?2(-cv2$Pf_tIHwC~~(w0Th_l(bSF?Mpnwsw*L)xVP@eZWssYCQR)bAv}pi zxN5j`9_ftG+tsTX!;JGDsU%adp=IMbn3MNzGry@olD96cq4-z|mi`)RWVl)kfIkav z7)?f;#LwWsmpSwEybCG&y=M2pY~AGSeCMXEPBO)0fpyQ4nU(x!?$U50q3ATlwJTrn z`4JDV?7E4rJrnHTn^A(^fN!vx49=RR)Kih<`y2_U^JNw}7C4x|u6XUcQWrA#M zjp@BTz3Z|zV!uC# z#j(Gu>_V~G%;n3!renFo4Id}^>-32_%^j#|c?FrGwablzXO~V7#(#z0KvQYmZQ8is zP#aR0CcN}F;3lU^$4OGpo`DbkJjNQy4v$M1Oq{_(a>T7$tOK!N6=C`k%KH;j$Nmjy zSi`Ac6nN$2j)GE6fA zYQ0Y^YZ8d>(kyeHF2AFy0ShIcWh$so?I0rmPm)sn_kIwPKrQ0NrBo^ICL%lx_3<6r6lBAeqJyx%Ul zsRKMF_x+Cxa5Esb0-J+Hhl!X!G#pX9!i=Zk@pO@2lcqw9$y^-b>c@tJpprWud(jKV zIiNd2z2|LyDQEzi@EZaLfSx<(o^dAq6hTNWK_X??|%Qz8sXX8qy2$6kd-#Q2MYAIp$| z9UEi+qSJ6Ma2&GPXI3f-mJ7HV45zKnzO8u<1wu*|olm{B0P;18287X99kV_uF-B$j z<8Z&2B@6+5T(9qT)$qo9Q3u_bopIZj+f4CtUq>U1WHVH5J#55{y1Lw#h9h9Qy*DD9 z12M4r`Wl)4%nn3R=T#eE^9*W@iRp92seLx(3HMcNp%F$eZ1Qv;h<|?3$>-|%PkH$d zblcJ-kiKVf#y(1~he`yXW zlRBw3=|3XXzekspN!ilV^xv9EN{m#SkA;Jcl?%vh%Ff2YYR1kD Date: Tue, 3 Jun 2025 20:40:30 +1000 Subject: [PATCH 23/52] Set netcdf history timestamp in middle of month (#34) Ported https://github.com/ACCESS-NRI/cice4/blob/694a9fbd4ac29dc841b38aff002eb36da5b650f1/source/ice_history.F90#L2184-L2198 to cice5. This avoids post-processing interpreting end of the time interval timestamps as the subsequent time interval. --- io_netcdf/ice_history_write.F90 | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index 1fdb5f0a..a8e6e90b 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -41,7 +41,7 @@ subroutine ice_write_hist (ns) use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar use ice_calendar, only: time, sec, idate, idate0, write_ic, & - histfreq, dayyr, days_per_year, use_leap_years + histfreq, dayyr, days_per_year, use_leap_years, month, daymo use ice_communicate, only: my_task, master_task use ice_constants, only: c0, c360, secday, spval, rad_to_deg use ice_domain, only: distrb_info @@ -114,8 +114,22 @@ subroutine ice_write_hist (ns) CHARACTER (char_len), dimension(ncoord) :: coord_bounds if (my_task == master_task) then - +#if defined(AusCOM) || defined(ACCESS) + ! set timestamp in middle of time interval + if (histfreq(ns) == 'm' .or. histfreq(ns) == 'M') then + if (month /= 1) then + ltime=time/int(secday)-real(daymo(month-1))/2.0 + else + ltime=time/int(secday)-real(daymo(12))/2.0 + endif + else if(histfreq(ns) == 'd' .or. 
histfreq(ns) == 'D') then + ltime=time/int(secday) - 0.5 + else + ltime=time/int(secday) + endif +#else ltime=time/int(secday) +#endif call construct_filename(ncfile(ns),'nc',ns) From ed83b972f93133f400cfc0cb0e5f50919ab3f943 Mon Sep 17 00:00:00 2001 From: Spencer Wong Date: Tue, 3 Jun 2025 13:41:05 +1000 Subject: [PATCH 24/52] Modify dump_last for use in esm1.6 --- drivers/access/CICE_RunMod.F90 | 8 +++++++- source/ice_calendar.F90 | 1 - 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/access/CICE_RunMod.F90 b/drivers/access/CICE_RunMod.F90 index 5e725278..7d94217a 100644 --- a/drivers/access/CICE_RunMod.F90 +++ b/drivers/access/CICE_RunMod.F90 @@ -48,7 +48,8 @@ subroutine CICE_Run use ice_calendar, only: istep, istep1, time, dt, stop_now, calendar use ice_forcing, only: get_forcing_atmo, get_forcing_ocn #ifdef ACCESS - use ice_calendar, only: month, mday, istep, istep1, time, dt, stop_now, calendar + use ice_calendar, only: month, mday, istep, istep1, time, dt, stop_now, calendar, & + write_restart, dump_last use ice_restart_driver, only: dumpfile !temporary debug #endif use ice_flux, only: init_flux_atm, init_flux_ocn @@ -135,6 +136,11 @@ subroutine CICE_Run ! ' calling dumpfile at icpl_ai, itap, time_sec, idate = ', icpl_ai, itap, time_sec, idate ! call dumpfile !endif + + ! Write restart on final timestep + if (dump_last .and. (itap == num_ice_ai) .and. (icpl_ai == num_cpl_ai)) then + write_restart = 1 + endif !*** ice "update" ***! call ice_step diff --git a/source/ice_calendar.F90 b/source/ice_calendar.F90 index d3e69885..1845ac28 100755 --- a/source/ice_calendar.F90 +++ b/source/ice_calendar.F90 @@ -296,7 +296,6 @@ subroutine calendar(ttime) write(il_out,*) '(calendar) idate = ', idate #endif if (istep >= npt+1) stop_now = 1 - if (istep == npt .and. dump_last) write_restart = 1 ! last timestep if (nyr /= nyrp) new_year = .true. if (month /= monthp) new_month = .true. if (mday /= mdayp) new_day = .true. From d87c45923d0f950856efc307aa18ce260c1f4804 Mon Sep 17 00:00:00 2001 From: Spencer Wong Date: Wed, 4 Jun 2025 11:51:04 +1000 Subject: [PATCH 25/52] Keep original implementation for non-coupled model --- source/ice_calendar.F90 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/ice_calendar.F90 b/source/ice_calendar.F90 index 1845ac28..02a29d24 100755 --- a/source/ice_calendar.F90 +++ b/source/ice_calendar.F90 @@ -296,6 +296,9 @@ subroutine calendar(ttime) write(il_out,*) '(calendar) idate = ', idate #endif if (istep >= npt+1) stop_now = 1 +#ifndef ACCESS + if (istep == npt .and. dump_last) write_restart = 1 ! last timestep +#endif if (nyr /= nyrp) new_year = .true. if (month /= monthp) new_month = .true. if (mday /= mdayp) new_day = .true. From 8f074fa3afe165ee479572d4032b02d16a7879b3 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Thu, 5 Jun 2025 09:21:55 +1000 Subject: [PATCH 26/52] expose ice_ref_salinity to the namelist (#36) This makes the assumed salinity of sea ice configurable, so we can set it the same as the MOM value (configured in MOM [here](https://github.com/ACCESS-NRI/access-esm1.6-configs/blob/88ac5aab5d6d2500209b7d610e68e5c2928222d4/ocean/input.nml#L367)) We use 4ppt in MOM for historical reasons, and did that in [CICE4](https://github.com/ACCESS-NRI/cice4/blob/694a9fbd4ac29dc841b38aff002eb36da5b650f1/source/ice_init.F90#L243) too. 
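A minimal sketch of the intended usage, assuming `ice_ref_salinity` is set in the same namelist group that already carries `chio` and `dragio` in this branch (the group name below is only a placeholder):

```
&thermo_nml          ! placeholder group name
  chio             = 0.006
  ice_ref_salinity = 4.0   ! ppt, e.g. to match the MOM value
/
```

If `ice_ref_salinity` is omitted from the namelist, the default of 5 ppt set in ice_init.F90 still applies.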
--- drivers/access/ice_constants.F90 | 39 +++++++++++++++----------------- source/ice_init.F90 | 8 +++++-- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/drivers/access/ice_constants.F90 b/drivers/access/ice_constants.F90 index 79a56a9a..482c4852 100644 --- a/drivers/access/ice_constants.F90 +++ b/drivers/access/ice_constants.F90 @@ -44,10 +44,7 @@ module ice_constants !ars599: 26032014 new code (CODE: dragio) ! use new code, mark out #ifndef AusCOM -#ifndef AusCOM - !!! dragio = 0.00536_dbl_kind ,&! ice-ocn drag coefficient - dragio = 0.01_dbl_kind ,&!!! 20170922 test new value as per spo -#endif + albocn = 0.06_dbl_kind ! ocean albedo real (kind=dbl_kind), parameter, public :: & @@ -58,10 +55,7 @@ module ice_constants real (kind=dbl_kind), parameter, public :: & secday = 86400.0_dbl_kind ,&! seconds in calendar day viscosity_dyn = 1.79e-3_dbl_kind, & ! dynamic viscosity of brine (kg/m/s) -#ifndef AusCOM - Tocnfrz = -1.8_dbl_kind ,&! freezing temp of seawater (C), - ! used as Tsfcn for open water -#endif + rhofresh = 1000.0_dbl_kind ,&! density of fresh water (kg/m^3) zvir = 0.606_dbl_kind ,&! rh2o/rair - 1.0 vonkar = 0.4_dbl_kind ,&! von Karman constant @@ -73,7 +67,6 @@ module ice_constants Lfresh = Lsub-Lvap ,&! latent heat of melting of fresh ice (J/kg) Timelt = 0.0_dbl_kind ,&! melting temperature, ice top surface (C) Tsmelt = 0.0_dbl_kind ,&! melting temperature, snow top surface (C) - ice_ref_salinity = 5._dbl_kind, &!8._dbl_kind ,&! (ppt) ! ocn_ref_salinity = 34.7_dbl_kind,&! (ppt) spval_dbl = 1.0e30_dbl_kind ! special value (double precision) @@ -98,21 +91,25 @@ module ice_constants !!!ksno = 0.50_dbl_kind ,&!!! test new value as per spo zref = 10._dbl_kind ,&! reference height for stability (m) #ifndef AusCOM + snowpatch = 0.02_dbl_kind, & ! parameter for fractional snow area (m) +#endif ! multilayers with the UM coupling - aicenmin_ml = 0.00001_dbl_kind, &! AEW: min aice we want to allow when using - snowpatch = 0.02_dbl_kind ! parameter for fractional snow area (m) -#else aicenmin_ml = 0.00001_dbl_kind! AEW: min aice we want to allow when using -#endif -#ifdef AusCOM - ! in namelist therefore not parameter, which is counterintuitive, - ! since this modules name is ice_constants -!ars599: 26032014: change to public -!ars599: 24042015: remove dragio!! +#ifndef AusCOM + real (kind=dbl_kind), parameter, public :: & + !!! dragio = 0.00536_dbl_kind ,&! ice-ocn drag coefficient + dragio = 0.01_dbl_kind ,&!!! 20170922 test new value as per spo + Tocnfrz = -1.8_dbl_kind ,&! freezing temp of seawater (C), + ! used as Tsfcn for open water + ice_ref_salinity = 5._dbl_kind ! reference salinity for ice–ocean exchanges (ppt) + ! n.b. CICE6 uses 4 ppt +#else + ! get these in ice_init from namelist real (kind=dbl_kind), public :: & - dragio , & ! ice-ocn drag coefficient - Tocnfrz ! freezing temp of seawater (C), - ! used as Tsfcn for open water + dragio , & ! ice-ocn drag coefficient + Tocnfrz , & ! freezing temp of seawater (C), + ! used as Tsfcn for open water + ice_ref_salinity ! reference salinity for ice–ocean exchanges (ppt) #endif ! 
weights for albedos diff --git a/source/ice_init.F90 b/source/ice_init.F90 index 71f99edd..cb04f778 100755 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -42,7 +42,7 @@ subroutine input_data use ice_broadcast, only: broadcast_scalar, broadcast_array !ars599: 24042015 for the namelist variables use ice_constants, only: c0, c1, puny, dragio, & - awtvdr, awtidr, awtvdf, awtidf, Tocnfrz + awtvdr, awtidr, awtvdf, awtidf, Tocnfrz, ice_ref_salinity use ice_diagnostics, only: diag_file, print_global, print_points, latpnt, lonpnt use ice_domain_size, only: max_nstrm, nilyr, nslyr, max_ntrcr, ncat, n_aero use ice_fileunits, only: nu_nml, nu_diag, nml_filename, diag_type, & @@ -144,7 +144,7 @@ subroutine input_data a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, & !ars599: 24092014 (CODE: petteri) #ifdef AusCOM - chio, & + chio, ice_ref_salinity, & #endif saltmax, dSdt_slow_mode, phi_c_slow_mode, phi_i_mushy @@ -315,6 +315,7 @@ subroutine input_data Tocnfrz = -1.8_dbl_kind ! freezing temp of seawater (C), ! used as Tsfcn for open water chio = 0.006_dbl_kind ! unitless param for basal heat flx ala McPhee and Maykut + ice_ref_salinity = 5._dbl_kind #endif atmbndy = 'default' ! or 'constant' @@ -790,6 +791,7 @@ subroutine input_data call broadcast_scalar(sinw, master_task) call broadcast_scalar(dragio, master_task) call broadcast_scalar(chio, master_task) + call broadcast_scalar(ice_ref_salinity, master_task) call broadcast_scalar(Tocnfrz, master_task) #endif call broadcast_scalar(atmbndy, master_task) @@ -1030,6 +1032,8 @@ subroutine input_data write(nu_diag,1005) ' sinw = ', sinw write(nu_diag,1005) ' dragio = ', dragio write(nu_diag,1005) ' chio = ', chio + write(nu_diag,1005) ' ice_ref_salinity = ', ice_ref_salinity + #endif write(nu_diag,1005) ' ustar_min = ', ustar_min write(nu_diag, *) ' fbot_xfer_type = ', & From 15fa7de6aae2a05090ed96a9334f0bad905b7a3e Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Fri, 6 Jun 2025 14:24:37 +1000 Subject: [PATCH 27/52] Set cp_ocn to same value as MOM (#40) It's hard to set this in the namelist because it is used at compile time to define several parameters Set per MOM5 value: https://github.com/ACCESS-NRI/FMS/blob/bf9b80423ea4f66efa389e03d3c19b26c009e8b6/constants/constants.F90#L90 --- drivers/access/ice_constants.F90 | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/access/ice_constants.F90 b/drivers/access/ice_constants.F90 index 482c4852..a3b14625 100644 --- a/drivers/access/ice_constants.F90 +++ b/drivers/access/ice_constants.F90 @@ -33,9 +33,8 @@ module ice_constants cp_ice = 2106._dbl_kind ,&! specific heat of fresh ice (J/kg/K) !ars599: 11042014: add AusCOM #ifdef AusCOM -! cp_ocn = 3989._dbl_kind ,&! specific heat of ocn (J/kg/K) - cp_ocn = 3992.10322329649_dbl_kind,& - ! freshwater value needed for enthalpy + cp_ocn = 3989.24495292815_dbl_kind, & ! mom5 constant + ! cp_ocn = 3992.10322329649_dbl_kind,& ! used for cm2 #else cp_ocn = 4218._dbl_kind ,&! specific heat of ocn (J/kg/K) ! 
freshwater value needed for enthalpy From 267d4be191cf106885b82b259ab4b419cbee7522 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Mon, 16 Jun 2025 11:08:12 +1000 Subject: [PATCH 28/52] Expose ksno (thermal conductivity of snow ) to the namelist (#41) * Expose ksno (thermal conductivity of snow ) to the namelist Typical default value is 0.3, for CM2, use 0.2 --- drivers/access/ice_constants.F90 | 9 ++++----- source/ice_init.F90 | 11 +++++++---- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/access/ice_constants.F90 b/drivers/access/ice_constants.F90 index a3b14625..04bfc4b2 100644 --- a/drivers/access/ice_constants.F90 +++ b/drivers/access/ice_constants.F90 @@ -85,9 +85,6 @@ module ice_constants ! kseaice is used only for zero-layer thermo kseaice= 2.00_dbl_kind ,&! thermal conductivity of sea ice (W/m/deg) ! (used in zero layer thermodynamics option) - ksno = 0.2_dbl_kind ,&! thermal conductivity of snow (W/m/deg) - !!!ksno = 0.31_dbl_kind ,&! thermal conductivity of snow (W/m/deg) - !!!ksno = 0.50_dbl_kind ,&!!! test new value as per spo zref = 10._dbl_kind ,&! reference height for stability (m) #ifndef AusCOM snowpatch = 0.02_dbl_kind, & ! parameter for fractional snow area (m) @@ -100,15 +97,17 @@ module ice_constants dragio = 0.01_dbl_kind ,&!!! 20170922 test new value as per spo Tocnfrz = -1.8_dbl_kind ,&! freezing temp of seawater (C), ! used as Tsfcn for open water - ice_ref_salinity = 5._dbl_kind ! reference salinity for ice–ocean exchanges (ppt) + ice_ref_salinity = 5._dbl_kind, & ! reference salinity for ice–ocean exchanges (ppt) ! n.b. CICE6 uses 4 ppt + ksno = 0.3_dbl_kind ! thermal conductivity of snow (W/m/deg) #else ! get these in ice_init from namelist real (kind=dbl_kind), public :: & dragio , & ! ice-ocn drag coefficient Tocnfrz , & ! freezing temp of seawater (C), ! used as Tsfcn for open water - ice_ref_salinity ! reference salinity for ice–ocean exchanges (ppt) + ice_ref_salinity, & ! reference salinity for ice–ocean exchanges (ppt) + ksno ! thermal conductivity of snow (W/m/deg) #endif ! weights for albedos diff --git a/source/ice_init.F90 b/source/ice_init.F90 index cb04f778..7ab85351 100755 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -42,7 +42,7 @@ subroutine input_data use ice_broadcast, only: broadcast_scalar, broadcast_array !ars599: 24042015 for the namelist variables use ice_constants, only: c0, c1, puny, dragio, & - awtvdr, awtidr, awtvdf, awtidf, Tocnfrz, ice_ref_salinity + awtvdr, awtidr, awtvdf, awtidf, Tocnfrz, ice_ref_salinity, ksno use ice_diagnostics, only: diag_file, print_global, print_points, latpnt, lonpnt use ice_domain_size, only: max_nstrm, nilyr, nslyr, max_ntrcr, ncat, n_aero use ice_fileunits, only: nu_nml, nu_diag, nml_filename, diag_type, & @@ -144,7 +144,7 @@ subroutine input_data a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, & !ars599: 24092014 (CODE: petteri) #ifdef AusCOM - chio, ice_ref_salinity, & + chio, ice_ref_salinity, ksno, & #endif saltmax, dSdt_slow_mode, phi_c_slow_mode, phi_i_mushy @@ -315,7 +315,9 @@ subroutine input_data Tocnfrz = -1.8_dbl_kind ! freezing temp of seawater (C), ! used as Tsfcn for open water chio = 0.006_dbl_kind ! unitless param for basal heat flx ala McPhee and Maykut - ice_ref_salinity = 5._dbl_kind + ice_ref_salinity = 5._dbl_kind ! (ppt) + ksno = 0.30_dbl_kind ! thermal conductivity of snow (W/m/deg) + ! (use 0.2 for cm2) #endif atmbndy = 'default' ! 
or 'constant' @@ -792,6 +794,7 @@ subroutine input_data call broadcast_scalar(dragio, master_task) call broadcast_scalar(chio, master_task) call broadcast_scalar(ice_ref_salinity, master_task) + call broadcast_scalar(ksno, master_task) call broadcast_scalar(Tocnfrz, master_task) #endif call broadcast_scalar(atmbndy, master_task) @@ -1033,7 +1036,7 @@ subroutine input_data write(nu_diag,1005) ' dragio = ', dragio write(nu_diag,1005) ' chio = ', chio write(nu_diag,1005) ' ice_ref_salinity = ', ice_ref_salinity - + write(nu_diag,1005) ' ksno = ', ksno #endif write(nu_diag,1005) ' ustar_min = ', ustar_min write(nu_diag, *) ' fbot_xfer_type = ', & From 58eeccfb1c3521f03bcf0d17c11f7cd42b1af498 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Tue, 17 Jun 2025 10:07:31 +1000 Subject: [PATCH 29/52] ESM1.6 Readme (#44) Co-authored-by: Spencer Wong <88933912+blimlim@users.noreply.github.com> --- README.md | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d60842cf..344f1671 100644 --- a/README.md +++ b/README.md @@ -1 +1,20 @@ -This branch is a record of the version of CICE5 used within CSIRO builds of ACCESS-CM2 (e.g. for CMIP6). It's available here as an archive for reference. + +## Overview +This branch contains the CICE5 version used within ACCESS-ESM1.6. It was forked from https://code.metoffice.gov.uk/trac/cice/browser/main/branches/pkg/Config/vn5.1.2_GSI8.1_package_branch/cice?order=name&rev=334#source (MOSRS account required), which in turn captured the trunk from the subversion (svn) repository of the Los Alamos Sea Ice Model, version 5.1.2. It has significantly diverged to support ACCESS coupling (with UM7.3 atmosphere) and contains some differences from the ACCESS-OM2 branch of CICE5 (master). + +More recent versions are found in the [CICE](https://github.com/CICE-Consortium/CICE) and [Icepack](https://github.com/CICE-Consortium/Icepack) repositories, which are maintained by the CICE Consortium. For significant developments and new features, we suggest using those models. + +There is [PDF documentation](https://github.com/ACCESS-NRI/cice5/blob/master/doc/cicedoc.pdf) available for CICE 5.1.2, however changes were made to this branch to support coupling with ACCESS-ESM1.6. + +## Useful links +* **Wiki**: https://github.com/CICE-Consortium/CICE-svn-trunk/wiki + + Information about the CICE model prior to version 6 including how to obtain the code. + +* **Version Index**: https://github.com/CICE-Consortium/CICE-svn-trunk/wiki/CICE-Versions-Index-(older) + + Numbered CICE releases prior to version 6. + +* **Resource Index**: https://github.com/CICE-Consortium/About-Us/wiki/Resource-Index + + List of resources for information about the Consortium and its repositories as well as model documentation, testing, and development. \ No newline at end of file From 90a716400ba317fa230134c57ddaf7a84ff625d0 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Tue, 17 Jun 2025 14:09:26 +1000 Subject: [PATCH 30/52] Setup coupling for esm1.6 (#33) This change couples the fields expected by UM7.3 with CICE5, and implements spreading of meltwater from ice sheets. This coupling is limited to 0-layer thermodynamics in CICE5, due to how the fluxes from the UM are calculated and the assumptions made to calculate the fluxes.
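Roughly speaking, under the 0-layer scheme CICE does not solve its own surface energy balance for each thickness category; it takes the UM's per-category top and bottom melt fluxes and applies their sum directly as the surface flux. A sketch of the idea (the actual code is in the get_sbc_ice hunk further below):

```fortran
! per-category surface heat flux handed to the 0-layer thermodynamics
do cat = 1, ncat
   fsurfn_f(:,:,cat,:) = um_tmlt(:,:,cat,:) + um_bmlt(:,:,cat,:)
enddo
```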
Test results are shown in https://github.com/ACCESS-NRI/access-esm1.6-configs/issues/80 --------- Co-authored-by: Dave Bi Co-authored-by: Spencer Wong <88933912+blimlim@users.noreply.github.com> --- drivers/access/CICE_InitMod.F90 | 70 ++- drivers/access/CICE_RunMod.F90 | 26 +- drivers/access/cpl_arrays_setup.F90 | 41 +- drivers/access/cpl_forcing_handler.F90 | 731 +++++++------------------ drivers/access/cpl_interface.F90 | 243 ++------ drivers/access/cpl_parameters.F90 | 47 +- io_netcdf/ice_history_write.F90 | 3 +- io_netcdf/ice_restart.F90 | 2 +- source/ice_calendar.F90 | 8 +- source/ice_init.F90 | 14 +- source/ice_restart_driver.F90 | 7 +- source/ice_therm_vertical.F90 | 3 +- 12 files changed, 374 insertions(+), 821 deletions(-) diff --git a/drivers/access/CICE_InitMod.F90 b/drivers/access/CICE_InitMod.F90 index ea7fc1c3..afd2df08 100644 --- a/drivers/access/CICE_InitMod.F90 +++ b/drivers/access/CICE_InitMod.F90 @@ -102,6 +102,7 @@ subroutine cice_init #ifdef AusCOM integer(kind=int_kind) :: idate_save + character (char_len_long) :: filename #endif call init_communicate ! initial setup for message passing @@ -263,29 +264,32 @@ subroutine cice_init ! for continue runs, need restart o2i forcing fields and time-averaged ice ! variables ('mice')saved at the end of last run from ice models; ! for initial run, pre-processed o2i (and maybe mice) fields are required. - call get_restart_o2i(trim(restartdir)//'/o2i.nc') - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !if no lag for ice to atm coupling, then cice has to read restart file i2a.nc and - !put the data to atm. the call is not needed if there is lag for ice2atm coupling - !must call after get_restart_o2i(), by which the ocn_sst ect are read in and re-used by put_restart_i2a() -! call put_restart_i2a('i2a.nc', 0) -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if ( file_exist(trim(restartdir)//'/mice.nc') ) then - !for continue runs, mice data MUST be available. - call get_restart_mice(trim(restartdir)//'/mice.nc') - else - if (my_task == master_task) then - write(6,*)'* WARNING: No initial mice.nc data available here! *' - write(6,*)'* WARNING: ALL mice variables will be set to ZERO! *' - write(6,*)'* WARNING: This is allowed for the init run ONLY ! *' + if ( trim(runtype) == 'continue' ) then + write(il_out,*)' calling get_restart_o2i at time_sec = ',0 + filename = trim(restartdir)//'/o2i.nc' + if ( file_exist(filename) ) then + call get_restart_o2i(filename) + else + call abort_ice('file NOT found: '//filename //& + " This is allowed for runtype='initial' ONLY") + endif + !if no lag for ice to atm coupling, then cice has to read restart file i2a.nc and + !put the data to atm. the call is not needed if there is lag for ice2atm coupling + !must call after get_restart_o2i(), by which the ocn_sst ect are read in and re-used by put_restart_i2a() + ! call put_restart_i2a('i2a.nc', 0) + filename = trim(restartdir)//'/mice.nc' + if ( file_exist(filename) ) then + !for continue runs, mice data MUST be available. + call get_restart_mice(filename) + else + call abort_ice("file NOT found: "//filename//& + " This is allowed for runtype='initial' ONLY") endif endif -if (use_core_runoff) then - call get_core_runoff(trim(inputdir)//'/core_runoff_regrid.nc',& - 'runoff',1) -endif + if (use_core_runoff) then + call get_core_runoff(trim(inputdir)//'/core_runoff_regrid.nc',& + 'runoff',1) + endif if (my_task == master_task) then write(il_out,*)' calling ave_ocn_fields_4_i2a time_sec = ',0 !time_sec @@ -299,20 +303,26 @@ subroutine cice_init #ifdef ACCESS !!! 
options for land ice discharged as iceberg melting around AA and Gnld ! 0: "even" distribution as for u-ar676; - ! 1: use AC2 data but GC3.1 iceberg climatological pattern, each month takes the - ! total discharge as that diagnosed in u-ar676 (yrs2-101); - ! 2: use GC3 iceberg climatological pattern, each month enhanced by ac2/gc3 annual ratio - ! of land ice discharge to make sure the annual total discharge is same as case 1; + !================== for ESM1.5/1.6, "0" option is NOT used ===============! + ! 1: use AC2 data but GC3.1 iceberg climatological pattern, each month takes + ! the total discharge as that diagnosed in u-ar676 (yrs2-101); + ! 2: use GC3 iceberg climatological pattern, each month enhanced by ac2/gc3 + ! annual ratio of land ice discharge to make sure the annual total + ! discharge is same as case 1; ! 3: as case 1 but use annual mean ! 4: as case 2 but use annual mean - !!! Note 3 and 4 are similar but NOT the same; 1-4 cases should have idential annual + !!! Note 3 and 4 are similar but NOT the same; 1-4 cases should have identical annual !!! discharge of land ice (as iceberg) into ocean. - if ( file_exist(trim(inputdir)//'/lice_discharge_masks_iceberg.nc') ) then - call get_lice_discharge_masks_or_iceberg(trim(inputdir)//'/lice_discharge_masks_iceberg.nc') + filename = trim(inputdir)//'/lice_discharge_iceberg.nc' + if ( file_exist(filename) ) then + call get_lice_discharge(filename) else - write(6,*)'* CICE stopped -- land ice discharge masks and iceberg datafile missing.*' - call abort_ice ('ice: land ice discharge masks and iceberg datafile missing!') + if (my_task == master_task) then + write(6,*)'* CICE stopped -- iceberg datafile missing.*' + endif + call abort_ice ('ice: land ice discharge iceberg datafile missing: '//& + filename //' *') endif #endif diff --git a/drivers/access/CICE_RunMod.F90 b/drivers/access/CICE_RunMod.F90 index 7d94217a..adf71938 100644 --- a/drivers/access/CICE_RunMod.F90 +++ b/drivers/access/CICE_RunMod.F90 @@ -93,15 +93,16 @@ subroutine CICE_Run !receive a2i fields rtimestamp_ai = time_sec - call ice_timer_start(timer_from_atm) ! atm/ice coupling + !call ice_timer_start(timer_from_atm) ! atm/ice coupling write(il_out,*)' calling from_atm at icpl_ai, time_sec = ', icpl_ai, time_sec !=========================== call from_atm(rtimestamp_ai) !=========================== - call ice_timer_stop(timer_from_atm) ! atm/ice coupling + !call ice_timer_stop(timer_from_atm) ! atm/ice coupling !"TTI" approach ice fluxes converted to GBM units - call atm_icefluxes_back2GBM + !call atm_icefluxes_back2GBM (CM2 requires) + do itap = 1, num_ice_ai ! cice time loop ! Note I <==> O coupling happens at each time step @@ -117,12 +118,12 @@ subroutine CICE_Run write(il_out,'(a,3i10)') & ' calling into_ocn at icpl_ai, itap, time_sec = ', icpl_ai, itap, time_sec - call ice_timer_start(timer_into_ocn) ! atm/ocn coupling + !call ice_timer_start(timer_into_ocn) ! atm/ocn coupling !=========================== !call check_iceberg_fields('chk_iceberg_i2o.nc') call into_ocn(stimestamp_io) !=========================== - call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling + !call ice_timer_stop(timer_into_ocn) ! atm/ocn coupling !set boundary condition (forcing) call get_sbc_ice @@ -154,7 +155,7 @@ subroutine CICE_Run tmp_time = time_sec + dt if ( mod(tmp_time, dt_cpl_ai) == 0 ) then !this happens at itap = num_ice_ai - call ice_timer_start(timer_into_atm) + !call ice_timer_start(timer_into_atm) !i2a fields ready to be sent for next IA cpl int in atm. 
call get_i2a_fields @@ -169,7 +170,7 @@ subroutine CICE_Run !set time averaged ice and ocn variables back to 0 call initialize_mocn_fields_4_i2a call initialize_mice_fields_4_i2a - call ice_timer_stop(timer_into_atm) ! atm/ocn coupling + !call ice_timer_stop(timer_into_atm) ! atm/ocn coupling endif istep = istep + 1 ! update time step counters @@ -189,24 +190,17 @@ subroutine CICE_Run !get o2i fields for next time step ice update write(il_out,'(a,3i10)') & ' calling from_ocn at icpl_ai, itap, time_sec = ',icpl_ai, itap, time_sec - call ice_timer_start(timer_from_ocn) + !call ice_timer_start(timer_from_ocn) !=========================== call from_ocn(rtimestamp_io) !=========================== - call ice_timer_stop(timer_from_ocn) + !call ice_timer_stop(timer_from_ocn) !accumulate/average ocn fields needed for IA coupling call time_average_ocn_fields_4_i2a end if end do !itap - !reset land ice amount lice_nth and lice_sth for "previous" a2i step: - - !debug: check landice fields---- - !call check_landice_fields_1('chk_lice_fields_ai.nc') - - lice_nth = um_icenth - lice_sth = um_icesth newstep_ai = .true. END DO !icpl_ai diff --git a/drivers/access/cpl_arrays_setup.F90 b/drivers/access/cpl_arrays_setup.F90 index 1294ecb3..b7727e9f 100644 --- a/drivers/access/cpl_arrays_setup.F90 +++ b/drivers/access/cpl_arrays_setup.F90 @@ -27,11 +27,6 @@ module cpl_arrays_setup ! (24) surface pressure um_press ! (25) co2 um_co2 ! (26) wind speed um_wnd -! --------- add new fields for ACCESS2 -------- -! (27) north (greenland) ice amount um_icenth -! (28) south (antarctic) ice amount um_icesth -! (29 - 33) ice surface/skin temperature um_tsfice(,,1:5) -! (34 - 38) ice surface evaporation (sublimation) um_iceevp(,,1:5) ! ! B> ocn (MOM4) ==> ice (CICE) [* at T or U cell center *] ! @@ -55,13 +50,6 @@ module cpl_arrays_setup ! (18) ice/ocn velocity 'meridional' ia_vvel ! (19) co2 ia_co2 ! (20) co2 flux ia_co2fx -! --------- add new fields for ACCESS2 -------- -! (21) ocean surface freezing temperature ia_sstfz -! (22 - 26 ) first order ice concentration ia_foifr(,,1:5) -! (27 - 31 ) ice top layer temperature ia_itopt(,,1:5) -! (32 - 36 ) ice top layer effective conductivity ia_itopk(,,1:5) -! (37 - 41 ) ice melt pond concentration ia_pndfn(,,1:5) -! (42 - 46 ) ice melt pond thickness ia_pndtn(,,1:5) ! ! D> ice (CICE) ==> ocn (MOM4) [* at T or U cell center *] ! @@ -99,7 +87,7 @@ module cpl_arrays_setup ! ! Therefore, currently we have ! -! *for ACCESS1.x, 31 in, 33 out => thus jpfldout=33, jpfldin=31 in cpl_parameters. +! *for ACCESS-ESM1.x, (26 + 9) in, (20 + 19) out => thus jpfldout=39, jpfldin=35 in cpl_parameters. ! for ACCESS-CM2, 47 in, 63 out => thus jpfldout=63, jpfldin=47 in cpl_parameters. ! now (20171024) 47 in, 65 out 65 47 !---------------------------------------------------------------------------------- @@ -116,13 +104,10 @@ module cpl_arrays_setup real(kind=dbl_kind), dimension(:,:,:), allocatable :: & !from atm (UM) um_thflx, um_pswflx, um_runoff, um_wme, um_snow, um_rain, & um_evap, um_lhflx, um_taux, um_tauy, & - um_swflx, um_lwflx, um_shflx, um_press,um_co2, um_wnd, & - um_icenth, um_icesth, & - !!20171024 added for calculation of land ice increment - lice_nth, lice_sth, msk_nth, msk_sth, amsk_nth, amsk_sth + um_swflx, um_lwflx, um_shflx, um_press,um_co2, um_wnd real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & - um_tmlt, um_bmlt, um_tsfice, um_iceevp + um_tmlt, um_bmlt ! 
CORE runoff remapped onto the AusCOM grid (optional) real(kind=dbl_kind), dimension(:,:,:), allocatable :: & @@ -140,10 +125,9 @@ module cpl_arrays_setup ! Fields out: !============ real(kind=dbl_kind),dimension(:,:,:), allocatable :: & !to atm (timeaveraged) - ia_sst, ia_uvel, ia_vvel, ia_co2, ia_co2fx, ia_sstfz + ia_sst, ia_uvel, ia_vvel, ia_co2, ia_co2fx !!!, ia_sstfz real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & - ia_aicen, ia_snown, ia_thikn, & - ia_foifr, ia_itopt, ia_itopk, ia_pndfn, ia_pndtn + ia_aicen, ia_snown, ia_thikn real(kind=dbl_kind),dimension(:,:,:), allocatable :: & !to ocn (time averaged) io_strsu, io_strsv, io_rain, io_snow, io_stflx, io_htflx, io_swflx, & @@ -158,8 +142,7 @@ module cpl_arrays_setup real(kind=dbl_kind),dimension(:,:,:), allocatable :: & maiu, muvel, mvvel real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & - maicen, msnown, mthikn, & - mfoifr, mitopt, mitopk, mpndfn, mpndtn + maicen, msnown, mthikn real(kind=dbl_kind), dimension(:,:,:,:), allocatable :: & !BX: just in case...... maicen_saved @@ -172,7 +155,7 @@ module cpl_arrays_setup ! 3. ocn fields averaged over IA cpl interval: real(kind=dbl_kind),dimension(:,:,:), allocatable :: & - msst, mssu, mssv, mco2, mco2fx, msstfz + msst, mssu, mssv, mco2, mco2fx ! other stuff @@ -180,8 +163,14 @@ module cpl_arrays_setup real(kind=dbl_kind),dimension(:,:,:), allocatable :: & sicemass !ice mass -real(kind=dbl_kind),dimension(:,:,:,:), allocatable :: & - icebergfw !land ice discharge into ocean as monthly iceberg melt waterflux ==>io_licefw +real(kind=dbl_kind),dimension(:,:,:), allocatable :: & + gicebergfw !monthly iceberg flux on global domain +real(kind=dbl_kind),dimension(:,:), allocatable :: & + gtarea, & !tarea on global domain + grunoff !runoff on global domain + +real(kind=dbl_kind),dimension(:), allocatable :: & + ticeberg_s, ticeberg_n !monthly land ice off Anrarctica and Greenland (NH) !=========================================================================== end module cpl_arrays_setup diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index bc1da8a7..5b649e5a 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ -10,15 +10,18 @@ MODULE cpl_forcing_handler use ice_flux !forcing data definition (Tair, Qa, uocn, etc.) !Tn_top, keffn_top ...(for multilayer configuration) !use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd !ice concentration and tracers -use ice_state, only : aice, aicen, trcr, trcrn, nt_hpnd, nt_Tsfc !ice concentration and tracers +use ice_state, only : aice, aicen, trcr !!!, trcrn, nt_hpnd, nt_Tsfc !ice concentration and tracers use ice_state, only: uvel, vvel, vsnon, vicen use ice_gather_scatter +use ice_broadcast use ice_constants use ice_grid, only : tmask, to_ugrid use ice_communicate, only : my_task, master_task !use ice_ocean, only : cprho use ice_exit, only : abort_ice use ice_shortwave, only : apeffn +use ice_grid, only: tarea +use ice_calendar, only: month use cpl_parameters use cpl_netcdf_setup @@ -147,19 +150,6 @@ subroutine read_access_a2i_data(fname,nrec,istep) call ice_read_nc(ncid, nrec, 'press_i', um_press, dbug) call ice_read_nc(ncid, nrec, 'co2_ai', um_co2, dbug) call ice_read_nc(ncid, nrec, 'wnd_ai', um_wnd, dbug) -! 
- call ice_read_nc(ncid, nrec, 'icenth_i', um_icenth, dbug) - call ice_read_nc(ncid, nrec, 'icesth_i', um_icesth, dbug) - call ice_read_nc(ncid, nrec, 'tsfice01', um_tsfice(:,:,1,:), dbug) - call ice_read_nc(ncid, nrec, 'tsfice02', um_tsfice(:,:,2,:), dbug) - call ice_read_nc(ncid, nrec, 'tsfice03', um_tsfice(:,:,3,:), dbug) - call ice_read_nc(ncid, nrec, 'tsfice04', um_tsfice(:,:,4,:), dbug) - call ice_read_nc(ncid, nrec, 'tsfice05', um_tsfice(:,:,5,:), dbug) - call ice_read_nc(ncid, nrec, 'iceevp01', um_iceevp(:,:,1,:), dbug) - call ice_read_nc(ncid, nrec, 'iceevp02', um_iceevp(:,:,2,:), dbug) - call ice_read_nc(ncid, nrec, 'iceevp03', um_iceevp(:,:,3,:), dbug) - call ice_read_nc(ncid, nrec, 'iceevp04', um_iceevp(:,:,4,:), dbug) - call ice_read_nc(ncid, nrec, 'iceevp05', um_iceevp(:,:,5,:), dbug) if (my_task == master_task) call ice_close_nc(ncid) else @@ -173,19 +163,6 @@ subroutine read_access_a2i_data(fname,nrec,istep) end subroutine read_access_a2i_data -!================================================= -subroutine atm_icefluxes_back2GBM -!convert the a2i fluxes into GBM units for those that are scaled up in UM -!by "/maicen" before being sent to cice [needed for GSI8 TTI approach]. - -implicit none - -um_tmlt(:,:,:,:) = um_tmlt(:,:,:,:) * maicen_saved(:,:,:,:) -um_bmlt(:,:,:,:) = um_bmlt(:,:,:,:) * maicen_saved(:,:,:,:) -um_iceevp(:,:,:,:) = um_iceevp(:,:,:,:) * maicen_saved(:,:,:,:) - -end subroutine atm_icefluxes_back2GBM - !================================================= subroutine read_restart_i2a(fname, sec) !'i2a.nc', 0) @@ -224,32 +201,6 @@ subroutine read_restart_i2a(fname, sec) !'i2a.nc', 0) call ice_read_nc(ncid, 1, 'vvel_ia', ia_vvel, dbug) call ice_read_nc(ncid, 1, 'co2_i2', ia_co2, dbug) call ice_read_nc(ncid, 1, 'co2fx_i2', ia_co2fx, dbug) - call ice_read_nc(ncid, 1, 'sstfz_ia', ia_sstfz, dbug) - call ice_read_nc(ncid, 1, 'foifr01', ia_foifr(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'foifr02', ia_foifr(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'foifr03', ia_foifr(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'foifr04', ia_foifr(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'foifr05', ia_foifr(:,:,5,:), dbug) - call ice_read_nc(ncid, 1, 'itopt01', ia_itopt(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'itopt02', ia_itopt(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'itopt03', ia_itopt(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'itopt04', ia_itopt(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'itopt05', ia_itopt(:,:,5,:), dbug) - call ice_read_nc(ncid, 1, 'itopk01', ia_itopk(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'itopk02', ia_itopk(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'itopk03', ia_itopk(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'itopk04', ia_itopk(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'itopk05', ia_itopk(:,:,5,:), dbug) - call ice_read_nc(ncid, 1, 'pndfn01', ia_pndfn(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'pndfn02', ia_pndfn(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'pndfn03', ia_pndfn(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'pndfn04', ia_pndfn(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'pndfn05', ia_pndfn(:,:,5,:), dbug) - call ice_read_nc(ncid, 1, 'pndtn01', ia_pndtn(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'pndtn02', ia_pndtn(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'pndtn03', ia_pndtn(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'pndtn04', ia_pndtn(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'pndtn05', ia_pndtn(:,:,5,:), dbug) if (my_task == master_task) then call ice_close_nc(ncid) @@ -306,31 +257,6 @@ subroutine 
read_restart_i2asum(fname, sec) !'i2a.nc', 0) call ice_read_nc(ncid, 1, 'maiu', maiu, dbug) ! !call ice_read_nc(ncid, 1, 'maice_ia', maice_ia, dbug) - call ice_read_nc(ncid, 1, 'mfoifr01', mfoifr(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'mfoifr02', mfoifr(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'mfoifr03', mfoifr(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'mfoifr04', mfoifr(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'mfoifr05', mfoifr(:,:,5,:), dbug) - call ice_read_nc(ncid, 1, 'mitopt01', mitopt(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'mitopt02', mitopt(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'mitopt03', mitopt(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'mitopt04', mitopt(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'mitopt05', mitopt(:,:,5,:), dbug) - call ice_read_nc(ncid, 1, 'mitopk01', mitopk(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'mitopk02', mitopk(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'mitopk03', mitopk(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'mitopk04', mitopk(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'mitopk05', mitopk(:,:,5,:), dbug) - call ice_read_nc(ncid, 1, 'mpndfn01', mpndfn(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'mpndfn02', mpndfn(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'mpndfn03', mpndfn(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'mpndfn04', mpndfn(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'mpndfn05', mpndfn(:,:,5,:), dbug) - call ice_read_nc(ncid, 1, 'mpndtn01', mpndtn(:,:,1,:), dbug) - call ice_read_nc(ncid, 1, 'mpndtn02', mpndtn(:,:,2,:), dbug) - call ice_read_nc(ncid, 1, 'mpndtn03', mpndtn(:,:,3,:), dbug) - call ice_read_nc(ncid, 1, 'mpndtn04', mpndtn(:,:,4,:), dbug) - call ice_read_nc(ncid, 1, 'mpndtn05', mpndtn(:,:,5,:), dbug) if (my_task == master_task) then call ice_close_nc(ncid) @@ -433,16 +359,15 @@ subroutine get_restart_mice(fname) dbug = .true. if ( file_exist(fname) ) then if (my_task==0) then - write(il_out,*) '(get_restart_mice) reading in mice variables......' + write(il_out,*) '(get_restart_mice) opening file: ', fname endif + call ice_open_nc(fname, ncid_o2i) -!B: 20170825 ==> need maicen_saved variables call ice_read_nc(ncid_o2i, 1, 'maicen1', maicen_saved(:,:,1,:), dbug) call ice_read_nc(ncid_o2i, 1, 'maicen2', maicen_saved(:,:,2,:), dbug) call ice_read_nc(ncid_o2i, 1, 'maicen3', maicen_saved(:,:,3,:), dbug) call ice_read_nc(ncid_o2i, 1, 'maicen4', maicen_saved(:,:,4,:), dbug) call ice_read_nc(ncid_o2i, 1, 'maicen5', maicen_saved(:,:,5,:), dbug) -!b. call ice_read_nc(ncid_o2i, 1, 'maice', maice, dbug) call ice_read_nc(ncid_o2i, 1, 'mstrocnxT', mstrocnxT, dbug) call ice_read_nc(ncid_o2i, 1, 'mstrocnyT', mstrocnyT, dbug) @@ -451,9 +376,7 @@ subroutine get_restart_mice(fname) call ice_read_nc(ncid_o2i, 1, 'mfhocn', mfhocn, dbug) call ice_read_nc(ncid_o2i, 1, 'mfswthru', mfswthru, dbug) call ice_read_nc(ncid_o2i, 1, 'msicemass', msicemass, dbug) -!20171024: added 2 more: - call ice_read_nc(ncid_o2i, 1, 'lice_sth', lice_sth, dbug) - call ice_read_nc(ncid_o2i, 1, 'lice_nth', lice_nth, dbug) + write(il_out,*) '(get_restart_mice) ALL variables read in! ' if (my_task == master_task) then call ice_close_nc(ncid_o2i) @@ -470,8 +393,9 @@ subroutine get_restart_mice(fname) return end subroutine get_restart_mice + !================================================= -subroutine get_lice_discharge_masks_or_iceberg(fname) +subroutine get_lice_discharge(fname) ! Called at beginning of each run trunk to read in land ice discharge mask or iceberg ! (off Antarctica and Greenland). 
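! A sketch of what the rewritten routine below does: it gathers tarea and the monthly
! ficeberg field onto the global grid and accumulates hemispheric totals of the
! iceberg freshwater flux, schematically
!   ticeberg_s(im) = sum over j = 1..iceberg_je_s,          i = 1..nx_global of gtarea(i,j)*gwork(i,j)
!   ticeberg_n(im) = sum over j = iceberg_js_n..ny_global,  i = 1..nx_global of gtarea(i,j)*gwork(i,j)
! (all names as used in the hunk that follows)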
@@ -480,125 +404,61 @@ subroutine get_lice_discharge_masks_or_iceberg(fname) character(len=*), intent(in) :: fname character*80 :: myvar = 'ficeberg' -integer(kind=int_kind) :: ncid_i2o, im, k -logical :: dbug -!!! -!character(:), allocatable :: fname_trim -!!! - -dbug = .true. - -!!! -!fname_trim = trim(fname) -!!! -if (my_task == 0) write(*,'(a,a)'),'BBB1: opening file ',fname -if (my_task == 0) write(*,'(a,a)'),'BBB2: opening file ',trim(fname) +integer(kind=int_kind) :: ncid_i2o, im, k, i, j +logical :: dbug = .true. call ice_open_nc(trim(fname), ncid_i2o) -!call ice_open_nc(fname_trim, ncid_i2o) -!deallocate(fname_trim) -if (iceberg == 0) then - if (my_task==0) then - write(il_out,*) '(get_lice_discharge_masks_or_iceberg) reading in lice_mask and total areas......' - endif - call ice_read_nc(ncid_i2o, 1, 'msk_nth', msk_nth, dbug) - call ice_read_nc(ncid_i2o, 1, 'msk_sth', msk_sth, dbug) - call ice_read_nc(ncid_i2o, 1, 'amsk_nth', amsk_nth, dbug) - call ice_read_nc(ncid_i2o, 1, 'amsk_sth', amsk_sth, dbug) +write(il_out,*) '(get_lice_discharge) opened datafile: ', trim(fname) +write(il_out,*) '(get_lice_discharge) ncid_i2o= ', ncid_i2o + +if (iceberg .lt. 1 .or. iceberg .gt. 4) then + write(il_out,*) '(get_lice_discharge) in ESM only supports iceberg = 1,2,3,4) ' + call abort_ice('CICE stopped: ESM only supports iceberg = 1,2,3,4. Please set it to 1,2,3,4') else + call gather_global(gtarea, tarea, master_task, distrb_info) select case (iceberg) case (1); myvar = 'FICEBERG_AC2' case (2); myvar = 'FICEBERG_GC3' case (3); myvar = 'FICEBERG_AC2_AVE' case (4); myvar = 'FICEBERG_GC3_AVE' end select - if (my_task==0) then - write(il_out,'(a,a)') '(get_lice_discharge_masks_or_iceberg) reading in iceberg data, myvar= ',trim(myvar) - endif + write(il_out,*)'(get_lice_discharge), iceberg = ', iceberg + write(il_out,'(a,a)') '(get_lice_discharge) reading in iceberg data, myvar= ',trim(myvar) do im = 1, 12 - write(il_out,*) '(get_lice_discharge_masks_or_iceberg) reading in data, month= ',im - !call ice_read_nc(ncid_i2o, im, trim(myvar), icebergfw(:,:,im,:), dbug) - call ice_read_nc(ncid_i2o, im, trim(myvar), vwork, dbug) - icebergfw(:,:,im,:) = vwork(:,:,:) * iceberg_factor - !iceberg_factor is 1.0 as default, but can be bigger/smaller for other runs - !(e.g. in CABLE runs, iceberffw needs to be enhanced for water balance.) - enddo + write(il_out,*) '(get_lice_discharge) reading in data, month= ',im + call ice_read_global_nc(ncid_i2o, im, trim(myvar), gwork, dbug) -!call check_iceberg_reading('chk_iceberg_readin.nc') -!above call results segmentation fault !?! + if ( my_task == master_task ) then + gicebergfw(:,:,im) = gwork(:,:) -endif -if (my_task == master_task) then - call ice_close_nc(ncid_i2o) - write(il_out,*) '(get_lice_discharge_masks_or_iceberg) reading completed!' -endif - -return -end subroutine get_lice_discharge_masks_or_iceberg - -!================================================= -subroutine get_iceberg_distribution(fname) !, mychoice) - -! This routine is called at beginning of each job. - -implicit none - -character*(*), intent(in) :: fname -!integer(kind=int_kind), intent(in) :: mychoice !iceberg distribution option (1,2,3,4) -logical :: dbug -integer(kind=int_kind) :: ncid, im - -dbug = .true. -!dbug = .false. 
+ ticeberg_s(im) = 0.0 + do j = 1, iceberg_je_s !1, ny_global/2 (iceberg_je_s smaller than ny_global/2 thus saves time) + do i = 1, nx_global + ticeberg_s(im) = ticeberg_s(im) + gtarea(i,j) * gwork(i,j) + enddo + enddo + ticeberg_n(im) = 0.0 + do j = iceberg_js_n, ny_global !ny_global/2 + 1, ny_global !(iceberg_js_n bigger than ny_global/2 +1) + do i = 1, nx_global + ticeberg_n(im) = ticeberg_n(im) + gtarea(i,j) * gwork(i,j) + enddo + enddo -IF (file_exist(fname)) THEN + write(il_out, *) '(get_lice_discharge) check: im, ticeberg_s, ticeberg_n = ',im, ticeberg_s(im), ticeberg_n(im) + endif -if (my_task==0) then - write(*,*) '(get_iceberg_distribution) opening ncfile: ',fname - write(il_out,*) '(get_iceberg_distribution) opening ncfile: ',fname -endif + enddo -call ice_open_nc(trim(fname), ncid) -if (my_task==0) then - write(*,*) '(get_iceberg_distribution) reading in iceberg data, option: ',iceberg !mychoice - write(il_out,'(a,a)') '(get_iceberg_distribution) reading in iceberg data, option: ',iceberg !mychoice endif -!!if (mychoice == 1) then -if (iceberg == 1) then - do im = 1, 12 - call ice_read_nc(ncid, im, 'FICEBERG_AC2', icebergfw(:,:,im,:), dbug) - enddo -!!else if (mychoice == 2) then -else if (iceberg == 2) then - do im = 1, 12 - call ice_read_nc(ncid, im, 'FICEBERG_GC3', icebergfw(:,:,im,:), dbug) - enddo -!!else if (mychoice == 3) then -else if (iceberg == 3) then - do im = 1, 12 - !set monthly to be annual mean: - call ice_read_nc(ncid, 1, 'FICEBERG_AC2_AVE', icebergfw(:,:,im,:), dbug) - enddo -else if (iceberg == 4) then - do im = 1, 12 - !set monthly to be annual mean: - call ice_read_nc(ncid, 1, 'FICEBERG_GC3_AVE', icebergfw(:,:,im,:), dbug) - enddo +if (my_task == master_task) then + call ice_close_nc(ncid_i2o) endif -if (my_task == master_task) call ice_close_nc(ncid) - -ELSE - -write(6,'(a,a)')'CICE stopped -- iceberg data missing ----> ', fname -write(il_out,'(a,a)')'CICE stopped -- iceberg data missing ----> ', fname -call abort_ice ('ice: iceberg data missing!') +return -ENDIF +end subroutine get_lice_discharge -return -end subroutine get_iceberg_distribution !================================================= subroutine get_restart_i2o(fname) @@ -645,7 +505,7 @@ subroutine get_restart_i2o(fname) enddo if (my_task == master_task) then call ice_close_nc(ncid_i2o) - write(il_out,*) '(get_time0_i2o_fields) has read in 11 i2o fields.' + write(il_out,*) '(get_time0_i2o_fields) has read in 19 i2o fields.' endif else if (my_task==0) then @@ -659,7 +519,10 @@ subroutine get_restart_i2o(fname) end subroutine get_restart_i2o !================================================= -subroutine set_sbc_ice !!NOTE: This routine is NOT used!! +subroutine set_sbc_ice +!------------------------- +!This routine is NOT used! +!------------------------- ! ! Set coupling fields (in units of GMB, from UM and MOM4) needed for CICE ! @@ -696,9 +559,9 @@ subroutine set_sbc_ice !!NOTE: This routine is NOT used!! flatn_f(i,j,1,k) = um_lhflx(i,j,k) else do cat = 1, ncat - !!!B: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * maicen(i,j,cat,k)/maice(i,j,k) + flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * maicen(i,j,cat,k)/maice(i,j,k) !???: flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub - flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub + !flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub enddo endif enddo @@ -752,7 +615,7 @@ subroutine set_sbc_ice !!NOTE: This routine is NOT used!! !(7) surface slope ssly ss_tlty = ocn_ssly -!(as per S.O.) make sure Tf if properly initialized +!(as per S.O.) 
make sure Tf is properly initialized Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) ! !B: May use different formula for Tf such as TEOS-10 formulation: @@ -787,33 +650,35 @@ subroutine get_sbc_ice ! Fields from UM (all on T cell center): !(1) windstress taux: -strax = um_taux * aice !*tmask ? +strax = um_taux * aice !(2) windstress tauy: -stray = um_tauy * aice !*tmask ? +stray = um_tauy * aice !(3) surface downward latent heat flux (==> multi_category) -!BX: where is flatn_f "used" in CICE? do j = 1, ny_block do i = 1, nx_block do k = 1, nblocks - !BX 20160826: as in NEMO sbccpl.F90, there is no "open water field" um_lhflx involved: - ! qla_ice(:,:,1:jpl) = - frcv(jpr_ievp)%z3(:,:,1:jpl) * lsub - !------------------------------------------------------------------------------------- - !if (aice(i,j,k)==0.0) then - ! do cat = 1, ncat - ! flatn_f(i,j,cat,k) = 0.0 - ! enddo - ! ! This will then be conserved in CICE (done in sfcflux_to_ocn) - ! flatn_f(i,j,1,k) = um_lhflx(i,j,k) - !else + ! Notes from ACCESS-CM2: + !BX 20160826: as in NEMO sbccpl.F90, there is no "open water field" um_lhflx involved: + ! qla_ice(:,:,1:jpl) = - frcv(jpr_ievp)%z3(:,:,1:jpl) * lsub + !------------------------------------------------------------------------------------- + ! CM2 Uses this, as um_lhflx is not available: + ! do cat = 1, ncat + ! flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub + ! enddo + + if (aice(i,j,k)==0.0) then + do cat = 1, ncat + flatn_f(i,j,cat,k) = 0.0 + enddo + ! This will then be conserved in CICE (done in sfcflux_to_ocn) + flatn_f(i,j,1,k) = um_lhflx(i,j,k) + else do cat = 1, ncat - !!!BX: flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * aicen(i,j,cat,k)/aice(i,j,k) - !!! Double check "Lsub" used here !!! - !?! flatn_f(i,j,cat,k) = um_iceevp(i,j,cat,k) * Lsub - flatn_f(i,j,cat,k) = - um_iceevp(i,j,cat,k) * Lsub + flatn_f(i,j,cat,k) = um_lhflx(i,j,k) * aicen(i,j,cat,k)/aice(i,j,k) enddo - !endif + endif enddo enddo enddo @@ -825,39 +690,8 @@ subroutine get_sbc_ice fsurfn_f (:,:,cat,:) = um_tmlt(:,:,cat,:) + um_bmlt(:,:,cat,:) enddo -!!! 20130419: Martin Dix's investigation suggests that frain and fsnow should NOT be scaled by -!!! aice here. This scaling would caused double-scaling with "fresh" calculation.. -!(14) snowfall -!!!fsnow = max(aice * um_snow,0.0) -!fsnow = max(um_snow,0.0) !no more scaling as per M.D.! -!(15) rainfall -!!!frain = max(aice * um_rain,0.0) -!frain = max(um_rain,0.0) !no more scaling as per M.D.! -!!! 20130420: I dug deeper and checked all the associated steps of "fresh" calculation, found -!!! the original weighting is CORRECT! so back to *aice: fsnow = max(aice * um_snow,0.0) frain = max(aice * um_rain,0.0) -! -!ice surface skin temperature (from UM)------------------------------------- -!see: tsfc_ice definition in sbccpl.F90 at -!/short/p66/hxy599/fcm_make_ocean_GC3/extract/nemo/NEMOGCM/NEMO/OPA_SRC/SBC -!--------------------------------------------------------------------------- -do cat = 1, ncat - !!! trcrn(:,:,nt_Tsfc,cat,:) = um_tsfice(:,:,cat,:) - do k = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (um_tsfice(i,j,cat,k) > 0.0) then - trcrn(i,j,nt_Tsfc,cat,k) = 0.0 - else if (um_tsfice(i,j,cat,k) < -60.0) then - trcrn(i,j,nt_Tsfc,cat,k) = -60.0 - else - trcrn(i,j,nt_Tsfc,cat,k) = um_tsfice(i,j,cat,k) - endif - enddo - enddo - enddo -enddo ! Fields from MOM4 (SSU/V and sslx/y are on U points): @@ -890,7 +724,7 @@ subroutine get_sbc_ice ! * (as per S. 
O'Farrel) make sure Tf if properly initialized !----- should use eos formula to calculate Tf for "consistency" with GCx ----! -!Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) +Tf (:,:,:) = -depressT*sss(:,:,:) ! freezing temp (C) ! !B: May use different formula for Tf such as TEOS-10 formulation: ! @@ -983,32 +817,6 @@ subroutine save_restart_i2asum(fname, nstep) sumfld(19)='msnown3' sumfld(20)='msnown4' sumfld(21)='msnown5' -! -sumfld(22)='mfoifr1' -sumfld(23)='mfoifr2' -sumfld(24)='mfoifr3' -sumfld(25)='mfoifr4' -sumfld(26)='mfoifr5' -sumfld(27)='mitopt1' -sumfld(28)='mitopt2' -sumfld(29)='mitopt3' -sumfld(30)='mitopt4' -sumfld(31)='mitopt5' -sumfld(32)='mitopk1' -sumfld(33)='mitopk2' -sumfld(34)='mitopk3' -sumfld(35)='mitopk4' -sumfld(36)='mitopk5' -sumfld(37)='mpndfn1' -sumfld(38)='mpndfn2' -sumfld(39)='mpndfn3' -sumfld(40)='mpndfn4' -sumfld(41)='mpndfn5' -sumfld(42)='mpndtn1' -sumfld(43)='mpndtn2' -sumfld(44)='mpndtn3' -sumfld(45)='mpndtn4' -sumfld(46)='mpndtn5' if (my_task == 0) then call create_ncfile(fname, ncid, il_im, il_jm, ll=1, ilout=il_out) @@ -1037,32 +845,6 @@ subroutine save_restart_i2asum(fname, nstep) case('msnown3'); vwork = msnown(:,:,3,:) case('msnown4'); vwork = msnown(:,:,4,:) case('msnown5'); vwork = msnown(:,:,5,:) - case('mfoifr1'); vwork = mfoifr(:,:,1,:) - case('mfoifr2'); vwork = mfoifr(:,:,2,:) - case('mfoifr3'); vwork = mfoifr(:,:,3,:) - case('mfoifr4'); vwork = mfoifr(:,:,4,:) - case('mfoifr5'); vwork = mfoifr(:,:,5,:) - case('mitopt1'); vwork = mitopt(:,:,1,:) - case('mitopt2'); vwork = mitopt(:,:,2,:) - case('mitopt3'); vwork = mitopt(:,:,3,:) - case('mitopt4'); vwork = mitopt(:,:,4,:) - case('mitopt5'); vwork = mitopt(:,:,5,:) - case('mitopk1'); vwork = mitopk(:,:,1,:) - case('mitopk2'); vwork = mitopk(:,:,2,:) - case('mitopk3'); vwork = mitopk(:,:,3,:) - case('mitopk4'); vwork = mitopk(:,:,4,:) - case('mitopk5'); vwork = mitopk(:,:,5,:) - case('mpndfn1'); vwork = mpndfn(:,:,1,:) - case('mpndfn2'); vwork = mpndfn(:,:,2,:) - case('mpndfn3'); vwork = mpndfn(:,:,3,:) - case('mpndfn4'); vwork = mpndfn(:,:,4,:) - case('mpndfn5'); vwork = mpndfn(:,:,5,:) - case('mpndtn1'); vwork = mpndtn(:,:,1,:) - case('mpndtn2'); vwork = mpndtn(:,:,2,:) - case('mpndtn3'); vwork = mpndtn(:,:,3,:) - case('mpndtn4'); vwork = mpndtn(:,:,4,:) - case('mpndtn5'); vwork = mpndtn(:,:,5,:) - end select call gather_global(gwork, vwork, master_task, distrb_info) if (my_task == 0) then @@ -1094,6 +876,7 @@ subroutine save_restart_mice(fname, nstep) call write_nc_1Dtime(real(nstep), 1, 'time', ncid) endif +! maicen_saved appears to be the same as maicen in CICE5-UM7.3 !B: 20170825 ==> add maicen_saved for atm_icefluxes_back2GBM calculation! ! note maicen_saved is the last ia interval mean. 
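For reference, the linear freezing-point formula restored in get_sbc_ice above, Tf = -depressT*sss, gives roughly -1.8 C for typical polar surface salinity. A minimal check, assuming CICE's usual default depressT = 0.054 C/ppt (that value is not shown in this patch):

   real (kind=dbl_kind), parameter :: depressT_chk = 0.054_dbl_kind  ! assumed default
   real (kind=dbl_kind) :: sss_chk, tf_chk
   sss_chk = 34.0_dbl_kind             ! ppt, a typical polar surface salinity
   tf_chk  = -depressT_chk * sss_chk   ! = -1.836 C, linear freezing temperature

The TEOS-10 option mentioned in the comments would replace this linear form with an equation-of-state-based freezing temperature.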
vwork(:,:,:) = maicen_saved(:,:,1,:) @@ -1139,13 +922,6 @@ subroutine save_restart_mice(fname, nstep) vwork = msicemass call gather_global(gwork, vwork, master_task, distrb_info) if (my_task == 0) call write_nc2D(ncid, 'msicemass', gwork, 2, il_im, il_jm, 1, ilout=il_out) -!2 more added 20171024 for calculation of N/S land ice increase between a2i cpl interval: -vwork = um_icenth -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'lice_nth', gwork, 2, il_im, il_jm, 1, ilout=il_out) -vwork = um_icesth -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'lice_sth', gwork, 2, il_im, il_jm, 1, ilout=il_out) if (my_task == 0) call ncheck( nf_close(ncid) ) @@ -1161,7 +937,6 @@ subroutine get_i2a_fields ia_sst(:,:,:) = msst(:,:,:) !(2-3) ice/ocn combined surface velocity -!CH: should use "aiu", not aice! ia_uvel(:,:,:) = mssu(:,:,:) * (1. - maiu(:,:,:)) + muvel(:,:,:) * maiu(:,:,:) ia_vvel(:,:,:) = mssv(:,:,:) * (1. - maiu(:,:,:)) + mvvel(:,:,:) * maiu(:,:,:) @@ -1170,43 +945,16 @@ subroutine get_i2a_fields !BX: save it for use in atm_icefluxes_back2GBM --- maicen_saved = maicen -!XXX -- As per Alex West, only two of the ice vaiables below need to be scaled down -! by "* aice": ice top layer "temperature" and "effective conductivity"! - !(9-13) ice thickness ia_thikn(:,:,:,:) = mthikn(:,:,:,:) -!ia_thikn(:,:,:,:) = mthikn(:,:,:,:) * mfoifr(:,:,:,:) !X !(14-18) snow thickness ia_snown(:,:,:,:) = msnown(:,:,:,:) -!ia_snown(:,:,:,:) = msnown(:,:,:,:) * mfoifr(:,:,:,:) !X !(19-20) co2 flux stuff ia_co2 = mco2 ia_co2fx = mco2fx -!(21) ocean surface freezing temperature -ia_sstfz(:,:,:) = msstfz(:,:,:) + 273.15 - -!(22-26) first order ice concentration -ia_foifr(:,:,:,:) = mfoifr(:,:,:,:) - -!(27-31) ice top layer temperature -!XXX ia_itopt(:,:,:,:) = mitopt(:,:,:,:) + 273.15 -ia_itopt(:,:,:,:) = (mitopt(:,:,:,:) + 273.15) * mfoifr(:,:,:,:) !Y - -!(32-36) ice top layer effective conductivity -!XXX ia_itopk(:,:,:,:) = mitopk(:,:,:,:) -ia_itopk(:,:,:,:) = mitopk(:,:,:,:) * mfoifr(:,:,:,:) !Y - -!(37-41) ice melt pond concentration -ia_pndfn(:,:,:,:) = mpndfn(:,:,:,:) -!ia_pndfn(:,:,:,:) = mpndfn(:,:,:,:) * mfoifr(:,:,:,:) !X - -!(42-46) ice melt pond thickness -ia_pndtn(:,:,:,:) = mpndtn(:,:,:,:) -!ia_pndtn(:,:,:,:) = mpndtn(:,:,:,:) * mfoifr(:,:,:,:) !X - return end subroutine get_i2a_fields @@ -1220,6 +968,13 @@ subroutine get_i2o_fields real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: pice integer (kind=int_kind) :: i,j,k +real (kind=dbl_kind) :: & + trunoff_s = c0, & + trunoff_n = c0, & + r_s = c1, & + r_n = c1, & + r_runoff= c1 !=(1-min(r_max_iceberg. r_s(or r_n)) + ! Fields obtained here are all at T cell center. before being sent to MOM4, vector ! (Taux, Tauy) should be shifted on to U point as required !------------------------------------------------------------------------------- @@ -1229,17 +984,16 @@ subroutine get_i2o_fields ! have already been 'weighted' using aice (when calculated in "evp_finish". ! But, this weight has been removed in strocnx/yT (see "evp_finish"), therfore ! we need put it on again here. -io_strsu = um_taux * (1. - maice) - mstrocnxT * maice -io_strsv = um_tauy * (1. - maice) - mstrocnyT * maice +io_strsu = um_taux * (c1 - maice) - mstrocnxT * maice +io_strsv = um_tauy * (c1 - maice) - mstrocnyT * maice !(3) freshwater flux to ocean: rainfall (+ ice melting water flux ?) -io_rain = um_rain * (1. 
- maice) -!!CH: confirmed: -!!if (ice_fwflux) io_rain = io_rain + mfresh !always .t. -!!NOTE mfresh is now splitted into melt (14) and form (15) and passed into ocn seperately. +io_rain = um_rain * (c1 - maice) +!202412: fixing watermass loss from ocean by adding a small, constant fwflux into rain-- +io_rain = io_rain + add_lprec !(4) freshwater flux to ocean: snowfall -io_snow = um_snow * (1. - maice) +io_snow = um_snow * (c1 - maice) !(5) salt flux to ocean io_stflx = mfsalt @@ -1250,10 +1004,8 @@ subroutine get_i2o_fields !(7) short wave radiation !(CH: the (1-aice) weight should not be here 'cos all fluxes passed in from ! UM have already been aice-weighted when they are calculated there!!!) -!io_swflx = um_swflx * (1. - maice) + mfswthru +!io_swflx = um_swflx * (c1 - maice) + mfswthru io_swflx = um_swflx + mfswthru -!!!20100616: test for more swflx -!!!io_swflx = 1.064 * um_swflx + mfswthru !(8) latent heat flux (positive out of ocean as required by MOM4) io_qflux = um_evap * Lvap !Note it's already weighted in UM for open sea. @@ -1264,59 +1016,123 @@ subroutine get_i2o_fields !(10) net long wave radiation positive down io_lwflx = um_lwflx -!(11) runoff (!check the incoming field! pattern? remapping ok? conserved? ...) -io_runof = um_runoff -! CHECK with SM about the annual cycle of core-runoff! (we only have annual mean) +!(11) runoff +!*** mask off "extra/useless" runoff on dry points *** +where (tmask) + io_runof = um_runoff +else where + um_runoff = c0 +end where + +call gather_global(grunoff, io_runof, master_task, distrb_info) + +if (my_task == master_task) then + + trunoff_s = c0 + do j = 1, runoff_je_s + do i = 1, nx_global + trunoff_s = trunoff_s + gtarea(i,j) * grunoff(i,j) + grunoff(i,j) = grunoff(i,j) * (c1 - iceberg_rate_s) !do deduction + enddo + enddo + trunoff_n = c0 + do j = runoff_js_n, runoff_je_n + do i = runoff_is_n, runoff_ie_n + trunoff_n = trunoff_n + gtarea(i,j) * grunoff(i,j) + grunoff(i,j) = grunoff(i,j) * (c1 - iceberg_rate_n) !do deduction + enddo + enddo + !Now global runoff has been "updated" (deduction done for iceberg). +endif + +!distributed the resultant runoff and iceberg fluxes: +call scatter_global(vwork, grunoff, master_task, distrb_info, & + field_loc_center, field_type_scalar) +io_runof(:,:,:) = vwork(:,:,:) +!2 new flux items associated with the iceberg discharged into ocean +!(18) water flux due to land ice melt off Antarctica and Greenland (kg/m^2/s) +!(19) heat flux due to land ice melt off Antarctica and Greenland + +!XXXXXX +IF (my_task == master_task) THEN + + gwork(:,:) = c0 + do i = 1, nx_global + do j = 1, iceberg_je_s + gwork(i, j) = gicebergfw(i, j, month) * iceberg_rate_s * trunoff_s / ticeberg_s(month) + enddo + do j = iceberg_js_n, ny_global + gwork(i, j) = gicebergfw(i, j, month) * iceberg_rate_n * trunoff_n / ticeberg_n(month) + enddo + enddo + !Now global iceberg has been defined (using the deduction from runoff) + +ENDIF +!XXXXXX + +call scatter_global(vwork, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) +io_licefw(:,:,:) = vwork(:,:,:) !i2o field No 18. + +!Also count in the latent heat carried with the runoff part, as done below, thus allowing +!for (rough) consistency of energy exchange no matter what iceberg_rate_s/n are used. 
+!Warning: the follow approach would lose all the runoff LH, if runoff_lh=.false., in no-iceberg case + +if (my_task == master_task) then + if ( runoff_lh ) then + + do i = 1, nx_global + do j = 1, runoff_je_s + gwork(i,j) = gwork(i,j) + grunoff(i,j) + enddo + enddo + do i = runoff_is_n, runoff_ie_n + do j = runoff_js_n, runoff_je_n + gwork(i,j) = gwork(i,j) + grunoff(i,j) + enddo + enddo + else + !If runoff with latent heat flux crashes the model in no-iceberg case, due probably to too big LH(?) + !all the LH carried by runoff is applied to iceberg areas. + do i = 1, nx_global + do j = 1, iceberg_je_s + gwork(i,j) = gwork(i,j)/max(iceberg_rate_s, 0.0001_dbl_kind) !get the whole runoff LH onto iceberg + enddo + do j = iceberg_js_n, ny_global + gwork(i,j) = gwork(i,j)/max(iceberg_rate_n, 0.0001_dbl_kind) + enddo + enddo + endif +endif + +call scatter_global(vwork, gwork, master_task, distrb_info, & + field_loc_center, field_type_scalar) +io_liceht = - vwork * Lfresh * iceberg_lh !FW converted into LH flux (W/m^2). + !!i2o field No 19. !(12) pressure pice = gravit * msicemass !---------------------------------------------------------------------------- !sicemass = rho_ice x hi + rho_snow x hs (in m) -! -! Should we set limit to the ovelying ice pressure as suggested in MOM4 code? -!(see ocean_sbc.F90) if yes, we may use following -!pice(i,j) = min(pice(i,j), gravit*rhow*max_ice_thickness) -! (note rhow = 1026 kg/m^3 here, but mom4 instead uses rho0 = 1035 kg/m^3) -! No, let mom4 handle it (see ocean_sbc.F90) -! -!as GFDL SIS, we use patm 'anormaly' and then add in the ice/snow pressure! -!29/11/2007 -!---------------------------------------------------------------------------- if (ice_pressure_on) then io_press = pice * maice endif if (air_pressure_on) then !as GFDL SIS, we use patm anormaly, i.e., taking off 1.e5 Pa ! - io_press(:,:,:) = io_press(:,:,:) + um_press(:,:,:) - 1.0e5 + io_press(:,:,:) = io_press(:,:,:) + um_press(:,:,:) - 1.0e5_dbl_kind endif !(13) ice concentration io_aice = maice !(14) ice melt fwflux -io_melt = max(0.0,mfresh(:,:,:)) +io_melt = max(c0,mfresh(:,:,:)) !(15) ice form fwflux -io_form = min(0.0,mfresh(:,:,:)) +io_form = min(c0,mfresh(:,:,:)) !(16) CO2 io_co2 = um_co2 !(17) 10m winnspeed io_wnd = um_wnd -!XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -!2 more flux items induced by land ice "increment" discharged into ocean -!(18) water flux due to land ice melt off Antarctica and Greenland (kg/m^2/s) -!(19) heat flux due to land ice melt off Antarctica and Greenland -! -!Note there are 5 options for the land ice discharge as icebgerg melt waterflux! - -IF (iceberg == 0) THEN - io_licefw = (max(0.0, um_icenth - lice_nth) * msk_nth/amsk_nth + & - max(0.0, um_icesth - lice_sth) * msk_sth/amsk_sth)/dt_cpl_ai -ELSE !case 1, 2, 3 ,4: - io_licefw(:,:,:) = icebergfw(:,:,month,:) -ENDIF - -io_liceht = - io_licefw * Lfresh !?! (W/m^2) - return end subroutine get_i2o_fields @@ -1350,12 +1166,6 @@ subroutine initialize_mice_fields_4_i2a mthikn = 0. msnown = 0. -mfoifr = 0. -mitopt = 0. -mitopk = 0. -mpndfn = 0. -mpndtn = 0. - return end subroutine initialize_mice_fields_4_i2a @@ -1369,7 +1179,6 @@ subroutine initialize_mocn_fields_4_i2a mssv = 0. mco2 = 0. mco2fx = 0. -msstfz = 0. 
return end subroutine initialize_mocn_fields_4_i2a @@ -1384,11 +1193,11 @@ subroutine time_average_ocn_fields_4_i2a mssv(:,:,:) = mssv(:,:,:) + ocn_ssv(:,:,:) * coef_ai mco2(:,:,:) = mco2(:,:,:) + ocn_co2(:,:,:) * coef_ai mco2fx(:,:,:) = mco2fx(:,:,:) + ocn_co2fx(:,:,:) * coef_ai -msstfz(:,:,:) = msstfz(:,:,:) + Tf(:,:,:) * coef_ai return end subroutine time_average_ocn_fields_4_i2a +!================================================= subroutine time_average_fields_4_i2o !now for each timestep io coupling, so no time-averaging is required. implicit none @@ -1420,24 +1229,6 @@ subroutine time_average_fields_4_i2a call to_ugrid(aice, aiiu) maiu(:,:,:) = maiu(:,:,:) + aiiu(:,:,:) * coef_ai !U cell ice concentraction -!BX: "First order" ice fraction (mfoifr, below) is required for GSI8 "Time-Travelling Ice" (TTI) -! coupling approach. It may be different than the "normal" ice fraction (maicen, above) if -! maicen is regridded with second order conservation scheme (as "proposed" in GC3). -! BUT, GC3 actually uses 1st order remapping for both of them, so they are identical! -! In ACCESS practice, no second order remapping has been appllied to any coupling field, and -! maicen and mfoifr are ALWAYS the same thing. -! We pass both of them to UM for "concictency" (thus keeping UM coupling code intact)! -mfoifr(:,:,:,:) = mfoifr(:,:,:,:) + aicen(:,:,:,:)* coef_ai !==maicen -mitopt(:,:,:,:) = mitopt(:,:,:,:) + Tn_top(:,:,:,:) * coef_ai -mitopk(:,:,:,:) = mitopk(:,:,:,:) + keffn_top(:,:,:,:) * coef_ai -mpndfn(:,:,:,:) = mpndfn(:,:,:,:) + apeffn(:,:,:,:) * coef_ai -mpndtn(:,:,:,:) = mpndtn(:,:,:,:) + trcrn(:,:,nt_hpnd,:,:) * coef_ai - -!add one more a-i interval mean field (integrated ice concentration), which, togthere with maicen, -!should be saved at the end of current run for use at the beginning of the continue run (e.g., -!converting ice fluxes into GBM. see routines "atm_icefluxes_back2GBM", and "get_sbc_ice")...... -!maice_ia(:,:,:) = maice_ia(:,:,:) + aice(:,:,:) * coef_ai - !ocn fields: !must be done after calling from_ocn so as to get the most recently updated ocn fields, !therefore a separate call to "time_average_ocn_fields_4_i2a" is done for this purpose. 
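The accumulations above build atm-ice coupling-interval means. Assuming coef_ai = dt_cice/dt_cpl_ai (its definition is not shown in this hunk), adding a field once per ice time step over one interval of N = dt_cpl_ai/dt_cice steps yields its time average; a minimal sketch with hypothetical names f_mean and f_step:

   ! reset at the start of each a-i coupling interval
   f_mean(:,:,:) = c0
   ! then, once per ice time step (N = dt_cpl_ai/dt_cice times per interval):
   f_mean(:,:,:) = f_mean(:,:,:) + f_step(:,:,:) * coef_ai
   ! after N steps: f_mean = (dt_cice/dt_cpl_ai) * sum(f_step) = (1/N) * sum(f_step),
   ! i.e. the mean over the coupling interval, which is what get_i2a_fields passes on.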
@@ -1490,32 +1281,6 @@ subroutine check_i2a_fields(nstep) case('vvel_ia'); vwork = ia_vvel case('co2_i2'); vwork = ia_co2 case('co2fx_i2'); vwork = ia_co2fx - case('sstfz_ia'); vwork = ia_sstfz - case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) - case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) - case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) - case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) - case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) - case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) - case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) - case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) - case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) - case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) - case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) - case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) - case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) - case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) - case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) - case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) - case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) - case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) - case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) - case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) - case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) - case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) - case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) - case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) - case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) end select call gather_global(gwork, vwork, master_task, distrb_info) @@ -1583,18 +1348,6 @@ subroutine check_a2i_fields(nstep) case ('press_i'); vwork = um_press case ('co2_ai'); vwork = um_co2 case ('wnd_ai'); vwork = um_wnd - case ('icenth_i'); vwork = um_icenth - case ('icesth_i'); vwork = um_icesth - case ('tsfice01'); vwork = um_tsfice(:,:,1,:) - case ('tsfice02'); vwork = um_tsfice(:,:,2,:) - case ('tsfice03'); vwork = um_tsfice(:,:,3,:) - case ('tsfice04'); vwork = um_tsfice(:,:,4,:) - case ('tsfice05'); vwork = um_tsfice(:,:,5,:) - case ('iceevp01'); vwork = um_iceevp(:,:,1,:) - case ('iceevp02'); vwork = um_iceevp(:,:,2,:) - case ('iceevp03'); vwork = um_iceevp(:,:,3,:) - case ('iceevp04'); vwork = um_iceevp(:,:,4,:) - case ('iceevp05'); vwork = um_iceevp(:,:,5,:) end select call gather_global(gwork, vwork, master_task, distrb_info) @@ -1670,6 +1423,11 @@ subroutine check_i2o_fields(nstep, scale) vwork = scale * io_co2 case('wnd_i1') vwork = scale * io_wnd + !202407: 2 more fields added: + case('lice_fw') + vwork = scale * io_licefw + case('lice_ht') + vwork = scale * io_liceht end select call gather_global(gwork, vwork, master_task, distrb_info) @@ -1941,22 +1699,6 @@ subroutine check_ice_sbc_fields(ncfilenm) call gather_global(gwork, frain, master_task, distrb_info) if (my_task == 0) call write_nc2D(ncid, 'frain', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -v3d(:,:,:) = trcrn(:,:,nt_Tsfc,1,:) -call gather_global(gwork, v3d, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'trcrn1', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -v3d(:,:,:) = trcrn(:,:,nt_Tsfc,2,:) -call gather_global(gwork, v3d, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'trcrn2', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -v3d(:,:,:) = trcrn(:,:,nt_Tsfc,3,:) -call gather_global(gwork, v3d, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'trcrn3', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -v3d(:,:,:) = trcrn(:,:,nt_Tsfc,4,:) -call gather_global(gwork, v3d, master_task, distrb_info) 
-if (my_task == 0) call write_nc2D(ncid, 'trcrn4', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -v3d(:,:,:) = trcrn(:,:,nt_Tsfc,5,:) -call gather_global(gwork, v3d, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'trcrn5', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - !from ocen: call gather_global(gwork, frzmlt, master_task, distrb_info) if (my_task == 0) call write_nc2D(ncid, 'frzmlt', gwork, 1, il_im,il_jm,currstep,ilout=il_out) @@ -2050,83 +1792,6 @@ subroutine check_iceberg_fields(ncfilenm) return end subroutine check_iceberg_fields -!================================================= -subroutine check_iceberg_reading(ncfilenm) - -!this is used to check land ice fields read in - -implicit none - -character*(*), intent(in) :: ncfilenm -integer(kind=int_kind) :: ncid,currstep, ilout, ll -data currstep/0/ -save currstep - -currstep=currstep+1 - -if (my_task == 0 .and. .not. file_exist(ncfilenm) ) then - call create_ncfile(ncfilenm,ncid,il_im,il_jm,ll=1,ilout=il_out) -endif - -if (my_task == 0) then - write(il_out,*) 'opening ncfile at nstep ', ncfilenm, currstep - call ncheck( nf_open(ncfilenm, nf_write,ncid) ) - call write_nc_1Dtime(real(currstep),currstep,'time',ncid) -end if - -vwork(:,:,:) = icebergfw(:,:,1,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm01', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,2,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm02', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,3,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm03', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,4,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm04', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,5,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm05', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,6,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm06', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,7,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm07', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,8,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm08', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,9,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm09', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,10,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm10', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,11,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm11', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -vwork(:,:,:) = icebergfw(:,:,12,:) -call gather_global(gwork, vwork, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'icebergfm12', 
gwork, 1, il_im,il_jm,currstep,ilout=il_out) - -if (my_task == 0) call ncheck(nf_close(ncid)) - -return - -end subroutine check_iceberg_reading !================================================= subroutine check_landice_fields_1(ncfilenm) @@ -2152,15 +1817,6 @@ subroutine check_landice_fields_1(ncfilenm) call write_nc_1Dtime(real(currstep),currstep,'time',ncid) end if -call gather_global(gwork, lice_sth, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'lice_sth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -call gather_global(gwork, lice_nth, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'lice_nth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -call gather_global(gwork, um_icesth, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'um_icesth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -call gather_global(gwork, um_icenth, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'um_icenth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - if (my_task == 0) call ncheck(nf_close(ncid)) return @@ -2191,15 +1847,6 @@ subroutine check_landice_fields_2(ncfilenm) call write_nc_1Dtime(real(currstep),currstep,'time',ncid) end if -call gather_global(gwork, lice_sth, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'lice_sth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -call gather_global(gwork, lice_nth, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'lice_nth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -call gather_global(gwork, um_icesth, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'um_icesth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) -call gather_global(gwork, um_icenth, master_task, distrb_info) -if (my_task == 0) call write_nc2D(ncid, 'um_icenth', gwork, 1, il_im,il_jm,currstep,ilout=il_out) - if (my_task == 0) call ncheck(nf_close(ncid)) return diff --git a/drivers/access/cpl_interface.F90 b/drivers/access/cpl_interface.F90 index dc71f43c..34e324b5 100644 --- a/drivers/access/cpl_interface.F90 +++ b/drivers/access/cpl_interface.F90 @@ -62,6 +62,9 @@ module cpl_interface real(kind=dbl_kind), dimension(:), allocatable :: rla_bufsend real(kind=dbl_kind), dimension(:,:), allocatable :: vwork2d !local domain work array, 4 coupling data passing + + logical, parameter :: debug = .false. + contains !====================================================================== @@ -277,9 +280,9 @@ subroutine init_cpl ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - write(il_out,*) ' this block: iblock, jblock=', this_block%iblock, this_block%jblock -! write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi - write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + ! write(il_out,*) ' this block: iblock, jblock=', this_block%iblock, this_block%jblock + ! write(il_out,*) ' block:', iblk, "ilo, jlo, ihi, jhi=", ilo, jlo, ihi, jhi + ! write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) if (this_block%i_glob(ilo) < l_ilo) then l_ilo = this_block%i_glob(ilo) gh_ilo = this_block%i_glob(ilo-nghost) @@ -305,10 +308,10 @@ subroutine init_cpl endif end do - write(il_out,*) ' local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi - write(il_out,*) ' partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 + ! 
write(il_out,*) ' local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + ! write(il_out,*) ' partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 !print ghost info - write(il_out,*) ' ghost global:',gh_ilo, gh_ihi, gh_jlo, gh_jhi + ! write(il_out,*) ' ghost global:',gh_ilo, gh_ihi, gh_jlo, gh_jhi !calculate partition using nprocsX and nprocsX l_ilo=mod(my_task,nprocsX)*nx_global/nprocsX+1 @@ -316,8 +319,8 @@ subroutine init_cpl l_jlo=int(my_task/nprocsX) * ny_global/nprocsY+1 l_jhi=l_jlo+ny_global/nprocsY - 1 - write(il_out,*) ' 2local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi - write(il_out,*) ' 2partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 + ! write(il_out,*) ' 2local partion, ilo, ihi, jlo, jhi=', l_ilo, l_ihi, l_jlo, l_jhi + ! write(il_out,*) ' 2partition x,y sizes:', l_ihi-l_ilo+1, l_jhi-l_jlo+1 call mpi_gather(l_ilo, 1, mpi_integer, vilo, 1, mpi_integer, 0, MPI_COMM_ICE, ierror) call broadcast_array(vilo, 0) @@ -350,10 +353,10 @@ subroutine init_cpl !disps(n) = ((vilo(n)-1)*ny_global + (vjlo(n)-1)) end do - write(il_out,*) ' vilo ', vilo - write(il_out,*) ' vjlo ', vjlo - write(il_out,*) ' counts ', counts - write(il_out,*) ' disps ', disps + ! write(il_out,*) ' vilo ', vilo + ! write(il_out,*) ' vjlo ', vjlo + ! write(il_out,*) ' counts ', counts + ! write(il_out,*) ' disps ', disps ! if ( ll_comparal ) then ! il_im = l_ihi-l_ilo+1 !nx_global @@ -382,7 +385,7 @@ subroutine init_cpl call decomp_def (il_part_id, il_length, il_imjm, & my_task, il_nbcplproc, ll_comparal, il_out) - write(il_out,*)'(init_cpl) called decomp_def, my_task, ierror = ',my_task, ierror + if (debug) write(il_out,*)'(init_cpl) called decomp_def, my_task, ierror = ',my_task, ierror ! ! PSMILe coupling fields declaration @@ -449,30 +452,7 @@ subroutine init_cpl cl_writ(nsend_i2a)='co2_i2' nsend_i2a = nsend_i2a + 1 cl_writ(nsend_i2a)='co2fx_i2' - ! new fields sending to UM GA7 - nsend_i2a = nsend_i2a + 1 - cl_writ(nsend_i2a)='sstfz_ia' - do jf = 1, ncat - nsend_i2a = nsend_i2a + 1 - write(cl_writ(nsend_i2a), '(a5,i2.2)')'foifr',jf - enddo - do jf = 1, ncat - nsend_i2a = nsend_i2a + 1 - write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopt',jf - enddo - do jf = 1, ncat - nsend_i2a = nsend_i2a + 1 - write(cl_writ(nsend_i2a), '(a5,i2.2)')'itopk',jf - enddo - do jf = 1, ncat - nsend_i2a = nsend_i2a + 1 - write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndfn',jf - enddo - do jf = 1, ncat - nsend_i2a = nsend_i2a + 1 - write(cl_writ(nsend_i2a), '(a5,i2.2)')'pndtn',jf - enddo - + if (my_task == 0) then write(il_out,*) 'init_cpl: Number of fields sent to atm: ',nsend_i2a endif @@ -523,7 +503,7 @@ subroutine init_cpl if (my_task == 0 .or. ll_comparal) then - write(il_out,*) 'init_cpl: Number of fields sent to ocn: ',nsend_i2o - nsend_i2a + if (debug) write(il_out,*) 'init_cpl: Number of fields sent to ocn: ',nsend_i2o - nsend_i2a if (nsend_i2o /= jpfldout) then write(il_out,*) @@ -532,7 +512,7 @@ subroutine init_cpl call abort_ice('CICE: Number of outgoing coupling fields incorrect!') endif - write(il_out,*) 'init_cpl: Total number of fields sent from ice: ',jpfldout + if (debug) write(il_out,*) 'init_cpl: Total number of fields sent from ice: ',jpfldout !jpfldout == nsend_i2o! !---------------------! @@ -593,22 +573,9 @@ subroutine init_cpl cl_read(nrecv_a2i) = 'co2_ai' nrecv_a2i = nrecv_a2i + 1 cl_read(nrecv_a2i) = 'wnd_ai' - ! 
new fields recving from UM GA7 - nrecv_a2i = nrecv_a2i + 1 - cl_read(nrecv_a2i) = 'icenth_i' - nrecv_a2i = nrecv_a2i + 1 - cl_read(nrecv_a2i) = 'icesth_i' - do jf = 1, ncat - nrecv_a2i = nrecv_a2i + 1 - write(cl_read(nrecv_a2i), '(a6,i2.2)')'tsfice',jf - enddo - do jf = 1, ncat - nrecv_a2i = nrecv_a2i + 1 - write(cl_read(nrecv_a2i), '(a6,i2.2)')'iceevp',jf - enddo if (my_task==0 .or. ll_comparal) then - write(il_out,*) 'init_cpl: Number of fields rcvd from atm: ',nrecv_a2i + if (debug) write(il_out,*) 'init_cpl: Number of fields rcvd from atm: ',nrecv_a2i endif ! @@ -637,7 +604,7 @@ subroutine init_cpl if (my_task==0 .or. ll_comparal) then - write(il_out,*) 'init_cpl: Number of fields rcvd from ocn: ',nrecv_o2i-nrecv_a2i + if (debug) write(il_out,*) 'init_cpl: Number of fields rcvd from ocn: ',nrecv_o2i-nrecv_a2i if (nrecv_o2i /= jpfldin) then write(il_out,*) @@ -648,7 +615,7 @@ subroutine init_cpl !jpfldin == nrecv_o2i! !--------------------! - write(il_out,*) 'init_cpl: Total number of fields rcvd by ice: ',jpfldin + if (debug) write(il_out,*) 'init_cpl: Total number of fields rcvd by ice: ',jpfldin do jf=1, jpfldin call prism_def_var_proto (il_var_id_in(jf), cl_read(jf), il_part_id, & @@ -687,18 +654,6 @@ subroutine init_cpl allocate (um_bmlt(nx_block,ny_block,ncat,max_blocks)); um_bmlt(:,:,:,:) = 0 allocate (um_co2(nx_block,ny_block,max_blocks)); um_co2(:,:,:) = 0 allocate (um_wnd(nx_block,ny_block,max_blocks)); um_wnd(:,:,:) = 0 - allocate (um_icenth(nx_block,ny_block,max_blocks)); um_icenth(:,:,:) = 0 - allocate (um_icesth(nx_block,ny_block,max_blocks)); um_icesth(:,:,:) = 0 - allocate (um_tsfice(nx_block,ny_block,ncat,max_blocks)); um_tsfice(:,:,:,:) = 0 - allocate (um_iceevp(nx_block,ny_block,ncat,max_blocks)); um_iceevp(:,:,:,:) = 0 - !20171024: 6 more arrays added (for land ice discharge into ocean) - allocate (lice_nth(nx_block,ny_block,max_blocks)); lice_nth(:,:,:) = 0 - allocate (lice_sth(nx_block,ny_block,max_blocks)); lice_sth(:,:,:) = 0 - allocate (msk_nth(nx_block,ny_block,max_blocks)); msk_nth(:,:,:) = 0 - allocate (msk_sth(nx_block,ny_block,max_blocks)); msk_sth(:,:,:) = 0 - allocate (amsk_nth(nx_block,ny_block,max_blocks)); amsk_nth(:,:,:) = 0 - allocate (amsk_sth(nx_block,ny_block,max_blocks)); amsk_sth(:,:,:) = 0 - ! allocate ( core_runoff(nx_block,ny_block,max_blocks)); core_runoff(:,:,:) = 0. ! @@ -725,12 +680,6 @@ subroutine init_cpl allocate (ia_thikn(nx_block,ny_block,ncat,max_blocks)); ia_thikn(:,:,:,:) = 0 allocate (ia_co2(nx_block,ny_block,max_blocks)); ia_co2(:,:,:) = 0 allocate (ia_co2fx(nx_block,ny_block,max_blocks)); ia_co2fx(:,:,:) = 0 - allocate (ia_sstfz(nx_block,ny_block,max_blocks)); ia_sstfz(:,:,:) = 0 - allocate (ia_foifr(nx_block,ny_block,ncat,max_blocks)); ia_foifr(:,:,:,:) = 0 - allocate (ia_itopt(nx_block,ny_block,ncat,max_blocks)); ia_itopt(:,:,:,:) = 0 - allocate (ia_itopk(nx_block,ny_block,ncat,max_blocks)); ia_itopk(:,:,:,:) = 0 - allocate (ia_pndfn(nx_block,ny_block,ncat,max_blocks)); ia_pndfn(:,:,:,:) = 0 - allocate (ia_pndtn(nx_block,ny_block,ncat,max_blocks)); ia_pndtn(:,:,:,:) = 0 ! ! to ocn: allocate (io_strsu(nx_block,ny_block,max_blocks)); io_strsu(:,:,:) = 0 @@ -773,23 +722,29 @@ subroutine init_cpl allocate (mssv(nx_block,ny_block,max_blocks)); mssv(:,:,:) = 0 allocate (mco2(nx_block,ny_block,max_blocks)); mco2(:,:,:) = 0 allocate (mco2fx(nx_block,ny_block,max_blocks)); mco2fx(:,:,:) = 0 - allocate (msstfz(nx_block,ny_block,max_blocks)); msstfz(:,:,:) = 0 ! 
IA cpl int time-average (4D) allocate (maicen(nx_block,ny_block,ncat,max_blocks)); maicen(:,:,:,:) = 0 allocate (msnown(nx_block,ny_block,ncat,max_blocks)); msnown(:,:,:,:) = 0 allocate (mthikn(nx_block,ny_block,ncat,max_blocks)); mthikn(:,:,:,:) = 0 - allocate (mfoifr(nx_block,ny_block,ncat,max_blocks)); mfoifr(:,:,:,:) = 0 - allocate (mitopt(nx_block,ny_block,ncat,max_blocks)); mitopt(:,:,:,:) = 0 - allocate (mitopk(nx_block,ny_block,ncat,max_blocks)); mitopk(:,:,:,:) = 0 - allocate (mpndfn(nx_block,ny_block,ncat,max_blocks)); mpndfn(:,:,:,:) = 0 - allocate (mpndtn(nx_block,ny_block,ncat,max_blocks)); mpndtn(:,:,:,:) = 0 -!BX: allocate (maicen_saved(nx_block,ny_block,ncat,max_blocks)); maicen_saved(:,:,:,:) = 0 -! - allocate (icebergfw(nx_block,ny_block,12,max_blocks)); icebergfw(:,:,:,:) = 0 + + allocate (ticeberg_s(12)); ticeberg_s(:) = 0 + allocate (ticeberg_n(12)); ticeberg_n(:) = 0 + allocate (gwork(nx_global,ny_global)); gwork(:,:) = 0 + + + if (my_task == master_task) then + !global domain runoff for iceberg runoff calcs + allocate (gicebergfw(nx_global,ny_global,12)); gicebergfw(:,:,:) = 0 + allocate (grunoff(nx_global,ny_global)); grunoff(:,:) = 0 + allocate (gtarea(nx_global,ny_global)); gtarea(:,:) = 0 + else + allocate (gicebergfw(1,1,12)); gicebergfw(:,:,:) = 0 + allocate (grunoff(1,1)); grunoff(:,:) = 0 + allocate (gtarea(1,1)); gtarea(:,:) = 0 + endif allocate (vwork(nx_block,ny_block,max_blocks)); vwork(:,:,:) = 0 - allocate (gwork(nx_global,ny_global)); gwork(:,:) = 0 allocate (sicemass(nx_block,ny_block,max_blocks)); sicemass(:,:,:) = 0. allocate (vwork2d(l_ilo:l_ihi, l_jlo:l_jhi)); vwork2d(:,:) = 0. !l_ihi-l_ilo+1, l_jhi-l_jlo+1 @@ -830,14 +785,13 @@ subroutine from_atm(isteps) write(il_out,*) '(from_atm) Total number of fields to be rcvd: ', nrecv_a2i endif - write(il_out,*) "prism_get from_atm at sec: ", isteps + if (debug) write(il_out,*) "prism_get from_atm at sec: ", isteps do jf = 1, nrecv_a2i if (my_task==0 .or. ll_comparal ) then !jf-th field in - write(il_out,*) - write(il_out,*) '*** receiving coupling field No. ', jf, cl_read(jf) + if (debug) write(il_out,*) '*** receiving coupling field No. ', jf, cl_read(jf) !call flush(il_out) if (ll_comparal) then @@ -850,8 +804,7 @@ subroutine from_atm(isteps) write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror call prism_abort_proto(il_comp_id, 'cice from_atm','stop 1') else - write(il_out,*) - write(il_out,*)'(from_atm) rcvd at time with err: ',cl_read(jf),isteps,ierror + if (debug) write(il_out,*)'(from_atm) rcvd at time with err: ',cl_read(jf),isteps,ierror if (ll_comparal .and. chk_a2i_fields) then call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & @@ -889,9 +842,6 @@ subroutine from_atm(isteps) um_runoff(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =vwork2d(:,:) case ('wme_i'); um_wme(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) -! case ('rain_i'); um_rain(:,:,:) = vwork(:,:,:) -! 
case ('snow_i'); um_snow(:,:,:) = vwork(:,:,:) -!---20100825 -- just be cauious: ------------------------- case ('rain_i'); um_rain(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) =max(0.0,vwork2d(:,:)) case ('snow_i'); @@ -917,23 +867,10 @@ subroutine from_atm(isteps) case ('press_i');um_press(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) case ('co2_ai');um_co2(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) case ('wnd_ai');um_wnd(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) - case ('icenth_i');um_icenth(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) - case ('icesth_i');um_icesth(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1) = vwork2d(:,:) - case ('tsfice01');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) - case ('tsfice02');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) - case ('tsfice03');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) - case ('tsfice04');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) - case ('tsfice05');um_tsfice(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) - case ('iceevp01');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,1,1) = vwork2d(:,:) - case ('iceevp02');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,2,1) = vwork2d(:,:) - case ('iceevp03');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,3,1) = vwork2d(:,:) - case ('iceevp04');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,4,1) = vwork2d(:,:) - case ('iceevp05');um_iceevp(1+nghost:nx_block-nghost,1+nghost:ny_block-nghost,5,1) = vwork2d(:,:) end select if (my_task == 0 .or. ll_comparal) then - write(il_out,*) - write(il_out,*)'(from_atm) done: ', jf, trim(cl_read(jf)) + if (debug) write(il_out,*)'(from_atm) done: ', jf, trim(cl_read(jf)) endif enddo @@ -956,10 +893,6 @@ subroutine from_atm(isteps) call ice_HaloUpdate(um_press, halo_info,field_loc_center,field_type_vector) call ice_HaloUpdate(um_co2, halo_info, field_loc_center, field_type_vector) call ice_HaloUpdate(um_wnd, halo_info, field_loc_center, field_type_vector) - call ice_HaloUpdate(um_icenth,halo_info, field_loc_center,field_type_vector) - call ice_HaloUpdate(um_icesth,halo_info, field_loc_center,field_type_vector) - call ice_HaloUpdate(um_tsfice,halo_info, field_loc_center,field_type_vector) - call ice_HaloUpdate(um_iceevp,halo_info, field_loc_center,field_type_vector) IF (rotate_winds) THEN !rotate_winds=.t. means oasis does not do the vector rotation. @@ -1032,14 +965,13 @@ subroutine from_ocn(isteps) endif endif - write(il_out,*) "prism_get from_ocn at sec: ", isteps + if (debug) write(il_out,*) "prism_get from_ocn at sec: ", isteps do jf = nrecv_a2i + 1, jpfldin if (my_task==0 .or. ll_comparal) then !jf-th field in - write(il_out,*) - write(il_out,*) '*** receiving coupling fields No. ', jf, cl_read(jf) + if (debug) write(il_out,*) '*** receiving coupling fields No. 
', jf, cl_read(jf) if(ll_comparal) then call prism_get_proto (il_var_id_in(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) else @@ -1050,8 +982,7 @@ subroutine from_ocn(isteps) write(il_out,*) 'Err in _get_ sst at time with error: ', isteps, ierror call prism_abort_proto(il_comp_id, 'cice from_ocn','stop 1') else - write(il_out,*) - write(il_out,*)'(from_ocn) rcvd at time with err: ',cl_read(jf),isteps,ierror + if (debug) write(il_out,*)'(from_ocn) rcvd at time with err: ',cl_read(jf),isteps,ierror if(ll_comparal .and. chk_o2i_fields) then call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) @@ -1146,14 +1077,14 @@ subroutine into_ocn(isteps) type (block) :: this_block ! block information for current block integer(kind=int_kind) :: ncid,currstep,ll,ilout + data currstep/0/ save currstep currstep=currstep+1 if (my_task == 0) then - write(il_out,*) - write(il_out,*) '(into_ocn) sending coupling fields at stime= ', isteps + if (debug) write(il_out,*) '(into_ocn) sending coupling fields at stime= ', isteps if (chk_i2o_fields) then if ( .not. file_exist('fields_i2o_in_ice.nc') ) then call create_ncfile('fields_i2o_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) @@ -1164,7 +1095,7 @@ subroutine into_ocn(isteps) endif endif - write(il_out,*) "prism_put into_ocn at sec: ", isteps + if (debug) write(il_out,*) "prism_put into_ocn at sec: ", isteps do jf = nsend_i2a + 1, jpfldout !CH: make sure the 'LIMITS' are to be released! @@ -1216,8 +1147,8 @@ subroutine into_ocn(isteps) endif if (my_task == 0 .or. ll_comparal) then - write(il_out,*) - write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + if (debug) write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + if(ll_comparal) then call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) else @@ -1228,8 +1159,7 @@ subroutine into_ocn(isteps) write(il_out,*) '(into_ocn) Err in _put_ ', cl_writ(jf), isteps, ierror call prism_abort_proto(il_comp_id, 'cice into_ocn','STOP 1') else - write(il_out,*) - write(il_out,*)'(into_ocn) sent: ', cl_writ(jf), isteps, ierror + if (debug) write(il_out,*)'(into_ocn) sent: ', cl_writ(jf), isteps, ierror if(chk_i2o_fields .and. ll_comparal) then call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) @@ -1282,13 +1212,12 @@ subroutine into_atm(isteps) !end if if (my_task == 0) then - write(il_out,*) - write(il_out,*) '(into_atm) sending coupling fields at stime= ', isteps + if (debug) write(il_out,*) '(into_atm) sending coupling fields at stime= ', isteps if (chk_i2a_fields) then if ( .not. 
file_exist('fields_i2a_in_ice.nc') ) then call create_ncfile('fields_i2a_in_ice.nc',ncid,il_im,il_jm,ll=1,ilout=il_out) else - write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', isteps + if (debug) write(il_out,*) 'opening file fields_i2a_in_ice.nc at nstep = ', isteps call ncheck( nf_open('fields_i2a_in_ice.nc',nf_write,ncid) ) end if call write_nc_1Dtime(real(isteps),currstep,'time',ncid) @@ -1324,10 +1253,7 @@ subroutine into_atm(isteps) call u2tgrid_vector(ia_uvel) call u2tgrid_vector(ia_vvel) - !hxy599 debug - !call read_restart_i2a("i2a.nc", 0) - - write(il_out,*) "prism_put into_atm at sec: ", isteps + if (debug) write(il_out,*) "prism_put into_atm at sec: ", isteps do jf = 1, nsend_i2a select case (trim(cl_writ(jf))) @@ -1350,34 +1276,8 @@ subroutine into_atm(isteps) !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) case('uvel_ia'); vwork = ia_uvel * ocn_ssuv_factor !note ice u/v are also case('vvel_ia'); vwork = ia_vvel * ocn_ssuv_factor ! included here. - case('sstfz_ia'); vwork = ia_sstfz case('co2_i2'); vwork = ia_co2 case('co2fx_i2'); vwork = ia_co2fx - case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) - case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) - case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) - case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) - case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) - case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) - case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) - case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) - case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) - case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) - case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) - case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) - case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) - case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) - case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) - case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) - case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) - case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) - case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) - case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) - case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) - case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) - case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) - case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) - case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) end select if (.not. ll_comparal) then @@ -1404,13 +1304,11 @@ subroutine into_atm(isteps) end if if (my_task == 0 .or. ll_comparal) then - write(il_out,*) - write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) + if (debug) write(il_out,*) '*** sending coupling field No. ', jf, cl_writ(jf) !call prism_put_inquire_proto(il_var_id_out(jf),isteps,ierror) - write(il_out,*) - write(il_out,*) '(into_atm) what to do with this var==> Err= ',ierror + if (debug) write(il_out,*) '(into_atm) what to do with this var==> Err= ',ierror if(ll_comparal) then call prism_put_proto(il_var_id_out(jf), isteps, vwork2d(l_ilo:l_ihi, l_jlo:l_jhi), ierror) else @@ -1421,8 +1319,7 @@ subroutine into_atm(isteps) write(il_out,*) '(into_atm) Err in _put_ ', cl_writ(jf), isteps, ierror call prism_abort_proto(il_comp_id, 'cice into_atm','STOP 1') else - write(il_out,*) - write(il_out,*)'(into_atm) sent: ', cl_writ(jf), isteps, ierror + if (debug) write(il_out,*)'(into_atm) sent: ', cl_writ(jf), isteps, ierror if(chk_i2a_fields .and. 
ll_comparal) then call mpi_gatherv(vwork2d(l_ilo:l_ihi, l_jlo:l_jhi),1,sendsubarray,gwork, & counts,disps,resizedrecvsubarray, 0,MPI_COMM_ICE,ierror) @@ -1771,32 +1668,6 @@ subroutine save_restart_i2a(fname, nstep) !20100305: test effect of ssuv on the tropical cooling biases (as per Harry Henden) case('uvel_ia'); vwork = ia_uvel !* ocn_ssuv_factor !note ice u/v are also case('vvel_ia'); vwork = ia_vvel !* ocn_ssuv_factor ! included here. - case('sstfz_ia'); vwork = ia_sstfz - case('foifr01'); vwork(:,:,:) = ia_foifr(:,:,1,:) - case('foifr02'); vwork(:,:,:) = ia_foifr(:,:,2,:) - case('foifr03'); vwork(:,:,:) = ia_foifr(:,:,3,:) - case('foifr04'); vwork(:,:,:) = ia_foifr(:,:,4,:) - case('foifr05'); vwork(:,:,:) = ia_foifr(:,:,5,:) - case('itopt01'); vwork(:,:,:) = ia_itopt(:,:,1,:) - case('itopt02'); vwork(:,:,:) = ia_itopt(:,:,2,:) - case('itopt03'); vwork(:,:,:) = ia_itopt(:,:,3,:) - case('itopt04'); vwork(:,:,:) = ia_itopt(:,:,4,:) - case('itopt05'); vwork(:,:,:) = ia_itopt(:,:,5,:) - case('itopk01'); vwork(:,:,:) = ia_itopk(:,:,1,:) - case('itopk02'); vwork(:,:,:) = ia_itopk(:,:,2,:) - case('itopk03'); vwork(:,:,:) = ia_itopk(:,:,3,:) - case('itopk04'); vwork(:,:,:) = ia_itopk(:,:,4,:) - case('itopk05'); vwork(:,:,:) = ia_itopk(:,:,5,:) - case('pndfn01'); vwork(:,:,:) = ia_pndfn(:,:,1,:) - case('pndfn02'); vwork(:,:,:) = ia_pndfn(:,:,2,:) - case('pndfn03'); vwork(:,:,:) = ia_pndfn(:,:,3,:) - case('pndfn04'); vwork(:,:,:) = ia_pndfn(:,:,4,:) - case('pndfn05'); vwork(:,:,:) = ia_pndfn(:,:,5,:) - case('pndtn01'); vwork(:,:,:) = ia_pndtn(:,:,1,:) - case('pndtn02'); vwork(:,:,:) = ia_pndtn(:,:,2,:) - case('pndtn03'); vwork(:,:,:) = ia_pndtn(:,:,3,:) - case('pndtn04'); vwork(:,:,:) = ia_pndtn(:,:,4,:) - case('pndtn05'); vwork(:,:,:) = ia_pndtn(:,:,5,:) end select ! if (.not. ll_comparal) then diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index 76acef68..c433a8d0 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -15,8 +15,8 @@ module cpl_parameters !integer(kind=int_kind), parameter :: nrecv = 50 ! maxium no of flds rcvd allowed integer(kind=int_kind) :: nsend_i2a, nsend_i2o integer(kind=int_kind) :: nrecv_a2i, nrecv_o2i -integer(kind=int_kind), parameter :: jpfldout = 65 ! actual number of fields sent -integer(kind=int_kind), parameter :: jpfldin = 47 ! actual number of fields rcvd +integer(kind=int_kind), parameter :: jpfldout = 39 ! actual number of fields sent +integer(kind=int_kind), parameter :: jpfldin = 35 ! actual number of fields rcvd character(len=8), dimension(jpfldout) :: cl_writ ! Symb names fields sent character(len=8), dimension(jpfldin) :: cl_read ! Symb names fields rcvd @@ -77,10 +77,32 @@ module cpl_parameters real(kind=dbl_kind) :: iostress_factor = 1.0 ! 0.0 -- turn off stresses into MOM4. ! !20171227: Adding options for land ice discharge as iceberg melt (0,1,2,3,4) -integer(kind=int_kind) :: iceberg = 0 -!20180528: Adding "enhancement" factor for the iceberg waterflux -real(kind=dbl_kind) :: iceberg_factor = 1.0 -! +integer(kind=int_kind) :: iceberg = 2 +!Allow scaling: factor for the iceberg waterflux (won't change water mass budget) +real(kind=dbl_kind) :: & + iceberg_rate_s = 0.5, & !rate of "iceberg" taken from the runoff off Antarctica + iceberg_rate_n = 0.5, & !........................................... Greenland + iceberg_lh = 1.0 !iceberg latent heat (=0 if CABLE already calculated melting) + +logical :: runoff_lh = .true. 
!allow runoff to carry LH when discharged into ocean + !which would lead to ocean surface cooling, + !when .false., only carry LH to areas where + !runoff spread by the lice (iceberg) mask +integer(kind=int_kind) :: & + iceberg_je_s = 70, & !(iceberg_js_s=1, always) + runoff_je_s = 45, & !(runoff_js_s =1, always) + iceberg_js_n = 201, & !(iceberg_je_n=300, always) + runoff_is_n = 222, & !------ + runoff_ie_n = 270, & !These 4 indices define the + runoff_js_n = 230, & !Greenland runoff domain + runoff_je_n = 300 !----- +!202412: add option for "fixing" ocean water mass imbalance: ESM1.5 sees ~ 0.18543417E+08 kg/s +! (annual mean) water loss from ocean in a 100-year test run (liceA0G0), meaning a net +! loss rate of 0.18543417E+08/0.36133599E+15(ocean-surface-area) = 0.513190E-07 kg/m2/s. +! which will be compensated for by adding this much of waterflux to global lprec field-- +real(kind=dbl_kind) :: & + add_lprec = 0.513190E-07 !kg/m2/s. ==> set to 0.0 if no fixin! + namelist/coupling/ & caltype, & jobnum, & @@ -109,7 +131,18 @@ module cpl_parameters extreme_test, & imsk_evap, & iceberg, & - iceberg_factor, & + iceberg_rate_s, & + iceberg_rate_n, & + iceberg_lh, & + iceberg_je_s, & + runoff_je_s, & + iceberg_js_n, & + runoff_is_n, & + runoff_ie_n, & + runoff_js_n, & + runoff_je_n, & + runoff_lh, & + add_lprec, & ocn_ssuv_factor,& iostress_factor,& chk_a2i_fields, & diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index a8e6e90b..cf3cb338 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -141,8 +141,7 @@ subroutine ice_write_hist (ns) endif ! create file - iflag = ior(NF90_NETCDF4, NF90_CLASSIC_MODEL); - iflag = ior(iflag, NF90_CLOBBER); + iflag = ior(NF90_NETCDF4, NF90_CLOBBER) status = nf90_create(ncfile(ns), iflag, ncid) if (status /= nf90_noerr) call abort_ice( & 'ice: Error creating history ncfile '//ncfile(ns)) diff --git a/io_netcdf/ice_restart.F90 b/io_netcdf/ice_restart.F90 index aaa464af..484222c3 100644 --- a/io_netcdf/ice_restart.F90 +++ b/io_netcdf/ice_restart.F90 @@ -162,7 +162,7 @@ subroutine init_restart_write(filename_spec) write(nu_rst_pointer,'(a)') filename close(nu_rst_pointer) - iflag = ior(NF90_NETCDF4, NF90_CLASSIC_MODEL); + iflag = NF90_NETCDF4 status = nf90_create(trim(filename), iflag, ncid) if (status /= nf90_noerr) call abort_ice( & 'ice: Error creating restart ncfile '//trim(filename)) diff --git a/source/ice_calendar.F90 b/source/ice_calendar.F90 index 02a29d24..4df0c065 100755 --- a/source/ice_calendar.F90 +++ b/source/ice_calendar.F90 @@ -247,7 +247,7 @@ subroutine calendar(ttime) write_restart=0 #ifdef AusCOM - write(il_out,*) '(calendar) ttime = ', ttime + ! write(il_out,*) '(calendar) ttime = ', ttime #endif sec = mod(ttime,secday) ! elapsed seconds into date at ! end of dt @@ -291,9 +291,9 @@ subroutine calendar(ttime) ! Need this extra call to set_calendar to handle history ! file naming in leap years properly call set_calendar(nyr+year_init-1) - write(il_out,*) '(calendar) runtime0 = ', runtime0 - write(il_out,*) '(calendar) nyr, year_init, month, mday = ', nyr, year_init, month, mday - write(il_out,*) '(calendar) idate = ', idate + ! write(il_out,*) '(calendar) runtime0 = ', runtime0 + ! write(il_out,*) '(calendar) nyr, year_init, month, mday = ', nyr, year_init, month, mday + ! 
write(il_out,*) '(calendar) idate = ', idate #endif if (istep >= npt+1) stop_now = 1 #ifndef ACCESS diff --git a/source/ice_init.F90 b/source/ice_init.F90 index 7ab85351..759c75a5 100755 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -389,7 +389,7 @@ subroutine input_data call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then nml_error = -1 else @@ -429,7 +429,7 @@ subroutine input_data call abort_ice('ice: error reading namelist') endif call release_fileunit(nu_nml) - + !----------------------------------------------------------------- ! set up diagnostics output and resolve conflicts !----------------------------------------------------------------- @@ -633,6 +633,16 @@ subroutine input_data cap_fluxes = .false. endif +!20250214: add ktherm == 0 case: + if (ktherm == 0 .and. trim(tfrz_option) /= 'linear_salt') then + if (my_task == master_task) then + write (nu_diag,*) & + 'WARNING: ktherm = 0 and tfrz_option = ',trim(tfrz_option) + write (nu_diag,*) & + 'WARNING: For consistency, set tfrz_option = linear_salt' + endif + endif + if (ktherm == 1 .and. trim(tfrz_option) /= 'linear_salt') then if (my_task == master_task) then write (nu_diag,*) & diff --git a/source/ice_restart_driver.F90 b/source/ice_restart_driver.F90 index 0a1a5cc7..764caaf5 100755 --- a/source/ice_restart_driver.F90 +++ b/source/ice_restart_driver.F90 @@ -362,8 +362,7 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,stress12_1,'ruf8', & 'stress12_1',1,diag,field_loc_center,field_type_scalar) ! stress12_1 call read_restart_field(nu_restart,0,stress12_3,'ruf8', & - 'stress12_3',1,diag,field_loc_center,field_type_scalar) ! stress12_1 - + 'stress12_3',1,diag,field_loc_center,field_type_scalar) ! stress12_3 call read_restart_field(nu_restart,0,stress12_2,'ruf8', & 'stress12_2',1,diag,field_loc_center,field_type_scalar) ! 
stress12_2 call read_restart_field(nu_restart,0,stress12_4,'ruf8', & @@ -597,9 +596,9 @@ subroutine restartfile_v4 (ice_ic) write(nu_diag,*) 'Restart read at istep=',istep0,time,time_forc endif #ifndef AusCOM - call calendar(time) + call calendar(time) #else - call calendar(time-runtime0) + call calendar(time-runtime0) #endif call broadcast_scalar(istep0,master_task) istep1 = istep0 diff --git a/source/ice_therm_vertical.F90 b/source/ice_therm_vertical.F90 index 73491ce1..7a54934e 100755 --- a/source/ice_therm_vertical.F90 +++ b/source/ice_therm_vertical.F90 @@ -270,6 +270,8 @@ subroutine thermo_vertical (nx_block, ny_block, & istop = 0 jstop = 0 + enum = c0 + do j=1, ny_block do i=1, nx_block @@ -532,7 +534,6 @@ subroutine thermo_vertical (nx_block, ny_block, & istop, jstop, & fcondtopn_solve,fcondtopn_extra, & enum) - if (l_stop) return !----------------------------------------------------------------- From d8804b60b88e95640a28d49f70a6482985163b8c Mon Sep 17 00:00:00 2001 From: Spencer Wong <88933912+blimlim@users.noreply.github.com> Date: Thu, 19 Jun 2025 10:45:32 +1000 Subject: [PATCH 31/52] Replace `caltype` with `use_leap_years` (#42) * Replace caltype with use_leap_years * Abort on coupling namelist read error Co-authored-by: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> * Produce error when dt_cpl_io configured in namelist --------- Co-authored-by: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> --- drivers/access/cpl_parameters.F90 | 28 ++++++++++++++++++++-------- source/ice_calendar.F90 | 31 +++++++++++++++---------------- source/ice_history.F90 | 3 --- 3 files changed, 35 insertions(+), 27 deletions(-) diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index c433a8d0..db729287 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -63,10 +63,10 @@ module cpl_parameters integer(kind=int_kind) :: init_date = 00010101 !beginning date of this EXP (yyyymmdd) integer(kind=int_kind) :: dt_cice = 3600 !time step of this model (seconds) integer(kind=int_kind) :: dt_cpl_ai = 21600 !atm<==>ice coupling interval (seconds) -integer(kind=int_kind) :: dt_cpl_io = 21600 !ice<==>ocn coupling interval (seconds) -integer(kind=int_kind) :: caltype = 0 !calendar type: 0 (365daye/yr, 'Juilian' ) - ! 1 (365/366 days/yr, 'Gregorian') - ! n (n days/month) +integer(kind=int_kind) :: dt_cpl_io = -99 !ice<==>ocn coupling interval (seconds). + !Hardwired to equal dt_cice and should not + !be set in namelist. +integer(kind=int_kind) :: caltype = -99 !deprecated !integer(kind=int_kind) :: runtime0 !accumulated run time by the end of last run (s) real(kind=dbl_kind) :: runtime0 = 0.0 ! can be too large as int to read in correctly! integer(kind=int_kind) :: runtime = 86400 !the time length for this run segment (s) @@ -232,10 +232,22 @@ subroutine get_cpl_timecontrol call release_fileunit(nu_nml) if (nml_error /= 0) then - !!!call abort_ice('ice: error reading coupling') - write(6, *) - write(6, *)'XXX Warning: after reading coupling, nml_error = ',nml_error - write(6, *) + if (my_task == master_task) then + call abort_ice('ice: error reading coupling namelist in "input_ice.nml"') + endif +endif + +if (caltype /= -99) then + if (my_task == master_task) then + call abort_ice('ice: ERROR caltype deprecated. Remove from "input_ice.nml"') + endif +endif + +if (dt_cpl_io /= -99) then + if (my_task == master_task) then + call abort_ice('ice: ERROR dt_cpl_io should not be set in namelist. 
'// & + 'Remove from "input_ice.nml"') + endif endif !hardrwire dt_cpl_io == dt_cice diff --git a/source/ice_calendar.F90 b/source/ice_calendar.F90 index 4df0c065..f84a848e 100755 --- a/source/ice_calendar.F90 +++ b/source/ice_calendar.F90 @@ -21,7 +21,7 @@ module ice_calendar use ice_exit, only: abort_ice #ifdef AusCOM use cpl_parameters, only : inidate, iniday, inimon, iniyear, init_date - use cpl_parameters, only : il_out, caltype + use cpl_parameters, only : il_out use cpl_parameters, only : runtime0 !accumulated runtime by the end of last run #endif @@ -152,7 +152,7 @@ subroutine init_calendar end if #ifdef AusCOM - if ((days_year(year_init) == 366) .and. (caltype == 1)) days_per_year = 366 + if (days_year(year_init) == 366) days_per_year = 366 #endif write(*,*)'CICE (calendar) days_per_year = ', days_per_year @@ -541,7 +541,7 @@ subroutine set_calendar(year) if (mod(year,400) == 0) isleap = .true. ! Ensure the calendar is set correctly - if (isleap) then + if (isleap .and. use_leap_years) then daycal = daycal366 daymo = daymo366 dayyr=real(daycal(13), kind=dbl_kind) @@ -575,7 +575,7 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) inc_day = int ((ttime + 0.5)/86400. ) khfin = (ttime - inc_day*86400)/3600 -IF (caltype .eq. 0 .or. caltype .eq. 1) THEN +IF (days_per_year == 365 .or. days_per_year == 366) THEN ! ! 1. Length of the months @@ -588,7 +588,7 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) !* Leap years ! lleap = .FALSE. - IF (caltype .eq. 1) THEN + IF (use_leap_years) THEN IF (MOD(iniyear, 4) .eq. 0) lleap = .TRUE. IF (MOD(iniyear,100) .eq. 0) lleap = .FALSE. IF (MOD(iniyear,400) .eq. 0) lleap = .TRUE. @@ -597,7 +597,7 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) if (lleap) klmo(jm) = 29 ENDIF ENDDO !jm=1,12 - + kdfin = iniday kmfin = inimon kyfin = iniyear @@ -618,7 +618,7 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) !* Leap years ! lleap = .FALSE. - IF (caltype .eq. 1) THEN + IF (use_leap_years) THEN IF (MOD(kyfin, 4) .eq. 0) lleap = .TRUE. IF (MOD(kyfin,100) .eq. 0) lleap = .FALSE. IF (MOD(kyfin,400) .eq. 0) lleap = .TRUE. @@ -627,13 +627,13 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) if (lleap) klmo(2) = 29 210 CONTINUE -ELSE !for years with constant length of months +ELSEIF(days_per_year == 360) THEN ! ! 1. Calculate month lengths for current year ! DO jm = 1, 12 - klmo(jm) = caltype + klmo(jm) = 30 ENDDO kdfin = iniday kmfin = inimon @@ -655,32 +655,31 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) ENDIF + end subroutine get_idate !======================================================================= function days_year(year) -use cpl_parameters, only : caltype - implicit none integer, intent(in) :: year real (kind=dbl_kind) :: days_year logical :: lleap -IF (caltype .eq. 0 .or. caltype .eq. 1) THEN +IF (days_per_year == 365 .or. days_per_year == 366) THEN lleap = .FALSE. days_year = 365. - IF (caltype .eq. 1) THEN + IF (use_leap_years) THEN IF (MOD(year, 4) .eq. 0) lleap = .TRUE. IF (MOD(year,100) .eq. 0) lleap = .FALSE. IF (MOD(year,400) .eq. 0) lleap = .TRUE. ENDIF if (lleap) days_year = 366. -ELSE - days_year = dayyr -ENDIF +ELSEIF (days_per_year == 360) THEN + days_year = 360. 
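The same three-step leap-year test (divisible by 4, except century years, except every 400 years) is written out inline in set_calendar, get_idate and days_year above; for reference, a minimal standalone sketch of that rule, where the function name is_leap_year is illustrative and not part of the patch:

      logical function is_leap_year(year)
      implicit none
      integer, intent(in) :: year
      is_leap_year = .false.
      if (mod(year,   4) == 0) is_leap_year = .true.   ! every fourth year is a leap year
      if (mod(year, 100) == 0) is_leap_year = .false.  ! except century years
      if (mod(year, 400) == 0) is_leap_year = .true.   ! except every 400 years
      end function is_leap_year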
+ENDIF return end function days_year #endif diff --git a/source/ice_history.F90 b/source/ice_history.F90 index f37be927..d8d2a4db 100755 --- a/source/ice_history.F90 +++ b/source/ice_history.F90 @@ -32,9 +32,6 @@ module ice_history use ice_kinds_mod -#ifdef AusCOM - use cpl_parameters, only: caltype -#endif implicit none private From ce970cdaa640889dab9454cbd7efaf9f628a2e8a Mon Sep 17 00:00:00 2001 From: Spencer Wong <88933912+blimlim@users.noreply.github.com> Date: Thu, 19 Jun 2025 11:29:42 +1000 Subject: [PATCH 32/52] ESM1.6: Write current year to restart (#46) * Write model year as restart attribute --------- Co-authored-by: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> --- io_netcdf/ice_restart.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/io_netcdf/ice_restart.F90 b/io_netcdf/ice_restart.F90 index 484222c3..33acffe3 100644 --- a/io_netcdf/ice_restart.F90 +++ b/io_netcdf/ice_restart.F90 @@ -142,13 +142,12 @@ subroutine init_restart_write(filename_spec) character (len=3) :: nchar ! construct path/file + iyear = nyr + year_init - 1 if (present(filename_spec)) then filename = trim(filename_spec) else - iyear = nyr + year_init - 1 imonth = month iday = mday - write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & restart_dir(1:lenstr(restart_dir)), & restart_file(1:lenstr(restart_file)),'.', & @@ -170,7 +169,8 @@ subroutine init_restart_write(filename_spec) status = nf90_put_att(ncid,nf90_global,'istep1',istep1) status = nf90_put_att(ncid,nf90_global,'time',time) status = nf90_put_att(ncid,nf90_global,'time_forc',time_forc) - status = nf90_put_att(ncid,nf90_global,'nyr',nyr) + status = nf90_put_att(ncid,nf90_global,'nyr',nyr) ! year count since year_init + status = nf90_put_att(ncid,nf90_global,'year',iyear) ! calendar year status = nf90_put_att(ncid,nf90_global,'month',month) status = nf90_put_att(ncid,nf90_global,'mday',mday) status = nf90_put_att(ncid,nf90_global,'sec',sec) From 0bd0131c1c3d3ddeaf82490869b7973476d780e0 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Fri, 20 Jun 2025 15:17:50 +1000 Subject: [PATCH 33/52] Fail on non midnight restarts (#47) * Fail on non-midnight restarts, because the start time and date calculations don't support this with access/auscom mods. --- io_netcdf/ice_restart.F90 | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/io_netcdf/ice_restart.F90 b/io_netcdf/ice_restart.F90 index 33acffe3..2e7974ee 100644 --- a/io_netcdf/ice_restart.F90 +++ b/io_netcdf/ice_restart.F90 @@ -78,6 +78,13 @@ subroutine init_restart_read(ice_ic) status = nf90_get_att(ncid, nf90_global, 'mday', mday) status = nf90_get_att(ncid, nf90_global, 'sec', sec) endif + + if ( sec .ne. 0 ) then + call abort_ice('ice: restart ncfile '//trim(filename)//' has '//& + 'restart "sec" attribute not set to 0. This is not supported '//& + 'as a start time.') + endif + endif ! 
use namelist values if use_restart_time = F write(nu_diag,*) 'Restart read at istep=',istep0,time,time_forc From 381f5b4a179bf37246d1f98b8424b91046403895 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Thu, 3 Jul 2025 15:17:50 +1000 Subject: [PATCH 34/52] Use ifort for preprocessor directives (#52) --- bld/Makefile | 4 ++-- bld/Makefile.std | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bld/Makefile b/bld/Makefile index c2d232ff..3edf5900 100644 --- a/bld/Makefile +++ b/bld/Makefile @@ -113,11 +113,11 @@ $(EXEC): $(OBJS) cc $(CFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< .F.o: - $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f + $(FC) -P $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< -o $*.f $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR) $*.f .F90.o: - $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f90 + $(FC) -P $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< -o $*.f90 $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR) $*.f90 mostlyclean: diff --git a/bld/Makefile.std b/bld/Makefile.std index c2d232ff..3edf5900 100644 --- a/bld/Makefile.std +++ b/bld/Makefile.std @@ -113,11 +113,11 @@ $(EXEC): $(OBJS) cc $(CFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< .F.o: - $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f + $(FC) -P $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< -o $*.f $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR) $*.f .F90.o: - $(CPP) $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< > $*.f90 + $(FC) -P $(CPPFLAGS) $(CPPDEFS) $(INCS) $(INCLDIR) $< -o $*.f90 $(FC) -c $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR) $*.f90 mostlyclean: From fe901d67d4be5616d98c0179dea0cd6783b2b0f6 Mon Sep 17 00:00:00 2001 From: Spencer Wong <88933912+blimlim@users.noreply.github.com> Date: Thu, 3 Jul 2025 16:02:03 +1000 Subject: [PATCH 35/52] ESM1.6 Read start date and time from restart file (#48) * Hardcode init_date to 00010101 * Set runtime0, iniyear, iniday, inimon from restart file * Hardcode time=0 for calendar call during restart initialisation. Comment linking to frz_onset issue * Provide detailed namelist error messages Co-authored-by: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Co-authored-by: anton-seaice --- drivers/access/CICE_InitMod.F90 | 17 ++++--- drivers/access/cpl_parameters.F90 | 74 +++++++++++++++---------------- io_netcdf/ice_restart.F90 | 21 +++++++++ source/ice_calendar.F90 | 53 ++++++++++++++++++---- source/ice_init.F90 | 8 ++++ 5 files changed, 120 insertions(+), 53 deletions(-) diff --git a/drivers/access/CICE_InitMod.F90 b/drivers/access/CICE_InitMod.F90 index afd2df08..58bcc7e4 100644 --- a/drivers/access/CICE_InitMod.F90 +++ b/drivers/access/CICE_InitMod.F90 @@ -113,9 +113,7 @@ subroutine cice_init call get_cpl_timecontrol if (my_task == master_task) then write(il_out,*)' CICE (cice_init) 1 jobnum = ',jobnum - write(il_out,*)' CICE (cice_init) 1 inidate = ',inidate write(il_out,*)' CICE (cice_init) 1 init_date = ',init_date - write(il_out,*)' CICE (cice_init) 1 runtime0 = ',runtime0 write(il_out,*)' CICE (cice_init) 1 runtime = ',runtime write(il_out,*)' CICE (cice_init) 1 idate = ',my_task, idate !write(il_out,*)' CICE (cice_init) 1 runtype = ',runtype @@ -189,9 +187,10 @@ subroutine cice_init if (jobnum == 1 ) then time = 0.0 !NOTE, the first job must be set back to 0 and idate = idate_save !idate back to the 'initial' value, in any case + runtime0 = 0.0 else !BX: 20160720 - time = runtime0 !............ - endif + runtime0 = time ! 
Record initial time read from init_restart + endif #endif call init_diags ! initialize diagnostic output points @@ -216,6 +215,9 @@ subroutine cice_init if (my_task == master_task) then write(il_out,*) 'CICE (cice_init) 3 time = ', my_task, time write(il_out,*) 'CICE (cice_init) 3 runtime0 = ', my_task, runtime0 + write(il_out,*) 'CICE (cice_init) 3 iniyear = ', my_task, iniyear + write(il_out,*) 'CICE (cice_init) 3 inimon = ', my_task, inimon + write(il_out,*) 'CICE (cice_init) 3 iniday = ', my_task, iniday write(il_out,*) 'CICE (cice_init) 3 idate = ', my_task, idate end if #endif @@ -366,7 +368,12 @@ subroutine init_restart call restartfile() ! given by pointer in ice_in !ars599: 11042014: markout call calendar ! according to dhb599 initmod at cice4.1_fm - call calendar(time) ! update time parameters + ! TODO: 'time' argument in the calendar call below affects frz_onset output. + ! Hardcoding time=0.0 gives the same results as the older CM2 based calendar + ! initialisation. + ! See https://github.com/ACCESS-NRI/cice5/issues/49 + ! and https://github.com/ACCESS-NRI/cice5/pull/48 for details. + call calendar(0.0) ! update time parameters if (kdyn == 2) call read_restart_eap ! EAP else if (restart) then ! ice_ic = core restart file call restartfile (ice_ic) ! or 'default' or 'none' diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index db729287..bd1aea8a 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -1,3 +1,4 @@ + !============================================================================ ! module cpl_parameters @@ -8,6 +9,12 @@ module cpl_parameters implicit none +#ifdef __INTEL_COMPILER +! for intel runtime errors +! see https://www.intel.com/content/www/us/en/docs/fortran-compiler/developer-guide-reference/2025-2/list-of-runtime-error-messages.html +include "for_iosdef.for" +#endif + integer(kind=int_kind) :: il_im, il_jm, il_imjm ! il_im=nx_global, il_jm=ny_global ! assigned in prism_init integer (kind=int_kind) :: xdim, ydim @@ -59,16 +66,17 @@ module cpl_parameters chk_i2o_fields = .false. , & chk_o2i_fields = .false. integer(kind=int_kind) :: jobnum = 1 !1 for initial, >1 restart -integer(kind=int_kind) :: inidate = 01010101 !beginning date of this run (yyyymmdd) integer(kind=int_kind) :: init_date = 00010101 !beginning date of this EXP (yyyymmdd) +integer(kind=int_kind) :: iniday = 1, & ! beginning date of this run. Read from restart + inimon = 1, & + iniyear = 1 integer(kind=int_kind) :: dt_cice = 3600 !time step of this model (seconds) integer(kind=int_kind) :: dt_cpl_ai = 21600 !atm<==>ice coupling interval (seconds) integer(kind=int_kind) :: dt_cpl_io = -99 !ice<==>ocn coupling interval (seconds). !Hardwired to equal dt_cice and should not !be set in namelist. -integer(kind=int_kind) :: caltype = -99 !deprecated -!integer(kind=int_kind) :: runtime0 !accumulated run time by the end of last run (s) -real(kind=dbl_kind) :: runtime0 = 0.0 ! can be too large as int to read in correctly! +real(kind=dbl_kind) :: runtime0 = 0.0 !accumulated runtime from init_date to + !run start date integer(kind=int_kind) :: runtime = 86400 !the time length for this run segment (s) !20100305: Harry Henden suggests turning off ocean current into UM might reduce the @@ -104,15 +112,10 @@ module cpl_parameters add_lprec = 0.513190E-07 !kg/m2/s. ==> set to 0.0 if no fixin! 
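A minimal sketch (not part of the patch) of the add_lprec arithmetic described earlier in this series: a global ocean water loss of about 0.18543417E+08 kg/s spread over an ocean surface area of about 0.36133599E+15 m2 gives the uniform compensating flux added to the liquid precipitation field; loss_rate, ocean_area and the small lprec stand-in array are illustrative names only.

      program add_lprec_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real(kind=dbl_kind), parameter :: loss_rate  = 0.18543417e+08_dbl_kind  ! kg/s lost by the ocean
      real(kind=dbl_kind), parameter :: ocean_area = 0.36133599e+15_dbl_kind  ! m2 of ocean surface
      real(kind=dbl_kind) :: add_lprec
      real(kind=dbl_kind) :: lprec(4,4)       ! stand-in for the coupled lprec field
      add_lprec = loss_rate / ocean_area      ! ~0.513190E-07 kg/m2/s, matching the comment above
      lprec = 0.0_dbl_kind
      lprec = lprec + add_lprec               ! uniform correction applied to the whole field
      print '(a,es14.6)', ' add_lprec = ', add_lprec
      end program add_lprec_sketch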
namelist/coupling/ & - caltype, & jobnum, & - inidate, & - init_date, & - runtime0, & runtime, & dt_cice, & dt_cpl_ai, & - dt_cpl_io, & inputdir, & restartdir, & pop_icediag, & @@ -150,7 +153,6 @@ module cpl_parameters chk_i2o_fields, & chk_o2i_fields -integer(kind=int_kind) :: iniday, inimon, iniyear !from inidate real(kind=dbl_kind) :: coef_ai !dt_ice/dt_cpl_ai, for i2a fields tavg real(kind=dbl_kind) :: frazil_factor = 0.5 @@ -187,23 +189,20 @@ subroutine get_cpl_timecontrol_simple coef_ai = float(dt_cice)/float(dt_cpl_ai) -iniday = mod(inidate, 100) -inimon = mod( (inidate - iniday)/100, 100) -iniyear = inidate / 10000 - return end subroutine get_cpl_timecontrol_simple !=============================================================================== -subroutine get_cpl_timecontrol -use ice_exit -use ice_fileunits +subroutine get_cpl_timecontrol +use ice_exit, only: abort_ice +use ice_fileunits, only: nu_nml, ice_stderr, ice_stdout, get_fileunit, release_fileunit use ice_communicate, only: my_task, master_task implicit none integer (int_kind) :: nml_error ! namelist read error flag +character (len=256) :: errstr, tmpstr ! For holding namelist read errors ! all processors read the namelist-- @@ -211,7 +210,7 @@ subroutine get_cpl_timecontrol open(unit=nu_nml,file="input_ice.nml",form="formatted",status="old",iostat=nml_error) ! if (my_task == master_task) then - write(6,*)'CICE: input_ice.nml opened at unit = ', nu_nml + write(ice_stdout,*)'CICE: input_ice.nml opened at unit = ', nu_nml endif ! if (nml_error /= 0) then @@ -219,9 +218,25 @@ subroutine get_cpl_timecontrol else nml_error = 1 endif + do while (nml_error > 0) - read(nu_nml, nml=coupling,iostat=nml_error) - if (nml_error > 0) read(nu_nml,*) ! for Nagware compiler + read(nu_nml, nml=coupling,iostat=nml_error,iomsg=errstr) + ! check if error + if (nml_error /= 0) then + if (my_task == master_task) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt=*) tmpstr +#ifdef __INTEL_COMPILER + if (nml_error == FOR$IOS_INVREFVAR) then + write(ice_stderr,*)'CICE: Invalid reference to variable '//trim(tmpstr) + write(ice_stderr,*)'CICE: is '//trim(tmpstr)//' deprecated ?' + endif +#endif + call abort_ice('CICE ERROR in input_ice.nml when' // & + ' reading ' // trim(tmpstr) // ' - ' //errstr) + endif + endif end do if (nml_error == 0) close(nu_nml) @@ -237,19 +252,6 @@ subroutine get_cpl_timecontrol endif endif -if (caltype /= -99) then - if (my_task == master_task) then - call abort_ice('ice: ERROR caltype deprecated. Remove from "input_ice.nml"') - endif -endif - -if (dt_cpl_io /= -99) then - if (my_task == master_task) then - call abort_ice('ice: ERROR dt_cpl_io should not be set in namelist. 
'// & - 'Remove from "input_ice.nml"') - endif -endif - !hardrwire dt_cpl_io == dt_cice dt_cpl_io = dt_cice @@ -260,12 +262,6 @@ subroutine get_cpl_timecontrol coef_ai = float(dt_cice)/float(dt_cpl_ai) -iniday = mod(inidate, 100) -inimon = mod( (inidate - iniday)/100, 100) -iniyear = inidate / 10000 - -!!idate = inidate - return end subroutine get_cpl_timecontrol diff --git a/io_netcdf/ice_restart.F90 b/io_netcdf/ice_restart.F90 index 2e7974ee..9de8aa31 100644 --- a/io_netcdf/ice_restart.F90 +++ b/io_netcdf/ice_restart.F90 @@ -35,6 +35,10 @@ subroutine init_restart_read(ice_ic) use ice_calendar, only: sec, month, mday, nyr, istep0, istep1, & time, time_forc, year_init, npt +#ifdef ACCESS + use cpl_parameters, only: iniyear, inimon, iniday + use ice_calendar, only: check_start_date +#endif use ice_communicate, only: my_task, master_task use ice_domain, only: nblocks use ice_fileunits, only: nu_diag, nu_rst_pointer @@ -47,6 +51,7 @@ subroutine init_restart_read(ice_ic) filename, filename0 integer (kind=int_kind) :: status + integer (kind=int_kind) :: year if (present(ice_ic)) then filename = trim(ice_ic) @@ -73,6 +78,9 @@ subroutine init_restart_read(ice_ic) status = nf90_get_att(ncid, nf90_global, 'time', time) status = nf90_get_att(ncid, nf90_global, 'time_forc', time_forc) status = nf90_get_att(ncid, nf90_global, 'nyr', nyr) + status = nf90_get_att(ncid, nf90_global, 'year', year) + if (status /= nf90_noerr) call abort_ice( & + 'ice: Error reading year attribute from ncfile '//trim(filename)) if (status == nf90_noerr) then status = nf90_get_att(ncid, nf90_global, 'month', month) status = nf90_get_att(ncid, nf90_global, 'mday', mday) @@ -93,6 +101,19 @@ subroutine init_restart_read(ice_ic) call broadcast_scalar(istep0,master_task) call broadcast_scalar(time,master_task) call broadcast_scalar(time_forc,master_task) + +#ifdef ACCESS + ! Set run start date + call broadcast_scalar(year,master_task) + call broadcast_scalar(month,master_task) + call broadcast_scalar(mday,master_task) + iniyear = year + inimon = month + iniday = mday + + ! Check starting date and time are consistent + call check_start_date +#endif istep1 = istep0 diff --git a/source/ice_calendar.F90 b/source/ice_calendar.F90 index f84a848e..e0c01fbe 100755 --- a/source/ice_calendar.F90 +++ b/source/ice_calendar.F90 @@ -20,7 +20,7 @@ module ice_calendar use ice_domain_size, only: max_nstrm use ice_exit, only: abort_ice #ifdef AusCOM - use cpl_parameters, only : inidate, iniday, inimon, iniyear, init_date + use cpl_parameters, only : iniday, inimon, iniyear, init_date use cpl_parameters, only : il_out use cpl_parameters, only : runtime0 !accumulated runtime by the end of last run #endif @@ -30,6 +30,9 @@ module ice_calendar save public :: init_calendar, calendar, time2sec, sec2time +#ifdef ACCESS + public :: check_start_date +#endif integer (kind=int_kind), public :: & days_per_year , & ! number of days in one year @@ -558,6 +561,8 @@ end subroutine set_calendar #ifdef AusCOM !======================================================================= subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) +! Calculate the date ttime seconds from the run start date given by iniyear +! inimon and iniday. use cpl_parameters @@ -572,13 +577,18 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) logical :: lleap +! Initialise date inc_day = int ((ttime + 0.5)/86400. ) khfin = (ttime - inc_day*86400)/3600 +kdfin = iniday +kmfin = inimon +kyfin = iniyear + IF (days_per_year == 365 .or. days_per_year == 366) THEN ! - ! 1. 
Length of the months + ! 1. Length of the months in initial year ! DO jm = 1, 12 klmo(jm) = 31 @@ -598,10 +608,6 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) ENDIF ENDDO !jm=1,12 - kdfin = iniday - kmfin = inimon - kyfin = iniyear - ! ! 2. Loop on the days ! @@ -635,9 +641,6 @@ subroutine get_idate(ttime, khfin, kdfin, kmfin, kyfin) DO jm = 1, 12 klmo(jm) = 30 ENDDO - kdfin = iniday - kmfin = inimon - kyfin = iniyear ! ! 2. Loop on the days @@ -683,6 +686,38 @@ function days_year(year) return end function days_year #endif + +!======================================================================= + +#ifdef ACCESS + subroutine check_start_date + ! Check that the start date and time variables from the restart file + ! are consistent. + use ice_communicate, only: my_task, master_task + implicit none + + integer(kind=int_kind) :: init_year, init_mon, init_day + real (kind=dbl_kind) :: sec_init_date, sec_start_date, sec_init_to_start + + init_day = mod(init_date, 100) + init_mon = mod( (init_date - init_day)/100, 100) + init_year = init_date / 10000 + + call time2sec(init_year, init_mon, init_day, sec_init_date) + call time2sec(iniyear, inimon, iniday, sec_start_date) + + sec_init_to_start = sec_start_date - sec_init_date + + if (sec_init_to_start /= time) then + if (my_task == master_task) then + write(il_out,*) 'CICE: ERROR restart time: ', time, ' and date: ', & + iniyear, inimon, iniday, ' are inconsistent' + call abort_ice('CICE: ERROR Restart file time and date variables are inconsistent') + endif + endif + + end subroutine check_start_date +#endif !======================================================================= end module ice_calendar diff --git a/source/ice_init.F90 b/source/ice_init.F90 index 759c75a5..e5f36b29 100755 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -712,6 +712,14 @@ subroutine input_data fbot_xfer_type = 'constant' endif +#ifdef ACCESS + if (trim(runtype) == 'continue' .and. .not. use_restart_time) then + if (my_task == master_task) then + write (nu_diag,*) 'ERROR: ACCESS ESM continue runs require use_restart_time=.true.' + call abort_ice('ice_init: "use_restart_time" must be .true. when "runtype = continue"') + endif + endif +#endif call broadcast_scalar(days_per_year, master_task) call broadcast_scalar(use_leap_years, master_task) From 3ce7ce803fb1594fc8f70102ecb28ae48e73c229 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Fri, 4 Jul 2025 14:01:45 +1000 Subject: [PATCH 36/52] Set aicenmin consistent with UM7.3 (#54) Co-authored-by: Siobhan O'Farrell --- source/ice_itd.F90 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/ice_itd.F90 b/source/ice_itd.F90 index 16d36f43..5d561029 100755 --- a/source/ice_itd.F90 +++ b/source/ice_itd.F90 @@ -242,7 +242,8 @@ subroutine init_itd (calc_Tsfc, heat_capacity) ! Alex West: added these two ar hi_min = p2 ! 0.2m hs_min = p1 ! 0.1m else - aicenmin = puny ! Standard CICE setting + ! aicenmin = puny ! Standard CICE setting + aicenmin = 2.0e-4_dbl_kind ! 
Same as setting in UM7.3 for ESM1.6 endif if (my_task == master_task) then From e8720da8a63c86d60ce8dca3042fa9d7b3fdbd55 Mon Sep 17 00:00:00 2001 From: Spencer Wong <88933912+blimlim@users.noreply.github.com> Date: Tue, 15 Jul 2025 11:30:59 +1000 Subject: [PATCH 37/52] Mask iceberg flux (#55) --- drivers/access/cpl_forcing_handler.F90 | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index 5b649e5a..62a755c0 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ -427,7 +427,16 @@ subroutine get_lice_discharge(fname) write(il_out,'(a,a)') '(get_lice_discharge) reading in iceberg data, myvar= ',trim(myvar) do im = 1, 12 write(il_out,*) '(get_lice_discharge) reading in data, month= ',im - call ice_read_global_nc(ncid_i2o, im, trim(myvar), gwork, dbug) + call ice_read_nc(ncid_i2o, im, trim(myvar), vwork, dbug) + + ! Restrict iceberg fluxes to ocean points + where (tmask) + vwork = vwork + else where + vwork = c0 + end where + + call gather_global(gwork, vwork, master_task, distrb_info) if ( my_task == master_task ) then gicebergfw(:,:,im) = gwork(:,:) From 831e2eb13e69d97622340b49730b60d0a320be2b Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Wed, 23 Jul 2025 09:37:51 +1000 Subject: [PATCH 38/52] Merge netcdf code from master branch (#57) --------- Co-authored-by: Nic Hannah --- drivers/access/cpl_parameters.F90 | 2 +- io_netcdf/ice_history_write.F90 | 1450 ++++++++++++++++++++--------- io_netcdf/ice_restart.F90 | 103 +- source/ice_domain.F90 | 40 +- source/ice_history_shared.F90 | 13 + source/ice_init.F90 | 27 +- source/ice_restart_driver.F90 | 2 +- source/ice_restart_shared.F90 | 2 - 8 files changed, 1164 insertions(+), 475 deletions(-) diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index bd1aea8a..1b4d2dcc 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -252,7 +252,7 @@ subroutine get_cpl_timecontrol endif endif -!hardrwire dt_cpl_io == dt_cice +!hardwire dt_cpl_io == dt_cice dt_cpl_io = dt_cice ! * make sure runtime is mutliple of dt_cpl_ai, dt_cpl_ai is mutliple of dt_cpl_io, diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index cf3cb338..73dfab72 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -19,15 +19,66 @@ module ice_history_write + use netcdf + use mpi, only: MPI_INFO_NULL, MPI_COMM_WORLD + + use ice_kinds_mod + use ice_constants, only: c0, c360, secday, spval, rad_to_deg + use ice_blocks, only: nx_block, ny_block, block, get_block + use ice_exit, only: abort_ice + use ice_domain, only: distrb_info, nblocks, blocks_ice + use ice_domain, only: equal_num_blocks_per_cpu + use ice_communicate, only: my_task, master_task, MPI_COMM_ICE + use ice_broadcast, only: broadcast_scalar + use ice_gather_scatter, only: gather_global + use ice_domain_size, only: nx_global, ny_global, max_nstrm, max_blocks + use ice_grid, only: TLON, TLAT, ULON, ULAT, hm, bm, tarea, uarea, & + dxu, dxt, dyu, dyt, HTN, HTE, ANGLE, ANGLET, & + lont_bounds, latt_bounds, lonu_bounds, latu_bounds + use ice_history_shared + use ice_itd, only: hin_max + use ice_calendar, only: write_ic, histfreq + + implicit none private public :: ice_write_hist save - -!======================================================================= + + type coord_attributes ! 
netcdf coordinate attributes + character (len=11) :: short_name + character (len=45) :: long_name + character (len=20) :: units + end type coord_attributes + + type req_attributes ! req'd netcdf attributes + type (coord_attributes) :: req + character (len=20) :: coordinates + end type req_attributes + + ! 4 coordinate variables: TLON, TLAT, ULON, ULAT + INTEGER (kind=int_kind), PARAMETER :: ncoord = 4 + + ! 4 vertices in each grid cell + INTEGER (kind=int_kind), PARAMETER :: nverts = 4 + + ! 4 variables describe T, U grid boundaries: + ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds + INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 4 contains + +subroutine check(status, msg) + integer, intent (in) :: status + character(len=*), intent (in) :: msg + + if(status /= nf90_noerr) then + call abort_ice('ice: NetCDF error '//trim(nf90_strerror(status)//' '//trim(msg))) + end if +end subroutine check + + !======================================================================= ! ! write average ice quantities or snapshots @@ -36,33 +87,18 @@ module ice_history_write subroutine ice_write_hist (ns) - use ice_kinds_mod -#ifdef ncdf - use ice_blocks, only: nx_block, ny_block - use ice_broadcast, only: broadcast_scalar - use ice_calendar, only: time, sec, idate, idate0, write_ic, & - histfreq, dayyr, days_per_year, use_leap_years, month, daymo - use ice_communicate, only: my_task, master_task - use ice_constants, only: c0, c360, secday, spval, rad_to_deg - use ice_domain, only: distrb_info - use ice_domain_size, only: nx_global, ny_global, max_nstrm, max_blocks - use ice_exit, only: abort_ice + use ice_calendar, only: time, sec, idate, idate0, & +#ifdef ACCESS + month, daymo, & +#endif + dayyr, days_per_year, use_leap_years use ice_fileunits, only: nu_diag - use ice_gather_scatter, only: gather_global - use ice_grid, only: TLON, TLAT, ULON, ULAT, hm, bm, tarea, uarea, & - dxu, dxt, dyu, dyt, HTN, HTE, ANGLE, ANGLET, & - lont_bounds, latt_bounds, lonu_bounds, latu_bounds - use ice_history_shared - use ice_itd, only: hin_max use ice_restart_shared, only: runid - use netcdf -#endif integer (kind=int_kind), intent(in) :: ns ! local variables -#ifdef ncdf real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 real (kind=real_kind), dimension(:,:), allocatable :: work_gr real (kind=real_kind), dimension(:,:,:), allocatable :: work_gr3 @@ -71,7 +107,7 @@ subroutine ice_write_hist (ns) integer (kind=int_kind) :: i,k,ic,n,nn, & ncid,status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid,varid, & - nvertexid,ivertex,iflag + nvertexid,ivertex integer (kind=int_kind), dimension(3) :: dimid integer (kind=int_kind), dimension(4) :: dimidz integer (kind=int_kind), dimension(5) :: dimidcz @@ -81,40 +117,34 @@ subroutine ice_write_hist (ns) character (char_len) :: title character (char_len_long) :: ncfile(max_nstrm) + integer (kind=int_kind) :: shuffle, deflate, deflate_level + integer (kind=int_kind) :: ind,boundid character (char_len) :: start_time,current_date,current_time character (len=8) :: cdate - ! 4 coordinate variables: TLON, TLAT, ULON, ULAT - INTEGER (kind=int_kind), PARAMETER :: ncoord = 4 - - ! 4 vertices in each grid cell - INTEGER (kind=int_kind), PARAMETER :: nverts = 4 - - ! 4 variables describe T, U grid boundaries: - ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds - INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 4 - - TYPE coord_attributes ! 
netcdf coordinate attributes - character (len=11) :: short_name - character (len=45) :: long_name - character (len=20) :: units - END TYPE coord_attributes - - TYPE req_attributes ! req'd netcdf attributes - type (coord_attributes) :: req - character (len=20) :: coordinates - END TYPE req_attributes - TYPE(req_attributes), dimension(nvar) :: var TYPE(coord_attributes), dimension(ncoord) :: coord_var TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts TYPE(coord_attributes), dimension(nvarz) :: var_nz CHARACTER (char_len), dimension(ncoord) :: coord_bounds - if (my_task == master_task) then -#if defined(AusCOM) || defined(ACCESS) + ! We leave shuffle at 0, this is only useful for integer data. + shuffle = 0 + + ! If history_deflate_level < 0 then don't do deflation, + ! otherwise it sets the deflate level + if (history_deflate_level < 0) then + deflate = 0 + deflate_level = 0 + else + deflate = 1 + deflate_level = history_deflate_level + endif + + if (my_task == master_task .or. history_parallel_io) then +#if defined(ACCESS) ! set timestamp in middle of time interval if (histfreq(ns) == 'm' .or. histfreq(ns) == 'M') then if (month /= 1) then @@ -141,92 +171,74 @@ subroutine ice_write_hist (ns) endif ! create file - iflag = ior(NF90_NETCDF4, NF90_CLOBBER) - status = nf90_create(ncfile(ns), iflag, ncid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error creating history ncfile '//ncfile(ns)) + if (history_parallel_io) then + call check(nf90_create(ncfile(ns), ior(NF90_NETCDF4, NF90_MPIIO), ncid, & + comm=MPI_COMM_ICE, info=MPI_INFO_NULL), & + 'create history ncfile '//ncfile(ns)) + if (.not. equal_num_blocks_per_cpu) then + call abort_ice('ice: error history_parallel_io needs equal_num_blocks_per_cpu') + endif + else + call check(nf90_create(ncfile(ns), ior(NF90_CLASSIC_MODEL, NF90_HDF5), ncid), & + 'create history ncfile '//ncfile(ns)) + endif !----------------------------------------------------------------- ! 
define dimensions !----------------------------------------------------------------- if (hist_avg) then - status = nf90_def_dim(ncid,'d2',2,boundid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim d2') + call check(nf90_def_dim(ncid,'d2',2,boundid), 'def dim d2') endif - status = nf90_def_dim(ncid,'ni',nx_global,imtid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim ni') - - status = nf90_def_dim(ncid,'nj',ny_global,jmtid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim nj') - - status = nf90_def_dim(ncid,'nc',ncat_hist,cmtid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim nc') - - status = nf90_def_dim(ncid,'nkice',nzilyr,kmtidi) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim nki') - - status = nf90_def_dim(ncid,'nksnow',nzslyr,kmtids) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim nks') - - status = nf90_def_dim(ncid,'nkbio',nzblyr,kmtidb) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim nkb') - - status = nf90_def_dim(ncid,'time',NF90_UNLIMITED,timid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim time') - - status = nf90_def_dim(ncid,'nvertices',nverts,nvertexid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining dim nverts') + call check(nf90_def_dim(ncid, 'ni', nx_global, imtid), & + 'def dim ni') + call check(nf90_def_dim(ncid, 'nj', ny_global, jmtid), & + 'def dim nj') + call check(nf90_def_dim(ncid, 'nc', ncat_hist, cmtid), & + 'def dim nc') + call check(nf90_def_dim(ncid, 'nkice', nzilyr, kmtidi), & + 'def dim nkice') + call check(nf90_def_dim(ncid, 'nksnow', nzslyr, kmtids), & + 'def dim nksnow') + call check(nf90_def_dim(ncid, 'nkbio', nzblyr, kmtidb), & + 'def dim nkbio') + call check(nf90_def_dim(ncid, 'time', NF90_UNLIMITED, timid), & + 'def dim time') + call check(nf90_def_dim(ncid, 'nvertices', nverts, nvertexid), & + 'def dim nverts') !----------------------------------------------------------------- ! define coordinate variables !----------------------------------------------------------------- - status = nf90_def_var(ncid,'time',nf90_float,timid,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining var time') - - status = nf90_put_att(ncid,varid,'long_name','model time') - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: time long_name') + call check(nf90_def_var(ncid,'time',nf90_float,timid,varid), & + 'def var time') + call check(nf90_put_att(ncid,varid,'long_name','model time'), & + 'put_att long_name') write(cdate,'(i8.8)') idate0 write(title,'(a,a,a,a,a,a,a,a)') 'days since ', & cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' 00:00:00' - status = nf90_put_att(ncid,varid,'units',title) - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: time units') + call check(nf90_put_att(ncid,varid,'units',title), & + 'put_att time units') if (days_per_year == 360) then - status = nf90_put_att(ncid,varid,'calendar','360_day') - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: time calendar') + call check(nf90_put_att(ncid,varid,'calendar','360_day'), & + 'att time calendar') elseif (days_per_year == 365 .and. 
.not.use_leap_years ) then - status = nf90_put_att(ncid,varid,'calendar','NoLeap') - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: time calendar') + call check(nf90_put_att(ncid,varid,'calendar','NoLeap'), & + 'att time calendar') elseif (use_leap_years) then - status = nf90_put_att(ncid,varid,'calendar','proleptic_gregorian') - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: time calendar') + call check(nf90_put_att(ncid,varid,'calendar','proleptic_gregorian'), & + 'att time calendar') else call abort_ice( 'ice Error: invalid calendar settings') endif if (hist_avg) then - status = nf90_put_att(ncid,varid,'bounds','time_bounds') - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: time bounds') + call check(nf90_put_att(ncid,varid,'bounds','time_bounds'), & + 'att time bounds') endif !----------------------------------------------------------------- @@ -236,19 +248,18 @@ subroutine ice_write_hist (ns) if (hist_avg) then dimid(1) = boundid dimid(2) = timid - status = nf90_def_var(ncid,'time_bounds',nf90_float,dimid(1:2),varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining var time_bounds') - status = nf90_put_att(ncid,varid,'long_name', & - 'boundaries for time-averaging interval') - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: time_bounds long_name') + call check(nf90_def_var(ncid, 'time_bounds', & + nf90_float,dimid(1:2),varid), & + 'def var time_bounds') + + call check(nf90_put_att(ncid,varid,'long_name', & + 'boundaries for time-averaging interval'), & + 'att time_bounds long_name') write(cdate,'(i8.8)') idate0 write(title,'(a,a,a,a,a,a,a,a)') 'days since ', & cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' 00:00:00' - status = nf90_put_att(ncid,varid,'units',title) - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: time_bounds units') + call check(nf90_put_att(ncid,varid,'units',title), & + 'att time_bounds units') endif !----------------------------------------------------------------- @@ -335,110 +346,144 @@ subroutine ice_write_hist (ns) dimid(3) = timid do i = 1, ncoord - status = nf90_def_var(ncid, coord_var(i)%short_name, nf90_float, & - dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining short_name for '//coord_var(i)%short_name) - status = nf90_put_att(ncid,varid,'long_name',coord_var(i)%long_name) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining long_name for '//coord_var(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', coord_var(i)%units) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining units for '//coord_var(i)%short_name) - status = nf90_put_att(ncid,varid,'missing_value',spval) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining missing_value for '//coord_var(i)%short_name) - status = nf90_put_att(ncid,varid,'_FillValue',spval) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining _FillValue for '//coord_var(i)%short_name) + call check(nf90_def_var(ncid, coord_var(i)%short_name, nf90_float, & + dimid(1:2), varid), & + 'def var '//coord_var(i)%short_name) + + if (history_chunksize_x > 0 .and. 
history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y /)), & + 'def var chunking '//coord_var(i)%short_name) + endif + + call check(nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level), & + 'deflate '//coord_var(i)%short_name) + + call check(nf90_put_att(ncid,varid,'long_name',coord_var(i)%long_name), & + 'put att long_name '//coord_var(i)%short_name) + call check(nf90_put_att(ncid, varid, 'units', coord_var(i)%units), & + 'put att units '//coord_var(i)%short_name) + call check(nf90_put_att(ncid,varid,'missing_value',spval), & + 'put att missing_value '//coord_var(i)%short_name) + + call check(nf90_put_att(ncid, varid, '_FillValue', spval), & + 'put att _FillValue '//coord_var(i)%short_name) + if (coord_var(i)%short_name == 'ULAT') then - status = nf90_put_att(ncid,varid,'comment', & - 'Latitude of NE corner of T grid cell') - if (status /= nf90_noerr) call abort_ice( & - 'Error defining comment for '//coord_var(i)%short_name) + call check(nf90_put_att(ncid,varid,'comment', & + 'Latitude of NE corner of T grid cell'), & + 'put att comment for '//coord_var(i)%short_name) endif if (f_bounds) then - status = nf90_put_att(ncid, varid, 'bounds', coord_bounds(i)) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining bounds for '//coord_var(i)%short_name) - endif + call check(nf90_put_att(ncid, varid, 'bounds', coord_bounds(i)), & + 'put att bounds '//coord_var(i)%short_name) + endif + enddo + ! Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR) dimidex(1)=cmtid dimidex(2)=kmtidi dimidex(3)=kmtids dimidex(4)=kmtidb - + do i = 1, nvarz if (igrdz(i)) then - status = nf90_def_var(ncid, var_nz(i)%short_name, & - nf90_float, dimidex(i), varid) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining short_name for '//var_nz(i)%short_name) - status = nf90_put_att(ncid,varid,'long_name',var_nz(i)%long_name) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining long_name for '//var_nz(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', var_nz(i)%units) - if (Status /= nf90_noerr) call abort_ice( & - 'Error defining units for '//var_nz(i)%short_name) + call check(nf90_def_var(ncid, var_nz(i)%short_name, & + nf90_float, dimidex(i), varid), & + 'def var '//var_nz(i)%short_name) + + call check(nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level), & + 'deflate '//var_nz(i)%short_name) + + call check(nf90_put_att(ncid,varid,'long_name',var_nz(i)%long_name), & + 'put att long_name '//var_nz(i)%short_name) + call check(nf90_put_att(ncid, varid, 'units', var_nz(i)%units), & + 'for att units '//var_nz(i)%short_name) endif enddo ! 
Attributes for tmask, blkmask defined separately, since they have no units if (igrd(n_tmask)) then - status = nf90_def_var(ncid, 'tmask', nf90_float, dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining var tmask') - status = nf90_put_att(ncid,varid, 'long_name', 'ocean grid mask') - if (status /= nf90_noerr) call abort_ice('ice Error: tmask long_name') - status = nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT') - if (status /= nf90_noerr) call abort_ice('ice Error: tmask units') - status = nf90_put_att(ncid,varid,'comment', '0 = land, 1 = ocean') - if (status /= nf90_noerr) call abort_ice('ice Error: tmask comment') - status = nf90_put_att(ncid,varid,'missing_value',spval) - if (status /= nf90_noerr) call abort_ice('Error defining missing_value for tmask') - status = nf90_put_att(ncid,varid,'_FillValue',spval) - if (status /= nf90_noerr) call abort_ice('Error defining _FillValue for tmask') + call check(nf90_def_var(ncid, 'tmask', nf90_float, dimid(1:2), varid), & + 'def var tmask') + + if (history_chunksize_x > 0 .and. history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y /)), & + 'def var chunking tmask') + endif + + call check(nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level), 'deflating var tmask') + + call check(nf90_put_att(ncid,varid, 'long_name', 'ocean grid mask'), & + 'put att tmask long_name') + call check(nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT'), & + 'put att tmask units') + call check(nf90_put_att(ncid,varid,'comment', '0 = land, 1 = ocean'), & + 'put att tmask comment') + call check(nf90_put_att(ncid,varid,'missing_value',spval), & + 'put att missing_value for tmask') + call check(nf90_put_att(ncid,varid,'_FillValue',spval), & + 'put att _FillValue for tmask') endif if (igrd(n_blkmask)) then - status = nf90_def_var(ncid, 'blkmask', nf90_float, dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error defining var blkmask') - status = nf90_put_att(ncid,varid, 'long_name', 'ice grid block mask') - if (status /= nf90_noerr) call abort_ice('ice Error: blkmask long_name') - status = nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT') - if (status /= nf90_noerr) call abort_ice('ice Error: blkmask units') - status = nf90_put_att(ncid,varid,'comment', 'mytask + iblk/100') - if (status /= nf90_noerr) call abort_ice('ice Error: blkmask comment') - status = nf90_put_att(ncid,varid,'missing_value',spval) - if (status /= nf90_noerr) call abort_ice('Error defining missing_value for blkmask') - status = nf90_put_att(ncid,varid,'_FillValue',spval) - if (status /= nf90_noerr) call abort_ice('Error defining _FillValue for blkmask') + call check(nf90_def_var(ncid, 'blkmask', nf90_float, dimid(1:2), varid), & + 'def var blkmask') + + if (history_chunksize_x > 0 .and. 
history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y /)), & + 'def var chunking blkmask') + endif + + call check(nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level), & + 'deflating var blkmask') + + call check(nf90_put_att(ncid,varid, 'long_name', 'ice grid block mask'), & + 'put att blkmask long_name') + call check(nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT'), & + 'put att blkmask coordinates') + call check(nf90_put_att(ncid,varid,'comment', 'mytask + iblk/100'), & + 'put att blkmask comment') + call check(nf90_put_att(ncid,varid,'missing_value',spval), & + 'put att blkmask missing_value') + call check(nf90_put_att(ncid,varid,'_FillValue',spval), & + 'put att blkmask _FillValue') endif do i = 3, nvar ! note n_tmask=1, n_blkmask=2 if (igrd(i)) then - status = nf90_def_var(ncid, var(i)%req%short_name, & - nf90_float, dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining variable '//var(i)%req%short_name) - status = nf90_put_att(ncid,varid, 'long_name', var(i)%req%long_name) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining long_name for '//var(i)%req%short_name) - status = nf90_put_att(ncid, varid, 'units', var(i)%req%units) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining units for '//var(i)%req%short_name) - status = nf90_put_att(ncid, varid, 'coordinates', var(i)%coordinates) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining coordinates for '//var(i)%req%short_name) - status = nf90_put_att(ncid,varid,'missing_value',spval) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining missing_value for '//var(i)%req%short_name) - status = nf90_put_att(ncid,varid,'_FillValue',spval) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining _FillValue for '//var(i)%req%short_name) + call check(nf90_def_var(ncid, var(i)%req%short_name, & + nf90_float, dimid(1:2), varid), & + 'def variable '//var(i)%req%short_name) + + if (history_chunksize_x > 0 .and. 
history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y /)), & + 'def var chunking '//var(i)%req%short_name) + endif + + call check(nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level), & + 'deflate var '//var(i)%req%short_name) + + call check(nf90_put_att(ncid,varid, 'long_name', var(i)%req%long_name), & + 'put att long_name '//var(i)%req%short_name) + call check(nf90_put_att(ncid, varid, 'units', var(i)%req%units), & + 'put att units '//var(i)%req%short_name) + call check(nf90_put_att(ncid, varid, 'coordinates', var(i)%coordinates), & + 'put att coordinates '//var(i)%req%short_name) + call check(nf90_put_att(ncid,varid,'missing_value',spval), & + 'put att missing_value '//var(i)%req%short_name) + call check(nf90_put_att(ncid,varid,'_FillValue',spval), & + 'put att _FillValue '//var(i)%req%short_name) endif enddo @@ -448,64 +493,75 @@ subroutine ice_write_hist (ns) dimid_nverts(3) = jmtid do i = 1, nvar_verts if (f_bounds) then - status = nf90_def_var(ncid, var_nverts(i)%short_name, & - nf90_float,dimid_nverts, varid) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining variable '//var_nverts(i)%short_name) - status = nf90_put_att(ncid,varid, 'long_name', var_nverts(i)%long_name) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining long_name for '//var_nverts(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', var_nverts(i)%units) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining units for '//var_nverts(i)%short_name) - status = nf90_put_att(ncid,varid,'missing_value',spval) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining missing_value for '//var_nverts(i)%short_name) - status = nf90_put_att(ncid,varid,'_FillValue',spval) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining _FillValue for '//var_nverts(i)%short_name) + call check(nf90_def_var(ncid, var_nverts(i)%short_name, & + nf90_float,dimid_nverts, varid), & + 'def var '//var_nverts(i)%short_name) + + if (history_chunksize_x > 0 .and. history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ 1, history_chunksize_x, history_chunksize_y /)), & + 'def var chunking '//var_nverts(i)%short_name) + endif + + call check(nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level), & + 'deflate var '//var_nverts(i)%short_name) + + call check(nf90_put_att(ncid,varid, 'long_name', & + var_nverts(i)%long_name), & + 'put att long_name '//var_nverts(i)%short_name) + call check(nf90_put_att(ncid, varid, 'units', var_nverts(i)%units), & + 'put att units '//var_nverts(i)%short_name) + call check(nf90_put_att(ncid,varid,'missing_value',spval), & + 'put att missing_value '//var_nverts(i)%short_name) + call check(nf90_put_att(ncid,varid,'_FillValue',spval), & + 'put att _FillValue '//var_nverts(i)%short_name) endif enddo do n=1,num_avail_hist_fields_2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & - nf90_float, dimid, varid) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining cell measures for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'missing_value',spval) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining missing_value for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'_FillValue',spval) - if (status /= nf90_noerr) call abort_ice( & - 'Error defining _FillValue for '//avail_hist_fields(n)%vname) + call check(nf90_def_var(ncid, avail_hist_fields(n)%vname, & + nf90_float, dimid, varid), & + 'def var '//avail_hist_fields(n)%vname) + + if (history_chunksize_x > 0 .and. history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y, 1 /)), & + 'def var chunking '//avail_hist_fields(n)%vname) + endif - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- + call check(nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level), & + 'deflate '//avail_hist_fields(n)%vname) + + call check(nf90_put_att(ncid,varid,'units', & + avail_hist_fields(n)%vunit), & + 'put att units '//avail_hist_fields(n)%vname) + call check(nf90_put_att(ncid,varid, 'long_name', & + avail_hist_fields(n)%vdesc), & + 'put att long_name '//avail_hist_fields(n)%vname) + call check(nf90_put_att(ncid,varid,'coordinates', & + avail_hist_fields(n)%vcoord), & + 'put att coordinates '//avail_hist_fields(n)%vname) + call check(nf90_put_att(ncid,varid,'cell_measures', & + avail_hist_fields(n)%vcellmeas), & + 'put att cell_measures '//avail_hist_fields(n)%vname) + call check(nf90_put_att(ncid,varid,'missing_value',spval), & + 'put att missing_value '//avail_hist_fields(n)%vname) + call check(nf90_put_att(ncid,varid,'_FillValue',spval), & + 'put att _FillValue '//avail_hist_fields(n)%vname) + + !--------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !--------------------------------------------------------------- if (hist_avg) then - if (TRIM(avail_hist_fields(n)%vname)/='sig1' & - .or.TRIM(avail_hist_fields(n)%vname)/='sig2') then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice( & - 'Error defining cell methods for '//avail_hist_fields(n)%vname) - endif + if (TRIM(avail_hist_fields(n)%vname)/='sig1' .or. 
& + TRIM(avail_hist_fields(n)%vname)/='sig2') then + + call check(nf90_put_att(ncid,varid,'cell_methods','time: mean'), & + 'put att cell methods time mean '//avail_hist_fields(n)%vname) + endif endif if (histfreq(ns) == '1' .or. .not. hist_avg & @@ -514,9 +570,11 @@ subroutine ice_write_hist (ns) .or. n==n_trsig(ns) & .or. n==n_mlt_onset(ns) .or. n==n_frz_onset(ns) & .or. n==n_hisnap(ns) .or. n==n_aisnap(ns)) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + call check(nf90_put_att(ncid,varid,'time_rep','instantaneous'), & + 'put att time_rep instantaneous') else - status = nf90_put_att(ncid,varid,'time_rep','averaged') + call check(nf90_put_att(ncid,varid,'time_rep','averaged'), & + 'put att time_rep averaged') endif endif enddo ! num_avail_hist_fields_2D @@ -532,6 +590,18 @@ subroutine ice_write_hist (ns) nf90_float, dimidz, varid) if (status /= nf90_noerr) call abort_ice( & 'Error defining variable '//avail_hist_fields(n)%vname) + + if (history_chunksize_x > 0 .and. history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y, 1, 1 /)), & + 'def var chunking '//avail_hist_fields(n)%vname) + endif + + status = nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level) + if (status /= nf90_noerr) call abort_ice( & + 'Error deflating variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & avail_hist_fields(n)%vunit) if (status /= nf90_noerr) call abort_ice( & @@ -555,9 +625,9 @@ subroutine ice_write_hist (ns) if (status /= nf90_noerr) call abort_ice( & 'Error defining _FillValue for '//avail_hist_fields(n)%vname) - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- + !--------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !--------------------------------------------------------------- if (hist_avg) then status = nf90_put_att(ncid,varid,'cell_methods','time: mean') if (status /= nf90_noerr) call abort_ice( & @@ -583,6 +653,18 @@ subroutine ice_write_hist (ns) nf90_float, dimidz, varid) if (status /= nf90_noerr) call abort_ice( & 'Error defining variable '//avail_hist_fields(n)%vname) + + if (history_chunksize_x > 0 .and. history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y, 1, 1 /)), & + 'def var chunking '//avail_hist_fields(n)%vname) + endif + + status = nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level) + if (status /= nf90_noerr) call abort_ice( & + 'Error deflating variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & avail_hist_fields(n)%vunit) if (status /= nf90_noerr) call abort_ice( & @@ -620,6 +702,18 @@ subroutine ice_write_hist (ns) nf90_float, dimidz, varid) if (status /= nf90_noerr) call abort_ice( & 'Error defining variable '//avail_hist_fields(n)%vname) + + if (history_chunksize_x > 0 .and. 
history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y, 1, 1 /)), & + 'def var chunking '//avail_hist_fields(n)%vname) + endif + + status = nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level) + if (status /= nf90_noerr) call abort_ice( & + 'Error deflating variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & avail_hist_fields(n)%vunit) if (status /= nf90_noerr) call abort_ice( & @@ -642,7 +736,6 @@ subroutine ice_write_hist (ns) status = nf90_put_att(ncid,varid,'_FillValue',spval) if (status /= nf90_noerr) call abort_ice( & 'Error defining _FillValue for '//avail_hist_fields(n)%vname) - endif enddo ! num_avail_hist_fields_3Db @@ -655,10 +748,22 @@ subroutine ice_write_hist (ns) do n = n3Dbcum + 1, n4Dicum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & -! nf90_float, dimidcz, varid) - nf90_float, dimidcz(1:4), varid) ! ferret + !nf90_float, dimidcz, varid) ! ferret + nf90_float, dimidcz(1:4), varid) if (status /= nf90_noerr) call abort_ice( & 'Error defining variable '//avail_hist_fields(n)%vname) + + if (history_chunksize_x > 0 .and. history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y, 1, 1 /)), & + 'def var chunking '//avail_hist_fields(n)%vname) + endif + + status = nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level) + if (status /= nf90_noerr) call abort_ice( & + 'Error deflating variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & avail_hist_fields(n)%vunit) if (status /= nf90_noerr) call abort_ice( & @@ -682,9 +787,9 @@ subroutine ice_write_hist (ns) if (status /= nf90_noerr) call abort_ice( & 'Error defining _FillValue for '//avail_hist_fields(n)%vname) - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- + !--------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !--------------------------------------------------------------- if (hist_avg) then status = nf90_put_att(ncid,varid,'cell_methods','time: mean') if (status /= nf90_noerr) call abort_ice( & @@ -708,10 +813,22 @@ subroutine ice_write_hist (ns) do n = n4Dicum + 1, n4Dscum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & -! nf90_float, dimidcz, varid) - nf90_float, dimidcz(1:4), varid) ! ferret + !nf90_float, dimidcz, varid) ! ferret + nf90_float, dimidcz(1:4), varid) if (status /= nf90_noerr) call abort_ice( & 'Error defining variable '//avail_hist_fields(n)%vname) + + if (history_chunksize_x > 0 .and. 
history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y, 1, 1 /)), & + 'def var chunking '//avail_hist_fields(n)%vname) + endif + + status = nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level) + if (status /= nf90_noerr) call abort_ice( & + 'Error deflating variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & avail_hist_fields(n)%vunit) if (status /= nf90_noerr) call abort_ice( & @@ -735,9 +852,9 @@ subroutine ice_write_hist (ns) if (status /= nf90_noerr) call abort_ice( & 'Error defining _FillValue for '//avail_hist_fields(n)%vname) - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- + !--------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !--------------------------------------------------------------- if (hist_avg) then status = nf90_put_att(ncid,varid,'cell_methods','time: mean') if (status /= nf90_noerr) call abort_ice( & @@ -761,10 +878,22 @@ subroutine ice_write_hist (ns) do n = n4Dscum + 1, n4Dbcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & -! nf90_float, dimidcz, varid) - nf90_float, dimidcz(1:4), varid) ! ferret + !nf90_float, dimidcz, varid) ! ferret + nf90_float, dimidcz(1:4), varid) if (status /= nf90_noerr) call abort_ice( & 'Error defining variable '//avail_hist_fields(n)%vname) + + if (history_chunksize_x > 0 .and. history_chunksize_y > 0) then + call check(nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, & + (/ history_chunksize_x, history_chunksize_y, 1, 1 /)), & + 'def var chunking '//avail_hist_fields(n)%vname) + endif + + status = nf90_def_var_deflate(ncid, varid, shuffle, deflate, & + deflate_level) + if (status /= nf90_noerr) call abort_ice( & + 'Error deflating variable '//avail_hist_fields(n)%vname) + status = nf90_put_att(ncid,varid,'units', & avail_hist_fields(n)%vunit) if (status /= nf90_noerr) call abort_ice( & @@ -788,9 +917,9 @@ subroutine ice_write_hist (ns) if (status /= nf90_noerr) call abort_ice( & 'Error defining _FillValue for '//avail_hist_fields(n)%vname) - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- + !--------------------------------------------------------------- + ! Add cell_methods attribute to variables if averaged + !--------------------------------------------------------------- if (hist_avg) then status = nf90_put_att(ncid,varid,'cell_methods','time: mean') if (status /= nf90_noerr) call abort_ice( & @@ -798,9 +927,11 @@ subroutine ice_write_hist (ns) endif if (histfreq(ns) == '1' .or. .not. hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + call check(nf90_put_att(ncid,varid,'time_rep','instantaneous'), & + 'put att time_rep instantaneous') else - status = nf90_put_att(ncid,varid,'time_rep','averaged') + call check(nf90_put_att(ncid,varid,'time_rep','averaged'), & + 'put att time_rep averaged') endif endif enddo ! num_avail_hist_fields_4Db @@ -811,49 +942,44 @@ subroutine ice_write_hist (ns) ! ... the user should change these to something useful ... 
!----------------------------------------------------------------- #ifdef CCSMCOUPLED - status = nf90_put_att(ncid,nf90_global,'title',runid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error in global attribute title') + call check(nf90_put_att(ncid,nf90_global,'title',runid), & + 'in global attribute title') #else title = 'sea ice model output for CICE' - status = nf90_put_att(ncid,nf90_global,'title',title) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error in global attribute title') + call check(nf90_put_att(ncid,nf90_global,'title',title), & + 'global attribute title') #endif title = 'Diagnostic and Prognostic Variables' - status = nf90_put_att(ncid,nf90_global,'contents',title) - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: global attribute contents') + call check(nf90_put_att(ncid,nf90_global,'contents',title), & + 'global attribute contents') title = 'Los Alamos Sea Ice Model (CICE) Version 5' - status = nf90_put_att(ncid,nf90_global,'source',title) - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: global attribute source') + call check(nf90_put_att(ncid,nf90_global,'source',title), & + 'global attribute source') +#if defined(AUSCOM) && !defined(ACCESS) + write(title,'(a,i3,a)') 'This Year Has ',int(dayyr),' days' +#else if (use_leap_years) then write(title,'(a,i3,a)') 'This year has ',int(dayyr),' days' else write(title,'(a,i3,a)') 'All years have exactly ',int(dayyr),' days' endif - status = nf90_put_att(ncid,nf90_global,'comment',title) - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: global attribute comment') +#endif + call check(nf90_put_att(ncid,nf90_global,'comment',title), & + 'global attribute comment') write(title,'(a,i8.8)') 'File written on model date ',idate - status = nf90_put_att(ncid,nf90_global,'comment2',title) - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: global attribute date1') + call check(nf90_put_att(ncid,nf90_global,'comment2',title), & + 'global attribute comment2') write(title,'(a,i6)') 'seconds elapsed into model date: ',sec - status = nf90_put_att(ncid,nf90_global,'comment3',title) - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: global attribute date2') + call check(nf90_put_att(ncid,nf90_global,'comment3',title), & + 'global attribute comment3') title = 'CF-1.0' - status = & - nf90_put_att(ncid,nf90_global,'conventions',title) - if (status /= nf90_noerr) call abort_ice( & - 'Error in global attribute conventions') + call check(nf90_put_att(ncid,nf90_global,'conventions',title), & + 'global attribute conventions') call date_and_time(date=current_date, time=current_time) write(start_time,1000) current_date(1:4), current_date(5:6), & @@ -862,67 +988,123 @@ subroutine ice_write_hist (ns) 1000 format('This dataset was created on ', & a,'-',a,'-',a,' at ',a,':',a,':',a) - status = nf90_put_att(ncid,nf90_global,'history',start_time) - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: global attribute history') + call check(nf90_put_att(ncid,nf90_global,'history',start_time), & + 'global attribute history') - status = nf90_put_att(ncid,nf90_global,'io_flavor','io_netcdf') - if (status /= nf90_noerr) call abort_ice( & - 'ice Error: global attribute io_flavor') + call check(nf90_put_att(ncid,nf90_global,'io_flavor','io_netcdf'), & + 'global attribute io_flavor') !----------------------------------------------------------------- ! 
end define mode !----------------------------------------------------------------- - status = nf90_enddef(ncid) - if (status /= nf90_noerr) call abort_ice('ice: Error in nf90_enddef') + call check(nf90_enddef(ncid), 'enddef') !----------------------------------------------------------------- ! write time variable !----------------------------------------------------------------- - status = nf90_inq_varid(ncid,'time',varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting time varid') - status = nf90_put_var(ncid,varid,ltime) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing time variable') + call check(nf90_inq_varid(ncid,'time',varid), & + 'inq varid time') + if (history_parallel_io) then + ! unlimited dimensions need to have collective access set + call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & + 'parallel access time') + endif + call check(nf90_put_var(ncid,varid,ltime), & + 'put var time') !----------------------------------------------------------------- ! write time_bounds info !----------------------------------------------------------------- if (hist_avg) then - status = nf90_inq_varid(ncid,'time_bounds',varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting time_bounds id') - status = nf90_put_var(ncid,varid,time_beg(ns),start=(/1/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing time_beg') - status = nf90_put_var(ncid,varid,time_end(ns),start=(/2/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing time_end') + call check(nf90_inq_varid(ncid,'time_bounds',varid), & + 'inq varid time_bounds') + call check(nf90_put_var(ncid,varid,time_beg(ns),start=(/1/)), & + 'put var time_bounds beginning') + call check(nf90_put_var(ncid,varid,time_end(ns),start=(/2/)), & + 'put var time_bounds end') endif + endif ! master_task or history_parallel_io + + !----------------------------------------------------------------- + ! write coordinate variables + !----------------------------------------------------------------- + + if (history_parallel_io) then + call write_coordinate_variables_parallel(ncid, coord_var, var_nz) + else + call write_coordinate_variables(ncid, coord_var, var_nz) + endif + + !----------------------------------------------------------------- + ! write grid masks, area and rotation angle + !----------------------------------------------------------------- + + if (history_parallel_io) then + call write_grid_variables_parallel(ncid, var, var_nverts) + else + call write_grid_variables(ncid, var, var_nverts) + endif + + + !----------------------------------------------------------------- + ! write 2d variable data + !----------------------------------------------------------------- + + if (history_parallel_io) then + call write_2d_variables_parallel(ns, ncid) + else + call write_2d_variables(ns, ncid) + endif - endif ! master_task + if (history_parallel_io) then + call write_3d_and_4d_variables_parallel(ns, ncid) + else + call write_3d_and_4d_variables(ns, ncid) + endif + + !----------------------------------------------------------------- + ! close output dataset + !----------------------------------------------------------------- + + if (my_task == master_task .or. 
history_parallel_io) then + call check(nf90_close(ncid), 'closing netCDF history file') + write(nu_diag,*) ' ' + write(nu_diag,*) 'Finished writing ',trim(ncfile(ns)) + endif + +end subroutine ice_write_hist + +subroutine write_coordinate_variables(ncid, coord_var, var_nz) + + integer, intent(in) :: ncid + type(coord_attributes), dimension(ncoord), intent(in) :: coord_var + type(coord_attributes), dimension(nvarz) :: var_nz + + real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 + real (kind=real_kind), dimension(:,:), allocatable :: work_gr + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work1 + + integer :: i, k, status + integer :: varid + character (len=len(coord_var(1)%short_name)) :: coord_var_name if (my_task==master_task) then allocate(work_g1(nx_global,ny_global)) allocate(work_gr(nx_global,ny_global)) else - allocate(work_gr(1,1)) ! to save memory allocate(work_g1(1,1)) + allocate(work_gr(1,1)) ! to save memory endif work_g1(:,:) = c0 - !----------------------------------------------------------------- - ! write coordinate variables - !----------------------------------------------------------------- - do i = 1,ncoord - call broadcast_scalar(coord_var(i)%short_name,master_task) - SELECT CASE (coord_var(i)%short_name) + coord_var_name = coord_var(i)%short_name + call broadcast_scalar(coord_var_name, master_task) + SELECT CASE (coord_var_name) CASE ('TLON') ! Convert T grid longitude from -180 -> 180 to 0 to 360 work1 = TLON*rad_to_deg + c360 @@ -942,12 +1124,10 @@ subroutine ice_write_hist (ns) if (my_task == master_task) then work_gr = work_g1 - status = nf90_inq_varid(ncid, coord_var(i)%short_name, varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//coord_var(i)%short_name) - status = nf90_put_var(ncid,varid,work_gr) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing'//coord_var(i)%short_name) + call check(nf90_inq_varid(ncid, coord_var_name, varid), & + 'inq varid '//coord_var_name) + call check(nf90_put_var(ncid,varid,work_gr), & + 'put var '//coord_var_name) endif enddo @@ -957,9 +1137,8 @@ subroutine ice_write_hist (ns) if (igrdz(i)) then call broadcast_scalar(var_nz(i)%short_name,master_task) if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_nz(i)%short_name, varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//var_nz(i)%short_name) + call check(nf90_inq_varid(ncid, var_nz(i)%short_name, varid), & + 'inq varid '//var_nz(i)%short_name) SELECT CASE (var_nz(i)%short_name) CASE ('NCAT') status = nf90_put_var(ncid,varid,hin_max(1:ncat_hist)) @@ -976,20 +1155,51 @@ subroutine ice_write_hist (ns) endif enddo - !----------------------------------------------------------------- - ! 
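The TLON case above converts T-grid longitude from radians to degrees and folds it from the -180..180 range into 0..360 with masked array assignments. A tiny self-contained check of that fold, using made-up longitudes already in degrees:

program lon_fold_sketch
  implicit none
  real, parameter :: c0 = 0.0, c360 = 360.0
  real :: lon(4)

  lon = (/ -170.0, 10.0, 179.9, -0.5 /)   ! degrees, in -180..180

  lon = lon + c360                        ! shift everything up by one period
  where (lon > c360) lon = lon - c360     ! fold back values that overshot
  where (lon < c0)   lon = lon + c360     ! defensive, as in the model code

  print '(4f8.2)', lon                    ! 190.00   10.00  179.90  359.50
end program lon_fold_sketch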
write grid masks, area and rotation angle - !----------------------------------------------------------------- + deallocate(work_g1) + deallocate(work_gr) + +end subroutine write_coordinate_variables + + + +subroutine write_grid_variables(ncid, var, var_nverts) + + integer, intent(in) :: ncid + type(req_attributes), dimension(nvar), intent(in) :: var + type(coord_attributes), dimension(nvar_verts), intent(in) :: var_nverts + + real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 + real (kind=real_kind), dimension(:,:), allocatable :: work_gr + real (kind=real_kind), dimension(:,:, :), allocatable :: work_gr3 + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work1 + + integer :: ivertex, i + integer :: varid + character (len=len(var(1)%req%short_name)) :: var_name + character (len=len(var_nverts(1)%short_name)) :: var_nverts_name + + if (my_task == master_task) then + allocate(work_g1(nx_global,ny_global)) + allocate(work_gr(nx_global,ny_global)) + allocate(work_gr3(nverts,nx_global,ny_global)) + else + allocate(work_g1(1,1)) + allocate(work_gr(1,1)) ! to save memory + allocate(work_gr3(1,1,1)) + endif + + work_g1(:,:) = c0 + work_gr(:,:) = c0 + work_gr3(:,:,:) = c0 if (igrd(n_tmask)) then call gather_global(work_g1, hm, master_task, distrb_info) if (my_task == master_task) then - work_gr=work_g1 - status = nf90_inq_varid(ncid, 'tmask', varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for tmask') - status = nf90_put_var(ncid,varid,work_gr) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable tmask') + work_gr = work_g1 + call check(nf90_inq_varid(ncid, 'tmask', varid), & + 'inq var tmask') + call check(nf90_put_var(ncid,varid,work_gr), & + 'put var blkmask') endif endif @@ -997,19 +1207,19 @@ subroutine ice_write_hist (ns) call gather_global(work_g1, bm, master_task, distrb_info) if (my_task == master_task) then work_gr=work_g1 - status = nf90_inq_varid(ncid, 'blkmask', varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for blkmask') - status = nf90_put_var(ncid,varid,work_gr) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable blkmask') + call check(nf90_inq_varid(ncid, 'blkmask', varid), & + 'inq var blkmask') + call check(nf90_put_var(ncid,varid,work_gr), & + 'put var blkmask') endif endif do i = 3, nvar ! note n_tmask=1, n_blkmask=2 if (igrd(i)) then - call broadcast_scalar(var(i)%req%short_name,master_task) - SELECT CASE (var(i)%req%short_name) + var_name = var(i)%req%short_name + + call broadcast_scalar(var_name,master_task) + SELECT CASE (var_name) CASE ('tarea') call gather_global(work_g1, tarea, master_task, distrb_info) CASE ('uarea') @@ -1034,35 +1244,26 @@ subroutine ice_write_hist (ns) if (my_task == master_task) then work_gr=work_g1 - status = nf90_inq_varid(ncid, var(i)%req%short_name, varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//var(i)%req%short_name) - status = nf90_put_var(ncid,varid,work_gr) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//var(i)%req%short_name) + call check(nf90_inq_varid(ncid, var_name, varid), & + 'inq varid '//var_name) + call check(nf90_put_var(ncid,varid,work_gr), & + 'put var '//var_name) endif endif enddo - deallocate(work_gr) - !---------------------------------------------------------------- ! 
Write coordinates of grid box vertices !---------------------------------------------------------------- if (f_bounds) then - if (my_task==master_task) then - allocate(work_gr3(nverts,nx_global,ny_global)) - else - allocate(work_gr3(1,1,1)) ! to save memory - endif - work_gr3(:,:,:) = c0 work1 (:,:,:) = c0 do i = 1, nvar_verts - call broadcast_scalar(var_nverts(i)%short_name,master_task) - SELECT CASE (var_nverts(i)%short_name) + var_nverts_name = var_nverts(i)%short_name + call broadcast_scalar(var_nverts_name,master_task) + SELECT CASE (var_nverts_name) CASE ('lont_bounds') do ivertex = 1, nverts work1(:,:,:) = lont_bounds(ivertex,:,:,:) @@ -1090,46 +1291,83 @@ subroutine ice_write_hist (ns) END SELECT if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_nverts(i)%short_name, varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//var_nverts(i)%short_name) - status = nf90_put_var(ncid,varid,work_gr3) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//var_nverts(i)%short_name) + call check(nf90_inq_varid(ncid, var_nverts_name, varid), & + 'inq varid '//var_nverts_name) + call check(nf90_put_var(ncid,varid,work_gr3), & + 'put var '//var_nverts_name) endif enddo - deallocate(work_gr3) endif - !----------------------------------------------------------------- - ! write variable data - !----------------------------------------------------------------- + deallocate(work_g1) + deallocate(work_gr) + deallocate(work_gr3) - if (my_task==master_task) then +end subroutine write_grid_variables + + +subroutine write_2d_variables(ns, ncid) + + integer, intent(in) :: ns + integer, intent(in) :: ncid + + real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 + real (kind=real_kind), dimension(:,:), allocatable :: work_gr + + integer :: n + integer :: varid + + if (my_task == master_task) then + allocate(work_g1(nx_global,ny_global)) allocate(work_gr(nx_global,ny_global)) else + allocate(work_g1(1,1)) allocate(work_gr(1,1)) ! to save memory endif + work_gr(:,:) = c0 work_g1(:,:) = c0 - do n=1,num_avail_hist_fields_2D + do n=1, num_avail_hist_fields_2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then call gather_global(work_g1, a2D(:,:,n,:), & master_task, distrb_info) if (my_task == master_task) then work_gr(:,:) = work_g1(:,:) - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//avail_hist_fields(n)%vname) - status = nf90_put_var(ncid,varid,work_gr(:,:), & - count=(/nx_global,ny_global/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//avail_hist_fields(n)%vname) + call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & + 'inq varid '//avail_hist_fields(n)%vname) + call check(nf90_put_var(ncid,varid,work_gr(:,:), & + count=(/nx_global,ny_global/)), & + 'put var '//avail_hist_fields(n)%vname) endif endif enddo ! 
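The 3D and 4D history loops that follow gather one vertical level (or one category/level pair) at a time onto the master task and drop it into the file at the matching offset through the start/count arguments of nf90_put_var. A standalone sketch of that slab-at-a-time write, with made-up sizes and a stand-in for the gathered data:

program slab_write_sketch
  use netcdf
  implicit none
  integer, parameter :: nx = 4, ny = 3, ncat = 5
  real    :: slab(nx, ny)
  integer :: ncid, dimids(3), varid, k

  call ncchk( nf90_create('slab_sketch.nc', NF90_NETCDF4, ncid) )
  call ncchk( nf90_def_dim(ncid, 'ni', nx,   dimids(1)) )
  call ncchk( nf90_def_dim(ncid, 'nj', ny,   dimids(2)) )
  call ncchk( nf90_def_dim(ncid, 'nc', ncat, dimids(3)) )
  call ncchk( nf90_def_var(ncid, 'aicen', NF90_FLOAT, dimids, varid) )
  call ncchk( nf90_enddef(ncid) )

  do k = 1, ncat
     slab = real(k)    ! stand-in for one gathered category
     ! write a single slice: offset k along the third dimension, extent 1
     call ncchk( nf90_put_var(ncid, varid, slab, &
                              start=(/ 1, 1, k /), count=(/ nx, ny, 1 /)) )
  enddo

  call ncchk( nf90_close(ncid) )

contains

  subroutine ncchk(status)
    integer, intent(in) :: status
    if (status /= nf90_noerr) then
      print *, trim(nf90_strerror(status))
      stop 1
    endif
  end subroutine ncchk

end program slab_write_sketch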
num_avail_hist_fields_2D + deallocate(work_g1) + deallocate(work_gr) + +end subroutine write_2d_variables + + +subroutine write_3d_and_4d_variables(ns, ncid) + + integer, intent(in) :: ns + integer, intent(in) :: ncid + + real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 + real (kind=real_kind), dimension(:,:), allocatable :: work_gr + + integer :: varid + integer :: n, nn, k, ic + + if (my_task == master_task) then + allocate(work_g1(nx_global,ny_global)) + allocate(work_gr(nx_global,ny_global)) + else + allocate(work_g1(1,1)) + allocate(work_gr(1,1)) ! to save memory + endif + work_gr(:,:) = c0 work_g1(:,:) = c0 @@ -1137,9 +1375,8 @@ subroutine ice_write_hist (ns) nn = n - n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & + 'inq varid '//avail_hist_fields(n)%vname) endif do k = 1, ncat_hist call gather_global(work_g1, a3Dc(:,:,k,nn,:), & @@ -1147,14 +1384,10 @@ subroutine ice_write_hist (ns) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//avail_hist_fields(n)%vname) - status = nf90_put_var(ncid,varid,work_gr(:,:), & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//avail_hist_fields(n)%vname) + call check(nf90_put_var(ncid,varid,work_gr(:,:), & + start=(/ 1, 1, k/), & + count=(/nx_global,ny_global, 1/)), & + 'put var '//avail_hist_fields(n)%vname) endif enddo ! k endif @@ -1167,9 +1400,8 @@ subroutine ice_write_hist (ns) nn = n - n3Dccum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & + 'inq varid '//avail_hist_fields(n)%vname) endif do k = 1, nzilyr call gather_global(work_g1, a3Dz(:,:,k,nn,:), & @@ -1177,11 +1409,10 @@ subroutine ice_write_hist (ns) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_gr(:,:), & + call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//avail_hist_fields(n)%vname) + count=(/nx_global,ny_global,1/)), & + 'put var '//avail_hist_fields(n)%vname) endif enddo ! k endif @@ -1194,9 +1425,8 @@ subroutine ice_write_hist (ns) nn = n - n3Dzcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & + 'inq varid '//avail_hist_fields(n)%vname) endif do k = 1, nzblyr call gather_global(work_g1, a3Db(:,:,k,nn,:), & @@ -1204,11 +1434,10 @@ subroutine ice_write_hist (ns) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_gr(:,:), & + call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//avail_hist_fields(n)%vname) + count=(/nx_global,ny_global,1/)), & + 'put var '//avail_hist_fields(n)%vname) endif enddo ! k endif @@ -1221,9 +1450,8 @@ subroutine ice_write_hist (ns) nn = n - n3Dbcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & + 'inq varid '//avail_hist_fields(n)%vname) endif do ic = 1, ncat_hist do k = 1, nzilyr @@ -1231,11 +1459,10 @@ subroutine ice_write_hist (ns) master_task, distrb_info) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_gr(:,:), & + call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/ 1, 1,k,ic/), & - count=(/nx_global,ny_global,1, 1/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//avail_hist_fields(n)%vname) + count=(/nx_global,ny_global,1, 1/)), & + 'put var '//avail_hist_fields(n)%vname) endif enddo ! k enddo ! ic @@ -1249,9 +1476,8 @@ subroutine ice_write_hist (ns) nn = n - n4Dicum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & + 'inq var '//avail_hist_fields(n)%vname) endif do ic = 1, ncat_hist do k = 1, nzslyr @@ -1259,11 +1485,10 @@ subroutine ice_write_hist (ns) master_task, distrb_info) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_gr(:,:), & + call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/ 1, 1,k,ic/), & - count=(/nx_global,ny_global,1, 1/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//avail_hist_fields(n)%vname) + count=(/nx_global,ny_global,1, 1/)), & + 'put var '//avail_hist_fields(n)%vname) endif enddo ! k enddo ! ic @@ -1277,9 +1502,8 @@ subroutine ice_write_hist (ns) nn = n - n4Dscum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error getting varid for '//avail_hist_fields(n)%vname) + call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & + 'inq varid '//avail_hist_fields(n)%vname) endif do ic = 1, ncat_hist do k = 1, nzblyr @@ -1287,11 +1511,10 @@ subroutine ice_write_hist (ns) master_task, distrb_info) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_gr(:,:), & + call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/ 1, 1,k,ic/), & - count=(/nx_global,ny_global,1, 1/)) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing variable '//avail_hist_fields(n)%vname) + count=(/nx_global,ny_global,1, 1/)), & + 'put var '//avail_hist_fields(n)%vname) endif enddo ! k enddo ! ic @@ -1301,23 +1524,354 @@ subroutine ice_write_hist (ns) deallocate(work_gr) deallocate(work_g1) - !----------------------------------------------------------------- - ! close output dataset - !----------------------------------------------------------------- +end subroutine write_3d_and_4d_variables - if (my_task == master_task) then - status = nf90_close(ncid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error closing netCDF history file') - write(nu_diag,*) ' ' - write(nu_diag,*) 'Finished writing ',trim(ncfile(ns)) + +subroutine write_coordinate_variables_parallel(ncid, coord_var, var_nz) + + integer, intent(in) :: ncid + type(coord_attributes), dimension(ncoord), intent(in) :: coord_var + type(coord_attributes), dimension(nvarz) :: var_nz + + integer :: varid + integer :: iblk, i, k + real(kind=dbl_kind), dimension(nx_block,ny_block, max_blocks) :: work1 + + do i = 1,ncoord + SELECT CASE (coord_var(i)%short_name) + CASE ('TLON') + ! Convert T grid longitude from -180 -> 180 to 0 to 360 + work1 = TLON*rad_to_deg + c360 + where (work1 > c360) work1 = work1 - c360 + where (work1 < c0 ) work1 = work1 + c360 + CASE ('TLAT') + work1 = TLAT*rad_to_deg + CASE ('ULON') + work1 = ULON*rad_to_deg + CASE ('ULAT') + work1 = ULAT*rad_to_deg + END SELECT + + call put_2d_with_blocks(ncid, coord_var(i)%short_name, work1) + enddo + + ! Extra dimensions (NCAT, VGRD*) + do i = 1, nvarz + if (igrdz(i)) then + call check(nf90_inq_varid(ncid, var_nz(i)%short_name, varid), & + 'inq_varid '//var_nz(i)%short_name) + SELECT CASE (var_nz(i)%short_name) + CASE ('NCAT') + call check(nf90_put_var(ncid, varid, hin_max(1:ncat_hist)), & + 'put var NCAT') + CASE ('VGRDi') ! index - needed for Met Office analysis code + call check(nf90_put_var(ncid, varid, (/(k, k=1, nzilyr)/)), & + 'put var VGRDi') + CASE ('VGRDs') ! 
index - needed for Met Office analysis code + call check(nf90_put_var(ncid, varid, (/(k, k=1, nzslyr)/)), & + 'put var VGRDs') + CASE ('VGRDb') + call check(nf90_put_var(ncid, varid, (/(k, k=1, nzblyr)/)), & + 'put var VGRDb') + END SELECT endif -#endif + enddo - end subroutine ice_write_hist +end subroutine write_coordinate_variables_parallel -!======================================================================= - end module ice_history_write +subroutine write_grid_variables_parallel(ncid, var, var_nverts) -!======================================================================= + integer, intent(in) :: ncid + type(req_attributes), dimension(nvar), intent(in) :: var + type(coord_attributes), dimension(nvar_verts), intent(in) :: var_nverts + + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks) :: work1 + real (kind=dbl_kind), dimension(nverts, nx_block, ny_block, max_blocks) :: work2 + + integer :: iblk + integer :: ilo, jlo, ihi, jhi, gilo, gjlo, gihi, gjhi + integer, dimension(3) :: start, count + type(block) :: the_block + + integer :: i + integer :: varid + + if (igrd(n_tmask)) then + call put_2d_with_blocks(ncid, 'tmask', hm) + endif + + if (igrd(n_blkmask)) then + call put_2d_with_blocks(ncid, 'blkmask', bm) + endif + + do i = 3, nvar ! note n_tmask=1, n_blkmask=2 + if (igrd(i)) then + SELECT CASE (var(i)%req%short_name) + CASE ('tarea') + work1 = tarea + CASE ('uarea') + work1 = uarea + CASE ('dxu') + work1 = dxu + CASE ('dyu') + work1 = dyu + CASE ('dxt') + work1 = dxt + CASE ('dyt') + work1 = dyt + CASE ('HTN') + work1 = HTN + CASE ('HTE') + work1 = HTE + CASE ('ANGLE') + work1 = ANGLE + CASE ('ANGLET') + work1 = ANGLET + END SELECT + + call put_2d_with_blocks(ncid, var(i)%req%short_name, work1) + endif + enddo + + !---------------------------------------------------------------- + ! Write coordinates of grid box vertices + !---------------------------------------------------------------- + + if (f_bounds) then + do i = 1, nvar_verts + SELECT CASE (var_nverts(i)%short_name) + CASE ('lont_bounds') + work2(:, :, :, :) = lont_bounds(:, :, :, :) + CASE ('latt_bounds') + work2(:, :, :, :) = latt_bounds(:, :, :, :) + CASE ('lonu_bounds') + work2(:, :, :, :) = lonu_bounds(:, :, :, :) + CASE ('latu_bounds') + work2(:, :, :, :) = lonu_bounds(:, :, :, :) + END SELECT + + call check(nf90_inq_varid(ncid, var_nverts(i)%short_name, varid), & + 'inq varid '//var_nverts(i)%short_name) + + do iblk=1, nblocks + the_block = get_block(blocks_ice(iblk), iblk) + ilo = the_block%ilo + jlo = the_block%jlo + ihi = the_block%ihi + jhi = the_block%jhi + + gilo = the_block%i_glob(ilo) + gjlo = the_block%j_glob(jlo) + gihi = the_block%i_glob(ihi) + gjhi = the_block%j_glob(jhi) + + start = (/ 1, gilo, gjlo /) + count = (/ nverts, gihi - gilo + 1, gjhi - gjlo + 1 /) + call check(nf90_put_var(ncid, varid, & + work2(1:nverts, ilo:ihi, jlo:jhi, iblk), & + start=start, count=count), & + 'grid vars _put '//trim(var_nverts(i)%short_name)) + enddo + enddo + endif + +end subroutine write_grid_variables_parallel + + +subroutine write_2d_variables_parallel(ns, ncid) + + integer, intent(in) :: ns + integer, intent(in) :: ncid + + integer :: varid + integer :: n + + do n=1, num_avail_hist_fields_2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + call put_2d_with_blocks(ncid, avail_hist_fields(n)%vname, & + a2D(:, :, n, :)) + endif + enddo ! 
num_avail_hist_fields_2D + +end subroutine write_2d_variables_parallel + + + +subroutine write_3d_and_4d_variables_parallel(ns, ncid) + + integer, intent(in) :: ns + integer, intent(in) :: ncid + + integer :: varid + integer :: n, nn, k, ic + + do n = n2D + 1, n3Dccum + nn = n - n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + call put_3d_with_blocks(ncid, avail_hist_fields(n)%vname, & + ncat_hist, a3Dc(:, :, :, nn, :)) + endif + enddo ! num_avail_hist_fields_3Dc + + + do n = n3Dccum+1, n3Dzcum + nn = n - n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + call put_3d_with_blocks(ncid, avail_hist_fields(n)%vname, & + nzilyr, a3Dz(:, :, :, nn, :)) + endif + enddo ! num_avail_hist_fields_3Dz + + + do n = n3Dzcum+1, n3Dbcum + nn = n - n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + call put_3d_with_blocks(ncid, avail_hist_fields(n)%vname, & + nzblyr, a3Db(:, :, :, nn, :)) + endif + enddo ! num_avail_hist_fields_3Db + + + do n = n3Dbcum+1, n4Dicum + nn = n - n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + call put_4d_with_blocks(ncid, avail_hist_fields(n)%vname, & + nzilyr, ncat_hist, a4Di(:, :, :, :, nn, :)) + endif + enddo ! num_avail_hist_fields_4Di + + + do n = n4Dicum+1, n4Dscum + nn = n - n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + call put_4d_with_blocks(ncid, avail_hist_fields(n)%vname, & + nzslyr, ncat_hist, a4Ds(:, :, :, :, nn, :)) + endif + enddo ! num_avail_hist_fields_4Ds + + do n = n4Dscum+1, n4Dbcum + nn = n - n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + call put_4d_with_blocks(ncid, avail_hist_fields(n)%vname, & + nzblyr, ncat_hist, a4Db(:, :, :, :, nn, :)) + endif + enddo ! 
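The put_*_with_blocks helpers used in these parallel writers (and defined a few lines below) let each PE write only the physical interior of its own blocks, placed at the block's global offset, so no gather to the master task is needed. A small sketch of how start and count are derived, with made-up block bounds standing in for what get_block and the block's i_glob/j_glob arrays return:

program block_offset_sketch
  implicit none
  ! illustrative block bounds only, not from a real CICE decomposition
  integer, parameter :: ilo = 2,   ihi = 17    ! local physical i-range (halo excluded)
  integer, parameter :: jlo = 2,   jhi = 13    ! local physical j-range
  integer, parameter :: gilo = 33, gjlo = 25   ! global indices of the first physical point
  integer :: start(2), count(2)

  start = (/ gilo, gjlo /)
  count = (/ ihi - ilo + 1, jhi - jlo + 1 /)

  print *, 'start =', start    ! 33 25
  print *, 'count =', count    ! 16 12
  ! nf90_put_var(ncid, varid, data(ilo:ihi, jlo:jhi, iblk), start=start, count=count)
  ! would then land this block's interior at global rows 33..48 and columns 25..36
end program block_offset_sketch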
num_avail_hist_fields_4Db + +end subroutine write_3d_and_4d_variables_parallel + + +subroutine put_2d_with_blocks(ncid, var_name, data) + + integer, intent(in) :: ncid + character(len=*), intent(in) :: var_name + real(kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: data + + integer :: varid + integer :: iblk + integer :: ilo, jlo, ihi, jhi, gilo, gjlo, gihi, gjhi + integer, dimension(2) :: start, count + type(block) :: the_block + + call check(nf90_inq_varid(ncid, var_name, varid), & + 'inq varid for '//var_name) + + do iblk=1, nblocks + the_block = get_block(blocks_ice(iblk), iblk) + ilo = the_block%ilo + jlo = the_block%jlo + ihi = the_block%ihi + jhi = the_block%jhi + + gilo = the_block%i_glob(ilo) + gjlo = the_block%j_glob(jlo) + gihi = the_block%i_glob(ihi) + gjhi = the_block%j_glob(jhi) + + start = (/ gilo, gjlo /) + count = (/ gihi - gilo + 1, gjhi - gjlo + 1 /) + call check(nf90_put_var(ncid, varid, real(data(ilo:ihi, jlo:jhi, iblk)), & + start=start, count=count), & + 'put_2d_with_blocks put '//trim(var_name)) + enddo + +end subroutine put_2d_with_blocks + +subroutine put_3d_with_blocks(ncid, var_name, len_3dim, data) + + integer, intent(in) :: ncid, len_3dim + character(len=*), intent(in) :: var_name + real(kind=dbl_kind), dimension(nx_block, ny_block, len_3dim, max_blocks), intent(in) :: data + + integer :: varid + integer :: iblk + integer :: ilo, jlo, ihi, jhi, gilo, gjlo, gihi, gjhi + integer, dimension(3) :: start, count + type(block) :: the_block + + call check(nf90_inq_varid(ncid, var_name, varid), & + 'inq varid for '//var_name) + + do iblk=1, nblocks + the_block = get_block(blocks_ice(iblk), iblk) + ilo = the_block%ilo + jlo = the_block%jlo + ihi = the_block%ihi + jhi = the_block%jhi + + gilo = the_block%i_glob(ilo) + gjlo = the_block%j_glob(jlo) + gihi = the_block%i_glob(ihi) + gjhi = the_block%j_glob(jhi) + + start = (/ gilo, gjlo, 1 /) + count = (/ gihi - gilo + 1, gjhi - gjlo + 1, len_3dim /) + call check(nf90_put_var(ncid, varid, & + real(data(ilo:ihi, jlo:jhi, 1:len_3dim, iblk)), & + start=start, count=count), & + 'put_3d_with_blocks put '//trim(var_name)) + enddo + +end subroutine put_3d_with_blocks + + +subroutine put_4d_with_blocks(ncid, var_name, len_3dim, len_4dim, data) + + integer, intent(in) :: ncid, len_3dim, len_4dim + character(len=*), intent(in) :: var_name + real(kind=dbl_kind), dimension(nx_block, ny_block, len_3dim, & + len_4dim, max_blocks), intent(in) :: data + + integer :: varid + integer :: iblk + integer :: ilo, jlo, ihi, jhi, gilo, gjlo, gihi, gjhi + integer, dimension(4) :: start, count + type(block) :: the_block + + call check(nf90_inq_varid(ncid, var_name, varid), & + 'inq varid for '//var_name) + + do iblk=1, nblocks + the_block = get_block(blocks_ice(iblk), iblk) + ilo = the_block%ilo + jlo = the_block%jlo + ihi = the_block%ihi + jhi = the_block%jhi + + gilo = the_block%i_glob(ilo) + gjlo = the_block%j_glob(jlo) + gihi = the_block%i_glob(ihi) + gjhi = the_block%j_glob(jhi) + + start = (/ gilo, gjlo, 1, 1 /) + count = (/ gihi - gilo + 1, gjhi - gjlo + 1, len_3dim, len_4dim /) + call check(nf90_put_var(ncid, varid, & + real(data(ilo:ihi, jlo:jhi, 1:len_3dim, 1:len_4dim, iblk)), & + start=start, count=count), & + 'put_4d_with_blocks put '//trim(var_name)) + enddo + +end subroutine put_4d_with_blocks + + + end module ice_history_write diff --git a/io_netcdf/ice_restart.F90 b/io_netcdf/ice_restart.F90 index 9de8aa31..753381c4 100644 --- a/io_netcdf/ice_restart.F90 +++ b/io_netcdf/ice_restart.F90 @@ -13,7 +13,7 @@ module 
ice_restart use netcdf use ice_restart_shared, only: & restart, restart_ext, restart_dir, restart_file, pointer_file, & - runid, runtype, use_restart_time, restart_format, lcdf64, lenstr + runid, runtype, use_restart_time, restart_format, lenstr implicit none private @@ -72,19 +72,39 @@ subroutine init_restart_read(ice_ic) status = nf90_open(trim(filename), nf90_nowrite, ncid) if (status /= nf90_noerr) call abort_ice( & 'ice: Error reading restart ncfile '//trim(filename)) - + if (use_restart_time) then status = nf90_get_att(ncid, nf90_global, 'istep1', istep0) + call assert(status == NF90_NOERR, & + 'in init_restart_read, on nf90_get_att(istep1)', status) + status = nf90_get_att(ncid, nf90_global, 'time', time) + call assert(status == NF90_NOERR, & + 'in init_restart_read, on nf90_get_att(time)', status) + status = nf90_get_att(ncid, nf90_global, 'time_forc', time_forc) + call assert(status == NF90_NOERR, & + 'in init_restart_read, on nf90_get_att(time_forc)', status) + status = nf90_get_att(ncid, nf90_global, 'nyr', nyr) + call assert(status == NF90_NOERR, & + 'in init_restart_read, on nf90_get_att(nyr)', status) + status = nf90_get_att(ncid, nf90_global, 'year', year) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error reading year attribute from ncfile '//trim(filename)) + call assert(status == NF90_NOERR, & + ' reading year attribute from ncfile '//trim(filename), status) if (status == nf90_noerr) then status = nf90_get_att(ncid, nf90_global, 'month', month) + call assert(status == NF90_NOERR, & + 'in init_restart_read, on nf90_get_att(month)', status) + status = nf90_get_att(ncid, nf90_global, 'mday', mday) + call assert(status == NF90_NOERR, & + 'in init_restart_read, on nf90_get_att(mday)', status) + status = nf90_get_att(ncid, nf90_global, 'sec', sec) + call assert(status == NF90_NOERR, & + 'in init_restart_read, on nf90_get_att(sec)', status) endif if ( sec .ne. 0 ) then @@ -114,7 +134,7 @@ subroutine init_restart_read(ice_ic) ! Check starting date and time are consistent call check_start_date #endif - + istep1 = istep0 ! if runid is bering then need to correct npt for istep0 @@ -164,7 +184,6 @@ subroutine init_restart_write(filename_spec) dimid_ni, & ! netCDF identifiers dimid_nj, & ! dimid_ncat, & ! - iflag, & ! netCDF creation flag status ! status variable from netCDF routine character (len=3) :: nchar @@ -189,19 +208,42 @@ subroutine init_restart_write(filename_spec) write(nu_rst_pointer,'(a)') filename close(nu_rst_pointer) - iflag = NF90_NETCDF4 - status = nf90_create(trim(filename), iflag, ncid) - if (status /= nf90_noerr) call abort_ice( & - 'ice: Error creating restart ncfile '//trim(filename)) + status = nf90_create(trim(filename), NF90_NETCDF4, ncid) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_create '//trim(filename), status) status = nf90_put_att(ncid,nf90_global,'istep1',istep1) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_put_att(istep1)', status) + status = nf90_put_att(ncid,nf90_global,'time',time) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_put_att(time)', status) + status = nf90_put_att(ncid,nf90_global,'time_forc',time_forc) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_put_att(time_forc)', status) + status = nf90_put_att(ncid,nf90_global,'nyr',nyr) ! year count since year_init + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_put_att(nyr)', status) + status = nf90_put_att(ncid,nf90_global,'year',iyear) ! 
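The restart reader and writer above round-trip the model clock through netCDF global attributes (istep1, time, time_forc, nyr, month, mday, sec), asserting on every call. A minimal sketch of that attribute round trip for one integer, with a made-up file name:

program restart_att_sketch
  use netcdf
  implicit none
  integer :: ncid, istep1, istep_back

  istep1 = 1440
  call ncchk( nf90_create('restart_sketch.nc', NF90_NETCDF4, ncid) )
  call ncchk( nf90_put_att(ncid, nf90_global, 'istep1', istep1) )
  call ncchk( nf90_enddef(ncid) )
  call ncchk( nf90_close(ncid) )

  call ncchk( nf90_open('restart_sketch.nc', NF90_NOWRITE, ncid) )
  call ncchk( nf90_get_att(ncid, nf90_global, 'istep1', istep_back) )
  call ncchk( nf90_close(ncid) )
  print *, 'istep1 read back as', istep_back   ! 1440

contains

  subroutine ncchk(status)
    integer, intent(in) :: status
    if (status /= nf90_noerr) then
      print *, trim(nf90_strerror(status))
      stop 1
    endif
  end subroutine ncchk

end program restart_att_sketch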
calendar year + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_put_att(year)', status) + status = nf90_put_att(ncid,nf90_global,'nyr',nyr) + status = nf90_put_att(ncid,nf90_global,'month',month) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_put_att(month)', status) + status = nf90_put_att(ncid,nf90_global,'mday',mday) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_put_att(mday)', status) + status = nf90_put_att(ncid,nf90_global,'sec',sec) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_put_att(sec)', status) nx = nx_global ny = ny_global @@ -210,9 +252,15 @@ subroutine init_restart_write(filename_spec) ny = ny_global + 2*nghost endif status = nf90_def_dim(ncid,'ni',nx,dimid_ni) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_def_dim(ni)', status) status = nf90_def_dim(ncid,'nj',ny,dimid_nj) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_def_dim(nj)', status) status = nf90_def_dim(ncid,'ncat',ncat,dimid_ncat) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_def_dim(ncat)', status) !----------------------------------------------------------------- ! 2D restart fields @@ -392,8 +440,11 @@ subroutine init_restart_write(filename_spec) deallocate(dims) status = nf90_enddef(ncid) + call assert(status == NF90_NOERR, & + 'in init_restart_write on nf90_enddef', status) write(nu_diag,*) 'Writing ',filename(1:lenstr(filename)) + endif ! master_task end subroutine init_restart_write @@ -494,6 +545,7 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) use ice_domain_size, only: max_blocks, ncat use ice_fileunits, only: nu_diag use ice_read_write, only: ice_write, ice_write_nc + use ice_communicate, only: my_task, master_task integer (kind=int_kind), intent(in) :: & nu , & ! unit number @@ -524,6 +576,10 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) work2 ! input array (real, 8-byte) status = nf90_inq_varid(ncid,trim(vname),varid) + if (my_task == master_task) then + call assert(status == NF90_NOERR, & + 'in write_restart_field on '//trim(vname), status) + endif if (ndim3 == ncat) then if (restart_ext) then call ice_write_nc(ncid, 1, varid, work, diag, restart_ext) @@ -556,10 +612,11 @@ subroutine final_restart() integer (kind=int_kind) :: status - status = nf90_close(ncid) - - if (my_task == master_task) & + if (my_task == master_task) then + status = nf90_close(ncid) + call assert(status == NF90_NOERR, 'in final_restart', status) write(nu_diag,*) 'Restart read/written ',istep1,time,time_forc + endif end subroutine final_restart @@ -580,11 +637,31 @@ subroutine define_rest_field(ncid, vname, dims) status ! status variable from netCDF routine status = nf90_def_var(ncid,trim(vname),nf90_double,dims,varid) + call assert(status == NF90_NOERR, & + 'in define_rest_field, on '//trim(vname), status) + status = nf90_def_var_deflate(ncid, varid, 1, 1, 1) + call assert(status == NF90_NOERR, & + 'deflate in define_rest_field, on '//trim(vname), status) end subroutine define_rest_field !======================================================================= + subroutine assert(logical_arg, str_msg, error_code) + + logical, intent(in) :: logical_arg + character(len=*), intent(in) :: str_msg + integer, intent(in) :: error_code + character(len=3) :: err_code_str + + write(err_code_str, '(I3)') error_code + + if (.not. 
logical_arg) then + call abort_ice('ice: Error '//err_code_str//' '//str_msg) + endif + + end subroutine assert + end module ice_restart !======================================================================= diff --git a/source/ice_domain.F90 b/source/ice_domain.F90 index b9f84f97..b1dd4348 100755 --- a/source/ice_domain.F90 +++ b/source/ice_domain.F90 @@ -48,7 +48,8 @@ module ice_domain logical (kind=log_kind), public :: & maskhalo_dyn , & ! if true, use masked halo updates for dynamics maskhalo_remap , & ! if true, use masked halo updates for transport - maskhalo_bound ! if true, use masked halo updates for bound_state + maskhalo_bound , & ! if true, use masked halo updates for bound_state + equal_num_blocks_per_cpu ! if true, all CPUs have the same number of blocks !----------------------------------------------------------------------- ! @@ -110,7 +111,8 @@ subroutine init_domain_blocks ns_boundary_type, & maskhalo_dyn, & maskhalo_remap, & - maskhalo_bound + maskhalo_bound, & + equal_num_blocks_per_cpu !---------------------------------------------------------------------- ! @@ -127,6 +129,7 @@ subroutine init_domain_blocks maskhalo_dyn = .false. ! if true, use masked halos for dynamics maskhalo_remap = .false. ! if true, use masked halos for transport maskhalo_bound = .false. ! if true, use masked halos for bound_state + equal_num_blocks_per_cpu = .false. ! if true, all CPUs have the same number of blocks call get_fileunit(nu_nml) if (my_task == master_task) then @@ -157,6 +160,7 @@ subroutine init_domain_blocks call broadcast_scalar(maskhalo_dyn, master_task) call broadcast_scalar(maskhalo_remap, master_task) call broadcast_scalar(maskhalo_bound, master_task) + call broadcast_scalar(equal_num_blocks_per_cpu, master_task) !---------------------------------------------------------------------- ! @@ -267,7 +271,7 @@ subroutine init_domain_distribution(KMTG,ULATG) max_work_unit=10 ! quantize the work into values from 1,max integer (int_kind) :: & - i,j,n ,&! dummy loop indices + i,j,n,p ,&! dummy loop indices ig,jg ,&! global indices work_unit ,&! size of quantized work unit tblocks_tmp ,&! total number of blocks @@ -281,6 +285,10 @@ subroutine init_domain_distribution(KMTG,ULATG) type (block) :: & this_block ! block information for current block + ! The number of extra blocks that need to be added so that each PE + ! has an equal number. + integer (int_kind) :: num_padding_blocks, num_work_blocks + !---------------------------------------------------------------------- ! ! check that there are at least nghost+1 rows or columns of land cells @@ -429,6 +437,32 @@ subroutine init_domain_distribution(KMTG,ULATG) end where deallocate(nocn) + if (equal_num_blocks_per_cpu) then + ! Make sure that each PE has + ! the same number of blocks. This is needed when using parallel NetCDF + ! because all parallel writes are synchronous. + + ! First calculate the number of padding blocks needed. + num_work_blocks = count(work_per_block /= 0) + num_padding_blocks = ceiling(real(num_work_blocks) / real(nprocs))*nprocs - num_work_blocks + + ! Add padding blocks wherever necessary + p = 0 + do n=1, nblocks_tot + if (p >= num_padding_blocks) then + exit + elseif (work_per_block(n) == 0) then + work_per_block(n) = 1 + p = p + 1 + endif + enddo + + if (p /= num_padding_blocks) then + call abort_ice("ice: can't assign work to pad blocks") + endif + endif + + !---------------------------------------------------------------------- ! ! 
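When equal_num_blocks_per_cpu is set, the number of active blocks is rounded up to a multiple of nprocs by giving a token work unit to otherwise-empty land blocks, because the collective writes used by parallel netCDF output have to be entered by every PE. A quick standalone check of the padding formula with made-up numbers:

program pad_blocks_sketch
  implicit none
  integer :: num_work_blocks, nprocs, num_padding_blocks

  num_work_blocks = 10    ! blocks containing ocean points (illustrative)
  nprocs          = 4     ! PEs (illustrative)

  num_padding_blocks = ceiling(real(num_work_blocks) / real(nprocs)) * nprocs &
                       - num_work_blocks

  print *, num_padding_blocks   ! ceiling(2.5)*4 - 10 = 12 - 10 = 2
  ! two land blocks get work_per_block = 1, so each of the 4 PEs owns exactly 3 blocks
end program pad_blocks_sketch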
determine the distribution of blocks across processors diff --git a/source/ice_history_shared.F90 b/source/ice_history_shared.F90 index 5af8250b..27ecad12 100755 --- a/source/ice_history_shared.F90 +++ b/source/ice_history_shared.F90 @@ -44,6 +44,19 @@ module ice_history_shared history_dir , & ! directory name for history file incond_dir ! directory for snapshot initial conditions + integer, public :: & + history_deflate_level ! Deflation/compression level to use for + ! netCDF4 output + + logical, public :: & + history_parallel_io ! Use parallel write for netCDF4 output + + integer, public :: & + history_chunksize_x ! NetCDF chunksize in x/lon dimension + + integer, public :: & + history_chunksize_y ! NetCDF chunksize in y/lat dimension + character (len=char_len_long), public :: & pointer_file ! input pointer file for restarts diff --git a/source/ice_init.F90 b/source/ice_init.F90 index e5f36b29..1ed2f647 100755 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -54,8 +54,10 @@ subroutine input_data write_ic, dump_last use ice_restart_shared, only: & restart, restart_ext, restart_dir, restart_file, pointer_file, & - runid, runtype, use_restart_time, restart_format, lcdf64 + runid, runtype, use_restart_time, restart_format use ice_history_shared, only: hist_avg, history_dir, history_file, & + history_deflate_level, history_parallel_io, & + history_chunksize_x, history_chunksize_y, & incond_dir, incond_file use ice_exit, only: abort_ice use ice_itd, only: kitd, kcatbound @@ -127,12 +129,13 @@ subroutine input_data dt, npt, ndtd, & runtype, runid, bfbflag, & ice_ic, restart, restart_dir, restart_file, & - restart_ext, use_restart_time, restart_format, lcdf64, & + restart_ext, use_restart_time, restart_format, & pointer_file, dumpfreq, dumpfreq_n, dump_last, & diagfreq, diag_type, diag_file, & print_global, print_points, latpnt, lonpnt, & dbug, histfreq, histfreq_n, hist_avg, & - history_dir, history_file, & + history_dir, history_file, history_deflate_level, & + history_parallel_io, history_chunksize_x, history_chunksize_y, & write_ic, incond_dir, incond_file namelist /grid_nml/ & @@ -222,6 +225,14 @@ subroutine input_data hist_avg = .true. ! if true, write time-averages (not snapshots) history_dir = './' ! write to executable dir for default history_file = 'iceh' ! history file name prefix + history_deflate_level = -1 ! Deflate/compression level to use when + ! writing netCDF4 history files, -1 + ! means no deflation + history_parallel_io = .false. ! Use NetCDF parallel IO to write out history files + history_chunksize_x = -1 ! NetCDF chunksize in x/lon dimension. -1 + ! means use default selected by NetCDF library + history_chunksize_y = -1 ! NetCDF chunksize in y/lat dimension + ! means use default selected by NetCDF library write_ic = .false. ! write out initial condition incond_dir = history_dir ! write to history dir for default incond_file = 'iceh_ic'! file prefix @@ -235,7 +246,6 @@ subroutine input_data use_restart_time = .true. ! if true, use time info written in file pointer_file = 'ice.restart_file' restart_format = 'nc' ! file format ('bin'=binary or 'nc'=netcdf or 'pio') - lcdf64 = .false. ! 64 bit offset for netCDF ice_ic = 'default' ! latitude and sst-dependent grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) grid_type = 'rectangular' ! 
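The new history options are read with the rest of the setup namelist in input_data and broadcast to every PE; -1 leaves compression off and lets the netCDF library pick chunk shapes, and history_parallel_io additionally requires equal_num_blocks_per_cpu = .true. in domain_nml. An illustrative fragment of the namelist input (group name and values here are examples only, not recommendations):

&setup_nml
  history_dir           = './HISTORY/'
  history_file          = 'iceh'
  history_deflate_level = 1
  history_chunksize_x   = 180
  history_chunksize_y   = 150
  history_parallel_io   = .false.
/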
define rectangular grid internally @@ -740,6 +750,10 @@ subroutine input_data call broadcast_scalar(hist_avg, master_task) call broadcast_scalar(history_dir, master_task) call broadcast_scalar(history_file, master_task) + call broadcast_scalar(history_deflate_level, master_task) + call broadcast_scalar(history_parallel_io, master_task) + call broadcast_scalar(history_chunksize_x, master_task) + call broadcast_scalar(history_chunksize_y, master_task) call broadcast_scalar(write_ic, master_task) call broadcast_scalar(incond_dir, master_task) call broadcast_scalar(incond_file, master_task) @@ -752,7 +766,6 @@ subroutine input_data call broadcast_scalar(restart_ext, master_task) call broadcast_scalar(use_restart_time, master_task) call broadcast_scalar(restart_format, master_task) - call broadcast_scalar(lcdf64, master_task) call broadcast_scalar(pointer_file, master_task) call broadcast_scalar(ice_ic, master_task) call broadcast_scalar(grid_format, master_task) @@ -910,6 +923,8 @@ subroutine input_data trim(history_dir) write(nu_diag,*) ' history_file = ', & trim(history_file) + write(nu_diag,*) ' history_deflate_level = ', & + history_deflate_level if (write_ic) then write (nu_diag,*) 'Initial condition will be written in ', & trim(incond_dir) @@ -924,8 +939,6 @@ subroutine input_data write(nu_diag,*) ' restart_ext = ', restart_ext write(nu_diag,*) ' restart_format = ', & trim(restart_format) - write(nu_diag,*) ' lcdf64 = ', & - lcdf64 write(nu_diag,*) ' restart_file = ', & trim(restart_file) write(nu_diag,*) ' pointer_file = ', & diff --git a/source/ice_restart_driver.F90 b/source/ice_restart_driver.F90 index 764caaf5..a6b3c939 100755 --- a/source/ice_restart_driver.F90 +++ b/source/ice_restart_driver.F90 @@ -21,7 +21,7 @@ module ice_restart_driver use ice_kinds_mod use ice_restart_shared, only: & restart, restart_ext, restart_dir, restart_file, pointer_file, & - runid, runtype, use_restart_time, restart_format, lcdf64, lenstr + runid, runtype, use_restart_time, restart_format, lenstr use ice_restart #ifdef AusCOM use cpl_parameters, only: runtime0 diff --git a/source/ice_restart_shared.F90 b/source/ice_restart_shared.F90 index 93061f26..087dc965 100755 --- a/source/ice_restart_shared.F90 +++ b/source/ice_restart_shared.F90 @@ -27,8 +27,6 @@ module ice_restart_shared character (len=char_len), public :: & restart_format ! format of restart files 'nc' - logical (kind=log_kind), public :: lcdf64 - !======================================================================= contains From c3437aa63fc2237413bb368125281c0a1c6c5915 Mon Sep 17 00:00:00 2001 From: Spencer Wong <88933912+blimlim@users.noreply.github.com> Date: Wed, 30 Jul 2025 09:35:14 +1000 Subject: [PATCH 39/52] ESM1.6: Abort on failed namelist open (#62) * Abort if failed to open input_ice.nml --- drivers/access/cpl_parameters.F90 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/access/cpl_parameters.F90 b/drivers/access/cpl_parameters.F90 index 1b4d2dcc..70c4c687 100644 --- a/drivers/access/cpl_parameters.F90 +++ b/drivers/access/cpl_parameters.F90 @@ -207,14 +207,16 @@ subroutine get_cpl_timecontrol ! all processors read the namelist-- call get_fileunit(nu_nml) -open(unit=nu_nml,file="input_ice.nml",form="formatted",status="old",iostat=nml_error) +open(unit=nu_nml,file="input_ice.nml",form="formatted",status="old",iostat=nml_error, iomsg=errstr) ! if (my_task == master_task) then write(ice_stdout,*)'CICE: input_ice.nml opened at unit = ', nu_nml endif ! 
if (nml_error /= 0) then - nml_error = -1 + write(tmpstr, '(a,i3,a)') 'CICE: ERROR failed to open input_ice.nml. Error code: ', nml_error, & + ' - ' // trim(errstr) + call abort_ice(trim(tmpstr)) else nml_error = 1 endif From bce3629b7b11df704e2a632392ec139023e95ff1 Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Thu, 7 Aug 2025 16:47:10 +1000 Subject: [PATCH 40/52] Support multiple history timesteps per file (#59) The file names now follow ocean convention: e.g. iceh-daily_0786-01.nc instead of iceh_d.0786-01.nc --- io_netcdf/ice_history_write.F90 | 580 ++++++++++++++++++-------------- source/ice_calendar.F90 | 5 +- source/ice_history_shared.F90 | 102 +++--- source/ice_init.F90 | 21 +- 4 files changed, 400 insertions(+), 308 deletions(-) diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index 73dfab72..75014d23 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -38,6 +38,7 @@ module ice_history_write use ice_history_shared use ice_itd, only: hin_max use ice_calendar, only: write_ic, histfreq + use ice_fileunits, only: nu_diag, ice_stderr, ice_stdout implicit none @@ -74,7 +75,11 @@ subroutine check(status, msg) character(len=*), intent (in) :: msg if(status /= nf90_noerr) then - call abort_ice('ice: NetCDF error '//trim(nf90_strerror(status)//' '//trim(msg))) + !sometimes the netcdf error string is quit long, so print seperately to prevent overrun + write(nu_diag,*) trim(nf90_strerror(status)) + write (ice_stdout,*) trim(nf90_strerror(status)) + write (ice_stderr,*) trim(nf90_strerror(status)) + call abort_ice('ice: NetCDF error '//trim(msg)) end if end subroutine check @@ -87,61 +92,30 @@ end subroutine check subroutine ice_write_hist (ns) - use ice_calendar, only: time, sec, idate, idate0, & + use ice_calendar, only: time, sec, idate, idate0, & #ifdef ACCESS month, daymo, & #endif dayyr, days_per_year, use_leap_years - use ice_fileunits, only: nu_diag - use ice_restart_shared, only: runid - integer (kind=int_kind), intent(in) :: ns + integer (kind=int_kind), intent(in) :: ns !history stream number ! local variables - real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 - real (kind=real_kind), dimension(:,:), allocatable :: work_gr - real (kind=real_kind), dimension(:,:,:), allocatable :: work_gr3 - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & - work1 - - integer (kind=int_kind) :: i,k,ic,n,nn, & - ncid,status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid,varid, & - nvertexid,ivertex - integer (kind=int_kind), dimension(3) :: dimid - integer (kind=int_kind), dimension(4) :: dimidz - integer (kind=int_kind), dimension(5) :: dimidcz - integer (kind=int_kind), dimension(3) :: dimid_nverts - integer (kind=int_kind), dimension(4) :: dimidex - real (kind=real_kind) :: ltime - character (char_len) :: title - character (char_len_long) :: ncfile(max_nstrm) - - integer (kind=int_kind) :: shuffle, deflate, deflate_level - - integer (kind=int_kind) :: ind,boundid - - character (char_len) :: start_time,current_date,current_time - character (len=8) :: cdate + real (kind=real_kind) :: ltime !history timestamp in seconds + character (char_len_long) :: ncfile(max_nstrm) !filenames + character (char_len) :: time_string !model time for logging + logical :: file_exists + integer (kind=int_kind) :: & + ncid, & ! netcdf id + varid, & + i_time, & ! time index + timid ! 
time var id TYPE(req_attributes), dimension(nvar) :: var TYPE(coord_attributes), dimension(ncoord) :: coord_var TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts TYPE(coord_attributes), dimension(nvarz) :: var_nz - CHARACTER (char_len), dimension(ncoord) :: coord_bounds - - ! We leave shuffle at 0, this is only useful for integer data. - shuffle = 0 - - ! If history_deflate_level < 0 then don't do deflation, - ! otherwise it sets the deflate level - if (history_deflate_level < 0) then - deflate = 0 - deflate_level = 0 - else - deflate = 1 - deflate_level = history_deflate_level - endif if (my_task == master_task .or. history_parallel_io) then #if defined(ACCESS) @@ -161,7 +135,7 @@ subroutine ice_write_hist (ns) ltime=time/int(secday) #endif - call construct_filename(ncfile(ns),'nc',ns) + call construct_filename(ncfile(ns),'nc',ns,time_string) ! add local directory path name to ncfile if (write_ic) then @@ -170,97 +144,244 @@ subroutine ice_write_hist (ns) ncfile(ns) = trim(history_dir)//ncfile(ns) endif - ! create file - if (history_parallel_io) then - call check(nf90_create(ncfile(ns), ior(NF90_NETCDF4, NF90_MPIIO), ncid, & - comm=MPI_COMM_ICE, info=MPI_INFO_NULL), & - 'create history ncfile '//ncfile(ns)) - if (.not. equal_num_blocks_per_cpu) then - call abort_ice('ice: error history_parallel_io needs equal_num_blocks_per_cpu') - endif + inquire(file=trim(ncfile(ns)),exist=file_exists) + if (.not. file_exists) then + call ice_hist_create(ns, ncfile(ns), ncid, var, coord_var, var_nverts, var_nz) + write(nu_diag,*) 'Created:'//trim(ncfile(ns)) else - call check(nf90_create(ncfile(ns), ior(NF90_CLASSIC_MODEL, NF90_HDF5), ncid), & - 'create history ncfile '//ncfile(ns)) + if (history_parallel_io) then + call check(nf90_open(trim(ncfile(ns)), NF90_WRITE, ncid, & + comm=MPI_COMM_ICE, info=MPI_INFO_NULL), & + 'parallel open existing history file '//ncfile(ns)) + else + call check(nf90_open(trim(ncfile(ns)), NF90_WRITE, ncid), & + "opening existing history file "//ncfile(ns)) + endif + endif + + !----------------------------------------------------------------- + ! write time variable + !----------------------------------------------------------------- + call check(nf90_inq_dimid(ncid, 'time', timid), & + 'inq dimid time') + call check(nf90_inquire_dimension(ncid, timid, len=i_time), & + 'inquire dim time') + call check(nf90_inq_varid(ncid,'time',varid), & + 'inq varid time') + if (history_parallel_io) then + ! unlimited dimensions need to have collective access set + call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & + 'parallel access time') endif + i_time = i_time + 1 ! index of the current history time + call check(nf90_put_var(ncid,varid,ltime,start=(/i_time/)), & + 'put var time') !----------------------------------------------------------------- - ! define dimensions + ! write time_bounds info !----------------------------------------------------------------- if (hist_avg) then - call check(nf90_def_dim(ncid,'d2',2,boundid), 'def dim d2') + call check(nf90_inq_varid(ncid,'time_bounds',varid), & + 'inq varid time_bounds') + if (history_parallel_io) then + call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & + 'parallel access time_bounds') + endif + call check(nf90_put_var(ncid,varid,time_beg(ns),start=(/1,i_time/)), & + 'put var time_bounds beginning') + call check(nf90_put_var(ncid,varid,time_end(ns),start=(/2,i_time/)), & + 'put var time_bounds end') endif + endif ! 
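
The append logic above boils down to: find the current length of the unlimited time dimension, then write the new record at length + 1. A self-contained sketch of that pattern (illustrative only; the production code routes every call through check() and adds the collective-access call shown above):

    subroutine append_time_record(ncid, ltime)
      ! Append one timestamp along the unlimited 'time' dimension of an
      ! already-open netCDF file. Sketch only; no error handling.
      use netcdf
      integer, intent(in) :: ncid    ! id of an open, writable netCDF file
      real,    intent(in) :: ltime   ! timestamp to record
      integer :: timid, varid, i_time, status
      status = nf90_inq_dimid(ncid, 'time', timid)              ! unlimited dimension
      status = nf90_inquire_dimension(ncid, timid, len=i_time)  ! records written so far
      status = nf90_inq_varid(ncid, 'time', varid)
      i_time = i_time + 1                                        ! index of the new record
      status = nf90_put_var(ncid, varid, ltime, start=(/i_time/))
    end subroutine append_time_record
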
master_task or history_parallel_io - call check(nf90_def_dim(ncid, 'ni', nx_global, imtid), & - 'def dim ni') - call check(nf90_def_dim(ncid, 'nj', ny_global, jmtid), & - 'def dim nj') - call check(nf90_def_dim(ncid, 'nc', ncat_hist, cmtid), & - 'def dim nc') - call check(nf90_def_dim(ncid, 'nkice', nzilyr, kmtidi), & - 'def dim nkice') - call check(nf90_def_dim(ncid, 'nksnow', nzslyr, kmtids), & - 'def dim nksnow') - call check(nf90_def_dim(ncid, 'nkbio', nzblyr, kmtidb), & - 'def dim nkbio') - call check(nf90_def_dim(ncid, 'time', NF90_UNLIMITED, timid), & - 'def dim time') - call check(nf90_def_dim(ncid, 'nvertices', nverts, nvertexid), & - 'def dim nverts') + call broadcast_scalar(i_time, master_task) !we need this on every processor for parallel writes + if (i_time == 1) then + ! these variables are time-invariant, only write once per file + ! ice_hist_create is only run on master task, but these variables are distributed, so call on all tasks + !----------------------------------------------------------------- + ! write coordinate variables + !----------------------------------------------------------------- + if (history_parallel_io) then + call write_coordinate_variables_parallel(ncid, coord_var, var_nz) + else + call write_coordinate_variables(ncid, coord_var, var_nz) + endif !----------------------------------------------------------------- - ! define coordinate variables + ! write grid masks, area and rotation angle !----------------------------------------------------------------- + if (history_parallel_io) then + call write_grid_variables_parallel(ncid, var, var_nverts) + else + call write_grid_variables(ncid, var, var_nverts) + endif + + endif + !----------------------------------------------------------------- + ! write 2d variable data + !----------------------------------------------------------------- + + if (history_parallel_io) then + call write_2d_variables_parallel(ns, ncid, i_time) + else + call write_2d_variables(ns, ncid, i_time) + endif + + if (history_parallel_io) then + call write_3d_and_4d_variables_parallel(ns, ncid, i_time) + else + call write_3d_and_4d_variables(ns, ncid, i_time) + endif + + !----------------------------------------------------------------- + ! close output dataset + !----------------------------------------------------------------- + + if (my_task == master_task .or. history_parallel_io) then + call check(nf90_close(ncid), 'closing netCDF history file') + write(nu_diag,*) 'Wrote ',trim(ncfile(ns)),' at time ',trim(time_string) + endif + +end subroutine ice_write_hist + +subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) + + use ice_calendar, only: idate, idate0, & + dayyr, days_per_year, use_leap_years + use ice_restart_shared, only: runid + + integer (kind=int_kind), intent(in) :: ns + character (char_len_long), intent(in) :: ncfile + integer (kind=int_kind), intent(out) :: ncid + TYPE(req_attributes), dimension(nvar), intent(inout) :: var + TYPE(coord_attributes), dimension(ncoord), intent(inout) :: coord_var + TYPE(coord_attributes), dimension(nvar_verts), intent(inout) :: var_nverts + TYPE(coord_attributes), dimension(nvarz), intent(inout) :: var_nz + + ! 
local variables + + integer (kind=int_kind) :: i,k,ic,n,nn, & + status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid,varid, & + nvertexid,ivertex + integer (kind=int_kind), dimension(3) :: dimid, dimid_nverts + integer (kind=int_kind), dimension(4) :: dimidz, dimidex + integer (kind=int_kind), dimension(5) :: dimidcz + + integer (kind=int_kind) :: shuffle, deflate, deflate_level ! comrpession settings + + integer (kind=int_kind) :: ind,boundid + + character (char_len) :: title, start_time,current_date,current_time + character (len=8) :: cdate + + + CHARACTER (char_len), dimension(ncoord) :: coord_bounds + + ! We leave shuffle at 0, this is only useful for integer data. + shuffle = 0 + + ! If history_deflate_level < 0 then don't do deflation, + ! otherwise it sets the deflate level + if (history_deflate_level < 0) then + deflate = 0 + deflate_level = 0 + else + deflate = 1 + deflate_level = history_deflate_level + endif + + ! create file + if (history_parallel_io) then + call check(nf90_create(ncfile, ior(NF90_NETCDF4, NF90_MPIIO), ncid, & + comm=MPI_COMM_ICE, info=MPI_INFO_NULL), & + 'create history ncfile '//ncfile) + if (.not. equal_num_blocks_per_cpu) then + call abort_ice('ice: error history_parallel_io needs equal_num_blocks_per_cpu') + endif + else + call check(nf90_create(ncfile, ior(NF90_CLASSIC_MODEL, NF90_HDF5), ncid), & + 'create history ncfile '//ncfile) + endif + + !----------------------------------------------------------------- + ! define dimensions + !----------------------------------------------------------------- - call check(nf90_def_var(ncid,'time',nf90_float,timid,varid), & - 'def var time') - call check(nf90_put_att(ncid,varid,'long_name','model time'), & - 'put_att long_name') + if (hist_avg) then + call check(nf90_def_dim(ncid,'d2',2,boundid), 'def dim d2') + endif + + call check(nf90_def_dim(ncid, 'ni', nx_global, imtid), & + 'def dim ni') + call check(nf90_def_dim(ncid, 'nj', ny_global, jmtid), & + 'def dim nj') + call check(nf90_def_dim(ncid, 'nc', ncat_hist, cmtid), & + 'def dim nc') + call check(nf90_def_dim(ncid, 'nkice', nzilyr, kmtidi), & + 'def dim nkice') + call check(nf90_def_dim(ncid, 'nksnow', nzslyr, kmtids), & + 'def dim nksnow') + call check(nf90_def_dim(ncid, 'nkbio', nzblyr, kmtidb), & + 'def dim nkbio') + call check(nf90_def_dim(ncid, 'time', NF90_UNLIMITED, timid), & + 'def dim time') + call check(nf90_def_dim(ncid, 'nvertices', nverts, nvertexid), & + 'def dim nverts') + + !----------------------------------------------------------------- + ! define coordinate variables + !----------------------------------------------------------------- + + call check(nf90_def_var(ncid,'time',nf90_float,timid,varid), & + 'def var time') + call check(nf90_put_att(ncid,varid,'long_name','model time'), & + 'put_att long_name') + + write(cdate,'(i8.8)') idate0 + write(title,'(a,a,a,a,a,a,a,a)') 'days since ', & + cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' 00:00:00' + call check(nf90_put_att(ncid,varid,'units',title), & + 'put_att time units') + + if (days_per_year == 360) then + call check(nf90_put_att(ncid,varid,'calendar','360_day'), & + 'att time calendar') + elseif (days_per_year == 365 .and. 
.not.use_leap_years ) then + call check(nf90_put_att(ncid,varid,'calendar','NoLeap'), & + 'att time calendar') + elseif (use_leap_years) then + call check(nf90_put_att(ncid,varid,'calendar','proleptic_gregorian'), & + 'att time calendar') + else + call abort_ice( 'ice Error: invalid calendar settings') + endif + + if (hist_avg) then + call check(nf90_put_att(ncid,varid,'bounds','time_bounds'), & + 'att time bounds') + endif + + !----------------------------------------------------------------- + ! Define attributes for time bounds if hist_avg is true + !----------------------------------------------------------------- + + if (hist_avg) then + dimid(1) = boundid + dimid(2) = timid + call check(nf90_def_var(ncid, 'time_bounds', & + nf90_float,dimid(1:2),varid), & + 'def var time_bounds') + call check(nf90_put_att(ncid,varid,'long_name', & + 'boundaries for time-averaging interval'), & + 'att time_bounds long_name') write(cdate,'(i8.8)') idate0 write(title,'(a,a,a,a,a,a,a,a)') 'days since ', & cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' 00:00:00' call check(nf90_put_att(ncid,varid,'units',title), & - 'put_att time units') - - if (days_per_year == 360) then - call check(nf90_put_att(ncid,varid,'calendar','360_day'), & - 'att time calendar') - elseif (days_per_year == 365 .and. .not.use_leap_years ) then - call check(nf90_put_att(ncid,varid,'calendar','NoLeap'), & - 'att time calendar') - elseif (use_leap_years) then - call check(nf90_put_att(ncid,varid,'calendar','proleptic_gregorian'), & - 'att time calendar') - else - call abort_ice( 'ice Error: invalid calendar settings') - endif - - if (hist_avg) then - call check(nf90_put_att(ncid,varid,'bounds','time_bounds'), & - 'att time bounds') - endif - - !----------------------------------------------------------------- - ! Define attributes for time bounds if hist_avg is true - !----------------------------------------------------------------- - - if (hist_avg) then - dimid(1) = boundid - dimid(2) = timid - call check(nf90_def_var(ncid, 'time_bounds', & - nf90_float,dimid(1:2),varid), & - 'def var time_bounds') - - call check(nf90_put_att(ncid,varid,'long_name', & - 'boundaries for time-averaging interval'), & - 'att time_bounds long_name') - write(cdate,'(i8.8)') idate0 - write(title,'(a,a,a,a,a,a,a,a)') 'days since ', & - cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' 00:00:00' - call check(nf90_put_att(ncid,varid,'units',title), & - 'att time_bounds units') - endif + 'att time_bounds units') + endif !----------------------------------------------------------------- ! define information for required time-invariant variables @@ -969,14 +1090,10 @@ subroutine ice_write_hist (ns) call check(nf90_put_att(ncid,nf90_global,'comment',title), & 'global attribute comment') - write(title,'(a,i8.8)') 'File written on model date ',idate + write(title,'(a,i8.8)') 'File started on model date ',idate call check(nf90_put_att(ncid,nf90_global,'comment2',title), & 'global attribute comment2') - write(title,'(a,i6)') 'seconds elapsed into model date: ',sec - call check(nf90_put_att(ncid,nf90_global,'comment3',title), & - 'global attribute comment3') - title = 'CF-1.0' call check(nf90_put_att(ncid,nf90_global,'conventions',title), & 'global attribute conventions') @@ -994,88 +1111,14 @@ subroutine ice_write_hist (ns) call check(nf90_put_att(ncid,nf90_global,'io_flavor','io_netcdf'), & 'global attribute io_flavor') - !----------------------------------------------------------------- - ! 
end define mode - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! end define mode + !----------------------------------------------------------------- call check(nf90_enddef(ncid), 'enddef') - !----------------------------------------------------------------- - ! write time variable - !----------------------------------------------------------------- - - call check(nf90_inq_varid(ncid,'time',varid), & - 'inq varid time') - if (history_parallel_io) then - ! unlimited dimensions need to have collective access set - call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & - 'parallel access time') - endif - call check(nf90_put_var(ncid,varid,ltime), & - 'put var time') +end subroutine ice_hist_create - !----------------------------------------------------------------- - ! write time_bounds info - !----------------------------------------------------------------- - - if (hist_avg) then - call check(nf90_inq_varid(ncid,'time_bounds',varid), & - 'inq varid time_bounds') - call check(nf90_put_var(ncid,varid,time_beg(ns),start=(/1/)), & - 'put var time_bounds beginning') - call check(nf90_put_var(ncid,varid,time_end(ns),start=(/2/)), & - 'put var time_bounds end') - endif - endif ! master_task or history_parallel_io - - !----------------------------------------------------------------- - ! write coordinate variables - !----------------------------------------------------------------- - - if (history_parallel_io) then - call write_coordinate_variables_parallel(ncid, coord_var, var_nz) - else - call write_coordinate_variables(ncid, coord_var, var_nz) - endif - - !----------------------------------------------------------------- - ! write grid masks, area and rotation angle - !----------------------------------------------------------------- - - if (history_parallel_io) then - call write_grid_variables_parallel(ncid, var, var_nverts) - else - call write_grid_variables(ncid, var, var_nverts) - endif - - - !----------------------------------------------------------------- - ! write 2d variable data - !----------------------------------------------------------------- - - if (history_parallel_io) then - call write_2d_variables_parallel(ns, ncid) - else - call write_2d_variables(ns, ncid) - endif - - if (history_parallel_io) then - call write_3d_and_4d_variables_parallel(ns, ncid) - else - call write_3d_and_4d_variables(ns, ncid) - endif - - !----------------------------------------------------------------- - ! close output dataset - !----------------------------------------------------------------- - - if (my_task == master_task .or. 
history_parallel_io) then - call check(nf90_close(ncid), 'closing netCDF history file') - write(nu_diag,*) ' ' - write(nu_diag,*) 'Finished writing ',trim(ncfile(ns)) - endif - -end subroutine ice_write_hist subroutine write_coordinate_variables(ncid, coord_var, var_nz) @@ -1101,8 +1144,8 @@ subroutine write_coordinate_variables(ncid, coord_var, var_nz) work_g1(:,:) = c0 - do i = 1,ncoord - coord_var_name = coord_var(i)%short_name + do i = 1,ncoord + if (my_task == master_task) coord_var_name = coord_var(i)%short_name call broadcast_scalar(coord_var_name, master_task) SELECT CASE (coord_var_name) CASE ('TLON') @@ -1121,9 +1164,9 @@ subroutine write_coordinate_variables(ncid, coord_var, var_nz) work1 = ULAT*rad_to_deg call gather_global(work_g1,work1,master_task,distrb_info) END SELECT - + if (my_task == master_task) then - work_gr = work_g1 + work_gr = work_g1 call check(nf90_inq_varid(ncid, coord_var_name, varid), & 'inq varid '//coord_var_name) call check(nf90_put_var(ncid,varid,work_gr), & @@ -1134,12 +1177,13 @@ subroutine write_coordinate_variables(ncid, coord_var, var_nz) ! Extra dimensions (NCAT, VGRD*) do i = 1, nvarz + if (my_task == master_task) coord_var_name = var_nz(i)%short_name if (igrdz(i)) then - call broadcast_scalar(var_nz(i)%short_name,master_task) + call broadcast_scalar(coord_var_name,master_task) if (my_task == master_task) then - call check(nf90_inq_varid(ncid, var_nz(i)%short_name, varid), & - 'inq varid '//var_nz(i)%short_name) - SELECT CASE (var_nz(i)%short_name) + call check(nf90_inq_varid(ncid, coord_var_name, varid), & + 'inq varid '//coord_var_name) + SELECT CASE (coord_var_name) CASE ('NCAT') status = nf90_put_var(ncid,varid,hin_max(1:ncat_hist)) CASE ('VGRDi') ! index - needed for Met Office analysis code @@ -1150,7 +1194,7 @@ subroutine write_coordinate_variables(ncid, coord_var, var_nz) status = nf90_put_var(ncid,varid,(/(k, k=1,nzblyr)/)) END SELECT if (status /= nf90_noerr) call abort_ice( & - 'ice: Error writing'//var_nz(i)%short_name) + 'ice: Error writing'//coord_var_name) endif endif enddo @@ -1161,7 +1205,6 @@ subroutine write_coordinate_variables(ncid, coord_var, var_nz) end subroutine write_coordinate_variables - subroutine write_grid_variables(ncid, var, var_nverts) integer, intent(in) :: ncid @@ -1216,10 +1259,9 @@ subroutine write_grid_variables(ncid, var, var_nverts) do i = 3, nvar ! 
note n_tmask=1, n_blkmask=2 if (igrd(i)) then - var_name = var(i)%req%short_name - - call broadcast_scalar(var_name,master_task) - SELECT CASE (var_name) + if (my_task == master_task) var_name = var(i)%req%short_name + call broadcast_scalar(var_name,master_task) + SELECT CASE (var_name) CASE ('tarea') call gather_global(work_g1, tarea, master_task, distrb_info) CASE ('uarea') @@ -1261,9 +1303,9 @@ subroutine write_grid_variables(ncid, var, var_nverts) work1 (:,:,:) = c0 do i = 1, nvar_verts - var_nverts_name = var_nverts(i)%short_name - call broadcast_scalar(var_nverts_name,master_task) - SELECT CASE (var_nverts_name) + if (my_task == master_task) var_nverts_name = var_nverts(i)%short_name + call broadcast_scalar(var_nverts_name,master_task) + SELECT CASE (var_nverts_name) CASE ('lont_bounds') do ivertex = 1, nverts work1(:,:,:) = lont_bounds(ivertex,:,:,:) @@ -1306,10 +1348,9 @@ subroutine write_grid_variables(ncid, var, var_nverts) end subroutine write_grid_variables -subroutine write_2d_variables(ns, ncid) +subroutine write_2d_variables(ns, ncid, i_time) - integer, intent(in) :: ns - integer, intent(in) :: ncid + integer, intent(in) :: ns, ncid, i_time real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 real (kind=real_kind), dimension(:,:), allocatable :: work_gr @@ -1337,6 +1378,7 @@ subroutine write_2d_variables(ns, ncid) call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & 'inq varid '//avail_hist_fields(n)%vname) call check(nf90_put_var(ncid,varid,work_gr(:,:), & + start=(/1,1,i_time/), & count=(/nx_global,ny_global/)), & 'put var '//avail_hist_fields(n)%vname) endif @@ -1349,10 +1391,9 @@ subroutine write_2d_variables(ns, ncid) end subroutine write_2d_variables -subroutine write_3d_and_4d_variables(ns, ncid) +subroutine write_3d_and_4d_variables(ns, ncid, i_time) - integer, intent(in) :: ns - integer, intent(in) :: ncid + integer, intent(in) :: ns, ncid, i_time real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 real (kind=real_kind), dimension(:,:), allocatable :: work_gr @@ -1385,7 +1426,7 @@ subroutine write_3d_and_4d_variables(ns, ncid) if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & - start=(/ 1, 1, k/), & + start=(/1,1,k,i_time/), & count=(/nx_global,ny_global, 1/)), & 'put var '//avail_hist_fields(n)%vname) endif @@ -1410,7 +1451,7 @@ subroutine write_3d_and_4d_variables(ns, ncid) if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & - start=(/ 1, 1,k/), & + start=(/1,1,k,i_time/), & count=(/nx_global,ny_global,1/)), & 'put var '//avail_hist_fields(n)%vname) endif @@ -1435,7 +1476,7 @@ subroutine write_3d_and_4d_variables(ns, ncid) if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & - start=(/ 1, 1,k/), & + start=(/1,1,k,i_time/), & count=(/nx_global,ny_global,1/)), & 'put var '//avail_hist_fields(n)%vname) endif @@ -1460,7 +1501,7 @@ subroutine write_3d_and_4d_variables(ns, ncid) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & - start=(/ 1, 1,k,ic/), & + start=(/1,1,k,ic,i_time/), & count=(/nx_global,ny_global,1, 1/)), & 'put var '//avail_hist_fields(n)%vname) endif @@ -1486,7 +1527,7 @@ subroutine write_3d_and_4d_variables(ns, ncid) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & - start=(/ 1, 1,k,ic/), & + start=(/1,1,k,ic,i_time/), & count=(/nx_global,ny_global,1, 1/)), & 'put var '//avail_hist_fields(n)%vname) 
endif @@ -1512,7 +1553,7 @@ subroutine write_3d_and_4d_variables(ns, ncid) work_gr(:,:) = work_g1(:,:) if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & - start=(/ 1, 1,k,ic/), & + start=(/1,1,k,ic,i_time/), & count=(/nx_global,ny_global,1, 1/)), & 'put var '//avail_hist_fields(n)%vname) endif @@ -1552,7 +1593,7 @@ subroutine write_coordinate_variables_parallel(ncid, coord_var, var_nz) work1 = ULAT*rad_to_deg END SELECT - call put_2d_with_blocks(ncid, coord_var(i)%short_name, work1) + call put_2d_with_blocks(ncid, 1, coord_var(i)%short_name, work1) enddo ! Extra dimensions (NCAT, VGRD*) @@ -1560,6 +1601,8 @@ subroutine write_coordinate_variables_parallel(ncid, coord_var, var_nz) if (igrdz(i)) then call check(nf90_inq_varid(ncid, var_nz(i)%short_name, varid), & 'inq_varid '//var_nz(i)%short_name) + call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & + 'parallel access '//var_nz(i)%short_name) SELECT CASE (var_nz(i)%short_name) CASE ('NCAT') call check(nf90_put_var(ncid, varid, hin_max(1:ncat_hist)), & @@ -1598,11 +1641,11 @@ subroutine write_grid_variables_parallel(ncid, var, var_nverts) integer :: varid if (igrd(n_tmask)) then - call put_2d_with_blocks(ncid, 'tmask', hm) + call put_2d_with_blocks(ncid, 1, 'tmask', hm) endif if (igrd(n_blkmask)) then - call put_2d_with_blocks(ncid, 'blkmask', bm) + call put_2d_with_blocks(ncid, 1, 'blkmask', bm) endif do i = 3, nvar ! note n_tmask=1, n_blkmask=2 @@ -1630,7 +1673,7 @@ subroutine write_grid_variables_parallel(ncid, var, var_nverts) work1 = ANGLET END SELECT - call put_2d_with_blocks(ncid, var(i)%req%short_name, work1) + call put_2d_with_blocks(ncid, 1, var(i)%req%short_name, work1) endif enddo @@ -1653,6 +1696,8 @@ subroutine write_grid_variables_parallel(ncid, var, var_nverts) call check(nf90_inq_varid(ncid, var_nverts(i)%short_name, varid), & 'inq varid '//var_nverts(i)%short_name) + call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & + 'parallel access '//var_nverts(i)%short_name) do iblk=1, nblocks the_block = get_block(blocks_ice(iblk), iblk) @@ -1679,17 +1724,16 @@ subroutine write_grid_variables_parallel(ncid, var, var_nverts) end subroutine write_grid_variables_parallel -subroutine write_2d_variables_parallel(ns, ncid) +subroutine write_2d_variables_parallel(ns, ncid, i_time) - integer, intent(in) :: ns - integer, intent(in) :: ncid + integer, intent(in) :: ns, ncid, i_time integer :: varid integer :: n do n=1, num_avail_hist_fields_2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call put_2d_with_blocks(ncid, avail_hist_fields(n)%vname, & + call put_2d_with_blocks(ncid, i_time, avail_hist_fields(n)%vname, & a2D(:, :, n, :)) endif enddo ! num_avail_hist_fields_2D @@ -1698,10 +1742,9 @@ end subroutine write_2d_variables_parallel -subroutine write_3d_and_4d_variables_parallel(ns, ncid) +subroutine write_3d_and_4d_variables_parallel(ns, ncid, i_time) - integer, intent(in) :: ns - integer, intent(in) :: ncid + integer, intent(in) :: ns, ncid, i_time integer :: varid integer :: n, nn, k, ic @@ -1709,7 +1752,7 @@ subroutine write_3d_and_4d_variables_parallel(ns, ncid) do n = n2D + 1, n3Dccum nn = n - n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call put_3d_with_blocks(ncid, avail_hist_fields(n)%vname, & + call put_3d_with_blocks(ncid, i_time, avail_hist_fields(n)%vname, & ncat_hist, a3Dc(:, :, :, nn, :)) endif enddo ! 
num_avail_hist_fields_3Dc @@ -1718,7 +1761,7 @@ subroutine write_3d_and_4d_variables_parallel(ns, ncid) do n = n3Dccum+1, n3Dzcum nn = n - n3Dccum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call put_3d_with_blocks(ncid, avail_hist_fields(n)%vname, & + call put_3d_with_blocks(ncid, i_time, avail_hist_fields(n)%vname, & nzilyr, a3Dz(:, :, :, nn, :)) endif enddo ! num_avail_hist_fields_3Dz @@ -1727,7 +1770,7 @@ subroutine write_3d_and_4d_variables_parallel(ns, ncid) do n = n3Dzcum+1, n3Dbcum nn = n - n3Dzcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call put_3d_with_blocks(ncid, avail_hist_fields(n)%vname, & + call put_3d_with_blocks(ncid, i_time, avail_hist_fields(n)%vname, & nzblyr, a3Db(:, :, :, nn, :)) endif enddo ! num_avail_hist_fields_3Db @@ -1736,7 +1779,7 @@ subroutine write_3d_and_4d_variables_parallel(ns, ncid) do n = n3Dbcum+1, n4Dicum nn = n - n3Dbcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call put_4d_with_blocks(ncid, avail_hist_fields(n)%vname, & + call put_4d_with_blocks(ncid, i_time, avail_hist_fields(n)%vname, & nzilyr, ncat_hist, a4Di(:, :, :, :, nn, :)) endif enddo ! num_avail_hist_fields_4Di @@ -1745,7 +1788,7 @@ subroutine write_3d_and_4d_variables_parallel(ns, ncid) do n = n4Dicum+1, n4Dscum nn = n - n4Dicum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call put_4d_with_blocks(ncid, avail_hist_fields(n)%vname, & + call put_4d_with_blocks(ncid, i_time, avail_hist_fields(n)%vname, & nzslyr, ncat_hist, a4Ds(:, :, :, :, nn, :)) endif enddo ! num_avail_hist_fields_4Ds @@ -1753,7 +1796,7 @@ subroutine write_3d_and_4d_variables_parallel(ns, ncid) do n = n4Dscum+1, n4Dbcum nn = n - n4Dscum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call put_4d_with_blocks(ncid, avail_hist_fields(n)%vname, & + call put_4d_with_blocks(ncid, i_time, avail_hist_fields(n)%vname, & nzblyr, ncat_hist, a4Db(:, :, :, :, nn, :)) endif enddo ! num_avail_hist_fields_4Db @@ -1761,20 +1804,26 @@ subroutine write_3d_and_4d_variables_parallel(ns, ncid) end subroutine write_3d_and_4d_variables_parallel -subroutine put_2d_with_blocks(ncid, var_name, data) +subroutine put_2d_with_blocks(ncid, i_start, var_name, data) - integer, intent(in) :: ncid + ! by convention only, 2d variables are actually 3d if you consider time + ! sometimes the third array is a different index (e.g. number of bounds ) + ! 
typically i_start is the current time index, but can be different + + integer, intent(in) :: ncid, i_start character(len=*), intent(in) :: var_name real(kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: data integer :: varid integer :: iblk integer :: ilo, jlo, ihi, jhi, gilo, gjlo, gihi, gjhi - integer, dimension(2) :: start, count + integer, dimension(3) :: start, count type(block) :: the_block call check(nf90_inq_varid(ncid, var_name, varid), & 'inq varid for '//var_name) + call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & + 'parallel access '//var_name) do iblk=1, nblocks the_block = get_block(blocks_ice(iblk), iblk) @@ -1788,8 +1837,8 @@ subroutine put_2d_with_blocks(ncid, var_name, data) gihi = the_block%i_glob(ihi) gjhi = the_block%j_glob(jhi) - start = (/ gilo, gjlo /) - count = (/ gihi - gilo + 1, gjhi - gjlo + 1 /) + start = (/ gilo, gjlo,i_start /) + count = (/ gihi - gilo + 1, gjhi - gjlo + 1 , 1/) call check(nf90_put_var(ncid, varid, real(data(ilo:ihi, jlo:jhi, iblk)), & start=start, count=count), & 'put_2d_with_blocks put '//trim(var_name)) @@ -1797,20 +1846,25 @@ subroutine put_2d_with_blocks(ncid, var_name, data) end subroutine put_2d_with_blocks -subroutine put_3d_with_blocks(ncid, var_name, len_3dim, data) +subroutine put_3d_with_blocks(ncid, i_time, var_name, len_3dim, data) - integer, intent(in) :: ncid, len_3dim + ! by convention only, 3d variables are actually 4d if you consider time + + + integer, intent(in) :: ncid, i_time, len_3dim character(len=*), intent(in) :: var_name real(kind=dbl_kind), dimension(nx_block, ny_block, len_3dim, max_blocks), intent(in) :: data integer :: varid integer :: iblk integer :: ilo, jlo, ihi, jhi, gilo, gjlo, gihi, gjhi - integer, dimension(3) :: start, count + integer, dimension(4) :: start, count type(block) :: the_block call check(nf90_inq_varid(ncid, var_name, varid), & 'inq varid for '//var_name) + call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & + 'parallel access '//var_name) do iblk=1, nblocks the_block = get_block(blocks_ice(iblk), iblk) @@ -1824,8 +1878,8 @@ subroutine put_3d_with_blocks(ncid, var_name, len_3dim, data) gihi = the_block%i_glob(ihi) gjhi = the_block%j_glob(jhi) - start = (/ gilo, gjlo, 1 /) - count = (/ gihi - gilo + 1, gjhi - gjlo + 1, len_3dim /) + start = (/ gilo, gjlo, 1 , i_time/) + count = (/ gihi - gilo + 1, gjhi - gjlo + 1, len_3dim, 1 /) call check(nf90_put_var(ncid, varid, & real(data(ilo:ihi, jlo:jhi, 1:len_3dim, iblk)), & start=start, count=count), & @@ -1835,9 +1889,11 @@ subroutine put_3d_with_blocks(ncid, var_name, len_3dim, data) end subroutine put_3d_with_blocks -subroutine put_4d_with_blocks(ncid, var_name, len_3dim, len_4dim, data) +subroutine put_4d_with_blocks(ncid, i_time, var_name, len_3dim, len_4dim, data) + ! 
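
In the parallel path each PE writes only its own blocks: the global indices of a block's interior become the netCDF start, the interior extent becomes the count, and the current record index is appended as the last start entry with a count of 1. As an illustrative example (block extents invented for the sketch), a block whose interior occupies global columns 181-240 and rows 91-135, written at time record 7, uses

    start = (/ 181, 91, 7 /)
    count = (/ 240-181+1, 135-91+1, 1 /)   ! i.e. (/ 60, 45, 1 /)

which is the same start/count arithmetic as put_2d_with_blocks above, with additional entries for the layer and category dimensions in the 3d and 4d variants.
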
by convention only, 4d variables are actually 5d if you consider time + - integer, intent(in) :: ncid, len_3dim, len_4dim + integer, intent(in) :: ncid, i_time, len_3dim, len_4dim character(len=*), intent(in) :: var_name real(kind=dbl_kind), dimension(nx_block, ny_block, len_3dim, & len_4dim, max_blocks), intent(in) :: data @@ -1845,11 +1901,13 @@ subroutine put_4d_with_blocks(ncid, var_name, len_3dim, len_4dim, data) integer :: varid integer :: iblk integer :: ilo, jlo, ihi, jhi, gilo, gjlo, gihi, gjhi - integer, dimension(4) :: start, count + integer, dimension(5) :: start, count type(block) :: the_block call check(nf90_inq_varid(ncid, var_name, varid), & 'inq varid for '//var_name) + call check(nf90_var_par_access(ncid, varid, NF90_COLLECTIVE), & + 'parallel access '//var_name) do iblk=1, nblocks the_block = get_block(blocks_ice(iblk), iblk) @@ -1863,8 +1921,8 @@ subroutine put_4d_with_blocks(ncid, var_name, len_3dim, len_4dim, data) gihi = the_block%i_glob(ihi) gjhi = the_block%j_glob(jhi) - start = (/ gilo, gjlo, 1, 1 /) - count = (/ gihi - gilo + 1, gjhi - gjlo + 1, len_3dim, len_4dim /) + start = (/ gilo, gjlo, 1, 1 , i_time/) + count = (/ gihi - gilo + 1, gjhi - gjlo + 1, len_3dim, len_4dim, 1 /) call check(nf90_put_var(ncid, varid, & real(data(ilo:ihi, jlo:jhi, 1:len_3dim, 1:len_4dim, iblk)), & start=start, count=count), & diff --git a/source/ice_calendar.F90 b/source/ice_calendar.F90 index e0c01fbe..fdeb7f58 100755 --- a/source/ice_calendar.F90 +++ b/source/ice_calendar.F90 @@ -111,8 +111,9 @@ module ice_calendar write_history(max_nstrm) ! write history now character (len=1), public :: & - histfreq(max_nstrm), & ! history output frequency, 'y','m','d','h','1' - dumpfreq ! restart frequency, 'y','m','d' + histfreq(max_nstrm), & ! history output frequency, 'y','m','d','h','1' + hist_file_freq(max_nstrm), & ! history output file save frequency, 'y','m','d','h','1' + dumpfreq ! restart frequency, 'y','m','d' character (len=char_len),public :: calendar_type diff --git a/source/ice_history_shared.F90 b/source/ice_history_shared.F90 index 27ecad12..9773aeb2 100755 --- a/source/ice_history_shared.F90 +++ b/source/ice_history_shared.F90 @@ -25,7 +25,6 @@ module ice_history_shared use ice_kinds_mod - use ice_fileunits, only: nu_diag use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, max_nstrm implicit none @@ -594,22 +593,27 @@ module ice_history_shared !======================================================================= - subroutine construct_filename(ncfile,suffix,ns) + subroutine construct_filename(ncfile,suffix,ns,time_string) + + ! construct filenames for history output + ! we follow cosima convention: + ! e.g. ice-1daily-mean_0001-01.nc for daily data in january 0001 use ice_calendar, only: time, sec, nyr, month, daymo, & - mday, write_ic, histfreq, histfreq_n, & + mday, write_ic, histfreq, hist_file_freq, histfreq_n, & year_init, new_year, new_month, new_day, & dt use ice_restart_shared, only: lenstr character (char_len_long), intent(inout) :: ncfile + character (char_len), intent(out), optional :: time_string + character (char_len) :: ldate_string character (len=2), intent(in) :: suffix integer (kind=int_kind), intent(in) :: ns integer (kind=int_kind) :: iyear, imonth, iday, isec - character (len=1) :: cstream - iyear = nyr + year_init - 1 ! set year_init=1 in ice_in to get iyear=nyr + iyear = nyr + year_init - 1 ! set year_init=1 in ice_in to get iyear=nyr imonth = month iday = mday isec = sec - dt @@ -619,9 +623,9 @@ subroutine construct_filename(ncfile,suffix,ns) #endif ! 
construct filename if (write_ic) then - write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & - incond_file(1:lenstr(incond_file)),'.',iyear,'-', & - imonth,'-',iday,'-',isec,'.',suffix + ncfile=incond_file(1:lenstr(incond_file)) + write(ldate_string,'(i4.4,a,i2.2,a,i2.2,a,i5.5)') & + iyear,'-',imonth,'-',iday,'-',sec else if (hist_avg .and. histfreq(ns) /= '1') then @@ -639,42 +643,56 @@ subroutine construct_filename(ncfile,suffix,ns) endif endif - cstream = '' - if (ns > 10) write(cstream,'(i1.1)') ns-1 -! ABK: Disable the addition of a stream number to the history file -! (now only occurs for more than 10 streams, which is v unlikely!) - - if (histfreq(ns) == '1') then ! instantaneous, write every dt - write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & - history_file(1:lenstr(history_file))//trim(cstream),'_inst.', & - iyear,'-',imonth,'-',iday,'-',sec,'.',suffix - - elseif (hist_avg) then ! write averaged data - - if (histfreq(ns) == 'd'.or.histfreq(ns) == 'D') then ! daily - write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,a)') & - history_file(1:lenstr(history_file))//trim(cstream), & - '.',iyear,'-',imonth,'-',iday,'.',suffix - elseif (histfreq(ns) == 'h'.or.histfreq(ns) == 'H') then ! hourly - write(ncfile,'(a,a,i2.2,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & - history_file(1:lenstr(history_file))//trim(cstream),'_', & - histfreq_n(ns),'h.',iyear,'-',imonth,'-',iday,'-',sec,'.',suffix - - elseif (histfreq(ns) == 'm'.or.histfreq(ns) == 'M') then ! monthly - write(ncfile,'(a,a,i4.4,a,i2.2,a,a)') & - history_file(1:lenstr(history_file))//trim(cstream),'.', & - iyear,'-',imonth,'.',suffix - elseif (histfreq(ns) == 'y'.or.histfreq(ns) == 'Y') then ! yearly - write(ncfile,'(a,a,i4.4,a,a)') & - history_file(1:lenstr(history_file))//trim(cstream),'.', & - iyear,'.',suffix - endif + ncfile=history_file(1:lenstr(history_file)) + + ! frequency of history ouput (typically 1) + if (histfreq_n(ns)>9) then + write(ldate_string,'(i2)') histfreq_n(ns) + else + write(ldate_string,'(i1)') histfreq_n(ns) + endif + + ncfile=ncfile(1:lenstr(ncfile))//'-'//trim(ldate_string) + + ! name file based on history frequency (e.g. "daily-mean") + if (histfreq(ns) == 'd'.or.histfreq(ns) == 'D') then ! daily + ncfile=ncfile(1:lenstr(ncfile))//'daily' + elseif (histfreq(ns) == 'h'.or.histfreq(ns) == 'H') then ! hourly + ncfile=ncfile(1:lenstr(ncfile))//'hourly' + elseif (histfreq(ns) == 'm'.or.histfreq(ns) == 'M') then ! monthly + ncfile=ncfile(1:lenstr(ncfile))//'monthly' + elseif (histfreq(ns) == 'y'.or.histfreq(ns) == 'Y') then ! yearly + ncfile=ncfile(1:lenstr(ncfile))//'yearly' + endif - else ! instantaneous with histfreq > dt - write(ncfile,'(a,a,i4.4,a,i2.2,a,i2.2,a,i5.5,a,a)') & - history_file(1:lenstr(history_file)),'_inst.', & - iyear,'-',imonth,'-',iday,'-',sec,'.',suffix + if (hist_avg) then + ncfile=ncfile(1:lenstr(ncfile))//'-mean' + else + ncfile=ncfile(1:lenstr(ncfile))//'-snap' endif + + ! date in filename is based on history file output frequency (e.g. "0001-01" for one file per month) + if (hist_file_freq(ns) == 'd'.or.hist_file_freq(ns) == 'D') then ! daily + write(ldate_string,'(i4.4,a,i2.2,a,i2.2)') iyear,'-',imonth,'-',iday + elseif (hist_file_freq(ns) == 'h'.or.hist_file_freq(ns) == 'H') then ! hourly + write(ldate_string,'(i4.4,a,i2.2,a,i2.2,a,i5.5)') iyear,'-',imonth,'-',iday,'-',sec + elseif (hist_file_freq(ns) == 'm'.or.hist_file_freq(ns) == 'M') then ! monthly + write(ldate_string,'(i4.4,a,i2.2)') iyear,'-',imonth + elseif (hist_file_freq(ns) == 'y'.or.hist_file_freq(ns) == 'Y') then ! 
yearly + write(ldate_string,'(i4.4)') iyear + else !instantaneous + write(ldate_string,'(i4.4,a,i2.2,a,i2.2,a,i5.5)') iyear,'-',imonth,'-',iday,'-',sec + endif + + endif + + ! join the pieces, typically iceh-1daily-mean_0001-01.nc + ncfile=ncfile(1:lenstr(ncfile))//'_'//ldate_string(1:lenstr(ldate_string))//'.'//suffix + + ! create a string of current time for debugging + if ( present(time_string) ) then + write(time_string,'(i4.4,a,i2.2,a,i2.2,a,i5.5)') & + iyear,'-',imonth,'-',iday,'-',sec endif end subroutine construct_filename diff --git a/source/ice_init.F90 b/source/ice_init.F90 index 1ed2f647..bf35fadd 100755 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -51,7 +51,7 @@ subroutine input_data use ice_calendar, only: year_init, istep0, histfreq, histfreq_n, & dumpfreq, dumpfreq_n, diagfreq, nstreams, & npt, dt, ndtd, days_per_year, use_leap_years, & - write_ic, dump_last + write_ic, dump_last, hist_file_freq use ice_restart_shared, only: & restart, restart_ext, restart_dir, restart_file, pointer_file, & runid, runtype, use_restart_time, restart_format @@ -134,6 +134,7 @@ subroutine input_data diagfreq, diag_type, diag_file, & print_global, print_points, latpnt, lonpnt, & dbug, histfreq, histfreq_n, hist_avg, & + hist_file_freq, & history_dir, history_file, history_deflate_level, & history_parallel_io, history_chunksize_x, history_chunksize_y, & write_ic, incond_dir, incond_file @@ -222,6 +223,7 @@ subroutine input_data histfreq(4) = 'm' ! output frequency option for different streams histfreq(5) = 'y' ! output frequency option for different streams histfreq_n(:) = 1 ! output frequency + hist_file_freq(:) = 'x' ! default to histfreq (below) hist_avg = .true. ! if true, write time-averages (not snapshots) history_dir = './' ! write to executable dir for default history_file = 'iceh' ! history file name prefix @@ -731,6 +733,13 @@ subroutine input_data endif #endif + !if hist_file_freq not set, default to histfreq + if (my_task == master_task) then + do n = 1, max_nstrm + if (hist_file_freq(n)=='x' .or. hist_file_freq(n) == 'X') hist_file_freq(n) = histfreq(n) + enddo + endif + call broadcast_scalar(days_per_year, master_task) call broadcast_scalar(use_leap_years, master_task) call broadcast_scalar(year_init, master_task) @@ -745,7 +754,10 @@ subroutine input_data call broadcast_scalar(diag_file, master_task) do n = 1, max_nstrm call broadcast_scalar(histfreq(n), master_task) - enddo + enddo + do n = 1, max_nstrm + call broadcast_scalar(hist_file_freq(n), master_task) + enddo call broadcast_array(histfreq_n, master_task) call broadcast_scalar(hist_avg, master_task) call broadcast_scalar(history_dir, master_task) @@ -916,6 +928,7 @@ subroutine input_data write(nu_diag,1010) ' print_points = ', print_points write(nu_diag,1010) ' bfbflag = ', bfbflag write(nu_diag,1050) ' histfreq = ', histfreq(:) + write(nu_diag,1050) ' hist_file_freq = ', hist_file_freq(:) write(nu_diag,1040) ' histfreq_n = ', histfreq_n(:) write(nu_diag,1010) ' hist_avg = ', hist_avg if (.not. 
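
Putting the pieces together, a history file is now named <history_file>-<histfreq_n><hourly|daily|monthly|yearly>-<mean|snap>_<date>.<suffix>, with the date granularity following hist_file_freq rather than histfreq, so several averaging intervals can land in the same file. An illustrative ice_in fragment (values are examples only) that writes daily means into one file per month, producing names like iceh-1daily-mean_0786-01.nc:

    histfreq(1)       = 'd'   ! daily averaging interval
    histfreq_n(1)     = 1
    hist_file_freq(1) = 'm'   ! one file per month; defaults to histfreq when left unset
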
hist_avg) write (nu_diag,*) 'History data will be snapshots' @@ -923,8 +936,10 @@ subroutine input_data trim(history_dir) write(nu_diag,*) ' history_file = ', & trim(history_file) - write(nu_diag,*) ' history_deflate_level = ', & + write(nu_diag,1020) ' history_deflate_level = ', & history_deflate_level + write(nu_diag,1010) ' history_parallel_io = ', & + history_parallel_io if (write_ic) then write (nu_diag,*) 'Initial condition will be written in ', & trim(incond_dir) From 16e26c8585eca4cb2ba97eb6462ce3b803a8b46c Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Thu, 25 Sep 2025 15:53:24 +1000 Subject: [PATCH 41/52] Move aicenmin to namelist (#64) For access-cm2 with multilayer coupling, default is 1e-5, otherwise default to puny --- drivers/access/ice_constants.F90 | 4 +--- source/ice_init.F90 | 22 ++++++++++++++++++---- source/ice_itd.F90 | 9 ++------- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/access/ice_constants.F90 b/drivers/access/ice_constants.F90 index 04bfc4b2..cefaea10 100644 --- a/drivers/access/ice_constants.F90 +++ b/drivers/access/ice_constants.F90 @@ -85,12 +85,10 @@ module ice_constants ! kseaice is used only for zero-layer thermo kseaice= 2.00_dbl_kind ,&! thermal conductivity of sea ice (W/m/deg) ! (used in zero layer thermodynamics option) - zref = 10._dbl_kind ,&! reference height for stability (m) #ifndef AusCOM snowpatch = 0.02_dbl_kind, & ! parameter for fractional snow area (m) #endif - ! multilayers with the UM coupling - aicenmin_ml = 0.00001_dbl_kind! AEW: min aice we want to allow when using + zref = 10._dbl_kind ! reference height for stability (m) #ifndef AusCOM real (kind=dbl_kind), parameter, public :: & !!! dragio = 0.00536_dbl_kind ,&! ice-ocn drag coefficient diff --git a/source/ice_init.F90 b/source/ice_init.F90 index bf35fadd..619884ab 100755 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -60,7 +60,7 @@ subroutine input_data history_chunksize_x, history_chunksize_y, & incond_dir, incond_file use ice_exit, only: abort_ice - use ice_itd, only: kitd, kcatbound + use ice_itd, only: kitd, kcatbound, aicenmin use ice_ocean, only: oceanmixed_ice, tfrz_option use ice_firstyear, only: restart_FY use ice_flux, only: update_ocn_f, l_mpond_fresh @@ -146,9 +146,8 @@ subroutine input_data namelist /thermo_nml/ & kitd, ktherm, conduct, & a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, & -!ars599: 24092014 (CODE: petteri) #ifdef AusCOM - chio, ice_ref_salinity, ksno, & + chio, ice_ref_salinity, ksno, aicenmin, & #endif saltmax, dSdt_slow_mode, phi_c_slow_mode, phi_i_mushy @@ -330,6 +329,9 @@ subroutine input_data ice_ref_salinity = 5._dbl_kind ! (ppt) ksno = 0.30_dbl_kind ! thermal conductivity of snow (W/m/deg) ! (use 0.2 for cm2) + aicenmin = 99 ! maximum ice concentration to zap + ! we set a sensible default after namelist read + #endif atmbndy = 'default' ! or 'constant' @@ -441,7 +443,16 @@ subroutine input_data call abort_ice('ice: error reading namelist') endif call release_fileunit(nu_nml) - + +#ifdef ACCESS + if (ktherm == 1 .and. aicenmin == 99) then + !Set a higher value + ! of aicenmin if we're using multilayers with UM-style coupling for stability. + aicenmin = 0.00001_dbl_kind + endif +#endif + if (aicenmin == 99) aicenmin = puny + !----------------------------------------------------------------- ! 
set up diagnostics output and resolve conflicts !----------------------------------------------------------------- @@ -838,6 +849,7 @@ subroutine input_data call broadcast_scalar(chio, master_task) call broadcast_scalar(ice_ref_salinity, master_task) call broadcast_scalar(ksno, master_task) + call broadcast_scalar(aicenmin, master_task) call broadcast_scalar(Tocnfrz, master_task) #endif call broadcast_scalar(atmbndy, master_task) @@ -1083,6 +1095,7 @@ subroutine input_data write(nu_diag,1005) ' chio = ', chio write(nu_diag,1005) ' ice_ref_salinity = ', ice_ref_salinity write(nu_diag,1005) ' ksno = ', ksno + write(nu_diag,1006) ' aicenmin = ', aicenmin #endif write(nu_diag,1005) ' ustar_min = ', ustar_min write(nu_diag, *) ' fbot_xfer_type = ', & @@ -1220,6 +1233,7 @@ subroutine input_data 1000 format (a30,2x,f9.2) ! a30 to align formatted, unformatted statements 1005 format (a30,2x,f9.6) ! float + 1006 format (a30,2x,f16.12)! double 1010 format (a30,2x,l6) ! logical 1020 format (a30,2x,i6) ! integer 1030 format (a30, a8) ! character diff --git a/source/ice_itd.F90 b/source/ice_itd.F90 index 5d561029..7cda2249 100755 --- a/source/ice_itd.F90 +++ b/source/ice_itd.F90 @@ -53,7 +53,7 @@ module ice_itd real (kind=dbl_kind), public :: & aicenmin ! AEW: This variable will replace puny as the min ice conc to ! allow when zap_small_areas is called. Set equal to puny - ! if zerolayers or standard coupling, set equal to aicenmin_ml + ! if zerolayers or standard coupling, set equal to 1e-5 ! if multilayers AND UM-style coupling real (kind=dbl_kind), public :: & @@ -230,20 +230,15 @@ subroutine init_itd (calc_Tsfc, heat_capacity) ! Alex West: added these two ar endif ! kcatbound ! AEW: (based on Alison McLaren's vn4 modifications) Set a higher value - ! of aicenmin if we're using multilayers with UM-style coupling. + ! of aicenmin in ice_init if we're using multilayers with UM-style coupling. ! Also allow higher values of hi_min, hs_min to be set (this is a ! bit ad-hoc). - ! Set aicenmin - this may want to be done via a namelist in future !----------------------------------------------------------------- if (heat_capacity) then ! Set higher values to help with stability - aicenmin = aicenmin_ml ! 1.e-5. Changed from 1.e-2 hi_min = p2 ! 0.2m hs_min = p1 ! 0.1m - else - ! aicenmin = puny ! Standard CICE setting - aicenmin = 2.0e-4_dbl_kind ! 
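
Since aicenmin is now read from thermo_nml in AusCOM/ACCESS builds, the hard-wired values removed here can be overridden at run time; left unset, the sentinel value 99 is replaced after the namelist read as described above (1.0e-5 for multilayer UM-style coupling, puny otherwise). An illustrative override, values shown only as an example:

    &thermo_nml
      ktherm   = 1          ! multilayer thermodynamics
      aicenmin = 1.0e-5     ! minimum ice concentration kept by zap_small_areas
    /
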
Same as setting in UM7.3 for ESM1.6 endif if (my_task == master_task) then From 681a2c6a9e2e0a194f0c3a538f717e36f3b18b5a Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Thu, 13 Nov 2025 11:23:13 +1100 Subject: [PATCH 42/52] Update to CMIP7 diagnostics (#35) * port CICE6 history features for CMIP variables: avg_ice_present, mask_ice_free_points * add some error handling for variables not available in the ACCESS coupling * add comment metadata for area and time weighting of variables * use variable descriptions from data request, and set cell_methods * add variables which are high priority, available and not previously included * for snow variables, add an _intensive version, see https://airtable.com/appGoMZhCpkGESoVk/tblpo5L8maBIGlM1B/viwNNzrqK5oPL7zk2?blocks=hide This change is based on the CMIP7 data request v1.2.2.2 - https://airtable.com/appGoMZhCpkGESoVk/tblpo5L8maBIGlM1B/viwNNzrqK5oPL7zk2?blocks=hide Co-authored-by: Spencer Wong <88933912+blimlim@users.noreply.github.com> Co-authored-by: SIobhan O'Farrell --- drivers/access/CICE_RunMod.F90 | 62 +- drivers/access/cpl_forcing_handler.F90 | 4 +- drivers/access/cpl_interface.F90 | 53 +- io_netcdf/ice_history_write.F90 | 57 +- source/ice_fileunits.F90 | 50 +- source/ice_flux.F90 | 51 +- source/ice_history.F90 | 2462 ++++++++++-------------- source/ice_history_shared.F90 | 115 +- source/ice_read_write.F90 | 27 + source/ice_step_mod.F90 | 27 +- source/ice_therm_shared.F90 | 3 +- source/ice_therm_vertical.F90 | 85 +- 12 files changed, 1374 insertions(+), 1622 deletions(-) diff --git a/drivers/access/CICE_RunMod.F90 b/drivers/access/CICE_RunMod.F90 index adf71938..96f6afdd 100644 --- a/drivers/access/CICE_RunMod.F90 +++ b/drivers/access/CICE_RunMod.F90 @@ -62,14 +62,13 @@ subroutine CICE_Run use ice_timers, only: ice_timer_start, & ice_timer_stop, timer_couple, timer_step, & timer_from_atm, timer_into_atm, timer_from_ocn, timer_into_ocn - use ice_grid, only: t2ugrid_vector, u2tgrid_vector + use ice_grid, only: t2ugrid_vector integer (kind=int_kind) :: time_sec, itap, icpl_ai, tmp_time integer (kind=int_kind) :: rtimestamp_ai, stimestamp_ai integer (kind=int_kind) :: rtimestamp_io, stimestamp_io !receive and send timestamps (seconds) integer (kind=int_kind) :: imon - logical :: write_tmp_dump = .true. #endif !-------------------------------------------------------------------- @@ -83,9 +82,12 @@ subroutine CICE_Run !-------------------------------------------------------------------- #ifdef ACCESS - write(il_out,*)'A <==> I coupling num_cpl_ai = ',num_cpl_ai - write(il_out,*)' ice steps per ai interval num_ice_ai = ',num_ice_ai - write(il_out,*)' runtime, runtime0 = ',runtime, runtime0 + + if (my_task == master_task) then + write(il_out,*)'A <==> I coupling num_cpl_ai = ',num_cpl_ai + write(il_out,*)' ice steps per ai interval num_ice_ai = ',num_ice_ai + write(il_out,*)' runtime, runtime0 = ',runtime, runtime0 + endif time_sec = 0 @@ -94,7 +96,7 @@ subroutine CICE_Run !receive a2i fields rtimestamp_ai = time_sec !call ice_timer_start(timer_from_atm) ! atm/ice coupling - write(il_out,*)' calling from_atm at icpl_ai, time_sec = ', icpl_ai, time_sec + ! 
write(il_out,*)' calling from_atm at icpl_ai, time_sec = ', icpl_ai, time_sec !=========================== call from_atm(rtimestamp_ai) !=========================== @@ -116,8 +118,8 @@ subroutine CICE_Run call t2ugrid_vector(io_strsu) call t2ugrid_vector(io_strsv) - write(il_out,'(a,3i10)') & - ' calling into_ocn at icpl_ai, itap, time_sec = ', icpl_ai, itap, time_sec + ! write(il_out,'(a,3i10)') & + ! ' calling into_ocn at icpl_ai, itap, time_sec = ', icpl_ai, itap, time_sec !call ice_timer_start(timer_into_ocn) ! atm/ocn coupling !=========================== !call check_iceberg_fields('chk_iceberg_i2o.nc') @@ -161,8 +163,8 @@ subroutine CICE_Run stimestamp_ai = time_sec - write(il_out,'(a,3i10)') & - ' calling into_atm at icpl_ai, itap, time_sec = ',icpl_ai, itap, time_sec + ! write(il_out,'(a,3i10)') & + ! ' calling into_atm at icpl_ai, itap, time_sec = ',icpl_ai, itap, time_sec !=========================== call into_atm(stimestamp_ai) !=========================== @@ -292,6 +294,10 @@ subroutine ice_step use ice_algae, only: bgc_diags, write_restart_bgc use ice_zbgc, only: init_history_bgc, biogeochemistry use ice_zbgc_shared, only: skl_bgc +#ifdef ACCESS + use ice_state, only: vsno, aice, tr_pond + use ice_flux, only: snowfrac +#endif integer (kind=int_kind) :: & iblk , & ! block index @@ -428,20 +434,12 @@ subroutine coupling_prep (iblk) use ice_constants, only: c0, c1, puny, rhofresh use ice_coupling, only: top_layer_Tandk_run, sfcflux_to_ocn use ice_domain_size, only: ncat - use ice_flux, only: alvdf, alidf, alvdr, alidr, albice, albsno, & - albpnd, albcnt, apeff_ai, coszen, fpond, fresh, & - alvdf_ai, alidf_ai, alvdr_ai, alidr_ai, fhocn_ai, & - fresh_ai, fsalt_ai, fsalt, & - fswthru_ai, fhocn, fswthru, scale_factor, & - swvdr, swidr, swvdf, swidf, Tf, Tair, Qa, strairxT, strairyt, & - fsens, flat, fswabs, flwout, evap, Tref, Qref, faero_ocn, & - fsurfn_f, flatn_f, scale_fluxes, frzmlt_init, frzmlt, & - snowfrac, snowfracn, evap_ice, evap_snow + use ice_flux use ice_grid, only: tmask use ice_ocean, only: oceanmixed_ice, ocean_mixed_layer use ice_shortwave, only: alvdfn, alidfn, alvdrn, alidrn, & albicen, albsnon, albpndn, apeffn - use ice_state, only: aicen, aice, aice_init, nbtrcr + use ice_state, only: aicen, aice, aice_init, nbtrcr, tr_pond, vsno use ice_therm_shared, only: calc_Tsfc, heat_capacity use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop use ice_zbgc_shared, only: flux_bio, flux_bio_ai @@ -527,8 +525,19 @@ subroutine coupling_prep (iblk) apeff_ai(i,j,iblk) = apeff_ai(i,j,iblk) & ! for history + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) - snowfrac(i,j,iblk) = snowfrac(i,j,iblk) & ! for history - + snowfracn(i,j,n,iblk)*aicen(i,j,n,iblk) + + if ( .not. tr_pond .and. .not. calc_Tsfc ) then + ! calculate a snowfrac diagnostic in the same way the UM does + ! set snow fraction using JULES empirical formula based + ! on snow volume + ! ref: https://github.com/ACCESS-NRI/UM7/blob/6602dadd15c190ee37c6644190f52d428bc66917/umbase_hg3/src/atmosphere/short_wave_radiation/ftsa.F90#L201-L202 + if (aice(i,j,iblk) > 2e-4) & + snowfrac(i,j,iblk) = c1 - exp(-p2*rhos*(vsno(i,j,iblk) / aice(i,j,iblk))) + else + snowfrac(i,j,iblk) = snowfrac(i,j,iblk) & ! 
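
For the non-pond, UM-coupled case the snow fraction above follows the JULES empirical form snowfrac = 1 - exp(-p2*rhos*hsno), with hsno = vsno/aice the mean snow depth over ice, applied only where aice exceeds 2e-4. As a rough worked example, taking p2 = 0.2 and the default CICE snow density of roughly 330 kg/m3 (an assumption here, not stated in this patch), 5 cm of snow gives rhos*hsno of about 16.5 kg/m2 and snowfrac = 1 - exp(-0.2*16.5), which is about 0.96, so even a thin snow cover is diagnosed as nearly complete.
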
for history + + snowfracn(i,j,n,iblk)*aicen(i,j,n,iblk) + endif + enddo enddo enddo @@ -555,6 +564,15 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) + fsens_ai (i,j,iblk) = fsens(i,j,iblk) + flat_ai (i,j,iblk) = flat(i,j,iblk) + fswabs_ai (i,j,iblk) = fswabs(i,j,iblk) + flwout_ai (i,j,iblk) = flwout(i,j,iblk) + evap_ai (i,j,iblk) = evap(i,j,iblk) + evap_ice_ai(i,j,iblk) = evap_ice(i,j,iblk) + evap_snow_ai(i,j,iblk) = evap_snow(i,j,iblk) + fcondtop_ai(i,j,iblk) = fcondtop(i,j,iblk) + fsurf_ai(i,j,iblk) = fsurf(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index 62a755c0..85660c6d 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ -424,9 +424,9 @@ subroutine get_lice_discharge(fname) case (4); myvar = 'FICEBERG_GC3_AVE' end select write(il_out,*)'(get_lice_discharge), iceberg = ', iceberg - write(il_out,'(a,a)') '(get_lice_discharge) reading in iceberg data, myvar= ',trim(myvar) + ! write(il_out,'(a,a)') '(get_lice_discharge) reading in iceberg data, myvar= ',trim(myvar) do im = 1, 12 - write(il_out,*) '(get_lice_discharge) reading in data, month= ',im + ! write(il_out,*) '(get_lice_discharge) reading in data, month= ',im call ice_read_nc(ncid_i2o, im, trim(myvar), vwork, dbug) ! Restrict iceberg fluxes to ocean points diff --git a/drivers/access/cpl_interface.F90 b/drivers/access/cpl_interface.F90 index 34e324b5..0f2052a4 100644 --- a/drivers/access/cpl_interface.F90 +++ b/drivers/access/cpl_interface.F90 @@ -92,24 +92,23 @@ subroutine prism_init ! Initialise MPI mpiflag = .FALSE. call MPI_Initialized (mpiflag, ierror) - print *,'CICE: (prism_init) BF MPI_INIT, mpiflag = ',mpiflag + ! print *,'CICE: (prism_init) BF MPI_INIT, mpiflag = ',mpiflag if ( .not. mpiflag ) then call MPI_INIT(ierror) endif call MPI_Initialized (mpiflag, ierror) - print *, 'CICE: (prism_init) AF MPI_INIT, mpiflag = ',mpiflag + ! print *, 'CICE: (prism_init) AF MPI_INIT, mpiflag = ',mpiflag - print * - print *, 'CICE: (prism_init) calling prism_init_comp_proto...' + ! print *, 'CICE: (prism_init) calling prism_init_comp_proto...' call prism_init_comp_proto (il_comp_id, cp_modnam, ierror) if (ierror /= PRISM_Ok) then call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 1') - else - print *, 'CICE: (prism_init) called prism_init_comp_proto !' + ! else + ! if ( my_task == master_task ) print *, 'CICE: (prism_init) called prism_init_comp_proto !' endif !B: the following part may not be really needed(?) @@ -132,8 +131,8 @@ subroutine prism_init if (ierror /= PRISM_Ok) then print *, 'CICE: (prism_init) Error in MPI_Buffer_Attach.' call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 2') - else - print *, 'CICE: (prism_init) MPI_Buffer_Attach ok!' + ! else + ! print *, 'CICE: (prism_init) MPI_Buffer_Attach ok!' endif ! ! PSMILe attribution of local communicator. @@ -147,7 +146,7 @@ subroutine prism_init print *, 'CICE: Error in prism_get_localcomm_proto' call prism_abort_proto(il_comp_id, 'cice prism_init','STOP 3') else - print *, 'CICE: _get_localcomm_ OK! il_commlocal= ',il_commlocal + if ( my_task == master_task ) print *, 'CICE: _get_localcomm_ OK! il_commlocal= ',il_commlocal endif ! @@ -155,13 +154,13 @@ subroutine prism_init ! ! print *, '* CICE: Entering init_cpl.....' - print *, '* CICE (prism_init) calling MPI_Comm_Size ...' 
+ !print *, '* CICE (prism_init) calling MPI_Comm_Size ...' call MPI_Comm_Size(il_commlocal, il_nbtotproc, ierror) - print *, '* CICE (prism_init) calling MPI_Comm_Rank ...' + !print *, '* CICE (prism_init) calling MPI_Comm_Rank ...' call MPI_Comm_Rank(il_commlocal, my_task, ierror) - print *, '* CICE (prism_init) il_commlocal, il_nbtotproc, my_task = ' - print *, '* CICE (prism_init) ', il_commlocal, il_nbtotproc, my_task + if ( my_task == master_task ) print *, '* CICE (prism_init) il_commlocal, il_nbtotproc, my_task = ' + if ( my_task == master_task ) print *, '* CICE (prism_init) ', il_commlocal, il_nbtotproc, my_task ! il_nbcplproc = il_nbtotproc !multi-process coupling (real parallel cpl)! !il_nbcplproc = 1 !mono process coupling @@ -258,18 +257,18 @@ subroutine init_cpl ! end do !!debug - if (my_task == 0) then - write(il_out,*) "all block info:" - do iblk=1,nblocks_tot - this_block = get_block(iblk,iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - write(il_out,*) ' this block: cpu, iblock, jblock=', distrb_info%blockLocation(iblk)-1, this_block%iblock, this_block%jblock - write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) - end do - end if + ! if (my_task == 0) then + ! write(il_out,*) "all block info:" + ! do iblk=1,nblocks_tot + ! this_block = get_block(iblk,iblk) + ! ilo = this_block%ilo + ! ihi = this_block%ihi + ! jlo = this_block%jlo + ! jhi = this_block%jhi + ! write(il_out,*) ' this block: cpu, iblock, jblock=', distrb_info%blockLocation(iblk)-1, this_block%iblock, this_block%jblock + ! write(il_out,*) ' block:', iblk, "gilo, gjlo, gihi, gjhi=", this_block%i_glob(ilo), this_block%j_glob(jlo), this_block%i_glob(ihi), this_block%j_glob(jhi) + ! end do + ! end if do iblk=1,nblocks_tot @@ -781,8 +780,8 @@ subroutine from_atm(isteps) call ncheck( nf_open('fields_a2i_in_ice.nc',nf_write,ncid) ) call write_nc_1Dtime(real(isteps),currstep,'time',ncid) endif - write(il_out,*) - write(il_out,*) '(from_atm) Total number of fields to be rcvd: ', nrecv_a2i + ! write(il_out,*) + ! write(il_out,*) '(from_atm) Total number of fields to be rcvd: ', nrecv_a2i endif if (debug) write(il_out,*) "prism_get from_atm at sec: ", isteps diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index 75014d23..4d75b228 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -668,6 +668,11 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) call check(nf90_put_att(ncid,varid,'cell_measures', & avail_hist_fields(n)%vcellmeas), & 'put att cell_measures '//avail_hist_fields(n)%vname) + if (avail_hist_fields(n)%vcomment /= "none") then + call check(nf90_put_att(ncid,varid,'comment', & + avail_hist_fields(n)%vcomment), & + 'put att comment '//avail_hist_fields(n)%vname) + endif call check(nf90_put_att(ncid,varid,'missing_value',spval), & 'put att missing_value '//avail_hist_fields(n)%vname) call check(nf90_put_att(ncid,varid,'_FillValue',spval), & @@ -679,9 +684,20 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) if (hist_avg) then if (TRIM(avail_hist_fields(n)%vname)/='sig1' .or. 
& TRIM(avail_hist_fields(n)%vname)/='sig2') then - - call check(nf90_put_att(ncid,varid,'cell_methods','time: mean'), & + if (avail_hist_fields(n)%avg_ice_present) then + call check(nf90_put_att(ncid,varid,'cell_methods',& + 'area: time: mean where sea_ice (mask=siconc)'), & + 'put att cell methods time mean '//avail_hist_fields(n)%vname) + else + if (TRIM(avail_hist_fields(n)%vname(1:2))/='si') then !native diags + call check(nf90_put_att(ncid,varid,'cell_methods','time: mean'), & + 'put att cell methods time mean '//avail_hist_fields(n)%vname) + else !cmip diags + call check(nf90_put_att(ncid,varid,'cell_methods', & + 'area: mean where sea time: mean'), & 'put att cell methods time mean '//avail_hist_fields(n)%vname) + endif + endif endif endif @@ -739,6 +755,11 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) avail_hist_fields(n)%vcellmeas) if (status /= nf90_noerr) call abort_ice( & 'Error defining cell measures for '//avail_hist_fields(n)%vname) + if (avail_hist_fields(n)%vcomment /= "none") then + call check(nf90_put_att(ncid,varid,'comment', & + avail_hist_fields(n)%vcomment), & + 'put att comment '//avail_hist_fields(n)%vname) + endif status = nf90_put_att(ncid,varid,'missing_value',spval) if (status /= nf90_noerr) call abort_ice( & 'Error defining missing_value for '//avail_hist_fields(n)%vname) @@ -802,6 +823,11 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) avail_hist_fields(n)%vcellmeas) if (status /= nf90_noerr) call abort_ice( & 'Error defining cell measures for '//avail_hist_fields(n)%vname) + if (avail_hist_fields(n)%vcomment /= "none") then + call check(nf90_put_att(ncid,varid,'comment', & + avail_hist_fields(n)%vcomment), & + 'put att comment '//avail_hist_fields(n)%vname) + endif status = nf90_put_att(ncid,varid,'missing_value',spval) if (status /= nf90_noerr) call abort_ice( & 'Error defining missing_value for '//avail_hist_fields(n)%vname) @@ -851,6 +877,11 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) avail_hist_fields(n)%vcellmeas) if (status /= nf90_noerr) call abort_ice( & 'Error defining cell measures for '//avail_hist_fields(n)%vname) + if (avail_hist_fields(n)%vcomment /= "none") then + call check(nf90_put_att(ncid,varid,'comment', & + avail_hist_fields(n)%vcomment), & + 'put att comment '//avail_hist_fields(n)%vname) + endif status = nf90_put_att(ncid,varid,'missing_value',spval) if (status /= nf90_noerr) call abort_ice( & 'Error defining missing_value for '//avail_hist_fields(n)%vname) @@ -901,6 +932,11 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) avail_hist_fields(n)%vcellmeas) if (status /= nf90_noerr) call abort_ice( & 'Error defining cell measures for '//avail_hist_fields(n)%vname) + if (avail_hist_fields(n)%vcomment /= "none") then + call check(nf90_put_att(ncid,varid,'comment', & + avail_hist_fields(n)%vcomment), & + 'put att comment '//avail_hist_fields(n)%vname) + endif status = nf90_put_att(ncid,varid,'missing_value',spval) if (status /= nf90_noerr) call abort_ice( & 'Error defining missing_value for '//avail_hist_fields(n)%vname) @@ -966,6 +1002,11 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) avail_hist_fields(n)%vcellmeas) if (status /= nf90_noerr) call abort_ice( & 'Error defining cell measures for '//avail_hist_fields(n)%vname) + if (avail_hist_fields(n)%vcomment /= "none") then + call check(nf90_put_att(ncid,varid,'comment', & + avail_hist_fields(n)%vcomment), & 
+ 'put att comment '//avail_hist_fields(n)%vname) + endif status = nf90_put_att(ncid,varid,'missing_value',spval) if (status /= nf90_noerr) call abort_ice( & 'Error defining missing_value for '//avail_hist_fields(n)%vname) @@ -1031,6 +1072,11 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) avail_hist_fields(n)%vcellmeas) if (status /= nf90_noerr) call abort_ice( & 'Error defining cell measures for '//avail_hist_fields(n)%vname) + if (avail_hist_fields(n)%vcomment /= "none") then + call check(nf90_put_att(ncid,varid,'comment', & + avail_hist_fields(n)%vcomment), & + 'put att comment '//avail_hist_fields(n)%vname) + endif status = nf90_put_att(ncid,varid,'missing_value',spval) if (status /= nf90_noerr) call abort_ice( & 'Error defining missing_value for '//avail_hist_fields(n)%vname) @@ -1094,9 +1140,10 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) call check(nf90_put_att(ncid,nf90_global,'comment2',title), & 'global attribute comment2') - title = 'CF-1.0' - call check(nf90_put_att(ncid,nf90_global,'conventions',title), & - 'global attribute conventions') + ! TO-DO: Update output for CF compliance ! + ! title = 'CF-1.0' + ! call check(nf90_put_att(ncid,nf90_global,'conventions',title), & + ! 'global attribute conventions') call date_and_time(date=current_date, time=current_time) write(start_time,1000) current_date(1:4), current_date(5:6), & diff --git a/source/ice_fileunits.F90 b/source/ice_fileunits.F90 index aa2be15d..4cdcad73 100755 --- a/source/ice_fileunits.F90 +++ b/source/ice_fileunits.F90 @@ -29,7 +29,7 @@ module ice_fileunits implicit none private public :: init_fileunits, get_fileunit, flush_fileunit, & - release_fileunit, release_all_fileunits + release_fileunit, release_all_fileunits, goto_nml save character (len=char_len), public :: & @@ -292,6 +292,54 @@ subroutine flush_fileunit(iunit) end subroutine flush_fileunit +!======================================================================= +! Namelist error handling ported from https://github.com/CICE-Consortium/CICE/blob/8e3ef7c4cb657705ceff5bfec3e12b49dec4973e/cicecore/shared/ice_fileunits.F90#L328 + subroutine goto_nml(iunit, nml, status) + ! Search to namelist group within ice_in file. + ! for compilers that do not allow optional namelists + + ! passed variables + integer(kind=int_kind), intent(in) :: & + iunit ! namelist file unit + + character(len=*), intent(in) :: & + nml ! namelist to search for + + integer(kind=int_kind), intent(out) :: & + status ! status of subroutine + + ! local variables + character(len=char_len) :: & + file_str, & ! string in file + nml_str ! namelist string to test + + integer(kind=int_kind) :: & + i, n ! dummy integers + + + ! rewind file + rewind(iunit) + + ! define test string with ampersand + nml_str = '&' // trim(adjustl(nml)) + + ! search for the record containing the namelist group we're looking for + do + read(iunit, '(a)', iostat=status) file_str + if (status /= 0) then + exit ! e.g. end of file + else + if (index(adjustl(file_str), nml_str) == 1) then + exit ! i.e. found record we're looking for + end if + end if + end do + + ! backspace to namelist name in file + backspace(iunit) + + end subroutine goto_nml + !======================================================================= end module ice_fileunits diff --git a/source/ice_flux.F90 b/source/ice_flux.F90 index 5f92570b..602b8de6 100755 --- a/source/ice_flux.F90 +++ b/source/ice_flux.F90 @@ -74,10 +74,7 @@ module ice_flux dardg1dt, & ! 
rate of area loss by ridging ice (1/s) dardg2dt, & ! rate of area gain by new ridges (1/s) dvirdgdt, & ! rate of ice volume ridged (m/s) - opening , & ! rate of opening due to divergence/shear (1/s) - ice_freeboard ! height of ice surface (i.e. not snow surface) - ! above sea level (m) - + opening ! rate of opening due to divergence/shear (1/s) real (kind=dbl_kind), & dimension (nx_block,ny_block,ncat,max_blocks), public :: & @@ -92,10 +89,7 @@ module ice_flux araftn, & ! rafting ice area vraftn, & ! rafting ice volume aredistn, & ! redistribution function: fraction of new ridge area - vredistn , & ! redistribution function: fraction of new ridge volume - ice_freeboardn ! category height of ice surface (i.e. not snow - ! surface) above sea level (m) - + vredistn ! redistribution function: fraction of new ridge volume ! restart @@ -314,12 +308,21 @@ module ice_flux ! ice diagnostics and history files as these are more accurate. ! (The others suffer from problem of incorrect values at grid boxes ! that change from an ice free state to an icy state.) - + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), public :: & fresh_ai, & ! fresh water flux to ocean (kg/m^2/s) fsalt_ai, & ! salt flux to ocean (kg/m^2/s) fhocn_ai, & ! net heat flux to ocean (W/m^2) - fswthru_ai ! shortwave penetrating to ocean (W/m^2) + fswthru_ai, & ! shortwave penetrating to ocean (W/m^2) + fsens_ai, & ! sensible heat flux (W/m^2) + flat_ai, & ! latent heat flux (W/m^2) + fswabs_ai, & ! shortwave absorbed heat flx (W/m^2) + flwout_ai, & ! upwd lw emitted heat flx (W/m^2) + evap_ai, & ! & evaporation (kg/m2/s) + evap_ice_ai, & ! & evaporation (kg/m2/s) + evap_snow_ai, & ! & evaporation (kg/m2/s) + fcondtop_ai, & ! downward cond flux at top surface (W m-2) + fsurf_ai ! net flux to top surface, excluding fcondtop (W/m^2) ! Used with data assimilation in hadgem drivers real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & @@ -475,6 +478,8 @@ subroutine init_coupler_flux flwout (:,:,:) = -stefan_boltzmann*Tffresh**4 ! in case atm model diagnoses Tsfc from flwout evap (:,:,:) = c0 + evap_ice (:,:,:) = c0 + evap_snow (:,:,:) = c0 Tref (:,:,:) = c0 Qref (:,:,:) = c0 Uref (:,:,:) = c0 @@ -535,6 +540,8 @@ subroutine init_flux_atm fswabs (:,:,:) = c0 flwout (:,:,:) = c0 evap (:,:,:) = c0 + evap_ice(:,:,:) = c0 + evap_snow(:,:,:) = c0 Tref (:,:,:) = c0 Qref (:,:,:) = c0 Uref (:,:,:) = c0 @@ -597,7 +604,6 @@ subroutine init_history_therm melts (:,:,:) = c0 meltb (:,:,:) = c0 meltl (:,:,:) = c0 - ice_freeboard (:,:,:) = c0 daidtt (:,:,:) = aice(:,:,:) ! temporary initial area dvidtt (:,:,:) = vice(:,:,:) ! temporary initial volume dvsdtt (:,:,:) = vsno(:,:,:) ! temporary initial volume @@ -616,11 +622,20 @@ subroutine init_history_therm fsalt_ai (:,:,:) = c0 fhocn_ai (:,:,:) = c0 fswthru_ai(:,:,:) = c0 - albice (:,:,:) = c0 - albsno (:,:,:) = c0 - albpnd (:,:,:) = c0 + fsens_ai (:,:,:) = c0 + flat_ai (:,:,:) = c0 + fswabs_ai (:,:,:) = c0 + flwout_ai (:,:,:) = c0 + evap_ai (:,:,:) = c0 + evap_ice_ai(:,:,:) = c0 + evap_snow_ai(:,:,:) = c0 + fcondtop_ai(:,:,:) = c0 + fsurf_ai (:,:,:) = c0 + albice (:,:,:) = c0 + albsno (:,:,:) = c0 + albpnd (:,:,:) = c0 snowfracn (:,:,:,:) = c0 - snowfrac (:,:,:) = c0 + snowfrac (:,:,:) = c0 ! drag coefficients are computed prior to the atmo_boundary call, ! 
during the thermodynamics section @@ -712,7 +727,6 @@ subroutine merge_fluxes (nx_block, ny_block, & fswabsn, flwoutn, & evapn, & evapn_ice,evapn_snow, & - ice_freeboardn, & Trefn, Qrefn, & freshn, fsaltn, & fhocnn, fswthrun, & @@ -724,7 +738,6 @@ subroutine merge_fluxes (nx_block, ny_block, & fswabs, flwout, & evap, & evap_ice, evap_snow, & - ice_freeboard, & Tref, Qref, & fresh, fsalt, & fhocn, fswthru, & @@ -769,7 +782,6 @@ subroutine merge_fluxes (nx_block, ny_block, & meltsn , & ! snow melt (m) congeln , & ! congelation ice growth (m) snoicen , & ! snow-ice growth (m) - ice_freeboardn , & ! ice freeboard (m) evapn_ice, & ! evaporation over ice only (kg/m2/s) evapn_snow ! evaporation over snow only (kg/m2/s) @@ -803,7 +815,6 @@ subroutine merge_fluxes (nx_block, ny_block, & melts , & ! snow melt (m) congel , & ! congelation ice growth (m) snoice , & ! snow-ice growth (m) - ice_freeboard, & ! ice freeboard evap_ice, & ! evaporation over ice only evap_snow ! evaporation over snow only @@ -846,8 +857,6 @@ subroutine merge_fluxes (nx_block, ny_block, & evap (i,j) = evap (i,j) + evapn (i,j)*aicen(i,j) evap_ice (i,j) = evap_ice(i,j) + evapn_ice(i,j)*aicen(i,j) evap_snow (i,j) = evap_snow(i,j) + evapn_snow(i,j)*aicen(i,j) - ice_freeboard (i,j) = ice_freeboard(i,j) + & - ice_freeboardn(i,j)*aicen(i,j) Tref (i,j) = Tref (i,j) + Trefn (i,j)*aicen(i,j) Qref (i,j) = Qref (i,j) + Qrefn (i,j)*aicen(i,j) if (present(Urefn) .and. present(Uref)) then diff --git a/source/ice_history.F90 b/source/ice_history.F90 index d8d2a4db..ff419e62 100755 --- a/source/ice_history.F90 +++ b/source/ice_history.F90 @@ -32,12 +32,14 @@ module ice_history use ice_kinds_mod + use ice_exit, only: abort_ice + implicit none private public :: init_hist, accum_hist save - + !======================================================================= contains @@ -58,13 +60,12 @@ subroutine init_hist (dt) use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar, broadcast_array use ice_communicate, only: my_task, master_task - use ice_constants, only: c0, c1, c2, c100, mps_to_cmpdy, rhofresh, & - Tffresh, kg_to_g, secday + use ice_constants, only: c0, c1, c2, c100, c1000, mps_to_cmpdy, rhoi, rhos, & + rhow, rhofresh, Tffresh, kg_to_g, secday, ice_ref_salinity use ice_calendar, only: yday, days_per_year, histfreq, & histfreq_n, nstreams use ice_domain_size, only: max_blocks, max_nstrm use ice_dyn_shared, only: kdyn - use ice_exit, only: abort_ice use ice_fileunits, only: nu_nml, nml_filename, nu_diag, & get_fileunit, release_fileunit use ice_flux, only: mlt_onset, frz_onset, albcnt @@ -78,6 +79,11 @@ subroutine init_hist (dt) use ice_state, only: tr_iage, tr_FY, tr_lvl, tr_pond, tr_aero, tr_brine use ice_therm_shared, only: calc_Tsfc, heat_capacity use ice_zbgc_shared, only: skl_bgc + use ice_fileunits, only: goto_nml + +#ifdef AusCOM + use cpl_parameters, only: do_scale_fluxes +#endif real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -88,30 +94,46 @@ subroutine init_hist (dt) integer (kind=int_kind), dimension(max_nstrm) :: & ntmp integer (kind=int_kind) :: nml_error ! namelist i/o error flag + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! text namelist name !----------------------------------------------------------------- ! 
read namelist !----------------------------------------------------------------- - call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + nml_name = 'icefields_nml' + write(nu_diag,*) 'ice: Reading ', trim(nml_name) + + ! open file + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) + if (nml_error /= 0) then + call abort_ice('ice ERROR: '//trim(nml_name)//' open file ') + endif + + ! seek to this namelist + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice('ice ERROR: searching for '// trim(nml_name)) endif + + ! read namelist + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice('ice ERROR: ' // trim(nml_name) // ' reading '// & + trim(tmpstr2)) + endif end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - close (nu_nml) - call abort_ice('ice: error reading icefields_nml') + close(nu_nml) + call release_fileunit(nu_nml) endif ! histfreq options ('1','h','d','m','y') @@ -130,7 +152,7 @@ subroutine init_hist (dt) call abort_ice('ice: histfreq contains illegal element') endif enddo - if (nstreams == 0) write (nu_diag,*) 'WARNING: No history output' + if (nstreams == 0 .and. my_task == master_task) write (nu_diag,*) 'WARNING: No history output' do ns1 = 1, nstreams do ns2 = 1, nstreams if (histfreq(ns1) == histfreq(ns2) .and. ns1/=ns2 & @@ -140,55 +162,112 @@ subroutine init_hist (dt) enddo enddo - if (.not. tr_iage) then - f_iage = 'x' - f_dagedtt = 'x' - f_dagedtd = 'x' - endif - if (.not. tr_FY) f_FY = 'x' - - if (kdyn /= 2) then - f_a11 = 'x' - f_a12 = 'x' - f_e11 = 'x' - f_e12 = 'x' - f_e22 = 'x' - f_s11 = 'x' - f_s12 = 'x' - f_s22 = 'x' - f_yieldstress11 = 'x' - f_yieldstress12 = 'x' - f_yieldstress22 = 'x' - endif + if ( my_task == master_task ) then !check history configuration + if (.not. tr_iage) then + !todo: abort here if trying to use these diagnostics and the tracer (and its restart) are not available + f_iage = 'x' + f_siage = 'x' + f_dagedtt = 'x' + f_dagedtd = 'x' + endif + if (.not. tr_FY) f_FY = 'x' + + if (kdyn /= 2) then + f_a11 = 'x' + f_a12 = 'x' + f_e11 = 'x' + f_e12 = 'x' + f_e22 = 'x' + f_s11 = 'x' + f_s12 = 'x' + f_s22 = 'x' + f_yieldstress11 = 'x' + f_yieldstress12 = 'x' + f_yieldstress22 = 'x' + endif - ! these must be output at the same frequency because of - ! cos(zenith angle) averaging - if (f_albice(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_albice = f_albsni - if (f_albsno(1:1) /= 'x') f_albsno = f_albice - if (f_albpnd(1:1) /= 'x') f_albpnd = f_albice - if (f_coszen(1:1) /= 'x' .and. f_albice(1:1) /= 'x') f_coszen = f_albice - if (f_coszen(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_coszen = f_albsni - - ! to prevent array-out-of-bounds when aggregating - if (f_fmeltt_ai(1:1) /= 'x') f_fmelttn_ai = f_fmeltt_ai - - ! AEW: These are only calculated under certain circumstances - ! (if using multilayers with UM-style coupling) - if (calc_Tsfc .or. .not. heat_capacity) then - f_Tn_top = 'x' - f_keffn_top = 'x' - endif + ! these must be output at the same frequency because of + ! cos(zenith angle) averaging + if (f_albice(1:1) /= 'x' .and. 
f_albsni(1:1) /= 'x') f_albice = f_albsni + if (f_albsno(1:1) /= 'x') f_albsno = f_albice + if (f_albpnd(1:1) /= 'x') f_albpnd = f_albice + if (f_coszen(1:1) /= 'x' .and. f_albice(1:1) /= 'x') f_coszen = f_albice + if (f_coszen(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_coszen = f_albsni + + ! to prevent array-out-of-bounds when aggregating + if (f_fmeltt_ai(1:1) /= 'x') f_fmelttn_ai = f_fmeltt_ai + + ! AEW: These are only calculated under certain circumstances + ! (if using multilayers with UM-style coupling) + if (calc_Tsfc .or. .not. heat_capacity) then + if (f_Tn_top /= 'x') call abort_ice("f_Tn_top not available, set to 'x'") + if (f_keffn_top /= 'x') call abort_ice("f_keffn_top not available, set to 'x' ") + endif + + if ( .not. calc_Tsfc ) then + if (f_Tair /= 'x') call abort_ice ("f_Tair not available with calc_Tsfc = .false., set to 'x'") + if (f_sialb /= 'x') call abort_ice ("f_sialb not available with calc_Tsfc = .false., set to 'x'") + endif + + if ( .not. calc_Tsfc .and. .not. heat_capacity) then ! access-esm1.6 + ! surface temperature is neither coupled or calculated within cice + ! prognostic in the UM only + if (f_Tsfc /= 'x') call abort_ice ("f_Tsfc not available, set to 'x'") + if (f_snowfracn /= 'x') call abort_ice ("f_snowfracn not available, set to 'x'") + if (f_sitemptop /= 'x') call abort_ice ("f_sitemptop not available, set to 'x'") + if (f_sitempsnic /= 'x') call abort_ice ("f_sitempsnic not available, set to 'x'") + endif + + ! rain goes straight to ocean + if ( (.not. tr_pond) .and. f_sipr /= 'x') call abort_ice ("f_sipr not available, set to 'x'") #ifndef ncdf - f_bounds = .false. + f_bounds = .false. +#endif + + ! write dimensions for 3D or 4D history variables + ! note: list of variables checked here is incomplete + if (f_aicen(1:1) /= 'x' .or. f_vicen(1:1) /= 'x' .or. & + f_Tinz (1:1) /= 'x' .or. f_Sinz (1:1) /= 'x') f_NCAT = .true. + if (f_Tinz (1:1) /= 'x' .or. f_Sinz (1:1) /= 'x') f_VGRDi = .true. + if (f_Tsnz (1:1) /= 'x') f_VGRDs = .true. + +#ifdef ACCESS + ! these are not available with UM style coupling + if ( f_siflsenstop /= 'x' ) call abort_ice("f_siflsenstop not available, set to 'x'") + if ( f_sifllwdtop /= 'x' ) call abort_ice("f_sifllwdtop not available, set to 'x'") + if ( f_sifllwutop /= 'x' ) call abort_ice("f_sifllwutop not available, set to 'x'") + if ( f_siflswdtop /= 'x' ) call abort_ice("f_siflswdtop not available, set to 'x'") + if ( f_siflswutop /= 'x' ) call abort_ice("f_siflswutop not available, set to 'x'") + if ( f_sisnconc /= 'x' ) call abort_ice("f_sisnconc not available, set to 'x'") + ! there is a calculation of a sisnconc based on snow volume, but it doesn't represent a process + + if ( f_snowfrac /= 'x' ) call abort_ice("f_snowfrac not available, set to 'x'") + if ( f_sisnthick /= 'x' ) call abort_ice("f_sisnthick not available, set to 'x'") #endif - ! write dimensions for 3D or 4D history variables - ! note: list of variables checked here is incomplete - if (f_aicen(1:1) /= 'x' .or. f_vicen(1:1) /= 'x' .or. & - f_Tinz (1:1) /= 'x' .or. f_Sinz (1:1) /= 'x') f_NCAT = .true. - if (f_Tinz (1:1) /= 'x' .or. f_Sinz (1:1) /= 'x') f_VGRDi = .true. - if (f_Tsnz (1:1) /= 'x') f_VGRDs = .true. +#ifdef AusCOM + if ( .not. do_scale_fluxes ) then + ! normal case is these are scaled in place to ice area average, + ! 
however without do_scale_fluxes, these are grid cell averages + if ( f_fsens /= 'x' ) call abort_ice("f_fsens not available, use f_fsens_ai") + if ( f_flat /= 'x' ) call abort_ice("f_flat not available, use f_flat_ai") + if ( f_fswabs /= 'x' ) call abort_ice("f_fswabs not available, use f_fswabs_ai") + if ( f_flwup /= 'x' ) call abort_ice("f_flwup not available, use f_flwup_ai") + if ( f_evap /= 'x' ) call abort_ice("f_evap not available, use f_evap_ai") + if ( f_Tref /= 'x' ) call abort_ice("f_Tref not available, set to 'x'") + if ( f_Qref /= 'x' ) call abort_ice("f_Qref not available, set to 'x'") + if ( f_fresh /= 'x' ) call abort_ice("f_fresh not available, use f_fresh_ai") + if ( f_fsalt /= 'x' ) call abort_ice("f_fsalt not available, use f_fsalt_ai") + if ( f_fhocn /= 'x' ) call abort_ice("f_fhocn not available, use f_fhocn_ai") + if ( f_fswthru /= 'x' ) call abort_ice("f_fswthru not available, use f_fswthru_ai") + if ( f_alvdr /= 'x' ) call abort_ice("f_alvdr not available, use f_alvdr_ai") + if ( f_alidr /= 'x' ) call abort_ice("f_alidr not available, use f_alidr_ai") + if ( f_alvdf /= 'x' ) call abort_ice("f_alvdf not available, use f_alvdf_ai") + if ( f_alidf /= 'x' ) call abort_ice("f_alidf not available, use f_alidf_ai") + endif +#endif + endif ! end check history config call broadcast_scalar (f_tmask, master_task) call broadcast_scalar (f_blkmask, master_task) @@ -210,18 +289,20 @@ subroutine init_hist (dt) ! call broadcast_scalar (f_example, master_task) call broadcast_scalar (f_hi, master_task) + call broadcast_scalar (f_sivol, master_task) call broadcast_scalar (f_hs, master_task) call broadcast_scalar (f_snowfrac, master_task) call broadcast_scalar (f_snowfracn, master_task) call broadcast_scalar (f_Tsfc, master_task) call broadcast_scalar (f_aice, master_task) + call broadcast_scalar (f_siconc, master_task) call broadcast_scalar (f_uvel, master_task) call broadcast_scalar (f_vvel, master_task) call broadcast_scalar (f_uatm, master_task) call broadcast_scalar (f_vatm, master_task) call broadcast_scalar (f_sice, master_task) - call broadcast_scalar (f_fswdn, master_task) call broadcast_scalar (f_fswup, master_task) + call broadcast_scalar (f_fswdn, master_task) call broadcast_scalar (f_flwdn, master_task) call broadcast_scalar (f_snow, master_task) call broadcast_scalar (f_snow_ai, master_task) @@ -306,9 +387,12 @@ subroutine init_hist (dt) call broadcast_scalar (f_aisnap, master_task) call broadcast_scalar (f_hisnap, master_task) call broadcast_scalar (f_sithick, master_task) + call broadcast_scalar (f_simass, master_task) call broadcast_scalar (f_siage, master_task) call broadcast_scalar (f_sisnconc, master_task) call broadcast_scalar (f_sisnthick, master_task) + call broadcast_scalar (f_sisnmass, master_task) + call broadcast_scalar (f_sisnmass_intensive, master_task) call broadcast_scalar (f_sitemptop, master_task) call broadcast_scalar (f_sitempsnic, master_task) call broadcast_scalar (f_sitempbot, master_task) @@ -316,17 +400,10 @@ subroutine init_hist (dt) call broadcast_scalar (f_siv, master_task) call broadcast_scalar (f_sidmasstranx, master_task) call broadcast_scalar (f_sidmasstrany, master_task) - call broadcast_scalar (f_sifb, master_task) call broadcast_scalar (f_sistrxdtop, master_task) call broadcast_scalar (f_sistrydtop, master_task) call broadcast_scalar (f_sistrxubot, master_task) call broadcast_scalar (f_sistryubot, master_task) - call broadcast_scalar (f_siforcetiltx, master_task) - call broadcast_scalar (f_siforcetilty, master_task) - call 
broadcast_scalar (f_siforcecoriolx, master_task) - call broadcast_scalar (f_siforcecorioly, master_task) - call broadcast_scalar (f_siforceintstrx, master_task) - call broadcast_scalar (f_siforceintstry, master_task) call broadcast_scalar (f_sicompstren, master_task) call broadcast_scalar (f_sispeed, master_task) call broadcast_scalar (f_sialb, master_task) @@ -340,12 +417,26 @@ subroutine init_hist (dt) call broadcast_scalar (f_sidmassgrowthwat, master_task) call broadcast_scalar (f_sidmassgrowthbot, master_task) call broadcast_scalar (f_sidmasssi, master_task) + call broadcast_scalar (f_sidmassgrowthsi, master_task) call broadcast_scalar (f_sidmassevapsubl, master_task) + call broadcast_scalar (f_sndmasssubl, master_task) + call broadcast_scalar (f_sisndmasssubl, master_task) + call broadcast_scalar (f_sisndmasssubl_intensive, master_task) call broadcast_scalar (f_sidmassmelttop, master_task) call broadcast_scalar (f_sidmassmeltbot, master_task) call broadcast_scalar (f_sidmasslat, master_task) + call broadcast_scalar (f_sidmassmeltlat, master_task) call broadcast_scalar (f_sndmasssnf, master_task) + call broadcast_scalar (f_sisndmasssnf, master_task) + call broadcast_scalar (f_sisndmasssnf_intensive, master_task) call broadcast_scalar (f_sndmassmelt, master_task) + call broadcast_scalar (f_sisndmassmelt, master_task) + call broadcast_scalar (f_sisndmassmelt_intensive, master_task) + call broadcast_scalar (f_sisndmasssi, master_task) + call broadcast_scalar (f_sisndmasssi_intensive, master_task) + call broadcast_scalar (f_sndmassdyn, master_task) + call broadcast_scalar (f_sisndmassdyn, master_task) + call broadcast_scalar (f_sisndmassdyn_intensive, master_task) call broadcast_scalar (f_siflswdtop, master_task) call broadcast_scalar (f_siflswutop, master_task) call broadcast_scalar (f_siflswdbot, master_task) @@ -353,18 +444,29 @@ subroutine init_hist (dt) call broadcast_scalar (f_sifllwutop, master_task) call broadcast_scalar (f_siflsenstop, master_task) call broadcast_scalar (f_siflsensupbot, master_task) + call broadcast_scalar (f_siflsensbot, master_task) call broadcast_scalar (f_sifllatstop, master_task) call broadcast_scalar (f_siflcondtop, master_task) call broadcast_scalar (f_siflcondbot, master_task) call broadcast_scalar (f_sipr, master_task) + call broadcast_scalar (f_sifb, master_task) call broadcast_scalar (f_siflsaltbot, master_task) call broadcast_scalar (f_siflfwbot, master_task) call broadcast_scalar (f_sisaltmass, master_task) + call broadcast_scalar (f_siflfwdrain, master_task) + call broadcast_scalar (f_siforcetiltx, master_task) + call broadcast_scalar (f_siforcetilty, master_task) + call broadcast_scalar (f_siforcecoriolx, master_task) + call broadcast_scalar (f_siforcecorioly, master_task) + call broadcast_scalar (f_siforceintstrx, master_task) + call broadcast_scalar (f_siforceintstry, master_task) + call broadcast_scalar (f_siitdconc, master_task) call broadcast_scalar (f_aicen, master_task) call broadcast_scalar (f_vicen, master_task) call broadcast_scalar (f_vsnon, master_task) call broadcast_scalar (f_trsig, master_task) call broadcast_scalar (f_icepresent, master_task) + call broadcast_scalar (f_sitimefrac, master_task) call broadcast_scalar (f_fsurf_ai, master_task) call broadcast_scalar (f_fcondtop_ai, master_task) call broadcast_scalar (f_fmeltt_ai, master_task) @@ -416,22 +518,22 @@ subroutine init_hist (dt) "grid cell mean snow thickness", & "snow volume per unit grid cell area", c1, c0, & ns1, f_hs) + call 
define_hist_field(n_snowfrac,"snowfrac","1",tstr2D, tcstr, & "grid cell mean snow fraction", & "snow fraction per unit grid cell area", c1, c0, & ns1, f_snowfrac) - call define_hist_field(n_Tsfc,"Tsfc","C",tstr2D, tcstr, & "snow/ice surface temperature", & "averaged with Tf if no ice is present", c1, c0, & ns1, f_Tsfc) - + call define_hist_field(n_aice,"aice","1",tstr2D, tcstr, & "ice area (aggregate)", & "none", c1, c0, & ns1, f_aice) - + call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & "ice velocity (x)", & "positive is x direction on U grid", c1, c0, & @@ -572,31 +674,31 @@ subroutine init_hist (dt) " ", c100, c0, & ns1, f_alidr_ai) - call define_hist_field(n_alvdf_ai,"alvdf_ai","%",tstr2D, tcstr, & - "visible diffuse albedo", & - " ", c100, c0, & - ns1, f_alvdf_ai) + call define_hist_field(n_alvdf_ai,"alvdf_ai","%",tstr2D, tcstr, & + "visible diffuse albedo", & + " ", c100, c0, & + ns1, f_alvdf_ai) - call define_hist_field(n_alidf_ai,"alidf_ai","%",tstr2D, tcstr, & - "near IR diffuse albedo", & - " ", c100, c0, & - ns1, f_alidf_ai) + call define_hist_field(n_alidf_ai,"alidf_ai","%",tstr2D, tcstr, & + "near IR diffuse albedo", & + " ", c100, c0, & + ns1, f_alidf_ai) call define_hist_field(n_albice,"albice","%",tstr2D, tcstr, & "bare ice albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albice) - + call define_hist_field(n_albsno,"albsno","%",tstr2D, tcstr, & "snow albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albsno) - + call define_hist_field(n_albpnd,"albpnd","%",tstr2D, tcstr, & "melt pond albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albpnd) - + call define_hist_field(n_coszen,"coszen","radian",tstr2D, tcstr, & "cosine of the zenith angle", & "negative below horizon", c1, c0, & @@ -606,42 +708,42 @@ subroutine init_hist (dt) "latent heat flux (cpl)", & "positive downward", c1, c0, & ns1, f_flat) - + call define_hist_field(n_flat_ai,"flat_ai","W/m^2",tstr2D, tcstr, & "latent heat flux", & "weighted by ice area", c1, c0, & ns1, f_flat_ai) - + call define_hist_field(n_fsens,"fsens","W/m^2",tstr2D, tcstr, & "sensible heat flux (cpl)", & "positive downward", c1, c0, & ns1, f_fsens) - + call define_hist_field(n_fsens_ai,"fsens_ai","W/m^2",tstr2D, tcstr, & "sensible heat flux", & "weighted by ice area", c1, c0, & ns1, f_fsens_ai) - + call define_hist_field(n_flwup,"flwup","W/m^2",tstr2D, tcstr, & "upward longwave flux (cpl)", & "positive downward", c1, c0, & ns1, f_flwup) - + call define_hist_field(n_flwup_ai,"flwup_ai","W/m^2",tstr2D, tcstr, & "upward longwave flux", & "weighted by ice area", c1, c0, & ns1, f_flwup_ai) - + call define_hist_field(n_evap,"evap","cm/day",tstr2D, tcstr, & "evaporative water flux (cpl)", & "none", mps_to_cmpdy/rhofresh, c0, & ns1, f_evap) - + call define_hist_field(n_evap_ai,"evap_ai","cm/day",tstr2D, tcstr, & "evaporative water flux", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_evap_ai) - + call define_hist_field(n_evap_ice_ai,"evap_ice_ai","cm/day",tstr2D, tcstr, & "evaporative water flux over ice only", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & @@ -656,208 +758,208 @@ subroutine init_hist (dt) "air temperature", & "none", c1, -Tffresh, & ns1, f_Tair) - + call define_hist_field(n_Tref,"Tref","degC",tstr2D, tcstr, & "2m reference temperature", & "none", c1, -Tffresh, & ns1, f_Tref) - + call define_hist_field(n_Qref,"Qref","g/kg",tstr2D, tcstr, & "2m reference specific humidity", & "none", kg_to_g, c0, & ns1, f_Qref) - + call 
define_hist_field(n_congel,"congel","cm/day",tstr2D, tcstr, & "congelation ice growth", & - "none", mps_to_cmpdy/dt, c0, & + "ice area average", mps_to_cmpdy/dt, c0, & ns1, f_congel) - + call define_hist_field(n_frazil,"frazil","cm/day",tstr2D, tcstr, & "frazil ice growth", & "none", mps_to_cmpdy/dt, c0, & ns1, f_frazil) - + call define_hist_field(n_snoice,"snoice","cm/day",tstr2D, tcstr, & "snow-ice formation", & - "none", mps_to_cmpdy/dt, c0, & - ns1, f_snoice) - + "ice area average", mps_to_cmpdy/dt, c0, & + ns1, f_snoice) !rename to snoice_ai ? + call define_hist_field(n_dsnow,"dsnow","cm/day",tstr2D, tcstr, & "snow formation", & "none", mps_to_cmpdy/dt, c0, & ns1, f_dsnow) - + call define_hist_field(n_meltt,"meltt","cm/day",tstr2D, tcstr, & "top ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltt) - + call define_hist_field(n_melts,"melts","cm/day",tstr2D, tcstr, & "top snow melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_melts) - + call define_hist_field(n_meltb,"meltb","cm/day",tstr2D, tcstr, & "basal ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltb) - + call define_hist_field(n_meltl,"meltl","cm/day",tstr2D, tcstr, & "lateral ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltl) - + call define_hist_field(n_fresh,"fresh","cm/day",tstr2D, tcstr, & "freshwtr flx ice to ocn (cpl)", & "if positive, ocean gains fresh water", & mps_to_cmpdy/rhofresh, c0, & ns1, f_fresh) - + call define_hist_field(n_fresh_ai,"fresh_ai","cm/day",tstr2D, tcstr, & "freshwtr flx ice to ocn", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_fresh_ai) - + call define_hist_field(n_fsalt,"fsalt","kg/m^2/s",tstr2D, tcstr, & "salt flux ice to ocn (cpl)", & "if positive, ocean gains salt", c1, c0, & ns1, f_fsalt) - + call define_hist_field(n_fsalt_ai,"fsalt_ai","kg/m^2/s",tstr2D, tcstr, & "salt flux ice to ocean", & "weighted by ice area", c1, c0, & ns1, f_fsalt_ai) - + call define_hist_field(n_fhocn,"fhocn","W/m^2",tstr2D, tcstr, & "heat flux ice to ocn (cpl)", & "if positive, ocean gains heat", c1, c0, & ns1, f_fhocn) - + call define_hist_field(n_fhocn_ai,"fhocn_ai","W/m^2",tstr2D, tcstr, & "heat flux ice to ocean", & "weighted by ice area", c1, c0, & ns1, f_fhocn_ai) - + call define_hist_field(n_fswthru,"fswthru","W/m^2",tstr2D, tcstr, & "SW thru ice to ocean (cpl)", & "if positive, ocean gains heat", c1, c0, & ns1, f_fswthru) - + call define_hist_field(n_fswthru_ai,"fswthru_ai","W/m^2",tstr2D, tcstr,& "SW flux thru ice to ocean", & "weighted by ice area", c1, c0, & ns1, f_fswthru_ai) - + call define_hist_field(n_strairx,"strairx","N/m^2",ustr2D, ucstr, & "atm/ice stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strairx) - + call define_hist_field(n_strairy,"strairy","N/m^2",ustr2D, ucstr, & "atm/ice stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strairy) - + call define_hist_field(n_strtltx,"strtltx","N/m^2",ustr2D, ucstr, & "sea sfc tilt stress (x)", & "none", c1, c0, & ns1, f_strtltx) - + call define_hist_field(n_strtlty,"strtlty","N/m^2",ustr2D, ucstr, & "sea sfc tilt stress (y)", & "none", c1, c0, & ns1, f_strtlty) - + call define_hist_field(n_strcorx,"strcorx","N/m^2",ustr2D, ucstr, & "coriolis stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strcorx) - + call define_hist_field(n_strcory,"strcory","N/m^2",ustr2D, ucstr, & "coriolis stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strcory) - + call define_hist_field(n_strocnx,"strocnx","N/m^2",ustr2D, ucstr, & "ocean/ice stress (x)", & "positive is 
x direction on U grid", c1, c0, & ns1, f_strocnx) - + call define_hist_field(n_strocny,"strocny","N/m^2",ustr2D, ucstr, & "ocean/ice stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strocny) - + call define_hist_field(n_strintx,"strintx","N/m^2",ustr2D, ucstr, & "internal ice stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strintx) - + call define_hist_field(n_strinty,"strinty","N/m^2",ustr2D, ucstr, & "internal ice stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strinty) - + call define_hist_field(n_strength,"strength","N/m",tstr2D, tcstr, & "compressive ice strength", & "none", c1, c0, & ns1, f_strength) - + call define_hist_field(n_divu,"divu","%/day",tstr2D, tcstr, & "strain rate (divergence)", & - "none", secday*c100, c0, & + "divu is instantaneous, on T grid", secday*c100, c0, & ns1, f_divu) - + call define_hist_field(n_shear,"shear","%/day",tstr2D, tcstr, & "strain rate (shear)", & "none", secday*c100, c0, & ns1, f_shear) - + call define_hist_field(n_sig1,"sig1","1",ustr2D, ucstr, & "norm. principal stress 1", & "sig1 is instantaneous", c1, c0, & ns1, f_sig1) - + call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, & "norm. principal stress 2", & "sig2 is instantaneous", c1, c0, & ns1, f_sig2) - + call define_hist_field(n_dvidtt,"dvidtt","cm/day",tstr2D, tcstr, & "ice volume tendency thermo", & "none", mps_to_cmpdy, c0, & ns1, f_dvidtt) - + call define_hist_field(n_dvsdtt,"dvsdtt","cm/day",tstr2D, tcstr, & "snow volume tendency thermo", & "none", mps_to_cmpdy, c0, & ns1, f_dvsdtt) - + call define_hist_field(n_dvidtd,"dvidtd","cm/day",tstr2D, tcstr, & "ice volume tendency dynamics", & "none", mps_to_cmpdy, c0, & ns1, f_dvidtd) - + call define_hist_field(n_dvsdtd,"dvsdtd","cm/day",tstr2D, tcstr, & "snow volume tendency dynamics", & "none", mps_to_cmpdy, c0, & ns1, f_dvsdtd) - + call define_hist_field(n_daidtt,"daidtt","%/day",tstr2D, tcstr, & "area tendency thermo", & "none", secday*c100, c0, & ns1, f_daidtt) - + call define_hist_field(n_daidtd,"daidtd","%/day",tstr2D, tcstr, & "area tendency dynamics", & "none", secday*c100, c0, & ns1, f_daidtd) - + call define_hist_field(n_dagedtt,"dagedtt","day/day",tstr2D, tcstr, & "age tendency thermo", & "excludes time step increment", c1, c0, & ns1, f_dagedtt) - + call define_hist_field(n_dagedtd,"dagedtd","day/day",tstr2D, tcstr, & "age tendency dynamics", & "excludes time step increment", c1, c0, & @@ -877,22 +979,22 @@ subroutine init_hist (dt) "ice volume snapshot", & "none", c1, c0, & ns1, f_hisnap) - + call define_hist_field(n_aisnap,"aisnap","1",tstr2D, tcstr, & "ice area snapshot", & "none", c1, c0, & ns1, f_aisnap) - - call define_hist_field(n_trsig,"trsig","N/m^2",tstr2D, tcstr, & + + call define_hist_field(n_trsig,"trsig","N/m",tstr2D, tcstr, & "internal stress tensor trace", & "ice strength approximation", c1, c0, & ns1, f_trsig) - + call define_hist_field(n_icepresent,"ice_present","1",tstr2D, tcstr, & "fraction of time-avg interval that ice is present", & "ice extent flag", c1, c0, & ns1, f_icepresent) - + call define_hist_field(n_fsurf_ai,"fsurf_ai","W/m^2",tstr2D, tcstr, & "net surface heat flux", & "positive downward, excludes conductive flux, weighted by ice area", & @@ -979,278 +1081,405 @@ subroutine init_hist (dt) ns1, f_FY) ! CMIP6 2D variables + ! these definitions follow the intensive/extensive/inst def in Notz 2016 + ! we interpret cell methods of "mean where sea" equivalent to "extensive" + ! 
extensive means a normal average (over all time and grid box area) + ! and "mean where sea_ice" as intensive + ! intensive vars can be grid box or ice area means, and are then calculated as + ! an ice-fraction weighted mean in time + ! extensive vars tend to zero when aice is zero, intensive vars do not + + ! In general, this implementation is limited by only weighting intensive + ! variables by aice. It would be better if averaging using aice_init/aice_mid + ! was possible. These would then be used when accumulating history and averaging + ! variables which are calculated before cice runs (would be averaged using aice_init), + ! or based on thermodynamics only (would be averaged using aice_mid) + ! It would require new history variables for aice_init and aice_mid + + call define_hist_field(n_siconc,"siconc","%",tstr2D, tcstr, & + "Sea-Ice Area Percentage (Ocean Grid)", & + "none", c100, c0, & + ns1, f_siconc) + + call define_hist_field(n_icepresent,"sitimefrac","1",tstr2D, tcstr, & + "fraction of time-avg interval that ice is present", & + "ice extent flag", c1, c0, & + ns1, f_sitimefrac) + + call define_hist_field(n_sivol,"sivol","m",tstr2D, tcstr, & + "Sea-Ice Volume per Area", & + "ice volume per unit grid cell area", c1, c0, & + ns1, f_sivol) + call define_hist_field(n_sithick,"sithick","m",tstr2D, tcstr, & - "sea ice thickness", & - "volume divided by area", c1, c0, & - ns1, f_sithick) + "Sea-Ice Thickness", & + "area weighted average of volume divided by ice area", c1, c0, & + ns1, f_sithick, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_simass,"simass","kg m^-2",tstr2D, tcstr, & + "Sea-Ice Mass", & + "ice mass per unit grid cell area", rhoi, c0, & + ns1, f_simass) call define_hist_field(n_siage,"siage","s",tstr2D, tcstr, & - "sea ice age", & - "none", c1, c0, & - ns1, f_siage) + "Age of Sea Ice", & + "area weighted average of age of sea ice", c1, c0, & + ns1, f_siage, avg_ice_present=.true., mask_ice_free_points=.true.) + call define_hist_field(n_sifb,"sifb","m",tstr2D, tcstr, & - "sea ice freeboard", & - "none", c1, c0, & - ns1, f_sifb) - call define_hist_field(n_sisnconc,"sisnconc","1",tstr2D, tcstr, & - "snow area fraction", & - "none", c1, c0, & - ns1, f_sisnconc) - call define_hist_field(n_sisnthick,"sisnthick","m",tstr2D, tcstr, & - "sea ice snow thickness", & - "snow volume divided by area", c1, c0, & - ns1, f_sisnthick) - call define_hist_field(n_sitemptop,"sitemptop","degC",tstr2D, tcstr, & - "sea ice surface temperature", & - "none", c1, c0, & - ns1, f_sitemptop) - call define_hist_field(n_sitempsnic,"sitempsnic","degC",tstr2D, tcstr, & - "snow ice interface temperature", & - "surface temperature when no snow present", c1, c0, & - ns1, f_sitempsnic) - call define_hist_field(n_sitempbot,"sitempbot","degK",tstr2D, tcstr, & - "sea ice bottom temperature", & - "none", c1, c0, & - ns1, f_sitempbot) + "Sea-Ice Freeboard", & + "area weighted average of height of sea ice above ocean surface", c1, c0, & + ns1, f_sifb, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sisnconc,"sisnconc","%",tstr2D, tcstr, & + "Snow Area Percentage", & + "area weighted average of Percentage of the sea-ice surface that is covered by snow", c100, c0, & + ns1, f_sisnconc, avg_ice_present=.true., mask_ice_free_points=.true.)
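The intensive/extensive distinction described in the comments above corresponds to two different time means (a sketch of the convention, assuming the avg_ice_present flag selects aice-weighted accumulation; T is the averaging interval and a the ice concentration aice):

    \bar{X}_{\mathrm{extensive}} = \frac{1}{T} \int_0^T X \, \mathrm{d}t
    \qquad\qquad
    \bar{Y}_{\mathrm{intensive}} = \frac{\int_0^T a \, Y \, \mathrm{d}t}{\int_0^T a \, \mathrm{d}t}

so extensive fields tend to zero as a goes to zero, while intensive fields retain the value they had over the ice-covered part of the averaging interval.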
+ + call define_hist_field(n_sisnthick,"sisnthick","m",tstr2D, tcstr, & + "Snow Thickness", & + "area weighted average of actual thickness of snow over the snow-covered part of the sea ice", c1, c0, & + ns1, f_sisnthick, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sisnmass,"sisnmass","kg m^-2",tstr2D, tcstr, & + "Snow Mass per Area", & + "snow mass per unit grid cell area", rhos, c0, & + ns1, f_sisnmass) + + call define_hist_field(n_sisnmass_intensive,"sisnmass_intensive","kg m^-2",tstr2D, tcstr, & + "Snow Mass per Area", & + "area weighted average of snow mass per unit grid cell area", rhos, c0, & + ns1, f_sisnmass_intensive, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sitemptop,"sitemptop","K",tstr2D, tcstr, & + "Surface Temperature of Sea Ice", & + "area weighted average of skin temperature", c1, Tffresh, & + ns1, f_sitemptop, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sitempsnic,"sitempsnic","K",tstr2D, tcstr, & + "Temperature at Snow-Ice Interface", & + "area weighted average of temperature at snow-ice interface, surface temperature when no snow present", c1, Tffresh, & + ns1, f_sitempsnic, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sitempbot,"sitempbot","K",tstr2D, tcstr, & + "Temperature at Ice-Ocean Interface", & + "area weighted average of ice-ocean interface temperature", c1, Tffresh, & + ns1, f_sitempbot, avg_ice_present=.true., mask_ice_free_points=.true.) + call define_hist_field(n_siu,"siu","m/s",ustr2D, ucstr, & - "ice x velocity component", & - "none", c1, c0, & - ns1, f_siu) + "X-Component of Sea-Ice Velocity", & + "area weighted average", c1, c0, & + ns1, f_siu, avg_ice_present=.true., mask_ice_free_points=.true.) + call define_hist_field(n_siv,"siv","m/s",ustr2D, ucstr, & - "ice y velocity component", & - "none", c1, c0, & - ns1, f_siv) + "Y-Component of Sea-Ice Velocity", & + "area weighted average", c1, c0, & + ns1, f_siv, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sidmasstranx,"sidmasstranx","kg/s",ustr2D, ucstr, & - "x component of snow and sea ice mass transport", & - "none", c1, c0, & + "X-Component of Sea-Ice Mass Transport", & + "includes sea-ice and snow transport", c1, c0, & ns1, f_sidmasstranx) call define_hist_field(n_sidmasstrany,"sidmasstrany","kg/s",ustr2D, ucstr, & - "y component of snow and sea ice mass transport", & - "none", c1, c0, & + "Y-Component of Sea-Ice Mass Transport", & + "includes sea-ice and snow transport", c1, c0, & ns1, f_sidmasstrany) call define_hist_field(n_sistrxdtop,"sistrxdtop","N m^-2",ustr2D, ucstr, & - "x component of atmospheric stress on sea ice", & - "none", c1, c0, & - ns1, f_sistrxdtop) + "X-Component of Atmospheric Stress on Sea Ice", & + "area weighted average", c1, c0, & + ns1, f_sistrxdtop, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sistrydtop,"sistrydtop","N m^-2",ustr2D, ucstr, & - "y component of atmospheric stress on sea ice", & - "none", c1, c0, & - ns1, f_sistrydtop) - + "Y-Component of Atmospheric Stress on Sea Ice", & + "area weighted average", c1, c0, & + ns1, f_sistrydtop, avg_ice_present=.true., mask_ice_free_points=.true.)
call define_hist_field(n_sistrxubot,"sistrxubot","N m^-2",ustr2D, ucstr, & - "x component of ocean stress on sea ice", & - "none", c1, c0, & - ns1, f_sistrxubot) + "X-Component of Ocean Stress on Sea Ice", & + "area weighted average", c1, c0, & + ns1, f_sistrxubot, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sistryubot,"sistryubot","N m^-2",ustr2D, ucstr, & - "y component of ocean stress on sea ice", & - "none", c1, c0, & - ns1, f_sistryubot) + "Y-Component of Ocean Stress on Sea Ice", & + "area weighted average", c1, c0, & + ns1, f_sistryubot, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_siforcetiltx,"siforcetiltx","N m^-2",ustr2D, ucstr, & - "x component of sea surface tilt force", & - "none", c1, c0, & - ns1, f_siforcetiltx) + "Sea-Surface Tilt Term in Force Balance (X-Component)", & + "area weighted average", c1, c0, & + ns1, f_siforcetiltx, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_siforcetilty,"siforcetilty","N m^-2",ustr2D, ucstr, & - "y component of sea surface tilt force", & - "none", c1, c0, & - ns1, f_siforcetilty) - - call define_hist_field(n_siforcecoriolx,"siforcecoriolx","N m^-2",ustr2D, ucstr, & - "x component of Coriolis force", & - "none", c1, c0, & - ns1, f_siforcecoriolx) - - call define_hist_field(n_siforcecorioly,"siforcecorioly","N m^-2",ustr2D, ucstr, & - "y component of Coriolis force", & - "none", c1, c0, & - ns1, f_siforcecorioly) - - call define_hist_field(n_siforceintstrx,"siforceintstrx","N m^-2",ustr2D, ucstr, & - "x component of internal ice stress force", & - "none", c1, c0, & - ns1, f_siforceintstrx) - - call define_hist_field(n_siforceintstry,"siforceintstry","N m^-2",ustr2D, ucstr, & - "y component of internal ice stress force", & - "none", c1, c0, & - ns1, f_siforceintstry) + "Sea-Surface Tilt Term in Force Balance (Y-Component)", & + "area weighted average", c1, c0, & + ns1, f_siforcetilty, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siforcecoriolx,"siforcecoriolx","N m^-2",ustr2D, ucstr, & + "Coriolis Force Term in Force Balance (X-Component)", & + "area weighted average", c1, c0, & + ns1, f_siforcecoriolx, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siforcecorioly,"siforcecorioly","N m^-2",ustr2D, ucstr, & + "Coriolis Force Term in Force Balance (Y-Component)", & + "area weighted average", c1, c0, & + ns1, f_siforcecorioly, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siforceintstrx,"siforceintstrx","N m^-2",ustr2D, ucstr, & + "Internal Stress Term in Force Balance (X-Component)", & + "area weighted average", c1, c0, & + ns1, f_siforceintstrx, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siforceintstry,"siforceintstry","N m^-2",ustr2D, ucstr, & + "Internal Stress Term in Force Balance (Y-Component)", & + "area weighted average", c1, c0, & + ns1, f_siforceintstry, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sicompstren,"sicompstren","N/m",ustr2D, ucstr, & - "compressive sea ice strength", & - "none", c1, c0, & - ns1, f_sicompstren) + "Compressive Sea Ice Strength", & + "area weighted average", c1, c0, & + ns1, f_sicompstren, avg_ice_present=.true., mask_ice_free_points=.true.) 
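The calls above pass two optional arguments, avg_ice_present and mask_ice_free_points, that the stock define_hist_field interface does not accept. The matching change to define_hist_field itself (in ice_history_shared.F90) is not shown in this hunk; the sketch below is an assumed declaration of the extended dummy-argument list, with kinds and the max_nstrm dimension taken from the standard CICE interface and the flag semantics inferred from their names and the averaging comments earlier in this subroutine.

      interface
         subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, &
                                      vdesc, vcomment, cona, conb, &
                                      ns, vhistfreq, &
                                      avg_ice_present, mask_ice_free_points)
            use ice_kinds_mod
            use ice_domain_size, only: max_nstrm
            integer (kind=int_kind), dimension(max_nstrm), intent(out) :: &
               id                      ! index into avail_hist_fields
            character (len=*), intent(in) :: &
               vname     , &           ! variable name
               vunit     , &           ! variable units
               vcoord    , &           ! variable coordinates
               vcellmeas , &           ! variable cell measures
               vdesc     , &           ! variable description
               vcomment                ! variable comment
            real (kind=dbl_kind), intent(in) :: &
               cona      , &           ! multiplicative conversion factor
               conb                    ! additive conversion factor
            integer (kind=int_kind), intent(in) :: &
               ns                      ! history stream index
            character (len=*), intent(in) :: &
               vhistfreq               ! history output frequency
            logical (kind=log_kind), optional, intent(in) :: &
               avg_ice_present    , &  ! assumed: accumulate as an aice-weighted
                                       ! time mean ("mean where sea_ice")
               mask_ice_free_points    ! assumed: write the fill value wherever
                                       ! no ice occurred during the interval
         end subroutine define_hist_field
      end interface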
call define_hist_field(n_sidivvel,"sidivvel","1/s",ustr2D, ucstr, & - "divergence of the sea ice velocity field (ice area weighted)", & - "none", c1, c0, & - ns1, f_sidivvel) + "Divergence of the Sea-Ice Velocity Field", & + "area weighted average", c1, c0, & + ns1, f_sidivvel, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sispeed,"sispeed","m/s",ustr2D, ucstr, & - "ice speed", & - "none", c1, c0, & - ns1, f_sispeed) + "Sea-Ice Speed", & + "area weighted average", c1, c0, & + ns1, f_sispeed, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sialb,"sialb","1",tstr2D, tcstr, & - "sea ice albedo", & - "none", c1, c0, & - ns1, f_sialb) + "Sea-Ice Albedo", & + "area weighted average", c1, c0, & + ns1, f_sialb, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sihc,"sihc","J m^-2",tstr2D, tcstr, & - "sea ice heat content", & - "none", c1, c0, & + "Sea-Ice Heat Content", & + "per unit grid cell area", c1, c0, & ns1, f_sihc) - call define_hist_field(n_sisnhc,"sisnhc","J m^-2",tstr2D, tcstr, & - "snow heat content", & - "none", c1, c0, & + call define_hist_field(n_sisnhc,"sisnhc","J m^-2",tstr2D, tcstr, & + "Snow Heat Content", & + "per unit grid cell area", c1, c0, & ns1, f_sisnhc) + call define_hist_field(n_sidconcth,"sidconcth","1/s",tstr2D, tcstr, & + "Sea-Ice Area Fraction Tendency Due to Thermodynamics", & + "none", c1, c0, & + ns1, f_sidconcth) - - call define_hist_field(n_sidconcth,"sidconcth","1/s",tstr2D, tcstr, & - "sea ice area change from thermodynamics", & - "none", c1, c0, & - ns1, f_sidconcth) - call define_hist_field(n_sidconcdyn,"sidconcdyn","1/s",tstr2D, tcstr, & - "sea ice area change from dynamics", & - "none", c1, c0, & + call define_hist_field(n_sidconcdyn,"sidconcdyn","1/s",tstr2D, tcstr, & + "Sea-Ice Area Fraction Tendency Due to Dynamics", & + "none", c1, c0, & ns1, f_sidconcdyn) call define_hist_field(n_sidmassth,"sidmassth","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from thermodynamics", & - "none", c1, c0, & + "Sea-Ice Mass Change from Thermodynamics", & + "per unit grid cell area", rhoi, c0, & ns1, f_sidmassth) call define_hist_field(n_sidmassdyn,"sidmassdyn","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from dynamics", & - "none", c1, c0, & - ns1, f_sidmassdyn) + "Sea-Ice Mass Change from Dynamics", & + "per unit grid cell area", rhoi, c0, & + ns1, f_sidmassdyn) call define_hist_field(n_sidmassgrowthwat,"sidmassgrowthwat","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from frazil growth", & - "none", c1, c0, & - ns1, f_sidmassgrowthwat) + "Sea-Ice Mass Change Through Growth in Supercooled Open Water (Frazil)", & + "per unit grid cell area", rhoi/dt, c0, & + ns1, f_sidmassgrowthwat) call define_hist_field(n_sidmassgrowthbot,"sidmassgrowthbot","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from bottom growth", & - "none", c1, c0, & - ns1, f_sidmassgrowthbot) + "Sea-Ice Mass Change Through Basal Growth", & + "per unit grid cell area", rhoi/dt, c0, & + ns1, f_sidmassgrowthbot) - call define_hist_field(n_sidmasssi,"sidmasssi","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from snow ice conversion", & - "none", c1, c0, & + call define_hist_field(n_sidmasssi,"sidmasssi","kg m^-2 s^-1",tstr2D, tcstr, & + "Sea-Ice Mass Change Through Snow-to-Ice Conversion", & + "per unit grid cell area", rhoi/dt, c0, & ns1, f_sidmasssi) - call define_hist_field(n_sidmassevapsubl,"sidmassevapsubl","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from 
evaporation and sublimation", & - "none", c1, c0, & - ns1, f_sidmassevapsubl) - - call define_hist_field(n_sidmassmelttop,"sidmassmelttop","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from top ice melt", & - "none", c1, c0, & + call define_hist_field(n_sidmasssi,"sidmassgrowthsi","kg m^-2 s^-1",tstr2D, tcstr, & + "Sea-Ice Mass Change Through Snow-to-Ice Conversion", & + "per unit grid cell area", rhoi/dt, c0, & + ns1, f_sidmassgrowthsi) + + call define_hist_field(n_sidmassevapsubl,"sidmassevapsubl","kg m^-2 s^-1",tstr2D, tcstr, & + "Sea-Ice Mass Change Through Evaporation and Sublimation", & + "per unit grid cell area", c1, c0, & + ns1, f_sidmassevapsubl) + + call define_hist_field(n_sisndmasssubl,"sndmasssubl","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Evaporation or Sublimation", & + "per unit grid cell area", c1, c0, & + ns1, f_sndmasssubl) + + call define_hist_field(n_sisndmasssubl,"sisndmasssubl","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Evaporation or Sublimation", & + "per unit grid cell area", c1, c0, & + ns1, f_sisndmasssubl) + + call define_hist_field(n_sisndmasssubl_intensive,"sisndmasssubl_intensive","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Evaporation or Sublimation, divided by grid cell area", & + "area weighted average per unit grid cell area", c1, c0, & + ns1, f_sisndmasssubl_intensive, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sidmassmelttop,"sidmassmelttop","kg m^-2 s^-1",tstr2D, tcstr, & + "Sea-Ice Mass Change Through Surface Melting", & + "per unit grid cell area", -c1*rhoi/dt, c0, & ns1, f_sidmassmelttop) call define_hist_field(n_sidmassmeltbot,"sidmassmeltbot","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from bottom ice melt", & - "none", c1, c0, & - ns1, f_sidmassmeltbot) + "Sea-Ice Mass Change Through Bottom Melting", & + "per unit grid cell area", -c1*rhoi/dt, c0, & + ns1, f_sidmassmeltbot) call define_hist_field(n_sidmasslat,"sidmasslat","kg m^-2 s^-1",tstr2D, tcstr, & - "sea ice mass change from lateral ice melt", & - "none", c1, c0, & - ns1, f_sidmasslat) - - call define_hist_field(n_sndmasssnf,"sndmasssnf","kg m^-2 s^-1",tstr2D, tcstr, & - "snow mass change from snow fall", & - "none", c1, c0, & - ns1, f_sndmasssnf) - - call define_hist_field(n_sndmassmelt,"sndmassmelt","kg m^-2 s^-1",tstr2D, tcstr, & - "snow mass change from melt", & - "none", c1, c0, & - ns1, f_sndmassmelt) - - call define_hist_field(n_siflswdtop,"siflswdtop","W/m^2",tstr2D, tcstr, & - "down shortwave flux over sea ice", & - "positive downward", c1, c0, & - ns1, f_siflswdtop) - - call define_hist_field(n_siflswutop,"siflswutop","W/m^2",tstr2D, tcstr, & - "upward shortwave flux over sea ice", & - "positive downward", c1, c0, & - ns1, f_siflswutop) - - call define_hist_field(n_siflswdbot,"siflswdbot","W/m^2",tstr2D, tcstr, & - "down shortwave flux at bottom of ice", & - "positive downward", c1, c0, & - ns1, f_siflswdbot) - - call define_hist_field(n_sifllwdtop,"sifllwdtop","W/m^2",tstr2D, tcstr, & - "down longwave flux over sea ice", & - "positive downward", c1, c0, & - ns1, f_sifllwdtop) - - call define_hist_field(n_sifllwutop,"sifllwutop","W/m^2",tstr2D, tcstr, & - "upward longwave flux over sea ice", & - "positive downward", c1, c0, & - ns1, f_sifllwutop) - - call define_hist_field(n_siflsenstop,"siflsenstop","W/m^2",tstr2D, tcstr, & - "sensible heat flux over sea ice", & - "positive downward", c1, c0, & - ns1, f_siflsenstop) - - call 
define_hist_field(n_siflsensupbot,"siflsensupbot","W/m^2",tstr2D, tcstr, & - "sensible heat flux at bottom of sea ice", & - "positive downward", c1, c0, & - ns1, f_siflsensupbot) + "Sea-Ice Mass Change Through Lateral Melting", & + "per unit grid cell area", -c1*rhoi/dt, c0, & + ns1, f_sidmasslat) + call define_hist_field(n_sidmasslat,"sidmassmeltlat","kg m^-2 s^-1",tstr2D, tcstr, & + "Sea-Ice Mass Change Through Lateral Melting", & + "per unit grid cell area", -c1*rhoi/dt, c0, & + ns1, f_sidmassmeltlat) - call define_hist_field(n_sifllatstop,"sifllatstop","W/m^2",tstr2D, tcstr, & - "latent heat flux over sea ice", & - "positive downward", c1, c0, & - ns1, f_sifllatstop) - - call define_hist_field(n_siflcondtop,"siflcondtop","W/m^2",tstr2D, tcstr, & - "conductive heat flux at top of sea ice", & - "positive downward", c1, c0, & - ns1, f_siflcondtop) - - call define_hist_field(n_siflcondbot,"siflcondbot","W/m^2",tstr2D, tcstr, & - "conductive heat flux at bottom of sea ice", & - "positive downward", c1, c0, & - ns1, f_siflcondbot) - - call define_hist_field(n_sipr,"sipr","kg m^-2 s^-1",tstr2D, tcstr, & - "rainfall over sea ice", & - "none", c1, c0, & - ns1, f_sipr) - - - call define_hist_field(n_siflsaltbot,"siflsaltbot","kg m^-2 s^-1",tstr2D, tcstr, & - "salt flux from sea ice", & - "positive downward", c1, c0, & - ns1, f_siflsaltbot) + call define_hist_field(n_sndmasssnf,"sndmasssnf","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Change Through Snowfall", & + "per unit grid cell area", c1, c0, & + ns1, f_sndmasssnf) - call define_hist_field(n_siflfwbot,"siflfwbot","kg m^-2 s^-1",tstr2D, tcstr, & - "fresh water flux from sea ice", & - "positive downward", c1, c0, & - ns1, f_siflfwbot) + call define_hist_field(n_snow_ai,"sisndmasssnf","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Change Through Snowfall", & + "Always positive or zero, per unit grid cell area", c1, c0, & + ns1, f_sisndmasssnf) + call define_hist_field(n_sisndmasssnf_intensive,"sisndmasssnf_intensive","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Change Through Snowfall", & + "area weighted average, always positive or zero, per unit grid cell area", c1, c0, & + ns1, f_sisndmasssnf_intensive, avg_ice_present=.true., mask_ice_free_points=.true.) - call define_hist_field(n_sisaltmass,"sisaltmass","kg m^-2",tstr2D,& - tcstr, "mass of salt in sea ice (for ocean fluxes)",& + call define_hist_field(n_sndmassmelt,"sndmassmelt","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Melt", & + "per unit grid cell area", -c1*rhos/dt, c0, & + ns1, f_sndmassmelt) - "none", c1, c0, & - ns1, f_sisaltmass) + call define_hist_field(n_sndmassmelt,"sisndmassmelt","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Melt", & + "Always negative or zero, per unit grid cell area", -c1*rhos/dt, c0, & + ns1, f_sisndmassmelt) + + call define_hist_field(n_sisndmassmelt_intensive,"sisndmassmelt_intensive","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Melt", & + "area weighted average, always negative or zero, per unit grid cell area", -c1*rhos/dt, c0, & + ns1, f_sisndmassmelt_intensive, avg_ice_present=.true., mask_ice_free_points=.true.) 
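! Illustrative sketch, not part of the patch: why conversion factors such as
! rhoi/dt and -c1*rhos/dt are passed to the define_hist_field calls above.
! Fields like meltt or melts are accumulated as metres of ice or snow lost per
! time step (grid cell mean), so scaling the time-averaged value by density/dt
! turns it into a CMIP mass tendency in kg m-2 s-1, with the minus sign marking
! a mass loss; -c1*rhos/dt plays the same role for the snow fields.  All names
! and numbers below are hypothetical stand-ins for the history machinery.
program mass_tendency_units_sketch
  implicit none
  integer, parameter :: dbl_kind = selected_real_kind(13)
  real(dbl_kind), parameter :: rhoi = 917.0_dbl_kind   ! ice density (kg m-3)
  real(dbl_kind), parameter :: dt   = 3600.0_dbl_kind  ! model time step (s)
  real(dbl_kind) :: meltt_sum, meltt_avg, sidmassmelttop
  integer :: nsteps, t

  ! pretend accumulation: 0.5 mm of ice melted at the surface each step
  nsteps    = 24
  meltt_sum = 0.0_dbl_kind
  do t = 1, nsteps
     meltt_sum = meltt_sum + 0.0005_dbl_kind     ! m of ice per step, cell mean
  enddo

  meltt_avg      = meltt_sum / real(nsteps, dbl_kind)  ! m of ice per step
  sidmassmelttop = -rhoi/dt * meltt_avg                ! kg m-2 s-1, negative = loss

  print *, 'surface melt mass tendency (kg m-2 s-1):', sidmassmelttop
end program mass_tendency_units_sketch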
+ + call define_hist_field(n_sisndmasssi,"sisndmasssi","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Snow-to-Ice Conversion", & + "Always negative or zero, per unit grid cell area", -c1*rhoi/dt, c0, & + ns1, f_sisndmasssi) + + call define_hist_field(n_sisndmasssi_intensive,"sisndmasssi_intensive","kg m^-2 s^-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Snow-to-Ice Conversion", & + "area weighted average, always negative or zero, per unit grid cell area", -c1*rhoi/dt, c0, & + ns1, f_sisndmasssi_intensive, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sndmassdyn,"sndmassdyn","kg m-2 s-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Advection by Sea-Ice Dynamics", & + "per unit grid cell area", rhos, c0, & + ns1, f_sndmassdyn) + + call define_hist_field(n_sndmassdyn,"sisndmassdyn","kg m-2 s-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Advection by Sea-Ice Dynamics", & + "per unit grid cell area", rhos, c0, & + ns1, f_sisndmassdyn) + + call define_hist_field(n_sisndmassdyn_intensive,"sisndmassdyn_intensive","kg m-2 s-1",tstr2D, tcstr, & + "Snow Mass Rate of Change Through Advection by Sea-Ice Dynamics", & + "area weighted average, per unit grid cell area", rhos, c0, & + ns1, f_sisndmassdyn_intensive, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflswdtop,"siflswdtop","W m^-2",tstr2D, tcstr, & + "Downwelling Shortwave Flux over Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_siflswdtop, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflswutop,"siflswutop","W m^-2",tstr2D, tcstr, & + "Upwelling Shortwave Flux over Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_siflswutop, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflswdbot,"siflswdbot","W m^-2",tstr2D, tcstr, & + "Downwelling Shortwave Flux under Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_siflswdbot, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sifllwdtop,"sifllwdtop","W m^-2",tstr2D, tcstr, & + "Downwelling Longwave Flux over Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_sifllwdtop, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sifllwutop,"sifllwutop","W m^-2",tstr2D, tcstr, & + "Upwelling Longwave Flux over Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_sifllwutop, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflsenstop,"siflsenstop","W m^-2",tstr2D, tcstr, & + "Net Downward Sensible Heat Flux over Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_siflsenstop, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflsensupbot,"siflsensupbot","W m^-2",tstr2D, tcstr, & + "Net Upward Sensible Heat Flux under Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_siflsensupbot, avg_ice_present=.true., mask_ice_free_points=.true.) 
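! Illustrative sketch, not part of the patch: the weighting convention behind
! the "area weighted average ... per sea ice area" fluxes defined above
! (siflswdtop, sifllwdtop, siflsenstop, ...).  Each time step the flux is
! accumulated multiplied by the ice fraction aice, and the written value is
! later divided by the accumulated aice (cf. ravgip in the finalisation code
! further down), giving an ice-area-weighted time mean.  The names flux_sum,
! aice_sum and the sample numbers are hypothetical.
program ice_area_mean_sketch
  implicit none
  integer, parameter :: dbl_kind = selected_real_kind(13)
  integer, parameter :: nsteps = 3
  real(dbl_kind) :: aice(nsteps), flux(nsteps)
  real(dbl_kind) :: flux_sum, aice_sum, flux_ice_mean
  integer :: t

  aice = [0.8_dbl_kind, 0.4_dbl_kind, 0.0_dbl_kind]    ! ice fraction each step
  flux = [100._dbl_kind, 50._dbl_kind, 999._dbl_kind]  ! flux over ice; last step is ice free

  flux_sum = 0.0_dbl_kind
  aice_sum = 0.0_dbl_kind
  do t = 1, nsteps
     flux_sum = flux_sum + aice(t)*flux(t)   ! cf. worka = aice*flw in accum_hist
     aice_sum = aice_sum + aice(t)           ! cf. the accumulated aice used for ravgip
  enddo

  if (aice_sum > 0.0_dbl_kind) then
     ! divide by the summed ice fraction rather than by nsteps:
     ! the ice-free step contributes nothing to either sum
     flux_ice_mean = flux_sum / aice_sum
  else
     flux_ice_mean = 0.0_dbl_kind            ! such points would be masked as ice free
  endif

  print *, 'ice-area-weighted mean flux (W m-2):', flux_ice_mean
end program ice_area_mean_sketch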
+ + call define_hist_field(n_siflsensupbot,"siflsensbot","W m^-2",tstr2D, tcstr, & + "Net Upward Sensible Heat Flux under Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_siflsensbot, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sifllatstop,"sifllatstop","W m^-2",tstr2D, tcstr, & + "Net Latent Heat Flux over Sea Ice", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_sifllatstop, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflcondtop,"siflcondtop","W m^-2",tstr2D, tcstr, & + "Net Conductive Heat Flux in Sea Ice at the Surface", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_siflcondtop, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflcondbot,"siflcondbot","W m^-2",tstr2D, tcstr, & + "Net Conductive Heat Flux in Sea Ice at the Base", & + "area weighted average, positive downward, per sea ice area", c1, c0, & + ns1, f_siflcondbot, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflfwdrain,"siflfwdrain","kg m^-2 s^-1",tstr2D, tcstr, & + "Freshwater Flux from Sea-Ice Surface", & + "area weighted average, positive downward, per unit grid cell area", c1, c0, & + ns1, f_siflfwdrain, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sipr,"sipr","kg m^-2 s^-1",tstr2D, tcstr, & + "Rainfall Rate over Sea Ice", & + "area weighted average, per unit grid cell area", c1, c0, & + ns1, f_sipr, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflsaltbot,"siflsaltbot","kg m^-2 s^-1",tstr2D, tcstr, & + "Salt Flux from Sea Ice", & + "area weighted average, positive downward, per unit grid cell area", c1, c0, & + ns1, f_siflsaltbot, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_siflfwbot,"siflfwbot","kg m^-2 s^-1",tstr2D, tcstr, & + "Freshwater Flux from Sea Ice", & + "area weighted average, positive downward, per unit grid cell area", c1, c0, & + ns1, f_siflfwbot, avg_ice_present=.true., mask_ice_free_points=.true.) + + call define_hist_field(n_sisaltmass,"sisaltmass","kg m^-2",tstr2D, tcstr, & + "Mass of Salt in Sea Ice",& + "per unit grid cell area", ice_ref_salinity * rhoi / c1000 , c0, & + ns1, f_sisaltmass) endif ! if (histfreq(ns1) /= 'x') then enddo ! 
ns1 @@ -1275,16 +1504,16 @@ subroutine init_hist (dt) do ns1 = 1, nstreams if (histfreq(ns1) /= 'x') then - call define_hist_field(n_aicen,"aicen","1",tstr3Dc, tcstr, & - "ice area, categories","none", c1, c0, & + call define_hist_field(n_aicen,"aicen","1",tstr3Dc, tcstr, & + "ice area, categories","none", c1, c0, & ns1, f_aicen) - call define_hist_field(n_vicen,"vicen","m",tstr3Dc, tcstr, & - "ice volume, categories","none", c1, c0, & + call define_hist_field(n_vicen,"vicen","m",tstr3Dc, tcstr, & + "ice volume, categories","none", c1, c0, & ns1, f_vicen) call define_hist_field(n_vsnon,"vsnon","m",tstr3Dc, tcstr, & - "snow depth on ice, categories","volume per unit area of snow", c1, c0, & + "snow depth on ice, categories","volume per unit area of snow", c1, c0, & ns1, f_vsnon) call define_hist_field(n_snowfracn,"snowfracn","1",tstr3Dc, tcstr, & @@ -1292,36 +1521,45 @@ subroutine init_hist (dt) "snow fraction per unit grid cell area", c1, c0, & ns1, f_snowfracn) - - call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & - "net surface heat flux, categories","weighted by ice area", c1, c0, & + call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & + "net surface heat flux, categories","weighted by ice area", c1, c0, & ns1, f_fsurfn_ai) - + call define_hist_field(n_fcondtopn_ai,"fcondtopn_ai","W/m^2",tstr3Dc, tcstr, & "top sfc conductive heat flux, cat","weighted by ice area", c1, c0, & ns1, f_fcondtopn_ai) - call define_hist_field(n_fmelttn_ai,"fmelttn_ai","W/m^2",tstr3Dc, tcstr, & - "net sfc heat flux causing melt, cat","weighted by ice area", c1, c0, & + call define_hist_field(n_fmelttn_ai,"fmelttn_ai","W/m^2",tstr3Dc, tcstr, & + "net sfc heat flux causing melt, cat","weighted by ice area", c1, c0, & ns1, f_fmelttn_ai) - call define_hist_field(n_flatn_ai,"flatn_ai","W/m^2",tstr3Dc, tcstr, & - "latent heat flux, category","weighted by ice area", c1, c0, & + call define_hist_field(n_flatn_ai,"flatn_ai","W/m^2",tstr3Dc, tcstr, & + "latent heat flux, category","weighted by ice area", c1, c0, & ns1, f_flatn_ai) - call define_hist_field(n_fsensn_ai,"fsensn_ai","W/m^2",tstr3Dc, tcstr, & - "sensible heat flux, category","weighted by ice area", c1, c0, & + call define_hist_field(n_fsensn_ai,"fsensn_ai","W/m^2",tstr3Dc, tcstr, & + "sensible heat flux, category","weighted by ice area", c1, c0, & ns1, f_fsensn_ai) - call define_hist_field(n_Tn_top,"Tn_top","K",tstr3Dc, tcstr, & - "temperature of the top layer (snow or ice), categories","multilayer scheme", c1, c0, & - ns1, f_Tn_top) + ! to-do: add if zero-layer? + call define_hist_field(n_Tn_top,"Tn_top","K",tstr3Dc, tcstr, & + "temperature of the top layer (snow or ice), categories", "multilayer scheme", c1, c0, & + ns1, f_Tn_top) call define_hist_field(n_keffn_top,"keffn_top","W/m^2/K",tstr3Dc, tcstr, & "effective thermal conductivity of the top ice layer, categories", & - "multilayer scheme", c1, c0, & + "multilayer scheme", c1, c0, & ns1, f_keffn_top) + ! CMIP 3D + call define_hist_field(n_siitdconc,"siitdconc","%",tstr3Dc, tcstr, & + "Sea-Ice Area Percentage in Ice Thickness Categories", & + "none", c100, c0, & + ns1, f_siitdconc) + + ! siitdthick, siitdsnconc, siitdsnthick are not implemented because it's not clear how to + ! mask them when ice free (e.g. by aice or aicen ? ) + endif ! if (histfreq(ns1) /= 'x') then enddo ! 
ns1 @@ -1386,14 +1624,15 @@ subroutine init_hist (dt) if (allocated(Tinz4d)) deallocate(Tinz4d) allocate(Tinz4d(nx_block,ny_block,nzilyr,ncat_hist)) endif + if (f_Sinz (1:1) /= 'x') then + if (allocated(Sinz4d)) deallocate(Sinz4d) + allocate(Sinz4d(nx_block,ny_block,nzilyr,ncat_hist)) + endif if (f_Tsnz (1:1) /= 'x') then if (allocated(Tsnz4d)) deallocate(Tsnz4d) allocate(Tsnz4d(nx_block,ny_block,nzslyr,ncat_hist)) endif - if (f_Sinz (1:1) /= 'x') then - if (allocated(Sinz4d)) deallocate(Sinz4d) - allocate(Sinz4d(nx_block,ny_block,nzilyr,ncat_hist)) - endif + ! other 4D history variables @@ -1432,9 +1671,13 @@ subroutine init_hist (dt) ntmp(:) = 0 if (my_task == master_task) then write(nu_diag,*) ' ' + write(nu_diag,*) 'total number of history fields = ',num_avail_hist_fields_tot + write(nu_diag,*) 'max number of history fields = ',max_avail_hist_fields write(nu_diag,*) 'The following variables will be ', & 'written to the history tape: ' write(nu_diag,101) 'description','units','variable','frequency','x' + if (num_avail_hist_fields_tot == 0) & + write(nu_diag,*) '*** WARNING: NO HISTORY FIELDS WILL BE WRITTEN ***' do n=1,num_avail_hist_fields_tot if (avail_hist_fields(n)%vhistfreq_n /= 0) & write(nu_diag,100) avail_hist_fields(n)%vdesc, & @@ -1447,8 +1690,8 @@ subroutine init_hist (dt) enddo ! num_avail_hist_fields_tot write(nu_diag,*) ' ' endif - 100 format (1x,a40,2x,a16,2x,a12,1x,a1,2x,i6) - 101 format (2x,a19,10x,a16,9x,a12,2x,a,3x,a1) + 100 format (1x,a50,2x,a16,2x,a16,1x,a1,2x,i6) + 101 format (2x,a19,21x,a16,5x,a16,2x,a,3x,a1) call broadcast_array(ntmp, master_task) do ns = 1, nstreams @@ -1480,9 +1723,11 @@ subroutine init_hist (dt) if (allocated(a4Di)) deallocate(a4Di) if (num_avail_hist_fields_4Di > 0) & allocate(a4Di(nx_block,ny_block,nzilyr,ncat_hist,num_avail_hist_fields_4Di,max_blocks)) + if (allocated(a4Ds)) deallocate(a4Ds) if (num_avail_hist_fields_4Ds > 0) & allocate(a4Ds(nx_block,ny_block,nzslyr,ncat_hist,num_avail_hist_fields_4Ds,max_blocks)) + if (allocated(a4Db)) deallocate(a4Db) if (num_avail_hist_fields_4Db > 0) & allocate(a4Db(nx_block,ny_block,nzblyr,ncat_hist,num_avail_hist_fields_4Db,max_blocks)) @@ -1499,8 +1744,8 @@ subroutine init_hist (dt) if (restart .and. yday >= c2) then ! 
restarting midyear gives erroneous onset dates - mlt_onset = 999._dbl_kind - frz_onset = 999._dbl_kind + mlt_onset = 999._dbl_kind + frz_onset = 999._dbl_kind else mlt_onset = c0 frz_onset = c0 @@ -1521,8 +1766,8 @@ subroutine accum_hist (dt) use ice_fileunits, only: nu_diag use ice_constants, only: c0, c1, p25, puny, secday, depressT, & - awtvdr, awtidr, awtvdf, awtidf, Lfresh, rhoi, rhos, cp_ice, & - spval_dbl, Tffresh, ice_ref_salinity, c1000 + awtvdr, awtidr, awtvdf, awtidf, Lfresh, rhoi, rhos, rhow, rhofresh, cp_ice, & + spval_dbl, Tffresh use ice_domain, only: blocks_ice, nblocks use ice_grid, only: tmask, lmask_n, lmask_s, tarea, HTE, HTN #ifdef AusCOM @@ -1541,24 +1786,7 @@ subroutine accum_hist (dt) use ice_dyn_eap, only: a11, a12, e11, e12, e22, s11, s12, s22, & yieldstress11, yieldstress12, yieldstress22 use ice_dyn_shared, only: kdyn, principal_stress,a_min - use ice_flux, only: fsw, flw, fsnow, frain, sst, sss, uocn, vocn, & - frzmlt_init, fswfac, fswabs, fswthru, alvdr, alvdf, alidr, alidf, & - albice, albsno, albpnd, coszen, flat, fsens, flwout, evap, & - Tair, Tref, Qref, congel, frazil, snoice, dsnow, & - melts, meltb, meltt, meltl, fresh, fsalt, fresh_ai, fsalt_ai, & - fhocn, fhocn_ai, uatm, vatm, & - fswthru_ai, strairx, strairy, strtltx, strtlty, strintx, strinty, & - strocnx, strocny, fm, daidtt, dvidtt, dvsdtt, daidtd, dvidtd, dvsdtd, fsurf, & - fcondtop, fsurfn, fcondtopn, & - fcondbot, fcondbotn, ice_freeboard, & - flatn, fsensn, albcnt, prs_sig, & - stressp_1, stressm_1, stress12_1, & - stressp_2, stressm_2, stress12_2, & - stressp_3, stressm_3, stress12_3, & - stressp_4, stressm_4, stress12_4, sig1, sig2, & - mlt_onset, frz_onset, dagedtt, dagedtd, fswint_ai, Tn_top, & - keffn_top, snowfrac, snowfracn, alvdr_ai, alvdf_ai, alidr_ai, & - alidf_ai, evap_snow, evap_ice + use ice_flux use ice_atmo, only: formdrag use ice_history_shared ! almost everything use ice_history_write, only: ice_write_hist @@ -1570,7 +1798,7 @@ subroutine accum_hist (dt) use ice_meltpond_cesm, only: hs0 use ice_state ! almost everything use ice_therm_shared, only: calculate_Tin_from_qin, Tmlt, ktherm, & - Ti_bot, Tsnic + Ti_bot, Tsnice use ice_therm_mushy, only: temperature_mush, temperature_snow use ice_timers, only: ice_timer_start, ice_timer_stop, timer_readwrite use ice_zbgc_shared, only: skl_bgc @@ -1593,7 +1821,11 @@ subroutine accum_hist (dt) real (kind=dbl_kind) :: & qn , & ! temporary variable for enthalpy hs , & ! temporary variable for snow depth - Tmlts ! temporary variable for melting temperature + Tmlts , & ! temporary variable for melting temperature + rho_ice, rho_ocn ! temporary variables for freeboard + + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat_hist) :: & + ravgipn real (kind=dbl_kind) :: & area_threshold ! min time mean ice area allowed for dividing @@ -1660,13 +1892,12 @@ subroutine accum_hist (dt) avgct(ns) = c1 else ! write averages over time histfreq avgct(ns) = avgct(ns) + c1 -! if (avgct(ns) == c1) time_beg(ns) = (time-dt)/int(secday) if (avgct(ns) == c1) then time_beg(ns) = (time-dt)/int(secday) time_beg(ns) = real(time_beg(ns),kind=real_kind) endif endif - enddo + enddo ! ns !--------------------------------------------------------------- ! 
increment field @@ -1677,7 +1908,7 @@ subroutine accum_hist (dt) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1690,9 +1921,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_hi, iblk, vice(:,:,iblk), a2D) if (f_hs (1:1) /= 'x') & call accum_hist_field(n_hs, iblk, vsno(:,:,iblk), a2D) - if (f_sifb (1:1) /= 'x') & - call accum_hist_field(n_sifb, iblk, ice_freeboard(:,:,iblk), a2D) - if (f_snowfrac(1:1) /= 'x') & + if (f_snowfrac(1:1) /= 'x') & call accum_hist_field(n_snowfrac, iblk, snowfrac(:,:,iblk), a2D) if (f_Tsfc (1:1) /= 'x') & call accum_hist_field(n_Tsfc, iblk, trcr(:,:,nt_Tsfc,iblk), a2D) @@ -1706,7 +1935,6 @@ subroutine accum_hist (dt) call accum_hist_field(n_uatm, iblk, uatm(:,:,iblk), a2D) if (f_vatm (1:1) /= 'x') & call accum_hist_field(n_vatm, iblk, vatm(:,:,iblk), a2D) - if (f_sice (1:1) /= 'x') then do j = jlo, jhi do i = ilo, ihi @@ -1751,15 +1979,11 @@ subroutine accum_hist (dt) call accum_hist_field(n_fswfac, iblk, fswfac(:,:,iblk), a2D) if (f_fswabs (1:1) /= 'x') & call accum_hist_field(n_fswabs, iblk, fswabs(:,:,iblk), a2D) - + if (f_fswabs_ai(1:1)/= 'x') & + call accum_hist_field(n_fswabs_ai, iblk, fswabs_ai(:,:,iblk), a2D) if (f_fswint_ai (1:1) /= 'x') & call accum_hist_field(n_fswint_ai, iblk, fswint_ai(:,:,iblk), a2D) - workb(:,:) = aice(:,:,iblk) - - if (f_fswabs_ai(1:1)/= 'x') & - call accum_hist_field(n_fswabs_ai, iblk, fswabs(:,:,iblk)*workb(:,:), a2D) - if (f_fswup(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi @@ -1773,6 +1997,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_fswup, iblk, worka(:,:), a2D) endif + workb(:,:) = aice(:,:,iblk) if (f_albsni (1:1) /= 'x') & call accum_hist_field(n_albsni, iblk, & (awtvdr*alvdr(:,:,iblk) & @@ -1787,14 +2012,13 @@ subroutine accum_hist (dt) call accum_hist_field(n_alvdf, iblk, alvdf(:,:,iblk), a2D) if (f_alidf (1:1) /= 'x') & call accum_hist_field(n_alidf, iblk, alidf(:,:,iblk), a2D) - if (f_alvdr_ai (1:1) /= 'x') & call accum_hist_field(n_alvdr_ai, iblk, alvdr_ai(:,:,iblk), a2D) if (f_alidr_ai (1:1) /= 'x') & call accum_hist_field(n_alidr_ai, iblk, alidr_ai(:,:,iblk), a2D) - if (f_alvdf_ai (1:1) /= 'x') & + if (f_alvdf_ai (1:1) /= 'x') & call accum_hist_field(n_alvdf_ai, iblk, alvdf_ai(:,:,iblk), a2D) - if (f_alidf_ai (1:1) /= 'x') & + if (f_alidf_ai (1:1) /= 'x') & call accum_hist_field(n_alidf_ai, iblk, alidf_ai(:,:,iblk), a2D) if (f_albice (1:1) /= 'x') & @@ -1809,32 +2033,30 @@ subroutine accum_hist (dt) if (f_flat (1:1) /= 'x') & call accum_hist_field(n_flat, iblk, flat(:,:,iblk), a2D) if (f_flat_ai(1:1) /= 'x') & - call accum_hist_field(n_flat_ai,iblk, flat(:,:,iblk)*workb(:,:), a2D) + call accum_hist_field(n_flat_ai,iblk, flat_ai(:,:,iblk), a2D) if (f_fsens (1:1) /= 'x') & call accum_hist_field(n_fsens, iblk, fsens(:,:,iblk), a2D) if (f_fsens_ai(1:1)/= 'x') & - call accum_hist_field(n_fsens_ai,iblk, fsens(:,:,iblk)*workb(:,:), a2D) + call accum_hist_field(n_fsens_ai,iblk, fsens_ai(:,:,iblk), a2D) if (f_flwup (1:1) /= 'x') & call accum_hist_field(n_flwup, iblk, flwout(:,:,iblk), a2D) if (f_flwup_ai(1:1)/= 'x') & - call accum_hist_field(n_flwup_ai,iblk, flwout(:,:,iblk)*workb(:,:), a2D) + call accum_hist_field(n_flwup_ai,iblk, flwout_ai(:,:,iblk), a2D) if (f_evap (1:1) /= 'x') & call accum_hist_field(n_evap, iblk, evap(:,:,iblk), a2D) if (f_evap_ai(1:1) /= 'x') & - call accum_hist_field(n_evap_ai,iblk, evap(:,:,iblk)*workb(:,:), a2D) + call 
accum_hist_field(n_evap_ai,iblk, evap_ai(:,:,iblk), a2D) if (f_evap_ice_ai(1:1) /= 'x') & - call accum_hist_field(n_evap_ice_ai,iblk, evap_ice(:,:,iblk)*workb(:,:), a2D) + call accum_hist_field(n_evap_ice_ai,iblk, evap_ice_ai(:,:,iblk), a2D) if (f_evap_snow_ai(1:1) /= 'x') & - call accum_hist_field(n_evap_snow_ai,iblk, evap_snow(:,:,iblk)*workb(:,:), a2D) + call accum_hist_field(n_evap_snow_ai,iblk, evap_snow_ai(:,:,iblk), a2D) if (f_Tair (1:1) /= 'x') & call accum_hist_field(n_Tair, iblk, Tair(:,:,iblk), a2D) if (f_Tref (1:1) /= 'x') & - call accum_hist_field(n_Tref, iblk, & - Tref(:,:,iblk)*workb(:,:), a2D) + call accum_hist_field(n_Tref, iblk, Tref(:,:,iblk)*workb(:,:), a2D) if (f_Qref (1:1) /= 'x') & - call accum_hist_field(n_Qref, iblk, & - Qref(:,:,iblk)*workb(:,:), a2D) + call accum_hist_field(n_Qref, iblk, Qref(:,:,iblk)*workb(:,:), a2D) if (f_congel (1:1) /= 'x') & call accum_hist_field(n_congel, iblk, congel(:,:,iblk), a2D) if (f_frazil (1:1) /= 'x') & @@ -1845,7 +2067,8 @@ subroutine accum_hist (dt) call accum_hist_field(n_dsnow, iblk, dsnow(:,:,iblk), a2D) if (f_meltt (1:1) /= 'x') & call accum_hist_field(n_meltt, iblk, meltt(:,:,iblk), a2D) - if (f_melts (1:1) /= 'x') & + if (f_melts (1:1) /= 'x') & + ! is this actually melts_ai, its a grid cell average (https://github.com/ACCESS-NRI/cice5/blob/3f0f38141cf5f87ac9b6f7b401f10b4b5fc15218/source/ice_flux.F90#L857-L862) call accum_hist_field(n_melts, iblk, melts(:,:,iblk), a2D) if (f_meltb (1:1) /= 'x') & call accum_hist_field(n_meltb, iblk, meltb(:,:,iblk), a2D) @@ -1869,7 +2092,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_fswthru, iblk, fswthru(:,:,iblk), a2D) if (f_fswthru_ai(1:1)/= 'x') & call accum_hist_field(n_fswthru_ai,iblk, fswthru_ai(:,:,iblk), a2D) - + if (f_strairx(1:1) /= 'x') & call accum_hist_field(n_strairx, iblk, strairx(:,:,iblk), a2D) if (f_strairy(1:1) /= 'x') & @@ -1926,12 +2149,12 @@ subroutine accum_hist (dt) call accum_hist_field(n_dagedtd, iblk, dagedtd(:,:,iblk), a2D) if (f_fsurf_ai(1:1)/= 'x') & - call accum_hist_field(n_fsurf_ai,iblk, fsurf(:,:,iblk)*workb(:,:), a2D) + call accum_hist_field(n_fsurf_ai,iblk, fsurf_ai(:,:,iblk), a2D) if (f_fcondtop_ai(1:1)/= 'x') & call accum_hist_field(n_fcondtop_ai, iblk, & - fcondtop(:,:,iblk)*workb(:,:), a2D) + fcondtop_ai(:,:,iblk), a2D) - if (f_icepresent(1:1) /= 'x') then + if (f_icepresent(1:1) /= 'x' .or. f_sitimefrac(1:1) /= 'x') then worka(:,:) = c0 area_threshold = max(a_min,aicenmin) do j = jlo, jhi @@ -1944,90 +2167,114 @@ subroutine accum_hist (dt) !2D CMIP6 fields - if (f_sithick(1:1) /= 'x') then + ! for "extensive" vars, simply accumulate grid box mean values + ! for "intensive" vars, either: + ! - for grid box means, weight grid box means by aice (again) + ! - for ice area means, use grid box mean (to give the effect of ice area mean weighted by aice) + ! - for non spatial values (e.g. age), -> weight by aice + ! as intensive vars are divided by the sum of aice over time when written to file + + if (f_sivol(1:1) /= 'x') & + call accum_hist_field(n_sivol, iblk, vice(:,:,iblk), a2D) + + if (f_siconc (1:1) /= 'x') & + call accum_hist_field(n_siconc, iblk, aice(:,:,iblk), a2D) + + if (f_sithick(1:1) /= 'x') & + ! intensive - ice area mean -> use vice (grid box mean) + call accum_hist_field(n_sithick, iblk, vice(:,:,iblk), a2D) + + if (f_simass(1:1) /= 'x') & + ! extensive -> use vice (grid box mean) + ! 
converted to mass when writing ( *rhoi in define_hist_field ) + call accum_hist_field(n_simass, iblk, vice(:,:,iblk), a2D) + + if (f_siage(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) worka(i,j) = vice(i,j,iblk) + if (aice(i,j,iblk) > puny) & + ! intensive, weight by aice + worka(i,j) = aice(i,j,iblk)*trcr(i,j,nt_iage,iblk) enddo enddo - call accum_hist_field(n_sithick, iblk, worka(:,:), a2D) + call accum_hist_field(n_siage, iblk, worka(:,:), a2D) endif - if (f_siage(1:1) /= 'x') then + if (f_sisnconc(1:1) /= 'x') & + ! intensive + ice area mean -> use snowfrac (grid box mean) + call accum_hist_field(n_sisnconc, iblk, snowfrac(:,:,iblk), a2D) + + if (f_sisnthick(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) worka(i,j) = aice(i,j,iblk)*trcr(i,j,nt_iage,iblk) + do i = ilo, ihi + if (snowfrac(i,j,iblk) > puny) then + ! intensive + ice area mean -> calculate grid box mean + worka(i,j) = vsno(i,j,iblk) / snowfrac(i,j,iblk) + endif enddo enddo - call accum_hist_field(n_siage, iblk, worka(:,:), a2D) + call accum_hist_field(n_sisnthick, iblk, worka(:,:), a2D) endif + if (f_sisnmass(1:1) /= 'x') & + ! converted to mass when writing ( *rhos in define_hist_field ) + call accum_hist_field(n_sisnmass, iblk, vsno(:,:,iblk), a2D) - if (f_sisnconc(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) worka(i,j) = snowfrac(i,j,iblk) - enddo - enddo - call accum_hist_field(n_sisnconc, iblk, worka(:,:), a2D) - endif - - if (f_sisnthick(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny .and. snowfrac(i,j,iblk) > puny) & - worka(i,j) = vsno(i,j,iblk) - enddo - enddo - call accum_hist_field(n_sisnthick, iblk, worka(:,:), a2D) - endif - + if (f_sisnmass_intensive(1:1) /= 'x') & + ! intensive + grid box mean -> weight by aice again + ! converted to mass when writing ( *rhos in define_hist_field ) + call accum_hist_field(n_sisnmass_intensive, iblk, aice(:,:,iblk)*vsno(:,:,iblk) , a2D) if (f_sitemptop(1:1) /= 'x') then - worka(:,:) = c0 + worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi - worka(i,j) = aice(i,j,iblk)*trcr(i,j,nt_Tsfc,iblk) + if (aice(i,j,iblk) > puny) then + ! Tsfc is a tracer, so was advected during dynamics + ! intensive + ice area mean -> weight by aice + worka(i,j) = aice(i,j,iblk) * trcr(i,j,nt_Tsfc,iblk) + endif enddo enddo call accum_hist_field(n_sitemptop, iblk, worka(:,:), a2D) endif - if (f_sitempsnic(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi - do i = ilo, ihi - if (vsno(i,j,iblk) > puny .and. aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*Tsnic(i,j,iblk)/aice_init(i,j,iblk) - else - worka(i,j) = aice(i,j,iblk)*trcr(i,j,nt_Tsfc,iblk) - endif + do i = ilo, ihi + if (aice(i,j,iblk) > puny) & + ! nb Tsnice is approximate, not a tracer + ! intensive + ice area mean -> weight by aice + ! (we don't save Tsnice_ai as aice changes between calculating Tsnice and writing diagnostics) + worka(i,j) = aice(i,j,iblk)*Tsnice(i,j,iblk) + enddo enddo - enddo call accum_hist_field(n_sitempsnic, iblk, worka(:,:), a2D) endif - if (f_sitempbot(1:1) /= 'x') then + if (f_sitempbot(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) & - worka(i,j) = aice(i,j,iblk)*(Ti_bot(i,j,iblk)+Tffresh) - enddo - enddo - call accum_hist_field(n_sitempbot, iblk, worka(:,:), a2D) + if (aice(i,j,iblk) > puny) & + ! nb Ti_bot is approximate, not a tracer + ! 
intensive + ice area mean -> weight by aice + worka(i,j) = aice(i,j,iblk)*Ti_bot(i,j,iblk) + enddo + enddo + call accum_hist_field(n_sitempbot, iblk, worka(:,:), a2D) endif if (f_siu(1:1) /= 'x') then - worka(:,:) = c0 + worka(:,:) = c0 do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) worka(i,j) = aice(i,j,iblk)*uvel(i,j,iblk) + do i = ilo, ihi + ! intensive -> weight by aice + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*uvel(i,j,iblk) enddo enddo call accum_hist_field(n_siu, iblk, worka(:,:), a2D) @@ -2036,60 +2283,69 @@ subroutine accum_hist (dt) if (f_siv(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) worka(i,j) = aice(i,j,iblk)*vvel(i,j,iblk) + do i = ilo, ihi + ! intensive -> weight by aice + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk)*vvel(i,j,iblk) enddo enddo call accum_hist_field(n_siv, iblk, worka(:,:), a2D) endif - - if (f_sispeed(1:1) /= 'x') then - worka(:,:) = c0 + if (f_sispeed(1:1) /= 'x') then + worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) worka(i,j) = aice(i,j,iblk) & - * sqrt(uvel(i,j,iblk)*uvel(i,j,iblk)+vvel(i,j,iblk)*vvel(i,j,iblk)) + ! intensive -> weight by aice + if (aice(i,j,iblk) > puny) & + worka(i,j) = aice(i,j,iblk) & + * sqrt(uvel(i,j,iblk)*uvel(i,j,iblk) & + +vvel(i,j,iblk)*vvel(i,j,iblk)) enddo - enddo - call accum_hist_field(n_sispeed, iblk, worka(:,:), a2D) + enddo + call accum_hist_field(n_sispeed, iblk, worka(:,:), a2D) endif - if (f_sidmasstranx(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) & - worka(i,j) = (rhoi*0.5*(vice(i+1,j,iblk)+vice(i,j,iblk))*HTE(i,j,iblk) & - + rhos*0.5*(vsno(i+1,j,iblk)+vsno(i,j,iblk))*HTE(i,j,iblk)) & - * 0.5*(uvel(i,j-1,iblk)+uvel(i,j,iblk)) + if (aice(i,j,iblk) > puny) & + ! extensive, vice and vsno are grid cell averages + worka(i,j) = p25*HTE(i,j,iblk)*( & + rhoi*(vice(i,j,iblk)+vice(i+1,j,iblk)) & + + rhos*(vsno(i,j,iblk)+vsno(i+1,j,iblk)) & + ) * (uvel(i,j-1,iblk)+uvel(i,j,iblk)) enddo - enddo - call accum_hist_field(n_sidmasstranx, iblk, worka(:,:), a2D) - endif + enddo + call accum_hist_field(n_sidmasstranx, iblk, worka(:,:), a2D) + endif - if (f_sidmasstrany(1:1) /= 'x') then + if (f_sidmasstrany(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) & - worka(i,j) = (rhoi*0.5*(vice(i,j+1,iblk)+vice(i,j,iblk))*HTN(i,j,iblk) & - + rhos*0.5*(vsno(i,j+1,iblk)+vsno(i,j,iblk))*HTN(i,j,iblk)) & - * 0.5*(vvel(i-1,j,iblk)+vvel(i,j,iblk)) + if (aice(i,j,iblk) > puny) & + ! extensive, vice and vsno are grid cell averages + worka(i,j) = p25*HTN(i,j,iblk)*( & + rhoi*(vice(i,j,iblk)+vice(i,j+1,iblk)) & + + rhos*(vsno(i,j,iblk)+vsno(i,j+1,iblk)) & + ) * (vvel(i-1,j,iblk)+vvel(i,j,iblk)) enddo - enddo - call accum_hist_field(n_sidmasstrany, iblk, worka(:,:), a2D) - endif + enddo + call accum_hist_field(n_sidmasstrany, iblk, worka(:,:), a2D) + endif if (f_sistrxdtop(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi + !to-do: scale by aice/aice_init as its calculated based on coupled state ? if (aice(i,j,iblk) > puny) & + ! 
intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strairx(i,j,iblk) - enddo - enddo + enddo + enddo call accum_hist_field(n_sistrxdtop, iblk, worka(:,:), a2D) endif @@ -2097,7 +2353,9 @@ subroutine accum_hist (dt) worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi + !to-do: surface stress is from coupling, should use aice_init weighting if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strairy(i,j,iblk) enddo enddo @@ -2109,6 +2367,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strocnx(i,j,iblk) enddo enddo @@ -2120,6 +2379,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strocny(i,j,iblk) enddo enddo @@ -2131,6 +2391,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strtltx(i,j,iblk) enddo enddo @@ -2142,6 +2403,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strtlty(i,j,iblk) enddo enddo @@ -2164,6 +2426,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = -aice(i,j,iblk)*fm(i,j,iblk)*uvel(i,j,iblk) enddo enddo @@ -2175,6 +2438,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strintx(i,j,iblk) enddo enddo @@ -2186,17 +2450,19 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strinty(i,j,iblk) enddo enddo call accum_hist_field(n_siforceintstry, iblk, worka(:,:), a2D) endif - if (f_sicompstren(1:1) /= 'x') then + if (f_sicompstren(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive + grid box mean -> weight by aice again worka(i,j) = aice(i,j,iblk)*strength(i,j,iblk) enddo enddo @@ -2208,6 +2474,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & + ! intensive, weight by aice worka(i,j) = aice(i,j,iblk)*divu(i,j,iblk) enddo enddo @@ -2218,6 +2485,7 @@ subroutine accum_hist (dt) worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi + ! to-do: weight by aice_init directly if (fsw(i,j,iblk) > puny .and. aice_init(i,j,iblk) > puny) then worka(i,j) = aice(i,j,iblk)*(fsw(i,j,iblk)-fswabs(i,j,iblk) & * aice(i,j,iblk)/aice_init(i,j,iblk)) & @@ -2233,7 +2501,9 @@ subroutine accum_hist (dt) do k = 1,nilyr do j = jlo, jhi do i = ilo, ihi - worka(i,j) = worka(i,j) + trcr(i,j,nt_qice+k-1,iblk)*vice(i,j,iblk)/real(nilyr,kind=dbl_kind) + ! extensive, vice is grid cell average + worka(i,j) = worka(i,j) & + + trcr(i,j,nt_qice+k-1,iblk)*vice(i,j,iblk)/real(nilyr,kind=dbl_kind) enddo enddo enddo @@ -2245,170 +2515,100 @@ subroutine accum_hist (dt) do k = 1,nslyr do j = jlo, jhi do i = ilo, ihi - worka(i,j) = worka(i,j) + trcr(i,j,nt_qsno+k-1,iblk)*vsno(i,j,iblk)/real(nslyr,kind=dbl_kind) + ! 
extensive, vsno is grid cell average + worka(i,j) = worka(i,j) + & + trcr(i,j,nt_qsno+k-1,iblk)*vsno(i,j,iblk)/real(nslyr,kind=dbl_kind) enddo enddo enddo call accum_hist_field(n_sisnhc, iblk, worka(:,:), a2D) endif - if (f_sidconcth(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = daidtt(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidconcth, iblk, worka(:,:), a2D) - endif + if (f_sidconcth(1:1) /= 'x') & + call accum_hist_field(n_sidconcth, iblk, daidtt(:,:,iblk), a2D) - if (f_sidconcdyn(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = daidtd(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidconcdyn, iblk, worka(:,:), a2D) - endif + if (f_sidconcdyn(1:1) /= 'x') & + call accum_hist_field(n_sidconcdyn, iblk, daidtd(:,:,iblk), a2D) - if (f_sidmassth(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = dvidtt(i,j,iblk) * rhoi - endif - enddo - enddo - call accum_hist_field(n_sidmassth, iblk, worka(:,:), a2D) - endif + if (f_sidmassth(1:1) /= 'x') & + call accum_hist_field(n_sidmassth, iblk, dvidtt(:,:,iblk), a2D) ! *rhoi in define_hist_field - if (f_sidmassdyn(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = dvidtd(i,j,iblk) * rhoi - endif - enddo - enddo - call accum_hist_field(n_sidmassdyn, iblk, worka(:,:), a2D) - endif + if (f_sidmassdyn(1:1) /= 'x') & + call accum_hist_field(n_sidmassdyn, iblk, dvidtd(:,:,iblk), a2D) ! *rhoi in define_hist_field - if (f_sidmassgrowthwat(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*frazil(i,j,iblk)*rhoi / aice_init(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidmassgrowthwat, iblk, worka(:,:), a2D) - endif + if (f_sidmassgrowthwat(1:1) /= 'x') & + call accum_hist_field(n_sidmassgrowthwat, iblk, frazil(:,:,iblk), a2D) ! *rhoi/dt in define_hist_field - if (f_sidmassgrowthbot(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*congel(i,j,iblk)*rhoi / aice_init(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidmassgrowthbot, iblk, worka(:,:), a2D) - endif + if (f_sidmassgrowthbot(1:1) /= 'x') & + call accum_hist_field(n_sidmassgrowthbot, iblk, congel(:,:,iblk), a2D) ! *rhoi/dt in define_hist_field - if (f_sidmasssi(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*snoice(i,j,iblk)*rhoi / aice_init(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidmasssi, iblk, worka(:,:), a2D) - endif + if (f_sidmasssi(1:1) /= 'x' .or. f_sidmassgrowthsi(1:1) /= 'x') & + call accum_hist_field(n_sidmasssi, iblk, snoice(:,:,iblk), a2D) ! *rhoi/dt in define_hist_field - if (f_sidmassevapsubl(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*evap(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidmassevapsubl, iblk, worka(:,:), a2D) + if (f_sisndmasssi(1:1) /= 'x') & + call accum_hist_field(n_sisndmasssi, iblk, snoice(:,:,iblk), a2D) ! 
*rhoi/dt in define_hist_field + + if (f_sisndmasssi_intensive(1:1) /= 'x') & + !To-do: calculate a seperate icesno diag for change in snow thickness in ice_therm_vertical ? + ! Its equivalent though, so fairly moot + ! intensive + grid box mean -> weight by aice again + call accum_hist_field(n_sisndmasssi_intensive, iblk, aice(:,:,iblk)*snoice(:,:,iblk), a2D) ! *rhoi/dt in define_hist_field + + if (f_sidmassevapsubl(1:1) /= 'x') & + ! extensive -> use grid cell average + call accum_hist_field(n_sidmassevapsubl, iblk, evap_ice_ai(:,:,iblk), a2D) + + if (f_sndmasssubl(1:1) /= 'x' .or. f_sisndmasssubl(1:1) /= 'x') & + call accum_hist_field(n_sisndmasssubl,iblk, evap_snow_ai(:,:,iblk), a2D) + + if (f_sisndmasssubl_intensive(1:1) /= 'x') then + ! intensive + grid box mean -> weight by aice again + call accum_hist_field(n_sisndmasssubl_intensive, iblk, aice(:,:,iblk)*evap_snow_ai(:,:,iblk), a2D) endif - if (f_sidmassmelttop(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*meltt(i,j,iblk)*rhoi / aice_init(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidmassmelttop, iblk, worka(:,:), a2D) - endif + if (f_sidmassmelttop(1:1) /= 'x') & + ! sidmassmelttop is extensive, meltt is grid cell average + call accum_hist_field(n_sidmassmelttop, iblk, meltt(:,:,iblk), a2D) ! *rhoi/dt in define_hist_field - if (f_sidmassmeltbot(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*meltb(i,j,iblk)*rhoi / aice_init(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidmassmeltbot, iblk, worka(:,:), a2D) - endif + if (f_sidmassmeltbot(1:1) /= 'x') & + ! sidmassmeltbot is extensive, meltb is grid cell average + call accum_hist_field(n_sidmassmeltbot, iblk, meltb(:,:,iblk), a2D) ! *rhoi/dt in define_hist_field - if (f_sidmasslat(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*meltl(i,j,iblk)*rhoi / aice_init(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sidmasslat, iblk, worka(:,:), a2D) - endif + if (f_sidmasslat(1:1) /= 'x' .or. f_sidmassmeltlat(1:1) /= 'x') & + ! sidmassmeltlat is extensive, meltl is grid cell average + call accum_hist_field(n_sidmasslat, iblk, meltl(:,:,iblk), a2D) ! *rhoi/dt in define_hist_field - if (f_sndmasssnf(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk) * fsnow(i,j,iblk) * dt - endif - enddo - enddo - call accum_hist_field(n_sndmasssnf, iblk, worka(:,:), a2D) - endif + if (f_sndmasssnf(1:1) /= 'x' .or. f_sisndmasssnf(1:1) /= 'x') & + call accum_hist_field(n_sndmasssnf,iblk, fsnow(:,:,iblk)*aice_init(:,:,iblk), a2D) - if (f_sndmassmelt(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*melts(i,j,iblk)*rhoi / aice_init(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sndmassmelt, iblk, worka(:,:), a2D) + if (f_sisndmasssnf_intensive(1:1) /= 'x') then + do j = jlo, jhi + do i = ilo, ihi + if (aice(i,j,iblk) > puny) then + ! intensive + grid box mean -> weight fsnow by aice twice (see f_snow_ai) + worka(i,j) = aice(i,j,iblk)*aice_init(i,j,iblk)*fsnow(i,j,iblk) + endif + enddo + enddo + call accum_hist_field(n_sisndmasssnf_intensive, iblk, worka(:,:), a2D) endif + if (f_sndmassmelt(1:1) /= 'x' .or. 
f_sisndmassmelt(1:1) /= 'x') & + call accum_hist_field(n_sndmassmelt, iblk, melts(:,:,iblk), a2D) + + if (f_sisndmassmelt_intensive(1:1) /= 'x') & + ! intensive + grid box mean -> weight by aice again + call accum_hist_field(n_sisndmassmelt_intensive, iblk, aice(:,:,iblk)*melts(:,:,iblk), a2D) ! *rhos/dt in define_hist_field + + if (f_sndmassdyn(1:1) /= 'x' .or. f_sisndmassdyn(1:1) /= 'x') & + call accum_hist_field(n_sndmassdyn, iblk, dvsdtd(:,:,iblk), a2D) ! rhos in define_hist_field + + if (f_sisndmassdyn_intensive(1:1) /= 'x') & + ! intensive + grid box mean -> weight by aice again + call accum_hist_field(n_sisndmassdyn_intensive, iblk, aice(:,:,iblk)*dvsdtd(:,:,iblk), a2D) ! rhos in define_hist_field + if (f_siflswdtop(1:1) /= 'x') then + ! intensive + ice area mean -> weight by aice worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi @@ -2421,6 +2621,7 @@ subroutine accum_hist (dt) endif if (f_siflswutop(1:1) /= 'x') then + ! intensive + ice area mean -> weight by aice worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi @@ -2433,23 +2634,16 @@ subroutine accum_hist (dt) call accum_hist_field(n_siflswutop, iblk, worka(:,:), a2D) endif - if (f_siflswdbot(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fswthru(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_siflswdbot, iblk, worka(:,:), a2D) - endif + if (f_siflswdbot(1:1) /= 'x') & + ! intensive + ice area mean -> use weighted form + call accum_hist_field(n_siflswdbot, iblk, fswthru_ai(:,:,iblk), a2D) - if (f_sifllwdtop(1:1) /= 'x') then + if (f_sifllwdtop(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then + ! intensive + ice area mean -> weight by aice worka(i,j) = aice(i,j,iblk)*flw(i,j,iblk) endif enddo @@ -2457,137 +2651,101 @@ subroutine accum_hist (dt) call accum_hist_field(n_sifllwdtop, iblk, worka(:,:), a2D) endif - if (f_sifllwutop(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*flwout(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sifllwutop, iblk, worka(:,:), a2D) - endif + if (f_sifllwutop(1:1) /= 'x') & + ! intensive + ice area mean -> use weighted form + call accum_hist_field(n_sifllwutop, iblk, flwout_ai(:,:,iblk), a2D) - if (f_siflsenstop(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fsens(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_siflsenstop, iblk, worka(:,:), a2D) - endif + if (f_siflsenstop(1:1) /= 'x') & + ! intensive + ice area mean -> use weighted form + call accum_hist_field(n_siflsenstop, iblk, fsens_ai(:,:,iblk), a2D) - if (f_siflsensupbot(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fhocn(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_siflsensupbot, iblk, worka(:,:), a2D) - endif + if (f_siflsensupbot(1:1) /= 'x' .or. f_siflsensbot(1:1) /= 'x') & + ! intensive + ice area mean -> use weighted form + call accum_hist_field(n_siflsensupbot, iblk, fhocn_ai(:,:,iblk), a2D) - if (f_sifllatstop(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*flat(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_sifllatstop, iblk, worka(:,:), a2D) - endif + if (f_sifllatstop(1:1) /= 'x') & + ! 
intensive + ice area mean -> use weighted form + call accum_hist_field(n_sifllatstop, iblk, flat_ai(:,:,iblk), a2D) - if (f_siflcondtop(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fcondtop(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_siflcondtop, iblk, worka(:,:), a2D) - endif + if (f_siflcondtop(1:1) /= 'x') & + ! intensive + ice area mean -> use weighted form + call accum_hist_field(n_siflcondtop, iblk, fcondtop_ai(:,:,iblk), a2D) - if (f_siflcondbot(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice_init(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fcondbot(i,j,iblk)/aice_init(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_siflcondbot, iblk, worka(:,:), a2D) - endif + if (f_siflcondbot(1:1) /= 'x') & + ! intensive + ice area mean -> use weighted form + call accum_hist_field(n_siflcondbot, iblk, fcondbot(:,:,iblk), a2D) if (f_sipr(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*frain(i,j,iblk) + ! intensive + grid box mean -> weight frain by aice twice (see f_frain_ai) + worka(i,j) = aice(i,j,iblk)*aice_init(i,j,iblk)*frain(i,j,iblk) endif enddo enddo call accum_hist_field(n_sipr, iblk, worka(:,:), a2D) endif - if (f_siflsaltbot(1:1) /= 'x') then + if (f_sifb(1:1) /= 'x') then worka(:,:) = c0 + rho_ice = rhoi + rho_ocn = rhow do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fsalt(i,j,iblk) + !to-do : fix for mushy + if (ktherm == 2) then + call abort_ice("sifb not available when ktherm==2, set f_sifb = 'x' or fix in cice5 code") + ! ! rho_ocn = icepack_mushy_density_brine(sss(i,j,iblk)) + ! ! rho_ice = c0 + ! do k = 1, nzilyr + ! Tice = icepack_mushy_temperature_mush(trcr(i,j,nt_qice+k-1,iblk),trcr(i,j,nt_sice+k-1,iblk)) + ! Sbr = trcr(i,j,nt_sice+k-1,iblk) + ! phi = icepack_mushy_liquid_fraction(Tice,Sbr) + ! rhob = icepack_mushy_density_brine(Sbr) + ! rho_ice = rho_ice + min(phi*rhob+(c1-phi)*rhoi,rho_ocn) + ! enddo + ! rho_ice = rho_ice / real(nzilyr,kind=dbl_kind) + endif + worka(i,j) = ((rho_ocn-rho_ice)*vice(i,j,iblk) - rhos*vsno(i,j,iblk))/rho_ocn + ! if (worka(i,j) < c0) then + ! write(nu_diag,*) 'negative fb',rho_ocn,rho_ice,rhos + ! write(nu_diag,*) vice(i,j,iblk),vsno(i,j,iblk) + ! endif endif enddo enddo - call accum_hist_field(n_siflsaltbot, iblk, worka(:,:), a2D) + call accum_hist_field(n_sifb, iblk, worka(:,:), a2D) endif - if (f_sisaltmass(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = ice_ref_salinity * rhoi * vice(i,j,iblk) / c1000 - endif - enddo - enddo - call accum_hist_field(n_sisaltmass, iblk, worka(:,:), a2D) - endif + if (f_siflsaltbot(1:1) /= 'x') & + ! intensive + grid box mean -> weight by aice again + call accum_hist_field(n_siflsaltbot, iblk, aice(:,:,iblk)*fsalt_ai(:,:,iblk), a2D) - if (f_siflfwbot(1:1) /= 'x') then - worka(:,:) = c0 - do j = jlo, jhi - do i = ilo, ihi - if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fresh(i,j,iblk) - endif - enddo - enddo - call accum_hist_field(n_siflfwbot, iblk, worka(:,:), a2D) - endif + if (f_sisaltmass(1:1) /= 'x') & + ! extensive -> grid box mean + ! *ice_ref_salinity*rhoi/c1000 in define_hist_field + call accum_hist_field(n_sisaltmass, iblk, vice(:,:,iblk), a2D) ! + if (f_siflfwbot(1:1) /= 'x') & + ! 
intensive + grid box mean -> weight by aice again + call accum_hist_field(n_siflfwbot, iblk, aice(:,:,iblk)*fresh_ai(:,:,iblk), a2D) - if (f_siflsaltbot(1:1) /= 'x') then + if (f_siflfwdrain(1:1) /= 'x') then worka(:,:) = c0 do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fsalt(i,j,iblk) + ! to-do : drainage from meltpond + ! intensive + grid box mean -> weight by aice again + worka(i,j) = aice(i,j,iblk)*(melts(i,j,iblk) * rhos & + + meltt(i,j,iblk) * rhoi)/dt endif enddo enddo - call accum_hist_field(n_siflsaltbot, iblk, worka(:,:), a2D) + call accum_hist_field(n_siflfwdrain, iblk, worka(:,:), a2D) endif !3D category fields @@ -2601,11 +2759,11 @@ subroutine accum_hist (dt) if (f_vsnon (1:1) /= 'x') & call accum_hist_field(n_vsnon-n2D, iblk, ncat_hist, & vsnon(:,:,1:ncat_hist,iblk), a3Dc) - if (f_snowfracn(1:1) /= 'x') & + if (f_snowfracn(1:1) /= 'x') & call accum_hist_field(n_snowfracn-n2D, iblk, ncat_hist, & snowfracn(:,:,1:ncat_hist,iblk)*aicen(:,:,:,iblk), a3Dc) - if (f_Tn_top (1:1) /= 'x') & + if (f_Tn_top (1:1) /= 'x') & call accum_hist_field(n_Tn_top-n2D, iblk, ncat_hist, & Tn_top(:,:,1:ncat_hist,iblk), a3Dc) if (f_keffn_top (1:1) /= 'x') & @@ -2623,13 +2781,15 @@ subroutine accum_hist (dt) if (f_fsensn_ai (1:1) /= 'x') & call accum_hist_field(n_fsensn_ai-n2D, iblk, ncat_hist, & fsensn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - ! Calculate surface heat flux that causes melt (calculated by the + ! Calculate surface heat flux that causes melt (calculated by the ! atmos in HadGEM3 so needed for checking purposes) if (f_fmelttn_ai (1:1) /= 'x') & call accum_hist_field(n_fmelttn_ai-n2D, iblk, ncat_hist, & max(fsurfn(:,:,1:ncat_hist,iblk) - fcondtopn(:,:,1:ncat_hist,iblk),c0) & *aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - + if (f_siitdconc (1:1) /= 'x') & + call accum_hist_field(n_siitdconc-n2D, iblk, ncat_hist, & + aicen(:,:,1:ncat_hist,iblk), a3Dc) ! example for 3D field (x,y,z) ! if (f_field3dz (1:1) /= 'x') & ! call accum_hist_field(n_field3dz-n3Dccum, iblk, nzilyr, & @@ -2680,7 +2840,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_Sinz-n3Dbcum, iblk, nzilyr, ncat_hist, & Sinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) endif - + if (f_Tsnz (1:1) /= 'x') then Tsnz4d(:,:,:,:) = c0 if (ktherm == 2) then @@ -2766,16 +2926,16 @@ subroutine accum_hist (dt) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & !$OMP n,nn,ravgctz,ravgip) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi + !to-do: figure out how to make sure n_aice actually exists do j = jlo, jhi do i = ilo, ihi - ! Alex West - enforce time mean ice area threshold based - ! on + ! Alex West - enforce time mean ice area threshold based on ! the maximum of aicenmin (thermodynamic min ice fraction) ! and a_min (dynamic min ice fraction) so that intensive ! variables are reported only where both dynamics and @@ -2789,696 +2949,78 @@ subroutine accum_hist (dt) enddo ! i enddo ! j - + if (n_aicen(ns) > n2D) then + do k=1,ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (a3Dc(i,j,k,n_aicen(ns)-n2D,iblk) > puny) then + ravgipn(i,j,k) = c1/(a3Dc(i,j,k,n_aicen(ns)-n2D,iblk)) + else + ravgipn(i,j,k) = c0 + endif + enddo ! i + enddo ! j + enddo ! k + endif do n = 1, num_avail_hist_fields_2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) then + ! 
Only average for timesteps when ice present + if (avail_hist_fields(n)%avg_ice_present) then + a2D(:,:,n,iblk) = a2D(:,:,n,iblk)*ravgip(:,:) + else + a2D(:,:,n,iblk) = a2D(:,:,n,iblk)*ravgct + endif + do j = jlo, jhi do i = ilo, ihi #ifdef AusCOM - if (n_uocn(ns)==n.or.n_vocn(ns)==n) then - if (.not. umask(i,j,iblk)) then ! mask out land points - a2D(i,j,n,iblk) = spval_dbl - else ! convert units - a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & - * ravgct + avail_hist_fields(n)%conb - endif - else - if (.not. tmask(i,j,iblk)) then ! mask out land points - a2D(i,j,n,iblk) = spval_dbl - else ! convert units - a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & - * ravgct + avail_hist_fields(n)%conb - endif - endif + if (n_uocn(ns)==n.or.n_vocn(ns)==n) then + if (.not. umask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval_dbl + else ! convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + + avail_hist_fields(n)%conb + endif + else + if (.not. tmask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval_dbl + else ! convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + + avail_hist_fields(n)%conb + endif + endif #else - if (.not. tmask(i,j,iblk)) then ! mask out land points - a2D(i,j,n,iblk) = spval_dbl - else ! convert units - a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & - * ravgct + avail_hist_fields(n)%conb - endif + if (.not. tmask(i,j,iblk)) then ! mask out land points + a2D(i,j,n,iblk) = spval_dbl + else ! convert units + a2D(i,j,n,iblk) = avail_hist_fields(n)%cona*a2D(i,j,n,iblk) & + + avail_hist_fields(n)%conb + endif #endif enddo ! i enddo ! j - ! Only average for timesteps when ice present - if (index(avail_hist_fields(n)%vname,'sithick') /= 0) then - if (f_sithick(1:1) /= 'x' .and. n_sithick(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sithick(ns),iblk) = & - a2D(i,j,n_sithick(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sithick(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sifb') /= 0) then - if (f_sifb(1:1) /= 'x' .and. n_sifb(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sifb(ns),iblk) = & - a2D(i,j,n_sifb(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sifb(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siage') /= 0) then - if (f_siage(1:1) /= 'x' .and. n_siage(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siage(ns),iblk) = & - a2D(i,j,n_siage(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siage(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - - if (index(avail_hist_fields(n)%vname,'sisnconc') /= 0) then - if (f_sisnconc(1:1) /= 'x' .and. n_sisnconc(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sisnconc(ns),iblk) = & - a2D(i,j,n_sisnconc(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sisnconc(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sisnthick') /= 0) then - if (f_sisnthick(1:1) /= 'x' .and. 
n_sisnthick(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sisnthick(ns),iblk) = & - a2D(i,j,n_sisnthick(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sisnthick(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sitemptop') /= 0) then - if (f_sitemptop(1:1) /= 'x' .and. n_sitemptop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sitemptop(ns),iblk) = & - a2D(i,j,n_sitemptop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sitemptop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sitempsnic') /= 0) then - if (f_sitempsnic(1:1) /= 'x' .and. n_sitempsnic(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sitempsnic(ns),iblk) = & - a2D(i,j,n_sitempsnic(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sitempsnic(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sitempbot') /= 0) then - if (f_sitempbot(1:1) /= 'x' .and. n_sitempbot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sitempbot(ns),iblk) = & - a2D(i,j,n_sitempbot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sitempbot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siu') /= 0) then - if (f_siu(1:1) /= 'x' .and. n_siu(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siu(ns),iblk) = & - a2D(i,j,n_siu(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siu(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siv') /= 0) then - if (f_siv(1:1) /= 'x' .and. n_siv(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siv(ns),iblk) = & - a2D(i,j,n_siv(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siv(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sistrxdtop') /= 0) then - if (f_sistrxdtop(1:1) /= 'x' .and. n_sistrxdtop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sistrxdtop(ns),iblk) = & - a2D(i,j,n_sistrxdtop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sistrxdtop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sistrydtop') /= 0) then - if (f_sistrydtop(1:1) /= 'x' .and. n_sistrydtop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sistrydtop(ns),iblk) = & - a2D(i,j,n_sistrydtop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sistrydtop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sistrxubot') /= 0) then - if (f_sistrxubot(1:1) /= 'x' .and. n_sistrxubot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sistrxubot(ns),iblk) = & - a2D(i,j,n_sistrxubot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sistrxubot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sistryubot') /= 0) then - if (f_sistryubot(1:1) /= 'x' .and. 
n_sistryubot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sistryubot(ns),iblk) = & - a2D(i,j,n_sistryubot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sistryubot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siforcetiltx') /= 0) then - if (f_siforcetiltx(1:1) /= 'x' .and. n_siforcetiltx(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siforcetiltx(ns),iblk) = & - a2D(i,j,n_siforcetiltx(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siforcetiltx(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siforcetilty') /= 0) then - if (f_siforcetilty(1:1) /= 'x' .and. n_siforcetilty(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siforcetilty(ns),iblk) = & - a2D(i,j,n_siforcetilty(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siforcetilty(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siforcecoriolx') /= 0) then - if (f_siforcecoriolx(1:1) /= 'x' .and. n_siforcecoriolx(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siforcecoriolx(ns),iblk) = & - a2D(i,j,n_siforcecoriolx(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siforcecoriolx(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siforcecorioly') /= 0) then - if (f_siforcecorioly(1:1) /= 'x' .and. n_siforcecorioly(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siforcecorioly(ns),iblk) = & - a2D(i,j,n_siforcecorioly(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siforcecorioly(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siforceintstrx') /= 0) then - if (f_siforceintstrx(1:1) /= 'x' .and. n_siforceintstrx(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siforceintstrx(ns),iblk) = & - a2D(i,j,n_siforceintstrx(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siforceintstrx(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siforceintstry') /= 0) then - if (f_siforceintstry(1:1) /= 'x' .and. n_siforceintstry(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siforceintstry(ns),iblk) = & - a2D(i,j,n_siforceintstry(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siforceintstry(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sicompstren') /= 0) then - if (f_sicompstren(1:1) /= 'x' .and. n_sicompstren(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sicompstren(ns),iblk) = & - a2D(i,j,n_sicompstren(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sicompstren(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sidivvel') /= 0) then - if (f_sidivvel(1:1) /= 'x' .and. 
n_sidivvel(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sidivvel(ns),iblk) = & - a2D(i,j,n_sidivvel(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sidivvel(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sispeed') /= 0) then - if (f_sispeed(1:1) /= 'x' .and. n_sispeed(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sispeed(ns),iblk) = & - a2D(i,j,n_sispeed(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sispeed(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif + ! Mask ice-free points + if (avail_hist_fields(n)%mask_ice_free_points) then + where(ravgip(:,:) == c0) a2D(:,:,n,iblk) = spval_dbl endif + ! CMIP albedo: also mask points below horizon if (index(avail_hist_fields(n)%vname,'sialb') /= 0) then - if (f_sialb(1:1) /= 'x' .and. n_sialb(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sialb(ns),iblk) = & - a2D(i,j,n_sialb(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sialb(ns),iblk) = spval_dbl - if (albcnt(i,j,iblk,ns) <= puny) a2D(i,j,n_sialb(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sidmassgrowthwat') /= 0) then - if (f_sidmassgrowthwat(1:1) /= 'x' .and. n_sidmassgrowthwat(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sidmassgrowthwat(ns),iblk) = & - a2D(i,j,n_sidmassgrowthwat(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sidmassgrowthwat(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sidmassgrowthbot') /= 0) then - if (f_sidmassgrowthbot(1:1) /= 'x' .and. n_sidmassgrowthbot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sidmassgrowthbot(ns),iblk) = & - a2D(i,j,n_sidmassgrowthbot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sidmassgrowthbot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sidmasssi') /= 0) then - if (f_sidmasssi(1:1) /= 'x' .and. n_sidmasssi(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sidmasssi(ns),iblk) = & - a2D(i,j,n_sidmasssi(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sidmasssi(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sidmassevapsubl') /= 0) then - if (f_sidmassevapsubl(1:1) /= 'x' .and. n_sidmassevapsubl(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sidmassevapsubl(ns),iblk) = & - a2D(i,j,n_sidmassevapsubl(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sidmassevapsubl(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sidmassmelttop') /= 0) then - if (f_sidmassmelttop(1:1) /= 'x' .and. n_sidmassmelttop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sidmassmelttop(ns),iblk) = & - a2D(i,j,n_sidmassmelttop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sidmassmelttop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sidmassmeltbot') /= 0) then - if (f_sidmassmeltbot(1:1) /= 'x' .and. 
n_sidmassmeltbot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sidmassmeltbot(ns),iblk) = & - a2D(i,j,n_sidmassmeltbot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sidmassmeltbot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sidmasslat') /= 0) then - if (f_sidmasslat(1:1) /= 'x' .and. n_sidmasslat(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sidmasslat(ns),iblk) = & - a2D(i,j,n_sidmasslat(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sidmasslat(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - - - - if (index(avail_hist_fields(n)%vname,'sndmasssnf') /= 0) then - if (f_sndmasssnf(1:1) /= 'x' .and. n_sndmasssnf(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sndmasssnf(ns),iblk) = & - a2D(i,j,n_sndmasssnf(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sndmasssnf(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sndmassmelt') /= 0) then - if (f_sndmassmelt(1:1) /= 'x' .and. n_sndmassmelt(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sndmassmelt(ns),iblk) = & - a2D(i,j,n_sndmassmelt(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sndmassmelt(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siflswdtop') /= 0) then - if (f_siflswdtop(1:1) /= 'x' .and. n_siflswdtop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflswdtop(ns),iblk) = & - a2D(i,j,n_siflswdtop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflswdtop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siflswutop') /= 0) then - if (f_siflswutop(1:1) /= 'x' .and. n_siflswutop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflswutop(ns),iblk) = & - a2D(i,j,n_siflswutop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflswutop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if(index(avail_hist_fields(n)%vname,'siflswdbot') /= 0) then - if (f_siflswdbot(1:1) /= 'x' .and. n_siflswdbot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflswdbot(ns),iblk) = & - a2D(i,j,n_siflswdbot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflswdbot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sifllwdtop') /= 0) then - if (f_sifllwdtop(1:1) /= 'x' .and. n_sifllwdtop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sifllwdtop(ns),iblk) = & - a2D(i,j,n_sifllwdtop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sifllwdtop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sifllwutop') /= 0) then - if (f_sifllwutop(1:1) /= 'x' .and. n_sifllwutop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sifllwutop(ns),iblk) = & - a2D(i,j,n_sifllwutop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sifllwutop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! 
j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siflsenstop') /= 0) then - if (f_siflsenstop(1:1) /= 'x' .and. n_siflsenstop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflsenstop(ns),iblk) = & - a2D(i,j,n_siflsenstop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflsenstop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siflsensupbot') /= 0) then - if (f_siflsensupbot(1:1) /= 'x' .and. n_siflsensupbot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflsensupbot(ns),iblk) = & - a2D(i,j,n_siflsensupbot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflsensupbot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sifllatstop') /= 0) then - if (f_sifllatstop(1:1) /= 'x' .and. n_sifllatstop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sifllatstop(ns),iblk) = & - a2D(i,j,n_sifllatstop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sifllatstop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'sipr') /= 0) then - if (f_sipr(1:1) /= 'x' .and. n_sipr(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_sipr(ns),iblk) = & - a2D(i,j,n_sipr(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_sipr(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siflcondtop') /= 0) then - if (f_siflcondtop(1:1) /= 'x' .and. n_siflcondtop(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflcondtop(ns),iblk) = & - a2D(i,j,n_siflcondtop(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflcondtop(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siflcondbot') /= 0) then - if (f_siflcondbot(1:1) /= 'x' .and. n_siflcondbot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflcondbot(ns),iblk) = & - a2D(i,j,n_siflcondbot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflcondbot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siflsaltbot') /= 0) then - if (f_siflsaltbot(1:1) /= 'x' .and. n_siflsaltbot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflsaltbot(ns),iblk) = & - a2D(i,j,n_siflsaltbot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflsaltbot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - - if (index(avail_hist_fields(n)%vname,'siflfwbot') /= 0) then - if (f_siflfwbot(1:1) /= 'x' .and. n_siflfwbot(ns) /= 0) then - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j,iblk)) then - a2D(i,j,n_siflfwbot(ns),iblk) = & - a2D(i,j,n_siflfwbot(ns),iblk)*avgct(ns)*ravgip(i,j) - if (ravgip(i,j) == c0) a2D(i,j,n_siflfwbot(ns),iblk) = spval_dbl - endif - enddo ! i - enddo ! j - endif - endif - + do j = jlo, jhi + do i = ilo, ihi + if (albcnt(i,j,iblk,ns) <= puny) a2D(i,j,n,iblk) = spval_dbl + enddo ! i + enddo ! j + endif - !back out albedo/zenith angle dependence + ! 
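A note on the albcnt test just above (not part of the patch): albcnt(i,j,iblk,ns) is the accumulated count of history timesteps with the sun above the horizon, so the extra mask appears intended to report the CMIP albedo as missing wherever no sunlit samples were collected in the averaging window, e.g. during polar night:

      ! sketch: no sunlit samples accumulated -> report the missing value
      if (albcnt(i,j,iblk,ns) <= puny) a2D(i,j,n,iblk) = spval_dbl
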
back out albedo/zenith angle dependence if (avail_hist_fields(n)%vname(1:6) == 'albice') then do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then ravgctz = c0 if (albcnt(i,j,iblk,ns) > puny) & ravgctz = c1/albcnt(i,j,iblk,ns) @@ -3498,7 +3040,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vname(1:6) == 'albsni') then do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then ravgctz = c0 if (albcnt(i,j,iblk,ns) > puny) & ravgctz = c1/albcnt(i,j,iblk,ns) @@ -3509,7 +3051,6 @@ subroutine accum_hist (dt) enddo ! i enddo ! j endif - if (avail_hist_fields(n)%vname(1:8) == 'alvdr_ai') then do j = jlo, jhi do i = ilo, ihi @@ -3539,20 +3080,33 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_3Dc nn = n2D + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then - do k = 1, ncat_hist - do j = jlo, jhi - do i = ilo, ihi - if (.not. tmask(i,j,iblk)) then ! mask out land points - a3Dc(i,j,k,n,iblk) = spval_dbl - else ! convert units - a3Dc(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Dc(i,j,k,n,iblk) & - * ravgct + avail_hist_fields(nn)%conb - endif - enddo ! i - enddo ! j - enddo ! k + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + + ! Only average for timesteps when ice present + if (avail_hist_fields(n)%avg_ice_present) then + a3Dc(:,:,:,n,iblk) = a3Dc(:,:,:,n,iblk)*ravgipn(:,:,:) + else + a3Dc(:,:,:,n,iblk) = a3Dc(:,:,:,n,iblk)*ravgct + endif + + do k = 1, ncat_hist + do j = jlo, jhi + do i = ilo, ihi + if (.not. tmask(i,j,iblk)) then ! mask out land points + a3Dc(i,j,k,n,iblk) = spval_dbl + else ! convert units + a3Dc(i,j,k,n,iblk) = avail_hist_fields(nn)%cona*a3Dc(i,j,k,n,iblk) & + + avail_hist_fields(nn)%conb + endif + enddo ! i + enddo ! j + enddo ! k + + ! To-do: if (avail_hist_fields(n)%mask_ice_free_points) returns true, would + ! we mask by aice or aicen ? + endif + enddo ! n do n = 1, num_avail_hist_fields_3Dz @@ -3574,7 +3128,7 @@ subroutine accum_hist (dt) enddo ! n do n = 1, num_avail_hist_fields_3Db nn = n3Dzcum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzblyr do j = jlo, jhi do i = ilo, ihi @@ -3592,7 +3146,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_4Di nn = n3Dbcum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzilyr do ic = 1, ncat_hist do j = jlo, jhi @@ -3612,7 +3166,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_4Ds nn = n4Dicum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzslyr do ic = 1, ncat_hist do j = jlo, jhi @@ -3631,7 +3185,7 @@ subroutine accum_hist (dt) enddo ! n do n = 1, num_avail_hist_fields_4Db nn = n4Dscum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzblyr do ic = 1, ncat_hist do j = jlo, jhi @@ -3654,7 +3208,7 @@ subroutine accum_hist (dt) !--------------------------------------------------------------- ! 
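To make the consolidated path above concrete, here is a minimal sketch (not part of the patch) of how a field opts in to the new behaviour. The field name siexample and its n_siexample/f_siexample handles are invented for illustration; following the code above, ravgct is the reciprocal of the number of accumulated timesteps and ravgip(i,j) the per-cell reciprocal of the number of timesteps with ice present (zero where ice never occurred):

      ! 1. registration: the two optional flags are stored on the ice_hist_field entry
      call define_hist_field(n_siexample, "siexample", "1", tstr2D, tcstr,  &
           "example CMIP-style diagnostic",                                 &
           "averaged over timesteps with ice present only", c1, c0,         &
           ns1, f_siexample,                                                &
           avg_ice_present=.true., mask_ice_free_points=.true.)

      ! 2. accumulation: accum_hist then applies the generic logic in place of a
      !    hand-written per-variable block
      if (avail_hist_fields(n)%avg_ice_present) then
         a2D(:,:,n,iblk) = a2D(:,:,n,iblk)*ravgip(:,:)   ! ice-present average
      else
         a2D(:,:,n,iblk) = a2D(:,:,n,iblk)*ravgct        ! all-timestep average
      endif
      if (avail_hist_fields(n)%mask_ice_free_points) then
         where (ravgip(:,:) == c0) a2D(:,:,n,iblk) = spval_dbl
      endif
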
compute sig1 and sig2 - + call principal_stress (nx_block, ny_block, & stressp_1 (:,:,iblk), & stressm_1 (:,:,iblk), & diff --git a/source/ice_history_shared.F90 b/source/ice_history_shared.F90 index 9773aeb2..64cb4cce 100755 --- a/source/ice_history_shared.F90 +++ b/source/ice_history_shared.F90 @@ -73,16 +73,18 @@ module ice_history_shared !--------------------------------------------------------------- type, public :: ice_hist_field - character (len=16) :: vname ! variable name + character (len=32) :: vname ! variable name character (len=16) :: vunit ! variable units character (len=25) :: vcoord ! variable coordinates - character (len=16) :: vcellmeas ! variable cell measures - character (len=55) :: vdesc ! variable description - character (len=55) :: vcomment ! variable description + character (len=32) :: vcellmeas ! variable cell measures + character (len=255) :: vdesc ! variable description + character (len=255) :: vcomment ! variable description real (kind=dbl_kind) :: cona ! multiplicative conversion factor real (kind=dbl_kind) :: conb ! additive conversion factor character (len=1) :: vhistfreq ! frequency of history output integer (kind=int_kind) :: vhistfreq_n ! number of vhistfreq intervals + logical (kind=log_kind) :: avg_ice_present ! only average where ice is present + logical (kind=log_kind) :: mask_ice_free_points ! mask ice-free points end type integer (kind=int_kind), parameter, public :: & @@ -194,9 +196,11 @@ module ice_history_shared character (len=max_nstrm), public :: & ! f_example = 'md', & - f_hi = 'm', f_hs = 'm', & + f_hi = 'm', f_sivol = 'x', & + f_hs = 'm', & f_snowfrac = 'x', f_snowfracn = 'x', & - f_Tsfc = 'm', f_aice = 'm', & + f_Tsfc = 'm', & + f_aice = 'm', f_siconc = 'x' , & f_uvel = 'm', f_vvel = 'm', & f_uatm = 'm', f_vatm = 'm', & f_fswdn = 'm', f_flwdn = 'm', & @@ -246,6 +250,7 @@ module ice_history_shared f_iage = 'm', f_FY = 'm', & f_hisnap = 'm', f_aisnap = 'm', & f_sithick = 'x', f_sisnthick = 'x', & + f_simass = 'x', f_sisnmass = 'x', f_sisnmass_intensive = 'x',& f_sisnconc = 'x', f_siage = 'x', & f_sitemptop = 'x', f_sitempsnic = 'x', & f_sitempbot = 'x', f_sispeed = 'x', & @@ -264,13 +269,16 @@ module ice_history_shared f_sidmassth = 'x', f_sidmassdyn = 'x', & f_sidmassgrowthwat = 'x', & f_sidmassgrowthbot = 'x', & - f_sidmasssi = 'x', & + f_sidmasssi = 'x', f_sidmassgrowthsi = 'x', & f_sidmassevapsubl = 'x', & f_sidmassmelttop = 'x', & f_sidmassmeltbot = 'x', & - f_sidmasslat = 'x', & - f_sndmasssnf = 'x', & - f_sndmassmelt = 'x', & + f_sidmasslat = 'x', f_sidmassmeltlat = 'x', & + f_sndmasssnf = 'x', f_sisndmasssnf = 'x', f_sisndmasssnf_intensive = 'x', & + f_sndmassmelt = 'x', f_sisndmassmelt = 'x', f_sisndmassmelt_intensive = 'x', & + f_sndmassdyn = 'x', f_sisndmassdyn = 'x', f_sisndmassdyn_intensive = 'x', & + f_sisndmasssi = 'x', f_sisndmasssi_intensive = 'x', & + f_sndmasssubl = 'x', f_sisndmasssubl = 'x', f_sisndmasssubl_intensive = 'x', & f_sidivvel = 'x', & f_siflswdtop = 'x', & f_siflswutop = 'x', & @@ -278,24 +286,27 @@ module ice_history_shared f_sifllwdtop = 'x', & f_sifllwutop = 'x', & f_siflsenstop = 'x', & - f_siflsensupbot = 'x', & + f_siflsensupbot = 'x', f_siflsensbot = 'x', & f_sifllatstop = 'x', & f_siflcondtop = 'x', & f_siflcondbot = 'x', & f_sipr = 'x', & f_siflsaltbot = 'x', & f_siflfwbot = 'x', & + f_siflfwdrain = 'x', & f_sisaltmass = 'x', & - f_aicen = 'x', f_vicen = 'x', & - f_vsnon = 'x', & - f_trsig = 'm', f_icepresent = 'm', & + f_aicen = 'x' , f_siitdconc = 'x', & + f_vicen = 'x', & + f_vsnon = 'x', & + f_trsig 
= 'm', & + f_icepresent = 'm', f_sitimefrac = 'x',& f_fsurf_ai = 'm', f_fcondtop_ai= 'm', & f_fmeltt_ai = 'm', & f_fsurfn_ai = 'x' ,f_fcondtopn_ai='x', & f_fmelttn_ai= 'x', f_flatn_ai = 'x', & f_fsensn_ai = 'x', & ! f_field3dz = 'x', & - f_Tn_top = 'm', f_keffn_top = 'm', & + f_Tn_top = 'x', f_keffn_top = 'x', & f_Tinz = 'x', f_Sinz = 'x', & f_Tsnz = 'x', & f_a11 = 'x', f_a12 = 'x', & @@ -322,9 +333,11 @@ module ice_history_shared f_VGRDi , f_VGRDs , & f_VGRDb , & ! f_example , & - f_hi, f_hs , & + f_hi , f_sivol , & + f_hs , & f_snowfrac, f_snowfracn, & - f_Tsfc, f_aice , & + f_Tsfc , & + f_aice , f_siconc , & f_uvel, f_vvel , & f_uatm, f_vatm , & f_fswdn, f_flwdn , & @@ -374,6 +387,7 @@ module ice_history_shared f_iage, f_FY , & f_hisnap, f_aisnap , & f_sithick, f_sisnthick, & + f_simass, f_sisnmass, f_sisnmass_intensive, & f_sisnconc, f_siage, & f_sifb, & f_sitemptop, f_sitempsnic,& @@ -393,30 +407,36 @@ module ice_history_shared f_sidmassth, f_sidmassdyn,& f_sidmassgrowthwat, & f_sidmassgrowthbot, & - f_sidmasssi, & + f_sidmasssi, f_sidmassgrowthsi , & f_sidmassevapsubl, & f_sidmassmelttop, & f_sidmassmeltbot, & - f_sidmasslat, & - f_sndmasssnf, & - f_sndmassmelt, & + f_sidmasslat, f_sidmassmeltlat,& + f_sndmasssubl, f_sisndmasssubl, f_sisndmasssubl_intensive, & + f_sndmasssnf, f_sisndmasssnf, f_sisndmasssnf_intensive, & + f_sndmassmelt, f_sisndmassmelt, f_sisndmassmelt_intensive, & + f_sndmassdyn, f_sisndmassdyn, f_sisndmassdyn_intensive, & + f_sisndmasssi, f_sisndmasssi_intensive, & f_siflswdtop, & f_siflswutop, & f_siflswdbot, & f_sifllwdtop, & f_sifllwutop, & f_siflsenstop, & - f_siflsensupbot, & + f_siflsensupbot, f_siflsensbot, & f_sifllatstop, & f_siflcondtop, & f_siflcondbot, & f_sipr, & f_siflsaltbot, & f_siflfwbot, & + f_siflfwdrain, & f_sisaltmass, & - f_aicen, f_vicen , & - f_vsnon, & - f_trsig, f_icepresent,& + f_aicen, f_siitdconc, & + f_vicen, & + f_vsnon, & + f_trsig, & + f_icepresent, f_sitimefrac,& !same var, two names f_fsurf_ai, f_fcondtop_ai,& f_fmeltt_ai, & f_fsurfn_ai,f_fcondtopn_ai,& @@ -502,6 +522,7 @@ module ice_history_shared n_fsalt , n_fsalt_ai , & n_sidivvel, & n_sithick , n_sisnthick , & + n_simass , n_sisnmass, n_sisnmass_intensive, & n_sisnconc, n_siage, & n_sifb, & n_sitemptop , n_sitempsnic , & @@ -516,17 +537,27 @@ module ice_history_shared n_sicompstren, & n_sialb, & n_sihc , n_sisnhc, & + n_siconc, n_sivol, & n_sidconcth , n_sidconcdyn, & n_sidmassth , n_sidmassdyn, & n_sidmassgrowthwat, & n_sidmassgrowthbot, & n_sidmasssi, & + n_sidmasssubl, & n_sidmassevapsubl, & n_sidmassmelttop, & n_sidmassmeltbot, & - n_sidmasslat, & + n_sidmasslat, & n_sndmasssnf, & + n_sisndmasssnf_intensive, & n_sndmassmelt, & + n_sisndmassmelt_intensive, & + n_sndmassdyn, & + n_sisndmassdyn_intensive, & + n_sisndmasssi, & + n_sisndmasssi_intensive, & + n_sisndmasssubl, & + n_sisndmasssubl_intensive, & n_siflswdtop, & n_siflswutop, & n_siflswdbot, & @@ -540,7 +571,9 @@ module ice_history_shared n_sipr, & n_siflsaltbot, & n_siflfwbot, & + n_siflfwdrain, & n_sisaltmass, & + n_siitdconc, & n_vsnon, & n_fhocn , n_fhocn_ai , & n_fswthru , n_fswthru_ai , & @@ -706,7 +739,7 @@ end subroutine construct_filename subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & vdesc, vcomment, cona, conb, & - ns, vhistfreq) + ns, vhistfreq, avg_ice_present, mask_ice_free_points) use ice_calendar, only: histfreq, histfreq_n, nstreams use ice_domain_size, only: max_nstrm @@ -734,12 +767,28 @@ subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & integer 
(kind=int_kind), intent(in) :: & ns ! history file stream index + logical (kind=log_kind), optional, intent(in) :: & + avg_ice_present , & ! compute average only when ice is present + mask_ice_free_points ! mask ice-free points + integer (kind=int_kind) :: & ns1 , & ! variable stream loop index lenf ! length of namelist string character (len=40) :: stmp + logical (kind=log_kind) :: & + l_avg_ice_present , & ! compute average only when ice is present + l_mask_ice_free_points ! mask ice-free points + + character(len=*), parameter :: subname = '(define_hist_field)' + + l_avg_ice_present = .false. + l_mask_ice_free_points = .false. + + if(present(avg_ice_present)) l_avg_ice_present = avg_ice_present + if(present(mask_ice_free_points)) l_mask_ice_free_points = mask_ice_free_points + if (histfreq(ns) == 'x') then call abort_ice("define_hist_fields has histfreq x") endif @@ -750,6 +799,10 @@ subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & do ns1 = 1, lenf if (vhistfreq(ns1:ns1) == histfreq(ns)) then + if (ns1 > 1 .and. index(vhistfreq(1:ns1-1),'x') /= 0) then + call abort_ice(subname//' ERROR: history frequency variable f_' // vname // ' can''t contain ''x'' along with active frequencies') + endif + num_avail_hist_fields_tot = num_avail_hist_fields_tot + 1 if (vcoord(11:14) == 'time') then @@ -798,6 +851,8 @@ subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & avail_hist_fields(id(ns))%conb = conb avail_hist_fields(id(ns))%vhistfreq = vhistfreq(ns1:ns1) avail_hist_fields(id(ns))%vhistfreq_n = histfreq_n(ns) + avail_hist_fields(id(ns))%avg_ice_present = l_avg_ice_present + avail_hist_fields(id(ns))%mask_ice_free_points = l_mask_ice_free_points endif enddo @@ -825,7 +880,7 @@ subroutine accum_hist_field_2D(id, iblk, field_accum, field) integer (int_kind), dimension(max_nstrm), intent(in) :: & id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk real (kind=dbl_kind), intent(in) :: & @@ -896,7 +951,7 @@ subroutine accum_hist_field_3D(id, iblk, ndim, field_accum, field) integer (int_kind), dimension(max_nstrm), intent(in) :: & id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk integer (kind=int_kind), intent(in) :: & @@ -960,7 +1015,7 @@ subroutine accum_hist_field_4D(id, iblk, ndim3, ndim4, field_accum, field) integer (int_kind), dimension(max_nstrm), intent(in) :: & id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk integer (kind=int_kind), intent(in) :: & diff --git a/source/ice_read_write.F90 b/source/ice_read_write.F90 index 8e973873..833bed90 100755 --- a/source/ice_read_write.F90 +++ b/source/ice_read_write.F90 @@ -1140,6 +1140,10 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & status = nf90_get_var( fid, varid, work_g1, & start=(/1,1,nrec/), & count=(/nx,ny,1/) ) + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc_xy: Cannot get variable '//trim(varname) ) + endif #else if (.not. present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & @@ -1318,6 +1322,10 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & status = nf90_get_var( fid, varid, work_g1, & start=(/1,1,1,nrec/), & count=(/nx,ny,ncat,1/) ) + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc_xyz: Cannot get variable '//trim(varname) ) + endif #else if (.not. 
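The nf90_get_var/nf90_put_var calls in this and the following hunks now abort with a readable message on a bad status instead of failing silently. The patch repeats the inline if-test at each call site; a sketch of the same idea factored into a helper, in the spirit of the check() routine already used by ice_history_write (the name check_nc and its placement are illustrative only):

      subroutine check_nc(status, context)
         ! abort with a readable message when a netCDF call fails (sketch only)
         use netcdf,   only: nf90_noerr, nf90_strerror
         use ice_exit, only: abort_ice
         integer,           intent(in) :: status   ! return code from an nf90_* call
         character (len=*), intent(in) :: context  ! e.g. 'ice_read_nc_xy: aicen'
         if (status /= nf90_noerr) &
            call abort_ice(trim(context)//': '//trim(nf90_strerror(status)))
      end subroutine check_nc
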
present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & @@ -1553,6 +1561,11 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & start=(/1,nrec/), & count=(/nilyr,1/) ) + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_read_nc_z: Cannot get variable '//trim(varname) ) + endif + endif ! my_task = master_task !------------------------------------------------------------------- @@ -1661,6 +1674,11 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & start=(/1,1,nrec/), & count=(/nx,ny,1/) ) + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_write_nc_xy: Cannot put variable '//trim(nf90_strerror(status)) ) + endif + endif ! my_task = master_task !------------------------------------------------------------------- @@ -1777,6 +1795,11 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & start=(/1,1,1,nrec/), & count=(/nx,ny,ncat,1/) ) + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_write_nc_xyz: Cannot put variable '//trim(nf90_strerror(status)) ) + endif + endif ! my_task = master_task !------------------------------------------------------------------- @@ -1946,6 +1969,10 @@ subroutine ice_close_nc(fid) if (my_task == master_task) then status = nf90_close(fid) + if (status /= nf90_noerr) then + call abort_ice ( & + 'ice_close_nc: Error closing file '//trim(nf90_strerror(status)) ) + endif endif #endif diff --git a/source/ice_step_mod.F90 b/source/ice_step_mod.F90 index 4afab596..7235956d 100755 --- a/source/ice_step_mod.F90 +++ b/source/ice_step_mod.F90 @@ -183,8 +183,7 @@ subroutine step_therm1 (dt, iblk) use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, & meltsn, melttn, meltbn, congeln, snoicen, dsnown, uatm, vatm, & wind, rhoa, potT, Qa, zlvl, strax, stray, flatn, fsensn, fsurfn, & - fcondtopn, fcondbotn, fcondbot, & - ice_freeboardn, ice_freeboard, snowfracn, & + fcondtopn, fcondbotn, fcondbot, snowfracn, & flw, fsnow, fpond, sss, mlt_onset, frz_onset, faero_atm, faero_ocn, & frain, Tair, coszen, strairxT, strairyT, fsurf, fcondtop, fsens, & flat, fswabs, flwout, evap, Tref, Qref, Uref, fresh, fsalt, fhocn, & @@ -204,7 +203,7 @@ subroutine step_therm1 (dt, iblk) nt_apnd, nt_hpnd, nt_ipnd, nt_alvl, nt_vlvl, nt_Tsfc, & tr_iage, nt_iage, tr_FY, nt_FY, tr_aero, tr_pond, tr_pond_cesm, & tr_pond_lvl, nt_qice, nt_sice, tr_pond_topo, uvel, vvel - use ice_therm_shared, only: calc_Tsfc, Tsnic, Ti_bot + use ice_therm_shared, only: calc_Tsfc, Tsnice, Ti_bot use ice_therm_vertical, only: frzmlt_bottom_lateral, thermo_vertical use ice_timers, only: ice_timer_start, ice_timer_stop, timer_ponds !BBB: @@ -391,6 +390,9 @@ subroutine step_therm1 (dt, iblk) dfloe (:,:,iblk), ncat) endif + Tsnice(:,:,iblk) = c0 + Ti_bot(:,:,iblk) = c0 + do n = 1, ncat meltsn(:,:,n,iblk) = c0 @@ -560,7 +562,6 @@ subroutine step_therm1 (dt, iblk) fcondtopn(:,:,n,iblk), fcondbotn(:,:,n,iblk), & fsensn(:,:,n,iblk), flatn(:,:,n,iblk), & flwoutn, & - ice_freeboardn, & evapn, & evapn_ice, evapn_snow, & freshn, & @@ -571,7 +572,7 @@ subroutine step_therm1 (dt, iblk) mlt_onset(:,:,iblk), frz_onset(:,:,iblk), & yday, l_stop, & istop, jstop, & - dsnown(:,:,n,iblk)) + dsnown(:,:,n,iblk), Tsnice(:,:,iblk)) if (l_stop) then write (nu_diag,*) 'istep1, my_task, iblk =', & @@ -722,7 +723,6 @@ subroutine step_therm1 (dt, iblk) fswabsn, flwoutn, & evapn, & evapn_ice, evapn_snow, & - ice_freeboardn(:,:,n,iblk), & Trefn, Qrefn, & freshn, fsaltn, & fhocnn, fswthrun(:,:,n,iblk), & @@ -734,7 +734,6 @@ subroutine step_therm1 (dt, iblk) fswabs (:,:,iblk), 
flwout (:,:,iblk), & evap (:,:,iblk), & evap_ice(:,:,iblk), evap_snow (:,:,iblk), & - ice_freeboard(:,:,iblk), & Tref (:,:,iblk), Qref (:,:,iblk), & fresh (:,:,iblk), fsalt (:,:,iblk), & fhocn (:,:,iblk), fswthru (:,:,iblk), & @@ -748,9 +747,16 @@ subroutine step_therm1 (dt, iblk) enddo ! ncat - Ti_bot(:,:,iblk) = Tbot(:,:) * aice(:,:,iblk) - Tsnic(:,:,iblk) = c0 - + ! Tsnice is diagnostic only, it aggregated over ice thickness cats in thermo_vertical, + ! return to temperature over ice only + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j,iblk) .and. aice(i,j,iblk) > c0) then + Ti_bot(i,j,iblk) = Tbot(i,j) + Tsnice(i,j,iblk) = Tsnice(i,j,iblk)/aice(i,j,iblk) + endif + enddo + enddo !----------------------------------------------------------------- ! Calculate ponds from the topographic scheme !----------------------------------------------------------------- @@ -771,6 +777,7 @@ subroutine step_therm1 (dt, iblk) trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) endif + call ice_timer_stop(timer_ponds,iblk) end subroutine step_therm1 diff --git a/source/ice_therm_shared.F90 b/source/ice_therm_shared.F90 index 4d0a3f54..a23a91d1 100755 --- a/source/ice_therm_shared.F90 +++ b/source/ice_therm_shared.F90 @@ -44,7 +44,8 @@ module ice_therm_shared real (kind=dbl_kind), & dimension(nx_block,ny_block,max_blocks), & public :: & - Tsnic, Ti_bot + Tsnice, & ! snow ice interface temperature (deg C), (diagnostic) + Ti_bot logical (kind=log_kind), public :: & l_brine ! if true, treat brine pocket effects diff --git a/source/ice_therm_vertical.F90 b/source/ice_therm_vertical.F90 index 7a54934e..e4a9e97f 100755 --- a/source/ice_therm_vertical.F90 +++ b/source/ice_therm_vertical.F90 @@ -29,8 +29,8 @@ module ice_therm_vertical nt_Tsfc, nt_iage, nt_sice, nt_qice, nt_qsno, & nt_apnd, nt_hpnd use ice_therm_shared, only: ktherm, ferrmax, heat_capacity, l_brine, & - calc_Tsfc, calculate_tin_from_qin, Tmin, & - cap_fluxes + calc_Tsfc, calculate_tin_from_qin, Tmin, Tsnice, & + cap_fluxes use ice_therm_bl99, only: temperature_changes use ice_therm_0layer, only: zerolayer_temperature use ice_flux, only: Tf @@ -91,7 +91,6 @@ subroutine thermo_vertical (nx_block, ny_block, & fcondbotn, & fsensn, flatn, & flwoutn, & - ice_freeboardn, & evapn, & evapn_ice, evapn_snow,& freshn, fsaltn, & @@ -101,7 +100,7 @@ subroutine thermo_vertical (nx_block, ny_block, & mlt_onset, frz_onset, & yday, l_stop, & istop, jstop, & - dsnow) + dsnow, Tsnice) use ice_communicate, only: my_task use ice_therm_mushy, only: temperature_changes_salinity @@ -193,10 +192,7 @@ subroutine thermo_vertical (nx_block, ny_block, & snoice , & ! snow-ice formation (m/step-->cm/day) dsnow , & ! change in snow thickness (m/step-->cm/day) mlt_onset, & ! day of year that sfc melting begins - frz_onset, & ! day of year that freezing begins (congel or frazil) - ice_freeboardn ! height of ice surface (i.e. not snow surface) - ! above sea level in m - + frz_onset ! day of year that freezing begins (congel or frazil) real (kind=dbl_kind), intent(in) :: & yday ! day of year @@ -221,6 +217,7 @@ subroutine thermo_vertical (nx_block, ny_block, & ! 2D state variables (thickness, temperature, enthalpy) real (kind=dbl_kind), dimension (icells) :: & + Tsnice , & ! snow ice interface temperature (deg C), (diagnostic) hilyr , & ! ice layer thickness hslyr , & ! snow layer thickness Tsf , & ! 
ice/snow top surface temp, same as Tsfcn (deg C) @@ -286,9 +283,8 @@ subroutine thermo_vertical (nx_block, ny_block, & fhocnn (i,j) = c0 fadvocn(i,j) = c0 fcondbotn(i,j) = c0 - ice_freeboardn(i,j) = c0 - - + evapn_ice(i,j)= c0 + evapn_snow(i,j)=c0 meltt (i,j) = c0 meltb (i,j) = c0 @@ -464,25 +460,37 @@ subroutine thermo_vertical (nx_block, ny_block, & endif ! heat_capacity - ! Alex West: Read 1D bottom conductive flux array into 2D array - ! for diagnostics (SIMIP)i + ! intermediate energy for error check + do ij = 1, icells + einter(ij) = c0 + do k = 1, nslyr + einter(ij) = einter(ij) + hslyr(ij) * zqsn(ij,k) + enddo ! k + do k = 1, nilyr + einter(ij) = einter(ij) + hilyr(ij) * zqin(ij,k) + enddo ! k + enddo ! ij + + ! Read 1D bottom conductive flux array into 2D array for diagnostics (SIMIP) do ij = 1, icells i = indxi(ij) j = indxj(ij) fcondbotn(i,j) = fcondbot(ij) - enddo - - ! intermediate energy for error check - do ij = 1, icells - einter(ij) = c0 - do k = 1, nslyr - einter(ij) = einter(ij) + hslyr(ij) * zqsn(ij,k) - enddo ! k - do k = 1, nilyr - einter(ij) = einter(ij) + hilyr(ij) * zqin(ij,k) - enddo ! k - enddo ! ij + ! Tsnice from https://github.com/CICE-Consortium/Icepack/blob/e9d626f0e5b743e143a2e87248a1aa22ee4f3751/columnphysics/icepack_therm_vertical.F90#L378C1-L385C12 + ! Tsnice is : + if (hslyr(ij) > puny) then + ! interface temperature is taken by assumming a linear temperature gradient between temperature at + ! middle of top ice layer & middle of bottom snow layer temperatures, + ! weighted by the thickness of each layer (https://github.com/CICE-Consortium/Icepack/pull/542) + Tsnice(ij) = Tsnice(ij) + aicen(i,j)*(& + (hilyr(ij)*zTsn(ij,nslyr) + hslyr(ij)*zTin(ij,1)) & + / (hslyr(ij)+hilyr(ij)) & + ) + else + Tsnice(ij) = Tsnice(ij) + aicen(i,j)*Tsf(ij) + endif + enddo if (l_stop) return @@ -503,7 +511,6 @@ subroutine thermo_vertical (nx_block, ny_block, & fbot, Tbot, & flatn, fsurfn, & fcondtopn, fcondbot, & - ice_freeboardn, & fsnow, hsn_new, & fhocnn, evapn, & evapn_ice, evapn_snow,& @@ -1446,7 +1453,6 @@ subroutine thickness_changes (nx_block, ny_block, & fbot, Tbot, & flatn, fsurfn, & fcondtopn, fcondbot, & - ice_freeboardn, & fsnow, hsn_new, & fhocnn, evapn, & evapn_ice, evapn_snow,& @@ -1508,10 +1514,7 @@ subroutine thickness_changes (nx_block, ny_block, & dsnow , & ! snow formation (m/step-->cm/day) iage , & ! ice age (s) mlt_onset , & ! day of year that sfc melting begins - frz_onset , & ! day of year that freezing begins (congel or frazil) - ice_freeboardn ! height of ice surface (i.e. not snow surface) - ! above sea level in m - + frz_onset ! day of year that freezing begins (congel or frazil) real (kind=dbl_kind), dimension (icells), & intent(inout) :: & @@ -2016,7 +2019,7 @@ subroutine thickness_changes (nx_block, ny_block, & hin, hsn, & zqin, zqsn, & dzi, dzs, & - dsnow, ice_freeboardn) + dsnow ) !---!------------------------------------------------------------------- !---! Repartition the ice and snow into equal-thickness layers, @@ -2219,7 +2222,7 @@ subroutine freeboard (nx_block, ny_block, & hin, hsn, & zqin, zqsn, & dzi, dzs, & - dsnow, ice_freeboardn) + dsnow ) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -2238,12 +2241,6 @@ subroutine freeboard (nx_block, ny_block, & dsnow , & ! change in snow thickness after snow-ice formation (m) iage ! snow thickness (m) - real (kind=dbl_kind), dimension(nx_block,ny_block), & - intent(inout) :: & - ice_freeboardn ! height of ice surface (i.e. not snow surface) - ! 
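Stripped of the loop and indexing machinery, the snow-ice interface temperature added above reduces to a thickness-weighted interpolation between layer mid-point temperatures. A sketch for a single thickness category (the names Tsnice_n, zTsn_bot and zTin_top are illustrative only; hslyr and hilyr are the snow and ice layer thicknesses as in the hunk above):

      ! sketch: interface temperature from a linear gradient between the mid-point
      ! of the lowest snow layer and the mid-point of the top ice layer
      if (hslyr > puny) then
         Tsnice_n = (hilyr*zTsn_bot + hslyr*zTin_top) / (hslyr + hilyr)
      else
         Tsnice_n = Tsf   ! snow-free: fall back to the ice/snow surface temperature
      endif

step_therm1 then accumulates aicen*Tsnice_n over the categories and divides the grid-cell sum by aice, so the history field is a temperature over the ice-covered fraction only.
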
above sea level in m - - real (kind=dbl_kind), dimension (icells), & intent(inout) :: & hin , & ! ice thickness (m) @@ -2352,16 +2349,6 @@ subroutine freeboard (nx_block, ny_block, & endif ! dhin > puny enddo ! ij - ! Calculate diagnostic sea ice freeboard after adjustments (SIMIP) - do ij = 1, icells - i = indxi(ij) - j = indxj(ij) - - ice_freeboardn(i,j) = & - hin(ij) * (1 - rhoi / rhow) - hsn(ij) * (rhos / rhow) - enddo - - end subroutine freeboard !======================================================================= From 5828e0cd3785ba38d0a51bf0ad92d449024fa95d Mon Sep 17 00:00:00 2001 From: Anton Steketee <79179784+anton-seaice@users.noreply.github.com> Date: Mon, 24 Nov 2025 11:27:11 +1100 Subject: [PATCH 43/52] Fix sndmasssn (#78) This fixes a minor issue where sisndmassdyn wasn't being saved --- source/ice_history.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/ice_history.F90 b/source/ice_history.F90 index ff419e62..afca964d 100755 --- a/source/ice_history.F90 +++ b/source/ice_history.F90 @@ -1351,7 +1351,7 @@ subroutine init_hist (dt) "per unit grid cell area", c1, c0, & ns1, f_sndmasssnf) - call define_hist_field(n_snow_ai,"sisndmasssnf","kg m^-2 s^-1",tstr2D, tcstr, & + call define_hist_field(n_sndmasssnf,"sisndmasssnf","kg m^-2 s^-1",tstr2D, tcstr, & "Snow Mass Change Through Snowfall", & "Always positive or zero, per unit grid cell area", c1, c0, & ns1, f_sisndmasssnf) From 3f8ba8ac53c244dcba5d95def904ddd2ffdc7039 Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Fri, 9 Jan 2026 14:03:36 +1100 Subject: [PATCH 44/52] Use ACCESS instead of AusCOM for ESM1.6, some merge tidyup --- io_netcdf/ice_history_write.F90 | 287 ++++++++++++++++---------------- mpi/ice_boundary.F90 | 14 ++ source/ice_distribution.F90 | 1 - source/ice_history.F90 | 16 +- source/ice_history_shared.F90 | 4 +- source/ice_step_mod.F90 | 2 +- source/ice_therm_vertical.F90 | 5 +- 7 files changed, 176 insertions(+), 153 deletions(-) diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index 08fcd1b3..e382020a 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -7,7 +7,7 @@ ! Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Accepted some CCSM code into mainstream CICE ! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. ! Added histfreq_n and histfreq='h' options, removed histfreq='w' @@ -38,7 +38,7 @@ module ice_history_write use ice_history_shared use ice_itd, only: hin_max use ice_calendar, only: write_ic, histfreq - use ice_fileunits, only: nu_diag, ice_stderr, ice_stdout + implicit none private @@ -76,7 +76,7 @@ module ice_history_write ! ! author: Elizabeth C. Hunke, LANL - subroutine ice_write_hist (ns) +subroutine ice_write_hist (ns) use ice_calendar, only: time, month, daymo use ice_fileunits, only: nu_diag @@ -482,11 +482,11 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) enddo - ! Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR) - dimidex(1)=cmtid - dimidex(2)=kmtidi - dimidex(3)=kmtids - dimidex(4)=kmtidb + ! Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR) + dimidex(1)=cmtid + dimidex(2)=kmtidi + dimidex(3)=kmtids + dimidex(4)=kmtidb do i = 1, nvarz if (igrdz(i)) then @@ -663,17 +663,18 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) if (TRIM(avail_hist_fields(n)%vname)/='sig1' .or. 
& TRIM(avail_hist_fields(n)%vname)/='sig2') then if (avail_hist_fields(n)%avg_ice_present) then - call check(nf90_put_att(ncid,varid,'cell_methods',& - 'area: time: mean where sea_ice (mask=siconc)'), & - 'put att cell methods time mean '//avail_hist_fields(n)%vname) + call check(nf90_put_att(ncid,varid,'cell_methods',& + 'area: time: mean where sea_ice (mask=siconc)'), & + 'put att cell methods time mean '//avail_hist_fields(n)%vname) else - if (TRIM(avail_hist_fields(n)%vname(1:2))/='si') then !native diags - call check(nf90_put_att(ncid,varid,'cell_methods','time: mean'), & - 'put att cell methods time mean '//avail_hist_fields(n)%vname) - else !cmip diags - call check(nf90_put_att(ncid,varid,'cell_methods', & - 'area: mean where sea time: mean'), & - 'put att cell methods time mean '//avail_hist_fields(n)%vname) + if (TRIM(avail_hist_fields(n)%vname(1:2))/='si') then !native diags + call check(nf90_put_att(ncid,varid,'cell_methods','time: mean'), & + 'put att cell methods time mean '//avail_hist_fields(n)%vname) + else !cmip diags + call check(nf90_put_att(ncid,varid,'cell_methods', & + 'area: mean where sea time: mean'), & + 'put att cell methods time mean '//avail_hist_fields(n)%vname) + endif endif endif endif @@ -1122,9 +1123,9 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) write(title,'(a,i3,a)') 'This Year Has ',int(dayyr),' days' #else if (use_leap_years) then - write(title,'(a,i3,a)') 'This year has ',int(dayyr),' days' + write(title,'(a,i3,a)') 'This year has ',int(dayyr),' days' else - write(title,'(a,i3,a)') 'All years have exactly ',int(dayyr),' days' + write(title,'(a,i3,a)') 'All years have exactly ',int(dayyr),' days' endif #endif call check(nf90_put_att(ncid,nf90_global,'comment',title), & @@ -1189,31 +1190,31 @@ subroutine write_coordinate_variables(ncid, coord_var, var_nz) if (my_task == master_task) coord_var_name = coord_var(i)%short_name call broadcast_scalar(coord_var_name, master_task) SELECT CASE (coord_var_name) - CASE ('TLON') - ! Convert T grid longitude from -180 -> 180 to 0 to 360 - work1 = TLON*rad_to_deg + c360 - where (work1 > c360) work1 = work1 - c360 - where (work1 < c0 ) work1 = work1 + c360 - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('TLAT') - work1 = TLAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ULON') - work1 = ULON*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ULAT') - work1 = ULAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - END SELECT - - if (my_task == master_task) then + CASE ('TLON') + ! Convert T grid longitude from -180 -> 180 to 0 to 360 + work1 = TLON*rad_to_deg + c360 + where (work1 > c360) work1 = work1 - c360 + where (work1 < c0 ) work1 = work1 + c360 + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('TLAT') + work1 = TLAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ULON') + work1 = ULON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ULAT') + work1 = ULAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + END SELECT + + if (my_task == master_task) then work_gr = work_g1 call check(nf90_inq_varid(ncid, coord_var_name, varid), & 'inq varid '//coord_var_name) call check(nf90_put_var(ncid,varid,work_gr), & 'put var '//coord_var_name) - endif - enddo + endif + enddo ! 
Extra dimensions (NCAT, VGRD*) if (my_task == master_task) then @@ -1274,9 +1275,9 @@ subroutine write_grid_variables(ncid, var, var_nverts) work_gr(:,:) = c0 work_gr3(:,:,:) = c0 - if (igrd(n_tmask)) then - call gather_global(work_g1, hm, master_task, distrb_info) - if (my_task == master_task) then + if (igrd(n_tmask)) then + call gather_global(work_g1, hm, master_task, distrb_info) + if (my_task == master_task) then work_gr = work_g1 call check(nf90_inq_varid(ncid, 'tmask', varid), & 'inq var tmask') @@ -1399,30 +1400,30 @@ subroutine write_2d_variables(ns, ncid, i_time) if (my_task == master_task) then allocate(work_g1(nx_global,ny_global)) - allocate(work_gr(nx_global,ny_global)) - else + allocate(work_gr(nx_global,ny_global)) + else allocate(work_g1(1,1)) - allocate(work_gr(1,1)) ! to save memory - endif + allocate(work_gr(1,1)) ! to save memory + endif - work_gr(:,:) = c0 - work_g1(:,:) = c0 + work_gr(:,:) = c0 + work_g1(:,:) = c0 do n=1, num_avail_hist_fields_2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call gather_global(work_g1, a2D(:,:,n,:), & - master_task, distrb_info) - if (my_task == master_task) then - work_gr(:,:) = work_g1(:,:) + call gather_global(work_g1, a2D(:,:,n,:), & + master_task, distrb_info) + if (my_task == master_task) then + work_gr(:,:) = work_g1(:,:) call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & 'inq varid '//avail_hist_fields(n)%vname) call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/1,1,i_time/), & count=(/nx_global,ny_global/)), & 'put var '//avail_hist_fields(n)%vname) - endif + endif endif - enddo ! num_avail_hist_fields_2D + enddo ! num_avail_hist_fields_2D deallocate(work_g1) deallocate(work_gr) @@ -1448,161 +1449,161 @@ subroutine write_3d_and_4d_variables(ns, ncid, i_time) allocate(work_gr(1,1)) ! to save memory endif - work_gr(:,:) = c0 - work_g1(:,:) = c0 + work_gr(:,:) = c0 + work_g1(:,:) = c0 - do n = n2D + 1, n3Dccum + do n = n2D + 1, n3Dccum nn = n - n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & 'inq varid '//avail_hist_fields(n)%vname) - endif - do k = 1, ncat_hist - call gather_global(work_g1, a3Dc(:,:,k,nn,:), & - master_task, distrb_info) - work_gr(:,:) = work_g1(:,:) + endif + do k = 1, ncat_hist + call gather_global(work_g1, a3Dc(:,:,k,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/1,1,k,i_time/), & count=(/nx_global,ny_global, 1/)), & 'put var '//avail_hist_fields(n)%vname) - endif - enddo ! k + endif + enddo ! k endif - enddo ! num_avail_hist_fields_3Dc + enddo ! num_avail_hist_fields_3Dc - work_gr(:,:) = c0 - work_g1(:,:) = c0 + work_gr(:,:) = c0 + work_g1(:,:) = c0 - do n = n3Dccum+1, n3Dzcum + do n = n3Dccum+1, n3Dzcum nn = n - n3Dccum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & 'inq varid '//avail_hist_fields(n)%vname) - endif - do k = 1, nzilyr - call gather_global(work_g1, a3Dz(:,:,k,nn,:), & - master_task, distrb_info) - work_gr(:,:) = work_g1(:,:) + endif + do k = 1, nzilyr + call gather_global(work_g1, a3Dz(:,:,k,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/1,1,k,i_time/), & count=(/nx_global,ny_global,1/)), & 'put var '//avail_hist_fields(n)%vname) - endif - enddo ! k + endif + enddo ! k endif - enddo ! num_avail_hist_fields_3Dz + enddo ! num_avail_hist_fields_3Dz - work_gr(:,:) = c0 - work_g1(:,:) = c0 + work_gr(:,:) = c0 + work_g1(:,:) = c0 - do n = n3Dzcum+1, n3Dbcum + do n = n3Dzcum+1, n3Dbcum nn = n - n3Dzcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & 'inq varid '//avail_hist_fields(n)%vname) - endif - do k = 1, nzblyr - call gather_global(work_g1, a3Db(:,:,k,nn,:), & - master_task, distrb_info) - work_gr(:,:) = work_g1(:,:) + endif + do k = 1, nzblyr + call gather_global(work_g1, a3Db(:,:,k,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/1,1,k,i_time/), & count=(/nx_global,ny_global,1/)), & 'put var '//avail_hist_fields(n)%vname) - endif - enddo ! k + endif + enddo ! k endif - enddo ! num_avail_hist_fields_3Db + enddo ! num_avail_hist_fields_3Db - work_gr(:,:) = c0 - work_g1(:,:) = c0 + work_gr(:,:) = c0 + work_g1(:,:) = c0 - do n = n3Dbcum+1, n4Dicum + do n = n3Dbcum+1, n4Dicum nn = n - n3Dbcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & 'inq varid '//avail_hist_fields(n)%vname) - endif - do ic = 1, ncat_hist - do k = 1, nzilyr - call gather_global(work_g1, a4Di(:,:,k,ic,nn,:), & - master_task, distrb_info) - work_gr(:,:) = work_g1(:,:) - if (my_task == master_task) then + endif + do ic = 1, ncat_hist + do k = 1, nzilyr + call gather_global(work_g1, a4Di(:,:,k,ic,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/1,1,k,ic,i_time/), & count=(/nx_global,ny_global,1, 1/)), & 'put var '//avail_hist_fields(n)%vname) - endif - enddo ! k - enddo ! ic + endif + enddo ! k + enddo ! ic endif - enddo ! num_avail_hist_fields_4Di + enddo ! num_avail_hist_fields_4Di - work_gr(:,:) = c0 - work_g1(:,:) = c0 + work_gr(:,:) = c0 + work_g1(:,:) = c0 - do n = n4Dicum+1, n4Dscum + do n = n4Dicum+1, n4Dscum nn = n - n4Dicum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & 'inq var '//avail_hist_fields(n)%vname) - endif - do ic = 1, ncat_hist - do k = 1, nzslyr - call gather_global(work_g1, a4Ds(:,:,k,ic,nn,:), & - master_task, distrb_info) - work_gr(:,:) = work_g1(:,:) - if (my_task == master_task) then + endif + do ic = 1, ncat_hist + do k = 1, nzslyr + call gather_global(work_g1, a4Ds(:,:,k,ic,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/1,1,k,ic,i_time/), & count=(/nx_global,ny_global,1, 1/)), & 'put var '//avail_hist_fields(n)%vname) - endif - enddo ! k - enddo ! ic + endif + enddo ! k + enddo ! ic endif - enddo ! num_avail_hist_fields_4Ds + enddo ! num_avail_hist_fields_4Ds - work_gr(:,:) = c0 - work_g1(:,:) = c0 + work_gr(:,:) = c0 + work_g1(:,:) = c0 - do n = n4Dscum+1, n4Dbcum + do n = n4Dscum+1, n4Dbcum nn = n - n4Dscum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then + if (my_task == master_task) then call check(nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid), & 'inq varid '//avail_hist_fields(n)%vname) - endif - do ic = 1, ncat_hist - do k = 1, nzblyr - call gather_global(work_g1, a4Db(:,:,k,ic,nn,:), & - master_task, distrb_info) - work_gr(:,:) = work_g1(:,:) - if (my_task == master_task) then + endif + do ic = 1, ncat_hist + do k = 1, nzblyr + call gather_global(work_g1, a4Db(:,:,k,ic,nn,:), & + master_task, distrb_info) + work_gr(:,:) = work_g1(:,:) + if (my_task == master_task) then call check(nf90_put_var(ncid,varid,work_gr(:,:), & start=(/1,1,k,ic,i_time/), & count=(/nx_global,ny_global,1, 1/)), & 'put var '//avail_hist_fields(n)%vname) - endif - enddo ! k - enddo ! ic + endif + enddo ! k + enddo ! ic endif - enddo ! num_avail_hist_fields_4Db + enddo ! num_avail_hist_fields_4Db - deallocate(work_gr) - deallocate(work_g1) + deallocate(work_gr) + deallocate(work_g1) end subroutine write_3d_and_4d_variables @@ -1656,7 +1657,7 @@ subroutine write_coordinate_variables_parallel(ncid, coord_var, var_nz) call check(nf90_put_var(ncid, varid, (/(k, k=1, nzblyr)/)), & 'put var VGRDb') END SELECT - endif + endif enddo end subroutine write_coordinate_variables_parallel @@ -1971,4 +1972,4 @@ subroutine put_4d_with_blocks(ncid, i_time, var_name, len_3dim, len_4dim, data) end subroutine put_4d_with_blocks -end module ice_history_write \ No newline at end of file +end module ice_history_write diff --git a/mpi/ice_boundary.F90 b/mpi/ice_boundary.F90 index 9198dcf3..28fa1e73 100644 --- a/mpi/ice_boundary.F90 +++ b/mpi/ice_boundary.F90 @@ -593,6 +593,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & ! check to see if they need to be re-sized ! !----------------------------------------------------------------------- + maxTmp = maxval(sendCount) maxSizeSend = global_maxval(maxTmp, dist) maxTmp = maxval(recvCount) @@ -1191,6 +1192,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) elseif (mask(icel,jcel,abs(nblock)) /= 0) then tmpflag = .true. endif + if (tmpflag) then scnt = scnt + 1 if (scnt == 1) then @@ -1222,6 +1224,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) elseif (mask(icel,jcel,abs(nblock)) /= 0) then tmpflag = .true. 
endif + if (tmpflag) then scnt = scnt + 1 if (scnt == 1) then @@ -1586,6 +1589,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do + case default call abort_ice( & 'ice_HaloUpdate2DR8: Unknown field location') @@ -1980,6 +1984,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do + case default call abort_ice( & 'ice_HaloUpdate2DR4: Unknown field location') @@ -2374,6 +2379,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do + case default call abort_ice( & 'ice_HaloUpdate2DI4: Unknown field location') @@ -3742,6 +3748,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -3873,6 +3880,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & call abort_ice( & 'ice_HaloUpdate3DI4: Unknown field location') end select + endif !*** copy out of global tripole buffer into local @@ -4260,6 +4268,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -4399,6 +4408,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & call abort_ice( & 'ice_HaloUpdate4DR8: Unknown field location') end select + endif !*** copy out of global tripole buffer into local @@ -4787,6 +4797,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -4926,6 +4937,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & call abort_ice( & 'ice_HaloUpdate4DR4: Unknown field location') end select + endif !*** copy out of global tripole buffer into local @@ -5455,6 +5467,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & call abort_ice( & 'ice_HaloUpdate4DI4: Unknown field location') end select + endif !*** copy out of global tripole buffer into local @@ -5811,6 +5824,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & iSrc = iSrc - ioffset jSrc = jSrc - joffset if (iSrc == 0) iSrc = nxGlobal + !*** for center and Eface, do not need to replace !*** top row of physical domain, so jSrc should be !*** out of range and skipped diff --git a/source/ice_distribution.F90 b/source/ice_distribution.F90 index 1f46e40a..0a3a31e5 100644 --- a/source/ice_distribution.F90 +++ b/source/ice_distribution.F90 @@ -50,7 +50,6 @@ module ice_distribution ! 'slenderX1' (NPX x 1) ! 'slenderX2' (NPX x 2) -!ars599: 04042016: should we keep or not? Refer to fn: create_distrb_cart !ars599: 26032014: will call from cpl_interface ! from function create_distrb_cart ! so change to public diff --git a/source/ice_history.F90 b/source/ice_history.F90 index 5466e9cd..fbc34491 100644 --- a/source/ice_history.F90 +++ b/source/ice_history.F90 @@ -81,7 +81,7 @@ subroutine init_hist (dt) use ice_zbgc_shared, only: skl_bgc use ice_fileunits, only: goto_nml -#ifdef AusCOM +#ifdef ACCESS use cpl_parameters, only: do_scale_fluxes #endif @@ -246,7 +246,7 @@ subroutine init_hist (dt) if ( f_sisnthick /= 'x' ) call abort_ice("f_sisnthick not available, set to 'x'") #endif -#ifdef AusCOM +#ifdef ACCESS if ( .not. do_scale_fluxes ) then ! 
normal case is these are scaled in place to ice area average, ! however without do_scale_fluxes, these are grid cell averages @@ -1133,12 +1133,14 @@ subroutine init_hist (dt) call define_hist_field(n_sisnconc,"sisnconc","%",tstr2D, tcstr, & "Snow Area Percentage", & - "area weighted average of Percentage of the sea-ice surface that is covered by snow", c100, c0, & + "area weighted average of Percentage of the sea-ice surface that"//& + " is covered by snow", c100, c0, & ns1, f_sisnconc, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sisnthick,"sisnthick","m",tstr2D, tcstr, & "Snow Thickness", & - "area weighted average of actual thickness of snow over the snow-covered part of the sea ice", c1, c0, & + "area weighted average of actual thickness of snow over the "//& + "snow-covered part of the sea ice", c1, c0, & ns1, f_sisnthick, avg_ice_present=.true., mask_ice_free_points=.true.) call define_hist_field(n_sisnmass,"sisnmass","kg m^-2",tstr2D, tcstr, & @@ -2051,9 +2053,15 @@ subroutine accum_hist (dt) if (f_Tair (1:1) /= 'x') & call accum_hist_field(n_Tair, iblk, Tair(:,:,iblk), a2D) if (f_Tref (1:1) /= 'x') & +#ifdef ACCESS call accum_hist_field(n_Tref, iblk, Tref(:,:,iblk)*workb(:,:), a2D) if (f_Qref (1:1) /= 'x') & call accum_hist_field(n_Qref, iblk, Qref(:,:,iblk)*workb(:,:), a2D) +#else + call accum_hist_field(n_Tref, iblk, Tref(:,:,iblk), a2D) + if (f_Qref (1:1) /= 'x') & + call accum_hist_field(n_Qref, iblk, Qref(:,:,iblk), a2D) +#endif if (f_congel (1:1) /= 'x') & call accum_hist_field(n_congel, iblk, congel(:,:,iblk), a2D) if (f_frazil (1:1) /= 'x') & diff --git a/source/ice_history_shared.F90 b/source/ice_history_shared.F90 index a22330dd..cb74ec45 100644 --- a/source/ice_history_shared.F90 +++ b/source/ice_history_shared.F90 @@ -824,7 +824,9 @@ subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & if (vhistfreq(ns1:ns1) == histfreq(ns)) then if (ns1 > 1 .and. 
index(vhistfreq(1:ns1-1),'x') /= 0) then - call abort_ice(subname//' ERROR: history frequency variable f_' // vname // ' can''t contain ''x'' along with active frequencies') + call abort_ice(subname// & + ' ERROR: history frequency variable f_' // vname // & + ' can''t contain ''x'' along with active frequencies') endif num_avail_hist_fields_tot = num_avail_hist_fields_tot + 1 diff --git a/source/ice_step_mod.F90 b/source/ice_step_mod.F90 index 90369310..83d20120 100644 --- a/source/ice_step_mod.F90 +++ b/source/ice_step_mod.F90 @@ -185,7 +185,7 @@ subroutine step_therm1 (dt, iblk) fswthru, meltt, melts, meltb, meltl, congel, snoice, & merge_fluxes, evap_ice, evap_snow use ice_firstyear, only: update_FYarea - use ice_grid, only: lmask_n, lmask_s, TLAT, TLON + use ice_grid, only: lmask_n, lmask_s, TLAT, TLON, tmask use ice_itd, only: hi_min use ice_meltpond_cesm, only: compute_ponds_cesm use ice_meltpond_lvl, only: compute_ponds_lvl, ffracn, dhsn, & diff --git a/source/ice_therm_vertical.F90 b/source/ice_therm_vertical.F90 index 28c654b3..e9433082 100644 --- a/source/ice_therm_vertical.F90 +++ b/source/ice_therm_vertical.F90 @@ -29,8 +29,8 @@ module ice_therm_vertical nt_Tsfc, nt_iage, nt_sice, nt_qice, nt_qsno, & nt_apnd, nt_hpnd use ice_therm_shared, only: ktherm, ferrmax, heat_capacity, l_brine, & - calc_Tsfc, calculate_tin_from_qin, Tmin, Tsnice, & - cap_fluxes + calc_Tsfc, calculate_tin_from_qin, Tmin, Tsnice, & + cap_fluxes use ice_therm_bl99, only: temperature_changes use ice_therm_0layer, only: zerolayer_temperature use ice_flux, only: Tf @@ -285,7 +285,6 @@ subroutine thermo_vertical (nx_block, ny_block, & fcondbotn(i,j) = c0 evapn_ice(i,j)= c0 evapn_snow(i,j)=c0 - meltt (i,j) = c0 meltb (i,j) = c0 melts (i,j) = c0 From 7a40ca061345673c7904d992185b8557360d72d6 Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Thu, 18 Dec 2025 11:39:34 +1100 Subject: [PATCH 45/52] Support CMake Build for esm1.6 fix line truncation build failure --- .github/build-ci/data/standard.json | 1 + .../oneapi-cice5-cmake-esm1p6.spack.yaml.j2 | 16 ++++++++++++++++ ...l.j2 => oneapi-cice5-cmake-om2.spack.yaml.j2} | 0 CMakeLists.txt | 9 +++++++-- source/ice_therm_vertical.F90 | 3 ++- 5 files changed, 26 insertions(+), 3 deletions(-) create mode 100644 .github/build-ci/manifests/oneapi-cice5-cmake-esm1p6.spack.yaml.j2 rename .github/build-ci/manifests/{oneapi-cice5-cmake.spack.yaml.j2 => oneapi-cice5-cmake-om2.spack.yaml.j2} (100%) diff --git a/.github/build-ci/data/standard.json b/.github/build-ci/data/standard.json index b7bf6298..a516cf82 100644 --- a/.github/build-ci/data/standard.json +++ b/.github/build-ci/data/standard.json @@ -3,5 +3,6 @@ "intel_compiler": "intel@2021.10.0", "oneapi_compiler": "oneapi@2025.2.0", "om2_1deg": "build_system=cmake model=access-om2 nxglob=360 nyglob=300 blckx=15 blcky=300 mxblcks=1", + "esm1p6": "build_system=cmake model=access-esm1.6 nxglob=360 nyglob=300 blckx=30 blcky=300 mxblcks=1", "target": "x86_64" } \ No newline at end of file diff --git a/.github/build-ci/manifests/oneapi-cice5-cmake-esm1p6.spack.yaml.j2 b/.github/build-ci/manifests/oneapi-cice5-cmake-esm1p6.spack.yaml.j2 new file mode 100644 index 00000000..1a85b6f8 --- /dev/null +++ b/.github/build-ci/manifests/oneapi-cice5-cmake-esm1p6.spack.yaml.j2 @@ -0,0 +1,16 @@ +spack: + specs: + - 'cice5@git.{{ ref }}=stable {{ esm1p6 }} ^openmpi@4' + - 'cice5@git.{{ ref }}=stable {{ esm1p6 }} ^openmpi@5' + - 'cice5@git.{{ ref }}=stable build_type=Debug {{ esm1p6 }}' + packages: + gcc-runtime: + require: + '%gcc' + all: + 
require: + - '%{{ oneapi_compiler }}' + - 'target={{ target }}' + concretizer: + unify: false + view: false \ No newline at end of file diff --git a/.github/build-ci/manifests/oneapi-cice5-cmake.spack.yaml.j2 b/.github/build-ci/manifests/oneapi-cice5-cmake-om2.spack.yaml.j2 similarity index 100% rename from .github/build-ci/manifests/oneapi-cice5-cmake.spack.yaml.j2 rename to .github/build-ci/manifests/oneapi-cice5-cmake-om2.spack.yaml.j2 diff --git a/CMakeLists.txt b/CMakeLists.txt index cfb2f942..34013d04 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -131,7 +131,7 @@ endif() if(CICE_DRIVER MATCHES "auscom") add_compile_definitions(AusCOM coupled) elseif(CICE_DRIVER MATCHES "access") - add_compile_defitions(ACCESS AusCOM coupled) + add_compile_definitions(ACCESS AusCOM coupled) endif() #[==============================================================================[ @@ -169,8 +169,12 @@ set(CICE_DRIVER_SOURCE "${CMAKE_SOURCE_DIR}/drivers/${CICE_DRIVER}") add_executable(cice ${CICE_DRIVER_SOURCE}/CICE.F90) +if(CICE_DRIVER MATCHES "auscom") + find_package(LIBACCESSOM2) + target_link_libraries(cice PUBLIC libaccessom2::accessom2 ) +endif() + target_link_libraries(cice - PUBLIC libaccessom2::accessom2 PRIVATE MPI::MPI_Fortran PkgConfig::OASIS3PSMILE PkgConfig::OASIS3MCT PkgConfig::OASIS3MPEU PkgConfig::OASIS3SCRIP @@ -260,6 +264,7 @@ if(CICE_DRIVER MATCHES "access") ${CICE_DRIVER_SOURCE}/CICE_FinalMod.F90 ${CICE_DRIVER_SOURCE}/cpl_interface.F90 ${CICE_DRIVER_SOURCE}/ice_constants.F90 + ${CICE_DRIVER_SOURCE}/ice_coupling.F90 ${CICE_DRIVER_SOURCE}/CICE_InitMod.F90 ${CICE_DRIVER_SOURCE}/cpl_arrays_setup.F90 ${CICE_DRIVER_SOURCE}/cpl_netcdf_setup.F90 diff --git a/source/ice_therm_vertical.F90 b/source/ice_therm_vertical.F90 index e9433082..98c14728 100644 --- a/source/ice_therm_vertical.F90 +++ b/source/ice_therm_vertical.F90 @@ -1390,7 +1390,8 @@ end subroutine init_vertical_profile ! ! author Alex West, MOHC - subroutine cap_conductive_flux(nx_block,ny_block,my_task,icells,indxi,indxj,fcondtopn,fcondtopn_solve,fcondtopn_extra,hin,zTsn,zTin,hslyr) + subroutine cap_conductive_flux(nx_block,ny_block,my_task,icells,indxi,& + indxj,fcondtopn,fcondtopn_solve,fcondtopn_extra,hin,zTsn,zTin,hslyr) use ice_itd, only: hs_min From 442d45bee6ddf16f49cbc8c3d5e5dce8f450974b Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Thu, 18 Dec 2025 13:35:53 +1100 Subject: [PATCH 46/52] Remove unused files --- compile/comp_access-cm1440-185_ac330 | 238 --------------------------- compile/comp_access-cm1440-185_r47 | 238 --------------------------- 2 files changed, 476 deletions(-) delete mode 100755 compile/comp_access-cm1440-185_ac330 delete mode 100755 compile/comp_access-cm1440-185_r47 diff --git a/compile/comp_access-cm1440-185_ac330 b/compile/comp_access-cm1440-185_ac330 deleted file mode 100755 index a32f5c50..00000000 --- a/compile/comp_access-cm1440-185_ac330 +++ /dev/null @@ -1,238 +0,0 @@ -#! /bin/csh -f - -set echo on -#setenv DEBUG yes # set to yes for debug - -if ( $1 == '') then - echo '*** Please issue the command like ***' - echo ' > ./comp_auscom_cice.RJ.nP #nproc ' - echo 'here #proc is the number of cpu to be used for CICE5 (e.g. 1, 2, 4, 6...)' - set nproc = 192 - echo *** $nproc processors will be used to run CICE5... *** - sleep 3 - #exit -else - set nproc = $1 - echo *** $nproc processors will be used to run CICE5... *** -endif - -### Change these to your own site and user directory! 
-### You will need to create a Makefile Macro in bld -### Platform and its architecture ($HOST = xe) -setenv ARCH raijin-185 - -# Set AusCOM home: -setenv AusCOMHOME $cwd:h:h:h - -#---------------------------------------------------------------------- - -### Specialty code -setenv CAM_ICE no # set to yes for CAM runs (single column) -setenv SHRDIR csm_share # location of CCSM shared code -setenv IO_TYPE netcdf # set to none if netcdf library is unavailable - # set to pio for parallel netcdf -setenv DITTO no # reproducible diagnostics -setenv THRD no # set to yes for OpenMP threading -if ( $THRD == 'yes') setenv OMP_NUM_THREADS 2 # positive integer - -setenv ACCESS yes # set to yes for ACCESS -setenv AusCOM yes # set to yes for AusCOM -setenv OASIS3_MCT yes # oasis3-mct version -setenv CHAN MPI1 # MPI1 or MPI2 (always MPI1!) -setenv NICELYR 4 # number of vertical layers in the ice -setenv NSNWLYR 1 # number of vertical layers in the snow -setenv NICECAT 5 # number of ice thickness categories - -### Location of ACCESS system -setenv SYSTEMDIR $AusCOMHOME -echo SYSTEMDIR: $SYSTEMDIR - -### Location of this model (source) -setenv SRCDIR $cwd:h #$SYSTEMDIR/submodels/cice5.0.4 -echo SRCDIR: $SRCDIR - -source ${SRCDIR}/compile/environs.$ARCH # environment variables and loadable modules - -### Location and names of coupling libraries and inclusions -### Location and names of coupling libraries -#setenv CPLLIBDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/lib -setenv CPLLIBDIR /projects/access/apps/oasis3-mct/ompi185/lib -setenv CPLLIBS '-L$(CPLLIBDIR) -lpsmile.${CHAN} -lmct -lmpeu -lscrip' -#echo CPLLIBS: ${CPLLIBS} - -### Location of coupling inclusions -#setenv CPLINCDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/build/lib -setenv CPLINCDIR /projects/access/apps/oasis3-mct/ompi185/include -setenv CPL_INCS '-I$(CPLINCDIR)/psmile.$(CHAN) -I$(CPLINCDIR)/pio -I$(CPLINCDIR)/mct' -#echo CPL_INCS: $CPL_INCS - -### For multi-Layer ice (standard) configuration -setenv N_ILYR 1 # 4 for standard multi-layer ice. for ktherm=0, zero-layer thermodynamics - -### Location and name of the generated exectuable -setenv DATESTR `date +%Y%m%d` -setenv BINDIR $SYSTEMDIR/bin -setenv EXE cice_GC3GA7-cm1440-185.${DATESTR}_${nproc}p_${NICELYR}lyr - -### Where this model is compiled -setenv OBJDIR $SRCDIR/compile/build_${CHAN}_{$nproc}p-mct-185 -if !(-d $OBJDIR) mkdir -p $OBJDIR -#/bin/rm $OBJDIR/* -# - -### Grid resolution -#setenv GRID gx3 ; setenv RES 100x116 -#setenv GRID gx1 ; setenv RES 320x384 -#setenv GRID tx1 ; setenv RES 360x240 -#setenv GRID tp1 ; setenv RES 360x300 -setenv GRID tp1 ; setenv RES 1440x1080 - -set NXGLOB = `echo $RES | sed s/x.\*//` -set NYGLOB = `echo $RES | sed s/.\*x//` -echo NXGLOB: $NXGLOB -echo NYGLOB: $NYGLOB - -# Recommendations: -# NTASK equals nprocs in ice_in -# use processor_shape = slenderX1 or slenderX2 in ice_in -# one per processor with distribution_type='cartesian' or -# squarish blocks with distribution_type='rake' -# If BLCKX (BLCKY) does not divide NXGLOB (NYGLOB) evenly, padding -# will be used on the right (top) of the grid. 
-setenv NTASK $nproc -#setenv BLCKX 45 # x-dimension of blocks ( not including ) -#setenv BLCKY 38 # y-dimension of blocks ( ghost cells ) -setenv BLCKX `expr $NXGLOB / $nproc` -setenv BLCKY `expr $NYGLOB` -echo BLCKX: $BLCKX -echo BLCKY: $BLCKY - -echo -#24 : 12x2 -setenv BLCKX 120 -setenv BLCKY 540 -#144 : 16x9 -setenv BLCKX 90 -setenv BLCKY 120 -#16 : 8x2 -setenv BLCKX 180 -setenv BLCKY 540 -#16 : 4x4 -setenv BLCKX 360 -setenv BLCKY 270 -##216 : 24x9 -#setenv BLCKX 60 -#setenv BLCKY 120 -#192 : 16x12 -->square-ice -setenv BLCKX 90 -setenv BLCKY 90 -#192 : 96x2 -setenv BLCKX 15 -setenv BLCKY 540 - -# may need to increase MXBLCKS with rake distribution or padding -@ a = $NXGLOB * $NYGLOB ; @ b = $BLCKX * $BLCKY * $NTASK -@ m = $a / $b ; setenv MXBLCKS $m ; if ($MXBLCKS == 0) setenv MXBLCKS 1 -echo Autimatically generated: MXBLCKS = $MXBLCKS -##setenv MXBLCKS 8 # if necessary (code will print proper value) -#20110830: increase it to 12 as required by code: -# (but no clue why it never happened before!) -#setenv MXBLCKS 12 # if necessary (code will print proper value) - -########################################### -# ars599: 24032014 -# copy from /short/p66/ars599/CICE.v5.0/accice.v504_csiro -# solo_ice_comp -########################################### -### Tracers # match ice_in tracer_nml to conserve memory -setenv TRAGE 1 # set to 1 for ice age tracer -setenv TRFY 0 # set to 1 for first-year ice area tracer -setenv TRLVL 0 # set to 1 for level and deformed ice tracers -setenv TRPND 1 # set to 1 for melt pond tracers -setenv NTRAERO 0 # number of aerosol tracers - # (up to max_aero in ice_domain_size.F90) - # CESM uses 3 aerosol tracers -setenv TRBRI 0 # set to 1 for brine height tracer -setenv NBGCLYR 0 # number of zbgc layers -setenv TRBGCS 0 # number of skeletal layer bgc tracers - # TRBGCS=0 or 2<=TRBGCS<=9) - -### File unit numbers -setenv NUMIN 11 # minimum file unit number -setenv NUMAX 99 # maximum file unit number - -if ($IO_TYPE == 'netcdf') then - setenv IODIR io_netcdf -else if ($IO_TYPE == 'pio') then - setenv IODIR io_pio -else - setenv IODIR io_binary -endif - -########################################### - -setenv CBLD $SRCDIR/bld - -if ( $ARCH == 'UNICOS/mp') setenv ARCH UNICOS -if ( $ARCH == 'UNICOS') then - cp -f $CBLD/Makefile.$ARCH $CBLD/Makefile -else - cp -f $CBLD/Makefile.std $CBLD/Makefile -endif - -if ($NTASK == 1) then - setenv COMMDIR serial -else - setenv COMMDIR mpi -endif -echo COMMDIR: $COMMDIR - -if ($ACCESS == 'yes') then - setenv DRVDIR access -else - setenv DRVDIR cice -endif -echo DRVDIR: $DRVDIR - -cd $OBJDIR - -### List of source code directories (in order of importance). -cat >! Filepath << EOF -$SRCDIR/drivers/$DRVDIR -$SRCDIR/source -$SRCDIR/$COMMDIR -$SRCDIR/$IODIR -$SRCDIR/$SHRDIR -EOF - -if ( $ARCH == 'UNICOS.ORNL.phoenix' ) then - ### use -h command for phoenix - cc -o makdep -h command $CBLD/makdep.c || exit 2 -else if ( $ARCH == 'Linux.ORNL.jaguar' ) then - gcc -g -o makdep $CBLD/makdep.c || exit 2 -else - cc -o makdep $CBLD/makdep.c || exit 2 -endif - -setenv MACFILE $CBLD/Macros.Linux.${ARCH} - -gmake VPFILE=Filepath EXEC=$BINDIR/$EXE \ - NXGLOB=$NXGLOB NYGLOB=$NYGLOB \ - BLCKX=$BLCKX BLCKY=$BLCKY MXBLCKS=$MXBLCKS \ - -f $CBLD/Makefile MACFILE=$MACFILE || exit 2 - -cd .. 
-pwd -echo NTASK = $NTASK -echo "global N, block_size" -echo "x $NXGLOB, $BLCKX" -echo "y $NYGLOB, $BLCKY" -echo max_blocks = $MXBLCKS -echo $TRAGE = TRAGE, iage tracer -echo $TRFY = TRFY, first-year ice tracer -echo $TRLVL = TRLVL, level-ice tracers -echo $TRPND = TRPND, melt pond tracers -echo $NTRAERO = NTRAERO, number of aerosol tracers -echo $TRBRI = TRBRI, brine height tracer -echo $NBGCLYR = NBGCLYR, number of bio grid layers -echo $TRBGCS = TRBGCS, number of BGC tracers diff --git a/compile/comp_access-cm1440-185_r47 b/compile/comp_access-cm1440-185_r47 deleted file mode 100755 index f3e09f0d..00000000 --- a/compile/comp_access-cm1440-185_r47 +++ /dev/null @@ -1,238 +0,0 @@ -#! /bin/csh -f - -set echo on -#setenv DEBUG yes # set to yes for debug - -if ( $1 == '') then - echo '*** Please issue the command like ***' - echo ' > ./comp_auscom_cice.RJ.nP #nproc ' - echo 'here #proc is the number of cpu to be used for CICE5 (e.g. 1, 2, 4, 6...)' - set nproc = 192 - echo *** $nproc processors will be used to run CICE5... *** - sleep 3 - #exit -else - set nproc = $1 - echo *** $nproc processors will be used to run CICE5... *** -endif - -### Change these to your own site and user directory! -### You will need to create a Makefile Macro in bld -### Platform and its architecture ($HOST = xe) -setenv ARCH raijin-185 - -# Set AusCOM home: -setenv AusCOMHOME $cwd:h:h:h - -#---------------------------------------------------------------------- - -### Specialty code -setenv CAM_ICE no # set to yes for CAM runs (single column) -setenv SHRDIR csm_share # location of CCSM shared code -setenv IO_TYPE netcdf # set to none if netcdf library is unavailable - # set to pio for parallel netcdf -setenv DITTO no # reproducible diagnostics -setenv THRD no # set to yes for OpenMP threading -if ( $THRD == 'yes') setenv OMP_NUM_THREADS 2 # positive integer - -setenv ACCESS yes # set to yes for ACCESS -setenv AusCOM yes # set to yes for AusCOM -setenv OASIS3_MCT yes # oasis3-mct version -setenv CHAN MPI1 # MPI1 or MPI2 (always MPI1!) -setenv NICELYR 4 # number of vertical layers in the ice -setenv NSNWLYR 1 # number of vertical layers in the snow -setenv NICECAT 5 # number of ice thickness categories - -### Location of ACCESS system -setenv SYSTEMDIR $AusCOMHOME -echo SYSTEMDIR: $SYSTEMDIR - -### Location of this model (source) -setenv SRCDIR $cwd:h #$SYSTEMDIR/submodels/cice5.0.4 -echo SRCDIR: $SRCDIR - -source ${SRCDIR}/compile/environs.$ARCH # environment variables and loadable modules - -### Location and names of coupling libraries and inclusions -### Location and names of coupling libraries -#setenv CPLLIBDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/lib -setenv CPLLIBDIR /short/p66/hxy599/ACCESS/submodels/oasis3-mct_local/Linux-185_r47/lib -setenv CPLLIBS '-L$(CPLLIBDIR) -lpsmile.${CHAN} -lmct -lmpeu -lscrip' -#echo CPLLIBS: ${CPLLIBS} - -### Location of coupling inclusions -#setenv CPLINCDIR ~access/access-cm2/prebuild/oasis3-mct/Linux-182/build/lib -setenv CPLINCDIR /short/p66/hxy599/ACCESS/submodels/oasis3-mct_local/Linux-185_r47/build/lib -setenv CPL_INCS '-I$(CPLINCDIR)/psmile.$(CHAN) -I$(CPLINCDIR)/pio -I$(CPLINCDIR)/mct' -#echo CPL_INCS: $CPL_INCS - -### For multi-Layer ice (standard) configuration -setenv N_ILYR 1 # 4 for standard multi-layer ice. 
for ktherm=0, zero-layer thermodynamics - -### Location and name of the generated exectuable -setenv DATESTR `date +%Y%m%d` -setenv BINDIR $SYSTEMDIR/bin -setenv EXE cice_GC3GA7-cm1440-185.${DATESTR}_${nproc}p_${NICELYR}lyr - -### Where this model is compiled -setenv OBJDIR $SRCDIR/compile/build_${CHAN}_{$nproc}p-mct-185 -if !(-d $OBJDIR) mkdir -p $OBJDIR -#/bin/rm $OBJDIR/* -# - -### Grid resolution -#setenv GRID gx3 ; setenv RES 100x116 -#setenv GRID gx1 ; setenv RES 320x384 -#setenv GRID tx1 ; setenv RES 360x240 -#setenv GRID tp1 ; setenv RES 360x300 -setenv GRID tp1 ; setenv RES 1440x1080 - -set NXGLOB = `echo $RES | sed s/x.\*//` -set NYGLOB = `echo $RES | sed s/.\*x//` -echo NXGLOB: $NXGLOB -echo NYGLOB: $NYGLOB - -# Recommendations: -# NTASK equals nprocs in ice_in -# use processor_shape = slenderX1 or slenderX2 in ice_in -# one per processor with distribution_type='cartesian' or -# squarish blocks with distribution_type='rake' -# If BLCKX (BLCKY) does not divide NXGLOB (NYGLOB) evenly, padding -# will be used on the right (top) of the grid. -setenv NTASK $nproc -#setenv BLCKX 45 # x-dimension of blocks ( not including ) -#setenv BLCKY 38 # y-dimension of blocks ( ghost cells ) -setenv BLCKX `expr $NXGLOB / $nproc` -setenv BLCKY `expr $NYGLOB` -echo BLCKX: $BLCKX -echo BLCKY: $BLCKY - -echo -#24 : 12x2 -setenv BLCKX 120 -setenv BLCKY 540 -#144 : 16x9 -setenv BLCKX 90 -setenv BLCKY 120 -#16 : 8x2 -setenv BLCKX 180 -setenv BLCKY 540 -#16 : 4x4 -setenv BLCKX 360 -setenv BLCKY 270 -##216 : 24x9 -#setenv BLCKX 60 -#setenv BLCKY 120 -#192 : 16x12 -->square-ice -setenv BLCKX 90 -setenv BLCKY 90 -#192 : 96x2 -setenv BLCKX 15 -setenv BLCKY 540 - -# may need to increase MXBLCKS with rake distribution or padding -@ a = $NXGLOB * $NYGLOB ; @ b = $BLCKX * $BLCKY * $NTASK -@ m = $a / $b ; setenv MXBLCKS $m ; if ($MXBLCKS == 0) setenv MXBLCKS 1 -echo Autimatically generated: MXBLCKS = $MXBLCKS -##setenv MXBLCKS 8 # if necessary (code will print proper value) -#20110830: increase it to 12 as required by code: -# (but no clue why it never happened before!) 
-#setenv MXBLCKS 12 # if necessary (code will print proper value) - -########################################### -# ars599: 24032014 -# copy from /short/p66/ars599/CICE.v5.0/accice.v504_csiro -# solo_ice_comp -########################################### -### Tracers # match ice_in tracer_nml to conserve memory -setenv TRAGE 1 # set to 1 for ice age tracer -setenv TRFY 0 # set to 1 for first-year ice area tracer -setenv TRLVL 0 # set to 1 for level and deformed ice tracers -setenv TRPND 1 # set to 1 for melt pond tracers -setenv NTRAERO 0 # number of aerosol tracers - # (up to max_aero in ice_domain_size.F90) - # CESM uses 3 aerosol tracers -setenv TRBRI 0 # set to 1 for brine height tracer -setenv NBGCLYR 0 # number of zbgc layers -setenv TRBGCS 0 # number of skeletal layer bgc tracers - # TRBGCS=0 or 2<=TRBGCS<=9) - -### File unit numbers -setenv NUMIN 11 # minimum file unit number -setenv NUMAX 99 # maximum file unit number - -if ($IO_TYPE == 'netcdf') then - setenv IODIR io_netcdf -else if ($IO_TYPE == 'pio') then - setenv IODIR io_pio -else - setenv IODIR io_binary -endif - -########################################### - -setenv CBLD $SRCDIR/bld - -if ( $ARCH == 'UNICOS/mp') setenv ARCH UNICOS -if ( $ARCH == 'UNICOS') then - cp -f $CBLD/Makefile.$ARCH $CBLD/Makefile -else - cp -f $CBLD/Makefile.std $CBLD/Makefile -endif - -if ($NTASK == 1) then - setenv COMMDIR serial -else - setenv COMMDIR mpi -endif -echo COMMDIR: $COMMDIR - -if ($ACCESS == 'yes') then - setenv DRVDIR access -else - setenv DRVDIR cice -endif -echo DRVDIR: $DRVDIR - -cd $OBJDIR - -### List of source code directories (in order of importance). -cat >! Filepath << EOF -$SRCDIR/drivers/$DRVDIR -$SRCDIR/source -$SRCDIR/$COMMDIR -$SRCDIR/$IODIR -$SRCDIR/$SHRDIR -EOF - -if ( $ARCH == 'UNICOS.ORNL.phoenix' ) then - ### use -h command for phoenix - cc -o makdep -h command $CBLD/makdep.c || exit 2 -else if ( $ARCH == 'Linux.ORNL.jaguar' ) then - gcc -g -o makdep $CBLD/makdep.c || exit 2 -else - cc -o makdep $CBLD/makdep.c || exit 2 -endif - -setenv MACFILE $CBLD/Macros.Linux.${ARCH} - -gmake VPFILE=Filepath EXEC=$BINDIR/$EXE \ - NXGLOB=$NXGLOB NYGLOB=$NYGLOB \ - BLCKX=$BLCKX BLCKY=$BLCKY MXBLCKS=$MXBLCKS \ - -f $CBLD/Makefile MACFILE=$MACFILE || exit 2 - -cd .. -pwd -echo NTASK = $NTASK -echo "global N, block_size" -echo "x $NXGLOB, $BLCKX" -echo "y $NYGLOB, $BLCKY" -echo max_blocks = $MXBLCKS -echo $TRAGE = TRAGE, iage tracer -echo $TRFY = TRFY, first-year ice tracer -echo $TRLVL = TRLVL, level-ice tracers -echo $TRPND = TRPND, melt pond tracers -echo $NTRAERO = NTRAERO, number of aerosol tracers -echo $TRBRI = TRBRI, brine height tracer -echo $NBGCLYR = NBGCLYR, number of bio grid layers -echo $TRBGCS = TRBGCS, number of BGC tracers From 5586d4dd0dfb249cdef4197fb554f964ee831a9e Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Thu, 18 Dec 2025 13:36:11 +1100 Subject: [PATCH 47/52] Sync namelist error handling between ACCESS & AUSCOM --- drivers/auscom/cpl_parameters.F90 | 55 ++++++++++++++++++++++++++----- 1 file changed, 46 insertions(+), 9 deletions(-) diff --git a/drivers/auscom/cpl_parameters.F90 b/drivers/auscom/cpl_parameters.F90 index dd2423c0..2aa23ab7 100644 --- a/drivers/auscom/cpl_parameters.F90 +++ b/drivers/auscom/cpl_parameters.F90 @@ -6,6 +6,12 @@ module cpl_parameters implicit none +#ifdef __INTEL_COMPILER + ! for intel runtime errors + ! 
see https://www.intel.com/content/www/us/en/docs/fortran-compiler/developer-guide-reference/2025-2/list-of-runtime-error-messages.html + include "for_iosdef.for" +#endif + integer(kind=int_kind) :: nt_cells ! nx_global x ny_global integer, parameter :: MAX_COUPLING_FIELDS = 32 @@ -94,10 +100,12 @@ module cpl_parameters subroutine read_namelist_parameters() - use ice_exit - use ice_fileunits + use ice_exit, only: abort_ice + use ice_fileunits, only: nu_nml, ice_stderr, ice_stdout, get_fileunit, release_fileunit + use ice_communicate, only: my_task, master_task integer (int_kind) :: nml_error, i + character (len=256) :: errstr, tmpstr ! For holding namelist read errors do i=1, MAX_COUPLING_FIELDS fields_from_atm(i) = char(0) @@ -106,24 +114,53 @@ subroutine read_namelist_parameters() enddo ! all processors read the namelist - call get_fileunit(nu_nml) - open(unit=nu_nml,file="input_ice.nml",form="formatted",status="old",iostat=nml_error) + call get_fileunit(nu_nml) + open(unit=nu_nml,file="input_ice.nml",form="formatted",status="old",iostat=nml_error, iomsg=errstr) + ! + if (my_task == master_task) then + write(ice_stdout,*)'CICE: input_ice.nml opened at unit = ', nu_nml + endif + ! if (nml_error /= 0) then - nml_error = -1 + write(tmpstr, '(a,i3,a)') 'CICE: ERROR failed to open input_ice.nml. Error code: ', nml_error, & + ' - ' // trim(errstr) + call abort_ice(trim(tmpstr)) else - nml_error = 1 + nml_error = 1 endif + do while (nml_error > 0) - read(nu_nml, nml=coupling_nml,iostat=nml_error) - if (nml_error > 0) read(nu_nml,*) ! for Nagware compiler + read(nu_nml, nml=coupling_nml,iostat=nml_error,iomsg=errstr) + ! check if error + if (nml_error /= 0) then + if (my_task == master_task) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt=*) tmpstr +#ifdef __INTEL_COMPILER + if (nml_error == FOR$IOS_INVREFVAR) then + write(ice_stderr,*)'CICE: Invalid reference to variable '//trim(tmpstr) + write(ice_stderr,*)'CICE: is '//trim(tmpstr)//' deprecated ?' 
+ endif +#endif + call abort_ice('CICE ERROR in input_ice.nml when' // & + ' reading ' // trim(tmpstr) // ' - ' //errstr) + endif + endif end do if (nml_error == 0) close(nu_nml) + if (my_task == master_task) then + write(6,coupling_nml) + endif + call release_fileunit(nu_nml) if (nml_error /= 0) then - call abort_ice('ice: error reading coupling_nml') + if (my_task == master_task) then + call abort_ice('ice: error reading coupling namelist in "input_ice.nml"') + endif endif num_fields_from_atm = 0 From 99ce18a02bcfbbf676c89b49e1a099325df54818 Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Thu, 8 Jan 2026 11:03:07 +1100 Subject: [PATCH 48/52] Use double precision for time diagnostic --- drivers/access/CICE_InitMod.F90 | 11 +++-------- drivers/access/cpl_forcing_handler.F90 | 1 + io_netcdf/ice_history_write.F90 | 4 ++-- io_pio/ice_history_write.F90 | 4 ++-- 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/access/CICE_InitMod.F90 b/drivers/access/CICE_InitMod.F90 index b8f4fc64..f382f87a 100644 --- a/drivers/access/CICE_InitMod.F90 +++ b/drivers/access/CICE_InitMod.F90 @@ -67,7 +67,9 @@ subroutine cice_init use ice_algae, only: get_forcing_bgc use ice_calendar, only: dt, dt_dyn, time, istep, istep1, write_ic, & init_calendar, calendar, idate, month - use ice_communicate, only: MPI_COMM_ICE, init_communicate + use ice_communicate, only: MPI_COMM_ICE + use ice_communicate, only: init_communicate + use ice_coupling, only: top_layer_Tandk_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_dyn_eap, only: init_eap @@ -95,9 +97,6 @@ subroutine cice_init #ifdef popcice use drv_forcing, only: sst_sss #endif -#ifdef ACCESS - use ice_coupling, only: top_layer_Tandk_init -#endif #ifdef AusCOM integer(kind=int_kind) :: idate_save @@ -367,16 +366,12 @@ subroutine init_restart call restartfile() ! given by pointer in ice_in !ars599: 11042014: markout call calendar ! according to dhb599 initmod at cice4.1_fm -#ifdef ACCESS ! TODO: 'time' argument in the calendar call below affects frz_onset output. ! Hardcoding time=0.0 gives the same results as the older CM2 based calendar ! initialisation. ! See https://github.com/ACCESS-NRI/cice5/issues/49 ! and https://github.com/ACCESS-NRI/cice5/pull/48 for details. call calendar(0.0) ! update time parameters -#else - call calendar(time) ! update time parameters -#endif if (kdyn == 2) call read_restart_eap ! EAP else if (restart) then ! ice_ic = core restart file call restartfile (ice_ic) ! or 'default' or 'none' diff --git a/drivers/access/cpl_forcing_handler.F90 b/drivers/access/cpl_forcing_handler.F90 index 08d502a6..85660c6d 100644 --- a/drivers/access/cpl_forcing_handler.F90 +++ b/drivers/access/cpl_forcing_handler.F90 @@ -150,6 +150,7 @@ subroutine read_access_a2i_data(fname,nrec,istep) call ice_read_nc(ncid, nrec, 'press_i', um_press, dbug) call ice_read_nc(ncid, nrec, 'co2_ai', um_co2, dbug) call ice_read_nc(ncid, nrec, 'wnd_ai', um_wnd, dbug) + if (my_task == master_task) call ice_close_nc(ncid) else if (my_task==0) then diff --git a/io_netcdf/ice_history_write.F90 b/io_netcdf/ice_history_write.F90 index e382020a..9dd448e4 100644 --- a/io_netcdf/ice_history_write.F90 +++ b/io_netcdf/ice_history_write.F90 @@ -85,7 +85,7 @@ subroutine ice_write_hist (ns) ! 
local variables - real (kind=real_kind) :: ltime !history timestamp in days + real (kind=dbl_kind) :: ltime !history timestamp in days character (char_len_long) :: ncfile(max_nstrm) !filenames character (char_len) :: time_string !model time for logging logical :: file_exists @@ -311,7 +311,7 @@ subroutine ice_hist_create(ns, ncfile, ncid, var, coord_var, var_nverts, var_nz) ! define coordinate variables !----------------------------------------------------------------- - call check(nf90_def_var(ncid,'time',nf90_float,timid,varid), & + call check(nf90_def_var(ncid,'time',nf90_double,timid,varid), & 'def var time') call check(nf90_put_att(ncid,varid,'long_name','model time'), & 'put_att long_name') diff --git a/io_pio/ice_history_write.F90 b/io_pio/ice_history_write.F90 index 8e233ec7..a135ab6d 100644 --- a/io_pio/ice_history_write.F90 +++ b/io_pio/ice_history_write.F90 @@ -78,7 +78,7 @@ subroutine ice_write_hist (ns) ! local variables - real (kind=real_kind) :: ltime !history timestamp in days + real (kind=dbl_kind) :: ltime !history timestamp in days character (char_len_long) :: ncfile(max_nstrm), filename !filenames character (char_len) :: time_string !model time for logging logical :: file_exists @@ -286,7 +286,7 @@ subroutine ice_hist_create(ns, ncfile, File, var, coord_var, var_nverts, var_nz) ! define coordinate variables !----------------------------------------------------------------- - call ice_pio_check(pio_def_var(File,'time',pio_real,(/timid/),varid), & + call ice_pio_check(pio_def_var(File,'time',pio_double,(/timid/),varid), & 'def var time') call ice_pio_check(pio_put_att(File,varid,'long_name','model time'), & 'put_att long_name') From 8031135e582c890e04fd9fc6230c3def9c0acbc4 Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Fri, 9 Jan 2026 14:17:37 +1100 Subject: [PATCH 49/52] Fix ordering of setting aicenmin --- source/ice_init.F90 | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/source/ice_init.F90 b/source/ice_init.F90 index 52cb3b04..fdb2903e 100644 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -503,14 +503,16 @@ subroutine input_data endif call release_fileunit(nu_nml) + + if (aicenmin == 99) then + aicenmin = puny #ifdef ACCESS - if (ktherm == 1 .and. aicenmin == 99) then - !Set a higher value - ! of aicenmin if we're using multilayers with UM-style coupling for stability. - aicenmin = 0.00001_dbl_kind - endif + if (ktherm == 1 .and. aicenmin == 99) then + ! Set a higher value of aicenmin if we're using multilayers with UM-style coupling for stability. + aicenmin = 0.00001_dbl_kind + endif #endif - if (aicenmin == 99) aicenmin = puny + endif !----------------------------------------------------------------- ! set up diagnostics output and resolve conflicts From f04d945d9c70b3c795f72b9e86618eeaea8fac93 Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Mon, 12 Jan 2026 13:22:14 +1100 Subject: [PATCH 50/52] Turn off per process logging - not used --- source/ice_init.F90 | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/source/ice_init.F90 b/source/ice_init.F90 index fdb2903e..74f49de7 100644 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -533,7 +533,7 @@ subroutine input_data end if else ! each task gets unique ice log filename when if test is true, for debugging - if (1 == 0) then + if ( .false. 
) then call get_fileUnit(nu_diag) write(str,'(a,i4.4)') "ice.log.task_",my_task open(nu_diag,file=str) @@ -551,16 +551,23 @@ subroutine input_data write(ice_stdout,*) 'Diagnostic output will be in file ',diag_file open (nu_diag, file=diag_file, status='unknown') endif + write(nu_diag,*) '--------------------------------' + write(nu_diag,*) ' CICE model diagnostic output ' + write(nu_diag,*) '--------------------------------' + write(nu_diag,*) ' ' else - ! each task gets unique ice log filename. - call get_fileUnit(nu_diag) - write(str,'(a,i4.4)') "ice.log.task_",my_task - open(nu_diag,file=str) + ! each task gets unique ice log filename when if test is true, for debugging + if ( .false. ) then + call get_fileUnit(nu_diag) + write(str,'(a,i4.4)') "ice.log.task_",my_task + open(nu_diag,file=str) + write(nu_diag,*) '--------------------------------' + write(nu_diag,*) ' CICE model diagnostic output ' + write(nu_diag,*) '--------------------------------' + write(nu_diag,*) ' ' + endif endif - write(nu_diag,*) '--------------------------------' - write(nu_diag,*) ' CICE model diagnostic output ' - write(nu_diag,*) '--------------------------------' - write(nu_diag,*) ' ' + if (trim(runtype) == 'continue') restart = .true. if (trim(runtype) /= 'continue' .and. (restart)) then From eb00a6d506132dadced9eecc4f3cd489c9e1dc9d Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Mon, 12 Jan 2026 15:15:49 +1100 Subject: [PATCH 51/52] Reinstate elevated limits for thermo errors in esm1.6 --- source/ice_atmo.F90 | 2 +- source/ice_therm_vertical.F90 | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/source/ice_atmo.F90 b/source/ice_atmo.F90 index 5ad4d07f..4fbcc030 100644 --- a/source/ice_atmo.F90 +++ b/source/ice_atmo.F90 @@ -921,7 +921,7 @@ subroutine neutral_drag_coeffs (nx_block, ny_block, & Cdn_atm_rdg(i,j) = ai * ctecar*tmp1/distrdg(i,j)*sca* & (log(tmp1/iceruf)/log(zref/iceruf))**c2 #else - Cdn_atm_rdg(i,j) = ai * ctecar*tmp1/distrdg(i,j)*sca* & + Cdn_atm_rdg(i,j) = ctecar*tmp1/distrdg(i,j)*sca* & (log(tmp1*icerufi)/log(zref*icerufi))**c2 #endif Cdn_atm_rdg(i,j) = min(Cdn_atm_rdg(i,j),camax) diff --git a/source/ice_therm_vertical.F90 b/source/ice_therm_vertical.F90 index 98c14728..6f37ccee 100644 --- a/source/ice_therm_vertical.F90 +++ b/source/ice_therm_vertical.F90 @@ -2577,7 +2577,16 @@ subroutine conservation_check_vthermo(nx_block, ny_block, & ferr = abs(efinal(ij)-einit(ij)-einp) / dt if (ferr > ferrmax) then +#ifdef ACCESS + if (ferr > 10.0 * ferrmax) then + l_stop = .true. + write(nu_diag,*) 'BBB: TOO BAD --- CICE is to stop!' + else + write(nu_diag,*) 'BBB: WARNING -- too big flux error --' + endif +#else l_stop = .true. +#endif istop = i jstop = j From af615945466579d009b41251053387efd807651e Mon Sep 17 00:00:00 2001 From: anton-seaice Date: Mon, 12 Jan 2026 15:18:23 +1100 Subject: [PATCH 52/52] Send all nu_diag logging to one file --- source/ice_init.F90 | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/source/ice_init.F90 b/source/ice_init.F90 index 74f49de7..6a7bbb57 100644 --- a/source/ice_init.F90 +++ b/source/ice_init.F90 @@ -546,21 +546,22 @@ subroutine input_data if (trim(diag_type) == 'file') call get_fileunit(nu_diag) #endif - if (my_task == master_task) then - if (trim(diag_type) == 'file') then - write(ice_stdout,*) 'Diagnostic output will be in file ',diag_file - open (nu_diag, file=diag_file, status='unknown') - endif + ! each task gets unique ice log filename when if test is true, for debugging + if ( .false. 
) then + call get_fileUnit(nu_diag) + write(str,'(a,i4.4)') "ice.log.task_",my_task + open(nu_diag,file=str) write(nu_diag,*) '--------------------------------' write(nu_diag,*) ' CICE model diagnostic output ' write(nu_diag,*) '--------------------------------' write(nu_diag,*) ' ' else - ! each task gets unique ice log filename when if test is true, for debugging - if ( .false. ) then - call get_fileUnit(nu_diag) - write(str,'(a,i4.4)') "ice.log.task_",my_task - open(nu_diag,file=str) + ! shared diag file + if (trim(diag_type) == 'file') then + write(ice_stdout,*) 'Diagnostic output will be in file ',diag_file + open (nu_diag, file=diag_file, status='unknown') + endif + if (my_task == master_task) then write(nu_diag,*) '--------------------------------' write(nu_diag,*) ' CICE model diagnostic output ' write(nu_diag,*) '--------------------------------'