Repository: CliMA/ClimateMachine.jl Branch: master Commit: 2e0b6b7d9771 Files: 670 Total size: 4.3 MB Directory structure: gitextract_rqfc0vw8/ ├── .buildkite/ │ ├── docs-pipeline.yml │ └── pipeline.yml ├── .codecov.yml ├── .dev/ │ ├── .gitignore │ ├── Project.toml │ ├── clima_formatter_default_image.jl │ ├── clima_formatter_image.jl │ ├── clima_formatter_options.jl │ ├── climaformat.jl │ ├── hooks/ │ │ ├── pre-commit │ │ └── pre-commit.sysimage │ ├── precompile.jl │ └── systemimage/ │ └── climate_machine_image.jl ├── .github/ │ ├── issue_template.md │ ├── pull_request_template.md │ └── workflows/ │ ├── CompatHelper.yml │ ├── Coverage.yaml │ ├── DocCleanup.yml │ ├── Documenter.yaml │ ├── JuliaFormatter.yml │ ├── Linux-UnitTests.yml │ ├── OS-UnitTests.yml │ ├── PR-Comment.yml │ └── doc_build_common_error_messages.md ├── .gitignore ├── LICENSE.md ├── Manifest.toml ├── Project.toml ├── README.md ├── bors.toml ├── docs/ │ ├── Manifest.toml │ ├── Project.toml │ ├── bibliography.bib │ ├── clean_build_folder.jl │ ├── list_of_apis.jl │ ├── list_of_dev_docs.jl │ ├── list_of_getting_started_docs.jl │ ├── list_of_how_to_guides.jl │ ├── list_of_theory_docs.jl │ ├── list_of_tutorials.jl │ ├── make.jl │ ├── pages_helper.jl │ ├── plothelpers.jl │ └── src/ │ ├── APIs/ │ │ ├── Arrays/ │ │ │ └── Arrays.md │ │ ├── Atmos/ │ │ │ └── AtmosModel.md │ │ ├── BalanceLaws/ │ │ │ ├── BalanceLaws.md │ │ │ └── Problems.md │ │ ├── Common/ │ │ │ ├── CartesianDomains.md │ │ │ ├── CartesianFields.md │ │ │ ├── Orientations.md │ │ │ ├── Spectra.md │ │ │ ├── TurbulenceClosures.md │ │ │ └── TurbulenceConvection.md │ │ ├── Diagnostics/ │ │ │ ├── Diagnostics.md │ │ │ ├── DiagnosticsMachine.md │ │ │ ├── StateCheck.md │ │ │ └── StdDiagnostics.md │ │ ├── Driver/ │ │ │ ├── Checkpoint.md │ │ │ └── index.md │ │ ├── InputOutput/ │ │ │ └── index.md │ │ ├── Land/ │ │ │ ├── LandModel.md │ │ │ ├── RadiativeEnergyFlux.md │ │ │ ├── Runoff.md │ │ │ ├── SoilHeatParameterizations.md │ │ │ ├── 
SoilWaterParameterizations.md │ │ │ └── SurfaceFlow.md │ │ ├── Numerics/ │ │ │ ├── DGMethods/ │ │ │ │ ├── Courant.md │ │ │ │ ├── DGMethods.md │ │ │ │ ├── FVReconstructions.md │ │ │ │ └── NumericalFluxes.md │ │ │ ├── Meshes/ │ │ │ │ └── Mesh.md │ │ │ ├── ODESolvers/ │ │ │ │ └── ODESolvers.md │ │ │ └── SystemSolvers/ │ │ │ └── SystemSolvers.md │ │ ├── Ocean/ │ │ │ └── Ocean.md │ │ ├── Utilities/ │ │ │ ├── SingleStackUtils.md │ │ │ ├── TicToc.md │ │ │ └── VariableTemplates.md │ │ └── index.md │ ├── Contributing.md │ ├── DevDocs/ │ │ ├── AcceptableUnicode.md │ │ ├── CodeStyle.md │ │ ├── DiagnosticVariableList.md │ │ ├── ModelOutput.md │ │ ├── ModelVariableList.md │ │ └── SystemImage.md │ ├── GettingStarted/ │ │ ├── Atmos.md │ │ ├── Installation.md │ │ ├── RunningClimateMachine.md │ │ └── Terminology.md │ ├── HowToGuides/ │ │ ├── Atmos/ │ │ │ ├── AtmosReferenceState.md │ │ │ ├── MoistureAndPrecip.md │ │ │ ├── MoistureModelChoices.md │ │ │ └── PrecipitationModelChoices.md │ │ ├── BalanceLaws/ │ │ │ └── how_to_make_a_balance_law.md │ │ ├── Diagnostics/ │ │ │ └── UsingDiagnostics.md │ │ ├── Land/ │ │ │ └── index.md │ │ ├── Numerics/ │ │ │ ├── Meshes/ │ │ │ │ └── index.md │ │ │ ├── ODESolvers/ │ │ │ │ └── Timestepping.md │ │ │ └── SystemSolvers/ │ │ │ └── IterativeSolvers.md │ │ └── Ocean/ │ │ └── index.md │ ├── References.md │ ├── Theory/ │ │ ├── Atmos/ │ │ │ ├── AtmosModel.md │ │ │ ├── EDMFEquations.md │ │ │ ├── EDMF_plots.md │ │ │ ├── Microphysics_0M.md │ │ │ ├── Microphysics_1M.md │ │ │ └── Model/ │ │ │ └── tracers.md │ │ └── Common/ │ │ └── Turbulence.md │ └── index.md ├── experiments/ │ ├── AtmosGCM/ │ │ ├── GCMDriver/ │ │ │ ├── GCMDriver.jl │ │ │ ├── baroclinicwave_problem.jl │ │ │ ├── gcm_base_states.jl │ │ │ ├── gcm_bcs.jl │ │ │ ├── gcm_moisture_profiles.jl │ │ │ ├── gcm_perturbations.jl │ │ │ ├── gcm_sources.jl │ │ │ └── heldsuarez_problem.jl │ │ ├── heldsuarez.jl │ │ ├── moist_baroclinic_wave_bulksfcflux.jl │ │ └── nonhydrostatic_gravity_wave.jl │ ├── AtmosLES/ │ 
│ ├── Artifacts.toml │ │ ├── bomex_les.jl │ │ ├── bomex_model.jl │ │ ├── bomex_single_stack.jl │ │ ├── cfsite_hadgem2-a_07_amip.jl │ │ ├── convective_bl_les.jl │ │ ├── convective_bl_model.jl │ │ ├── dycoms.jl │ │ ├── ekman_layer_model.jl │ │ ├── rising_bubble_bryan.jl │ │ ├── rising_bubble_theta_formulation.jl │ │ ├── schar_scalar_advection.jl │ │ ├── squall_line.jl │ │ ├── stable_bl_les.jl │ │ ├── stable_bl_model.jl │ │ ├── surfacebubble.jl │ │ └── taylor_green.jl │ ├── OceanBoxGCM/ │ │ ├── homogeneous_box.jl │ │ ├── ocean_gyre.jl │ │ └── simple_box.jl │ ├── OceanSplitExplicit/ │ │ └── simple_box.jl │ └── TestCase/ │ ├── baroclinic_wave.jl │ ├── baroclinic_wave_fvm.jl │ ├── isothermal_zonal_flow.jl │ ├── risingbubble.jl │ ├── risingbubble_fvm.jl │ ├── solid_body_rotation.jl │ ├── solid_body_rotation_fvm.jl │ └── solid_body_rotation_mountain.jl ├── src/ │ ├── Arrays/ │ │ ├── CMBuffers.jl │ │ └── MPIStateArrays.jl │ ├── Atmos/ │ │ ├── Model/ │ │ │ ├── AtmosModel.jl │ │ │ ├── atmos_tendencies.jl │ │ │ ├── bc_energy.jl │ │ │ ├── bc_initstate.jl │ │ │ ├── bc_moisture.jl │ │ │ ├── bc_momentum.jl │ │ │ ├── bc_precipitation.jl │ │ │ ├── bc_tracer.jl │ │ │ ├── boundaryconditions.jl │ │ │ ├── courant.jl │ │ │ ├── declare_prognostic_vars.jl │ │ │ ├── energy.jl │ │ │ ├── filters.jl │ │ │ ├── get_prognostic_vars.jl │ │ │ ├── linear.jl │ │ │ ├── linear_atmos_tendencies.jl │ │ │ ├── linear_tendencies.jl │ │ │ ├── lsforcing.jl │ │ │ ├── moisture.jl │ │ │ ├── multiphysics_types.jl │ │ │ ├── precipitation.jl │ │ │ ├── problem.jl │ │ │ ├── prog_prim_conversion.jl │ │ │ ├── projections.jl │ │ │ ├── radiation.jl │ │ │ ├── reconstructions.jl │ │ │ ├── ref_state.jl │ │ │ ├── tendencies_energy.jl │ │ │ ├── tendencies_mass.jl │ │ │ ├── tendencies_moisture.jl │ │ │ ├── tendencies_momentum.jl │ │ │ ├── tendencies_precipitation.jl │ │ │ ├── tendencies_tracers.jl │ │ │ ├── thermo_states.jl │ │ │ ├── thermo_states_anelastic.jl │ │ │ └── tracers.jl │ │ ├── Parameterizations/ │ │ │ ├── 
GravityWaves/ │ │ │ │ └── README.md │ │ │ ├── README.md │ │ │ └── Radiation/ │ │ │ └── README.md │ │ └── TemperatureProfiles/ │ │ └── TemperatureProfiles.jl │ ├── BalanceLaws/ │ │ ├── BalanceLaws.jl │ │ ├── Problems.jl │ │ ├── boundaryconditions.jl │ │ ├── interface.jl │ │ ├── kernels.jl │ │ ├── prog_prim_conversion.jl │ │ ├── show_tendencies.jl │ │ ├── state_types.jl │ │ ├── sum_tendencies.jl │ │ ├── tendency_types.jl │ │ └── vars_wrappers.jl │ ├── ClimateMachine.jl │ ├── Common/ │ │ ├── CartesianDomains/ │ │ │ ├── CartesianDomains.jl │ │ │ └── rectangular_domain.jl │ │ ├── CartesianFields/ │ │ │ ├── CartesianFields.jl │ │ │ ├── rectangular_element.jl │ │ │ └── rectangular_spectral_element_fields.jl │ │ ├── Orientations/ │ │ │ └── Orientations.jl │ │ ├── Spectra/ │ │ │ ├── Spectra.jl │ │ │ ├── power_spectrum_gcm.jl │ │ │ ├── power_spectrum_les.jl │ │ │ └── spherical_helper.jl │ │ ├── SurfaceFluxes/ │ │ │ └── README.md │ │ ├── TurbulenceClosures/ │ │ │ └── TurbulenceClosures.jl │ │ └── TurbulenceConvection/ │ │ ├── TurbulenceConvection.jl │ │ ├── boundary_conditions.jl │ │ └── source.jl │ ├── Diagnostics/ │ │ ├── Debug/ │ │ │ └── StateCheck.jl │ │ ├── Diagnostics.jl │ │ ├── DiagnosticsMachine/ │ │ │ ├── DiagnosticsMachine.jl │ │ │ ├── atmos_diagnostic_funs.jl │ │ │ ├── group_gen.jl │ │ │ ├── groups.jl │ │ │ ├── horizontal_average.jl │ │ │ ├── onetime.jl │ │ │ ├── pointwise.jl │ │ │ └── variables.jl │ │ ├── StdDiagnostics/ │ │ │ ├── StdDiagnostics.jl │ │ │ ├── atmos_gcm_default.jl │ │ │ ├── atmos_gcm_diagnostic_vars.jl │ │ │ ├── atmos_les_default.jl │ │ │ └── atmos_les_diagnostic_vars.jl │ │ ├── atmos_common.jl │ │ ├── atmos_gcm_default.jl │ │ ├── atmos_gcm_spectra.jl │ │ ├── atmos_les_core.jl │ │ ├── atmos_les_default.jl │ │ ├── atmos_les_default_perturbations.jl │ │ ├── atmos_les_spectra.jl │ │ ├── atmos_mass_energy_loss.jl │ │ ├── atmos_refstate_perturbations.jl │ │ ├── atmos_turbulence_stats.jl │ │ ├── diagnostic_fields.jl │ │ ├── dump_aux.jl │ │ ├── 
dump_init.jl │ │ ├── dump_state.jl │ │ ├── dump_tendencies.jl │ │ ├── groups.jl │ │ ├── helpers.jl │ │ ├── thermo.jl │ │ ├── variables.jl │ │ └── vorticity_balancelaw.jl │ ├── Driver/ │ │ ├── Callbacks/ │ │ │ └── Callbacks.jl │ │ ├── Checkpoint/ │ │ │ └── Checkpoint.jl │ │ ├── ConfigTypes/ │ │ │ └── ConfigTypes.jl │ │ ├── Driver.jl │ │ ├── SolverTypes/ │ │ │ ├── ExplicitSolverType.jl │ │ │ ├── HEVISolverType.jl │ │ │ ├── IMEXSolverType.jl │ │ │ ├── ImplicitSolverType.jl │ │ │ ├── MISSolverType.jl │ │ │ ├── MultirateSolverType.jl │ │ │ ├── SolverTypes.jl │ │ │ └── SplitExplicitSolverType.jl │ │ ├── diagnostics_configs.jl │ │ ├── driver_configs.jl │ │ ├── solver_config_wrappers.jl │ │ └── solver_configs.jl │ ├── InputOutput/ │ │ ├── VTK/ │ │ │ ├── VTK.jl │ │ │ ├── fieldwriter.jl │ │ │ ├── writemesh.jl │ │ │ ├── writepvtu.jl │ │ │ └── writevtk.jl │ │ └── Writers/ │ │ ├── Writers.jl │ │ └── netcdf_writer.jl │ ├── LICENSE │ ├── Land/ │ │ └── Model/ │ │ ├── LandModel.jl │ │ ├── RadiativeEnergyFlux.jl │ │ ├── Runoff.jl │ │ ├── SoilHeatParameterizations.jl │ │ ├── SoilWaterParameterizations.jl │ │ ├── SurfaceFlow.jl │ │ ├── land_bc.jl │ │ ├── land_tendencies.jl │ │ ├── prog_types.jl │ │ ├── prognostic_vars.jl │ │ ├── soil_bc.jl │ │ ├── soil_heat.jl │ │ ├── soil_model.jl │ │ ├── soil_water.jl │ │ └── source.jl │ ├── Numerics/ │ │ ├── DGMethods/ │ │ │ ├── Courant.jl │ │ │ ├── DGFVModel.jl │ │ │ ├── DGFVModel_kernels.jl │ │ │ ├── DGMethods.jl │ │ │ ├── DGModel.jl │ │ │ ├── DGModel_kernels.jl │ │ │ ├── ESDGModel.jl │ │ │ ├── ESDGModel_kernels.jl │ │ │ ├── FVReconstructions.jl │ │ │ ├── NumericalFluxes.jl │ │ │ ├── SpaceDiscretization.jl │ │ │ ├── create_states.jl │ │ │ ├── custom_filter.jl │ │ │ └── remainder.jl │ │ ├── Mesh/ │ │ │ ├── BrickMesh.jl │ │ │ ├── DSS.jl │ │ │ ├── Elements.jl │ │ │ ├── Filters.jl │ │ │ ├── GeometricFactors.jl │ │ │ ├── Geometry.jl │ │ │ ├── Grids.jl │ │ │ ├── Interpolation.jl │ │ │ ├── Mesh.jl │ │ │ ├── Metrics.jl │ │ │ └── Topologies.jl │ │ ├── 
ODESolvers/ │ │ │ ├── AdditiveRungeKuttaMethod.jl │ │ │ ├── BackwardEulerSolvers.jl │ │ │ ├── DifferentialEquations.jl │ │ │ ├── GenericCallbacks.jl │ │ │ ├── LowStorageRungeKutta3NMethod.jl │ │ │ ├── LowStorageRungeKuttaMethod.jl │ │ │ ├── MultirateInfinitesimalGARKDecoupledImplicit.jl │ │ │ ├── MultirateInfinitesimalGARKExplicit.jl │ │ │ ├── MultirateInfinitesimalStepMethod.jl │ │ │ ├── MultirateRungeKuttaMethod.jl │ │ │ ├── ODESolvers.jl │ │ │ ├── SplitExplicitMethod.jl │ │ │ └── StrongStabilityPreservingRungeKuttaMethod.jl │ │ └── SystemSolvers/ │ │ ├── SystemSolvers.jl │ │ ├── batched_generalized_minimal_residual_solver.jl │ │ ├── columnwise_lu_solver.jl │ │ ├── conjugate_gradient_solver.jl │ │ ├── generalized_conjugate_residual_solver.jl │ │ ├── generalized_minimal_residual_solver.jl │ │ ├── jacobian_free_newton_krylov_solver.jl │ │ └── preconditioners.jl │ ├── Ocean/ │ │ ├── HydrostaticBoussinesq/ │ │ │ ├── Courant.jl │ │ │ ├── HydrostaticBoussinesq.jl │ │ │ ├── LinearHBModel.jl │ │ │ ├── bc_temperature.jl │ │ │ ├── bc_velocity.jl │ │ │ └── hydrostatic_boussinesq_model.jl │ │ ├── JLD2Writer.jl │ │ ├── Ocean.jl │ │ ├── OceanBC.jl │ │ ├── OceanProblems/ │ │ │ ├── OceanProblems.jl │ │ │ ├── homogeneous_box.jl │ │ │ ├── initial_value_problem.jl │ │ │ ├── ocean_gyre.jl │ │ │ ├── shallow_water_initial_states.jl │ │ │ └── simple_box_problem.jl │ │ ├── README.md │ │ ├── ShallowWater/ │ │ │ ├── ShallowWaterModel.jl │ │ │ └── bc_velocity.jl │ │ ├── SplitExplicit/ │ │ │ ├── Communication.jl │ │ │ ├── HydrostaticBoussinesqCoupling.jl │ │ │ ├── ShallowWaterCoupling.jl │ │ │ ├── SplitExplicitModel.jl │ │ │ └── VerticalIntegralModel.jl │ │ ├── SplitExplicit01/ │ │ │ ├── BarotropicModel.jl │ │ │ ├── Communication.jl │ │ │ ├── Continuity3dModel.jl │ │ │ ├── IVDCModel.jl │ │ │ ├── OceanBoundaryConditions.jl │ │ │ ├── OceanModel.jl │ │ │ ├── SplitExplicitLSRK2nMethod.jl │ │ │ ├── SplitExplicitLSRK3nMethod.jl │ │ │ ├── SplitExplicitModel.jl │ │ │ └── VerticalIntegralModel.jl │ 
│ └── SuperModels.jl │ └── Utilities/ │ ├── SingleStackUtils/ │ │ ├── SingleStackUtils.jl │ │ └── single_stack_diagnostics.jl │ ├── TicToc/ │ │ └── TicToc.jl │ └── VariableTemplates/ │ ├── VariableTemplates.jl │ ├── flattened_tup_chain.jl │ └── var_names.jl ├── test/ │ ├── Arrays/ │ │ ├── basics.jl │ │ ├── broadcasting.jl │ │ ├── mpi_comm.jl │ │ ├── reductions.jl │ │ ├── runtests.jl │ │ └── varsindex.jl │ ├── Atmos/ │ │ ├── EDMF/ │ │ │ ├── Artifacts.toml │ │ │ ├── bomex_edmf.jl │ │ │ ├── closures/ │ │ │ │ ├── entr_detr.jl │ │ │ │ ├── mixing_length.jl │ │ │ │ ├── pressure.jl │ │ │ │ ├── surface_functions.jl │ │ │ │ └── turbulence_functions.jl │ │ │ ├── compute_mse.jl │ │ │ ├── edmf_kernels.jl │ │ │ ├── edmf_model.jl │ │ │ ├── ekman_layer.jl │ │ │ ├── helper_funcs/ │ │ │ │ ├── diagnose_environment.jl │ │ │ │ ├── diagnostics_configuration.jl │ │ │ │ ├── lamb_smooth_minimum.jl │ │ │ │ ├── nondimensional_exchange_functions.jl │ │ │ │ ├── save_subdomain_temperature.jl │ │ │ │ ├── subdomain_statistics.jl │ │ │ │ ├── subdomain_thermo_states.jl │ │ │ │ └── utility_funcs.jl │ │ │ ├── report_mse_bomex.jl │ │ │ ├── report_mse_sbl_anelastic.jl │ │ │ ├── report_mse_sbl_coupled_edmf_an1d.jl │ │ │ ├── report_mse_sbl_edmf.jl │ │ │ ├── report_mse_sbl_ss_implicit.jl │ │ │ ├── stable_bl_anelastic1d.jl │ │ │ ├── stable_bl_coupled_edmf_an1d.jl │ │ │ ├── stable_bl_edmf.jl │ │ │ ├── stable_bl_edmf_fvm.jl │ │ │ ├── stable_bl_single_stack_implicit.jl │ │ │ └── variable_map.jl │ │ ├── Model/ │ │ │ ├── Artifacts.toml │ │ │ ├── discrete_hydrostatic_balance.jl │ │ │ ├── get_atmos_ref_states.jl │ │ │ ├── ref_state.jl │ │ │ └── runtests.jl │ │ ├── Parameterizations/ │ │ │ └── Microphysics/ │ │ │ ├── KM_ice.jl │ │ │ ├── KM_saturation_adjustment.jl │ │ │ ├── KM_warm_rain.jl │ │ │ └── KinematicModel.jl │ │ ├── prog_prim_conversion/ │ │ │ └── runtests.jl │ │ └── runtests.jl │ ├── BalanceLaws/ │ │ └── runtests.jl │ ├── Common/ │ │ ├── CartesianDomains/ │ │ │ └── runtests.jl │ │ ├── CartesianFields/ │ 
│ │ └── runtests.jl │ │ ├── Spectra/ │ │ │ ├── gcm_standalone_visual_test.jl │ │ │ ├── runtests.jl │ │ │ └── spherical_helper_test.jl │ │ └── runtests.jl │ ├── Diagnostics/ │ │ ├── Debug/ │ │ │ ├── test_statecheck.jl │ │ │ └── test_statecheck_refvals.jl │ │ ├── diagnostic_fields_test.jl │ │ ├── dm_tests.jl │ │ ├── runtests.jl │ │ ├── sin_init.jl │ │ └── sin_test.jl │ ├── Driver/ │ │ ├── cr_unit_tests.jl │ │ ├── gcm_driver_test.jl │ │ ├── les_driver_test.jl │ │ ├── mms3.jl │ │ └── runtests.jl │ ├── InputOutput/ │ │ ├── VTK/ │ │ │ └── runtests.jl │ │ ├── Writers/ │ │ │ └── runtests.jl │ │ └── runtests.jl │ ├── Land/ │ │ ├── Model/ │ │ │ ├── Artifacts.toml │ │ │ ├── freeze_thaw_alone.jl │ │ │ ├── haverkamp_test.jl │ │ │ ├── heat_analytic_unit_test.jl │ │ │ ├── prescribed_twice.jl │ │ │ ├── runtests.jl │ │ │ ├── soil_heterogeneity.jl │ │ │ ├── test_bc.jl │ │ │ ├── test_bc_3d.jl │ │ │ ├── test_heat_parameterizations.jl │ │ │ ├── test_overland_flow_analytic.jl │ │ │ ├── test_overland_flow_vcatchment.jl │ │ │ ├── test_physical_bc.jl │ │ │ ├── test_radiative_energy_flux_functions.jl │ │ │ └── test_water_parameterizations.jl │ │ └── runtests.jl │ ├── Numerics/ │ │ ├── DGMethods/ │ │ │ ├── Euler/ │ │ │ │ ├── acousticwave_1d_imex.jl │ │ │ │ ├── acousticwave_mrigark.jl │ │ │ │ ├── acousticwave_variable_degree.jl │ │ │ │ ├── fvm_balance.jl │ │ │ │ ├── fvm_isentropicvortex.jl │ │ │ │ ├── isentropicvortex.jl │ │ │ │ ├── isentropicvortex_imex.jl │ │ │ │ ├── isentropicvortex_lmars.jl │ │ │ │ ├── isentropicvortex_mrigark.jl │ │ │ │ ├── isentropicvortex_mrigark_implicit.jl │ │ │ │ ├── isentropicvortex_multirate.jl │ │ │ │ └── isentropicvortex_setup.jl │ │ │ ├── advection_diffusion/ │ │ │ │ ├── advection_diffusion_model.jl │ │ │ │ ├── advection_diffusion_model_1dimex_bgmres.jl │ │ │ │ ├── advection_diffusion_model_1dimex_bjfnks.jl │ │ │ │ ├── advection_sphere.jl │ │ │ │ ├── diffusion_hyperdiffusion_sphere.jl │ │ │ │ ├── direction_splitting_advection_diffusion.jl │ │ │ │ ├── 
fvm_advection.jl │ │ │ │ ├── fvm_advection_diffusion.jl │ │ │ │ ├── fvm_advection_diffusion_model_1dimex_bjfnks.jl │ │ │ │ ├── fvm_advection_diffusion_periodic.jl │ │ │ │ ├── fvm_advection_sphere.jl │ │ │ │ ├── fvm_swirl.jl │ │ │ │ ├── hyperdiffusion_bc.jl │ │ │ │ ├── hyperdiffusion_model.jl │ │ │ │ ├── periodic_3D_hyperdiffusion.jl │ │ │ │ ├── pseudo1D_advection_diffusion.jl │ │ │ │ ├── pseudo1D_advection_diffusion_1dimex.jl │ │ │ │ ├── pseudo1D_advection_diffusion_mrigark_implicit.jl │ │ │ │ ├── pseudo1D_heat_eqn.jl │ │ │ │ └── variable_degree_advection_diffusion.jl │ │ │ ├── compressible_Navier_Stokes/ │ │ │ │ ├── density_current_model.jl │ │ │ │ ├── mms_bc_atmos.jl │ │ │ │ ├── mms_bc_dgmodel.jl │ │ │ │ ├── mms_model.jl │ │ │ │ ├── mms_solution.jl │ │ │ │ └── mms_solution_generated.jl │ │ │ ├── compressible_navier_stokes_equations/ │ │ │ │ ├── plotting/ │ │ │ │ │ ├── bigfileofstuff.jl │ │ │ │ │ ├── plot_output.jl │ │ │ │ │ └── vizinanigans.jl │ │ │ │ ├── shared_source/ │ │ │ │ │ ├── FluidBC.jl │ │ │ │ │ ├── ScalarFields.jl │ │ │ │ │ ├── VectorFields.jl │ │ │ │ │ ├── abstractions.jl │ │ │ │ │ ├── boilerplate.jl │ │ │ │ │ ├── callbacks.jl │ │ │ │ │ ├── domains.jl │ │ │ │ │ └── grids.jl │ │ │ │ ├── sphere/ │ │ │ │ │ ├── sphere_helper_functions.jl │ │ │ │ │ ├── test_heat_equation.jl │ │ │ │ │ ├── test_hydrostatic_balance.jl │ │ │ │ │ └── test_sphere.jl │ │ │ │ ├── three_dimensional/ │ │ │ │ │ ├── ThreeDimensionalCompressibleNavierStokesEquations.jl │ │ │ │ │ ├── bc_momentum.jl │ │ │ │ │ ├── bc_temperature.jl │ │ │ │ │ ├── config_sphere.jl │ │ │ │ │ ├── refvals_bickley_jet.jl │ │ │ │ │ ├── refvals_buoyancy.jl │ │ │ │ │ ├── run_bickley_jet.jl │ │ │ │ │ ├── run_box.jl │ │ │ │ │ ├── run_taylor_green_vortex.jl │ │ │ │ │ ├── test_bickley_jet.jl │ │ │ │ │ └── test_buoyancy.jl │ │ │ │ └── two_dimensional/ │ │ │ │ ├── TwoDimensionalCompressibleNavierStokesEquations.jl │ │ │ │ ├── bc_momentum.jl │ │ │ │ ├── bc_tracer.jl │ │ │ │ ├── refvals_bickley_jet.jl │ │ │ │ ├── 
run_bickley_jet.jl │ │ │ │ └── test_bickley_jet.jl │ │ │ ├── conservation/ │ │ │ │ └── sphere.jl │ │ │ ├── courant.jl │ │ │ ├── custom_filter.jl │ │ │ ├── fv_reconstruction_test.jl │ │ │ ├── grad_test.jl │ │ │ ├── grad_test_sphere.jl │ │ │ ├── horizontal_integral_test.jl │ │ │ ├── integral_test.jl │ │ │ ├── integral_test_sphere.jl │ │ │ ├── remainder_model.jl │ │ │ └── vars_test.jl │ │ ├── ESDGMethods/ │ │ │ ├── DryAtmos/ │ │ │ │ ├── DryAtmos.jl │ │ │ │ ├── baroclinic_wave.jl │ │ │ │ ├── linear.jl │ │ │ │ ├── run_tests.jl │ │ │ │ └── run_tests_mpo.jl │ │ │ └── diagnostics.jl │ │ ├── Mesh/ │ │ │ ├── BrickMesh.jl │ │ │ ├── DSS.jl │ │ │ ├── DSS_mpi.jl │ │ │ ├── Elements.jl │ │ │ ├── Geometry.jl │ │ │ ├── Grids.jl │ │ │ ├── Metrics.jl │ │ │ ├── filter.jl │ │ │ ├── filter_TMAR.jl │ │ │ ├── grid_integral.jl │ │ │ ├── interpolation.jl │ │ │ ├── min_node_distance.jl │ │ │ ├── mpi_centroid.jl │ │ │ ├── mpi_connect.jl │ │ │ ├── mpi_connect_1d.jl │ │ │ ├── mpi_connect_ell.jl │ │ │ ├── mpi_connect_sphere.jl │ │ │ ├── mpi_connect_stacked.jl │ │ │ ├── mpi_connect_stacked_3d.jl │ │ │ ├── mpi_connectfull.jl │ │ │ ├── mpi_getpartition.jl │ │ │ ├── mpi_partition.jl │ │ │ ├── mpi_sortcolumns.jl │ │ │ └── topology.jl │ │ ├── ODESolvers/ │ │ │ ├── callbacks.jl │ │ │ ├── ode_tests_basic.jl │ │ │ ├── ode_tests_common.jl │ │ │ ├── ode_tests_convergence.jl │ │ │ └── runtests.jl │ │ ├── SystemSolvers/ │ │ │ ├── bandedsystem.jl │ │ │ ├── bgmres.jl │ │ │ ├── cg.jl │ │ │ ├── columnwiselu.jl │ │ │ ├── iterativesolvers.jl │ │ │ ├── poisson.jl │ │ │ └── runtests.jl │ │ └── runtests.jl │ ├── Ocean/ │ │ ├── HydrostaticBoussinesq/ │ │ │ ├── test_3D_spindown.jl │ │ │ ├── test_initial_value_problem.jl │ │ │ ├── test_ocean_gyre_long.jl │ │ │ ├── test_ocean_gyre_short.jl │ │ │ ├── test_windstress_long.jl │ │ │ └── test_windstress_short.jl │ │ ├── HydrostaticBoussinesqModel/ │ │ │ └── test_hydrostatic_boussinesq_model.jl │ │ ├── OceanProblems/ │ │ │ └── test_initial_value_problem.jl │ │ ├── ShallowWater/ 
│ │ │ ├── GyreDriver.jl │ │ │ └── test_2D_spindown.jl │ │ ├── SplitExplicit/ │ │ │ ├── hydrostatic_spindown.jl │ │ │ ├── simple_box_2dt.jl │ │ │ ├── simple_box_ivd.jl │ │ │ ├── simple_box_rk3.jl │ │ │ ├── simple_dbl_gyre.jl │ │ │ ├── split_explicit.jl │ │ │ ├── test_coriolis.jl │ │ │ ├── test_restart.jl │ │ │ ├── test_simple_box.jl │ │ │ ├── test_spindown_long.jl │ │ │ ├── test_spindown_short.jl │ │ │ └── test_vertical_integral_model.jl │ │ ├── refvals/ │ │ │ ├── 2D_hydrostatic_spindown_refvals.jl │ │ │ ├── 3D_hydrostatic_spindown_refvals.jl │ │ │ ├── hydrostatic_spindown_refvals.jl │ │ │ ├── simple_box_2dt_refvals.jl │ │ │ ├── simple_box_ivd_refvals.jl │ │ │ ├── simple_box_rk3_refvals.jl │ │ │ ├── simple_dbl_gyre_refvals.jl │ │ │ ├── test_ocean_gyre_refvals.jl │ │ │ ├── test_vertical_integral_model_refvals.jl │ │ │ └── test_windstress_refvals.jl │ │ └── runtests.jl │ ├── Utilities/ │ │ ├── SingleStackUtils/ │ │ │ ├── runtests.jl │ │ │ └── ssu_tests.jl │ │ ├── TicToc/ │ │ │ └── runtests.jl │ │ ├── VariableTemplates/ │ │ │ ├── complex_models.jl │ │ │ ├── runtests.jl │ │ │ ├── runtests_gpu.jl │ │ │ ├── test_base_functionality.jl │ │ │ ├── test_complex_models.jl │ │ │ ├── test_complex_models_gpu.jl │ │ │ └── varsindex.jl │ │ └── runtests.jl │ ├── runtests.jl │ ├── runtests_gpu.jl │ └── testhelpers.jl └── tutorials/ ├── Atmos/ │ ├── agnesi_hs_lin.jl │ ├── agnesi_nh_lin.jl │ ├── burgers_single_stack.jl │ ├── burgers_single_stack_bjfnk.jl │ ├── burgers_single_stack_fvm.jl │ ├── densitycurrent.jl │ ├── dry_rayleigh_benard.jl │ ├── heldsuarez.jl │ └── risingbubble.jl ├── BalanceLaws/ │ └── tendency_specification_layer.jl ├── Diagnostics/ │ └── Debug/ │ └── StateCheck.jl ├── Land/ │ ├── Heat/ │ │ └── heat_equation.jl │ └── Soil/ │ ├── Artifacts.toml │ ├── Coupled/ │ │ └── equilibrium_test.jl │ ├── Heat/ │ │ └── bonan_heat_tutorial.jl │ ├── PhaseChange/ │ │ ├── freezing_front.jl │ │ └── phase_change_analytic_test.jl │ ├── Water/ │ │ ├── equilibrium_test.jl │ │ └── 
hydraulic_functions.jl │ └── interpolation_helper.jl ├── Numerics/ │ ├── DGMethods/ │ │ ├── Box1D.jl │ │ └── showcase_filters.jl │ ├── SystemSolvers/ │ │ ├── bgmres.jl │ │ └── cg.jl │ └── TimeStepping/ │ ├── explicit_lsrk.jl │ ├── imex_ark.jl │ ├── mis.jl │ ├── multirate_rk.jl │ ├── ts_intro.jl │ ├── tutorial_acousticwave_config.jl │ └── tutorial_risingbubble_config.jl ├── Ocean/ │ ├── geostrophic_adjustment.jl │ ├── internal_wave.jl │ └── shear_instability.jl ├── TutorialList.jl └── literate_markdown.jl ================================================ FILE CONTENTS ================================================ ================================================ FILE: .buildkite/docs-pipeline.yml ================================================ env: JULIA_VERSION: "1.5.4" GKSwstype: nul OPENBLAS_NUM_THREADS: 1 CLIMATEMACHINE_SETTINGS_DISABLE_GPU: "true" CLIMATEMACHINE_SETTINGS_FIX_RNG_SEED: "true" CLIMATEMACHINE_SETTINGS_DISABLE_CUSTOM_LOGGER: "true" steps: - label: "Build project" command: - "julia --project --color=yes -e 'using Pkg; Pkg.instantiate()'" - "julia --project=docs/ --color=yes -e 'using Pkg; Pkg.instantiate()'" - "julia --project=docs/ --color=yes -e 'using Pkg; Pkg.precompile()'" agents: config: cpu queue: central slurm_ntasks: 1 slurm_cpus_per_task: 1 slurm_mem_per_cpu: 6000 - wait - label: "Build docs" command: # this extracts out the PR number from the bors message on the trying branch # to force documenter to deploy the PR branch number gh-preview - "if [ $$BUILDKITE_BRANCH == \"trying\" ]; then \ export BUILDKITE_PULL_REQUEST=\"$${BUILDKITE_MESSAGE//[!0-9]/}\"; \ fi" - "if [[ ! 
-z \"$${PULL_REQUEST}\" ]]; then \ export BUILDKITE_PULL_REQUEST=\"$${PULL_REQUEST}\"; \ fi" - "julia --project=docs/ --color=yes --procs=10 docs/make.jl" env: JULIA_PROJECT: "docs/" agents: config: cpu queue: central slurm_time: 120 slurm_nodes: 1 slurm_ntasks: 10 slurm_cpus_per_task: 1 slurm_mem_per_cpu: 6000 ================================================ FILE: .buildkite/pipeline.yml ================================================ env: JULIA_VERSION: "1.5.4" OPENMPI_VERSION: "4.0.4" CUDA_VERSION: "10.2" OPENBLAS_NUM_THREADS: 1 CLIMATEMACHINE_SETTINGS_FIX_RNG_SEED: "true" steps: - label: "init cpu env" key: "init_cpu_env" command: - "echo $JULIA_DEPOT_PATH" - "julia --project -e 'using Pkg; Pkg.instantiate(;verbose=true)'" - "julia --project -e 'using Pkg; Pkg.precompile()'" - "julia --project -e 'using Pkg; Pkg.status()'" agents: config: cpu queue: central slurm_ntasks: 1 - label: "init gpu env" key: "init_gpu_env" command: - "echo $JULIA_DEPOT_PATH" - "julia --project -e 'using Pkg; Pkg.instantiate(;verbose=true)'" - "julia --project -e 'using Pkg; Pkg.precompile()'" # force the initialization of the CUDA runtime # as it is lazily loaded by default - "julia --project -e 'using CUDA; CUDA.versioninfo()'" # force the initialization of the CUDA Compiler runtime # as it is lazily generated by default - "julia --project -e 'using CUDA; CUDA.precompile_runtime()'" - "julia --project -e 'using Pkg; Pkg.status()'" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - wait - label: "cpu_advection_diffusion_model_1dimex_bgmres" key: "cpu_advection_diffusion_model_1dimex_bgmres" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/advection_diffusion_model_1dimex_bgmres.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_fvm_advection_diffusion_model_1dimex_bjfnks" key: "cpu_fvm_advection_diffusion_model_1dimex_bjfnks" command: - "mpiexec julia --color=yes --project 
test/Numerics/DGMethods/advection_diffusion/fvm_advection_diffusion_model_1dimex_bjfnks.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_pseudo1D_advection_diffusion" key: "cpu_pseudo1D_advection_diffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_pseudo1D_advection_diffusion_1dimex" key: "cpu_pseudo1D_advection_diffusion_1dimex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion_1dimex.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_pseudo1D_advection_diffusion_mrigark_implicit" key: "cpu_pseudo1D_advection_diffusion_mrigark_implicit" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion_mrigark_implicit.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_pseudo1D_heat_eqn" key: "cpu_pseudo1D_heat_eqn" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/pseudo1D_heat_eqn.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_bickley_jet_2D" key: "cpu_bickley_jet_2D" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_navier_stokes_equations/two_dimensional/test_bickley_jet.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_periodic_3D_hyperdiffusion" key: "cpu_periodic_3D_hyperdiffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/periodic_3D_hyperdiffusion.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_hyperdiffusion_bc" key: "cpu_hyperdiffusion_bc" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/hyperdiffusion_bc.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: 
"cpu_diffusion_hyperdiffusion_sphere" key: "cpu_diffusion_hyperdiffusion_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/diffusion_hyperdiffusion_sphere.jl" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mpi_connect_1d" key: "cpu_mpi_connect_1d" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_connect_1d.jl " agents: config: cpu queue: central slurm_ntasks: 5 - label: "cpu_mpi_connect_sphere" key: "cpu_mpi_connect_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_connect_sphere.jl " agents: config: cpu queue: central slurm_ntasks: 5 - label: "cpu_mpi_getpartition" key: "cpu_mpi_getpartition" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_getpartition.jl " agents: config: cpu queue: central slurm_ntasks: 5 - label: "cpu_mpi_sortcolumns4" key: "cpu_mpi_sortcolumns4" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_sortcolumns.jl " agents: config: cpu queue: central slurm_ntasks: 4 - label: "cpu_ode_tests_convergence" key: "cpu_ode_tests_convergence" command: - "mpiexec julia --color=yes --project test/Numerics/ODESolvers/ode_tests_convergence.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_ode_tests_basic" key: "cpu_ode_tests_basic" command: - "mpiexec julia --color=yes --project test/Numerics/ODESolvers/ode_tests_basic.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_varsindex" key: "cpu_varsindex" command: - "mpiexec julia --color=yes --project test/Arrays/varsindex.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_diagnostic_fields_test" key: "cpu_diagnostic_fields_test" command: - "mpiexec julia --color=yes --project test/Diagnostics/diagnostic_fields_test.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_vars_test" key: "cpu_vars_test" command: - "mpiexec julia --color=yes --project 
test/Numerics/DGMethods/vars_test.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_grad_test" key: "cpu_grad_test" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/grad_test.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_grad_test_sphere" key: "cpu_grad_test_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/grad_test_sphere.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_horizontal_integral_test" key: "cpu_horizontal_integral_test" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/horizontal_integral_test.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_integral_test" key: "cpu_integral_test" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/integral_test.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_integral_test_sphere" key: "cpu_integral_test_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/integral_test_sphere.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_custom_filter" key: "cpu_custom_filter" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/custom_filter.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_fv_reconstruction_test" key: "cpu_fv_reconstruction_test" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/fv_reconstruction_test.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_remainder_model" key: "cpu_remainder_model" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/remainder_model.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_isentropicvortex" key: "cpu_isentropicvortex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_isentropicvortex_imex" key: 
"cpu_isentropicvortex_imex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_imex.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_isentropicvortex_lmars" key: "cpu_isentropicvortex_lmars" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_lmars.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_isentropicvortex_multirate" key: "cpu_isentropicvortex_multirate" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_multirate.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_isentropicvortex_mrigark" key: "cpu_isentropicvortex_mrigark" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_mrigark.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_isentropicvortex_mrigark_implicit" key: "cpu_isentropicvortex_mrigark_implicit" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_mrigark_implicit.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_acousticwave_1d_imex" key: "cpu_acousticwave_1d_imex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/acousticwave_1d_imex.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_acousticwave_mrigark" key: "cpu_acousticwave_mrigark" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/acousticwave_mrigark.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_acousticwave_variable_degree" key: "cpu_acousticwave_variable_degree" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/acousticwave_variable_degree.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_fvm_balance" key: "cpu_fvm_balance" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/fvm_balance.jl " agents: 
config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mms_bc_atmos" key: "cpu_mms_bc_atmos" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_Navier_Stokes/mms_bc_atmos.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mms_bc_dgmodel" key: "cpu_mms_bc_dgmodel" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_Navier_Stokes/mms_bc_dgmodel.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_density_current_model" key: "cpu_density_current_model" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_Navier_Stokes/density_current_model.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_direction_splitting_advection_diffusion" key: "cpu_direction_splitting_advection_diffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/direction_splitting_advection_diffusion.jl --fix-rng-seed" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_variable_degree_advection_diffusion" key: "cpu_variable_degree_advection_diffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/variable_degree_advection_diffusion.jl" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_sphere" key: "cpu_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/conservation/sphere.jl --fix-rng-seed" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_advection_sphere" key: "cpu_advection_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/advection_sphere.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_fvm_advection_sphere" key: "cpu_fvm_advection_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_advection_sphere.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: 
"cpu_fvm_swirl" key: "cpu_fvm_swirl" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_swirl.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_fvm_advection" key: "cpu_fvm_advection" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_advection.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_fvm_advection_diffusion" key: "cpu_fvm_advection_diffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_advection_diffusion.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_fvm_advection_diffusion_periodic" key: "cpu_fvm_advection_diffusion_periodic" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_advection_diffusion_periodic.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_fvm_isentropicvortex" key: "cpu_fvm_isentropicvortex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/fvm_isentropicvortex.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_gcm_driver_test" key: "cpu_gcm_driver_test" command: - "mpiexec julia --color=yes --project test/Driver/gcm_driver_test.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_poisson" key: "cpu_poisson" command: - "mpiexec julia --color=yes --project test/Numerics/SystemSolvers/poisson.jl " agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_columnwiselu" key: "cpu_columnwiselu" command: - "mpiexec julia --color=yes --project test/Numerics/SystemSolvers/columnwiselu.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_cg" key: "cpu_cg" command: - "mpiexec julia --color=yes --project test/Numerics/SystemSolvers/cg.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_bgmres" key: "cpu_bgmres" command: - "mpiexec julia --color=yes --project 
test/Numerics/SystemSolvers/bgmres.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_bandedsystem" key: "cpu_bandedsystem" command: - "mpiexec julia --color=yes --project test/Numerics/SystemSolvers/bandedsystem.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_interpolation" key: "cpu_interpolation" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/interpolation.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_dss_mpi" key: "cpu_dss_mpi" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/DSS_mpi.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_dss" key: "cpu_dss" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/DSS.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_GyreDriver" key: "cpu_GyreDriver" command: - "mpiexec julia --color=yes --project test/Ocean/ShallowWater/GyreDriver.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_test_windstress_short" key: "cpu_test_windstress_short" command: - "mpiexec julia --color=yes --project test/Ocean/HydrostaticBoussinesq/test_windstress_short.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_test_ocean_gyre_short" key: "cpu_test_ocean_gyre_short" command: - "mpiexec julia --color=yes --project test/Ocean/HydrostaticBoussinesq/test_ocean_gyre_short.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_test_2D_spindown" key: "cpu_test_2D_spindown" command: - "mpiexec julia --color=yes --project test/Ocean/ShallowWater/test_2D_spindown.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_test_3D_spindown" key: "cpu_test_3D_spindown" command: - "mpiexec julia --color=yes --project test/Ocean/HydrostaticBoussinesq/test_3D_spindown.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_test_vertical_integral_model" key: "cpu_test_vertical_integral_model" command: - "mpiexec julia --color=yes 
--project test/Ocean/SplitExplicit/test_vertical_integral_model.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_test_spindown_long" key: "cpu_test_spindown_long" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/test_spindown_long.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_test_restart" key: "cpu_test_restart" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/test_restart.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_test_coriolis" key: "cpu_test_coriolis" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/test_coriolis.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_KM_saturation_adjustment" key: "cpu_KM_saturation_adjustment" command: - "mpiexec julia --color=yes --project test/Atmos/Parameterizations/Microphysics/KM_saturation_adjustment.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_KM_warm_rain" key: "cpu_KM_warm_rain" command: - "mpiexec julia --color=yes --project test/Atmos/Parameterizations/Microphysics/KM_warm_rain.jl " agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_haverkamp_test" key: "cpu_haverkamp_test" command: - "mpiexec julia --color=yes --project test/Land/Model/haverkamp_test.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_soil_params" key: "cpu_soil_params" command: - "mpiexec julia --color=yes --project test/Land/Model/soil_heterogeneity.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_heat_analytic_unit_test" key: "cpu_heat_analytic_unit_test" command: - "mpiexec julia --color=yes --project test/Land/Model/heat_analytic_unit_test.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_discrete_hydrostatic_balance" key: "cpu_discrete_hydrostatic_balance" command: - "mpiexec julia --color=yes --project test/Atmos/Model/discrete_hydrostatic_balance.jl" agents: config: cpu queue: central 
slurm_ntasks: 3 - label: "cpu_soil_test_bc" key: "cpu_soil_test_bc" command: - "mpiexec julia --color=yes --project test/Land/Model/test_bc.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_soil_test_bc_3d" key: "cpu_soil_test_bc_3d" command: - "mpiexec julia --color=yes --project test/Land/Model/test_bc_3d.jl " agents: config: cpu queue: central slurm_ntasks: 1 - label: "gpu_varsindex" key: "gpu_varsindex" command: - "mpiexec julia --color=yes --project test/Arrays/varsindex.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_variable_templates" key: "gpu_variable_templates" command: - "mpiexec julia --color=yes --project test/Utilities/VariableTemplates/runtests_gpu.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "cpu_land_overland_flow_vcatchment" key: "cpu_land_overland_flow_vcatchment" command: - "mpiexec julia --color=yes --project test/Land/Model/test_overland_flow_vcatchment.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "gpu_diagnostic_fields_test" key: "gpu_diagnostic_fields_test" command: - "mpiexec julia --color=yes --project test/Diagnostics/diagnostic_fields_test.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_vars_test" key: "gpu_vars_test" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/vars_test.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_custom_filter" key: "gpu_custom_filter" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/custom_filter.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_remainder_model" key: "gpu_remainder_model" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/remainder_model.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_isentropicvortex" key: "gpu_isentropicvortex" command: - 
"mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_isentropicvortex_imex" key: "gpu_isentropicvortex_imex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_imex.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_isentropicvortex_lmars" key: "gpu_isentropicvortex_lmars" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_lmars.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_isentropicvortex_multirate" key: "gpu_isentropicvortex_multirate" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_multirate.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_isentropicvortex_mrigark" key: "gpu_isentropicvortex_mrigark" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_mrigark.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_isentropicvortex_mrigark_implicit" key: "gpu_isentropicvortex_mrigark_implicit" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/isentropicvortex_mrigark_implicit.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_acousticwave_1d_imex" key: "gpu_acousticwave_1d_imex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/acousticwave_1d_imex.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_acousticwave_mrigark" key: "gpu_acousticwave_mrigark" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/acousticwave_mrigark.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_acousticwave_variable_degree" key: 
"gpu_acousticwave_variable_degree" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/acousticwave_variable_degree.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_fvm_balance" key: "gpu_fvm_balance" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/fvm_balance.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_mms_bc_atmos" key: "gpu_mms_bc_atmos" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_Navier_Stokes/mms_bc_atmos.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_mms_bc_dgmodel" key: "gpu_mms_bc_dgmodel" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_Navier_Stokes/mms_bc_dgmodel.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_density_current_model" key: "gpu_density_current_model" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_Navier_Stokes/density_current_model.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_direction_splitting_advection_diffusion" key: "gpu_direction_splitting_advection_diffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/direction_splitting_advection_diffusion.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_variable_degree_advection_diffusion" key: "gpu_variable_degree_advection_diffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/variable_degree_advection_diffusion.jl" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_sphere" key: "gpu_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/conservation/sphere.jl --fix-rng-seed" agents: config: gpu queue: 
central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_advection_sphere" key: "gpu_advection_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/advection_sphere.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_fvm_advection_sphere" key: "gpu_fvm_advection_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_advection_sphere.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_fvm_swirl" key: "gpu_fvm_swirl" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_swirl.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_fvm_advection" key: "gpu_fvm_advection" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_advection.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_fvm_advection_diffusion" key: "gpu_fvm_advection_diffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_advection_diffusion.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_fvm_advection_diffusion_periodic" key: "gpu_fvm_advection_diffusion_periodic" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/fvm_advection_diffusion_periodic.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_fvm_isentropicvortex" key: "gpu_fvm_isentropicvortex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/Euler/fvm_isentropicvortex.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_gcm_driver_test" key: "gpu_gcm_driver_test" command: - "mpiexec julia --color=yes --project test/Driver/gcm_driver_test.jl " agents: config: gpu queue: central slurm_ntasks: 1 
slurm_gres: "gpu:1" - label: "gpu_poisson" key: "gpu_poisson" command: - "mpiexec julia --color=yes --project test/Numerics/SystemSolvers/poisson.jl " agents: config: gpu queue: central slurm_ntasks: 2 slurm_gres: "gpu:1" - label: "gpu_columnwiselu" key: "gpu_columnwiselu" command: - "mpiexec julia --color=yes --project test/Numerics/SystemSolvers/columnwiselu.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_bandedsystem" key: "gpu_bandedsystem" command: - "mpiexec julia --color=yes --project test/Numerics/SystemSolvers/bandedsystem.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "cpu_courant" key: "cpu_courant" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/courant.jl" agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_brickmesh" key: "cpu_brickmesh" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/BrickMesh.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_elements" key: "cpu_elements" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/Elements.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_metrics" key: "cpu_metrics" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/Metrics.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_grids" key: "cpu_grids" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/Grids.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_topology" key: "cpu_topology" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/topology.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_grid_integral" key: "cpu_grid_integral" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/grid_integral.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_filter" key: "cpu_filter" command: - "mpiexec julia --color=yes --project 
test/Numerics/Mesh/filter.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_filter_tmar" key: "cpu_filter_tmar" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/filter_TMAR.jl" agents: config: cpu queue: central slurm_ntasks: 1 - label: "cpu_mpi_centroid" key: "cpu_mpi_centroid" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_centroid.jl" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mpi_connect_ell" key: "cpu_mpi_connect_ell" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_connect_ell.jl" agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_mpi_connect" key: "cpu_mpi_connect" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_connect.jl" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mpi_connectfull" key: "cpu_mpi_connectfull" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_connectfull.jl" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mpi_connect_stacked" key: "cpu_mpi_connect_stacked" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_connect_stacked.jl" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mpi_connect_stacked_3d" key: "cpu_mpi_connect_stacked_3d" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_connect_stacked_3d.jl" agents: config: cpu queue: central slurm_ntasks: 2 - label: "cpu_mpi_getpartition3" key: "cpu_mpi_getpartition3" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_getpartition.jl" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mpi_partition" key: "cpu_mpi_partition" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_partition.jl" agents: config: cpu queue: central slurm_ntasks: 3 - label: "cpu_mpi_sortcolumns" key: "cpu_mpi_sortcolumns" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/mpi_sortcolumns.jl" agents: 
config: cpu queue: central slurm_ntasks: 1 - label: "gpu_interpolation" key: "gpu_interpolation" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/interpolation.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_dss_mpi" key: "gpu_dss_mpi" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/DSS_mpi.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_dss" key: "gpu_dss" command: - "mpiexec julia --color=yes --project test/Numerics/Mesh/DSS.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_GyreDriver" key: "gpu_GyreDriver" command: - "mpiexec julia --color=yes --project test/Ocean/ShallowWater/GyreDriver.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_windstress_short" key: "gpu_test_windstress_short" command: - "mpiexec julia --color=yes --project test/Ocean/HydrostaticBoussinesq/test_windstress_short.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_ocean_gyre_short" key: "gpu_test_ocean_gyre_short" command: - "mpiexec julia --color=yes --project test/Ocean/HydrostaticBoussinesq/test_ocean_gyre_short.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_2D_spindown" key: "gpu_test_2D_spindown" command: - "mpiexec julia --color=yes --project test/Ocean/ShallowWater/test_2D_spindown.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_3D_spindown" key: "gpu_test_3D_spindown" command: - "mpiexec julia --color=yes --project test/Ocean/HydrostaticBoussinesq/test_3D_spindown.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_vertical_integral_model" key: "gpu_test_vertical_integral_model" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/test_vertical_integral_model.jl " agents: config: 
gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_spindown_long" key: "gpu_test_spindown_long" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/test_spindown_long.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_restart" key: "gpu_test_restart" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/test_restart.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_coriolis" key: "gpu_test_coriolis" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/test_coriolis.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_KM_saturation_adjustment" key: "gpu_KM_saturation_adjustment" command: - "mpiexec julia --color=yes --project test/Atmos/Parameterizations/Microphysics/KM_saturation_adjustment.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_KM_warm_rain" key: "gpu_KM_warm_rain" command: - "mpiexec julia --color=yes --project test/Atmos/Parameterizations/Microphysics/KM_warm_rain.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_KM_ice" key: "gpu_KM_ice" command: - "mpiexec julia --color=yes --project test/Atmos/Parameterizations/Microphysics/KM_ice.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_pseudo1D_advection_diffusion" key: "gpu_pseudo1D_advection_diffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion.jl --integration-testing" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_pseudo1D_advection_diffusion_1dimex" key: "gpu_pseudo1D_advection_diffusion_1dimex" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion_1dimex.jl " agents: config: gpu queue: central 
slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_pseudo1D_advection_diffusion_mrigark_implicit" key: "gpu_pseudo1D_advection_diffusion_mrigark_implicit" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion_mrigark_implicit.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_pseudo1D_heat_eqn" key: "gpu_pseudo1D_heat_eqn" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/pseudo1D_heat_eqn.jl --integration-testing" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_bickley_jet_2D" key: "gpu_bickley_jet_2D" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_navier_stokes_equations/two_dimensional/test_bickley_jet.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_bickley_jet_3D" key: "gpu_bickley_jet_3D" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/test_bickley_jet.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_cnse_buoyancy_3D" key: "gpu_cnse_buoyancy_3D" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/test_buoyancy.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_cnse_sphere_3D" key: "gpu_cnse_sphere_3D" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_navier_stokes_equations/sphere/test_sphere.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_cnse_sphere_heat_3D" key: "gpu_cnse_sphere_heat_3D" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_navier_stokes_equations/sphere/test_heat_equation.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: 
"gpu:1" - label: "gpu_cnse_sphere_balance_3D" key: "gpu_cnse_sphere_balance_3D" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/compressible_navier_stokes_equations/sphere/test_hydrostatic_balance.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_periodic_3D_hyperdiffusion" key: "gpu_periodic_3D_hyperdiffusion" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/periodic_3D_hyperdiffusion.jl --integration-testing" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_hyperdiffusion_bc" key: "gpu_hyperdiffusion_bc" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/hyperdiffusion_bc.jl --integration-testing" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_diffusion_hyperdiffusion_sphere" key: "gpu_diffusion_hyperdiffusion_sphere" command: - "mpiexec julia --color=yes --project test/Numerics/DGMethods/advection_diffusion/diffusion_hyperdiffusion_sphere.jl --integration-testing" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_esdg_baroclinic_wave" key: "gpu_esdg_baroclinic_wave" command: - "mpiexec julia --color=yes --project test/Numerics/ESDGMethods/DryAtmos/baroclinic_wave.jl" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_dry_rayleigh_benard" key: "gpu_dry_rayleigh_benard" command: - "mpiexec julia --color=yes --project tutorials/Atmos/dry_rayleigh_benard.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_dry_risingbubble" key: "gpu_dry_risingbubble" command: - "mpiexec julia --color=yes --project experiments/TestCase/risingbubble.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_dry_risingbubble_fvm" key: "gpu_dry_risingbubble_fvm" command: - "mpiexec julia --color=yes --project 
experiments/TestCase/risingbubble_fvm.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_moist_risingbubble" key: "gpu_moist_risingbubble" command: - "mpiexec julia --color=yes --project experiments/TestCase/risingbubble.jl --with-moisture " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_solid_body_rotation" key: "gpu_solid_body_rotation" command: - "mpiexec julia --color=yes --project experiments/TestCase/solid_body_rotation.jl --diagnostics=default " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_solid_body_rotation_fvm" key: "gpu_solid_body_rotation_fvm" command: - "mpiexec julia --color=yes --project experiments/TestCase/solid_body_rotation_fvm.jl --diagnostics=default " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_solid_body_rotation_mountain" key: "gpu_solid_body_rotation_mountain" command: - "mpiexec julia --color=yes --project experiments/TestCase/solid_body_rotation_mountain.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_dry_baroclinic_wave" key: "gpu_dry_baroclinic_wave" command: - "mpiexec julia --color=yes --project experiments/TestCase/baroclinic_wave.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_dry_baroclinic_wave_fvm" key: "gpu_dry_baroclinic_wave_fvm" command: - "mpiexec julia --color=yes --project experiments/TestCase/baroclinic_wave_fvm.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_moist_baroclinic_wave" key: "gpu_moist_baroclinic_wave" command: - "mpiexec julia --color=yes --project experiments/TestCase/baroclinic_wave.jl --with-moisture " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_heldsuarez" key: "gpu_heldsuarez" command: - "mpiexec julia --color=yes --project experiments/AtmosGCM/heldsuarez.jl --diagnostics=default 
--fix-rng-seed " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_nonhydrostatic_gravity_wave" key: "gpu_nonhydrostatic_gravity_wave" command: - "mpiexec julia --color=yes --project experiments/AtmosGCM/nonhydrostatic_gravity_wave.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_isothermal_zonal_flow" key: "gpu_isothermal_zonal_flow" command: - "mpiexec julia --color=yes --project experiments/TestCase/isothermal_zonal_flow.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_surfacebubble" key: "gpu_surfacebubble" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/surfacebubble.jl --diagnostics=default" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_dycoms" key: "gpu_dycoms" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/dycoms.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_dycoms_with_precip" key: "gpu_dycoms_with_precip" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/dycoms.jl --fix-rng-seed --moisture-model nonequilibrium --precipitation-model rain --sim-time 120 --check-asserts yes" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_squall_eq_no_precip" key: "gpu_squall_eq_no_precip" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/squall_line.jl --fix-rng-seed --sim-time 600 --check-asserts yes --moisture-model equilibrium --precipitation-model noprecipitation" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_squall_eq_rain" key: "gpu_squall_eq_rain" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/squall_line.jl --fix-rng-seed --sim-time 600 --check-asserts yes --moisture-model equilibrium --precipitation-model rain" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: 
"gpu:1" - label: "gpu_squall_neq_rain_snow" key: "gpu_squall_neq_rain_snow" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/squall_line.jl --fix-rng-seed --sim-time 600 --check-asserts yes --moisture-model nonequilibrium --precipitation-model rainsnow" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_bomex_les" key: "gpu_bomex_les" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/bomex_les.jl --diagnostics=default --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_sbl_les" key: "gpu_sbl_les" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/stable_bl_les.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_cbl_les" key: "gpu_cbl_les" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/convective_bl_les.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_cfsite" key: "gpu_cfsite" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/cfsite_hadgem2-a_07_amip.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_bomex_single_stack" key: "gpu_bomex_single_stack" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/bomex_single_stack.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_bomex_single_stack_nonequil" key: "gpu_bomex_single_stack_nonequil" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/bomex_single_stack.jl --moisture-model nonequilibrium --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_bomex_edmf" key: "gpu_bomex_edmf" command: - "mpiexec julia --color=yes --project test/Atmos/EDMF/bomex_edmf.jl --diagnostics=default --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 
slurm_gres: "gpu:1" - label: "gpu_ekman" key: "gpu_ekman" command: - "mpiexec julia --color=yes --project test/Atmos/EDMF/ekman_layer.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_sbl_edmf" key: "gpu_sbl_edmf" command: - "mpiexec julia --color=yes --project test/Atmos/EDMF/stable_bl_edmf.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "cpu_sbl_edmf_fvm" key: "cpu_sbl_edmf_fvm" command: - "mpiexec julia --color=yes --project test/Atmos/EDMF/stable_bl_edmf_fvm.jl --fix-rng-seed" agents: config: cpu queue: central slurm_ntasks: 1 - label: "gpu_sbl_an1d" key: "gpu_sbl_an1d" command: - "mpiexec julia --color=yes --project test/Atmos/EDMF/stable_bl_anelastic1d.jl --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "cpu_sbl_edmf_coupled" key: "cpu_sbl_edmf_coupled" command: - "mpiexec julia --color=yes --project test/Atmos/EDMF/stable_bl_coupled_edmf_an1d.jl --fix-rng-seed" agents: config: cpu queue: central slurm_ntasks: 1 - label: "gpu_bomex_bulk_sfc_flux" key: "gpu_bomex_bulk_sfc_flux" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/bomex_les.jl --surface-flux=bulk --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_taylor_green" key: "gpu_taylor_green" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/taylor_green.jl --diagnostics=default --fix-rng-seed" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_rising_bubble_theta_formulation" key: "gpu_rising_bubble_theta_formulation" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/rising_bubble_theta_formulation.jl" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_rising_bubble_bryan_mrrk" key: "gpu_rising_bubble_bryan_mrrk" command: - "mpiexec julia --color=yes --project 
experiments/AtmosLES/rising_bubble_bryan.jl --fast-method=MultirateRungeKutta " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_rising_bubble_bryan_ark" key: "gpu_rising_bubble_bryan_ark" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/rising_bubble_bryan.jl --fast-method=AdditiveRungeKutta " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_rising_bubble_bryan_mis" key: "gpu_rising_bubble_bryan_mis" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/rising_bubble_bryan.jl --fast-method=MultirateInfinitesimalStep " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_schar_scalar_advection" key: "gpu_schar_scalar_advection" command: - "mpiexec julia --color=yes --project experiments/AtmosLES/schar_scalar_advection.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_test_windstress_long" key: "gpu_test_windstress_long" command: - "mpiexec julia --color=yes --project test/Ocean/HydrostaticBoussinesq/test_windstress_long.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_test_ocean_gyre_long" key: "gpu_test_ocean_gyre_long" command: - "mpiexec julia --color=yes --project test/Ocean/HydrostaticBoussinesq/test_ocean_gyre_long.jl " agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_simple_box_ivd" key: "gpu_simple_box_ivd" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/simple_box_ivd.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_simple_dbl_gyre" key: "gpu_simple_dbl_gyre" command: - "mpiexec julia --color=yes --project test/Ocean/SplitExplicit/simple_dbl_gyre.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_discrete_hydrostatic_balance" key: "gpu_discrete_hydrostatic_balance" command: - "mpiexec julia --color=yes 
--project test/Atmos/Model/discrete_hydrostatic_balance.jl" agents: config: gpu queue: central slurm_ntasks: 3 slurm_gres: "gpu:1" - label: "gpu_haverkamp_test" key: "gpu_haverkamp_test" command: - "mpiexec julia --color=yes --project test/Land/Model/haverkamp_test.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_soil_params" key: "gpu_soil_params" command: - "mpiexec julia --color=yes --project test/Land/Model/soil_heterogeneity.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_stable_bl_edmf_implicit_test" key: "gpu_stable_bl_edmf_implicit_test" command: - "mpiexec julia --color=yes --project test/Atmos/EDMF/stable_bl_single_stack_implicit.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_heat_analytic_unit_test" key: "gpu_heat_analytic_unit_test" command: - "mpiexec julia --color=yes --project test/Land/Model/heat_analytic_unit_test.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_soil_test_bc" key: "gpu_soil_test_bc" command: - "mpiexec julia --color=yes --project test/Land/Model/test_bc.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_soil_test_bc_3d" key: "gpu_soil_test_bc_3d" command: - "mpiexec julia --color=yes --project test/Land/Model/test_bc_3d.jl " agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_land_overland_flow_vcatchment" key: "gpu_land_overland_flow_vcatchment" command: - "mpiexec julia --color=yes --project test/Land/Model/test_overland_flow_vcatchment.jl" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" - label: "gpu_unittests" key: "gpu_unittests" command: - "julia --color=yes --project test/runtests_gpu.jl" agents: config: gpu queue: central slurm_ntasks: 1 slurm_gres: "gpu:1" ================================================ FILE: .codecov.yml 
================================================
comment: off

================================================
FILE: .dev/.gitignore
================================================
Manifest.toml

================================================
FILE: .dev/Project.toml
================================================
[deps]
JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899"

[compat]
JuliaFormatter = "0.10"

================================================
FILE: .dev/clima_formatter_default_image.jl
================================================
#!/usr/bin/env julia
#! format: off
#
# Called with no arguments will replace the default system image with one that
# includes the JuliaFormatter
#
# Called with a single argument the system image for the formatter will be
# placed in the path specified by the argument (relative to the callers path)

using Pkg
Pkg.add("PackageCompiler")
using PackageCompiler

# Refuse to proceed if a custom default system image is already installed:
# replacing it here would silently clobber whatever the user built it for.
if isfile(PackageCompiler.backup_default_sysimg_path())
    @error """
    A custom default system image already exists. Either restore default with:
        julia -e "using PackageCompiler; PackageCompiler.restore_default_sysimage()"
    or use the script
        $(abspath(joinpath(@__DIR__, "..", ".dev", "clima_formatter_image.jl")))
    which will use a custom path for the system image
    """
    exit(1)
end

# If a current Manifest exists for the formatter we remove it so that we have
# the latest version
rm(joinpath(@__DIR__, "Manifest.toml"); force = true)

Pkg.activate(joinpath(@__DIR__))

# Build JuliaFormatter into the default system image; precompile.jl drives a
# representative format() call so the formatter code is compiled in.
PackageCompiler.create_sysimage(
    :JuliaFormatter;
    precompile_execution_file = joinpath(@__DIR__, "precompile.jl"),
    replace_default = true,
)

================================================
FILE: .dev/clima_formatter_image.jl
================================================
#!/usr/bin/env julia
#
# Called with no arguments will build the formatter system image
#     PATH_TO_CLIMATEMACHINE/.git/hooks/JuliaFormatterSysimage.so
#
# Called with a single argument the system image for the formatter will be
# placed in the path specified by the argument (relative to the callers path)

# Default target lives next to the git hooks so pre-commit.sysimage can load
# it via `julia -J.git/hooks/JuliaFormatterSysimage.so` (see its shebang).
sysimage_path = abspath(
    isempty(ARGS) ?
    joinpath(@__DIR__, "..", ".git", "hooks", "JuliaFormatterSysimage.so") :
    ARGS[1],
)

@info """
Creating system image object file at:
    $(sysimage_path)
"""

using Pkg
Pkg.add("PackageCompiler")
using PackageCompiler

# If a current Manifest exists for the formatter we remove it so that we have
# the latest version
rm(joinpath(@__DIR__, "Manifest.toml"); force = true)

Pkg.activate(joinpath(@__DIR__))

PackageCompiler.create_sysimage(
    :JuliaFormatter;
    precompile_execution_file = joinpath(@__DIR__, "precompile.jl"),
    sysimage_path = sysimage_path,
)

================================================
FILE: .dev/clima_formatter_options.jl
================================================
# Shared JuliaFormatter keyword options used by climaformat.jl, the git
# hooks, and the sysimage precompile driver.
clima_formatter_options = (
    indent = 4,
    margin = 80,
    always_for_in = true,
    whitespace_typedefs = true,
    whitespace_ops_in_indices = true,
    remove_extra_newlines = false,
)

================================================
FILE: .dev/climaformat.jl
================================================
#!/usr/bin/env julia
#
# This is an adapted version of format.jl from JuliaFormatter with the
# following license:
#
# MIT License
# Copyright (c) 2019 Dominique Luna
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
#

using Pkg
Pkg.activate(@__DIR__)
Pkg.instantiate()

using JuliaFormatter

include("clima_formatter_options.jl")

help = """
Usage: climaformat.jl [flags] [FILE/PATH]...

Formats the given julia files using the CLIMA formatting options.
If paths are given it will format the julia files in the paths.
Otherwise, it will format all changed julia files.

    -v, --verbose
        Print the name of the files being formatted with relevant details.

    -h, --help
        Print this message.
"""

# Parse recognized flags out of `args` in place: each matched flag is removed
# from `args` (leaving only file paths behind) and recorded as `true` in the
# returned Dict. An unrecognized option starting with '-' raises an error.
function parse_opts!(args::Vector{String})
    i = 1
    opts = Dict{Symbol, Union{Int, Bool}}()
    while i ≤ length(args)
        arg = args[i]
        if arg[1] != '-'
            # Not a flag; leave it in `args` as a file/path argument.
            i += 1
            continue
        end
        if arg == "-v" || arg == "--verbose"
            opt = :verbose
        elseif arg == "-h" || arg == "--help"
            opt = :help
        else
            error("invalid option $arg")
        end
        if opt in (:verbose, :help)
            opts[opt] = true
            # deleteat! shifts the next element into slot i, so i is
            # deliberately not incremented here.
            deleteat!(args, i)
        end
    end
    return opts
end

opts = parse_opts!(ARGS)

if haskey(opts, :help)
    write(stdout, help)
    exit(0)
end

# With no remaining path arguments, format every tracked *.jl file in the
# repository; otherwise format only the paths given on the command line.
if isempty(ARGS)
    filenames = readlines(`git ls-files "*.jl"`)
else
    filenames = ARGS
end

format(filenames; clima_formatter_options..., opts...)

================================================
FILE: .dev/hooks/pre-commit
================================================
#!/usr/bin/env julia
#
# Called by git-commit with no arguments. This checks to make sure that all
# .jl files are indented correctly before a commit is made.
#
# To enable this hook, make this file executable and copy it in
# $GIT_DIR/hooks.
toplevel_directory = chomp(read(`git rev-parse --show-toplevel`, String))

using Pkg
Pkg.activate(joinpath(toplevel_directory, ".dev"))
Pkg.instantiate()

using JuliaFormatter

include(joinpath(toplevel_directory, ".dev", "clima_formatter_options.jl"))

needs_format = false

# Inspect every staged file; skip deletions (status starting with "D") and
# non-Julia files. `git show :$filename` reads the staged blob, not the
# working tree, so only the content actually being committed is checked.
for diffoutput in split.(readlines(`git diff --name-status --cached`))
    status = diffoutput[1]
    filename = diffoutput[end]
    (!startswith(status, "D") && endswith(filename, ".jl")) || continue

    a = read(`git show :$filename`, String)
    b = format_text(a; clima_formatter_options...)

    if a != b
        fullfilename = joinpath(toplevel_directory, filename)

        @error """File $filename needs to be indented with:
            julia $(joinpath(toplevel_directory, ".dev", "climaformat.jl")) $fullfilename
        and added to the git index via
            git add $fullfilename
        """
        # `global` needed: the assignment happens inside the loop's scope.
        global needs_format = true
    end
end

# A non-zero exit status causes git to abort the commit.
exit(needs_format ? 1 : 0)

================================================
FILE: .dev/hooks/pre-commit.sysimage
================================================
#!/usr/bin/env -S julia -J.git/hooks/JuliaFormatterSysimage.so
#
# Called by git-commit with no arguments. This checks to make sure that all
# .jl files are indented correctly before a commit is made.
#
# To enable this hook, make this file executable and copy it in
# $GIT_DIR/hooks.

toplevel_directory = chomp(read(`git rev-parse --show-toplevel`, String))

using Pkg
Pkg.activate(joinpath(toplevel_directory, ".dev"))
Pkg.instantiate()

using JuliaFormatter

include(joinpath(toplevel_directory, ".dev", "clima_formatter_options.jl"))

needs_format = false

# Same staged-file formatting check as .dev/hooks/pre-commit, but run under
# the prebuilt formatter system image (see the shebang) for fast startup.
for diffoutput in split.(readlines(`git diff --name-status --cached`))
    status = diffoutput[1]
    filename = diffoutput[end]
    (!startswith(status, "D") && endswith(filename, ".jl")) || continue

    a = read(`git show :$filename`, String)
    b = format_text(a; clima_formatter_options...)

    if a != b
        fullfilename = joinpath(toplevel_directory, filename)

        @error """File $filename needs to be indented with:
            julia -J$(joinpath(toplevel_directory, ".git/hooks", "JuliaFormatterSysimage.so")) $(joinpath(toplevel_directory, ".dev", "climaformat.jl")) $fullfilename
        and added to the git index via
            git add $fullfilename
        """
        global needs_format = true
    end
end

exit(needs_format ? 1 : 0)

================================================
FILE: .dev/precompile.jl
================================================
# Precompile driver for the formatter sysimage builds: formatting this very
# file exercises JuliaFormatter's format() path so that it gets compiled
# into the system image.
using JuliaFormatter

include("clima_formatter_options.jl")

format(@__FILE__; clima_formatter_options...)

================================================
FILE: .dev/systemimage/climate_machine_image.jl
================================================
#!/usr/bin/env julia
#
# Called with no arguments will create the system image
#     ClimateMachine.so
# in the `@__DIR__` directory.
#
# Called with a single argument the system image will be placed in the path
# specified by the argument (relative to the callers path)
#
# Called with a specified systemimg path and `true`, the system image will
# compile the climate machine package module (useful for CI)

sysimage_path = isempty(ARGS) ?
    joinpath(@__DIR__, "ClimateMachine.so") : abspath(ARGS[1])
climatemachine_pkg = get(ARGS, 2, "false") == "true"

@info "Creating system image object file at: '$(sysimage_path)'"
@info "Building ClimateMachine into system image: $(climatemachine_pkg)"

start_time = time()

using Pkg
Pkg.add("PackageCompiler")
Pkg.activate(joinpath(@__DIR__, "..", ".."))
Pkg.instantiate(verbose = true)

# Either compile the ClimateMachine package itself (CI mode), or compile all
# of the project's direct dependencies so the package itself can still be
# developed against the resulting image.
pkgs = Symbol[]
if climatemachine_pkg
    push!(pkgs, :ClimateMachine)
else
    append!(
        pkgs,
        [Symbol(v.name) for v in values(Pkg.dependencies()) if v.is_direct_dep],
    )
end

# use package compiler
using PackageCompiler
PackageCompiler.create_sysimage(
    pkgs,
    sysimage_path = sysimage_path,
    precompile_execution_file = joinpath(
        @__DIR__,
        "..",
        "..",
        "test",
        "Numerics",
        "DGMethods",
        "Euler",
        "isentropicvortex.jl",
    ),
)

tot_secs = Int(floor(time() - start_time))
@info "Created system image object file at: $(sysimage_path)"
@info "System image build time: $tot_secs sec"

================================================
FILE: .github/issue_template.md
================================================
### Description


================================================
FILE: .github/pull_request_template.md
================================================
### Description


- [ ] Code follows the [style guidelines](https://clima.github.io/ClimateMachine.jl/latest/DevDocs/CodeStyle/) OR N/A.
- [ ] Unit tests are included OR N/A.
- [ ] Code is exercised in an integration test OR N/A.
- [ ] Documentation has been added/updated OR N/A.
================================================ FILE: .github/workflows/CompatHelper.yml ================================================ name: CompatHelper on: schedule: - cron: '00 * * * *' jobs: CompatHelper: runs-on: ubuntu-latest steps: - uses: julia-actions/setup-julia@latest with: version: 1.5.4 - name: Pkg.add("CompatHelper") run: julia -e 'using Pkg; Pkg.add("CompatHelper")' - name: CompatHelper.main() env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: julia -e 'using CompatHelper; CompatHelper.main()' ================================================ FILE: .github/workflows/Coverage.yaml ================================================ name: Coverage on: schedule: # * is a special character in YAML so you have to quote this string # Run at 2am every day: - cron: '0 2 * * *' jobs: coverage: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2.2.0 - name: Set up Julia uses: julia-actions/setup-julia@latest with: version: 1.5.4 - name: Test with coverage run: | julia --project -e 'using Pkg; Pkg.instantiate()' julia --project -e 'using Pkg; Pkg.test(coverage=true)' - name: Generate coverage file run: julia --project -e 'using Pkg; Pkg.add("Coverage"); using Coverage; LCOV.writefile("coverage-lcov.info", Codecov.process_folder())' if: success() - name: Submit coverage uses: codecov/codecov-action@v1.0.7 with: token: ${{secrets.CODECOV_TOKEN}} if: success() ================================================ FILE: .github/workflows/DocCleanup.yml ================================================ name: Doc Preview Cleanup on: pull_request: types: [closed] jobs: doc-preview-cleanup: runs-on: ubuntu-latest steps: - name: Checkout gh-pages branch uses: actions/checkout@v2 with: ref: gh-pages - name: Delete preview and history run: | git config user.name "Documenter.jl" git config user.email "documenter@juliadocs.github.io" git rm -rf "previews/PR$PRNUM" git commit -m "delete preview" git branch gh-pages-new $(echo "delete history" | git commit-tree HEAD^{tree}) 
env: PRNUM: ${{ github.event.number }} - name: Push changes run: | git push --force origin gh-pages-new:gh-pages ================================================ FILE: .github/workflows/Documenter.yaml ================================================ name: Documentation on: pull_request: paths: - 'docs/**' - 'tutorials/**' - 'src/**' - 'Project.toml' - 'Manifest.toml' jobs: docs-build: runs-on: ubuntu-latest timeout-minutes: 90 steps: - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.0 with: access_token: ${{ github.token }} - uses: actions/checkout@v2.2.0 - name: Install System Dependencies run: | sudo apt-get update sudo apt-get -qq install libxt6 libxrender1 libxext6 libgl1-mesa-glx libqt5widgets5 xvfb - uses: julia-actions/setup-julia@latest with: version: 1.5.4 # https://discourse.julialang.org/t/recommendation-cache-julia-artifacts-in-ci-services/35484 - name: Cache artifacts uses: actions/cache@v1 env: cache-name: cache-artifacts with: path: ~/.julia/artifacts key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} restore-keys: | ${{ runner.os }}-test-${{ env.cache-name }}- ${{ runner.os }}-test- ${{ runner.os }}- - name: Install Julia dependencies env: JULIA_PROJECT: "docs/" run: | julia --project -e 'using Pkg; Pkg.instantiate()' julia --project=docs/ -e 'using Pkg; Pkg.instantiate()' julia --project=docs/ -e 'using Pkg; Pkg.precompile()' - name: Build and deploy # Run with X virtual frame buffer as GR (default backend for Plots.jl) needs # an X session to run without warnings env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} XDG_RUNTIME_DIR: "/home/runner" JULIA_PROJECT: "docs/" CLIMATEMACHINE_DOCS_GENERATE_TUTORIALS: "false" CLIMATEMACHINE_SETTINGS_DISABLE_GPU: "true" CLIMATEMACHINE_SETTINGS_DISABLE_CUSTOM_LOGGER: "true" CLIMATEMACHINE_SETTINGS_FIX_RNG_SEED: "true" run: xvfb-run -- julia --project=docs/ --color=yes docs/make.jl - name: Help!
Documenter Failed run: | cat .github/workflows/doc_build_common_error_messages.md if: failure() ================================================ FILE: .github/workflows/JuliaFormatter.yml ================================================ name: JuliaFormatter on: push: branches: - master - trying - staging tags: '*' pull_request: jobs: format: runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.0 with: access_token: ${{ github.token }} - uses: actions/checkout@v2.2.0 - uses: dorny/paths-filter@v2.9.1 id: filter with: filters: | julia_file_change: - added|modified: '**.jl' - uses: julia-actions/setup-julia@latest if: steps.filter.outputs.julia_file_change == 'true' with: version: 1.5.4 - name: Apply JuliaFormatter if: steps.filter.outputs.julia_file_change == 'true' run: | julia --project=.dev .dev/climaformat.jl . - name: Check formatting diff if: steps.filter.outputs.julia_file_change == 'true' run: | git diff --color=always --exit-code ================================================ FILE: .github/workflows/Linux-UnitTests.yml ================================================ name: Unit Tests on: pull_request: paths: - 'src/**' - 'test/**' - 'Project.toml' - 'Manifest.toml' jobs: test: runs-on: ubuntu-latest timeout-minutes: 60 strategy: fail-fast: true matrix: test-modules: ["Atmos,Common,InputOutput,Utilities", "Driver", "Diagnostics", "Arrays,Numerics/ODESolvers,Numerics/SystemSolvers", "Ocean", "Land",] env: CLIMATEMACHINE_SETTINGS_FIX_RNG_SEED: "true" steps: - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.0 with: access_token: ${{ github.token }} - name: Checkout uses: actions/checkout@v2.2.0 - name: Set up Julia uses: julia-actions/setup-julia@latest with: version: 1.5.4 # https://discourse.julialang.org/t/recommendation-cache-julia-artifacts-in-ci-services/35484 - name: Cache artifacts uses: actions/cache@v1 env: cache-name: cache-artifacts with: path: ~/.julia/artifacts 
key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} restore-keys: | ${{ runner.os }}-test-${{ env.cache-name }}- ${{ runner.os }}-test- ${{ runner.os }}- - name: Install Project Packages run: | julia --project=@. -e 'using Pkg; Pkg.instantiate()' julia --project=@. -e 'using Pkg; Pkg.precompile()' - name: Run Unit Tests env: TEST_MODULES: ${{ matrix.test-modules }} run: | julia --project=@. -e 'using Pkg; Pkg.test(test_args=map(String, split(ENV["TEST_MODULES"], ",")))' ================================================ FILE: .github/workflows/OS-UnitTests.yml ================================================ name: OS Unit Tests on: push: branches: - staging - trying jobs: test-os: timeout-minutes: 210 strategy: fail-fast: true matrix: os: [ubuntu-latest, windows-latest, macos-latest] runs-on: ${{ matrix.os }} # Workaround for OSX MPICH issue: # https://github.com/pmodels/mpich/issues/4710 env: MPICH_INTERFACE_HOSTNAME: "localhost" CLIMATEMACHINE_TEST_RUNMPI_LOCALHOST: "true" CLIMATEMACHINE_SETTINGS_FIX_RNG_SEED: "true" steps: - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.0 with: access_token: ${{ github.token }} - name: Checkout uses: actions/checkout@v2.2.0 # Setup a filter and only run if src/ test/ folder content changes # or project depedencies - uses: dorny/paths-filter@v2 id: filter with: filters: | run_test: - 'src/**' - 'test/**' - 'Project.toml' - 'Manifest.toml' - name: Set up Julia uses: julia-actions/setup-julia@latest if: steps.filter.outputs.run_test == 'true' with: version: 1.5.4 # https://discourse.julialang.org/t/recommendation-cache-julia-artifacts-in-ci-services/35484 - name: Cache artifacts uses: actions/cache@v1 if: steps.filter.outputs.run_test == 'true' env: cache-name: cache-artifacts with: path: ~/.julia/artifacts key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} restore-keys: | ${{ runner.os }}-test-${{ env.cache-name }}- ${{ runner.os }}-test- ${{ 
runner.os }}- - name: Install Project Packages if: steps.filter.outputs.run_test == 'true' run: | julia --project=@. -e 'using Pkg; Pkg.instantiate()' - name: Build System Image if: steps.filter.outputs.run_test == 'true' continue-on-error: true run: | julia --project .dev/systemimage/climate_machine_image.jl ClimateMachine.so true - name: Run Unit Tests if: steps.filter.outputs.run_test == 'true' run: | julia --project -J ClimateMachine.so -e 'using Pkg; Pkg.test()' ================================================ FILE: .github/workflows/PR-Comment.yml ================================================ name: Trigger action on PR comment on: issue_comment: types: [created] jobs: trigger-doc-build: if: ${{ github.event.issue.pull_request && ( github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'COLLABORATOR' ) && contains(github.event.comment.body, '@climabot build docs') }} runs-on: ubuntu-latest steps: - uses: octokit/request-action@v2.0.26 id: get_pr # need sha of commit with: route: GET /repos/{repository}/pulls/{pull_number} repository: ${{ github.repository }} pull_number: ${{ github.event.issue.number }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Trigger Buildkite Pipeline id: buildkite uses: CliMA/buildkite-pipeline-action@master with: access_token: ${{ secrets.BUILDKITE }} pipeline: 'clima/climatemachine-docs' branch: ${{ fromJson(steps.get_pr.outputs.data).head.ref }} commit: ${{ fromJson(steps.get_pr.outputs.data).head.sha }} message: ":github: Triggered by comment on PR #${{ github.event.issue.number }}" env: '{"PULL_REQUEST": ${{ github.event.issue.number }} }' async: true - name: Create comment uses: peter-evans/create-or-update-comment@v1 with: issue-number: ${{ github.event.issue.number }} body: | Docs build created: ${{ steps.buildkite.outputs.web_url }} Preview link: https://clima.github.io/ClimateMachine.jl/previews/PR${{ 
github.event.issue.number}} ================================================ FILE: .github/workflows/doc_build_common_error_messages.md ================================================ # Documenter common warning/error messages: ## General notes - Changing order of API/HowToGuides does not fix unresolved path issues ## no doc found for reference ``` ┌ Warning: no doc found for reference '[`BatchedGeneralizedMinimalResidual`](@ref)' in src/HowToGuides/Numerics/SystemSolvers/IterativeSolvers.md. └ @ Documenter.CrossReferences ~/.julia/packages/Documenter/PLD7m/src/CrossReferences.jl:160 ``` - Missing entry in ```@docs ``` in API ## Reference could not be found ``` ┌ Warning: reference for 'ClimateMachine.ODESolvers.solve!' could not be found in src\APIs\Driver\index.md. └ @ Documenter.CrossReferences C:\Users\kawcz\.julia\packages\Documenter\PLD7m\src\CrossReferences.jl:104 ``` - Missing doc string? ## invalid local link: unresolved path ``` ┌ Warning: invalid local link: unresolved path in APIs/Atmos/AtmosModel.md │ link.text = │ 1-element Array{Any,1}: │ Markdown.Code("", "FlatOrientation") │ link.url = "@ref" ``` - Missing entry in ```@docs ``` for FlatOrientation in API OR - The "code" in the reference must be to actual code and not arbitrary text ## unable to get the binding ``` ┌ Warning: unable to get the binding for 'ClimateMachine.Atmos.AtmosModel.NoOrientation' in `@docs` block in src/APIs/Atmos/AtmosModel.md:9-14 from expression ':(ClimateMachine.Atmos.AtmosModel.NoOrientation)' in module ClimateMachine │ ```@docs │ ClimateMachine.Atmos.AtmosModel.NoOrientation ... │ ``` ... ``` - `ClimateMachine.Atmos.AtmosModel.NoOrientation` should be `ClimateMachine.Atmos.NoOrientation` ## Other useful tips - The syntax is white space sensitive: Do not leave any extra new line between the end of the doc string (denoted by triple double-quotes `"""`) and the code of the defined method / type / module name that you are describing. 
- In the doc string, indent the method / type / module signature and do not indent the descriptive text. - Any method name and the corresponding signature in the doc string have to match 1:1 (be careful of missing/extra exclamation points `!`) ================================================ FILE: .gitignore ================================================ # Temporary *.DS_Store *.swp *.jl.cov *.jl.*.cov *.jl.mem *.DS_Store *~ TAGS # Docs docs/build/ docs/site/ docs/src/tutorials/ docs/src/generated/ docs/transient_dir/generated/ !docs/src/assets/*.png !docs/src/assets/*.svg # Deps src/**/Manifest.toml # Data *.vtk *.dat *.vtu *.pvtu *.nc *.jld2 # Figs *.png *.jpg *.jpeg *.svg # Movies *.gif *.mp4 # Julia System Images *.so ================================================ FILE: LICENSE.md ================================================ Copyright 2019 Climate Modeling Alliance Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================ FILE: Manifest.toml ================================================ # This file is machine-generated - editing it directly is not advised [[AbstractFFTs]] deps = ["LinearAlgebra"] git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0" uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" version = "1.0.1" [[Adapt]] deps = ["LinearAlgebra"] git-tree-sha1 = "ffcfa2d345aaee0ef3d8346a073d5dd03c983ebe" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" version = "3.2.0" [[ArgParse]] deps = ["Logging", "TextWrap"] git-tree-sha1 = "e928ca0a49f7b0564044b39108c70c160f03e05a" uuid = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" version = "1.1.2" [[ArgTools]] git-tree-sha1 = "bdf73eec6a88885256f282d48eafcad25d7de494" uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" version = "1.1.1" [[ArnoldiMethod]] deps = ["LinearAlgebra", "Random", "StaticArrays"] git-tree-sha1 = "f87e559f87a45bece9c9ed97458d3afe98b1ebb9" uuid = "ec485272-7323-5ecc-a04f-4719b315124d" version = "0.1.0" [[ArrayInterface]] deps = ["IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"] git-tree-sha1 = "ce17bad65d0842b34a15fffc8879a9f68f08a67f" uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" version = "3.1.6" [[ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] git-tree-sha1 = "9aa9647b58147a81f7359eacc7d6249ac3a3e3d4" uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" version = "0.6.4" [[ArtifactWrappers]] deps = ["DocStringExtensions", "Downloads", "Pkg"] git-tree-sha1 = "e9b52e63e3ea81a504412807c9426566e26c232d" uuid = "a14bc488-3040-4b00-9dc1-f6467924858a" version = "0.1.1" [[Artifacts]] deps = ["Pkg"] git-tree-sha1 = "c30985d8821e0cd73870b17b0ed0ce6dc44cb744" uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" version = "1.3.0" [[BFloat16s]] deps = ["LinearAlgebra", "Test"] git-tree-sha1 = "4af69e205efc343068dc8722b8dfec1ade89254a" uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b" version = "0.1.0" [[Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" 
[[BenchmarkTools]] deps = ["JSON", "Logging", "Printf", "Statistics", "UUIDs"] git-tree-sha1 = "9e62e66db34540a0c919d72172cc2f642ac71260" uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" version = "0.5.0" [[CEnum]] git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9" uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" version = "0.4.1" [[CFTime]] deps = ["Dates", "Printf"] git-tree-sha1 = "bca6cb6ee746e6485ca4535f6cc29cf3579a0f20" uuid = "179af706-886a-5703-950a-314cd64e0468" version = "0.1.1" [[CLIMAParameters]] deps = ["Test"] git-tree-sha1 = "0801216ee1670a1e5280cdb0e5fda60cc4b992ca" uuid = "6eacf6c3-8458-43b9-ae03-caf5306d3d53" version = "0.2.0" [[CUDA]] deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "DataStructures", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "Libdl", "LinearAlgebra", "Logging", "MacroTools", "NNlib", "Pkg", "Printf", "Random", "Reexport", "Requires", "SparseArrays", "Statistics", "TimerOutputs"] git-tree-sha1 = "6ccc73b2d8b671f7a65c92b5f08f81422ebb7547" uuid = "052768ef-5323-5732-b1bb-66c8b64840ba" version = "2.4.1" [[Cassette]] git-tree-sha1 = "742fbff99a2798f02bd37d25087efb5615b5a207" uuid = "7057c7e9-c182-5462-911a-8362d720325c" version = "0.3.5" [[ChainRulesCore]] deps = ["Compat", "LinearAlgebra", "SparseArrays"] git-tree-sha1 = "644c24cd6344348f1c645efab24b707088be526a" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" version = "0.9.34" [[CloudMicrophysics]] deps = ["CLIMAParameters", "DocStringExtensions", "SpecialFunctions", "Thermodynamics"] git-tree-sha1 = "52c85d99a842a1d9ebf6f36e8dd34d55e903b0b5" uuid = "6a9e3e04-43cd-43ba-94b9-e8782df3c71b" version = "0.3.1" [[CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da" uuid = "944b1d66-785c-5afd-91f1-9de20f533193" version = "0.7.0" [[Combinatorics]] git-tree-sha1 = "08c8b6831dc00bfea825826be0bc8336fc369860" uuid = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" version = "1.0.2" [[CommonSolve]] 
git-tree-sha1 = "68a0743f578349ada8bc911a5cbd5a2ef6ed6d1f" uuid = "38540f10-b2f7-11e9-35d8-d573e4eb0ff2" version = "0.2.0" [[CommonSubexpressions]] deps = ["MacroTools", "Test"] git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7" uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" version = "0.3.0" [[Compat]] deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] git-tree-sha1 = "919c7f3151e79ff196add81d7f4e45d91bbf420b" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" version = "3.25.0" [[CompilerSupportLibraries_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "8e695f735fca77e9708e795eda62afdb869cbb70" uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" version = "0.3.4+0" [[ConstructionBase]] deps = ["LinearAlgebra"] git-tree-sha1 = "48920211c95a6da1914a06c44ec94be70e84ffff" uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" version = "1.1.0" [[Coverage]] deps = ["CoverageTools", "HTTP", "JSON", "LibGit2", "MbedTLS"] git-tree-sha1 = "a485b62b1ce53368e3a1a3a8dc0c39a61b9d3d9c" uuid = "a2441757-f6aa-5fb2-8edb-039e3f45d037" version = "1.2.0" [[CoverageTools]] deps = ["LibGit2"] git-tree-sha1 = "08b72d2f2154e33dc2aeb1bfcd4a83cb283abd4f" uuid = "c36e975a-824b-4404-a568-ef97ca766997" version = "1.2.2" [[Crayons]] git-tree-sha1 = "3f71217b538d7aaee0b69ab47d9b7724ca8afa0d" uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f" version = "4.0.4" [[CubedSphere]] deps = ["Elliptic", "Printf", "Rotations", "TaylorSeries", "Test"] git-tree-sha1 = "f66fabd1ee5df59a7ba47c7873a6332c19e0c03f" uuid = "7445602f-e544-4518-8976-18f8e8ae6cdb" version = "0.2.0" [[DataAPI]] git-tree-sha1 = "dfb3b7e89e395be1e25c2ad6d7690dc29cc53b1d" uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" version = "1.6.0" [[DataStructures]] deps = ["Compat", "InteractiveUtils", 
"OrderedCollections"] git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677" uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" version = "0.18.9" [[DataValueInterfaces]] git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" version = "1.0.0" [[Dates]] deps = ["Printf"] uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" [[DelimitedFiles]] deps = ["Mmap"] uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" [[Dierckx]] deps = ["Dierckx_jll"] git-tree-sha1 = "5fefbe52e9a6e55b8f87cb89352d469bd3a3a090" uuid = "39dd38d3-220a-591b-8e3c-4c3a8c710a94" version = "0.5.1" [[Dierckx_jll]] deps = ["CompilerSupportLibraries_jll", "Libdl", "Pkg"] git-tree-sha1 = "a580560f526f6fc6973e8bad2b036514a4e3b013" uuid = "cd4c43a9-7502-52ba-aa6d-59fb2a88580b" version = "0.0.1+0" [[DiffEqBase]] deps = ["ArrayInterface", "ChainRulesCore", "DataStructures", "DocStringExtensions", "FunctionWrappers", "IterativeSolvers", "LabelledArrays", "LinearAlgebra", "Logging", "MuladdMacro", "NonlinearSolve", "Parameters", "Printf", "RecursiveArrayTools", "RecursiveFactorization", "Reexport", "Requires", "SciMLBase", "SparseArrays", "StaticArrays", "Statistics", "SuiteSparse", "ZygoteRules"] git-tree-sha1 = "c2ff625248a0967adff1dc1f79c6a41e2531f081" uuid = "2b5f629d-d688-5b77-993f-72d75c75574e" version = "6.57.8" [[DiffResults]] deps = ["StaticArrays"] git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805" uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" version = "1.0.3" [[DiffRules]] deps = ["NaNMath", "Random", "SpecialFunctions"] git-tree-sha1 = "214c3fcac57755cfda163d91c58893a8723f93e9" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" version = "1.0.2" [[DispatchedTuples]] git-tree-sha1 = "4ccc236f2e2f6e4a15b093d76184ffded23d211f" uuid = "508c55e1-51b4-41fd-a5ca-7eb0327d070d" version = "0.2.0" [[Distances]] deps = ["LinearAlgebra", "Statistics"] git-tree-sha1 = "366715149014943abd71aa647a07a43314158b2d" uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" version = 
"0.10.2" [[Distributed]] deps = ["Random", "Serialization", "Sockets"] uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" [[Distributions]] deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns"] git-tree-sha1 = "e64debe8cd174cc52d7dd617ebc5492c6f8b698c" uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" version = "0.24.15" [[DocStringExtensions]] deps = ["LibGit2", "Markdown", "Pkg", "Test"] git-tree-sha1 = "50ddf44c53698f5e784bbebb3f4b21c5807401b1" uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" version = "0.8.3" [[DoubleFloats]] deps = ["GenericSVD", "GenericSchur", "LinearAlgebra", "Polynomials", "Printf", "Quadmath", "Random", "Requires", "SpecialFunctions"] git-tree-sha1 = "d5cb090c9f59e5024e0c94be0714c5de8ff5dc99" uuid = "497a8b3b-efae-58df-a0af-a86822472b78" version = "1.1.18" [[Downloads]] deps = ["ArgTools", "LibCURL", "NetworkOptions"] git-tree-sha1 = "5de8c54d269fd7ab430656c27de73e63eb07a979" uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" version = "1.4.0" [[Elliptic]] git-tree-sha1 = "71c79e77221ab3a29918aaf6db4f217b89138608" uuid = "b305315f-e792-5b7a-8f41-49f472929428" version = "1.0.1" [[ExponentialUtilities]] deps = ["LinearAlgebra", "Printf", "Requires", "SparseArrays"] git-tree-sha1 = "712cb5af8db62836913970ee035a5fa742986f00" uuid = "d4d017d3-3776-5f7e-afef-a10c40355c18" version = "1.8.1" [[ExprTools]] git-tree-sha1 = "10407a39b87f29d47ebaca8edbc75d7c302ff93e" uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04" version = "0.1.3" [[EzXML]] deps = ["Printf", "XML2_jll"] git-tree-sha1 = "0fa3b52a04a4e210aeb1626def9c90df3ae65268" uuid = "8f5d6c58-4d21-5cfd-889c-e3ad7ee6a615" version = "1.1.0" [[FFTW]] deps = ["AbstractFFTs", "FFTW_jll", "IntelOpenMP_jll", "Libdl", "LinearAlgebra", "MKL_jll", "Reexport"] git-tree-sha1 = "1b48dbde42f307e48685fa9213d8b9f8c0d87594" uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" version = "1.3.2" [[FFTW_jll]] deps = ["Artifacts", 
"JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "3676abafff7e4ff07bbd2c42b3d8201f31653dcc" uuid = "f5851436-0d7a-5f13-b9de-f02708fd171a" version = "3.3.9+8" [[FastClosures]] git-tree-sha1 = "acebe244d53ee1b461970f8910c235b259e772ef" uuid = "9aa1b823-49e4-5ca5-8b0f-3971ec8bab6a" version = "0.3.2" [[FileIO]] deps = ["Pkg", "Requires", "UUIDs"] git-tree-sha1 = "9cdfbf5c0ed88ad0dcdb02544416c8e5a73addef" uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" version = "1.6.4" [[FillArrays]] deps = ["LinearAlgebra", "Random", "SparseArrays"] git-tree-sha1 = "31939159aeb8ffad1d4d8ee44d07f8558273120a" uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" version = "0.11.7" [[FiniteDiff]] deps = ["ArrayInterface", "LinearAlgebra", "Requires", "SparseArrays", "StaticArrays"] git-tree-sha1 = "f6f80c8f934efd49a286bb5315360be66956dfc4" uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" version = "2.8.0" [[Formatting]] deps = ["Printf"] git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8" uuid = "59287772-0a20-5a39-b81b-1366585eb4c0" version = "0.4.2" [[ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "NaNMath", "Random", "SpecialFunctions", "StaticArrays"] git-tree-sha1 = "c68fb7481b71519d313114dca639b35262ff105f" uuid = "f6369f11-7733-5829-9624-2563aa707210" version = "0.10.17" [[FunctionWrappers]] git-tree-sha1 = "241552bc2209f0fa068b6415b1942cc0aa486bcc" uuid = "069b7b12-0de2-55c6-9aab-29f3d0a68a2e" version = "1.1.2" [[Future]] deps = ["Random"] uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[GPUArrays]] deps = ["AbstractFFTs", "Adapt", "LinearAlgebra", "Printf", "Random", "Serialization"] git-tree-sha1 = "f99a25fe0313121f2f9627002734c7d63b4dd3bd" uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" version = "6.2.0" [[GPUCompiler]] deps = ["DataStructures", "InteractiveUtils", "LLVM", "Libdl", "Scratch", "Serialization", "TimerOutputs", "UUIDs"] git-tree-sha1 = "c853c810b52a80f9aad79ab109207889e57f41ef" uuid = "61eb1bfa-7361-4325-ad38-22787b887f55" version = "0.8.3" 
[[GaussQuadrature]] deps = ["SpecialFunctions"] git-tree-sha1 = "ce3079d0172eaa258f31c30dec9ae045092447d9" uuid = "d54b0c1a-921d-58e0-8e36-89d8069c0969" version = "0.5.5" [[GenericSVD]] deps = ["LinearAlgebra"] git-tree-sha1 = "62909c3eda8a25b5673a367d1ad2392ebb265211" uuid = "01680d73-4ee2-5a08-a1aa-533608c188bb" version = "0.3.0" [[GenericSchur]] deps = ["LinearAlgebra", "Printf"] git-tree-sha1 = "372e48d7f3ced17fdc888a841bcce77be417ce57" uuid = "c145ed77-6b09-5dd9-b285-bf645a82121e" version = "0.5.0" [[HDF5_jll]] deps = ["Artifacts", "JLLWrappers", "LibCURL_jll", "Libdl", "OpenSSL_jll", "Pkg", "Zlib_jll"] git-tree-sha1 = "fd83fa0bde42e01952757f01149dd968c06c4dba" uuid = "0234f1f7-429e-5d53-9886-15a909be8d59" version = "1.12.0+1" [[HTTP]] deps = ["Base64", "Dates", "IniFile", "MbedTLS", "NetworkOptions", "Sockets", "URIs"] git-tree-sha1 = "c9f380c76d8aaa1fa7ea9cf97bddbc0d5b15adc2" uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" version = "0.9.5" [[Hwloc]] deps = ["Hwloc_jll"] git-tree-sha1 = "ffdcd4272a7cc36442007bca41aa07ca3cc5fda4" uuid = "0e44f5e4-bd66-52a0-8798-143a42290a1d" version = "1.3.0" [[Hwloc_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "aac91e34ef4c166e0857e3d6052a3467e5732ceb" uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8" version = "2.4.1+0" [[IOCapture]] deps = ["Logging"] git-tree-sha1 = "1868e4e7ad2f93d8de0904d89368c527b46aa6a1" uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" version = "0.2.1" [[IfElse]] git-tree-sha1 = "28e837ff3e7a6c3cdb252ce49fb412c8eb3caeef" uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173" version = "0.1.0" [[Inflate]] git-tree-sha1 = "f5fc07d4e706b84f72d54eedcc1c13d92fb0871c" uuid = "d25df0c9-e2be-5dd7-82c8-3ad0b3e990b9" version = "0.1.2" [[IniFile]] deps = ["Test"] git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8" uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f" version = "0.5.0" [[IntelOpenMP_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = 
"d979e54b71da82f3a65b62553da4fc3d18c9004c" uuid = "1d5cc7b8-4909-519e-a0f8-d0f5ad9712d0" version = "2018.0.3+2" [[InteractiveUtils]] deps = ["Markdown"] uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" [[Intervals]] deps = ["Dates", "Printf", "RecipesBase", "Serialization", "TimeZones"] git-tree-sha1 = "323a38ed1952d30586d0fe03412cde9399d3618b" uuid = "d8418881-c3e1-53bb-8760-2df7ec849ed5" version = "1.5.0" [[IterativeSolvers]] deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"] git-tree-sha1 = "6f5ef3206d9dc6510a8b8e2334b96454a2ade590" uuid = "42fd0dbc-a981-5370-80f2-aaf504508153" version = "0.9.0" [[IteratorInterfaceExtensions]] git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856" uuid = "82899510-4779-5014-852e-03e436cf321d" version = "1.0.0" [[JLD2]] deps = ["CodecZlib", "DataStructures", "MacroTools", "Mmap", "Pkg", "Printf", "Requires", "UUIDs"] git-tree-sha1 = "b8343a7f96591404ade118b3a7014e1a52062465" uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" version = "0.4.2" [[JLLWrappers]] git-tree-sha1 = "a431f5f2ca3f4feef3bd7a5e94b8b8d4f2f647a0" uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" version = "1.2.0" [[JSON]] deps = ["Dates", "Mmap", "Parsers", "Unicode"] git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" version = "0.21.1" [[KernelAbstractions]] deps = ["Adapt", "CUDA", "Cassette", "InteractiveUtils", "MacroTools", "SpecialFunctions", "StaticArrays", "UUIDs"] git-tree-sha1 = "ee7f03c23d874c8353813a44315daf82a1e82046" uuid = "63c18a36-062a-441e-b654-da1e3ab1ce7c" version = "0.5.3" [[LLVM]] deps = ["CEnum", "Libdl", "Printf", "Unicode"] git-tree-sha1 = "b616937c31337576360cb9fb872ec7633af7b194" uuid = "929cbde3-209d-540e-8aea-75f648917ca0" version = "3.6.0" [[LabelledArrays]] deps = ["ArrayInterface", "LinearAlgebra", "MacroTools", "StaticArrays"] git-tree-sha1 = "5e288800819c323de5897fa6d5a002bdad54baf7" uuid = "2ee39098-c373-598a-b85f-a56591580800" version = "1.5.0" 
[[LambertW]] git-tree-sha1 = "2d9f4009c486ef676646bca06419ac02061c088e" uuid = "984bce1d-4616-540c-a9ee-88d1112d94c9" version = "0.4.5" [[LazyArrays]] deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra", "MacroTools", "MatrixFactorizations", "SparseArrays", "StaticArrays"] git-tree-sha1 = "91abe45baaaf05f855215b3221d02f06f96734e1" uuid = "5078a376-72f3-5289-bfd5-ec5146d43c02" version = "0.20.9" [[LazyArtifacts]] deps = ["Pkg"] git-tree-sha1 = "4bb5499a1fc437342ea9ab7e319ede5a457c0968" uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" version = "1.3.0" [[LibCURL]] deps = ["LibCURL_jll", "MozillaCACerts_jll"] git-tree-sha1 = "cdbe7465ab7b52358804713a53c7fe1dac3f8a3f" uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" version = "0.6.3" [[LibCURL_jll]] deps = ["LibSSH2_jll", "Libdl", "MbedTLS_jll", "Pkg", "Zlib_jll", "nghttp2_jll"] git-tree-sha1 = "897d962c20031e6012bba7b3dcb7a667170dad17" uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" version = "7.70.0+2" [[LibGit2]] deps = ["Printf"] uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" [[LibSSH2_jll]] deps = ["Libdl", "MbedTLS_jll", "Pkg"] git-tree-sha1 = "717705533148132e5466f2924b9a3657b16158e8" uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" version = "1.9.0+3" [[Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" [[Libiconv_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "cba7b560fcc00f8cd770fa85a498cbc1d63ff618" uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" version = "1.16.0+8" [[LightGraphs]] deps = ["ArnoldiMethod", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", "Statistics"] git-tree-sha1 = "432428df5f360964040ed60418dd5601ecd240b6" uuid = "093fc24a-ae57-5d10-9952-331d41423f4d" version = "1.3.5" [[LightXML]] deps = ["Libdl", "XML2_jll"] git-tree-sha1 = "e129d9391168c677cd4800f5c0abb1ed8cb3794f" uuid = "9c8b4983-aa76-5018-a973-4c85ecc9e179" version = "0.9.0" [[LineSearches]] deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", 
"Parameters", "Printf"] git-tree-sha1 = "f27132e551e959b3667d8c93eae90973225032dd" uuid = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" version = "7.1.1" [[LinearAlgebra]] deps = ["Libdl"] uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[Literate]] deps = ["Base64", "IOCapture", "JSON", "REPL"] git-tree-sha1 = "501a1a74a0c825037860d36d87d703e987d39dbc" uuid = "98b081ad-f1c9-55d3-8b20-4c87d4299306" version = "2.8.1" [[Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" [[LoopVectorization]] deps = ["ArrayInterface", "DocStringExtensions", "IfElse", "LinearAlgebra", "OffsetArrays", "Requires", "SLEEFPirates", "ThreadingUtilities", "UnPack", "VectorizationBase"] git-tree-sha1 = "5684e4aafadaf668dce27f12d67df4888fa58181" uuid = "bdcacae8-1622-11e9-2a5c-532679323890" version = "0.11.2" [[MKL_jll]] deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] git-tree-sha1 = "5455aef09b40e5020e1520f551fa3135040d4ed0" uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7" version = "2021.1.1+2" [[MPI]] deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Pkg", "Random", "Requires", "Serialization", "Sockets"] git-tree-sha1 = "38d0d0255db2316077f7d5dcf8f40c3940e8d534" uuid = "da04e1cc-30fd-572f-bb4f-1f8673147195" version = "0.17.0" [[MPICH_jll]] deps = ["CompilerSupportLibraries_jll", "Libdl", "Pkg"] git-tree-sha1 = "4d37f1e07b4e2a74462eebf9ee48c626d15ffdac" uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4" version = "3.3.2+10" [[MacroTools]] deps = ["Markdown", "Random"] git-tree-sha1 = "6a8a2a625ab0dea913aba95c11370589e0239ff0" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" version = "0.5.6" [[Markdown]] deps = ["Base64"] uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" [[MatrixFactorizations]] deps = ["ArrayLayouts", "LinearAlgebra", "Printf", "Random"] git-tree-sha1 = "4154951579535cfba4d716b96dedd9d0beaefcb9" uuid = "a3b82374-2e81-5b9e-98ce-41277c0e4c87" version = "0.8.2" [[MbedTLS]] deps = ["Dates", 
"MbedTLS_jll", "Random", "Sockets"] git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe" uuid = "739be429-bea8-5141-9913-cc70e7f3736d" version = "1.0.3" [[MbedTLS_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "0eef589dd1c26a3ac9d753fe1a8bcad63f956fa6" uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" version = "2.16.8+1" [[MicrosoftMPI_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "e5c90234b3967684c9c6f87b4a54549b4ce21836" uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf" version = "10.1.3+0" [[Missings]] deps = ["DataAPI"] git-tree-sha1 = "f8c673ccc215eb50fcadb285f522420e29e69e1c" uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" version = "0.4.5" [[Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" [[Mocking]] deps = ["ExprTools"] git-tree-sha1 = "916b850daad0d46b8c71f65f719c49957e9513ed" uuid = "78c3b35d-d492-501b-9361-3d52fe80e533" version = "0.7.1" [[MozillaCACerts_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "bbcac5afd9049834366c3b68d792971e3d981799" uuid = "14a3606d-f60d-562e-9121-12d972cd8159" version = "2020.10.14+0" [[MuladdMacro]] git-tree-sha1 = "c6190f9a7fc5d9d5915ab29f2134421b12d24a68" uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221" version = "0.2.2" [[NCDatasets]] deps = ["CFTime", "DataStructures", "Dates", "NetCDF_jll", "Printf"] git-tree-sha1 = "b71d83c87d80f5c54c55a7a9a3aa42bf931c72aa" uuid = "85f8d34a-cbdd-5861-8df4-14fed0d494ab" version = "0.11.3" [[NLSolversBase]] deps = ["DiffResults", "Distributed", "FiniteDiff", "ForwardDiff"] git-tree-sha1 = "50608f411a1e178e0129eab4110bd56efd08816f" uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" version = "7.8.0" [[NLsolve]] deps = ["Distances", "LineSearches", "LinearAlgebra", "NLSolversBase", "Printf", "Reexport"] git-tree-sha1 = "019f12e9a1a7880459d0173c182e6a99365d7ac1" uuid = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" version = "4.5.1" [[NNlib]] deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", 
"Statistics"] git-tree-sha1 = "ab1d43fead2ecb9aa5ae460d3d547c2cf8d89461" uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" version = "0.7.17" [[NaNMath]] git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb" uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" version = "0.3.5" [[NetCDF_jll]] deps = ["Artifacts", "HDF5_jll", "JLLWrappers", "LibCURL_jll", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Pkg", "Zlib_jll", "nghttp2_jll"] git-tree-sha1 = "d5835f95aea3b93965a1a7c06de9aace8cb82d99" uuid = "7243133f-43d8-5620-bbf4-c2c921802cf3" version = "400.701.400+0" [[NetworkOptions]] git-tree-sha1 = "ed3157f48a05543cce9b241e1f2815f7e843d96e" uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" version = "1.2.0" [[NonlinearSolve]] deps = ["ArrayInterface", "FiniteDiff", "ForwardDiff", "IterativeSolvers", "LinearAlgebra", "RecursiveArrayTools", "RecursiveFactorization", "Reexport", "SciMLBase", "Setfield", "StaticArrays", "UnPack"] git-tree-sha1 = "ef18e47df4f3917af35be5e5d7f5d97e8a83b0ec" uuid = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" version = "0.3.8" [[NonlinearSolvers]] deps = ["CUDA", "DocStringExtensions", "ForwardDiff"] git-tree-sha1 = "13de2bfa3716485129b59d2fdc1c48904c2cfa15" uuid = "f4b8ab15-8e73-4e04-9661-b5912071d22b" version = "0.1.0" [[OffsetArrays]] deps = ["Adapt"] git-tree-sha1 = "b3dfef5f2be7d7eb0e782ba9146a5271ee426e90" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" version = "1.6.2" [[OpenMPI_jll]] deps = ["Libdl", "Pkg"] git-tree-sha1 = "41b983e26a7ab8c9bf05f7d70c274b817d541b46" uuid = "fe0851c0-eecd-5654-98d4-656369965a5c" version = "4.0.2+2" [[OpenSSL_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "71bbbc616a1d710879f5a1021bcba65ffba6ce58" uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" version = "1.1.1+6" [[OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "9db77584158d0ab52307f8c04f8e7c08ca76b5b3" uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" version = "0.5.3+4" 
[[OrderedCollections]] git-tree-sha1 = "4fa2ba51070ec13fcc7517db714445b4ab986bdf" uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" version = "1.4.0" [[OrdinaryDiffEq]] deps = ["Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "ExponentialUtilities", "FastClosures", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "Logging", "MacroTools", "MuladdMacro", "NLsolve", "RecursiveArrayTools", "Reexport", "SparseArrays", "SparseDiffTools", "StaticArrays", "UnPack"] git-tree-sha1 = "d22a75b8ae5b77543c4e1f8eae1ff01ce1f64453" uuid = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" version = "5.52.2" [[PDMats]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] git-tree-sha1 = "f82a0e71f222199de8e9eb9a09977bd0767d52a0" uuid = "90014a1f-27ba-587c-ab20-58faa44d9150" version = "0.11.0" [[PackageCompiler]] deps = ["Libdl", "Pkg", "UUIDs"] git-tree-sha1 = "d448727c4b86be81b225b738c88d30334fda6779" uuid = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d" version = "1.2.5" [[Parameters]] deps = ["OrderedCollections", "UnPack"] git-tree-sha1 = "2276ac65f1e236e0a6ea70baff3f62ad4c625345" uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" version = "0.12.2" [[Parsers]] deps = ["Dates"] git-tree-sha1 = "c8abc88faa3f7a3950832ac5d6e690881590d6dc" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" version = "1.1.0" [[Pkg]] deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" [[Polynomials]] deps = ["Intervals", "LinearAlgebra", "RecipesBase"] git-tree-sha1 = "c6b8b87670b9e765db3001ffe640e0583a5ec317" uuid = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" version = "2.0.3" [[PrettyTables]] deps = ["Crayons", "Formatting", "Markdown", "Reexport", "Tables"] git-tree-sha1 = "574a6b3ea95f04e8757c0280bb9c29f1a5e35138" uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" version = "0.11.1" [[Printf]] deps = ["Unicode"] uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[QuadGK]] deps = ["DataStructures", "LinearAlgebra"] git-tree-sha1 = 
"12fbe86da16df6679be7521dfb39fbc861e1dc7b" uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" version = "2.4.1" [[Quadmath]] deps = ["Printf", "Random", "Requires"] git-tree-sha1 = "5a8f74af8eae654086a1d058b4ec94ff192e3de0" uuid = "be4d8f0f-7fa4-5f49-b795-2f01399ab2dd" version = "0.5.5" [[REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets"] uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" [[Random]] deps = ["Serialization"] uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[RecipesBase]] git-tree-sha1 = "b3fb709f3c97bfc6e948be68beeecb55a0b340ae" uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" version = "1.1.1" [[RecursiveArrayTools]] deps = ["ArrayInterface", "LinearAlgebra", "RecipesBase", "Requires", "StaticArrays", "Statistics", "ZygoteRules"] git-tree-sha1 = "271a36e18c8806332b7bd0f57e50fcff0d428b11" uuid = "731186ca-8d62-57ce-b412-fbd966d074cd" version = "2.11.0" [[RecursiveFactorization]] deps = ["LinearAlgebra", "LoopVectorization"] git-tree-sha1 = "20f0ad1b2760da770d31be71f777740d25807631" uuid = "f2c3362d-daeb-58d1-803e-2bc74f2840b4" version = "0.1.11" [[Reexport]] git-tree-sha1 = "57d8440b0c7d98fc4f889e478e80f268d534c9d5" uuid = "189a3867-3050-52da-a836-e630ba90ab69" version = "1.0.0" [[Requires]] deps = ["UUIDs"] git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621" uuid = "ae029012-a4dd-5104-9daa-d747884805df" version = "1.1.3" [[Rmath]] deps = ["Random", "Rmath_jll"] git-tree-sha1 = "86c5647b565873641538d8f812c04e4c9dbeb370" uuid = "79098fc4-a85e-5d69-aa6a-4863f24498fa" version = "0.6.1" [[Rmath_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "1b7bf41258f6c5c9c31df8c1ba34c1fc88674957" uuid = "f50d1b31-88e8-58de-be2c-1cc44531875f" version = "0.2.2+2" [[RootSolvers]] deps = ["DocStringExtensions", "ForwardDiff", "Test"] git-tree-sha1 = "0e5b394adc5c6fb39b3964bce2a259a44cc312d3" uuid = "7181ea78-2dcb-4de3-ab41-2b8ab5a31e74" version = "0.2.0" [[Rotations]] deps = ["LinearAlgebra", "StaticArrays", "Statistics"] git-tree-sha1 = 
"2ed8d8a16d703f900168822d83699b8c3c1a5cd8" uuid = "6038ab10-8711-5258-84ad-4b1120ba62dc" version = "1.0.2" [[SHA]] uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" [[SLEEFPirates]] deps = ["IfElse", "Libdl", "VectorizationBase"] git-tree-sha1 = "ab6194c92dcf38036cd9513e4ab12cd76a613da1" uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa" version = "0.6.10" [[SciMLBase]] deps = ["ArrayInterface", "CommonSolve", "Distributed", "DocStringExtensions", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "RecipesBase", "RecursiveArrayTools", "StaticArrays", "Statistics", "Tables", "TreeViews"] git-tree-sha1 = "617d5ade740dc628884b6a33e1b02b9bb950e9b3" uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" version = "1.9.1" [[Scratch]] deps = ["Dates"] git-tree-sha1 = "ad4b278adb62d185bbcb6864dc24959ab0627bf6" uuid = "6c6a2e73-6563-6170-7368-637461726353" version = "1.0.3" [[Serialization]] uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" [[Setfield]] deps = ["ConstructionBase", "Future", "MacroTools", "Requires"] git-tree-sha1 = "d5640fc570fb1b6c54512f0bd3853866bd298b3e" uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46" version = "0.7.0" [[SharedArrays]] deps = ["Distributed", "Mmap", "Random", "Serialization"] uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" [[SimpleTraits]] deps = ["InteractiveUtils", "MacroTools"] git-tree-sha1 = "daf7aec3fe3acb2131388f93a4c409b8c7f62226" uuid = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" version = "0.9.3" [[Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" [[SortingAlgorithms]] deps = ["DataStructures", "Random", "Test"] git-tree-sha1 = "03f5898c9959f8115e30bc7226ada7d0df554ddd" uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c" version = "0.3.1" [[SparseArrays]] deps = ["LinearAlgebra", "Random"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" [[SparseDiffTools]] deps = ["Adapt", "ArrayInterface", "Compat", "DataStructures", "FiniteDiff", "ForwardDiff", "LightGraphs", "LinearAlgebra", "Requires", "SparseArrays", "VertexSafeGraphs"] git-tree-sha1 = 
"d05bc362e3fa1b0e2361594a706fc63ffbd140f3" uuid = "47a9eef4-7e08-11e9-0b38-333d64bd3804" version = "1.13.0" [[SpecialFunctions]] deps = ["ChainRulesCore", "OpenSpecFun_jll"] git-tree-sha1 = "5919936c0e92cff40e57d0ddf0ceb667d42e5902" uuid = "276daf66-3868-5448-9aa4-cd146d93841b" version = "1.3.0" [[Static]] deps = ["IfElse"] git-tree-sha1 = "ddec5466a1d2d7e58adf9a427ba69763661aacf6" uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" version = "0.2.4" [[StaticArrays]] deps = ["LinearAlgebra", "Random", "Statistics"] git-tree-sha1 = "9da72ed50e94dbff92036da395275ed114e04d49" uuid = "90137ffa-7385-5640-81b9-e52037218182" version = "1.0.1" [[StaticNumbers]] deps = ["Requires"] git-tree-sha1 = "a0df7d5ade3fd0f0e6c93ad63facc05b12c40e6a" uuid = "c5e4b96a-f99f-5557-8ed2-dc63ef9b5131" version = "0.3.3" [[Statistics]] deps = ["LinearAlgebra", "SparseArrays"] uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" [[StatsBase]] deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics"] git-tree-sha1 = "a83fa3021ac4c5a918582ec4721bc0cf70b495a9" uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" version = "0.33.4" [[StatsFuns]] deps = ["Rmath", "SpecialFunctions"] git-tree-sha1 = "ced55fd4bae008a8ea12508314e725df61f0ba45" uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" version = "0.9.7" [[SuiteSparse]] deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" [[SurfaceFluxes]] deps = ["CLIMAParameters", "DocStringExtensions", "KernelAbstractions", "NonlinearSolvers", "StaticArrays"] git-tree-sha1 = "0f2685b633ebf751e50842ee3525880f7d043162" uuid = "49b00bb7-8bd4-4f2b-b78c-51cd0450215f" version = "0.1.1" [[TableTraits]] deps = ["IteratorInterfaceExtensions"] git-tree-sha1 = "b1ad568ba658d8cbb3b892ed5380a6f3e781a81e" uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" version = "1.0.0" [[Tables]] deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", 
"LinearAlgebra", "TableTraits", "Test"] git-tree-sha1 = "a9ff3dfec713c6677af435d6a6d65f9744feef67" uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" version = "1.4.1" [[TaylorSeries]] deps = ["InteractiveUtils", "LinearAlgebra", "Markdown", "Requires", "SparseArrays"] git-tree-sha1 = "66f4d1993bae49eeba21a1634b5f65782585a42c" uuid = "6aa5eb33-94cf-58f4-a9d0-e4b2c4fc25ea" version = "0.10.13" [[Test]] deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [[TextWrap]] git-tree-sha1 = "9250ef9b01b66667380cf3275b3f7488d0e25faf" uuid = "b718987f-49a8-5099-9789-dcd902bef87d" version = "1.0.1" [[Thermodynamics]] deps = ["CLIMAParameters", "DocStringExtensions", "ExprTools", "KernelAbstractions", "Random", "RootSolvers"] git-tree-sha1 = "c7d73eae235caffdab85778d8b6c371691ad1b3e" uuid = "b60c26fb-14c3-4610-9d3e-2d17fe7ff00c" version = "0.5.1" [[ThreadingUtilities]] deps = ["VectorizationBase"] git-tree-sha1 = "e3032c97b183e6e2baf4d2cc4fe60c4292a4a707" uuid = "8290d209-cae3-49c0-8002-c8c24d57dab5" version = "0.2.5" [[TimeZones]] deps = ["Dates", "EzXML", "Mocking", "Pkg", "Printf", "RecipesBase", "Serialization", "Unicode"] git-tree-sha1 = "4ba8a9579a243400db412b50300cd61d7447e583" uuid = "f269a46b-ccf7-5d73-abea-4c690281aa53" version = "1.5.3" [[TimerOutputs]] deps = ["Printf"] git-tree-sha1 = "32cdbe6cd2d214c25a0b88f985c9e0092877c236" uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" version = "0.5.8" [[TranscodingStreams]] deps = ["Random", "Test"] git-tree-sha1 = "7c53c35547de1c5b9d46a4797cf6d8253807108c" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" version = "0.9.5" [[TreeViews]] deps = ["Test"] git-tree-sha1 = "8d0d7a3fe2f30d6a7f833a5f19f7c7a5b396eae6" uuid = "a2a6695c-b41b-5b7d-aed9-dbfdeacea5d7" version = "0.3.0" [[URIs]] git-tree-sha1 = "7855809b88d7b16e9b029afd17880930626f54a2" uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" version = "1.2.0" [[UUIDs]] deps = ["Random", "SHA"] uuid = 
"cf7118a7-6976-5b1a-9a39-7adc72f591a4" [[UnPack]] git-tree-sha1 = "387c1f73762231e86e0c9c5443ce3b4a0a9a0c2b" uuid = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" version = "1.0.2" [[Unicode]] uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" [[VectorizationBase]] deps = ["ArrayInterface", "Hwloc", "IfElse", "Libdl", "LinearAlgebra"] git-tree-sha1 = "486842a62c4a1bc23f7c8457d64e683a00d6d0e9" uuid = "3d5dd08c-fd9d-11e8-17fa-ed2836048c2f" version = "0.18.14" [[VertexSafeGraphs]] deps = ["LightGraphs"] git-tree-sha1 = "b9b450c99a3ca1cc1c6836f560d8d887bcbe356e" uuid = "19fa3120-7c27-5ec5-8db8-b0b0aa330d6f" version = "0.1.2" [[WriteVTK]] deps = ["Base64", "CodecZlib", "FillArrays", "LightXML", "Random", "TranscodingStreams"] git-tree-sha1 = "37eef911a9c5211e0ae4362dc0477cfe6c537ffa" uuid = "64499a7a-5c06-52f2-abe2-ccb03c286192" version = "1.9.1" [[XML2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] git-tree-sha1 = "be0db24f70aae7e2b89f2f3092e93b8606d659a6" uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" version = "2.9.10+3" [[Zlib_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "320228915c8debb12cb434c59057290f0834dbf6" uuid = "83775a58-1f1d-513f-b197-d71354ab007a" version = "1.2.11+18" [[ZygoteRules]] deps = ["MacroTools"] git-tree-sha1 = "9e7a1e8ca60b742e508a315c17eef5211e7fbfd7" uuid = "700de1a5-db45-46bc-99cf-38207098b444" version = "0.2.1" [[nghttp2_jll]] deps = ["Libdl", "Pkg"] git-tree-sha1 = "8e2c44ab4d49ad9518f359ed8b62f83ba8beede4" uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" version = "1.40.0+2" ================================================ FILE: Project.toml ================================================ name = "ClimateMachine" uuid = "777c4786-024f-11e9-21a3-85d5d4106250" authors = ["Climate Modeling Alliance"] version = "0.3.0-DEV" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" ArtifactWrappers = "a14bc488-3040-4b00-9dc1-f6467924858a" 
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" CLIMAParameters = "6eacf6c3-8458-43b9-ae03-caf5306d3d53" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" CloudMicrophysics = "6a9e3e04-43cd-43ba-94b9-e8782df3c71b" Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" Coverage = "a2441757-f6aa-5fb2-8edb-039e3f45d037" CubedSphere = "7445602f-e544-4518-8976-18f8e8ae6cdb" Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" Dierckx = "39dd38d3-220a-591b-8e3c-4c3a8c710a94" DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" DispatchedTuples = "508c55e1-51b4-41fd-a5ca-7eb0327d070d" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" DoubleFloats = "497a8b3b-efae-58df-a0af-a86822472b78" Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" Formatting = "59287772-0a20-5a39-b81b-1366585eb4c0" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" GaussQuadrature = "d54b0c1a-921d-58e0-8e36-89d8069c0969" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c" LambertW = "984bce1d-4616-540c-a9ee-88d1112d94c9" LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" NCDatasets = "85f8d34a-cbdd-5861-8df4-14fed0d494ab" NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" NonlinearSolvers = "f4b8ab15-8e73-4e04-9661-b5912071d22b" OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d" Pkg = 
"44cfe95a-1eb2-52ea-b672-e2afdf69b78f" PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" RootSolvers = "7181ea78-2dcb-4de3-ab41-2b8ab5a31e74" Rotations = "6038ab10-8711-5258-84ad-4b1120ba62dc" SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" StaticNumbers = "c5e4b96a-f99f-5557-8ed2-dc63ef9b5131" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" SurfaceFluxes = "49b00bb7-8bd4-4f2b-b78c-51cd0450215f" Thermodynamics = "b60c26fb-14c3-4610-9d3e-2d17fe7ff00c" UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192" [compat] Adapt = "2.0.2, 3.2" ArgParse = "1.1" ArtifactWrappers = "0.1.1" BenchmarkTools = "0.5" CLIMAParameters = "0.2" CUDA = "2.0" CloudMicrophysics = "0.3" Combinatorics = "1.0" Coverage = "1.0" Dierckx = "0.4, 0.5" DiffEqBase = "6.47" DispatchedTuples = "0.2" Distributions = "0.22, 0.23, 0.24" DocStringExtensions = "0.8" DoubleFloats = "1.1" FFTW = "1.2" FileIO = "1.2" Formatting = "0.4" ForwardDiff = "0.10" GaussQuadrature = "0.5" JLD2 = "0.1, 0.2, 0.3, 0.4" KernelAbstractions = "0.4.1, 0.5" LambertW = "0.4" LazyArrays = "0.15, 0.16, 0.18, 0.19, 0.20" Literate = "2.2" MPI = "0.16, 0.17" NCDatasets = "0.10, 0.11" NLsolve = "4.4" NonlinearSolvers = "0.1" OrderedCollections = "1.1" OrdinaryDiffEq = "5.41.0" PackageCompiler = "1.2" PrettyTables = "0.9, 0.10, 0.11" RootSolvers = "0.1, 0.2" SpecialFunctions = "0.10, 1.0" StaticArrays = "0.12, 1.0" StaticNumbers = "0.3.2" SurfaceFluxes = "0.1" Thermodynamics = "0.5.1" UnPack = "1.0" WriteVTK = "1.7" julia = "1.5" [extras] Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] test = ["Test"] ================================================ FILE: README.md ================================================ # ClimateMachine.jl ***NOTE THAT THIS REPO IS NOT CURRENTLY BEING MAINTAINED. 
PLEASE SEE THE FOLLOWING REPOSITORIES:*** - [CliMA/ClimaCore.jl](http://github.com/CliMA/ClimaCore.jl) - [CliMA/ClimaAtmos.jl](http://github.com/CliMA/ClimaAtmos.jl) - [CliMA/Thermodynamics.jl](http://github.com/CliMA/Thermodynamics.jl) - [CliMA/CloudMicrophysics.jl](http://github.com/CliMA/CloudMicrophysics.jl) - [CliMA/SurfaceFluxes.jl](http://github.com/CliMA/SurfaceFluxes.jl) - [CliMA/ClimaLSM.jl](http://github.com/CliMA/ClimaLSM.jl) - [CliMA/Oceananigans.jl](http://github.com/CliMA/Oceananigans.jl) - [CliMA/ClimaCoupler.jl](http://github.com/CliMA/ClimaCoupler.jl) The Climate Machine is a new Earth system model that leverages recent advances in the computational and data sciences to learn directly from a wealth of Earth observations from space and the ground. The Climate Machine will harness more data than ever before, providing a new level of accuracy to predictions of droughts, heat waves, and rainfall extremes. | **Documentation** | [![dev][docs-latest-img]][docs-latest-url] | |----------------------|--------------------------------------------------| | **Docs Build** | [![docs build][docs-bld-img]][docs-bld-url] | | **Unit tests** | [![unit tests][unit-tests-img]][unit-tests-url] | | **Code Coverage** | [![codecov][codecov-img]][codecov-url] | | **Bors** | [![Bors enabled][bors-img]][bors-url] | | **DOI** | [![Zenodo][zenodo-img]][zenodo-url] | [docs-bld-img]: https://github.com/CliMA/ClimateMachine.jl/workflows/Documentation/badge.svg [docs-bld-url]: https://github.com/CliMA/ClimateMachine.jl/actions?query=workflow%3ADocumentation [docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg [docs-latest-url]: https://CliMA.github.io/ClimateMachine.jl/latest/ [unit-tests-img]: https://github.com/CliMA/ClimateMachine.jl/workflows/OS%20Unit%20Tests/badge.svg [unit-tests-url]: https://github.com/CliMA/ClimateMachine.jl/actions?query=workflow%3A%22OS+Unit+Tests%22 [codecov-img]: 
https://codecov.io/gh/CliMA/ClimateMachine.jl/branch/master/graph/badge.svg [codecov-url]: https://codecov.io/gh/CliMA/ClimateMachine.jl [bors-img]: https://bors.tech/images/badge_small.svg [bors-url]: https://app.bors.tech/repositories/11521 [zenodo-img]: https://zenodo.org/badge/162166244.svg [zenodo-url]: https://zenodo.org/badge/latestdoi/162166244 For installation instructions and explanations on how to use the Climate Machine, please look at the [Documentation](https://clima.github.io/ClimateMachine.jl/latest/GettingStarted/Installation/). ================================================ FILE: bors.toml ================================================ status = [ "test-os (ubuntu-latest)", "test-os (windows-latest)", "test-os (macos-latest)", "buildkite/climatemachine-ci", "buildkite/climatemachine-docs", "format" ] delete_merged_branches = true timeout_sec = 12600 block_labels = [ "do-not-merge-yet" ] cut_body_after = " H[1] = F G H I J axes |/ H[2] = K L M N O 0------ X[0] high low where the 15-bit Hilbert integer = `A B C D E F G H I J K L M N O` is stored in `H` This function is based on public domain code from John Skilling which can be found in [Skilling2004](@cite). 
""" function hilbertcode(Y::AbstractArray{T}; bits = 8 * sizeof(T)) where {T} # Below is Skilling's AxestoTranspose X = deepcopy(Y) n = length(X) M = one(T) << (bits - 1) Q = M for j in 1:(bits - 1) P = Q - one(T) for i in 1:n if X[i] & Q != zero(T) X[1] ⊻= P else t = (X[1] ⊻ X[i]) & P X[1] ⊻= t X[i] ⊻= t end end Q >>>= one(T) end for i in 2:n X[i] ⊻= X[i - 1] end t = zero(T) Q = M for j in 1:(bits - 1) if X[n] & Q != zero(T) t ⊻= Q - one(T) end Q >>>= one(T) end for i in 1:n X[i] ⊻= t end # Below we transpose X and store it in H, i.e.: # # X[0] = A D G J M H[0] = A B C D E # X[1] = B E H K N <-------> H[1] = F G H I J # X[2] = C F I L O H[2] = K L M N O # # The 15-bit Hilbert integer is then = A B C D E F G H I J K L M N O H = zero(X) for i in 0:(n - 1), j in 0:(bits - 1) k = i * bits + j bit = (X[n - mod(k, n)] >>> div(k, n)) & one(T) H[n - i] |= (bit << j) end return H end """ centroidtocode(comm::MPI.Comm, elemtocorner; coortocode, CT) Returns a code for each element based on its centroid. These element codes can be used to determine a linear ordering for the partition function. The communicator `comm` is used to calculate the bounding box for representing the centroids in coordinates of type `CT`, defaulting to `CT=UInt64`. These integer coordinates are converted to a code using the function `coortocode`, which defaults to `hilbertcode`. The array containing the element corner coordinates, `elemtocorner`, is used to compute the centroids. `elemtocorner` is a dimension by number of corners by number of elements array. """ function centroidtocode( comm::MPI.Comm, elemtocorner; coortocode = hilbertcode, CT = UInt64, ) (d, nvert, nelem) = size(elemtocorner) centroids = sum(elemtocorner, dims = 2) ./ nvert T = eltype(centroids) centroidmin = (nelem > 0) ? minimum(centroids, dims = 3) : fill(typemax(T), d) centroidmax = (nelem > 0) ? 
maximum(centroids, dims = 3) : fill(typemin(T), d) centroidmin = MPI.Allreduce(centroidmin, min, comm) centroidmax = MPI.Allreduce(centroidmax, max, comm) centroidsize = centroidmax - centroidmin # Fix centroidsize to be nonzero. It can be zero for a couple of reasons. # For example, it will be zero if we have just one element. if iszero(centroidsize) centroidsize = ones(T, d) else for i in 1:d if iszero(centroidsize[i]) centroidsize[i] = maximum(centroidsize) end end end code = Array{CT}(undef, d, nelem) for e in 1:nelem c = (centroids[:, 1, e] .- centroidmin) ./ centroidsize X = CT.(floor.( typemax(CT) .* BigFloat.(c; precision = 16 * sizeof(CT)), )) code[:, e] = coortocode(X) end code end """ brickmesh(x, periodic; part=1, numparts=1; boundary) Generate a brick mesh with coordinates given by the tuple `x` and the periodic dimensions given by the `periodic` tuple. The brick can optionally be partitioned into `numparts` and this returns partition `part`. This is a simple Cartesian partition and further partitioning (e.g, based on a space-filling curve) should be done before the mesh is used for computation. By default boundary faces will be marked with a one and other faces with a zero. Specific boundary numbers can also be passed for each face of the brick in `boundary`. This will mark the nonperiodic brick faces with the given boundary number. # Examples We can build a 3 by 2 element two-dimensional mesh that is periodic in the \$x_2\$-direction with ```jldoctest brickmesh julia> (elemtovert, elemtocoord, elemtobndy, faceconnections) = brickmesh((2:5,4:6), (false,true); boundary=((1,2), (3,4))); ``` This returns the mesh structure for x_2 ^ | 6- 9----10----11----12 | | | | | | | 4 | 5 | 6 | | | | | | 5- 5-----6-----7-----8 | | | | | | | 1 | 2 | 3 | | | | | | 4- 1-----2-----3-----4 | +--|-----|-----|-----|--> x_1 2 3 4 5 The (number of corners by number of elements) array `elemtovert` gives the global vertex number for the corners of each element. 
```jldoctest brickmesh julia> elemtovert 4×6 Array{Int64,2}: 1 2 3 5 6 7 2 3 4 6 7 8 5 6 7 9 10 11 6 7 8 10 11 12 ``` Note that the vertices are listed in Cartesian order. The (dimension by number of corners by number of elements) array `elemtocoord` gives the coordinates of the corners of each element. ```jldoctes brickmesh julia> elemtocoord 2×4×6 Array{Int64,3}: [:, :, 1] = 2 3 2 3 4 4 5 5 [:, :, 2] = 3 4 3 4 4 4 5 5 [:, :, 3] = 4 5 4 5 4 4 5 5 [:, :, 4] = 2 3 2 3 5 5 6 6 [:, :, 5] = 3 4 3 4 5 5 6 6 [:, :, 6] = 4 5 4 5 5 5 6 6 ``` The (number of faces by number of elements) array `elemtobndy` gives the boundary number for each face of each element. A zero will be given for connected faces. ```jldoctest brickmesh julia> elemtobndy 4×6 Array{Int64,2}: 1 0 0 1 0 0 0 0 2 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 ``` Note that the faces are listed in Cartesian order. Finally, the periodic face connections are given in `faceconnections` which is a list of arrays, one for each connection. Each array in the list is given in the format `[e, f, vs...]` where - `e` is the element number; - `f` is the face number; and - `vs` is the global vertices that face associated with. I the example ```jldoctest brickmesh julia> faceconnections 3-element Array{Array{Int64,1},1}: [4, 4, 1, 2] [5, 4, 2, 3] [6, 4, 3, 4] ``` we see that face `4` of element `5` is associated with vertices `[2 3]` (the vertices for face `1` of element `2`). """ function brickmesh( x, periodic; part = 1, numparts = 1, boundary = ntuple(j -> (1, 1), length(x)), ) if boundary isa Matrix boundary = tuple(mapslices(x -> tuple(x...), boundary, dims = 1)...) end @assert length(x) == length(periodic) @assert length(x) >= 1 @assert 1 <= part <= numparts T = promote_type(eltype.(x)...) 
d = length(x) nvert = 2^d nface = 2d nelemdim = length.(x) .- 1 elemlocal = linearpartition(prod(nelemdim), part, numparts) elemtovert = Array{Int}(undef, nvert, length(elemlocal)) elemtocoord = Array{T}(undef, d, nvert, length(elemlocal)) elemtobndy = zeros(Int, nface, length(elemlocal)) faceconnections = Array{Array{Int, 1}}(undef, 0) verts = LinearIndices(ntuple(j -> 1:length(x[j]), d)) elems = CartesianIndices(ntuple(j -> 1:(length(x[j]) - 1), d)) p = reshape(1:nvert, ntuple(j -> 2, d)) fmask = hcat(( p[ntuple( j -> (j == div(f - 1, 2) + 1) ? ((mod(f - 1, 2) + 1):(mod(f - 1, 2) + 1)) : (:), d, )...,][:] for f in 1:nface )...) for (e, ec) in enumerate(elems[elemlocal]) corners = CartesianIndices(ntuple(j -> ec[j]:(ec[j] + 1), d)) for (v, vc) in enumerate(corners) elemtovert[v, e] = verts[vc] for j in 1:d elemtocoord[j, v, e] = x[j][vc[j]] end end for i in 1:d if !periodic[i] && ec[i] == 1 elemtobndy[2 * (i - 1) + 1, e] = boundary[i][1] end if !periodic[i] && ec[i] == nelemdim[i] elemtobndy[2 * (i - 1) + 2, e] = boundary[i][2] end end for i in 1:d if periodic[i] && ec[i] == nelemdim[i] neighcorners = CartesianIndices(ntuple( j -> (i == j) ? (1:2) : (ec[j]:(ec[j] + 1)), d, )) push!( faceconnections, vcat(e, 2i, verts[neighcorners[fmask[:, 2i - 1]]]), ) end end end (elemtovert, elemtocoord, elemtobndy, faceconnections) end """ parallelsortcolumns(comm::MPI.Comm, A; alg::Base.Sort.Algorithm=Base.Sort.DEFAULT_UNSTABLE, lt=isless, by=identity, rev::Union{Bool,Nothing}=nothing) Sorts the columns of the distributed matrix `A`. See the documentation of `sort!` for a description of the keyword arguments. This function assumes `A` has the same number of rows on each MPI rank but can have a different number of columns. 
""" function parallelsortcolumns( comm::MPI.Comm, A; alg::Base.Sort.Algorithm = Base.Sort.DEFAULT_UNSTABLE, lt = isless, by = identity, rev::Union{Bool, Nothing} = nothing, ) m, n = size(A) T = eltype(A) csize = MPI.Comm_size(comm) crank = MPI.Comm_rank(comm) croot = 0 A = sortslices(A, dims = 2, alg = alg, lt = lt, by = by, rev = rev) npivots = clamp(n, 0, csize) pivots = T[A[i, div(n * p, npivots) + 1] for i in 1:m, p in 0:(npivots - 1)] pivotcounts = MPI.Allgather(Cint(length(pivots)), comm) pivots = MPI.Allgatherv!( pivots, VBuffer(similar(pivots, sum(pivotcounts)), pivotcounts), comm, ) pivots = reshape(pivots, m, div(length(pivots), m)) pivots = sortslices(pivots, dims = 2, alg = alg, lt = lt, by = by, rev = rev) # if we don't have any pivots then we must have zero columns if size(pivots) == (m, 0) return A end pivots = [ pivots[i, div(div(length(pivots), m) * r, csize) + 1] for i in 1:m, r in 0:(csize - 1) ] cols = map(i -> view(A, :, i), 1:n) senddispls = Cint[ ( searchsortedfirst(cols, pivots[:, i], lt = lt, by = by, rev = rev) - 1 ) * m for i in 1:csize ] sendcounts = Cint[ (i == csize ? n * m : senddispls[i + 1]) - senddispls[i] for i in 1:csize ] recvcounts = similar(sendcounts) MPI.Alltoall!(UBuffer(sendcounts, 1), MPI.UBuffer(recvcounts, 1), comm) B = similar(A, sum(recvcounts)) MPI.Alltoallv!( VBuffer(A, sendcounts, senddispls), VBuffer(B, recvcounts), comm, ) B = reshape(B, m, div(length(B), m)) sortslices(B, dims = 2, alg = alg, lt = lt, by = by, rev = rev) end """ getpartition(comm::MPI.Comm, elemtocode) Returns an equally weighted partition of a distributed set of elements by sorting their codes given in `elemtocode`. The codes for each element, `elemtocode`, are given as an array with a single entry per local element or as a matrix with a column for each local element. 
The partition is returned as a tuple of three parts:

 - `partsendorder`: permutation of elements into sending order
 - `partsendstarts`: start entries in the send array for each rank
 - `partrecvstarts`: start entries in the receive array for each rank

Note that both `partsendstarts` and `partrecvstarts` are of length
`MPI.Comm_size(comm)+1` where the last entry has the total number of elements
to send or receive, respectively.
"""
getpartition(comm::MPI.Comm, elemtocode::AbstractVector) =
    getpartition(comm, reshape(elemtocode, 1, length(elemtocode)))

function getpartition(comm::MPI.Comm, elemtocode::AbstractMatrix)
    (ncode, nelem) = size(elemtocode)

    csize = MPI.Comm_size(comm)
    crank = MPI.Comm_rank(comm)

    CT = eltype(elemtocode)

    # Build one column per element with rows:
    #   1:ncode    the element code
    #   ncode + 1  original element number
    #   ncode + 2  original rank
    #   ncode + 3  new rank (filled in below)
    A = CT[
        elemtocode                                  # code
        collect(CT, 1:nelem)'                       # original element number
        fill(CT(MPI.Comm_rank(comm)), (1, nelem))   # original rank
        fill(typemax(CT), (1, nelem))               # new rank
    ]
    m, n = size(A)

    # sort by just code
    A = parallelsortcolumns(comm, A)

    # count the distribution of A
    counts = MPI.Allgather(last(size(A)), comm)
    starts = ones(Int, csize + 1)
    for i in 1:csize
        starts[i + 1] = counts[i] + starts[i]
    end

    # loop to determine new rank: split the globally sorted elements into
    # equally weighted pieces, one per rank
    j = range(starts[crank + 1], stop = starts[crank + 2] - 1)
    for r in 0:(csize - 1)
        k = linearpartition(starts[end] - 1, r + 1, csize)
        o = intersect(k, j) .- (starts[crank + 1] - 1)
        A[ncode + 3, o] .= r
    end

    # sort by original rank and code
    A = sortslices(A, dims = 2, by = x -> x[[ncode + 2, (1:ncode)...]])

    # count number of elements that are going to be sent
    sendcounts = zeros(Cint, csize)
    for i in 1:last(size(A))
        sendcounts[A[ncode + 2, i] + 1] += m
    end

    # communicate columns of A to original rank
    recvcounts = similar(sendcounts)
    MPI.Alltoall!(UBuffer(sendcounts, 1), UBuffer(recvcounts, 1), comm)
    B = similar(A, sum(recvcounts))
    MPI.Alltoallv!(VBuffer(A, sendcounts), VBuffer(B, recvcounts), comm)
    B = reshape(B, m, div(length(B), m))

    # check to make sure we didn't drop any elements
    @assert nelem == n == size(B)[2]

    # Build send order/starts from the "new rank" row.
    partsendcounts = zeros(Cint, csize)
    for i in 1:last(size(B))
        partsendcounts[B[ncode + 3, i] + 1] += 1
    end
    partsendstarts = ones(Int, csize + 1)
    for i in 1:csize
        partsendstarts[i + 1] = partsendcounts[i] + partsendstarts[i]
    end
    partsendorder = Int.(B[ncode + 1, :])

    # Receive starts are the transpose of the send counts.
    partrecvcounts = similar(partsendcounts)
    MPI.Alltoall!(UBuffer(partsendcounts, 1), UBuffer(partrecvcounts, 1), comm)
    partrecvstarts = ones(Int, csize + 1)
    for i in 1:csize
        partrecvstarts[i + 1] = partrecvcounts[i] + partrecvstarts[i]
    end

    partsendorder, partsendstarts, partrecvstarts
end

"""
    partition(comm::MPI.Comm, elemtovert, elemtocoord, elemtobndy,
              faceconnections)

This function takes in a mesh (as returned for example by `brickmesh`) and
returns a Hilbert curve based partitioned mesh.
"""
function partition(
    comm::MPI.Comm,
    elemtovert,
    elemtocoord,
    elemtobndy,
    faceconnections,
    globord = [],
)
    (d, nvert, nelem) = size(elemtocoord)

    csize = MPI.Comm_size(comm)
    crank = MPI.Comm_rank(comm)

    nface = 2d
    nfacevert = 2^(d - 1)

    # Here we expand the list of face connections into a structure that is easy
    # to partition.  The cost is extra memory transfer.  If this becomes a
    # bottleneck something more efficient may be implemented.
    #
    elemtofaceconnect =
        zeros(eltype(eltype(faceconnections)), nfacevert, nface, nelem)
    for fc in faceconnections
        elemtofaceconnect[:, fc[2], fc[1]] = fc[3:end]
    end

    # Order elements along a Hilbert curve through their centroids and
    # compute the resulting redistribution.
    elemtocode = centroidtocode(comm, elemtocoord; CT = UInt64)
    sendorder, sendstarts, recvstarts = getpartition(comm, elemtocode)

    # Permute all local element data into sending order.
    elemtovert = elemtovert[:, sendorder]
    elemtocoord = elemtocoord[:, :, sendorder]
    elemtobndy = elemtobndy[:, sendorder]
    elemtofaceconnect = elemtofaceconnect[:, :, sendorder]
    if !isempty(globord)
        globord = globord[sendorder]
    end

    sendcounts = diff(sendstarts)
    recvcounts = similar(sendcounts)
    MPI.Alltoall!(UBuffer(sendcounts, 1), UBuffer(recvcounts, 1), comm)

    # Exchange each per-element array; counts are scaled by the number of
    # entries each element contributes.
    newelemtovert = similar(elemtovert, nvert * sum(recvcounts))
    MPI.Alltoallv!(
        VBuffer(elemtovert, Cint(nvert) .* sendcounts),
        VBuffer(newelemtovert, Cint(nvert) .* recvcounts),
        comm,
    )
    newelemtocoord = similar(elemtocoord, d * nvert * sum(recvcounts))
    MPI.Alltoallv!(
        VBuffer(elemtocoord, Cint(d * nvert) .* sendcounts),
        VBuffer(newelemtocoord, Cint(d * nvert) .* recvcounts),
        comm,
    )
    newelemtobndy = similar(elemtobndy, nface * sum(recvcounts))
    MPI.Alltoallv!(
        VBuffer(elemtobndy, Cint(nface) .* sendcounts),
        VBuffer(newelemtobndy, Cint(nface) .* recvcounts),
        comm,
    )
    newelemtofaceconnect =
        similar(elemtofaceconnect, nfacevert * nface * sum(recvcounts))
    MPI.Alltoallv!(
        VBuffer(elemtofaceconnect, Cint(nfacevert * nface) .* sendcounts),
        VBuffer(newelemtofaceconnect, Cint(nfacevert * nface) .* recvcounts),
        comm,
    )
    if !isempty(globord)
        newglobord = similar(globord, sum(recvcounts))
        MPI.Alltoallv!(
            VBuffer(globord, sendcounts),
            VBuffer(newglobord, recvcounts),
            comm,
        )
    else
        newglobord = similar(globord)
    end

    newnelem = recvstarts[end] - 1
    newelemtovert = reshape(newelemtovert, nvert, newnelem)
    newelemtocoord = reshape(newelemtocoord, d, nvert, newnelem)
    newelemtobndy = reshape(newelemtobndy, nface, newnelem)
    newelemtofaceconnect =
        reshape(newelemtofaceconnect, nfacevert, nface, newnelem)

    # reorder local elements based on code of new elements
    A = UInt64[
        centroidtocode(comm, newelemtocoord; CT = UInt64)
        collect(1:newnelem)'
    ]
    A = sortslices(A, dims = 2)
    newsortorder = view(A, d + 1, :)

    newelemtovert = newelemtovert[:, newsortorder]
    newelemtocoord = newelemtocoord[:, :, newsortorder]
    newelemtobndy = newelemtobndy[:, newsortorder]
    newelemtofaceconnect = newelemtofaceconnect[:, :, newsortorder]

    # Collapse the dense face-connection array back into the sparse list
    # representation used by `brickmesh`.
    newfaceconnections = similar(faceconnections, 0)
    for e in 1:newnelem, f in 1:nface
        if newelemtofaceconnect[1, f, e] > 0
            push!(newfaceconnections, vcat(e, f, newelemtofaceconnect[:, f, e]))
        end
    end

    if !isempty(globord)
        newglobord = newglobord[newsortorder]
    end

    (
        newelemtovert,
        newelemtocoord,
        newelemtobndy,
        newfaceconnections,
        newglobord,
    ) # NOTE(review): original had a trailing "#sendorder)" comment here,
      # suggesting `sendorder` was once part of the return value
end

"""
    minmaxflip(x, y)

Returns `x, y` sorted lowest to highest and a bool that indicates
if a swap was needed.
"""
minmaxflip(x, y) = y < x ? (y, x, true) : (x, y, false)

"""
    vertsortandorder(a)

Returns `(a,)` and an ordering `o == 1`.
"""
vertsortandorder(a) = ((a,), 1)

"""
    vertsortandorder(a, b)

Returns sorted vertex numbers `(a,b)` and an ordering `o` depending on the
order needed to sort the elements.  This ordering is given below including
the vertex ordering for faces.

    o=     1      2

         (a,b)  (b,a)

           a      b
           |      |
           |      |
           b      a
"""
function vertsortandorder(a, b)
    a, b, s1 = minmaxflip(a, b)
    o = s1 ? 2 : 1
    ((a, b), o)
end

"""
    vertsortandorder(a, b, c)

Returns sorted vertex numbers `(a,b,c)` and an ordering `o` depending on the
order needed to sort the elements.  This ordering is given below including
the vertex ordering for faces.

    o=      1        2        3        4        5        6

         (a,b,c)  (c,a,b)  (b,c,a)  (b,a,c)  (c,b,a)  (a,c,b)

           /c\\      /b\\      /a\\      /c\\      /a\\      /b\\
          /   \\    /   \\    /   \\    /   \\    /   \\    /   \\
         /a___b\\  /c___a\\  /b___c\\  /b___a\\  /c___b\\  /a___c\\
"""
function vertsortandorder(a, b, c)
    # Use a (Bose-Nelson Algorithm based) sorting network to sort the
    # vertices; the swap flags identify the original orientation.
    b, c, s1 = minmaxflip(b, c)
    a, c, s2 = minmaxflip(a, c)
    a, b, s3 = minmaxflip(a, b)

    if !s1 && !s2 && !s3
        o = 1
    elseif !s1 && s2 && s3
        o = 2
    elseif s1 && !s2 && s3
        o = 3
    elseif !s1 && !s2 && s3
        o = 4
    elseif s1 && s2 && s3
        o = 5
    elseif s1 && !s2 && !s3
        o = 6
    else
        error("Problem finding vertex ordering $((a,b,c)) with flips $((s1,s2,s3))")
    end

    ((a, b, c), o)
end

"""
    vertsortandorder(a, b, c, d)

Returns sorted vertex numbers `(a,b,c,d)` and an ordering `o` depending on
the order needed to sort the elements.  This ordering is given below
including the vertex ordering for faces.

    o=      1       2       3       4       5       6       7       8

         (a,b,   (a,c,   (b,a,   (b,d,   (c,a,   (c,d,   (d,b,   (d,c,
          c,d)    b,d)    c,d)    a,c)    d,b)    a,b)    c,a)    b,a)

         c---d   b---d   c---d   a---c   d---b   a---b   c---a   b---a
         |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
         a---b   a---c   b---a   b---d   c---a   c---d   d---b   d---c
"""
function vertsortandorder(a, b, c, d)
    # Use a (Bose-Nelson Algorithm based) sorting network to sort the
    # vertices; the swap flags identify the original orientation.
    a, b, s1 = minmaxflip(a, b)
    c, d, s2 = minmaxflip(c, d)
    a, c, s3 = minmaxflip(a, c)
    b, d, s4 = minmaxflip(b, d)
    b, c, s5 = minmaxflip(b, c)

    if !s1 && !s2 && !s3 && !s4 && !s5
        o = 1
    elseif !s1 && !s2 && !s3 && !s4 && s5
        o = 2
    elseif s1 && !s2 && !s3 && !s4 && !s5
        o = 3
    elseif !s1 && !s2 && s3 && s4 && s5
        o = 4
    elseif s1 && s2 && !s3 && !s4 && s5
        o = 5
    elseif !s1 && !s2 && s3 && s4 && !s5
        o = 6
    elseif s1 && s2 && s3 && s4 && s5
        o = 7
    elseif s1 && s2 && s3 && s4 && !s5
        o = 8
    else
        # FIXME: some possible orientations are missing since there are a
        # total of 24.  Missing orientations:
        #=
         d---c  d---c  b---c  a---d  c---b  d---a  a---b  b---a
         |   |  |   |  |   |  |   |  |   |  |   |  |   |  |   |
         a---b  b---a  a---d  b---c  d---a  c---b  d---c  c---d

         c---b  d---b  b---d  b---c  c---a  d---a  a---d  a---c
         |   |  |   |  |   |  |   |  |   |  |   |  |   |  |   |
         a---d  a---c  c---a  d---a  b---d  b---c  c---b  d---b
        =#
        error("Problem finding vertex ordering $((a,b,c,d)) with flips $((s1,s2,s3,s4,s5))")
    end

    ((a, b, c, d), o)
end

"""
    connectmesh(comm::MPI.Comm, elemtovert, elemtocoord, elemtobndy,
                faceconnections)

This function takes in a mesh (as returned for example by `brickmesh`) and
returns a connected mesh.  This returns a `NamedTuple` of:

 - `elems` the range of element indices
 - `realelems` the range of real (aka nonghost) element indices
 - `ghostelems` the range of ghost element indices
 - `ghostfaces` ghost element to face is received; `ghostfaces[f,ge] == true`
   if face `f` of ghost element `ge` is received.
 - `sendelems` an array of send element indices
 - `sendfaces` send element to face is sent; `sendfaces[f,se] == true` if
   face `f` of send element `se` is sent.
 - `elemtocoord` element to vertex coordinates; `elemtocoord[d,i,e]` is the
   `d`th coordinate of corner `i` of element `e`
 - `elemtoelem` element to neighboring element; `elemtoelem[f,e]` is the
   number of the element neighboring element `e` across face `f`.  If there
   is no neighboring element then `elemtoelem[f,e] == e`.
 - `elemtoface` element to neighboring element face; `elemtoface[f,e]` is the
   face number of the element neighboring element `e` across face `f`.  If
   there is no neighboring element then `elemtoface[f,e] == f`.
 - `elemtoordr` element to neighboring element order; `elemtoordr[f,e]` is
   the ordering number of the element neighboring element `e` across face
   `f`.  If there is no neighboring element then `elemtoordr[f,e] == 1`.
 - `elemtobndy` element to boundary number; `elemtobndy[f,e]` is the boundary
   number of face `f` of element `e`.  If there is a neighboring element then
   `elemtobndy[f,e] == 0`.
 - `nabrtorank` a list of the MPI ranks for the neighboring processes
 - `nabrtorecv` a range in ghost elements to receive for each neighbor
 - `nabrtosend` a range in `sendelems` to send for each neighbor
"""
function connectmesh(
    comm::MPI.Comm,
    elemtovert,
    elemtocoord,
    elemtobndy,
    faceconnections;
    dim = size(elemtocoord, 1),
)
    d = dim
    (coorddim, nvert, nelem) = size(elemtocoord)
    nface, nfacevert = 2d, 2^(d - 1)

    # fmask[:, f] lists the local corner numbers that lie on face f.
    p = reshape(1:nvert, ntuple(j -> 2, d))
    fmask = hcat((
        p[ntuple(
            j ->
                (j == div(f - 1, 2) + 1) ?
                ((mod(f - 1, 2) + 1):(mod(f - 1, 2) + 1)) : (:),
            d,
        )...][:] for f in 1:nface
    )...)

    csize = MPI.Comm_size(comm)
    crank = MPI.Comm_rank(comm)

    # One column per (element, face).  Rows are: the sorted face vertices,
    # then (rank, element, face, orientation) for "my" side (M*) and for the
    # matched neighbor side (N*), the latter initialized to typemax.
    VT = eltype(elemtovert)
    A = Array{VT}(undef, nfacevert + 8, nface * nelem)
    MR, ME, MF, MO, NR, NE, NF, NO = nfacevert .+ (1:8)
    for e in 1:nelem
        v = reshape(elemtovert[:, e], ntuple(j -> 2, d))
        for f in 1:nface
            j = (e - 1) * nface + f
            fv, o = vertsortandorder(v[fmask[:, f]]...)
            A[1:nfacevert, j] .= fv
            A[MR, j] = crank
            A[ME, j] = e
            A[MF, j] = f
            A[MO, j] = o
            A[NR, j] = typemax(VT)
            A[NE, j] = typemax(VT)
            A[NF, j] = typemax(VT)
            A[NO, j] = typemax(VT)
        end
    end

    # use neighboring vertices for connected (periodic) faces
    for fc in faceconnections
        e = fc[1]
        f = fc[2]
        v = fc[3:end]
        j = (e - 1) * nface + f
        fv, o = vertsortandorder(v...)
        A[1:nfacevert, j] .= fv
        A[MO, j] = o
    end

    # Globally sort by face vertices so matching faces become adjacent
    # columns.
    A = parallelsortcolumns(comm, A, by = x -> x[1:nfacevert])
    m, n = size(A)

    # match faces
    j = 1
    while j <= n
        if j + 1 <= n && A[1:nfacevert, j] == A[1:nfacevert, j + 1]
            # found connected face pair: fill in each other's neighbor info
            A[NR:NO, j] = A[MR:MO, j + 1]
            A[NR:NO, j + 1] = A[MR:MO, j]
            j += 2
        else
            # found unconnected face: it is its own neighbor
            A[NR:NO, j] = A[MR:MO, j]
            j += 1
        end
    end

    A = sortslices(A, dims = 2, by = x -> (x[MR], x[NR], x[ME], x[MF]))

    # count number of elements that are going to be sent
    sendcounts = zeros(Cint, csize)
    for i in 1:last(size(A))
        sendcounts[A[MR, i] + 1] += m
    end
    sendstarts = ones(Int, csize + 1)
    for i in 1:csize
        sendstarts[i + 1] = sendcounts[i] + sendstarts[i]
    end

    # communicate columns of A to original rank
    recvcounts = similar(sendcounts)
    MPI.Alltoall!(UBuffer(sendcounts, 1), UBuffer(recvcounts, 1), comm)
    B = similar(A, sum(recvcounts))
    MPI.Alltoallv!(VBuffer(A, sendcounts), VBuffer(B, recvcounts), comm)
    B = reshape(B, m, nface * nelem)

    # get element sending information
    B = sortslices(B, dims = 2, by = x -> (x[NR], x[ME]))
    sendelems = Int[]
    counts = zeros(Int, csize + 1)
    counts[1] = (last(size(B)) > 0) ? 1 : 0
    sr, se = -1, 0
    for i in 1:last(size(B))
        r, e = B[NR, i], B[ME, i]
        # See if we need to send element `e` to rank `r` and make sure that we
        # didn't already mark it for sending.
        if r != crank && !(sr == r && se == e)
            counts[r + 2] += 1
            append!(sendelems, e)
            sr, se = r, e
        end
    end

    # Mark which faces need to be sent
    sendfaces = BitArray(undef, nface, length(sendelems))
    sendfaces .= false
    sr, se, n = -1, 0, 0
    for i in 1:last(size(B))
        r, e, f = B[NR, i], B[ME, i], B[MF, i]
        if r != crank
            if !(sr == r && se == e)
                n += 1
                sr, se = r, e
            end
            sendfaces[f, n] = true
        end
    end

    sendstarts = cumsum(counts)
    nabrtosendrank = Int[
        r for r = 0:(csize - 1) if sendstarts[r + 2] - sendstarts[r + 1] > 0
    ]
    nabrtosend = UnitRange{Int}[
        (sendstarts[r + 1]:(sendstarts[r + 2] - 1)) for
        r = 0:(csize - 1) if sendstarts[r + 2] - sendstarts[r + 1] > 0
    ]

    # get element receiving information
    B = sortslices(B, dims = 2, by = x -> (x[NR], x[NE]))
    counts = zeros(Int, csize + 1)
    counts[1] = (last(size(B)) > 0) ? 1 : 0
    sr, se = -1, 0
    nghost = 0
    for i in 1:last(size(B))
        r, e = B[NR, i], B[NE, i]
        if r != crank
            # Check to make sure we have not already marked the element for
            # receiving since we could be connected to the receiving element
            # across multiple faces.
            if !(sr == r && se == e)
                nghost += 1
                counts[r + 2] += 1
                sr, se = r, e
            end
            # Renumber the remote element as a local ghost element.
            B[NE, i] = nelem + nghost
        end
    end

    # Mark which faces will be received
    ghostfaces = BitArray(undef, nface, nghost)
    ghostfaces .= false
    sr, se, ge = -1, 0, 0
    for i in 1:last(size(B))
        r, e, f = B[NR, i], B[NE, i], B[NF, i]
        if r != crank
            if !(sr == r && se == e)
                ge += 1
                sr, se = r, e
            end
            B[NR, i] = crank
            ghostfaces[f, ge] = true
        end
    end

    recvstarts = cumsum(counts)
    nabrtorecvrank = Int[
        r for r = 0:(csize - 1) if recvstarts[r + 2] - recvstarts[r + 1] > 0
    ]
    nabrtorecv = UnitRange{Int}[
        (recvstarts[r + 1]:(recvstarts[r + 2] - 1)) for
        r = 0:(csize - 1) if recvstarts[r + 2] - recvstarts[r + 1] > 0
    ]

    @assert nabrtorecvrank == nabrtosendrank
    nabrtorank = nabrtorecvrank

    # Defaults: an element with no neighbor across face f is its own
    # neighbor, with face f and orientation 1.
    elemtoelem = repeat((1:(nelem + nghost))', nface, 1)
    elemtoface = repeat(1:nface, 1, nelem + nghost)
    elemtoordr = ones(Int, nface, nelem + nghost)

    if d == 2
        for i in 1:last(size(B))
            me, mf, mo = B[ME, i], B[MF, i], B[MO, i]
            ne, nf, no = B[NE, i], B[NF, i], B[NO, i]
            elemtoelem[mf, me] = ne
            elemtoface[mf, me] = nf
            elemtoordr[mf, me] = (no == mo ? 1 : 2)
        end
    else
        for i in 1:last(size(B))
            me, mf, mo = B[ME, i], B[MF, i], B[MO, i]
            ne, nf, no = B[NE, i], B[NF, i], B[NO, i]
            elemtoelem[mf, me] = ne
            elemtoface[mf, me] = nf
            if no != 1 || mo != 1
                error("TODO add support for other orientations")
            end
            elemtoordr[mf, me] = 1
        end
    end

    # fill the ghost values in elemtocoord
    newelemtocoord = similar(elemtocoord, coorddim, nvert, nelem + nghost)
    newelemtobndy = similar(elemtobndy, nface, nelem + nghost)

    sendelemtocoord = elemtocoord[:, :, sendelems]
    sendelemtobndy = elemtobndy[:, sendelems]

    # Nonblocking exchange of coordinates and boundary data for ghost
    # elements (tag 666 is an arbitrary fixed message tag).
    crreq = [
        MPI.Irecv!(view(newelemtocoord, :, :, nelem .+ er), r, 666, comm)
        for (r, er) in zip(nabrtorank, nabrtorecv)
    ]
    brreq = [
        MPI.Irecv!(view(newelemtobndy, :, nelem .+ er), r, 666, comm)
        for (r, er) in zip(nabrtorank, nabrtorecv)
    ]
    csreq = [
        MPI.Isend(view(sendelemtocoord, :, :, es), r, 666, comm)
        for (r, es) in zip(nabrtorank, nabrtosend)
    ]
    bsreq = [
        MPI.Isend(view(sendelemtobndy, :, es), r, 666, comm)
        for (r, es) in zip(nabrtorank, nabrtosend)
    ]

    newelemtocoord[:, :, 1:nelem] .= elemtocoord
    newelemtobndy[:, 1:nelem] .= elemtobndy

    MPI.Waitall!([csreq; crreq; bsreq; brreq])

    (
        elems = 1:(nelem + nghost),       # range of element indices
        realelems = 1:nelem,              # range of real element indices
        ghostelems = nelem .+ (1:nghost), # range of ghost element indices
        ghostfaces = ghostfaces,
        sendelems = sendelems,            # array of send element indices
        sendfaces = sendfaces,
        elemtocoord = newelemtocoord,     # element to vertex coordinates
        elemtovert = nothing,             # element to locally unique global vertex number (for direct stiffness summation)
        elemtoelem = elemtoelem,          # element to neighboring element
        elemtoface = elemtoface,          # element to neighboring element face
        elemtoordr = elemtoordr,          # element to neighboring element order
        elemtobndy = newelemtobndy,       # element to boundary number
        nabrtorank = nabrtorank,          # list of neighboring processes MPI ranks
        nabrtorecv = nabrtorecv,          # neighbor receive ranges into `ghostelems`
        nabrtosend = nabrtosend,          # neighbor send ranges into `sendelems`
    )
end

"""
    (bndytoelem, bndytoface) = enumerateboundaryfaces!(elemtoelem, elemtobndy,
                                                       periodicity, boundary)

Update the `elemtoelem` array based on the boundary faces specified in
`elemtobndy`.  Builds the `bndytoelem` and `bndytoface` tuples.
"""
function enumerateboundaryfaces!(elemtoelem, elemtobndy, periodicity, boundary)
    # The largest boundary number used on any nonperiodic dimension.
    nb = 0
    for i in 1:length(periodicity)
        if !periodicity[i]
            nb = max(nb, boundary[i]...)
        end
    end
    # Limitation of the boundary condition unrolling in the DG kernels
    # (should never be violated unless more general unstructured meshes are
    # used since cube meshes only have 6 faces in 3D, and only 1 bcs is
    # currently allowed per face)
    @assert nb <= 6
    bndytoelem = ntuple(b -> Vector{Int64}(), nb)
    bndytoface = ntuple(b -> Vector{Int64}(), nb)

    nface, nelem = size(elemtoelem)
    N = zeros(Int, nb)
    for e in 1:nelem
        for f in 1:nface
            d = elemtobndy[f, e]
            @assert 0 <= d <= nb
            if d != 0
                # Store a running per-boundary face count in elemtoelem and
                # record which (element, face) pairs lie on boundary d.
                elemtoelem[f, e] = N[d] += 1
                push!(bndytoelem[d], e)
                push!(bndytoface[d], f)
            end
        end
    end
    return (bndytoelem, bndytoface)
end

"""
    connectmeshfull(comm::MPI.Comm, elemtovert, elemtocoord, elemtobndy,
                    faceconnections)

This function takes in a mesh (as returned for example by `brickmesh`) and
returns a corner connected mesh.  It is similar to [`connectmesh`](@ref) but
returns a corner connected mesh, i.e. `ghostelems` will also include remote
elements that are only connected by vertices.

This returns a `NamedTuple` of:

 - `elems` the range of element indices
 - `realelems` the range of real (aka nonghost) element indices
 - `ghostelems` the range of ghost element indices
 - `ghostfaces` ghost element to face is received; `ghostfaces[f,ge] == true`
   if face `f` of ghost element `ge` is received.
 - `sendelems` an array of send element indices
 - `sendfaces` send element to face is sent; `sendfaces[f,se] == true` if
   face `f` of send element `se` is sent.
 - `elemtocoord` element to vertex coordinates; `elemtocoord[d,i,e]` is the
   `d`th coordinate of corner `i` of element `e`
 - `elemtoelem` element to neighboring element; `elemtoelem[f,e]` is the
   number of the element neighboring element `e` across face `f`.  If there
   is no neighboring element then `elemtoelem[f,e] == e`.
 - `elemtoface` element to neighboring element face; `elemtoface[f,e]` is the
   face number of the element neighboring element `e` across face `f`.  If
   there is no neighboring element then `elemtoface[f,e] == f`.
 - `elemtoordr` element to neighboring element order; `elemtoordr[f,e]` is
   the ordering number of the element neighboring element `e` across face
   `f`.  If there is no neighboring element then `elemtoordr[f,e] == 1`.
 - `elemtobndy` element to boundary number; `elemtobndy[f,e]` is the boundary
   number of face `f` of element `e`.  If there is a neighboring element then
   `elemtobndy[f,e] == 0`.
 - `nabrtorank` a list of the MPI ranks for the neighboring processes
 - `nabrtorecv` a range in ghost elements to receive for each neighbor
 - `nabrtosend` a range in `sendelems` to send for each neighbor

The algorithm assumes that the 2-D mesh can be gathered on single process,
which is reasonable for the 2-D mesh.
""" function connectmeshfull( comm::MPI.Comm, elemtovert, elemtocoord, elemtobndy, faceconnections; dim = size(elemtocoord, 1), ) @assert dim == 2 dim_coord = size(elemtocoord, 1) csize = MPI.Comm_size(comm) crank = MPI.Comm_rank(comm) root = 0 I = eltype(elemtovert) FT = eltype(elemtocoord) nvert, nelem = size(elemtovert) nfaces = 2 * dim nelemv = zeros(I, csize) fmask = build_fmask(dim) nfvert = size(fmask, 1) # collecting # of realelems on each process MPI.Allgather!(nelem, UBuffer(nelemv, Cint(1)), comm) offset = cumsum(nelemv) .+ 1 pushfirst!(offset, 1) nelemg = sum(nelemv) # collecting elemtovert on each process elemtovertg = zeros(I, nvert + 2, nelemg) MPI.Allgatherv!( [elemtovert; ones(I, 1, nelem) .* crank; reshape(Array(1:nelem), 1, :)], VBuffer(elemtovertg, nelemv .* (nvert + 2)), comm, ) nvertg = maximum(elemtovertg[1:nvert, :]) # collecting elemtocoordg on each process elemtocoordg = Array{FT}(undef, dim_coord, nvert, nelemg) MPI.Allgatherv!( elemtocoord, VBuffer(elemtocoordg, nelemv .* (dim_coord * nvert)), comm, ) # collecting elemtobndy on each process elemtobndyg = zeros(I, nfaces, nelemg) MPI.Allgatherv!(elemtobndy, VBuffer(elemtobndyg, nelemv .* nfaces), comm) # accounting for vertices on periodic faces nper = length(faceconnections) * nfvert vconn = Array{Int}(undef, 2, nper) # stores the periodic and corresponding shadow vertex ctr = 1 for fc in faceconnections e, f, v = fc[1], fc[2], fc[3:end] fv = elemtovert[fmask[:, f], e] fv, o = vertsortandorder(fv) v, o = vertsortandorder(v) for i in 1:nfvert vconn[1, ctr] = fv[1][i] vconn[2, ctr] = v[1][i] ctr += 1 end end vconn = unique(vconn, dims = 2) nper = size(vconn, 2) nperv = zeros(I, csize) MPI.Allgather!(nper, UBuffer(nperv, Cint(1)), comm) nperg = sum(nperv) vconng = Array{Int}(undef, 2, nperg) MPI.Allgatherv!(vconn, VBuffer(vconng, nperv .* 2), comm) gldofv = -ones(Int, nvertg) pmarker = -ones(Int, nperg) # conflict-free periodic nodes for i in 1:nperg v1, v2 = vconng[1, i], vconng[2, i] if 
gldofv[v1] == -1 && gldofv[v2] == -1 id = min(v1, v2) gldofv[v1] = id gldofv[v2] = id pmarker[i] = 1 end end # dealing with doubly periodic nodes (whenever applicable) for i in 1:nperg if pmarker[i] == -1 v1, v2 = vconng[1, i], vconng[2, i] id = min(gldofv[v1], gldofv[v2]) gldofv[v1] = id gldofv[v2] = id end end # labeling non-periodic vertices for i in 1:nvertg if gldofv[i] == -1 gldofv[i] = i end end #------------------------------------------------------------- elemtovertg_orig = deepcopy(elemtovertg) for i in 1:nelemg, j in 1:nvert elemtovertg[j, i] = gldofv[elemtovertg[j, i]] end #------------------------------------------------- nsend, nghost = 0, 0 interiorelems, exteriorelems = Int[], Int[] vertgtoprocs = map(i -> zeros(Int, i), zeros(Int, nvertg)) # procs for each vertex vertgtolelem = map(i -> zeros(Int, i), zeros(Int, nvertg)) # local cell # for each vertex vertgtolvert = map(i -> zeros(Int, i), zeros(Int, nvertg)) # local vertex # in local cell for each vertex sendelems = map(i -> zeros(Int, i), zeros(Int, csize)) recvelems = map(i -> zeros(Int, i), zeros(Int, csize)) for icls in 1:nelemg # gathering connectivity info for each global vertex for ivt in 1:nvert gvt = elemtovertg[ivt, icls] prc = elemtovertg[nvert + 1, icls] lcl = elemtovertg[nvert + 2, icls] push!(vertgtoprocs[gvt], prc) push!(vertgtolelem[gvt], lcl) push!(vertgtolvert[gvt], ivt) end end for icls in 1:nelem # building sendelems and recvelems interior = true for ivt in 1:nvert vt = elemtovert[ivt, icls] gvt = gldofv[vt] for ip in 1:length(vertgtoprocs[gvt]) proc = vertgtoprocs[gvt][ip] if proc ≠ crank lcell = vertgtolelem[gvt][ip] if findfirst(recvelems[proc + 1] .== lcell) == nothing push!(recvelems[proc + 1], lcell) nghost += 1 end if findfirst(sendelems[proc + 1] .== icls) == nothing push!(sendelems[proc + 1], icls) nsend += 1 end interior = false end end end interior ? 
push!(interiorelems, icls) : push!(exteriorelems, icls) end nabrtorank = Int[] newsendelems = Array{Int}(undef, nsend) nabrtosend = Array{UnitRange{Int64}}(undef, 0) nabrtorecv = Array{UnitRange{Int64}}(undef, 0) st_recv, en_recv = 1, 1 st_send, en_send = 1, 1 for ipr in 0:(csize - 1) l_send = length(sendelems[ipr + 1]) l_recv = length(recvelems[ipr + 1]) if l_send > 0 en_send = st_send + l_send - 1 sort!(sendelems[ipr + 1]) newsendelems[st_send:en_send] .= sendelems[ipr + 1][:] push!(nabrtosend, st_send:en_send) st_send = en_send + 1 push!(nabrtorank, ipr) end if l_recv > 0 en_recv = st_recv + l_recv - 1 sort!(recvelems[ipr + 1]) push!(nabrtorecv, st_recv:en_recv) st_recv = en_recv + 1 end end sendfaces = BitArray(zeros(nfaces, nsend)) ghostfaces = BitArray(zeros(nfaces, nghost)) newelemtovert = similar(elemtovert, nvert, (nelem + nghost)) newelemtocoord = similar(elemtocoord, dim_coord, nvert, (nelem + nghost)) newelemtobndy = similar(elemtobndy, nfaces, (nelem + nghost)) newelemtovert[1:nvert, 1:nelem] .= elemtovert newelemtocoord[:, :, 1:nelem] .= elemtocoord newelemtobndy[:, 1:nelem] .= elemtobndy vmarker = BitArray(undef, nvert) ctrg, ctrs = 1, 1 for ipr in 0:(csize - 1) # building ghost faces for icls in recvelems[ipr + 1] vmarker .= 0 off = offset[ipr + 1] newelemtovert[1:nvert, nelem + ctrg] .= elemtovertg_orig[1:nvert, off + icls - 1] newelemtocoord[:, :, nelem + ctrg] .= elemtocoordg[:, :, off + icls - 1] newelemtobndy[:, nelem + ctrg] .= elemtobndyg[:, off + icls - 1] for ivt in 1:nvert gvt = elemtovertg[ivt, icls + off - 1] if findfirst(vertgtoprocs[gvt] .== crank) ≠ nothing vmarker[ivt] = 1 end end for fc in 1:nfaces if findfirst(vmarker[fmask[:, fc]]) ≠ nothing ghostfaces[fc, ctrg] = 1 end end ctrg += 1 end # building send faces for icls in sendelems[ipr + 1] vmarker .= 0 for ivt in 1:nvert vt = elemtovert[ivt, icls] gvt = gldofv[vt] if findfirst(vertgtoprocs[gvt] .== ipr) ≠ nothing vmarker[ivt] = 1 end end for fc in 1:nfaces if 
findfirst(vmarker[fmask[:, fc]]) ≠ nothing sendfaces[fc, ctrs] = 1 end end ctrs += 1 end end A = zeros(Int, (nfvert + 3), nfaces * (nelem + nghost)) for e in 1:(nelem + nghost) v = reshape(newelemtovert[:, e], ntuple(j -> 2, dim)) for f in 1:nfaces j = (e - 1) * nfaces + f fv, o = vertsortandorder(v[fmask[:, f]]...) # faces vertices, B-N orientation for fvt in 1:nfvert A[fvt, j] = gldofv[fv[fvt]] end A[nfvert + 1, j] = o # orientation A[nfvert + 2, j] = e # local element number A[nfvert + 3, j] = f # local face number for element e end end A = sortslices(A, dims = 2, by = x -> x[1:nfvert]) elemtoelem = Array{Int}(undef, nfaces, (nelem + nghost)) elemtoface = Array{Int}(undef, nfaces, (nelem + nghost)) elemtoordr = Array{Int}(undef, nfaces, (nelem + nghost)) # match faces n = size(A, 2) j = 1 while j ≤ n lel = A[nfvert + 2, j] lfc = A[nfvert + 3, j] if j + 1 ≤ n && A[1:nfvert, j] == A[1:nfvert, j + 1] # found connected face nel = A[nfvert + 2, j + 1] # local neighboring element nfc = A[nfvert + 3, j + 1] # local face # of local neighboring element elemtoelem[lfc, lel] = nel elemtoface[lfc, lel] = nfc elemtoelem[nfc, nel] = lel elemtoface[nfc, nel] = lfc if A[nfvert + 1, j] == A[nfvert + 1, j + 1] elemtoordr[lfc, lel] = 1 elemtoordr[nfc, nel] = 1 else elemtoordr[lfc, lel] = 2 elemtoordr[nfc, nel] = 2 end j += 2 else # found unconnected face elemtoelem[lfc, lel] = lel elemtoface[lfc, lel] = lfc elemtoordr[lfc, lel] = 1 j += 1 end end # provide locally unique elemtovert for DSS uvert = -ones(Int, nvertg) for el in 1:last(size(newelemtovert)), i in 1:nvert newelemtovert[i, el] = gldofv[newelemtovert[i, el]] uvert[newelemtovert[i, el]] = 0 end ctr = 1 for i in 1:length(uvert) if uvert[i] == 0 uvert[i] = ctr ctr += 1 end end for el in 1:last(size(newelemtovert)), i in 1:nvert newelemtovert[i, el] = uvert[newelemtovert[i, el]] end #--------------------------------------------------------------- ( elems = 1:(nelem + nghost), # range of element indices realelems = 1:nelem, # 
range of real element indices ghostelems = nelem .+ (1:nghost), # range of ghost element indices ghostfaces = ghostfaces, # bit array of recv faces for ghost elems sendelems = newsendelems, # array of send element indices sendfaces = sendfaces, # bit array of send faces for send elems elemtocoord = newelemtocoord, # element to vertex coordinates elemtovert = newelemtovert, # element to locally unique global vertex number (for direct stiffness summation) elemtoelem = elemtoelem, # element to neighboring element elemtoface = elemtoface, # element to neighboring element face elemtoordr = elemtoordr, # element to neighboring element order elemtobndy = newelemtobndy, # element to boundary number nabrtorank = nabrtorank, # list of neighboring processes MPI ranks nabrtorecv = nabrtorecv, # neighbor receive ranges into `ghostelems` nabrtosend = nabrtosend, # neighbor send ranges into `sendelems` ) end """ build_fmask(dim) Returns the face mask for mapping element vertices to face vertices. ex: for 2D element with vertices (1, 2, 3, 4) 3---4 | | 1---2 the function returns the face mask f1 | f2 | f3 | f4 ================= 1 | 2 | 1 | 3 3 | 4 | 2 | 4 ================= """ function build_fmask(dim) nvert = 2^dim nfaces = 2 * dim p = reshape(1:nvert, ntuple(j -> 2, dim)) fmask = Array{Int64}(undef, 2^(dim - 1), nfaces) f = 0 for d in 1:dim for slice in eachslice(p, dims = d) fmask[:, f += 1] = vec(slice) end end return fmask end end # module ================================================ FILE: src/Numerics/Mesh/DSS.jl ================================================ module DSS using ..Grids using ClimateMachine.MPIStateArrays using CUDA using KernelAbstractions using DocStringExtensions export dss! """ dss3d(Q::MPIStateArray, grid::DiscontinuousSpectralElementGrid) This function computes the 3D direct stiffness summation for all variables in the MPIStateArray. 
"""
    dss!(Q::MPIStateArray, grid::DiscontinuousSpectralElementGrid; max_threads = 256)

This function computes the 3D direct stiffness summation for all variables in
the MPIStateArray `Q`.

# Fields
 - `Q`: MPIStateArray
 - `grid`: Discontinuous Spectral Element Grid
 - `max_threads`: upper bound on CUDA threads per block (keyword argument)
"""
function dss!(
    Q::MPIStateArray,
    grid::DiscontinuousSpectralElementGrid{FT, 3};
    max_threads = 256,
) where {FT}
    DA = arraytype(grid)                                     # device array
    device = arraytype(grid) <: Array ? CPU() : CUDADevice() # device
    #----communication--------------------------
    event = MPIStateArrays.begin_ghost_exchange!(Q)
    event = MPIStateArrays.end_ghost_exchange!(Q, dependencies = event)
    wait(event)
    #----Direct Stiffness Summation-------------
    vertmap = grid.vertmap
    edgemap = grid.edgemap
    facemap = grid.facemap

    vtconn = grid.topology.vtconn
    fcconn = grid.topology.fcconn
    edgconn = grid.topology.edgconn
    vtconnoff = grid.topology.vtconnoff
    edgconnoff = grid.topology.edgconnoff

    nvt, nfc, nedg =
        length(vtconnoff) - 1, size(fcconn, 1), length(edgconnoff) - 1

    Nq = polynomialorders(grid) .+ 1
    Nqmax = maximum(Nq)
    Nemax = Nqmax - 2      # interior (non-vertex) nodes per edge
    Nfmax = size(facemap, 1)

    args = (
        Q.data,
        vertmap,
        edgemap,
        facemap,
        vtconn,
        vtconnoff,
        edgconn,
        edgconnoff,
        fcconn,
        nvt,
        nedg,
        nfc,
        Nemax,
        Nfmax,
    )

    if device == CPU()
        dss3d_CPU!(args...)
    else
        # One work item per unique vertex/edge/face.  Cap the block size at
        # `max_threads`: the previous `max(n_items, max_threads)` could
        # request more threads per block than the device supports whenever
        # n_items exceeded max_threads.
        n_items = nvt + nfc + nedg
        tx = min(n_items, max_threads)
        bx = cld(n_items, tx)
        @cuda threads = (tx) blocks = (bx) dss3d_CUDA!(args...)
    end
    return nothing
end

function dss3d_CUDA!(
    data,
    vertmap,
    edgemap,
    facemap,
    vtconn,
    vtconnoff,
    edgconn,
    edgconnoff,
    fcconn,
    nvt,
    nedg,
    nfc,
    Nemax,
    Nfmax,
)
    I = eltype(nvt)
    FT = eltype(data)
    tx = threadIdx().x    # thread id
    bx = blockIdx().x     # block id
    bxdim = blockDim().x  # block dimension
    glx = tx + (bx - 1) * bxdim # global id
    nvars = size(data, 2)
    # A mesh node is either
    # - interior (no dss required)
    # - on an element corner / vertex,
    # - edge (excluding end point element corners / vertices)
    # - face (excluding edges and corners)
    # Work items are laid out as [vertices | edges | faces].
    if glx ≤ nvt # vertex DSS
        vx = glx
        for ivar in 1:nvars
            dss_vertex!(vtconn, vtconnoff, vertmap, data, ivar, vx, FT)
        end
    elseif glx > nvt && glx ≤ (nvt + nedg) # edge DSS
        ex = glx - nvt
        for ivar in 1:nvars
            dss_edge!(edgconn, edgconnoff, edgemap, Nemax, data, ivar, ex, FT)
        end
    elseif glx > (nvt + nedg) && glx ≤ (nvt + nedg + nfc) # face DSS
        fx = glx - (nvt + nedg)
        for ivar in 1:nvars
            dss_face!(fcconn, facemap, Nfmax, data, ivar, fx)
        end
    end
    return nothing
end

function dss3d_CPU!(
    data,
    vertmap,
    edgemap,
    facemap,
    vtconn,
    vtconnoff,
    edgconn,
    edgconnoff,
    fcconn,
    nvt,
    nedg,
    nfc,
    Nemax,
    Nfmax,
)
    I = eltype(nvt)
    FT = eltype(data)
    nvars = size(data, 2)
    # A mesh node is either
    # - interior (no dss required)
    # - on an element corner / vertex,
    # - edge (excluding end point element corners / vertices)
    # - face (excluding edges and corners)
    for ivar in 1:nvars
        for vx in 1:nvt # vertex DSS
            dss_vertex!(vtconn, vtconnoff, vertmap, data, ivar, vx, FT)
        end
        for ex in 1:nedg # edge DSS
            dss_edge!(edgconn, edgconnoff, edgemap, Nemax, data, ivar, ex, FT)
        end
        for fx in 1:nfc # face DSS
            dss_face!(fcconn, facemap, Nfmax, data, ivar, fx)
        end
    end
    return nothing
end
"""
    dss_vertex!(
        vtconn,
        vtconnoff,
        vertmap,
        data,
        ivar,
        vx,
        ::Type{FT},
    ) where {FT}

This function computes the direct stiffness summation for the vertex `vx`:
the values of variable `ivar` at every element copy of the vertex are summed
and the sum is written back to each copy.

# Fields
 - `vtconn`: vertex connectivity array, a flat list of
   `(local vertex, element)` pairs
 - `vtconnoff`: offsets for vertex connectivity array
 - `vertmap`: map to vertex degrees of freedom: `vertmap[vx]` contains the
   degree of freedom located at vertex `vx`.
 - `data`: data field of MPIStateArray
 - `ivar`: variable # in the MPIStateArray
 - `vx`: unique vertex number
 - `::Type{FT}`: Floating point type
"""
function dss_vertex!(
    vtconn,
    vtconnoff,
    vertmap,
    data,
    ivar,
    vx,
    ::Type{FT},
) where {FT}
    @inbounds offs = vtconnoff[vx]
    # each contribution is a (local vertex, element) pair => 2 entries
    @inbounds ncontrib = Int((vtconnoff[vx + 1] - vtconnoff[vx]) / 2)
    acc = -FT(0)
    # gather: accumulate the value at every element copy of this vertex
    @inbounds for c in 1:ncontrib
        node = vertmap[vtconn[offs + (c - 1) * 2]]
        elem = vtconn[offs + (c - 1) * 2 + 1]
        acc += data[node, ivar, elem]
    end
    # scatter: write the summed value back to every copy
    @inbounds for c in 1:ncontrib
        node = vertmap[vtconn[offs + (c - 1) * 2]]
        elem = vtconn[offs + (c - 1) * 2 + 1]
        data[node, ivar, elem] = acc
    end
end
"""
    dss_edge!(
        edgconn,
        edgconnoff,
        edgemap,
        Nemax,
        data,
        ivar,
        ex,
        ::Type{FT},
    ) where {FT}

This function computes the direct stiffness summation for all degrees of
freedom corresponding to edge `ex`.  dss_edge! applies only to interior
(non-vertex) edge nodes.

# Fields
 - `edgconn`: edge connectivity array, a flat list of
   `(local edge, orientation, element)` triples
 - `edgconnoff`: offsets for edge connectivity array
 - `edgemap`: map to edge degrees of freedom: `edgemap[i, edgno, orient]`
   contains the element node index of the `i`th interior node on edge
   `edgno`, under orientation `orient`.
 - `Nemax`: # of relevant degrees of freedom per edge (other dof are marked
   as -1)
 - `data`: data field of MPIStateArray
 - `ivar`: variable # in the MPIStateArray
 - `ex`: unique edge number
 - `::Type{FT}`: Floating point type
"""
function dss_edge!(
    edgconn,
    edgconnoff,
    edgemap,
    Nemax,
    data,
    ivar,
    ex,
    ::Type{FT},
) where {FT}
    @inbounds offs = edgconnoff[ex]
    # each contribution is an (edge, orientation, element) triple => 3 entries
    @inbounds ncontrib = Int((edgconnoff[ex + 1] - edgconnoff[ex]) / 3)
    @inbounds for k in 1:Nemax
        acc = -FT(0)
        # gather from every element copy of interior edge node k
        @inbounds for c in 1:ncontrib
            base = offs + (c - 1) * 3
            node = edgemap[k, edgconn[base], edgconn[base + 1]]
            node == -1 && continue # node not relevant for this edge
            acc += data[node, ivar, edgconn[base + 2]]
        end
        # scatter the sum back to every copy
        @inbounds for c in 1:ncontrib
            base = offs + (c - 1) * 3
            node = edgemap[k, edgconn[base], edgconn[base + 1]]
            node == -1 && continue
            data[node, ivar, edgconn[base + 2]] = acc
        end
    end
end

"""
    dss_face!(fcconn, facemap, Nfmax, data, ivar, fx)

This function computes the direct stiffness summation for all degrees of
freedom corresponding to face `fx`.  dss_face! applies only to interior
(non-vertex and non-edge) face nodes.

# Fields
 - `fcconn`: face connectivity array
 - `facemap`: map to face degrees of freedom: `facemap[ij, fcno, orient]`
   contains the element node index of the `ij`th interior node on face
   `fcno` under orientation `orient`
 - `Nfmax`: # of relevant degrees of freedom per face (other dof are marked
   as -1)
 - `data`: data field of MPIStateArray
 - `ivar`: variable # in the MPIStateArray
 - `fx`: unique face number
"""
function dss_face!(fcconn, facemap, Nfmax, data, ivar, fx)
    @inbounds lfc, lel, nabrlfc, nabrlel, ordr =
        fcconn[fx, 1], fcconn[fx, 2], fcconn[fx, 3], fcconn[fx, 4], fcconn[fx, 5]
    # mesh orientation 3 is a flip along the horizontal edges.
    # This is the only orientation currently supported by the mesh generator;
    # see vertsortandorder in src/Numerics/Mesh/BrickMesh.jl
    ordr = ordr == 3 ? 2 : 1
    @inbounds for j in 1:Nfmax
        loc1 = facemap[j, lfc, ordr]
        loc2 = facemap[j, nabrlfc, 1]
        if loc1 ≠ -1 && loc2 ≠ -1
            # sum the two copies and write the result back to both sides
            sumf = data[loc1, ivar, lel] + data[loc2, ivar, nabrlel]
            data[loc1, ivar, lel] = sumf
            data[loc2, ivar, nabrlel] = sumf
        end
    end
end
end # module (DSS)

# ================================================
# FILE: src/Numerics/Mesh/Elements.jl
# ================================================

module Elements
import GaussQuadrature

"""
    lglpoints(::Type{T}, N::Integer) where T <: AbstractFloat

returns the points `r` and weights `w` associated with the `N+1`-point
Gauss-Legendre-Lobatto quadrature rule of type `T`
"""
function lglpoints(::Type{T}, N::Integer) where {T <: AbstractFloat}
    # Lobatto rules need at least two points (both endpoints included)
    @assert N ≥ 1
    GaussQuadrature.legendre(T, N + 1, GaussQuadrature.both)
end

"""
    glpoints(::Type{T}, N::Integer) where T <: AbstractFloat

returns the points `r` and weights `w` associated with the `N+1`-point
Gauss-Legendre quadrature rule of type `T`
"""
function glpoints(::Type{T}, N::Integer) where {T <: AbstractFloat}
    GaussQuadrature.legendre(T, N + 1, GaussQuadrature.neither)
end

"""
    baryweights(r)

returns the barycentric weights associated with the array of points `r`

Reference:
  [Berrut2004](@cite)
"""
function baryweights(r::AbstractVector{T}) where {T}
    Np = length(r)
    wb = ones(T, Np)
    for j in 1:Np
        # wb[j] = 1 / prod_{i != j} (r[j] - r[i])
        for i in 1:Np
            if i != j
                wb[j] = wb[j] * (r[j] - r[i])
            end
        end
        wb[j] = T(1) / wb[j]
    end
    wb
end

"""
    spectralderivative(r::AbstractVector{T},
                       wb=baryweights(r)::AbstractVector{T}) where T

returns the spectral differentiation matrix for a polynomial defined on the
points `r` with associated barycentric weights `wb`

Reference:
 - [Berrut2004](@cite)
"""
function spectralderivative(
    r::AbstractVector{T},
    wb = baryweights(r)::AbstractVector{T},
) where {T}
    Np = length(r)
    @assert Np == length(wb)
    D = zeros(T, Np, Np)

    for k in 1:Np
        for j in 1:Np
            if k == j
                # diagonal entry: sum of 1 / (r[k] - r[l]) over l != k
                for l in 1:Np
                    if l != k
                        D[j, k] = D[j, k] + T(1) / (r[k] - r[l])
                    end
                end
            else
                # off-diagonal entry in barycentric form
                D[j, k] = (wb[k] / wb[j]) / (r[j] - r[k])
            end
        end
    end
    D
end
"""
    interpolationmatrix(rsrc::AbstractVector{T}, rdst::AbstractVector{T},
                        wbsrc=baryweights(rsrc)::AbstractVector{T}) where T

returns the polynomial interpolation matrix for interpolating between the
points `rsrc` (with associated barycentric weights `wbsrc`) and `rdst`

Reference:
 - [Berrut2004](@cite)
"""
function interpolationmatrix(
    rsrc::AbstractVector{T},
    rdst::AbstractVector{T},
    wbsrc = baryweights(rsrc)::AbstractVector{T},
) where {T}
    Npdst = length(rdst)
    Npsrc = length(rsrc)
    @assert Npsrc == length(wbsrc)
    I = zeros(T, Npdst, Npsrc)
    for k in 1:Npdst
        for j in 1:Npsrc
            I[k, j] = wbsrc[j] / (rdst[k] - rsrc[j])
            if !isfinite(I[k, j])
                # destination point coincides with a source point: the
                # interpolation row is the corresponding unit vector
                I[k, :] .= T(0)
                I[k, j] = T(1)
                break
            end
        end
        # normalize the row (barycentric form of Lagrange interpolation)
        d = sum(I[k, :])
        I[k, :] = I[k, :] / d
    end
    I
end

"""
    jacobip(α, β, N, x)

Returns a `(nx, N+1)` array containing the `N+1` Jacobi polynomials of order
`N`, with parameter `(α, β)`, evaluated on 1D grid `x`.
"""
function jacobip(α, β, N, x)
    a, b = GaussQuadrature.jacobi_coefs(N, α, β)
    V = GaussQuadrature.orthonormal_poly(x, a, b)
    return V
end

end # module

# ================================================
# FILE: src/Numerics/Mesh/Filters.jl
# ================================================

module Filters

using SpecialFunctions
using LinearAlgebra, GaussQuadrature, KernelAbstractions
using KernelAbstractions.Extras: @unroll
using StaticArrays

using ..Grids
using ..Grids: Direction, EveryDirection, HorizontalDirection, VerticalDirection
using ...MPIStateArrays
using ...VariableTemplates: @vars, varsize, Vars, varsindices

export AbstractSpectralFilter, AbstractFilter
export ExponentialFilter,
    CutoffFilter, MassPreservingCutoffFilter, TMARFilter, BoydVandevenFilter

abstract type AbstractFilter end
abstract type AbstractSpectralFilter <: AbstractFilter end

"""
    AbstractFilterTarget

An abstract type representing variables that the filter will act on
"""
abstract type AbstractFilterTarget end

"""
    vars_state_filtered(::AbstractFilterTarget, FT)

A tuple of symbols containing variables that the filter will act on
given a float type `FT`
"""
function vars_state_filtered end

"""
    compute_filter_argument!(::AbstractFilterTarget, state_filter::Vars, state::Vars, state_auxiliary::Vars)

Compute filter argument `state_filter` based on `state` and `state_auxiliary`
"""
function compute_filter_argument! end

"""
    compute_filter_result!(::AbstractFilterTarget, state::Vars, state_filter::Vars, state_auxiliary::Vars)

Compute filter result `state` based on the filtered state `state_filter` and
`state_auxiliary`
"""
function compute_filter_result! end

# number of variables a filter target acts on
number_state_filtered(t::AbstractFilterTarget, FT) =
    varsize(vars_state_filtered(t, FT))

"""
    FilterIndices(I)

Filter variables based on their indices `I` where `I` can be a range or a
list of indices

## Examples
```julia
FilterIndices(1:3)
FilterIndices(1, 3, 5)
```
"""
struct FilterIndices{I} <: AbstractFilterTarget
    # the selected indices are carried in the type parameter `I`
    FilterIndices(I::Integer...) = new{I}()
    FilterIndices(I::AbstractRange) = new{I}()
end

vars_state_filtered(::FilterIndices{I}, FT) where {I} =
    @vars(_::SVector{length(I), FT})

function compute_filter_argument!(
    ::FilterIndices{I},
    filter_state::Vars,
    state::Vars,
    aux::Vars,
) where {I}
    # copy the selected indices of `state` into the filter scratch state
    @unroll for s in 1:length(I)
        @inbounds parent(filter_state)[s] = parent(state)[I[s]]
    end
end

function compute_filter_result!(
    ::FilterIndices{I},
    state::Vars,
    filter_state::Vars,
    aux::Vars,
) where {I}
    # copy the filtered values back into the selected indices of `state`
    @unroll for s in 1:length(I)
        @inbounds parent(state)[I[s]] = parent(filter_state)[s]
    end
end

"""
    spectral_filter_matrix(r, Nc, σ)

Returns the filter matrix that takes function values at the interpolation
`N+1` points, `r`, converts them into Legendre polynomial basis coefficients,
multiplies
```math
σ((n-N_c)/(N-N_c))
```
against coefficients `n=Nc:N` and evaluates the resulting polynomial at the
points `r`.
"""
function spectral_filter_matrix(r, Nc, σ)
    N = length(r) - 1
    T = eltype(r)

    @assert N >= 0
    @assert 0 <= Nc <= N

    a, b = GaussQuadrature.legendre_coefs(T, N)
    # Vandermonde matrix of the orthonormal Legendre basis at the points `r`
    V = (N == 0 ? ones(T, 1, 1) : GaussQuadrature.orthonormal_poly(r, a, b))

    Σ = ones(T, N + 1)
    Σ[(Nc:N) .+ 1] .= σ.(((Nc:N) .- Nc) ./ (N - Nc))

    # to modal space, damp the high modes, back to nodal space
    V * Diagonal(Σ) / V
end

"""
    modified_filter_matrix(r, Nc, σ)

Returns the filter matrix that takes function values at the interpolation
`N+1` points, `r`, converts them into Legendre polynomial basis coefficients,
multiplies
```math
σ((n-N_c)/(N-N_c))
```
against coefficients `n=Nc:N` and evaluates the resulting polynomial at the
points `r`. Unlike spectral_filter_matrix, this allows for the identity
matrix, to be applied.
"""
function modified_filter_matrix(r, Nc, σ)
    N = length(r) - 1
    T = eltype(r)

    @assert N >= 0
    @assert 0 <= Nc
    # cutoff beyond the polynomial degree: nothing to filter, identity
    Nc > N && return Array{T}(I, N + 1, N + 1)

    a, b = GaussQuadrature.legendre_coefs(T, N)
    V = (N == 0 ? ones(T, 1, 1) : GaussQuadrature.orthonormal_poly(r, a, b))

    Σ = ones(T, N + 1)
    Σ[(Nc:N) .+ 1] .= σ.(((Nc:N) .- Nc) ./ (N - Nc))

    V * Diagonal(Σ) / V
end

"""
    ExponentialFilter(grid, Nc=0, s=32, α=-log(eps(eltype(grid))))

Returns the spectral filter with the filter function
```math
σ(η) = \exp(-α η^s)
```
where `s` is the filter order (must be even), the filter starts with
polynomial order `Nc`, and `alpha` is a parameter controlling the smallest
value of the filter function.
"""
struct ExponentialFilter{FM} <: AbstractSpectralFilter
    "filter matrices in all directions (tuple of filter matrices)"
    filter_matrices::FM

    function ExponentialFilter(
        grid,
        Nc = 0,
        s = 32,
        α = -log(eps(eltype(grid))),
    )
        dim = dimensionality(grid)

        # Support different filtering thresholds in different
        # directions (default behavior is to apply the same threshold
        # uniformly in all directions)
        if Nc isa Integer
            Nc = ntuple(i -> Nc, dim)
        elseif Nc isa NTuple{2} && dim == 3
            Nc = (Nc[1], Nc[1], Nc[2])
        end
        @assert length(Nc) == dim

        # Tuple of polynomial degrees (N₁, N₂, N₃)
        N = polynomialorders(grid)
        # In 2D, we assume same polynomial order in the horizontal
        @assert dim == 2 || N[1] == N[2]

        @assert iseven(s)
        @assert all(0 .<= Nc .<= N)

        σ(η) = exp(-α * η^s)

        AT = arraytype(grid)
        ξ = referencepoints(grid)
        # one filter matrix per reference direction
        filter_matrices =
            ntuple(i -> AT(spectral_filter_matrix(ξ[i], Nc[i], σ)), dim)

        new{typeof(filter_matrices)}(filter_matrices)
    end
end
"""
    BoydVandevenFilter(grid, Nc=0, s=32)

Returns the spectral filter using the logarithmic error function of the form:
```math
σ(η) = 1/2 erfc(2*sqrt(s)*χ(η)*(abs(η)-0.5))
```
whenever s ≤ i ≤ N, and 1 otherwise. The function `χ(η)` is defined as
```math
χ(η) = sqrt(-log(1-4*(abs(η)-0.5)^2)/(4*(abs(η)-0.5)^2))
```
if `x != 0.5` and `1` otherwise. Here, `s` is the filter order, the filter
starts with polynomial order `Nc`, and `alpha` is a parameter controlling
the smallest value of the filter function.

### References
 - [Boyd1996](@cite)
"""
struct BoydVandevenFilter{FM} <: AbstractSpectralFilter
    "filter matrices in all directions (tuple of filter matrices)"
    filter_matrices::FM

    function BoydVandevenFilter(grid, Nc = 0, s = 32)
        dim = dimensionality(grid)

        # Support different filtering thresholds in different
        # directions (default behavior is to apply the same threshold
        # uniformly in all directions)
        if Nc isa Integer
            Nc = ntuple(i -> Nc, dim)
        elseif Nc isa NTuple{2} && dim == 3
            Nc = (Nc[1], Nc[1], Nc[2])
        end
        @assert length(Nc) == dim

        # Tuple of polynomial degrees (N₁, N₂, N₃)
        N = polynomialorders(grid)
        # In 2D, we assume same polynomial order in the horizontal
        @assert dim == 2 || N[1] == N[2]

        @assert iseven(s)
        @assert all(0 .<= Nc .<= N)

        function σ(η)
            a = 2 * abs(η) - 1
            # χ regularizes the filter near η == 1/2 (a == 0)
            χ = iszero(a) ? one(a) : sqrt(-log1p(-a^2) / a^2)
            return erfc(sqrt(s) * χ * a) / 2
        end

        AT = arraytype(grid)
        ξ = referencepoints(grid)
        filter_matrices =
            ntuple(i -> AT(spectral_filter_matrix(ξ[i], Nc[i], σ)), dim)

        new{typeof(filter_matrices)}(filter_matrices)
    end
end

"""
    CutoffFilter(grid, Nc=polynomialorders(grid))

Returns the spectral filter that zeros out polynomial modes greater than or
equal to `Nc`.
"""
struct CutoffFilter{FM} <: AbstractSpectralFilter
    "filter matrices in all directions (tuple of filter matrices)"
    filter_matrices::FM

    function CutoffFilter(grid, Nc = polynomialorders(grid))
        dim = dimensionality(grid)

        # Support different filtering thresholds in different
        # directions (default behavior is to apply the same threshold
        # uniformly in all directions)
        if Nc isa Integer
            Nc = ntuple(i -> Nc, dim)
        elseif Nc isa NTuple{2} && dim == 3
            Nc = (Nc[1], Nc[1], Nc[2])
        end
        @assert length(Nc) == dim

        # Tuple of polynomial degrees (N₁, N₂, N₃)
        N = polynomialorders(grid)
        # In 2D, we assume same polynomial order in the horizontal
        @assert dim == 2 || N[1] == N[2]

        @assert all(0 .<= Nc .<= N)

        # zero out every mode at or above the cutoff
        σ(η) = 0

        AT = arraytype(grid)
        ξ = referencepoints(grid)
        filter_matrices =
            ntuple(i -> AT(spectral_filter_matrix(ξ[i], Nc[i], σ)), dim)

        new{typeof(filter_matrices)}(filter_matrices)
    end
end

"""
    MassPreservingCutoffFilter(grid, Nc=polynomialorders(grid))

Returns the spectral filter that zeros out polynomial modes greater than or
equal to `Nc` while preserving the cell average value.  Use this filter if
the jacobian is nonconstant.
"""
struct MassPreservingCutoffFilter{FM} <: AbstractSpectralFilter
    "filter matrices in all directions (tuple of filter matrices)"
    filter_matrices::FM

    function MassPreservingCutoffFilter(grid, Nc = polynomialorders(grid))
        dim = dimensionality(grid)

        # Support different filtering thresholds in different
        # directions (default behavior is to apply the same threshold
        # uniformly in all directions)
        if Nc isa Integer
            Nc = ntuple(i -> Nc, dim)
        elseif Nc isa NTuple{2} && dim == 3
            Nc = (Nc[1], Nc[1], Nc[2])
        end
        @assert length(Nc) == dim

        # Tuple of polynomial degrees (N₁, N₂, N₃)
        N = polynomialorders(grid)
        # In 2D, we assume same polynomial order in the horizontal
        @assert dim == 2 || N[1] == N[2]

        # Nc may exceed N here: modified_filter_matrix then yields identity
        @assert all(0 .<= Nc)

        σ(η) = 0

        AT = arraytype(grid)
        ξ = referencepoints(grid)
        filter_matrices =
            ntuple(i -> AT(modified_filter_matrix(ξ[i], Nc[i], σ)), dim)

        new{typeof(filter_matrices)}(filter_matrices)
    end
end

"""
    TMARFilter()

Returns the truncation and mass aware rescaling nonnegativity preservation
filter.  The details of this filter are described in [Light2016](@cite)

Note this needs to be used with a restrictive time step or a flux correction
to ensure that grid integral is conserved.

## Examples

This filter can be applied to the 3rd and 4th fields of an `MPIStateArray`
`Q` with the code

```julia
Filters.apply!(Q, (3, 4), grid, TMARFilter())
```

where `grid` is the associated `DiscontinuousSpectralElementGrid`.
"""
struct TMARFilter <: AbstractFilter end
"""
    Filters.apply!(Q::MPIStateArray, target, grid::DiscontinuousSpectralElementGrid,
                   filter::AbstractFilter; kwargs...)

Applies `filter` to `Q` given a `grid` and a custom `target`.

A `target` can be any of the following:
- a tuple or range of indices
- a tuple of symbols or strings of variable names
- a colon (`:`) to apply to all variables
- a custom [`AbstractFilterTarget`]

The following keyword arguments are supported for some filters:
- `direction`: for `AbstractSpectralFilter` controls if the filter is applied
  in the horizontal and/or vertical directions.  It is assumed that the
  trailing dimension on the reference element is the vertical dimension and
  the rest are horizontal.
- `state_auxiliary`: if `target` requires auxiliary state to compute its
  argument or results.

# Examples

Specifying the `target` via indices:
```julia
Filters.apply!(Q, :, grid, TMARFilter())
Filters.apply!(Q, (1, 3), grid, CutoffFilter(grid); direction=VerticalDirection())
```

Speciying `target` via symbols or strings:
```julia
Filters.apply!(Q, (:ρ, "energy.ρe"), grid, TMARFilter())
Filters.apply!(Q, ("moisture.ρq_tot",), grid, CutoffFilter(grid); direction=VerticalDirection())
```
"""
function apply!(
    Q,
    target,
    grid::DiscontinuousSpectralElementGrid,
    filter::AbstractFilter;
    kwargs...,
)
    device = typeof(Q.data) <: Array ? CPU() : CUDADevice()
    event = Event(device)
    event =
        apply_async!(Q, target, grid, filter; dependencies = event, kwargs...)
    # block until the asynchronous filter kernels have completed
    wait(device, event)
end

"""
    Filters.apply_async!(Q, target, grid::DiscontinuousSpectralElementGrid,
                         filter::AbstractFilter; dependencies, kwargs...)

An asynchronous version of [`Filters.apply!`](@ref), returning an `Event`
object.  `dependencies` should be an `Event` or tuple of `Event`s which need
to finish before applying the filter.

```julia
compstream = Filters.apply_async!(Q, :, grid, CutoffFilter(grid); dependencies=compstream)
wait(compstream)
```
"""
function apply_async! end

function apply_async!(
    Q,
    target::AbstractFilterTarget,
    grid::DiscontinuousSpectralElementGrid,
    filter::AbstractSpectralFilter;
    dependencies,
    state_auxiliary = nothing,
    direction = EveryDirection(),
)
    topology = grid.topology

    # Tuple of polynomial degrees (N₁, N₂, N₃)
    N = polynomialorders(grid)
    # In 2D, we assume same polynomial order in the horizontal
    dim = dimensionality(grid)
    # Currently only support same polynomial in both horizontal directions
    @assert N[1] == N[2]

    device = typeof(Q.data) <: Array ? CPU() : CUDADevice()

    nelem = length(topology.elems)
    # Number of Gauss-Lobatto quadrature points in each direction
    Nq = N .+ 1
    Nq1 = Nq[1]
    Nq2 = Nq[2]
    Nq3 = dim == 2 ? 1 : Nq[dim]

    nrealelem = length(topology.realelems)

    event = dependencies
    if direction isa EveryDirection || direction isa HorizontalDirection
        @assert dim == 2 || Nq1 == Nq2
        filtermatrix = filter.filter_matrices[1]
        event = kernel_apply_filter!(device, (Nq1, Nq2, Nq3))(
            Val(dim),
            Val(N),
            Val(vars(Q)),
            Val(isnothing(state_auxiliary) ? nothing : vars(state_auxiliary)),
            HorizontalDirection(),
            Q.data,
            isnothing(state_auxiliary) ? nothing : state_auxiliary.data,
            target,
            filtermatrix,
            ndrange = (nrealelem * Nq1, Nq2, Nq3),
            dependencies = event,
        )
    end
    if direction isa EveryDirection || direction isa VerticalDirection
        # the vertical is the trailing reference direction
        filtermatrix = filter.filter_matrices[end]
        event = kernel_apply_filter!(device, (Nq1, Nq2, Nq3))(
            Val(dim),
            Val(N),
            Val(vars(Q)),
            Val(isnothing(state_auxiliary) ? nothing : vars(state_auxiliary)),
            VerticalDirection(),
            Q.data,
            isnothing(state_auxiliary) ? nothing : state_auxiliary.data,
            target,
            filtermatrix,
            ndrange = (nrealelem * Nq1, Nq2, Nq3),
            dependencies = event,
        )
    end
    return event
end

function apply_async!(
    Q,
    target::AbstractFilterTarget,
    grid::DiscontinuousSpectralElementGrid,
    ::TMARFilter;
    dependencies,
)
    topology = grid.topology
    device = typeof(Q.data) <: Array ? CPU() : CUDADevice()

    dim = dimensionality(grid)
    N = polynomialorders(grid)
    # Currently only support same polynomial in both horizontal directions
    @assert dim == 2 || N[1] == N[2]
    Nqs = N .+ 1
    Nq = Nqs[1]
    Nqj = dim == 2 ? 1 : Nqs[2]

    nrealelem = length(topology.realelems)
    # reduction size rounded up to a power of two for the parallel sum
    nreduce = 2^ceil(Int, log2(Nq * Nqj))

    event = dependencies
    event = kernel_apply_TMAR_filter!(device, (Nq, Nqj), (nrealelem * Nq, Nqj))(
        Val(nreduce),
        Val(dim),
        Val(N),
        Q.data,
        target,
        grid.vgeo,
        dependencies = event,
    )
    return event
end

function apply_async!(
    Q,
    target::AbstractFilterTarget,
    grid::DiscontinuousSpectralElementGrid,
    filter::MassPreservingCutoffFilter;
    dependencies,
    state_auxiliary = nothing,
    direction = EveryDirection(),
)
    topology = grid.topology
    device = typeof(Q.data) <: Array ? CPU() : CUDADevice()

    dim = dimensionality(grid)
    N = polynomialorders(grid)
    # Currently only support same polynomial in both horizontal directions
    @assert dim == 2 || N[1] == N[2]
    Nq = N .+ 1
    Nq1 = Nq[1]
    Nq2 = Nq[2]
    Nq3 = dim == 2 ? 1 : Nq[dim]

    nrealelem = length(topology.realelems)

    # parallel sum info
    nreduce = 2^ceil(Int, log2(Nq1 * Nq2 * Nq3))

    event = dependencies
    if direction isa EveryDirection || direction isa HorizontalDirection
        @assert dim == 2 || Nq1 == Nq2
        filtermatrix = filter.filter_matrices[1]
        event = kernel_apply_mp_filter!(device, (Nq1, Nq2, Nq3))(
            Val(nreduce),
            Val(dim),
            Val(N),
            Val(vars(Q)),
            Val(isnothing(state_auxiliary) ? nothing : vars(state_auxiliary)),
            HorizontalDirection(),
            Q.data,
            isnothing(state_auxiliary) ? nothing : state_auxiliary.data,
            target,
            filtermatrix,
            grid.vgeo,
            ndrange = (nrealelem * Nq1, Nq2, Nq3),
            dependencies = event,
        )
    end
    if direction isa EveryDirection || direction isa VerticalDirection
        filtermatrix = filter.filter_matrices[end]
        # NOTE(review): the tail of this call runs past the visible chunk
        # boundary; reconstructed symmetrically from the horizontal branch
        # above — confirm against the repository source.
        event = kernel_apply_mp_filter!(device, (Nq1, Nq2, Nq3))(
            Val(nreduce),
            Val(dim),
            Val(N),
            Val(vars(Q)),
            Val(isnothing(state_auxiliary) ? nothing : vars(state_auxiliary)),
            VerticalDirection(),
            Q.data,
            isnothing(state_auxiliary) ? nothing : state_auxiliary.data,
            target,
            filtermatrix,
            grid.vgeo,
            ndrange = (nrealelem * Nq1, Nq2, Nq3),
            dependencies = event,
        )
    end
    return event
end
nothing : state_auxiliary.data, target, filtermatrix, grid.vgeo, ndrange = (nrealelem * Nq1, Nq2, Nq3), dependencies = event, ) end return event end function apply_async!( Q, indices::Union{Colon, AbstractRange, Tuple{Vararg{Integer}}}, grid::DiscontinuousSpectralElementGrid, filter::AbstractFilter; kwargs..., ) if indices isa Colon indices = 1:size(Q, 2) end apply_async!(Q, FilterIndices(indices...), grid, filter; kwargs...) end function apply_async!( Q, vs::Tuple, grid::DiscontinuousSpectralElementGrid, filter::AbstractFilter; kwargs..., ) apply_async!( Q, FilterIndices(varsindices(vars(Q), vs)...), grid, filter; kwargs..., ) end const _M = Grids._M @doc """ kernel_apply_filter!(::Val{dim}, ::Val{N}, direction, Q, state_auxiliary, target, filtermatrix ) where {dim, N} Computational kernel: Applies the `filtermatrix` to `Q` given a custom target `target` while preserving the cell average. The `direction` argument is used to control if the filter is applied in the horizontal and/or vertical reference directions. """ kernel_apply_filter! @kernel function kernel_apply_filter!( ::Val{dim}, ::Val{N}, ::Val{vars_Q}, ::Val{vars_state_auxiliary}, direction, Q, state_auxiliary, target::AbstractFilterTarget, filtermatrix, ) where {dim, N, vars_Q, vars_state_auxiliary} @uniform begin FT = eltype(Q) Nqs = N .+ 1 Nq1 = Nqs[1] Nq2 = Nqs[2] Nq3 = dim == 2 ? 1 : Nqs[dim] if direction isa EveryDirection filterinξ1 = filterinξ2 = true filterinξ3 = dim == 2 ? false : true elseif direction isa HorizontalDirection filterinξ1 = true filterinξ2 = dim == 2 ? false : true filterinξ3 = false elseif direction isa VerticalDirection filterinξ1 = false filterinξ2 = dim == 2 ? true : false filterinξ3 = dim == 2 ? false : true end nstates = varsize(vars_Q) nfilterstates = number_state_filtered(target, FT) nfilteraux = isnothing(state_auxiliary) ? 
0 : varsize(vars_state_auxiliary) # ugly workaround around problems with @private # hopefully will be soon fixed in KA l_Q2 = MVector{nstates, FT}(undef) l_Qfiltered2 = MVector{nfilterstates, FT}(undef) end s_Q = @localmem FT (Nq1, Nq2, Nq3, nfilterstates) l_Q = @private FT (nstates,) l_Qfiltered = @private FT (nfilterstates,) l_aux = @private FT (nfilteraux,) e = @index(Group, Linear) i, j, k = @index(Local, NTuple) @inbounds begin ijk = i + Nq1 * ((j - 1) + Nq2 * (k - 1)) @unroll for s in 1:nstates l_Q[s] = Q[ijk, s, e] end @unroll for s in 1:nfilteraux l_aux[s] = state_auxiliary[ijk, s, e] end fill!(l_Qfiltered2, -zero(FT)) compute_filter_argument!( target, Vars{vars_state_filtered(target, FT)}(l_Qfiltered2), Vars{vars_Q}(l_Q[:]), Vars{vars_state_auxiliary}(l_aux[:]), ) @unroll for fs in 1:nfilterstates l_Qfiltered[fs] = zero(FT) end @unroll for fs in 1:nfilterstates s_Q[i, j, k, fs] = l_Qfiltered2[fs] end if filterinξ1 @synchronize @unroll for n in 1:Nq1 @unroll for fs in 1:nfilterstates l_Qfiltered[fs] += filtermatrix[i, n] * s_Q[n, j, k, fs] end end if filterinξ2 || filterinξ3 @synchronize @unroll for fs in 1:nfilterstates s_Q[i, j, k, fs] = l_Qfiltered[fs] l_Qfiltered[fs] = zero(FT) end end end if filterinξ2 @synchronize @unroll for n in 1:Nq2 @unroll for fs in 1:nfilterstates l_Qfiltered[fs] += filtermatrix[j, n] * s_Q[i, n, k, fs] end end if filterinξ3 @synchronize @unroll for fs in 1:nfilterstates s_Q[i, j, k, fs] = l_Qfiltered[fs] l_Qfiltered[fs] = zero(FT) end end end if filterinξ3 @synchronize @unroll for n in 1:Nq3 @unroll for fs in 1:nfilterstates l_Qfiltered[fs] += filtermatrix[k, n] * s_Q[i, j, n, fs] end end end @unroll for s in 1:nstates l_Q2[s] = l_Q[s] end compute_filter_result!( target, Vars{vars_Q}(l_Q2), Vars{vars_state_filtered(target, FT)}(l_Qfiltered[:]), Vars{vars_state_auxiliary}(l_aux[:]), ) # Store result ijk = i + Nq1 * ((j - 1) + Nq2 * (k - 1)) @unroll for s in 1:nstates Q[ijk, s, e] = l_Q2[s] end @synchronize end end @kernel 
function kernel_apply_TMAR_filter!( ::Val{nreduce}, ::Val{dim}, ::Val{N}, Q, target::FilterIndices{I}, vgeo, ) where {nreduce, dim, N, I} @uniform begin FT = eltype(Q) Nqs = N .+ 1 Nq1 = Nqs[1] Nq2 = dim == 2 ? 1 : Nqs[2] Nq3 = Nqs[end] nfilterstates = number_state_filtered(target, FT) nelemperblock = 1 end l_Q = @private FT (nfilterstates, Nq1) l_MJ = @private FT (Nq1,) s_MJQ = @localmem FT (Nq1 * Nq2, nfilterstates) s_MJQclipped = @localmem FT (Nq1 * Nq2, nfilterstates) e = @index(Group, Linear) i, j = @index(Local, NTuple) @inbounds begin # loop up the pencil and load Q and MJ @unroll for k in 1:Nq3 ijk = i + Nq1 * ((j - 1) + Nq2 * (k - 1)) @unroll for sf in 1:nfilterstates s = I[sf] l_Q[sf, k] = Q[ijk, s, e] end l_MJ[k] = vgeo[ijk, _M, e] end @unroll for sf in 1:nfilterstates MJQ, MJQclipped = zero(FT), zero(FT) @unroll for k in 1:Nq3 MJ = l_MJ[k] Qs = l_Q[sf, k] Qsclipped = Qs ≥ 0 ? Qs : zero(Qs) MJQ += MJ * Qs MJQclipped += MJ * Qsclipped end ij = i + Nq1 * (j - 1) s_MJQ[ij, sf] = MJQ s_MJQclipped[ij, sf] = MJQclipped end @synchronize @unroll for n in 11:-1:1 if nreduce ≥ 2^n ij = i + Nq1 * (j - 1) ijshift = ij + 2^(n - 1) if ij ≤ 2^(n - 1) && ijshift ≤ Nq1 * Nq2 @unroll for sf in 1:nfilterstates s_MJQ[ij, sf] += s_MJQ[ijshift, sf] s_MJQclipped[ij, sf] += s_MJQclipped[ijshift, sf] end end @synchronize end end @unroll for sf in 1:nfilterstates qs_average = s_MJQ[1, sf] qs_clipped_average = s_MJQclipped[1, sf] r = qs_average > 0 ? qs_average / qs_clipped_average : zero(FT) s = I[sf] @unroll for k in 1:Nq3 ijk = i + Nq1 * ((j - 1) + Nq2 * (k - 1)) Qs = l_Q[sf, k] Q[ijk, s, e] = Qs ≥ 0 ? r * Qs : zero(Qs) end end end end """ kernel_apply_mp_filter!(::Val{dim}, ::Val{N}, direction, Q, state_auxiliary, target, filtermatrix ) where {dim, N} Computational kernel: Applies the `filtermatrix` to `Q` given a custom target `target`. 
The `direction` argument is used to control if the filter is applied in the horizontal and/or vertical reference directions. The element average of each filtered state is preserved.
Vars{vars_state_filtered(target, FT)}(u_Qfiltered), Vars{vars_Q}(p_Q[:]), Vars{vars_state_auxiliary}(p_aux[:]), ) @unroll for fs in 1:nfilterstates p_Qfiltered[fs] = zero(FT) end @unroll for fs in 1:nfilterstates l_Q[i, j, k, fs] = u_Qfiltered[fs] end if filterinξ1 @synchronize @unroll for n in 1:Nq1 @unroll for fs in 1:nfilterstates p_Qfiltered[fs] += filtermatrix[i, n] * l_Q[n, j, k, fs] end end if filterinξ2 || filterinξ3 @synchronize @unroll for fs in 1:nfilterstates l_Q[i, j, k, fs] = p_Qfiltered[fs] p_Qfiltered[fs] = zero(FT) end end end if filterinξ2 @synchronize @unroll for n in 1:Nq2 @unroll for fs in 1:nfilterstates p_Qfiltered[fs] += filtermatrix[j, n] * l_Q[i, n, k, fs] end end if filterinξ3 @synchronize @unroll for fs in 1:nfilterstates l_Q[i, j, k, fs] = p_Qfiltered[fs] p_Qfiltered[fs] = zero(FT) end end end if filterinξ3 @synchronize @unroll for n in 1:Nq3 @unroll for fs in 1:nfilterstates p_Qfiltered[fs] += filtermatrix[k, n] * l_Q[i, j, n, fs] end end end # work around for not being able to `Vars` `@private` arrays @unroll for s in 1:nstates u_Q[s] = p_Q[s] end compute_filter_result!( target, Vars{vars_Q}(u_Q), Vars{vars_state_filtered(target, FT)}(p_Qfiltered[:]), Vars{vars_state_auxiliary}(p_aux[:]), ) # Store result and post-filtered mass weighted quantities @unroll for s in 1:nstates p_Q[s] = u_Q[s] l_MQᴬ[ijk, s] = l_M[ijk] * p_Q[s] end @synchronize @unroll for n in 11:-1:1 if nreduce ≥ (1 << n) ijkshift = ijk + (1 << (n - 1)) if ijk ≤ (1 << (n - 1)) && ijkshift ≤ Nq1 * Nq2 * Nq3 l_M[ijk] += l_M[ijkshift] @unroll for s in 1:nstates l_MQᴮ[ijk, s] += l_MQᴮ[ijkshift, s] l_MQᴬ[ijk, s] += l_MQᴬ[ijkshift, s] end end @synchronize end end @synchronize M⁻¹ = 1 / l_M[1] # Reset the element average and store result @unroll for s in 1:nstates Q[ijk, s, e] = p_Q[s] + M⁻¹ * (l_MQᴮ[1, s] - l_MQᴬ[1, s]) end @synchronize end end end # end of module ================================================ FILE: src/Numerics/Mesh/GeometricFactors.jl 
================================================
module GeometricFactors

export VolumeGeometry, SurfaceGeometry

"""
    VolumeGeometry{Nq, AA <: AbstractArray, A <: AbstractArray}

A struct that collects `VolumeGeometry` fields:
- array: Array containing the data stored in a VolumeGeometry struct
  (the following fields are views into this array)
- ∂ξk/∂xi: Derivative of the Cartesian reference element coordinate `ξ_k`
  with respect to the Cartesian physical coordinate `x_i`
- ωJ: Mass matrix. This is the physical mass matrix, and thus contains the
  Jacobian determinant, J .* (ωᵢ ⊗ ωⱼ ⊗ ωₖ), where ωᵢ are the quadrature
  weights and J is the Jacobian determinant, det(∂x/∂ξ)
- ωJI: Inverse mass matrix: 1 ./ ωJ
- ωJH: Horizontal mass matrix (used in diagnostics),
  J .* norm(∂ξ3/∂x) * (ωᵢ ⊗ ωⱼ); for integrating over a plane
  (in 2-D ξ2 is used, not ξ3)
- xi: Nodal degrees of freedom locations in Cartesian physical space
- JcV: Metric terms for vertical line integrals norm(∂x/∂ξ3)
  (in 2-D ξ2 is used, not ξ3)
- ∂xk/∂ξi: Inverse of matrix `∂ξk/∂xi` that represents the derivative of
  Cartesian physical coordinate `x_i` with respect to Cartesian reference
  element coordinate `ξ_k`
"""
struct VolumeGeometry{Nq, AA <: AbstractArray, A <: AbstractArray}
    # backing storage; every field below is a view into this array
    array::AA
    # ∂ξk/∂xi metric terms
    ξ1x1::A
    ξ2x1::A
    ξ3x1::A
    ξ1x2::A
    ξ2x2::A
    ξ3x2::A
    ξ1x3::A
    ξ2x3::A
    ξ3x3::A
    # mass matrix, its inverse, and the horizontal mass matrix
    ωJ::A
    ωJI::A
    ωJH::A
    # nodal physical coordinates
    x1::A
    x2::A
    x3::A
    # vertical line-integral metric
    JcV::A
    # ∂xk/∂ξi metric terms (inverse of ∂ξk/∂xi)
    x1ξ1::A
    x2ξ1::A
    x3ξ1::A
    x1ξ2::A
    x2ξ2::A
    x3ξ2::A
    x1ξ3::A
    x2ξ3::A
    x3ξ3::A
    function VolumeGeometry{Nq}(
        array::AA,
        args::A...,
    ) where {Nq, AA, A <: AbstractArray}
        new{Nq, AA, A}(array, args...)
    end
end

"""
    VolumeGeometry(FT, Nq::NTuple{N, Int}, nelem::Int)

Construct an empty `VolumeGeometry` object, in `FT` precision.
- `Nq` is a tuple containing the number of quadrature points in each direction.
- `nelem` is the number of elements.
"""
function VolumeGeometry(FT, Nq::NTuple{N, Int}, nelem::Int) where {N}
    # - 1 after fieldcount is to remove the `array` field from the array allocation
    array = zeros(FT, prod(Nq), fieldcount(VolumeGeometry) - 1, nelem)
    # each non-`array` field becomes a view into one slab of `array`
    VolumeGeometry{Nq}(
        array,
        ntuple(j -> @view(array[:, j, :]), fieldcount(VolumeGeometry) - 1)...,
    )
end

"""
    SurfaceGeometry{Nq, AA, A <: AbstractArray}

A struct that collects `SurfaceGeometry` fields:
- array: Array containing the data stored in a SurfaceGeometry struct
  (the following fields are views into this array)
- ni: Outward pointing unit normal in physical space
- sωJ: Surface mass matrix. This is the physical mass matrix, and thus
  contains the surface Jacobian determinant, sJ .* (ωⱼ ⊗ ωₖ), where ωᵢ are
  the quadrature weights and sJ is the surface Jacobian determinant
- vωJI: Volume mass matrix at the surface nodes (needed in the lift
  operation, i.e., the projection of a face field back to the volume). Since
  DGSEM is used only collocated, volume mass matrices are required.
"""
struct SurfaceGeometry{Nq, AA, A <: AbstractArray}
    # backing storage; every field below is a view into this array
    array::AA
    # outward unit normal components
    n1::A
    n2::A
    n3::A
    # surface mass matrix
    sωJ::A
    # inverse volume mass matrix at surface nodes
    vωJI::A
    function SurfaceGeometry{Nq}(
        array::AA,
        args::A...,
    ) where {Nq, AA, A <: AbstractArray}
        new{Nq, AA, A}(array, args...)
    end
end

"""
    SurfaceGeometry(FT, Nq::NTuple{N, Int}, nface::Int, nelem::Int)

Construct an empty `SurfaceGeometry` object, in `FT` precision.
- `Nq` is a tuple containing the number of quadrature points in each direction.
- `nface` is the number of faces.
- `nelem` is the number of elements.
"""
function SurfaceGeometry(
    FT,
    Nq::NTuple{N, Int},
    nface::Int,
    nelem::Int,
) where {N}
    Np = prod(Nq)
    # number of quadrature points on the face normal to each direction
    Nfp = div.(Np, Nq)
    # - 1 after fieldcount is to remove the `array` field from the array allocation
    array = zeros(FT, fieldcount(SurfaceGeometry) - 1, maximum(Nfp), nface, nelem)
    SurfaceGeometry{Nfp}(
        array,
        ntuple(
            j -> @view(array[j, :, :, :]),
            fieldcount(SurfaceGeometry) - 1,
        )...,
    )
end

end # module

================================================
FILE: src/Numerics/Mesh/Geometry.jl
================================================

module Geometry

using StaticArrays, LinearAlgebra, DocStringExtensions
using KernelAbstractions.Extras: @unroll

using ..Grids:
    _ξ1x1,
    _ξ2x1,
    _ξ3x1,
    _ξ1x2,
    _ξ2x2,
    _ξ3x2,
    _ξ1x3,
    _ξ2x3,
    _ξ3x3,
    _M,
    _MI,
    _x1,
    _x2,
    _x3,
    _JcV

export LocalGeometry, lengthscale, resolutionmetric, lengthscale_horizontal

"""
    LocalGeometry

The local geometry at a nodal point.

# Constructors

    LocalGeometry{Np, N}(vgeo::AbstractArray{T}, n::Integer, e::Integer)

Extracts a `LocalGeometry` object from the `vgeo` array at node `n` in element
`e` with `Np` being the number of points in the element and `N` being the
polynomial order

# Fields
 - `polyorder` polynomial order of the element
 - `coord` local degree of freedom Cartesian coordinate
 - `invJ` Jacobian from Cartesian to element coordinates: `invJ[i,j]` is
   ``∂ξ_i / ∂x_j``

$(DocStringExtensions.FIELDS)
"""
struct LocalGeometry{Np, N, AT, IT}
    "Global volume geometry array"
    vgeo::AT
    "element local linear node index"
    n::IT
    "process local element index"
    e::IT
    LocalGeometry{Np, N}(vgeo::AT, n::IT, e::IT) where {Np, N, AT, IT} =
        new{Np, N, AT, IT}(vgeo, n, e)
end

# Virtual properties (`polyorder`, `coord`, `invJ`, `center_coord`) are
# computed on demand from the `vgeo` array at this node/element; any other
# name falls through to the actual struct fields.
@inline function Base.getproperty(
    geo::LocalGeometry{Np, N},
    sym::Symbol,
) where {Np, N}
    @inbounds if sym === :polyorder
        # polynomial order is carried statically in the type parameter
        return N
    elseif sym === :coord
        vgeo, n, e = getfield(geo, :vgeo), getfield(geo, :n), getfield(geo, :e)
        FT = eltype(vgeo)
        return @SVector FT[vgeo[n, _x1, e], vgeo[n, _x2, e], vgeo[n, _x3, e]]
    elseif sym === :invJ
        vgeo, n, e = getfield(geo,
        :vgeo), getfield(geo, :n), getfield(geo, :e)
        FT = eltype(vgeo)
        # assemble the ∂ξ/∂x Jacobian from the stored metric terms
        return @SMatrix FT[
            vgeo[n, _ξ1x1, e] vgeo[n, _ξ1x2, e] vgeo[n, _ξ1x3, e]
            vgeo[n, _ξ2x1, e] vgeo[n, _ξ2x2, e] vgeo[n, _ξ2x3, e]
            vgeo[n, _ξ3x1, e] vgeo[n, _ξ3x2, e] vgeo[n, _ξ3x3, e]
        ]
    elseif sym === :center_coord
        vgeo, n, e = getfield(geo, :vgeo), getfield(geo, :n), getfield(geo, :e)
        FT = eltype(vgeo)
        # NOTE(review): `coords` is computed but never used below — candidate
        # for removal
        coords = SVector(vgeo[n, _x1, e], vgeo[n, _x2, e], vgeo[n, _x3, e])
        # mass-weighted average of the nodal coordinates over the element
        V = FT(0)
        xc = FT(0)
        yc = FT(0)
        zc = FT(0)
        @unroll for i in 1:Np
            M = vgeo[i, _M, e]
            V += M
            xc += M * vgeo[i, _x1, e]
            yc += M * vgeo[i, _x2, e]
            zc += M * vgeo[i, _x3, e]
        end
        return SVector(xc / V, yc / V, zc / V)
    else
        return getfield(geo, sym)
    end
end

"""
    resolutionmetric(g::LocalGeometry)

The metric tensor of the discretisation resolution. Given a unit vector `u` in
Cartesian coordinates and `M = resolutionmetric(g)`, `sqrt(u'*M*u)` is the
degree-of-freedom density in the direction of `u`.
"""
function resolutionmetric(g::LocalGeometry)
    S = g.polyorder * g.invJ / 2
    S' * S # TODO: return an eigendecomposition / symmetric object?
end

"""
    lengthscale(g::LocalGeometry)

The effective geometric mean grid resolution at the point.
"""
lengthscale(g::LocalGeometry) =
    2 / (cbrt(det(g.invJ)) * maximum(max.(1, g.polyorder)))

"""
    lengthscale_horizontal(g::LocalGeometry)

The effective horizontal grid resolution at the point.
"""
function lengthscale_horizontal(g::LocalGeometry)
    # inverse Jacobian matrix:
    #
    #   invJ[i,j] = ∂ξ_i / ∂x_j
    invJ = g.invJ
    # The local horizontal grid stretchings are:
    #
    #   horizontal direction 1: √( ∑_j (∂x_1 / ∂ξ_j)² ) * 2
    #   horizontal direction 2: √( ∑_j (∂x_2 / ∂ξ_j)² ) * 2
    #
    # To get these we need to have the Jacobian matrix:
    #
    #   J[i,j] = ∂x_i / ∂ξ_j
    #
    # To get this we can solve the system with the unit basis vectors
    # (division by `N` gives the local average node distance)
    Δ1 = norm(invJ \ SVector(1, 0, 0)) * 2 / g.polyorder[1]
    Δ2 = norm(invJ \ SVector(0, 1, 0)) * 2 / g.polyorder[2]

    # Set the horizontal length scale to the average of the two calculated values
    return (Δ1 + Δ2) / 2
end

end # module

================================================
FILE: src/Numerics/Mesh/Grids.jl
================================================

module Grids

using ..Topologies, ..GeometricFactors
import ..Metrics, ..Elements
import ..BrickMesh

using ClimateMachine.MPIStateArrays
using MPI
using LinearAlgebra
using KernelAbstractions
using DocStringExtensions

export DiscontinuousSpectralElementGrid, AbstractGrid
export dofs_per_element, arraytype, dimensionality, polynomialorders
export referencepoints, min_node_distance, get_z, computegeometry
export EveryDirection, HorizontalDirection, VerticalDirection, Direction

abstract type Direction end
struct EveryDirection <: Direction end
struct HorizontalDirection <: Direction end
struct VerticalDirection <: Direction end

# A `Direction` is "in" another direction only when both are the same
# concrete type
Base.in(::T, ::S) where {T <: Direction, S <: Direction} = T == S

"""
    MinNodalDistance{FT}

A struct containing the minimum nodal distance along horizontal and vertical
directions.
"""
struct MinNodalDistance{FT}
    "horizontal"
    h::FT
    "vertical"
    v::FT
end

abstract type AbstractGrid{
    FloatType,
    dim,
    polynomialorder,
    numberofDOFs,
    DeviceArray,
} end

# Number of degrees of freedom (quadrature points) per element
dofs_per_element(::AbstractGrid{T, D, N, Np}) where {T, D, N, Np} = Np

# Tuple of polynomial orders, one entry per reference direction
polynomialorders(::AbstractGrid{T, dim, N}) where {T, dim, N} = N

# Spatial dimensionality of the grid (2 or 3)
dimensionality(::AbstractGrid{T, dim}) where {T, dim} = dim

Base.eltype(::AbstractGrid{T}) where {T} = T

# Array type used for device storage (e.g. `Array` or `CuArray`)
arraytype(::AbstractGrid{T, D, N, Np, DA}) where {T, D, N, Np, DA} = DA

"""
    referencepoints(::AbstractGrid)

Returns the points on the reference element.
"""
referencepoints(::AbstractGrid) = error("needs to be implemented")

"""
    min_node_distance(::AbstractGrid, direction::Direction=EveryDirection() )

Returns an approximation of the minimum node distance in physical space.
"""
function min_node_distance(
    ::AbstractGrid,
    direction::Direction = EveryDirection(),
)
    error("needs to be implemented")
end

# {{{
# `vgeo` stores geometry and metrics terms at the volume quadrature /
# interpolation points
const _nvgeo = 16
const _ξ1x1,
    _ξ2x1,
    _ξ3x1,
    _ξ1x2,
    _ξ2x2,
    _ξ3x2,
    _ξ1x3,
    _ξ2x3,
    _ξ3x3,
    _M,
    _MI,
    _MH,
    _x1,
    _x2,
    _x3,
    _JcV = 1:_nvgeo
const vgeoid = (
    # ∂ξk/∂xi: derivative of the Cartesian reference element coordinate `ξ_k`
    # with respect to the Cartesian physical coordinate `x_i`
    ξ1x1id = _ξ1x1,
    ξ2x1id = _ξ2x1,
    ξ3x1id = _ξ3x1,
    ξ1x2id = _ξ1x2,
    ξ2x2id = _ξ2x2,
    ξ3x2id = _ξ3x2,
    ξ1x3id = _ξ1x3,
    ξ2x3id = _ξ2x3,
    ξ3x3id = _ξ3x3,
    # M refers to the mass matrix.
This is the physical mass matrix, and thus # contains the Jacobian determinant: # J .* (ωᵢ ⊗ ωⱼ ⊗ ωₖ) # where ωᵢ are the quadrature weights and J is the Jacobian determinant # det(∂x/∂ξ) Mid = _M, # Inverse mass matrix: 1 ./ M MIid = _MI, # Horizontal mass matrix (used in diagnostics) # J .* norm(∂ξ3/∂x) * (ωᵢ ⊗ ωⱼ); for integrating over a plane # (in 2-D ξ2 not ξ3 is used) MHid = _MH, # Nodal degrees of freedom locations in Cartesian physical space x1id = _x1, x2id = _x2, x3id = _x3, # Metric terms for vertical line integrals # norm(∂x/∂ξ3) # (in 2-D ξ2 not ξ3 is used) JcVid = _JcV, ) # `sgeo` stores geometry and metrics terms at the surface quadrature / # interpolation points const _nsgeo = 5 const _n1, _n2, _n3, _sM, _vMI = 1:_nsgeo const sgeoid = ( # outward pointing unit normal in physical space n1id = _n1, n2id = _n2, n3id = _n3, # sM refers to the surface mass matrix. This is the physical mass matrix, # and thus contains the surface Jacobian determinant: # sJ .* (ωⱼ ⊗ ωₖ) # where ωᵢ are the quadrature weights and sJ is the surface Jacobian # determinant sMid = _sM, # Volume mass matrix at the surface nodes (needed in the lift operation, # i.e., the projection of a face field back to the volume). Since DGSEM is # used only collocated, volume mass matrices are required. vMIid = _vMI, ) # }}} """ DiscontinuousSpectralElementGrid(topology; FloatType, DeviceArray, polynomialorder, meshwarp = (x...)->identity(x)) Generate a discontinuous spectral element (tensor product, Legendre-Gauss-Lobatto) grid/mesh from a `topology`, where the order of the elements is given by `polynomialorder`. `DeviceArray` gives the array type used to store the data (`CuArray` or `Array`), and the coordinate points will be of `FloatType`. The polynomial order can be different in each direction (specified as a `NTuple`). If only a single integer is specified, then each dimension will use the same order. 
If the topology dimension is 3 and the `polynomialorder` has dimension 2, then the first value will be used for horizontal and the second for the vertical. The optional `meshwarp` function allows the coordinate points to be warped after the mesh is created; the mesh degrees of freedom are orginally assigned using a trilinear blend of the element corner locations. """ struct DiscontinuousSpectralElementGrid{ T, dim, N, Np, DA, DAT1, DAT2, DAT3, DAT4, DAI1, DAI2, DAI3, TOP, TVTK, MINΔ, } <: AbstractGrid{T, dim, N, Np, DA} "mesh topology" topology::TOP "volume metric terms" vgeo::DAT3 "surface metric terms" sgeo::DAT4 "element to boundary condition map" elemtobndy::DAI2 "volume DOF to element minus side map" vmap⁻::DAI3 "volume DOF to element plus side map" vmap⁺::DAI3 "list of DOFs that need to be received (in neighbors order)" vmaprecv::DAI1 "list of DOFs that need to be sent (in neighbors order)" vmapsend::DAI1 "An array of ranges in `vmaprecv` to receive from each neighbor" nabrtovmaprecv::Any "An array of ranges in `vmapsend` to send to each neighbor" nabrtovmapsend::Any "Array of real elements that do not have a ghost element as a neighbor" interiorelems::Any "Array of real elements that have at least one ghost element as a neighbor" exteriorelems::Any "Array indicating if a degree of freedom (real or ghost) is active" activedofs::Any "1-D LGL weights on the device (one for each dimension)" ω::DAT1 "1-D derivative operator on the device (one for each dimension)" D::DAT2 "1-D indefinite integral operator on the device (one for each dimension)" Imat::DAT2 """ tuple of (x1, x2, x3) to use for vtk output (Needed for the `N = 0` case) in other cases these match `vgeo` values """ x_vtk::TVTK """ Minimum nodal distances for horizontal and vertical directions """ minΔ::MINΔ """ Map to vertex degrees of freedom: `vertmap[v]` contains the degree of freedom located at vertex `v`. 
""" vertmap::Union{DAI1, Nothing} """ Map to edge degrees of freedom: `edgemap[i, edgno, orient]` contains the element node index of the `i`th interior node on edge `edgno`, under orientation `orient`. """ edgemap::Union{DAI3, Nothing} """ Map to face degrees of freedom: `facemap[ij, fcno, orient]` contains the element node index of the `ij`th interior node on face `fcno` under orientation `orient`. Note that only the two orientations that are generated for stacked meshes are currently supported, i.e., mesh orientation `3` as defined by `BrickMesh` gets mapped to orientation `2` for this data structure. """ facemap::Union{DAI3, Nothing} # Constructor for a tuple of polynomial orders function DiscontinuousSpectralElementGrid( topology::AbstractTopology{dim}; polynomialorder, FloatType, DeviceArray, meshwarp::Function = (x...) -> identity(x), ) where {dim} if polynomialorder isa Integer polynomialorder = ntuple(j -> polynomialorder, dim) elseif polynomialorder isa NTuple{2} && dim == 3 polynomialorder = (polynomialorder[1], polynomialorder[1], polynomialorder[2]) end @assert dim == length(polynomialorder) N = polynomialorder (vmap⁻, vmap⁺) = mappings( N, topology.elemtoelem, topology.elemtoface, topology.elemtoordr, ) (vmaprecv, nabrtovmaprecv) = commmapping( N, topology.ghostelems, topology.ghostfaces, topology.nabrtorecv, ) (vmapsend, nabrtovmapsend) = commmapping( N, topology.sendelems, topology.sendfaces, topology.nabrtosend, ) Np = prod(N .+ 1) vertmap, edgemap, facemap = init_vertex_edge_face_mappings(N) # Create element operators for each polynomial order ξω = ntuple( j -> N[j] == 0 ? 
Elements.glpoints(FloatType, N[j]) : Elements.lglpoints(FloatType, N[j]), dim, ) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) Imat = ntuple( j -> indefinite_integral_interpolation_matrix(ξ[j], ω[j]), dim, ) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) (vgeo, sgeo, x_vtk) = computegeometry(topology.elemtocoord, D, ξ, ω, meshwarp) vgeo = vgeo.array sgeo = sgeo.array @assert Np == size(vgeo, 1) activedofs = zeros(Bool, Np * length(topology.elems)) activedofs[1:(Np * length(topology.realelems))] .= true activedofs[vmaprecv] .= true # Create arrays on the device vgeo = DeviceArray(vgeo) sgeo = DeviceArray(sgeo) elemtobndy = DeviceArray(topology.elemtobndy) vmap⁻ = DeviceArray(vmap⁻) vmap⁺ = DeviceArray(vmap⁺) vmapsend = DeviceArray(vmapsend) vmaprecv = DeviceArray(vmaprecv) activedofs = DeviceArray(activedofs) ω = DeviceArray.(ω) D = DeviceArray.(D) Imat = DeviceArray.(Imat) # FIXME: There has got to be a better way! DAT1 = typeof(ω) DAT2 = typeof(D) DAT3 = typeof(vgeo) DAT4 = typeof(sgeo) DAI1 = typeof(vmapsend) DAI2 = typeof(elemtobndy) DAI3 = typeof(vmap⁻) TOP = typeof(topology) TVTK = typeof(x_vtk) if vertmap isa Array vertmap = DAI1(vertmap) end if edgemap isa Array edgemap = DAI3(edgemap) end if facemap isa Array facemap = DAI3(facemap) end FT = FloatType minΔ = MinNodalDistance( min_node_distance(vgeo, topology, N, FT, HorizontalDirection()), min_node_distance(vgeo, topology, N, FT, VerticalDirection()), ) MINΔ = typeof(minΔ) new{ FloatType, dim, N, Np, DeviceArray, DAT1, DAT2, DAT3, DAT4, DAI1, DAI2, DAI3, TOP, TVTK, MINΔ, }( topology, vgeo, sgeo, elemtobndy, vmap⁻, vmap⁺, vmaprecv, vmapsend, nabrtovmaprecv, nabrtovmapsend, DeviceArray(topology.interiorelems), DeviceArray(topology.exteriorelems), activedofs, ω, D, Imat, x_vtk, minΔ, vertmap, edgemap, facemap, ) end end """ referencepoints(::DiscontinuousSpectralElementGrid) Returns the 1D interpolation points used for the reference element. 
"""
function referencepoints(
    ::DiscontinuousSpectralElementGrid{FT, dim, N},
) where {FT, dim, N}
    # Per-direction 1-D quadrature points: Gauss-Legendre for the
    # finite-volume case (N == 0), Legendre-Gauss-Lobatto otherwise
    ξω = ntuple(
        j ->
            N[j] == 0 ? Elements.glpoints(FT, N[j]) :
            Elements.lglpoints(FT, N[j]),
        dim,
    )
    # split the (points, weights) tuples; keep only the points
    ξ, _ = ntuple(j -> map(x -> x[j], ξω), 2)
    return ξ
end

"""
    min_node_distance( ::DiscontinuousSpectralElementGrid, direction::Direction=EveryDirection()) )

Returns an approximation of the minimum node distance in physical space along
the reference coordinate directions.  The direction controls which reference
directions are considered.
"""
min_node_distance(
    grid::DiscontinuousSpectralElementGrid,
    direction::Direction = EveryDirection(),
) = min_node_distance(grid.minΔ, direction)

# The distances are precomputed at grid construction and stored in `grid.minΔ`
min_node_distance(minΔ::MinNodalDistance, ::VerticalDirection) = minΔ.v
min_node_distance(minΔ::MinNodalDistance, ::HorizontalDirection) = minΔ.h
min_node_distance(minΔ::MinNodalDistance, ::EveryDirection) =
    min(minΔ.h, minΔ.v)

# Computes the (MPI-global) minimum node distance directly from the volume
# geometry by launching a kernel over the real elements.
function min_node_distance(
    vgeo,
    topology::AbstractTopology{dim},
    N,
    ::Type{T},
    direction::Direction = EveryDirection(),
) where {T, dim}
    # NOTE(review): self-assignment, has no effect — candidate for removal
    topology = topology
    nrealelem = length(topology.realelems)
    if nrealelem > 0
        Nq = N .+ 1
        Np = prod(Nq)
        device = vgeo isa Array ? CPU() : CUDADevice()
        min_neighbor_distance = similar(vgeo, Np, nrealelem)
        event = Event(device)
        event = kernel_min_neighbor_distance!(device, min(Np, 1024))(
            Val(N),
            Val(dim),
            direction,
            min_neighbor_distance,
            vgeo,
            topology.realelems;
            ndrange = (Np * nrealelem),
            dependencies = (event,),
        )
        wait(device, event)
        locmin = minimum(min_neighbor_distance)
    else
        # no local elements on this rank; contribute the identity of `min`
        locmin = typemax(T)
    end

    # reduce to the global minimum across all MPI ranks
    MPI.Allreduce(locmin, min, topology.mpicomm)
end

"""
    get_z(grid; z_scale = 1, rm_dupes = false)

Get the Gauss-Lobatto points along the Z-coordinate.
- `grid`: DG grid
- `z_scale`: multiplies `z-coordinate`
- `rm_dupes`: removes duplicate Gauss-Lobatto points
"""
function get_z(
    grid::DiscontinuousSpectralElementGrid{T, dim, N};
    z_scale = 1,
    rm_dupes = false,
) where {T, dim, N}
    # Assumes same polynomial orders in all horizontal directions
    @assert dim < 3 || N[1] == N[2]
    Nhoriz = N[1]
    Nvert = N[end]
    # number of nodes per horizontal plane, and per element
    Nph = (Nhoriz + 1)^2
    Np = Nph * (Nvert + 1)
    if last(polynomialorders(grid)) == 0
        rm_dupes = false # no duplicates in FVM
    end
    if rm_dupes
        # take the first node of every horizontal plane, skipping the node
        # duplicated at element interfaces, then append the topmost node
        ijk_range = (1:Nph:(Np - Nph))
        vgeo = Array(grid.vgeo)
        z = reshape(vgeo[ijk_range, _x3, :], :)
        z = [z..., vgeo[Np, _x3, end]]
        return z * z_scale
    else
        ijk_range = (1:Nph:Np)
        z = Array(reshape(grid.vgeo[ijk_range, _x3, :], :))
        return z * z_scale
    end
    # (an unreachable duplicate `return` statement that followed this if/else
    # has been removed: both branches above already return)
end

# Resolve geometry-array index names (e.g. `grid.Mid`, `grid.n1id`) to their
# integer index into `vgeo`/`sgeo`; any other name is a real struct field.
function Base.getproperty(G::DiscontinuousSpectralElementGrid, s::Symbol)
    if s ∈ keys(vgeoid)
        vgeoid[s]
    elseif s ∈ keys(sgeoid)
        sgeoid[s]
    else
        getfield(G, s)
    end
end

# Advertise both the struct fields and the virtual geometry-index names
function Base.propertynames(G::DiscontinuousSpectralElementGrid)
    (
        fieldnames(DiscontinuousSpectralElementGrid)...,
        keys(vgeoid)...,
        keys(sgeoid)...,
    )
end

# {{{ mappings
"""
    mappings(N, elemtoelem, elemtoface, elemtoordr)

This function takes in a tuple of polynomial orders `N` and parts of a topology
(as returned from `connectmesh`) and returns index mappings for the element
surface flux computation.  The returned `Tuple` contains:

- `vmap⁻` an array of linear indices into the volume degrees of freedom where
  `vmap⁻[:,f,e]` are the degrees of freedom indices for face `f` of element
  `e`.

- `vmap⁺` an array of linear indices into the volume degrees of freedom where
  `vmap⁺[:,f,e]` are the degrees of freedom indices for the face neighboring
  face `f` of element `e`.
""" function mappings(N, elemtoelem, elemtoface, elemtoordr) nfaces, nelem = size(elemtoelem) d = div(nfaces, 2) Nq = N .+ 1 # number of points in the element Np = prod(Nq) # Compute the maximum number of points on a face Nfp = div.(Np, Nq) # linear index for each direction, e.g., (i, j, k) -> n p = reshape(1:Np, ntuple(j -> Nq[j], d)) # fmask[f] -> returns an array of all degrees of freedom on face f fmask = if d == 1 ( p[1:1], # Face 1 p[Nq[1]:Nq[1]], # Face 2 ) elseif d == 2 ( p[1, :][:], # Face 1 p[Nq[1], :][:], # Face 2 p[:, 1][:], # Face 3 p[:, Nq[2]][:], # Face 4 ) elseif d == 3 ( p[1, :, :][:], # Face 1 p[Nq[1], :, :][:], # Face 2 p[:, 1, :][:], # Face 3 p[:, Nq[2], :][:], # Face 4 p[:, :, 1][:], # Face 5 p[:, :, Nq[3]][:], # Face 6 ) else error("unknown dimensionality") end # Create a map from Cartesian face dof number to linear face dof numbering # inds[face][i, j] -> n inds = ntuple( f -> dropdims( LinearIndices(ntuple(j -> j == cld(f, 2) ? 1 : Nq[j], d)); dims = cld(f, 2), ), nfaces, ) # Use the largest possible storage vmap⁻ = fill!(similar(elemtoelem, maximum(Nfp), nfaces, nelem), 0) vmap⁺ = fill!(similar(elemtoelem, maximum(Nfp), nfaces, nelem), 0) for e1 in 1:nelem, f1 in 1:nfaces e2 = elemtoelem[f1, e1] f2 = elemtoface[f1, e1] o2 = elemtoordr[f1, e1] d1, d2 = cld(f1, 2), cld(f2, 2) # Check to make sure the dof grid is conforming @assert Nfp[d1] == Nfp[d2] # Always pull out minus side without any flips / rotations vmap⁻[1:Nfp[d1], f1, e1] .= Np * (e1 - 1) .+ fmask[f1][1:Nfp[d1]][:] # Orientation codes defined in BrickMesh.jl (arbitrary numbers in 3D) if o2 == 1 # Neighbor oriented same as minus vmap⁺[1:Nfp[d1], f1, e1] .= Np * (e2 - 1) .+ fmask[f2][1:Nfp[d1]][:] elseif d == 3 && o2 == 3 # Neighbor fliped in first index vmap⁺[1:Nfp[d1], f1, e1] = Np * (e2 - 1) .+ fmask[f2][inds[f2][end:-1:1, :]][:] else error("Orientation '$o2' with dim '$d' not supported yet") end end (vmap⁻, vmap⁺) end # }}} function init_vertex_edge_face_mappings(N) dim = 
length(N)
    Np = N .+ 1
    # Only built for 3-D elements with at least one interior vertical node
    if dim == 3 && Np[end] > 2
        nodes = reshape(1:prod(Np), Np)
        # The 8 corner node indices of the element
        vertmap = Int64.([
            nodes[1, 1, 1],
            nodes[Np[1], 1, 1],
            nodes[1, Np[2], 1],
            nodes[Np[1], Np[2], 1],
            nodes[1, 1, Np[3]],
            nodes[Np[1], 1, Np[3]],
            nodes[1, Np[2], Np[3]],
            nodes[Np[1], Np[2], Np[3]],
        ])

        # Interior (non-corner) nodes per direction
        Ne = Np .- 2
        Ne_max = maximum(Ne)
        if Ne_max ≥ 1
            # edgemap[:, edge, orientation]: interior nodes of each of the 12
            # edges; orientation 2 stores the reversed traversal. Unused slots
            # stay -1.
            edgemap = -ones(Int64, Ne_max, 12, 2)
            if Np[1] > 2
                # Edges 1-4 run along ξ1
                edgemap[1:Ne[1], 1, 1] .= nodes[2:(end - 1), 1, 1]
                edgemap[1:Ne[1], 2, 1] .= nodes[2:(end - 1), Np[2], 1]
                edgemap[1:Ne[1], 3, 1] .= nodes[2:(end - 1), 1, Np[3]]
                edgemap[1:Ne[1], 4, 1] .= nodes[2:(end - 1), Np[2], Np[3]]
                edgemap[1:Ne[1], 1, 2] .= nodes[(end - 1):-1:2, 1, 1]
                edgemap[1:Ne[1], 2, 2] .= nodes[(end - 1):-1:2, Np[2], 1]
                edgemap[1:Ne[1], 3, 2] .= nodes[(end - 1):-1:2, 1, Np[3]]
                edgemap[1:Ne[1], 4, 2] .= nodes[(end - 1):-1:2, Np[2], Np[3]]
            end
            if Np[2] > 2
                # Edges 5-8 run along ξ2
                edgemap[1:Ne[2], 5, 1] .= nodes[1, 2:(end - 1), 1]
                edgemap[1:Ne[2], 6, 1] .= nodes[Np[1], 2:(end - 1), 1]
                edgemap[1:Ne[2], 7, 1] .= nodes[1, 2:(end - 1), Np[3]]
                edgemap[1:Ne[2], 8, 1] .= nodes[Np[1], 2:(end - 1), Np[3]]
                edgemap[1:Ne[2], 5, 2] .= nodes[1, (end - 1):-1:2, 1]
                edgemap[1:Ne[2], 6, 2] .= nodes[Np[1], (end - 1):-1:2, 1]
                edgemap[1:Ne[2], 7, 2] .= nodes[1, (end - 1):-1:2, Np[3]]
                edgemap[1:Ne[2], 8, 2] .= nodes[Np[1], (end - 1):-1:2, Np[3]]
            end
            if Np[3] > 2
                # Edges 9-12 run along ξ3
                edgemap[1:Ne[3], 9, 1] .= nodes[1, 1, 2:(end - 1)]
                edgemap[1:Ne[3], 10, 1] .= nodes[Np[1], 1, 2:(end - 1)]
                edgemap[1:Ne[3], 11, 1] .= nodes[1, Np[2], 2:(end - 1)]
                edgemap[1:Ne[3], 12, 1] .= nodes[Np[1], Np[2], 2:(end - 1)]
                edgemap[1:Ne[3], 9, 2] .= nodes[1, 1, (end - 1):-1:2]
                edgemap[1:Ne[3], 10, 2] .= nodes[Np[1], 1, (end - 1):-1:2]
                edgemap[1:Ne[3], 11, 2] .= nodes[1, Np[2], (end - 1):-1:2]
                edgemap[1:Ne[3], 12, 2] .= nodes[Np[1], Np[2], (end - 1):-1:2]
            end
        else
            edgemap = nothing
        end

        Nf = Np .- 2
        Nf_max = maximum([Nf[1] * Nf[2], Nf[2] * Nf[3], Nf[1] * Nf[3]])
        if Nf_max ≥ 1
            # facemap[:, face, orientation]: interior nodes of each of the 6
            # faces; orientation 2 stores the first-index-reversed traversal.
            facemap = -ones(Int64, Nf_max, 6, 2)
            if Nf[2] > 0 && Nf[3] > 0
                nfc = Nf[2] * Nf[3]
                facemap[1:nfc, 1, 1] .= nodes[1, 2:(end - 1), 2:(end - 1)][:]
                facemap[1:nfc, 2, 1] .=
                    nodes[Np[1], 2:(end - 1), 2:(end - 1)][:]
                facemap[1:nfc, 1, 2] .=
                    nodes[1, (end - 1):-1:2, 2:(end - 1)][:]
                facemap[1:nfc, 2, 2] .=
                    nodes[Np[1], (end - 1):-1:2, 2:(end - 1)][:]
            end
            if Nf[1] > 0 && Nf[3] > 0
                nfc = Nf[1] * Nf[3]
                facemap[1:nfc, 3, 1] .= nodes[2:(end - 1), 1, 2:(end - 1)][:]
                facemap[1:nfc, 4, 1] .=
                    nodes[2:(end - 1), Np[2], 2:(end - 1)][:]
                facemap[1:nfc, 3, 2] .=
                    nodes[(end - 1):-1:2, 1, 2:(end - 1)][:]
                facemap[1:nfc, 4, 2] .=
                    nodes[(end - 1):-1:2, Np[2], 2:(end - 1)][:]
            end
            if Nf[1] > 0 && Nf[2] > 0
                nfc = Nf[1] * Nf[2]
                facemap[1:nfc, 5, 1] .= nodes[2:(end - 1), 2:(end - 1), 1][:]
                facemap[1:nfc, 6, 1] .=
                    nodes[2:(end - 1), 2:(end - 1), Np[3]][:]
                facemap[1:nfc, 5, 2] .=
                    nodes[(end - 1):-1:2, 2:(end - 1), 1][:]
                facemap[1:nfc, 6, 2] .=
                    nodes[(end - 1):-1:2, 2:(end - 1), Np[3]][:]
            end
        else
            facemap = nothing
        end
    else
        vertmap, edgemap, facemap = nothing, nothing, nothing
    end
    return vertmap, edgemap, facemap
end

"""
    commmapping(N, commelems, commfaces, nabrtocomm)

This function takes in a tuple of polynomial orders `N` and parts of a mesh
(as returned from `connectmesh` such as `sendelems`, `sendfaces`, and
`nabrtosend`) and returns index mappings for the element surface flux
parallel communication. The returned `Tuple` contains:

 - `vmapC` an array of linear indices into the volume degrees of freedom to
   be communicated.

 - `nabrtovmapC` a range in `vmapC` to communicate with each neighbor.
""" function commmapping(N, commelems, commfaces, nabrtocomm) nface, nelem = size(commfaces) @assert nelem == length(commelems) d = div(nface, 2) Nq = N .+ 1 Np = prod(Nq) vmapC = similar(commelems, nelem * Np) nabrtovmapC = similar(nabrtocomm) i = 1 e = 1 for neighbor in 1:length(nabrtocomm) rbegin = i for ne in nabrtocomm[neighbor] ce = commelems[ne] # Whole element sending # for n = 1:Np # vmapC[i] = (ce-1)*Np + n # i += 1 # end CI = CartesianIndices(ntuple(j -> 1:Nq[j], d)) for (ci, li) in zip(CI, LinearIndices(CI)) addpoint = false for j in 1:d addpoint |= (commfaces[2 * (j - 1) + 1, e] && ci[j] == 1) || (commfaces[2 * (j - 1) + 2, e] && ci[j] == Nq[j]) end if addpoint vmapC[i] = (ce - 1) * Np + li i += 1 end end e += 1 end rend = i - 1 nabrtovmapC[neighbor] = rbegin:rend end resize!(vmapC, i - 1) (vmapC, nabrtovmapC) end # Compute geometry FVM version function computegeometry_fvm(elemtocoord, D, ξ, ω, meshwarp) FT = eltype(D[1]) dim = length(D) nface = 2dim nelem = size(elemtocoord, 3) Nq = ntuple(j -> size(D[j], 1), dim) Np = prod(Nq) Nfp = div.(Np, Nq) Nq_N1 = max.(Nq, 2) Np_N1 = prod(Nq_N1) Nfp_N1 = div.(Np_N1, Nq_N1) # First we compute the geometry with all the N = 0 dimension set to N = 1 # so that we can later compute the geometry for the case N = 0, as the # average of two N = 1 cases ξ1, ω1 = Elements.lglpoints(FT, 1) D1 = Elements.spectralderivative(ξ1) D_N1 = ntuple(j -> Nq[j] == 1 ? D1 : D[j], dim) ξ_N1 = ntuple(j -> Nq[j] == 1 ? ξ1 : ξ[j], dim) ω_N1 = ntuple(j -> Nq[j] == 1 ? 
ω1 : ω[j], dim)
    # Full geometry at the bumped (N = 1) resolution
    (vgeo_N1, sgeo_N1, x_vtk) =
        computegeometry(elemtocoord, D_N1, ξ_N1, ω_N1, meshwarp)

    # Allocate the storage for N = 0 volume metrics
    vgeo = VolumeGeometry(FT, Nq, nelem)

    # Counter to make sure we got all the vgeo terms
    num_vgeo_handled = 0

    Metrics.creategrid!(vgeo, elemtocoord, ξ)
    x1 = vgeo.x1
    x2 = vgeo.x2
    x3 = vgeo.x3
    @inbounds for j in 1:length(vgeo.x1)
        (x1[j], x2[j], x3[j]) = meshwarp(vgeo.x1[j], vgeo.x2[j], vgeo.x3[j])
    end
    # Update data in vgeo
    vgeo.x1 .= x1
    vgeo.x2 .= x2
    vgeo.x3 .= x3
    num_vgeo_handled += 3

    @views begin
        # ωJ should be a sum
        ωJ_N1 = reshape(vgeo_N1.ωJ, (Nq_N1..., nelem))
        vgeo.ωJ[:] .= sum(ωJ_N1, dims = findall(Nq .== 1))[:]
        num_vgeo_handled += 1

        # need to recompute ωJI
        vgeo.ωJI .= 1 ./ vgeo.ωJ
        num_vgeo_handled += 1

        # coordinates should just be averages
        avg_den = 2^sum(Nq .== 1)
        JcV_N1 = reshape(vgeo_N1.JcV, (Nq_N1..., nelem))
        vgeo.JcV[:] .= sum(JcV_N1, dims = findall(Nq .== 1))[:] ./ avg_den
        num_vgeo_handled += 1

        # For the metrics it is J * ξixk we approximate so multiply and divide
        # the mass matrix (which has the Jacobian determinant and the proper
        # averaging due to the quadrature weights)
        ωJ_N1 = reshape(vgeo_N1.ωJ, (Nq_N1..., nelem))
        ωJI = vgeo.ωJI
        ξ1x1_N1 = reshape(vgeo_N1.ξ1x1, (Nq_N1..., nelem))
        ξ2x1_N1 = reshape(vgeo_N1.ξ2x1, (Nq_N1..., nelem))
        ξ3x1_N1 = reshape(vgeo_N1.ξ3x1, (Nq_N1..., nelem))
        ξ1x2_N1 = reshape(vgeo_N1.ξ1x2, (Nq_N1..., nelem))
        ξ2x2_N1 = reshape(vgeo_N1.ξ2x2, (Nq_N1..., nelem))
        ξ3x2_N1 = reshape(vgeo_N1.ξ3x2, (Nq_N1..., nelem))
        ξ1x3_N1 = reshape(vgeo_N1.ξ1x3, (Nq_N1..., nelem))
        ξ2x3_N1 = reshape(vgeo_N1.ξ2x3, (Nq_N1..., nelem))
        ξ3x3_N1 = reshape(vgeo_N1.ξ3x3, (Nq_N1..., nelem))
        vgeo.ξ1x1[:] .=
            sum(ωJ_N1 .* ξ1x1_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        vgeo.ξ2x1[:] .=
            sum(ωJ_N1 .* ξ2x1_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        vgeo.ξ3x1[:] .=
            sum(ωJ_N1 .* ξ3x1_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        vgeo.ξ1x2[:] .=
            sum(ωJ_N1 .* ξ1x2_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        vgeo.ξ2x2[:] .=
            sum(ωJ_N1 .* ξ2x2_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        vgeo.ξ3x2[:] .=
            sum(ωJ_N1 .* ξ3x2_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        vgeo.ξ1x3[:] .=
            sum(ωJ_N1 .* ξ1x3_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        vgeo.ξ2x3[:] .=
            sum(ωJ_N1 .* ξ2x3_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        vgeo.ξ3x3[:] .=
            sum(ωJ_N1 .* ξ3x3_N1, dims = findall(Nq .== 1))[:] .* ωJI[:]
        num_vgeo_handled += 9

        # compute ωJH and JvC
        horizontal_metrics!(vgeo, Nq, ω)
        num_vgeo_handled += 1

        # Make sure we handled all the vgeo terms
        @assert _nvgeo == num_vgeo_handled
    end

    # Sort out the sgeo terms
    @views begin
        sgeo = SurfaceGeometry(FT, Nq, nface, nelem)

        # for the volume inverse mass matrix
        p = reshape(1:Np, Nq)
        if dim == 1
            fmask = (p[1:1], p[Nq[1]:Nq[1]])
        elseif dim == 2
            fmask = (p[1, :][:], p[Nq[1], :][:], p[:, 1][:], p[:, Nq[2]][:])
        elseif dim == 3
            fmask = (
                p[1, :, :][:],
                p[Nq[1], :, :][:],
                p[:, 1, :][:],
                p[:, Nq[2], :][:],
                p[:, :, 1][:],
                p[:, :, Nq[3]][:],
            )
        end

        for d in 1:dim
            for f in (2d - 1):(2d)
                # number of points matches means that we keep all the data
                # (N = 0 is not on the face)
                if Nfp[d] == Nfp_N1[d]
                    sgeo.n1[:, f, :] .= sgeo_N1.n1[:, f, :]
                    sgeo.n2[:, f, :] .= sgeo_N1.n2[:, f, :]
                    sgeo.n3[:, f, :] .= sgeo_N1.n3[:, f, :]
                    sgeo.sωJ[:, f, :] .= sgeo_N1.sωJ[:, f, :]
                    # Volume inverse mass will be wrong so reset it
                    sgeo.vωJI[:, f, :] .= vgeo.ωJI[fmask[f], :]
                else
                    # Counter to make sure we got all the sgeo terms
                    num_sgeo_handled = 0

                    # sum to get sM
                    Nq_f = (Nq[1:(d - 1)]..., Nq[(d + 1):dim]...)
                    Nq_f_N1 = (Nq_N1[1:(d - 1)]..., Nq_N1[(d + 1):dim]...)
                    sM_N1 = reshape(
                        sgeo_N1.sωJ[1:Nfp_N1[d], f, :],
                        Nq_f_N1...,
                        nelem,
                    )
                    sgeo.sωJ[1:Nfp[d], f, :][:] .=
                        sum(sM_N1, dims = findall(Nq_f .== 1))[:]
                    num_sgeo_handled += 1

                    # Normals (like metrics in the volume) need to be computed
                    # scaled by surface Jacobian which we can do with the
                    # surface mass matrices
                    sM = sgeo.sωJ[1:Nfp[d], f, :]
                    fld_N1_n1 = reshape(
                        sgeo_N1.n1[1:Nfp_N1[d], f, :],
                        Nq_f_N1...,
                        nelem,
                    )
                    fld_N1_n2 = reshape(
                        sgeo_N1.n2[1:Nfp_N1[d], f, :],
                        Nq_f_N1...,
                        nelem,
                    )
                    fld_N1_n3 = reshape(
                        sgeo_N1.n3[1:Nfp_N1[d], f, :],
                        Nq_f_N1...,
                        nelem,
                    )
                    sgeo.n1[1:Nfp[d], f, :][:] .=
                        sum(sM_N1 .* fld_N1_n1, dims = findall(Nq_f .== 1))[:] ./
                        sM[:]
                    sgeo.n2[1:Nfp[d], f, :][:] .=
                        sum(sM_N1 .* fld_N1_n2, dims = findall(Nq_f .== 1))[:] ./
                        sM[:]
                    sgeo.n3[1:Nfp[d], f, :][:] .=
                        sum(sM_N1 .* fld_N1_n3, dims = findall(Nq_f .== 1))[:] ./
                        sM[:]
                    num_sgeo_handled += 3

                    # set the volume inverse mass matrix
                    sgeo.vωJI[1:Nfp[d], f, :] .= vgeo.ωJI[fmask[f], :]
                    num_sgeo_handled += 1

                    # Make sure we handled all the sgeo terms
                    @assert _nsgeo == num_sgeo_handled
                end
            end
        end
    end

    (vgeo, sgeo, x_vtk)
end

"""
    computegeometry(elemtocoord, D, ξ, ω, meshwarp)

Compute the geometric factors data needed to define metric terms at each
quadrature point. First, compute the so called "topology coordinates" from
reference coordinates ξ. Then map these topology coordinate to physical
coordinates. Then compute the Jacobian of the mapping from reference
coordinates to physical coordinates, i.e., ∂x/∂ξ, by calling
`compute_reference_to_physical_coord_jacobian!`. Finally, compute the metric
terms by calling the function `computemetric!`.
""" function computegeometry(elemtocoord, D, ξ, ω, meshwarp) FT = eltype(D[1]) dim = length(D) nface = 2dim nelem = size(elemtocoord, 3) Nq = ntuple(j -> size(D[j], 1), dim) # Compute metric terms for FVM if any(Nq .== 1) return computegeometry_fvm(elemtocoord, D, ξ, ω, meshwarp) end Np = prod(Nq) Nfp = div.(Np, Nq) # Initialize volume and surface geometric term data structures vgeo = VolumeGeometry(FT, Nq, nelem) sgeo = SurfaceGeometry(FT, Nq, nface, nelem) # a) Compute "topology coordinates" from reference coordinates ξ Metrics.creategrid!(vgeo, elemtocoord, ξ) # Create local variables x1 = vgeo.x1 x2 = vgeo.x2 x3 = vgeo.x3 # b) Map "topology coordinates" -> physical coordinates @inbounds for j in 1:length(vgeo.x1) (x1[j], x2[j], x3[j]) = meshwarp(vgeo.x1[j], vgeo.x2[j], vgeo.x3[j]) end # Update global data in vgeo vgeo.x1 .= x1 vgeo.x2 .= x2 vgeo.x3 .= x3 # c) Compute Jacobian matrix, ∂x/∂ξ Metrics.compute_reference_to_physical_coord_jacobian!(vgeo, nelem, D) # d) Compute the metric terms Metrics.computemetric!(vgeo, sgeo, D) # Note: # To get analytic derivatives, we need to be able differentiate through (a,b) and combine (a,b,c) # Compute the metric terms p = reshape(1:Np, Nq) if dim == 1 fmask = (p[1:1], p[Nq[1]:Nq[1]]) elseif dim == 2 fmask = (p[1, :][:], p[Nq[1], :][:], p[:, 1][:], p[:, Nq[2]][:]) elseif dim == 3 fmask = ( p[1, :, :][:], p[Nq[1], :, :][:], p[:, 1, :][:], p[:, Nq[2], :][:], p[:, :, 1][:], p[:, :, Nq[3]][:], ) end # since `ξ1` is the fastest dimension and `ξdim` the slowest the tensor # product order is reversed M = kron(1, reverse(ω)...) vgeo.ωJ .*= M vgeo.ωJI .= 1 ./ vgeo.ωJ for d in 1:dim for f in (2d - 1):(2d) sgeo.vωJI[1:Nfp[d], f, :] .= vgeo.ωJI[fmask[f], :] end end sM = fill!(similar(sgeo.sωJ, maximum(Nfp), nface), NaN) for d in 1:dim for f in (2d - 1):(2d) ωf = ntuple(j -> ω[mod1(d + j, dim)], dim - 1) # Because of the `mod1` this face is already flipped if !(dim == 3 && d == 2) ωf = reverse(ωf) end sM[1:Nfp[d], f] .= dim > 1 ? 
kron(1, ωf...) : one(FT) end end sgeo.sωJ .*= sM # compute MH and JvC horizontal_metrics!(vgeo, Nq, ω) # This is mainly done to support FVM plotting when N=0 (since we need cell # edge values) x_vtk = (vgeo.x1, vgeo.x2, vgeo.x3) return (vgeo, sgeo, x_vtk) end """ horizontal_metrics!(vgeo::VolumeGeometry, Nq, ω) Compute the horizontal mass matrix `ωJH` field of `vgeo` ``` J .* norm(∂ξ3/∂x) * (ωᵢ ⊗ ωⱼ); for integrating over a plane ``` (in 2-D ξ2 not ξ3 is used). """ function horizontal_metrics!(vgeo::VolumeGeometry, Nq, ω) dim = length(Nq) MH = dim == 1 ? 1 : kron(ones(1, Nq[dim]), reverse(ω[1:(dim - 1)])...)[:] M = vec(kron(1, reverse(ω)...)) J = vgeo.ωJ ./ M # Compute |r'(ξ3)| for vertical line integrals if dim == 1 vgeo.ωJH .= 1 elseif dim == 2 vgeo.ωJH .= MH .* hypot.(J .* vgeo.ξ2x1, J .* vgeo.ξ2x2) elseif dim == 3 vgeo.ωJH .= MH .* hypot.(J .* vgeo.ξ3x1, J .* vgeo.ξ3x2, J .* vgeo.ξ3x3) else error("dim $dim not implemented") end return vgeo end """ indefinite_integral_interpolation_matrix(r, ω) Given a set of integration points `r` and integration weights `ω` this computes a matrix that will compute the indefinite integral of the (interpolant) of a function and evaluate the indefinite integral at the points `r`. Namely, let ```math q(ξ) = ∫_{ξ_{0}}^{ξ} f(ξ') dξ' ``` then we have that ``` I∫ * f.(r) = q.(r) ``` where `I∫` is the integration and interpolation matrix defined by this function. !!! note The integration is done using the provided quadrature weight, so if these cannot integrate `f(ξ)` exactly, `f` is first interpolated and then integrated using quadrature. Namely, we have that: ```math q(ξ) = ∫_{ξ_{0}}^{ξ} I(f(ξ')) dξ' ``` where `I` is the interpolation operator. """ function indefinite_integral_interpolation_matrix(r, ω) Nq = length(r) I∫ = similar(r, Nq, Nq) # first value is zero I∫[1, :] .= Nq == 1 ? 
ω[1] : 0
    # barycentric weights for interpolation
    wbary = Elements.baryweights(r)

    # Compute the interpolant of the indefinite integral
    for n in 2:Nq
        # grid from first dof to current point
        rdst = (1 .- r) / 2 * r[1] + (1 .+ r) / 2 * r[n]
        # interpolation matrix
        In = Elements.interpolationmatrix(r, rdst, wbary)
        # scaling from LGL to current of the interval
        Δ = (r[n] - r[1]) / 2
        # row of the matrix we have computed
        I∫[n, :] .= (Δ * ω' * In)[:]
    end
    I∫
end
# }}}

using KernelAbstractions.Extras: @unroll
using StaticArrays

# Geometry array column indices shared with the Grids module
const _x1 = Grids._x1
const _x2 = Grids._x2
const _x3 = Grids._x3
const _JcV = Grids._JcV

@doc """
    kernel_min_neighbor_distance!(::Val{N}, ::Val{dim}, direction,
                                  min_neighbor_distance, vgeo,
                                  topology.realelems)

Computational kernel: Computes the minimum physical distance between node
neighbors within an element.

The `direction` in the reference element controls which nodes are considered
neighbors.
""" kernel_min_neighbor_distance!
@kernel function kernel_min_neighbor_distance!(
    ::Val{N},
    ::Val{dim},
    direction,
    min_neighbor_distance,
    vgeo,
    elems,
) where {N, dim}
    @uniform begin
        FT = eltype(min_neighbor_distance)
        Nq = N .+ 1
        Np = prod(Nq)

        # Which reference directions (ξ1, ξ2, ξ3) participate in the minimum
        if direction isa EveryDirection
            mininξ = (true, true, true)
        elseif direction isa HorizontalDirection
            mininξ = (true, dim == 2 ? false : true, false)
        elseif direction isa VerticalDirection
            mininξ = (false, dim == 2 ? true : false, dim == 2 ? false : true)
        end

        @inbounds begin
            # 2D Nq = (nh, nv)
            # 3D Nq = (nh, nh, nv)
            Nq1 = Nq[1]
            Nq2 = Nq[2]
            Nqk = dim == 2 ?
1 : Nq[end]
            mininξ1 = mininξ[1]
            mininξ2 = mininξ[2]
            mininξ3 = mininξ[3]
        end
    end

    I = @index(Global, Linear)
    # local element id
    e = (I - 1) ÷ Np + 1
    # local quadrature id
    ijk = (I - 1) % Np + 1
    # local i, j, k quadrature id
    i = (ijk - 1) % Nq1 + 1
    j = (ijk - 1) ÷ Nq1 % Nq2 + 1
    k = (ijk - 1) ÷ (Nq1 * Nq2) % Nqk + 1

    md = typemax(FT)
    x = SVector(vgeo[ijk, _x1, e], vgeo[ijk, _x2, e], vgeo[ijk, _x3, e])

    # first horizontal distance
    if mininξ1
        @unroll for î in (i - 1, i + 1)
            if 1 ≤ î ≤ Nq1
                îjk = î + Nq1 * (j - 1) + Nq1 * Nq2 * (k - 1)
                x̂ = SVector(
                    vgeo[îjk, _x1, e],
                    vgeo[îjk, _x2, e],
                    vgeo[îjk, _x3, e],
                )
                md = min(md, norm(x - x̂))
            end
        end
    end

    # second horizontal distance or vertical distance (dim=2)
    if mininξ2
        # FV vertical direction, use 2vgeo[ijk, _JcV, e]
        if dim == 2 && Nq2 == 1
            md = min(md, 2vgeo[ijk, _JcV, e])
        else
            @unroll for ĵ in (j - 1, j + 1)
                if 1 ≤ ĵ ≤ Nq2
                    iĵk = i + Nq1 * (ĵ - 1) + Nq1 * Nq2 * (k - 1)
                    x̂ = SVector(
                        vgeo[iĵk, _x1, e],
                        vgeo[iĵk, _x2, e],
                        vgeo[iĵk, _x3, e],
                    )
                    md = min(md, norm(x - x̂))
                end
            end
        end
    end

    # vertical distance (dim=3)
    if mininξ3
        # FV vertical direction, use 2vgeo[ijk, _JcV, e]
        if dim == 3 && Nqk == 1
            md = min(md, 2vgeo[ijk, _JcV, e])
        else
            @unroll for k̂ in (k - 1, k + 1)
                if 1 ≤ k̂ ≤ Nqk
                    ijk̂ = i + Nq1 * (j - 1) + Nq1 * Nq2 * (k̂ - 1)
                    x̂ = SVector(
                        vgeo[ijk̂, _x1, e],
                        vgeo[ijk̂, _x2, e],
                        vgeo[ijk̂, _x3, e],
                    )
                    md = min(md, norm(x - x̂))
                end
            end
        end
    end

    min_neighbor_distance[ijk, e] = md
end

end # module

================================================ FILE: src/Numerics/Mesh/Interpolation.jl ================================================

module Interpolation

using CUDA
using DocStringExtensions
using LinearAlgebra
using MPI
using OrderedCollections
using StaticArrays
using KernelAbstractions

using ClimateMachine
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Geometry
import ClimateMachine.Mesh.Elements: baryweights
import ClimateMachine.MPIStateArrays: array_device

export dimensions,
accumulate_interpolated_data,
    accumulate_interpolated_data!,
    InterpolationBrick,
    InterpolationCubedSphere,
    interpolate_local!,
    project_cubed_sphere!,
    InterpolationTopology

abstract type InterpolationTopology end

# NOTE(review): the parameter is *named* `nothing` (shadowing `Base.nothing`),
# so this method matches any argument, not just `::Nothing`; presumably
# `dimensions(::Nothing)` was intended — confirm before changing dispatch.
dimensions(nothing) = OrderedDict()

"""
    InterpolationBrick{
        FT <: AbstractFloat,
        UI8AD <: AbstractArray{UInt8, 2},
        UI16VD <: AbstractVector{UInt16},
        I32V <: AbstractVector{Int32},
    } <: InterpolationTopology

This interpolation data structure and the corresponding functions works for a
brick, where stretching/compression happens only along the x1, x2 & x3 axis.
Here x1 = X1(ξ1), x2 = X2(ξ2) and x3 = X3(ξ3).

# Fields

$(DocStringExtensions.FIELDS)

# Usage

    InterpolationBrick(
        grid::DiscontinuousSpectralElementGrid{FT},
        xbnd::Array{FT,2},
        xres,
    ) where FT <: AbstractFloat

This interpolation structure and the corresponding functions works for a
brick, where stretching/compression happens only along the x1, x2 & x3 axis.
Here x1 = X1(ξ1), x2 = X2(ξ2) and x3 = X3(ξ3).

# Arguments for the inner constructor
 - `grid`: DiscontinuousSpectralElementGrid
 - `xbnd`: Domain boundaries in x1, x2 and x3 directions
 - `x1g`: Interpolation grid in x1 direction
 - `x2g`: Interpolation grid in x2 direction
 - `x3g`: Interpolation grid in x3 direction
"""
struct InterpolationBrick{
    FT <: AbstractFloat,
    I <: Int,
    FTV <: AbstractVector{FT},
    FTVD <: AbstractVector{FT},
    IVD <: AbstractVector{I},
    FTA2 <: Array{FT, 2},
    UI8AD <: AbstractArray{UInt8, 2},
    UI16VD <: AbstractVector{UInt16},
    I32V <: AbstractVector{Int32},
} <: InterpolationTopology
    "Number of elements"
    Nel::I
    "Total number of interpolation points"
    Np::I
    "Total number of interpolation points on local process"
    Npl::I
    "Domain bounds in x1, x2 and x3 directions"
    xbnd::FTA2
    "Interpolation grid in x1 direction"
    x1g::FTV
    "Interpolation grid in x2 direction"
    x2g::FTV
    "Interpolation grid in x3 direction"
    x3g::FTV
    "Unique ξ1 coordinates of interpolation points within each spectral element"
    ξ1::FTVD
    "Unique ξ2 coordinates of interpolation points within each spectral element"
    ξ2::FTVD
    "Unique ξ3 coordinates of interpolation points within each spectral element"
    ξ3::FTVD
    "Flags when ξ1/ξ2/ξ3 interpolation point matches with a GLL point"
    flg::UI8AD
    "Normalization factor"
    fac::FTVD
    "x1 interpolation grid index of interpolation points within each element on the local process"
    x1i::UI16VD
    "x2 interpolation grid index of interpolation points within each element on the local process"
    x2i::UI16VD
    "x3 interpolation grid index of interpolation points within each element on the local process"
    x3i::UI16VD
    "Offsets for each element"
    offset::IVD # offsets for each element for v
    "GLL points in ξ1 direction"
    m_ξ1::FTVD
    "GLL points in ξ2 direction"
    m_ξ2::FTVD
    "GLL points in ξ3 direction"
    m_ξ3::FTVD
    "Barycentric weights"
    wb1::FTVD
    "Barycentric weights"
    wb2::FTVD
    "Barycentric weights"
    wb3::FTVD
    # MPI setup for gathering interpolated variable on proc 0
    "Number of interpolation points on each of the processes"
    Np_all::I32V
    "x1 interpolation grid index of interpolation points within each element on all processes stored only on proc 0"
    x1i_all::UI16VD
    "x2 interpolation grid index of interpolation points within each element on all processes stored only on proc 0"
    x2i_all::UI16VD
    "x3 interpolation grid index of interpolation points within each element on all processes stored only on proc 0"
    x3i_all::UI16VD

    function InterpolationBrick(
        grid::DiscontinuousSpectralElementGrid{FT},
        xbnd::Array{FT, 2},
        x1g::AbstractArray{FT, 1},
        x2g::AbstractArray{FT, 1},
        x3g::AbstractArray{FT, 1},
    ) where {FT <: AbstractFloat}
        mpicomm = grid.topology.mpicomm
        pid = MPI.Comm_rank(mpicomm)
        npr = MPI.Comm_size(mpicomm)

        DA = arraytype(grid) # device array
        device = arraytype(grid) <: Array ? CPU() : CUDADevice()
        qm = polynomialorders(grid) .+ 1
        ndim = 3
        toler = 4 * eps(FT) # tolerance

        n1g = length(x1g)
        n2g = length(x2g)
        n3g = length(x3g)
        Np = n1g * n2g * n3g
        # Marks interpolation points not yet claimed by an element (points on
        # shared element boundaries are claimed exactly once)
        marker = BitArray{3}(undef, n1g, n2g, n3g)
        fill!(marker, true)

        Nel = length(grid.topology.realelems) # # of elements on local process
        offset = Vector{Int}(undef, Nel + 1) # offsets for the interpolated variable
        n123 = zeros(Int, ndim)        # # of unique ξ1, ξ2, ξ3 points in each cell
        xsten = zeros(Int, 2, ndim)    # x1, x2, x3 start and end for each brick element
        xbndl = zeros(FT, 2, ndim)     # x1,x2,x3 limits (min,max) for each brick element

        ξ1 = map(i -> zeros(FT, i), zeros(Int, Nel))
        ξ2 = map(i -> zeros(FT, i), zeros(Int, Nel))
        ξ3 = map(i -> zeros(FT, i), zeros(Int, Nel))

        x1i = map(i -> zeros(UInt16, i), zeros(UInt16, Nel))
        x2i = map(i -> zeros(UInt16, i), zeros(UInt16, Nel))
        x3i = map(i -> zeros(UInt16, i), zeros(UInt16, Nel))

        x = map(i -> zeros(FT, ndim, i), zeros(Int, Nel)) # interpolation grid points embedded in each cell

        offset[1] = 0
        for el in 1:Nel
            # Find, per direction, which interpolation grid indices fall
            # inside this element's coordinate bounds (within tolerance)
            for (xg, dim) in zip((x1g, x2g, x3g), 1:ndim)
                xbndl[1, dim], xbndl[2, dim] =
                    extrema(grid.topology.elemtocoord[dim, :, el])

                st = findfirst(xg .≥ xbndl[1, dim] .- toler)
                if st ≠ nothing
                    if xg[st] > (xbndl[2, dim] + toler)
                        st = nothing
                    end
                end

                if st ≠ nothing
                    xsten[1, dim] = st
                    xsten[2, dim] =
                        findlast(temp -> temp .≤ xbndl[2, dim] .+ toler, xg)
                    n123[dim] = xsten[2, dim] - xsten[1, dim] + 1
                else
                    n123[dim] = 0
                end
            end

            if prod(n123) > 0
                for k in xsten[1, 3]:xsten[2, 3],
                    j in xsten[1, 2]:xsten[2, 2],
                    i in xsten[1, 1]:xsten[2, 1]

                    if marker[i, j, k]
                        # Map the physical grid point into the element's
                        # reference coordinates ξ ∈ [-1, 1]
                        push!(
                            ξ1[el],
                            2 * (x1g[i] - xbndl[1, 1]) /
                            (xbndl[2, 1] - xbndl[1, 1]) - 1,
                        )
                        push!(
                            ξ2[el],
                            2 * (x2g[j] - xbndl[1, 2]) /
                            (xbndl[2, 2] - xbndl[1, 2]) - 1,
                        )
                        push!(
                            ξ3[el],
                            2 * (x3g[k] - xbndl[1, 3]) /
                            (xbndl[2, 3] - xbndl[1, 3]) - 1,
                        )

                        push!(x1i[el], UInt16(i))
                        push!(x2i[el], UInt16(j))
                        push!(x3i[el], UInt16(k))

                        marker[i, j, k] = false
                    end
                end
                offset[el + 1] = offset[el] + length(ξ1[el])
            else
                offset[el + 1] = offset[el]
            end
        end # el loop

        m_ξ1, m_ξ2, m_ξ3 = referencepoints(grid)
        wb1, wb2, wb3 =
            baryweights(m_ξ1), baryweights(m_ξ2), baryweights(m_ξ3)

        # Flatten the per-element vectors into contiguous device-ready arrays
        Npl = offset[end]
        ξ1_d = Array{FT}(undef, Npl)
        ξ2_d = Array{FT}(undef, Npl)
        ξ3_d = Array{FT}(undef, Npl)

        x1i_d = Array{UInt16}(undef, Npl)
        x2i_d = Array{UInt16}(undef, Npl)
        x3i_d = Array{UInt16}(undef, Npl)

        fac_d = zeros(FT, Npl)
        flg_d = zeros(UInt8, 3, Npl)

        for i in 1:Nel
            ctr = 1
            for j in (offset[i] + 1):offset[i + 1]
                ξ1_d[j] = ξ1[i][ctr]
                ξ2_d[j] = ξ2[i][ctr]
                ξ3_d[j] = ξ3[i][ctr]
                x1i_d[j] = x1i[i][ctr]
                x2i_d[j] = x2i[i][ctr]
                x3i_d[j] = x3i[i][ctr]
                # set up interpolation: accumulate the barycentric
                # normalization factors; a `flg` entry records when the
                # interpolation point coincides with a GLL point
                fac1 = FT(0)
                fac2 = FT(0)
                fac3 = FT(0)
                for ib in 1:qm[1]
                    if abs(m_ξ1[ib] - ξ1_d[j]) < toler
                        @inbounds flg_d[1, j] = UInt8(ib)
                    else
                        @inbounds fac1 += wb1[ib] / (ξ1_d[j] - m_ξ1[ib])
                    end
                end
                for ib in 1:qm[2]
                    if abs(m_ξ2[ib] - ξ2_d[j]) < toler
                        @inbounds flg_d[2, j] = UInt8(ib)
                    else
                        @inbounds fac2 += wb2[ib] / (ξ2_d[j] - m_ξ2[ib])
                    end
                end
                for ib in 1:qm[3]
                    if abs(m_ξ3[ib] - ξ3_d[j]) < toler
                        @inbounds flg_d[3, j] = UInt8(ib)
                    else
                        @inbounds fac3 += wb3[ib] / (ξ3_d[j] - m_ξ3[ib])
                    end
                end
                # Coincident points need no normalization in that direction
                flg_d[1, j] ≠ UInt8(0) && (fac1 = FT(1))
                flg_d[2, j] ≠ UInt8(0) && (fac2 = FT(1))
                flg_d[3, j] ≠ UInt8(0) && (fac3 = FT(1))

                fac_d[j] = FT(1) / (fac1 * fac2 * fac3)

                ctr += 1
            end
        end

        # MPI setup for gathering data on proc 0
        root = 0
        Np_all = zeros(Int32, npr)
        Np_all[pid + 1] = Npl
        MPI.Allreduce!(Np_all, +, mpicomm)

        if pid ≠ root
            x1i_all = zeros(UInt16, 0)
            x2i_all = zeros(UInt16, 0)
            x3i_all = zeros(UInt16, 0)
            MPI.Gatherv!(x1i_d, nothing, root, mpicomm)
            MPI.Gatherv!(x2i_d, nothing, root, mpicomm)
            MPI.Gatherv!(x3i_d, nothing, root, mpicomm)
        else
            x1i_all = Array{UInt16}(undef, sum(Np_all))
            x2i_all = Array{UInt16}(undef, sum(Np_all))
            x3i_all = Array{UInt16}(undef, sum(Np_all))
            MPI.Gatherv!(x1i_d, VBuffer(x1i_all, Np_all), root, mpicomm)
            MPI.Gatherv!(x2i_d, VBuffer(x2i_all, Np_all), root, mpicomm)
            MPI.Gatherv!(x3i_d, VBuffer(x3i_all, Np_all), root, mpicomm)
        end

        # Move everything the interpolation kernel touches to the device
        if device isa CUDADevice
            ξ1_d = DA(ξ1_d)
            ξ2_d = DA(ξ2_d)
            ξ3_d = DA(ξ3_d)

            x1i_d = DA(x1i_d)
            x2i_d = DA(x2i_d)
            x3i_d = DA(x3i_d)

            flg_d = DA(flg_d)
            fac_d = DA(fac_d)

            offset = DA(offset)

            m_ξ1 = DA(m_ξ1)
            m_ξ2 = DA(m_ξ2)
            m_ξ3 = DA(m_ξ3)

            wb1 = DA(wb1)
            wb2 = DA(wb2)
            wb3 = DA(wb3)

            x1i_all = DA(x1i_all)
            x2i_all = DA(x2i_all)
            x3i_all = DA(x3i_all)
        end

        return new{
            FT,
            Int,
            typeof(x1g),
            typeof(ξ1_d),
            typeof(offset),
            typeof(xbnd),
            typeof(flg_d),
            typeof(x1i_d),
            typeof(Np_all),
        }(
            Nel,
            Np,
            Npl,
            xbnd,
            x1g,
            x2g,
            x3g,
            ξ1_d,
            ξ2_d,
            ξ3_d,
            flg_d,
            fac_d,
            x1i_d,
            x2i_d,
            x3i_d,
            offset,
            m_ξ1,
            m_ξ2,
            m_ξ3,
            wb1,
            wb2,
            wb3,
            Np_all,
            x1i_all,
            x2i_all,
            x3i_all,
        )
    end
end # struct InterpolationBrick

"""
    interpolate_local!(
        intrp_brck::InterpolationBrick{FT},
        sv::AbstractArray{FT},
        v::AbstractArray{FT},
    ) where {FT <: AbstractFloat}

This interpolation function works for a brick, where stretching/compression
happens only along the x1, x2 & x3 axis. Here x1 = X1(ξ1), x2 = X2(ξ2) and
x3 = X3(ξ3)

# Arguments
 - `intrp_brck`: Initialized InterpolationBrick structure
 - `sv`: State Array consisting of various variables on the discontinuous
   Galerkin grid
 - `v`: Interpolated variables
"""
function interpolate_local!(
    intrp_brck::InterpolationBrick{FT},
    sv::AbstractArray{FT},
    v::AbstractArray{FT},
) where {FT <: AbstractFloat}
    offset = intrp_brck.offset
    m_ξ1 = intrp_brck.m_ξ1
    m_ξ2 = intrp_brck.m_ξ2
    m_ξ3 = intrp_brck.m_ξ3
    wb1 = intrp_brck.wb1
    wb2 = intrp_brck.wb2
    wb3 = intrp_brck.wb3
    ξ1 = intrp_brck.ξ1
    ξ2 = intrp_brck.ξ2
    ξ3 = intrp_brck.ξ3
    flg = intrp_brck.flg
    fac = intrp_brck.fac

    qm = (length(m_ξ1), length(m_ξ2), length(m_ξ3))
    Nel = length(offset) - 1
    nvars = size(sv, 2)

    device = array_device(sv)
    comp_stream = Event(device)
    # One workgroup per (element, variable) pair; threads span (ξ2, ξ3)
    workgroup = (qm[2], qm[3])
    ndrange = (qm[2] * Nel, qm[3] * nvars)

    comp_stream = interpolate_local_kernel!(device, workgroup)(
        offset,
        m_ξ1,
        m_ξ2,
        m_ξ3,
        wb1,
        wb2,
        wb3,
        ξ1,
        ξ2,
        ξ3,
        flg,
        fac,
        sv,
        v,
        Val(qm),
        ndrange = ndrange,
        dependencies = (comp_stream,),
    )
    wait(comp_stream)
    return nothing
end
#---------------------------------------------------------------
# Device kernel backing `interpolate_local!`: evaluates the barycentric
# Lagrange interpolant of each variable at every interpolation point of the
# workgroup's element, reducing over ξ1 (serial), then ξ2 and ξ3 (tree
# reductions guarded by `@synchronize`). `flg` short-circuits directions
# where the target point coincides with a GLL node.
@kernel function interpolate_local_kernel!(
    offset::AbstractArray{T, 1},
    m_ξ1::AbstractArray{FT, 1},
    m_ξ2::AbstractArray{FT, 1},
    m_ξ3::AbstractArray{FT, 1},
    wb1::AbstractArray{FT, 1},
    wb2::AbstractArray{FT, 1},
    wb3::AbstractArray{FT, 1},
    ξ1::AbstractArray{FT, 1},
    ξ2::AbstractArray{FT, 1},
    ξ3::AbstractArray{FT, 1},
    flg::AbstractArray{UInt8, 2},
    fac::AbstractArray{FT, 1},
    sv::AbstractArray{FT},
    v::AbstractArray{FT},
    ::Val{qm},
) where {qm, T <: Int, FT <: AbstractFloat}
    el, st_idx = @index(Group, NTuple)
    tj, tk = @index(Local, NTuple)

    vout_jk = @localmem FT (qm[2], qm[3])
    m_ξ1_sh = @localmem FT (qm[1],)
    m_ξ2_sh = @localmem FT (qm[2],)
    m_ξ3_sh = @localmem FT (qm[3],)
    wb1_sh = @localmem FT (qm[1],)
    wb2_sh = @localmem FT (qm[2],)
    wb3_sh = @localmem FT (qm[3],)
    np = @localmem T (1,)
    off = @localmem T (1,)

    # load shared memory
    if tk == 1
        m_ξ2_sh[tj] = m_ξ2[tj]
        wb2_sh[tj] = wb2[tj]
    end
    if tj == 1
        m_ξ3_sh[tk] = m_ξ3[tk]
        wb3_sh[tk] = wb3[tk]
    end
    if tj == 1 && tk == 1
        for i in 1:qm[1]
            m_ξ1_sh[i] = m_ξ1[i]
            wb1_sh[i] = wb1[i]
        end
        np[1] = offset[el + 1] - offset[el]
        off[1] = offset[el]
    end
    @synchronize

    for i in 1:np[1] # interpolate point-by-point
        ξ1l = ξ1[off[1] + i]
        ξ2l = ξ2[off[1] + i]
        f1 = flg[1, off[1] + i]

        if f1 == 0 # apply phir
            @inbounds vout_jk[tj, tk] =
                sv[
                    1 + (tj - 1) * qm[1] + (tk - 1) * qm[1] * qm[2],
                    st_idx,
                    el,
                ] * wb1_sh[1] / (ξ1l - m_ξ1_sh[1])
            for ii in 2:qm[1]
                @inbounds vout_jk[tj, tk] +=
                    sv[
                        ii + (tj - 1) * qm[1] + (tk - 1) * qm[1] * qm[2],
                        st_idx,
                        el,
                    ] * wb1_sh[ii] / (ξ1l - m_ξ1_sh[ii])
            end
        else
            # ξ1 coincides with GLL node f1: just pick that node's value
            @inbounds vout_jk[tj, tk] =
                sv[f1 + (tj - 1) * qm[1] + (tk - 1) * qm[1] * qm[2], st_idx, el]
        end

        if flg[2, off[1] + i] == 0 # apply phis
            @inbounds vout_jk[tj, tk] *= (wb2_sh[tj] / (ξ2l - m_ξ2_sh[tj]))
        end
        @synchronize

        f2 = flg[2, off[1] + i]
        ξ3l = ξ3[off[1] + i]
        if tj == 1 # reduction over ξ2
            if f2 == 0
                for ij in 2:qm[2]
                    @inbounds vout_jk[1, tk] += vout_jk[ij, tk]
                end
            else
                if f2 ≠ 1
                    @inbounds vout_jk[1, tk] = vout_jk[f2, tk]
                end
            end
            if flg[3, off[1] + i] == 0 # apply phit
                @inbounds vout_jk[1, tk] *= (wb3_sh[tk] / (ξ3l - m_ξ3_sh[tk]))
            end
        end
        @synchronize

        f3 = flg[3, off[1] + i]
        if tj == 1 && tk == 1 # reduction over ξ3, then write result
            if f3 == 0
                for ik in 2:qm[3]
                    @inbounds vout_jk[1, 1] += vout_jk[1, ik]
                end
            else
                if f3 ≠ 1
                    @inbounds vout_jk[1, 1] = vout_jk[1, f3]
                end
            end
            @inbounds v[off[1] + i, st_idx] = vout_jk[1, 1] * fac[off[1] + i]
        end
        @synchronize
    end
end
#---------------------------------------------------------------

# Interpolation-grid axes for an `InterpolationBrick`, as host arrays keyed
# for output ("x", "y", "z"); copies device arrays back to the host.
function dimensions(interpol::InterpolationBrick)
    if Array ∈ typeof(interpol.x1g).parameters
        h_x1g = interpol.x1g
        h_x2g = interpol.x2g
        h_x3g = interpol.x3g
    else
        h_x1g = Array(interpol.x1g)
        h_x2g = Array(interpol.x2g)
        h_x3g = Array(interpol.x3g)
    end
    return OrderedDict(
        "x" => (h_x1g, OrderedDict()),
        "y" => (h_x2g, OrderedDict()),
        "z" => (h_x3g, OrderedDict()),
    )
end

"""
    InterpolationCubedSphere{
        FT <: AbstractFloat,
        T <: Int,
        FTV <: AbstractVector{FT},
        FTVD <: AbstractVector{FT},
        TVD <: AbstractVector{T},
        UI8AD <: AbstractArray{UInt8, 2},
        UI16VD <: AbstractVector{UInt16},
        I32V <: AbstractVector{Int32},
    } <: InterpolationTopology

This interpolation structure and the corresponding functions works for a
cubed sphere topology. The data is interpolated along a lat/long/rad grid.

-90⁰ ≤ lat ≤ 90⁰

-180⁰ ≤ long ≤ 180⁰

Rᵢ ≤ r ≤ Rₒ

# Fields

$(DocStringExtensions.FIELDS)

# Usage

    InterpolationCubedSphere(grid::DiscontinuousSpectralElementGrid, vert_range::AbstractArray{FT}, nhor::Int, lat_res::FT, long_res::FT, rad_res::FT) where {FT <: AbstractFloat}

This interpolation structure and the corresponding functions works for a
cubed sphere topology. The data is interpolated along a lat/long/rad grid.
-90⁰ ≤ lat ≤ 90⁰

-180⁰ ≤ long ≤ 180⁰

Rᵢ ≤ r ≤ Rₒ

# Arguments for the inner constructor
 - `grid`: DiscontinousSpectralElementGrid
 - `vert_range`: Vertex range along the radial coordinate
 - `lat_res`: Resolution of the interpolation grid along the latitude coordinate in radians
 - `long_res`: Resolution of the interpolation grid along the longitude coordinate in radians
 - `rad_res`: Resolution of the interpolation grid along the radial coordinate
"""
struct InterpolationCubedSphere{
    FT <: AbstractFloat,
    T <: Int,
    FTV <: AbstractVector{FT},
    FTVD <: AbstractVector{FT},
    TVD <: AbstractVector{T},
    UI8AD <: AbstractArray{UInt8, 2},
    UI16VD <: AbstractVector{UInt16},
    I32V <: AbstractVector{Int32},
} <: InterpolationTopology
    "Number of elements"
    Nel::T
    "Number of interpolation points"
    Np::T
    "Number of interpolation points on local process"
    Npl::T # # of interpolation points on the local process
    "Number of interpolation points in radial direction"
    n_rad::T
    "Number of interpolation points in lat direction"
    n_lat::T
    "Number of interpolation points in long direction"
    n_long::T
    "Interpolation grid in radial direction"
    rad_grd::FTV
    "Interpolation grid in lat direction"
    lat_grd::FTV
    "Interpolation grid in long direction"
    long_grd::FTV # rad, lat & long locations of interpolation grid
    "Device array containing ξ1 coordinates of interpolation points within each element"
    ξ1::FTVD
    "Device array containing ξ2 coordinates of interpolation points within each element"
    ξ2::FTVD
    "Device array containing ξ3 coordinates of interpolation points within each element"
    ξ3::FTVD
    "flags when ξ1/ξ2/ξ3 interpolation point matches with a GLL point"
    flg::UI8AD
    "Normalization factor"
    fac::FTVD
    "Radial coordinates of interpolation points within each element"
    radi::UI16VD
    "Latitude coordinates of interpolation points within each element"
    lati::UI16VD
    "Longitude coordinates of interpolation points within each element"
    longi::UI16VD
    "Offsets for each element"
    offset::TVD
    "GLL points in ξ1 direction"
    m_ξ1::FTVD
    "GLL points in ξ2 direction"
    m_ξ2::FTVD
    "GLL points in ξ3 direction"
    m_ξ3::FTVD
    "Barycentric weights in ξ1 direction"
    wb1::FTVD
    "Barycentric weights in ξ2 direction"
    wb2::FTVD
    "Barycentric weights in ξ3 direction"
    wb3::FTVD
    # MPI setup for gathering interpolated variable on proc 0
    "Number of interpolation points on each of the processes"
    Np_all::I32V
    "Radial interpolation grid index of interpolation points within each element on all processes stored only on proc 0"
    radi_all::UI16VD
    "Latitude interpolation grid index of interpolation points within each element on all processes stored only on proc 0"
    lati_all::UI16VD
    "Longitude interpolation grid index of interpolation points within each element on all processes stored only on proc 0"
    longi_all::UI16VD

    # Inner constructor: locates every (rad, lat, long) interpolation point
    # inside the (locally owned) cubed-sphere element containing it, computes
    # its reference coordinates (ξ1, ξ2, ξ3) by inverting the trilinear vertex
    # map, and precomputes barycentric-interpolation flags/normalization.
    function InterpolationCubedSphere(
        grid::DiscontinuousSpectralElementGrid,
        vert_range::AbstractArray{FT},
        nhor::Int,
        lat_grd::AbstractArray{FT, 1},
        long_grd::AbstractArray{FT, 1},
        rad_grd::AbstractArray{FT};
        nr_toler = nothing,
    ) where {FT <: AbstractFloat}
        mpicomm = MPI.COMM_WORLD
        pid = MPI.Comm_rank(mpicomm)
        npr = MPI.Comm_size(mpicomm)

        DA = arraytype(grid) # device array
        device = arraytype(grid) <: Array ? CPU() : CUDADevice()
        qm = polynomialorders(grid) .+ 1
        toler1 = FT(eps(FT) * vert_range[1] * 2.0) # tolerance for unwarp function
        toler2 = FT(eps(FT) * 4.0)                 # tolerance
        # tolerance for Newton-Raphson
        if isnothing(nr_toler)
            nr_toler = FT(eps(FT) * vert_range[1] * 10.0)
        end

        Nel = length(grid.topology.realelems) # # of local elements on the local process

        nvert_range = length(vert_range)
        nvert = nvert_range - 1 # # of elements in vertical direction
        Nel_glob = nvert * nhor * nhor * 6
        nblck = nhor * nhor * nvert # elements per cube face
        Δh = 2 / nhor # horizontal grid spacing in unwarped grid

        n_lat, n_long, n_rad =
            Int(length(lat_grd)), Int(length(long_grd)), Int(length(rad_grd))

        Np = n_lat * n_long * n_rad

        # scratch vectors reused for every interpolation point
        uw_grd = zeros(FT, 3)
        diffv = zeros(FT, 3)
        ξ = zeros(FT, 3)

        glob_ord = grid.topology.origsendorder # to account for reordering of elements after the partitioning process

        glob_elem_no = zeros(Int, nvert * length(glob_ord))

        for i in 1:length(glob_ord), j in 1:nvert
            glob_elem_no[j + (i - 1) * nvert] = (glob_ord[i] - 1) * nvert + j
        end
        glob_to_loc = Dict(glob_elem_no[i] => Int(i) for i in 1:Nel) # using dictionary for speedup

        # per-element growable lists of reference coordinates and grid indices
        ξ1, ξ2, ξ3 = map(i -> zeros(FT, i), zeros(Int, Nel)),
        map(i -> zeros(FT, i), zeros(Int, Nel)),
        map(i -> zeros(FT, i), zeros(Int, Nel))

        radi, lati, longi = map(i -> zeros(UInt16, i), zeros(UInt16, Nel)),
        map(i -> zeros(UInt16, i), zeros(UInt16, Nel)),
        map(i -> zeros(UInt16, i), zeros(UInt16, Nel))

        offset_d = zeros(Int, Nel + 1)

        for i in 1:n_rad
            rad = rad_grd[i]
            # locate the vertical element layer l_nrm containing this radius
            if rad ≤ vert_range[1]
                # accounting for minor rounding errors from unwarp function at boundaries
                vert_range[1] - rad < toler1 ? l_nrm = 1 :
                error(
                    "fatal error, rad lower than inner radius: ",
                    vert_range[1] - rad,
                    " $rad_grd /// $lat_grd //// $long_grd",
                )
            elseif rad ≥ vert_range[end]
                # accounting for minor rounding errors from unwarp function at boundaries
                rad - vert_range[end] < toler1 ? l_nrm = nvert :
                error("fatal error, rad greater than outer radius")
            else # normal scenario
                for l in 2:nvert_range
                    if vert_range[l] - rad > FT(0)
                        l_nrm = l - 1
                        break
                    end
                end
            end

            for j in 1:n_lat
                @inbounds x3_grd = rad * sind(lat_grd[j])
                for k in 1:n_long
                    @inbounds x1_grd =
                        rad * cosd(lat_grd[j]) * cosd(long_grd[k]) # inclination -> latitude; azimuthal -> longitude.
                    @inbounds x2_grd =
                        rad * cosd(lat_grd[j]) * sind(long_grd[k]) # inclination -> latitude; azimuthal -> longitude.

                    uw_grd[1], uw_grd[2], uw_grd[3] =
                        Topologies.cubed_sphere_unwarp(
                            EquiangularCubedSphere(),
                            x1_grd,
                            x2_grd,
                            x3_grd,
                        ) # unwarping from sphere to cubed shell

                    x1_uw2_grd = uw_grd[1] / rad # unwrapping cubed shell on to a 2D grid (in 3D space, -1 to 1 cube)
                    x2_uw2_grd = uw_grd[2] / rad
                    x3_uw2_grd = uw_grd[3] / rad

                    # identify the cube face and the global element number
                    if abs(x1_uw2_grd + 1) < toler2 # face 1 (x1 == -1 plane)
                        l2 = min(div(x2_uw2_grd + 1, Δh) + 1, nhor)
                        l3 = min(div(x3_uw2_grd + 1, Δh) + 1, nhor)
                        el_glob = Int(
                            l_nrm +
                            (nhor - l2) * nvert +
                            (l3 - 1) * nvert * nhor,
                        )
                    elseif abs(x2_uw2_grd + 1) < toler2 # face 2 (x2 == -1 plane)
                        l1 = min(div(x1_uw2_grd + 1, Δh) + 1, nhor)
                        l3 = min(div(x3_uw2_grd + 1, Δh) + 1, nhor)
                        el_glob = Int(
                            l_nrm +
                            (l1 - 1) * nvert +
                            (l3 - 1) * nvert * nhor +
                            nblck * 1,
                        )
                    elseif abs(x1_uw2_grd - 1) < toler2 # face 3 (x1 == +1 plane)
                        l2 = min(div(x2_uw2_grd + 1, Δh) + 1, nhor)
                        l3 = min(div(x3_uw2_grd + 1, Δh) + 1, nhor)
                        el_glob = Int(
                            l_nrm +
                            (l2 - 1) * nvert +
                            (l3 - 1) * nvert * nhor +
                            nblck * 2,
                        )
                    elseif abs(x3_uw2_grd - 1) < toler2 # face 4 (x3 == +1 plane)
                        l1 = min(div(x1_uw2_grd + 1, Δh) + 1, nhor)
                        l2 = min(div(x2_uw2_grd + 1, Δh) + 1, nhor)
                        el_glob = Int(
                            l_nrm +
                            (l1 - 1) * nvert +
                            (l2 - 1) * nvert * nhor +
                            nblck * 3,
                        )
                    elseif abs(x2_uw2_grd - 1) < toler2 # face 5 (x2 == +1 plane)
                        l1 = min(div(x1_uw2_grd + 1, Δh) + 1, nhor)
                        l3 = min(div(x3_uw2_grd + 1, Δh) + 1, nhor)
                        el_glob = Int(
                            l_nrm +
                            (l1 - 1) * nvert +
                            (nhor - l3) * nvert * nhor +
                            nblck * 4,
                        )
                    elseif abs(x3_uw2_grd + 1) < toler2 # face 6 (x3 == -1 plane)
                        l1 = min(div(x1_uw2_grd + 1, Δh) + 1, nhor)
                        l2 = min(div(x2_uw2_grd + 1, Δh) + 1, nhor)
                        el_glob = Int(
                            l_nrm +
                            (l1 - 1) * nvert +
                            (nhor - l2) * nvert * nhor +
                            nblck * 5,
                        )
                    else
                        error("error: unwrapped grid does not lie on any of the 6 faces")
                    end

                    el_loc = get(glob_to_loc, el_glob, nothing)
                    if el_loc ≠ nothing # computing inner coordinates for local elements
                        invert_trilear_mapping_hex!(
                            view(grid.topology.elemtocoord, 1, :, el_loc),
                            view(grid.topology.elemtocoord, 2, :, el_loc),
                            view(grid.topology.elemtocoord, 3, :, el_loc),
                            uw_grd,
                            diffv,
                            nr_toler,
                            ξ,
                        )
                        push!(ξ1[el_loc], ξ[1])
                        push!(ξ2[el_loc], ξ[2])
                        push!(ξ3[el_loc], ξ[3])
                        push!(radi[el_loc], UInt16(i))
                        push!(lati[el_loc], UInt16(j))
                        push!(longi[el_loc], UInt16(k))
                        offset_d[el_loc + 1] += 1
                    end
                end
            end
        end

        # prefix-sum the per-element counts into offsets
        for i in 2:(Nel + 1)
            @inbounds offset_d[i] += offset_d[i - 1]
        end

        Npl = offset_d[Nel + 1]

        # NOTE(review): `v` appears to be unused below — verify before removing
        v = Vector{FT}(undef, offset_d[Nel + 1]) # Allocating storage for interpolation variable

        # flatten the per-element lists into contiguous device-uploadable arrays
        ξ1_d = Vector{FT}(undef, Npl)
        ξ2_d = Vector{FT}(undef, Npl)
        ξ3_d = Vector{FT}(undef, Npl)

        flg_d = zeros(UInt8, 3, Npl)
        fac_d = ones(FT, Npl)

        rad_d = Vector{UInt16}(undef, Npl)
        lat_d = Vector{UInt16}(undef, Npl)
        long_d = Vector{UInt16}(undef, Npl)

        m_ξ1, m_ξ2, m_ξ3 = referencepoints(grid)
        wb1, wb2, wb3 =
            baryweights(m_ξ1), baryweights(m_ξ2), baryweights(m_ξ3)

        for i in 1:Nel
            ctr = 1
            for j in (offset_d[i] + 1):offset_d[i + 1]
                @inbounds ξ1_d[j] = ξ1[i][ctr]
                @inbounds ξ2_d[j] = ξ2[i][ctr]
                @inbounds ξ3_d[j] = ξ3[i][ctr]
                @inbounds rad_d[j] = radi[i][ctr]
                @inbounds lat_d[j] = lati[i][ctr]
                @inbounds long_d[j] = longi[i][ctr]
                # set up interpolation: flag GLL-coincident points per
                # direction and accumulate the barycentric normalization
                fac1 = FT(0)
                fac2 = FT(0)
                fac3 = FT(0)
                for ib in 1:qm[1]
                    if abs(m_ξ1[ib] - ξ1_d[j]) < toler2
                        @inbounds flg_d[1, j] = UInt8(ib)
                    else
                        @inbounds fac1 += wb1[ib] / (ξ1_d[j] - m_ξ1[ib])
                    end
                end
                for ib in 1:qm[2]
                    if abs(m_ξ2[ib] - ξ2_d[j]) < toler2
                        @inbounds flg_d[2, j] = UInt8(ib)
                    else
                        @inbounds fac2 += wb2[ib] / (ξ2_d[j] - m_ξ2[ib])
                    end
                end
                for ib in 1:qm[3]
                    if abs(m_ξ3[ib] - ξ3_d[j]) < toler2
                        @inbounds flg_d[3, j] = UInt8(ib)
                    else
                        @inbounds fac3 += wb3[ib] / (ξ3_d[j] - m_ξ3[ib])
                    end
                end
                # flagged directions contribute no normalization factor
                flg_d[1, j] ≠ 0 && (fac1 = FT(1))
                flg_d[2, j] ≠ 0 && (fac2 = FT(1))
                flg_d[3, j] ≠ 0 && (fac3 = FT(1))
                fac_d[j] = FT(1) / (fac1 * fac2 * fac3)
                ctr += 1
            end
        end

        # MPI setup for gathering data on proc 0
        root = 0
        Np_all = zeros(Int32, npr)
        Np_all[pid + 1] = Int32(Npl)

        MPI.Allreduce!(Np_all, +, mpicomm)

        if pid ≠ root
            radi_all = zeros(UInt16, 0)
            lati_all = zeros(UInt16, 0)
            longi_all = zeros(UInt16, 0)
            MPI.Gatherv!(rad_d, nothing, root, mpicomm)
            MPI.Gatherv!(lat_d, nothing, root, mpicomm)
            MPI.Gatherv!(long_d, nothing, root, mpicomm)
        else
            radi_all = Array{UInt16}(undef, sum(Np_all))
            lati_all = Array{UInt16}(undef, sum(Np_all))
            longi_all = Array{UInt16}(undef, sum(Np_all))
            MPI.Gatherv!(rad_d, VBuffer(radi_all, Np_all), root, mpicomm)
            MPI.Gatherv!(lat_d, VBuffer(lati_all, Np_all), root, mpicomm)
            MPI.Gatherv!(long_d, VBuffer(longi_all, Np_all), root, mpicomm)
        end

        # move all per-point data to the device when running on GPU
        if device isa CUDADevice
            ξ1_d = DA(ξ1_d)
            ξ2_d = DA(ξ2_d)
            ξ3_d = DA(ξ3_d)

            flg_d = DA(flg_d)
            fac_d = DA(fac_d)

            rad_d = DA(rad_d)
            lat_d = DA(lat_d)
            long_d = DA(long_d)

            m_ξ1 = DA(m_ξ1)
            m_ξ2 = DA(m_ξ2)
            m_ξ3 = DA(m_ξ3)

            wb1 = DA(wb1)
            wb2 = DA(wb2)
            wb3 = DA(wb3)

            offset_d = DA(offset_d)

            rad_grd = DA(rad_grd)
            lat_grd = DA(lat_grd)
            long_grd = DA(long_grd)

            radi_all = DA(radi_all)
            lati_all = DA(lati_all)
            longi_all = DA(longi_all)
        end
        return new{
            FT,
            Int,
            typeof(rad_grd),
            typeof(ξ1_d),
            typeof(offset_d),
            typeof(flg_d),
            typeof(rad_d),
            typeof(Np_all),
        }(
            Nel,
            Np,
            Npl,
            n_rad,
            n_lat,
            n_long,
            rad_grd,
            lat_grd,
            long_grd,
            ξ1_d,
            ξ2_d,
            ξ3_d,
            flg_d,
            fac_d,
            rad_d,
            lat_d,
            long_d,
            offset_d,
            m_ξ1,
            m_ξ2,
            m_ξ3,
            wb1,
            wb2,
            wb3,
            Np_all,
            radi_all,
            lati_all,
            longi_all,
        )
    end # Inner constructor InterpolationCubedSphere
end # struct InterpolationCubedSphere

"""
    invert_trilear_mapping_hex!(X1::AbstractArray{FT,1}, X2::AbstractArray{FT,1},
X3::AbstractArray{FT,1}, x::AbstractArray{FT,1}, d::AbstractArray{FT,1},
tol::FT, ξ::AbstractArray{FT,1}) where FT <: AbstractFloat
This function computes ξ = (ξ1,ξ2,ξ3) given x = (x1,x2,x3) and the (8) vertex
coordinates of a Hexahedron. Newton-Raphson method is used.

# Arguments
 - `X1`: X1 coordinates of the (8) vertices of the hexahedron
 - `X2`: X2 coordinates of the (8) vertices of the hexahedron
 - `X3`: X3 coordinates of the (8) vertices of the hexahedron
 - `x`: (x1,x2,x3) coordinates of the point
 - `d`: (x1,x2,x3) coordinates, temporary storage
 - `tol`: Tolerance for convergence
 - `ξ`: (ξ1,ξ2,ξ3) coordinates of the point (output, clamped to [-1, 1])
"""
function invert_trilear_mapping_hex!(
    X1::AbstractArray{FT, 1},
    X2::AbstractArray{FT, 1},
    X3::AbstractArray{FT, 1},
    x::AbstractArray{FT, 1},
    d::AbstractArray{FT, 1},
    tol::FT,
    ξ::AbstractArray{FT, 1},
) where {FT <: AbstractFloat}
    max_it = 10 # maximum # of iterations
    ξ .= FT(0) # initial guess => cell centroid
    # residual d = F(ξ) - x of the trilinear map at the current iterate
    trilinear_map_minus_x!(ξ, X1, X2, X3, x, d)
    err = sqrt(d[1] * d[1] + d[2] * d[2] + d[3] * d[3])
    ctr = 0
    # Newton-Raphson iterations: d ← J⁻¹ d, then ξ ← ξ - d
    while err > tol
        trilinear_map_IJac_x_vec!(ξ, X1, X2, X3, d)
        ξ .-= d
        trilinear_map_minus_x!(ξ, X1, X2, X3, x, d)
        err = sqrt(d[1] * d[1] + d[2] * d[2] + d[3] * d[3]) #norm(d)
        ctr += 1
        if ctr > max_it
            error(
                "invert_trilinear_mapping_hex: Newton-Raphson not converging to desired tolerance after max_it = ",
                max_it,
                " iterations; err = ",
                err,
                "; toler = ",
                tol,
            )
        end
    end
    # guard against minor overshoot outside the reference element
    clamp!(ξ, FT(-1), FT(1))
    return nothing
end

"""
    trilinear_map_minus_x!(ξ, x1v, x2v, x3v, x, d)

Evaluate the trilinear vertex map at reference coordinates `ξ` and store the
residual relative to the physical point `x` in `d`, i.e. `d = F(ξ) - x`.

# Arguments
 - `ξ`: (ξ1,ξ2,ξ3) reference coordinates
 - `x1v`, `x2v`, `x3v`: coordinates of the 8 hexahedron vertices
 - `x`: target physical point
 - `d`: output residual (length 3)
"""
function trilinear_map_minus_x!(
    ξ::AbstractArray{FT, 1},
    x1v::AbstractArray{FT, 1},
    x2v::AbstractArray{FT, 1},
    x3v::AbstractArray{FT, 1},
    x::AbstractArray{FT, 1},
    d::AbstractArray{FT, 1},
) where {FT <: AbstractFloat}
    p1 = 1 + ξ[1]
    p2 = 1 + ξ[2]
    p3 = 1 + ξ[3]
    m1 = 1 - ξ[1]
    m2 = 1 - ξ[2]
    m3 = 1 - ξ[3]
    # FIX: divide by the integer literal 8 (not 8.0) so the computation stays
    # in FT; the Float64 literal promoted every intermediate when FT == Float32
    d[1] =
        (
            m1 * (
                m2 * (m3 * x1v[1] + p3 * x1v[5]) +
                p2 * (m3 * x1v[3] + p3 * x1v[7])
            ) +
            p1 * (
                m2 * (m3 * x1v[2] + p3 * x1v[6]) +
                p2 * (m3 * x1v[4] + p3 * x1v[8])
            )
        ) / 8 - x[1]
    d[2] =
        (
            m1 * (
                m2 * (m3 * x2v[1] + p3 * x2v[5]) +
                p2 * (m3 * x2v[3] + p3 * x2v[7])
            ) +
            p1 * (
                m2 * (m3 * x2v[2] + p3 * x2v[6]) +
                p2 * (m3 * x2v[4] + p3 * x2v[8])
            )
        ) / 8 - x[2]
    d[3] =
        (
            m1 * (
                m2 * (m3 * x3v[1] + p3 * x3v[5]) +
                p2 * (m3 * x3v[3] + p3 * x3v[7])
            ) +
            p1 * (
                m2 * (m3 * x3v[2] + p3 * x3v[6]) +
                p2 * (m3 * x3v[4] + p3 * x3v[8])
            )
        ) / 8 - x[3]
    return nothing
end

"""
    trilinear_map_IJac_x_vec!(ξ, x1v, x2v, x3v, v)

Overwrite `v` with `J(ξ)⁻¹ v`, where `J` is the Jacobian of the trilinear
vertex map at reference coordinates `ξ`. The inverse is applied via the
cofactor (adjugate) matrix and the determinant.

# Arguments
 - `ξ`: (ξ1,ξ2,ξ3) reference coordinates
 - `x1v`, `x2v`, `x3v`: coordinates of the 8 hexahedron vertices
 - `v`: input vector, overwritten with J⁻¹ v
"""
function trilinear_map_IJac_x_vec!(
    ξ::AbstractArray{FT, 1},
    x1v::AbstractArray{FT, 1},
    x2v::AbstractArray{FT, 1},
    x3v::AbstractArray{FT, 1},
    v::AbstractArray{FT, 1},
) where {FT <: AbstractFloat}
    p1 = 1 + ξ[1]
    p2 = 1 + ξ[2]
    p3 = 1 + ξ[3]
    m1 = 1 - ξ[1]
    m2 = 1 - ξ[2]
    m3 = 1 - ξ[3]
    # Jacobian entries ∂xi/∂ξj of the trilinear map.
    # FIX: integer literal 8 avoids Float64 promotion for FT == Float32
    Jac11 =
        (
            m2 * (m3 * (x1v[2] - x1v[1]) + p3 * (x1v[6] - x1v[5])) +
            p2 * (m3 * (x1v[4] - x1v[3]) + p3 * (x1v[8] - x1v[7]))
        ) / 8
    Jac12 =
        (
            m1 * (m3 * (x1v[3] - x1v[1]) + p3 * (x1v[7] - x1v[5])) +
            p1 * (m3 * (x1v[4] - x1v[2]) + p3 * (x1v[8] - x1v[6]))
        ) / 8
    Jac13 =
        (
            m1 * (m2 * (x1v[5] - x1v[1]) + p2 * (x1v[7] - x1v[3])) +
            p1 * (m2 * (x1v[6] - x1v[2]) + p2 * (x1v[8] - x1v[4]))
        ) / 8
    Jac21 =
        (
            m2 * (m3 * (x2v[2] - x2v[1]) + p3 * (x2v[6] - x2v[5])) +
            p2 * (m3 * (x2v[4] - x2v[3]) + p3 * (x2v[8] - x2v[7]))
        ) / 8
    Jac22 =
        (
            m1 * (m3 * (x2v[3] - x2v[1]) + p3 * (x2v[7] - x2v[5])) +
            p1 * (m3 * (x2v[4] - x2v[2]) + p3 * (x2v[8] - x2v[6]))
        ) / 8
    Jac23 =
        (
            m1 * (m2 * (x2v[5] - x2v[1]) + p2 * (x2v[7] - x2v[3])) +
            p1 * (m2 * (x2v[6] - x2v[2]) + p2 * (x2v[8] - x2v[4]))
        ) / 8
    Jac31 =
        (
            m2 * (m3 * (x3v[2] - x3v[1]) + p3 * (x3v[6] - x3v[5])) +
            p2 * (m3 * (x3v[4] - x3v[3]) + p3 * (x3v[8] - x3v[7]))
        ) / 8
    Jac32 =
        (
            m1 * (m3 * (x3v[3] - x3v[1]) + p3 * (x3v[7] - x3v[5])) +
            p1 * (m3 * (x3v[4] - x3v[2]) + p3 * (x3v[8] - x3v[6]))
        ) / 8
    Jac33 =
        (
            m1 * (m2 * (x3v[5] - x3v[1]) + p2 * (x3v[7] - x3v[3])) +
            p1 * (m2 * (x3v[6] - x3v[2]) + p2 * (x3v[8] - x3v[4]))
        ) / 8
    # computing cofactor matrix
    C11 = Jac22 * Jac33 - Jac23 * Jac32
    C12 = -Jac21 * Jac33 + Jac23 * Jac31
    C13 = Jac21 * Jac32 - Jac22 * Jac31
    C21 = -Jac12 * Jac33 + Jac13 * Jac32
    C22 = Jac11 * Jac33 - Jac13 * Jac31
    C23 = -Jac11 * Jac32 + Jac12 * Jac31
    C31 = Jac12 * Jac23 - Jac13 * Jac22
    C32 = -Jac11 * Jac23 + Jac13 * Jac21
    C33 = Jac11 * Jac22 - Jac12 * Jac21
    # computing determinant
    det = Jac11 * C11 + Jac12 * C12 + Jac13 * C13
    # apply J⁻¹ = adj(J)ᵀ / det to v
    Jac11 = (C11 * v[1] + C21 * v[2] + C31 * v[3]) / det
    Jac21 = (C12 * v[1] + C22 * v[2] + C32 * v[3]) / det
    Jac31 = (C13 * v[1] + C23 * v[2] + C33 * v[3]) / det
    v[1] = Jac11
    v[2] = Jac21
    v[3] = Jac31
    return nothing
end

"""
    interpolate_local!(intrp_cs::InterpolationCubedSphere{FT}, sv::AbstractArray{FT}, v::AbstractArray{FT}) where {FT <: AbstractFloat}

This interpolation function works for cubed spherical shell geometry.

# Arguments
 - `intrp_cs`: Initialized cubed sphere structure
 - `sv`: Array consisting of various variables on the discontinuous Galerkin grid
 - `v`: Array consisting of variables on the interpolated grid
"""
function interpolate_local!(
    intrp_cs::InterpolationCubedSphere{FT},
    sv::AbstractArray{FT},
    v::AbstractArray{FT},
) where {FT <: AbstractFloat}
    # unpack precomputed interpolation setup (see InterpolationCubedSphere)
    offset = intrp_cs.offset
    m_ξ1 = intrp_cs.m_ξ1
    m_ξ2 = intrp_cs.m_ξ2
    m_ξ3 = intrp_cs.m_ξ3
    wb1 = intrp_cs.wb1
    wb2 = intrp_cs.wb2
    wb3 = intrp_cs.wb3
    ξ1 = intrp_cs.ξ1
    ξ2 = intrp_cs.ξ2
    ξ3 = intrp_cs.ξ3
    flg = intrp_cs.flg
    fac = intrp_cs.fac

    qm = (length(m_ξ1), length(m_ξ2), length(m_ξ3))
    nvars = size(sv, 2)
    Nel = length(offset) - 1

    device = array_device(sv)
    comp_stream = Event(device)
    # one workgroup per (element, variable); threads span the (ξ2, ξ3) nodes
    workgroup = (qm[2], qm[3])
    ndrange = (qm[2] * Nel, qm[3] * nvars)
    comp_stream = interpolate_local_kernel!(device, workgroup)(
        offset,
        m_ξ1,
        m_ξ2,
        m_ξ3,
        wb1,
        wb2,
        wb3,
        ξ1,
        ξ2,
        ξ3,
        flg,
        fac,
        sv,
        v,
        Val(qm),
        ndrange = ndrange,
        dependencies = (comp_stream,),
    )
    wait(comp_stream)
    return nothing
end

"""
    project_cubed_sphere!(intrp_cs::InterpolationCubedSphere{FT}, v::AbstractArray{FT}, uvwi::Tuple{Int,Int,Int}) where {FT <: AbstractFloat}

This function projects the velocity field along unit vectors in radial, lat
and long directions for cubed spherical shell geometry.
# Fields
 - `intrp_cs`: Initialized cubed sphere structure
 - `v`: Array consisting of x1, x2 and x3 components of the vector field
 - `uvwi`: Tuple providing the column numbers for x1, x2 and x3 components of
vector field in the array. These columns will be replaced with projected
vector fields along unit vectors in long, lat and rad directions.
"""
function project_cubed_sphere!(
    intrp_cs::InterpolationCubedSphere{FT},
    v::AbstractArray{FT},
    uvwi::Tuple{Int, Int, Int},
) where {FT <: AbstractFloat}
    # Project the Cartesian velocity components (columns uvwi of v) onto the
    # local long/lat/rad unit vectors, in place, via a device kernel.
    @assert length(uvwi) == 3 "length(uvwi) is not 3"
    lati = intrp_cs.lati
    longi = intrp_cs.longi
    lat_grd = intrp_cs.lat_grd
    long_grd = intrp_cs.long_grd
    _ρu, _ρv, _ρw = uvwi
    npoints = size(v, 1)
    device = array_device(v)
    event = Event(device)
    nthreads = min(256, npoints)
    event = project_cubed_sphere_kernel!(device, (nthreads,))(
        lat_grd,
        long_grd,
        lati,
        longi,
        v,
        _ρu,
        _ρv,
        _ρw,
        ndrange = (npoints,),
        dependencies = (event,),
    )
    wait(event)
    return nothing
end

# Per-point projection of Cartesian (x1, x2, x3) velocity components onto the
# spherical (long, lat, rad) unit vectors at that point's lat/long location.
@kernel function project_cubed_sphere_kernel!(
    lat_grd::AbstractArray{FT, 1},
    long_grd::AbstractArray{FT, 1},
    lati::AbstractVector{UInt16},
    longi::AbstractVector{UInt16},
    v::AbstractArray{FT},
    _ρu::Int,
    _ρv::Int,
    _ρw::Int,
) where {FT <: AbstractFloat}
    idx = @index(Global, Linear) # global thread ids
    deg2rad = FT(π) / FT(180)
    # evaluate the trigonometric factors once per point
    lat = lat_grd[lati[idx]] * deg2rad
    lon = long_grd[longi[idx]] * deg2rad
    slat = sin(lat)
    clat = cos(lat)
    slon = sin(lon)
    clon = cos(lon)
    u1 = v[idx, _ρu]
    u2 = v[idx, _ρv]
    u3 = v[idx, _ρw]
    vrad = u1 * clat * clon + u2 * clat * slon + u3 * slat
    vlat = -u1 * slat * clon - u2 * slat * slon + u3 * clat
    vlon = -u1 * slon + u2 * clon
    # overwrite the Cartesian columns with the projected components
    v[idx, _ρu] = vlon
    v[idx, _ρv] = vlat
    v[idx, _ρw] = vrad
    # TODO: cosd / sind having issues on GPU. Unable to isolate the issue at this point. Needs to be revisited.
end

# Return the lat/long/rad interpolation grids as an OrderedDict for I/O,
# copying device-resident grids to the host when necessary.
function dimensions(interpol::InterpolationCubedSphere)
    on_host = Array ∈ typeof(interpol.rad_grd).parameters
    h_long_grd = on_host ? interpol.long_grd : Array(interpol.long_grd)
    h_lat_grd = on_host ? interpol.lat_grd : Array(interpol.lat_grd)
    h_rad_grd = on_host ? interpol.rad_grd : Array(interpol.rad_grd)
    FT = eltype(h_rad_grd)
    return OrderedDict(
        "long" => (
            h_long_grd,
            OrderedDict("units" => "degrees_east", "long_name" => "longitude"),
        ),
        "lat" => (
            h_lat_grd,
            OrderedDict("units" => "degrees_north", "long_name" => "latitude"),
        ),
        "level" =>
            (h_rad_grd, OrderedDict("units" => "m", "long_name" => "level")),
    )
end

"""
    accumulate_interpolated_data!(intrp::InterpolationTopology, iv::AbstractArray{FT,2}, fiv::AbstractArray{FT,4}) where {FT <: AbstractFloat}

This interpolation function gathers interpolated data onto process # 0.
# Fields
 - `intrp`: Initialized interpolation topology structure
 - `iv`: Interpolated variables on local process
 - `fiv`: Full interpolated variables accumulated on process # 0
"""
function accumulate_interpolated_data!(
    intrp::InterpolationTopology,
    iv::AbstractArray{FT, 2},
    fiv::AbstractArray{FT, 4},
) where {FT <: AbstractFloat}
    device = array_device(iv)
    mpicomm = MPI.COMM_WORLD
    pid = MPI.Comm_rank(mpicomm)
    npr = MPI.Comm_size(mpicomm)
    root = 0
    nvars = size(iv, 2)

    # dispatch on the topology to pick the output grid sizes and the
    # per-point (i1, i2, i3) destination indices gathered on rank 0
    if intrp isa InterpolationCubedSphere
        nx1 = length(intrp.long_grd)
        nx2 = length(intrp.lat_grd)
        nx3 = length(intrp.rad_grd)
        np_tot = length(intrp.radi_all)
        i1 = intrp.longi_all
        i2 = intrp.lati_all
        i3 = intrp.radi_all
    elseif intrp isa InterpolationBrick
        nx1 = length(intrp.x1g)
        nx2 = length(intrp.x2g)
        nx3 = length(intrp.x3g)
        np_tot = length(intrp.x1i_all)
        i1 = intrp.x1i_all
        i2 = intrp.x2i_all
        i3 = intrp.x3i_all
    else
        error("Unsupported topology; only InterpolationCubedSphere and InterpolationBrick supported")
    end

    if pid == 0 && size(fiv) ≠ (nx1, nx2, nx3, nvars)
        error("size of fiv = $(size(fiv)); which does not match with ($nx1,$nx2,$nx3,$nvars) ")
    end

    if npr > 1
        # gather every rank's interpolated columns into v_all on rank 0
        Np_all = intrp.Np_all
        pid == 0 ? v_all = Array{FT}(undef, np_tot, nvars) :
        v_all = Array{FT}(undef, 0, nvars)
        if device isa CPU
            for vari in 1:nvars
                MPI.Gatherv!(
                    view(iv, :, vari),
                    pid == root ? VBuffer(view(v_all, :, vari), Np_all) :
                    nothing,
                    root,
                    mpicomm,
                )
            end
        elseif device isa CUDADevice
            # stage device data through the host for the MPI gather
            v = Array(iv)
            for vari in 1:nvars
                MPI.Gatherv!(
                    view(v, :, vari),
                    pid == root ? VBuffer(view(v_all, :, vari), Np_all) :
                    nothing,
                    root,
                    mpicomm,
                )
            end
            v_all = CuArray(v_all)
        else
            error("accumulate_interpolate_data: unsupported device, only CPU() and CUDADevice() supported")
        end
    else
        v_all = iv
    end

    # scatter the gathered columns into the (nx1, nx2, nx3, nvars) output
    if pid == 0
        comp_stream = Event(device)
        thr_x = min(256, np_tot)
        workgroup = (thr_x,)
        ndrange = (np_tot,)
        comp_stream = accumulate_helper_kernel!(device, workgroup)(
            i1,
            i2,
            i3,
            v_all,
            fiv,
            ndrange = ndrange,
            dependencies = (comp_stream,),
        )
        wait(comp_stream)
    end
    MPI.Barrier(mpicomm)
    return nothing
end

# Device kernel: place each gathered point's variables at its (i1, i2, i3)
# location in the full interpolated array `fiv`.
@kernel function accumulate_helper_kernel!(
    i1::AbstractArray{UInt16, 1},
    i2::AbstractArray{UInt16, 1},
    i3::AbstractArray{UInt16, 1},
    v_all::AbstractArray{FT, 2},
    fiv::AbstractArray{FT, 4},
) where {FT <: AbstractFloat}
    idx = @index(Global, Linear)
    nvars = size(v_all, 2)
    for vari in 1:nvars
        @inbounds fiv[i1[idx], i2[idx], i3[idx], vari] = v_all[idx, vari]
    end
end

# Allocating variant: gather the interpolated data onto rank 0 and return the
# assembled (nx1, nx2, nx3, nvars) array there (`nothing` on other ranks).
function accumulate_interpolated_data(
    mpicomm::MPI.Comm,
    intrp::InterpolationTopology,
    iv::AbstractArray{FT, 2},
) where {FT <: AbstractFloat}
    mpirank = MPI.Comm_rank(mpicomm)
    numranks = MPI.Comm_size(mpicomm)
    nvars = size(iv, 2)

    # topology-specific grid sizes and destination indices (as above)
    if intrp isa InterpolationCubedSphere
        nx1 = length(intrp.long_grd)
        nx2 = length(intrp.lat_grd)
        nx3 = length(intrp.rad_grd)
        np_tot = length(intrp.radi_all)
        i1 = intrp.longi_all
        i2 = intrp.lati_all
        i3 = intrp.radi_all
    elseif intrp isa InterpolationBrick
        nx1 = length(intrp.x1g)
        nx2 = length(intrp.x2g)
        nx3 = length(intrp.x3g)
        np_tot = length(intrp.x1i_all)
        i1 = intrp.x1i_all
        i2 = intrp.x2i_all
        i3 = intrp.x3i_all
    else
        error("Unsupported topology; only InterpolationCubedSphere and InterpolationBrick supported")
    end

    # copy device-resident inputs to the host for the MPI gather
    if array_device(iv) isa CPU
        h_iv = iv
        h_i1 = i1
        h_i2 = i2
        h_i3 = i3
    else
        h_iv = Array(iv)
        h_i1 = Array(i1)
        h_i2 = Array(i2)
        h_i3 = Array(i3)
    end

    if numranks == 1
        v_all = h_iv
    else
        v_all = Array{FT}(undef, mpirank == 0 ? np_tot : 0, nvars)
        for vari in 1:nvars
            MPI.Gatherv!(
                view(h_iv, :, vari),
                mpirank == 0 ?
VBuffer(view(v_all, :, vari), intrp.Np_all) : nothing, 0, mpicomm, ) end end if mpirank == 0 fiv = Array{FT}(undef, nx1, nx2, nx3, nvars) for i in 1:np_tot for vari in 1:nvars @inbounds fiv[h_i1[i], h_i2[i], h_i3[i], vari] = v_all[i, vari] end end else fiv = nothing end return fiv end end # module Interpolation ================================================ FILE: src/Numerics/Mesh/Mesh.jl ================================================ module Mesh include("BrickMesh.jl") include("Topologies.jl") include("GeometricFactors.jl") include("Metrics.jl") include("Elements.jl") include("Grids.jl") include("DSS.jl") include("Filters.jl") include("Geometry.jl") include("Interpolation.jl") end # module ================================================ FILE: src/Numerics/Mesh/Metrics.jl ================================================ module Metrics using ..GeometricFactors export creategrid, compute_reference_to_physical_coord_jacobian, computemetric """ creategrid!(vgeo, elemtocoord, ξ) Create a 1-D grid using `elemtocoord` (see `brickmesh`) using the 1-D `(-1, 1)` reference coordinates `ξ` (in 1D, `ξ = ξ1`). The element grids are filled using linear interpolation of the element coordinates. If `Nq = length(ξ)` and `nelem = size(elemtocoord, 3)` then the preallocated array `vgeo.x1` should be `Nq * nelem == length(x1)`. """ function creategrid!( vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray}, e2c, ξ::NTuple{1, Vector{FT}}, ) where {Nq, FT} (d, nvert, nelem) = size(e2c) @assert d == 1 (ξ1,) = ξ x1 = reshape(vgeo.x1, (Nq..., nelem)) # Linear blend @inbounds for e in 1:nelem for i in 1:Nq[1] vgeo.x1[i, e] = ((1 - ξ1[i]) * e2c[1, 1, e] + (1 + ξ1[i]) * e2c[1, 2, e]) / 2 end end nothing end """ creategrid!(vgeo, elemtocoord, ξ) Create a 2-D tensor product grid using `elemtocoord` (see `brickmesh`) using the tuple `ξ = (ξ1, ξ2)`, composed by the 1D reference coordinates `ξ1` and `ξ2` in `(-1, 1)^2`. 
The element grids are filled using bilinear interpolation of the element coordinates. If `Nq = (length(ξ1), length(ξ2))` and `nelem = size(elemtocoord, 3)` then the preallocated arrays `vgeo.x1` and `vgeo.x2` should be `prod(Nq) * nelem == size(vgeo.x1) == size(vgeo.x2)`. """ function creategrid!( vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray}, e2c, ξ::NTuple{2, Vector{FT}}, ) where {Nq, FT} (d, nvert, nelem) = size(e2c) @assert d == 2 (ξ1, ξ2) = ξ x1 = reshape(vgeo.x1, (Nq..., nelem)) x2 = reshape(vgeo.x2, (Nq..., nelem)) # Bilinear blend of corners @inbounds for (f, n) in zip((x1, x2), 1:d) for e in 1:nelem, j in 1:Nq[2], i in 1:Nq[1] f[i, j, e] = ( (1 - ξ1[i]) * (1 - ξ2[j]) * e2c[n, 1, e] + (1 + ξ1[i]) * (1 - ξ2[j]) * e2c[n, 2, e] + (1 - ξ1[i]) * (1 + ξ2[j]) * e2c[n, 3, e] + (1 + ξ1[i]) * (1 + ξ2[j]) * e2c[n, 4, e] ) / 4 end end nothing end """ creategrid!(vgeo, elemtocoord, ξ) Create a 3-D tensor product grid using `elemtocoord` (see `brickmesh`) using the tuple `ξ = (ξ1, ξ2, ξ3)`, composed by the 1D reference coordinates `ξ1`, `ξ2`, `ξ3` in `(-1, 1)^3`. The element grids are filled using trilinear interpolation of the element coordinates. If `Nq = (length(ξ1), length(ξ2), length(ξ3))` and `nelem = size(elemtocoord, 3)` then the preallocated arrays `vgeo.x1`, `vgeo.x2`, and `vgeo.x3` should be `prod(Nq) * nelem == size(vgeo.x1) == size(vgeo.x2) == size(vgeo.x3)`. 
""" function creategrid!( vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray}, e2c, ξ::NTuple{3, Vector{FT}}, ) where {Nq, FT} (d, nvert, nelem) = size(e2c) @assert d == 3 (ξ1, ξ2, ξ3) = ξ x1 = reshape(vgeo.x1, (Nq..., nelem)) x2 = reshape(vgeo.x2, (Nq..., nelem)) x3 = reshape(vgeo.x3, (Nq..., nelem)) # Trilinear blend of corners @inbounds for (f, n) in zip((x1, x2, x3), 1:d) for e in 1:nelem, k in 1:Nq[3], j in 1:Nq[2], i in 1:Nq[1] f[i, j, k, e] = ( (1 - ξ1[i]) * (1 - ξ2[j]) * (1 - ξ3[k]) * e2c[n, 1, e] + (1 + ξ1[i]) * (1 - ξ2[j]) * (1 - ξ3[k]) * e2c[n, 2, e] + (1 - ξ1[i]) * (1 + ξ2[j]) * (1 - ξ3[k]) * e2c[n, 3, e] + (1 + ξ1[i]) * (1 + ξ2[j]) * (1 - ξ3[k]) * e2c[n, 4, e] + (1 - ξ1[i]) * (1 - ξ2[j]) * (1 + ξ3[k]) * e2c[n, 5, e] + (1 + ξ1[i]) * (1 - ξ2[j]) * (1 + ξ3[k]) * e2c[n, 6, e] + (1 - ξ1[i]) * (1 + ξ2[j]) * (1 + ξ3[k]) * e2c[n, 7, e] + (1 + ξ1[i]) * (1 + ξ2[j]) * (1 + ξ3[k]) * e2c[n, 8, e] ) / 8 end end nothing end """ compute_reference_to_physical_coord_jacobian!(vgeo, nelem, D) Input arguments: - vgeo::VolumeGeometry, a struct containing the volumetric geometric factors - D::NTuple{2,Int}, a tuple of derivative matrices, i.e., D = (D1,), where: - D1::DAT2, 1-D derivative operator on the device in the first dimension Compute the Jacobian matrix, ∂x / ∂ξ, of the transformation from reference coordinates, `ξ1`, to physical coordinates, `vgeo.x1`, for each quadrature point in element e. 
""" function compute_reference_to_physical_coord_jacobian!( vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray}, nelem, D::NTuple{1, Matrix{FT}}, ) where {Nq, FT} @assert Nq == map(d -> size(d, 1), D) T = eltype(vgeo.x1) (D1,) = D vgeo.x1ξ1 .= zero(T) for e in 1:nelem for i in 1:Nq[1] for n in 1:Nq[1] vgeo.x1ξ1[i, e] += D1[i, n] * vgeo.x1[n, e] end end end return vgeo end """ compute_reference_to_physical_coord_jacobian!(vgeo, nelem, D) Input arguments: - vgeo::VolumeGeometry, a struct containing the volumetric geometric factors - D::NTuple{2,Int}, a tuple of derivative matrices, i.e., D = (D1, D2), where: - D1::DAT2, 1-D derivative operator on the device in the first dimension - D2::DAT2, 1-D derivative operator on the device in the second dimension Compute the Jacobian matrix, ∂x / ∂ξ, of the transformation from reference coordinates, `ξ1`, `ξ2`, to physical coordinates, `vgeo.x1`, `vgeo.x2`, for each quadrature point in element e. """ function compute_reference_to_physical_coord_jacobian!( vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray}, nelem, D::NTuple{2, Matrix{FT}}, ) where {Nq, FT} @assert Nq == map(d -> size(d, 1), D) T = eltype(vgeo.x1) (D1, D2) = D x1 = reshape(vgeo.x1, (Nq..., nelem)) x2 = reshape(vgeo.x2, (Nq..., nelem)) x1ξ1 = reshape(vgeo.x1ξ1, (Nq..., nelem)) x2ξ1 = reshape(vgeo.x2ξ1, (Nq..., nelem)) x1ξ2 = reshape(vgeo.x1ξ2, (Nq..., nelem)) x2ξ2 = reshape(vgeo.x2ξ2, (Nq..., nelem)) x1ξ1 .= x1ξ2 .= zero(T) x2ξ1 .= x2ξ2 .= zero(T) for e in 1:nelem for j in 1:Nq[2], i in 1:Nq[1] for n in 1:Nq[1] x1ξ1[i, j, e] += D1[i, n] * x1[n, j, e] x2ξ1[i, j, e] += D1[i, n] * x2[n, j, e] end for n in 1:Nq[2] x1ξ2[i, j, e] += D2[j, n] * x1[i, n, e] x2ξ2[i, j, e] += D2[j, n] * x2[i, n, e] end end end return vgeo end """ compute_reference_to_physical_coord_jacobian!(vgeo, nelem, D) Input arguments: - vgeo::VolumeGeometry, a struct containing the volumetric geometric factors - D::NTuple{3,Int}, a tuple of derivative matrices, i.e., D = (D1, D2, D3), 
where: - D1::DAT2, 1-D derivative operator on the device in the first dimension - D2::DAT2, 1-D derivative operator on the device in the second dimension - D3::DAT2, 1-D derivative operator on the device in the third dimension Compute the Jacobian matrix, ∂x / ∂ξ, of the transformation from reference coordinates, `ξ1`, `ξ2`, `ξ3` to physical coordinates, `vgeo.x1`, `vgeo.x2`, `vgeo.x3` for each quadrature point in element e. """ function compute_reference_to_physical_coord_jacobian!( vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray}, nelem, D::NTuple{3, Matrix{FT}}, ) where {Nq, FT} @assert Nq == map(d -> size(d, 1), D) T = eltype(vgeo.x1) (D1, D2, D3) = D x1 = reshape(vgeo.x1, (Nq..., nelem)) x2 = reshape(vgeo.x2, (Nq..., nelem)) x3 = reshape(vgeo.x3, (Nq..., nelem)) x1ξ1 = reshape(vgeo.x1ξ1, (Nq..., nelem)) x2ξ1 = reshape(vgeo.x2ξ1, (Nq..., nelem)) x3ξ1 = reshape(vgeo.x3ξ1, (Nq..., nelem)) x1ξ2 = reshape(vgeo.x1ξ2, (Nq..., nelem)) x2ξ2 = reshape(vgeo.x2ξ2, (Nq..., nelem)) x3ξ2 = reshape(vgeo.x3ξ2, (Nq..., nelem)) x1ξ3 = reshape(vgeo.x1ξ3, (Nq..., nelem)) x2ξ3 = reshape(vgeo.x2ξ3, (Nq..., nelem)) x3ξ3 = reshape(vgeo.x3ξ3, (Nq..., nelem)) x1ξ1 .= x1ξ2 .= x1ξ3 .= zero(T) x2ξ1 .= x2ξ2 .= x2ξ3 .= zero(T) x3ξ1 .= x3ξ2 .= x3ξ3 .= zero(T) @inbounds for e in 1:nelem for k in 1:Nq[3], j in 1:Nq[2], i in 1:Nq[1] for n in 1:Nq[1] x1ξ1[i, j, k, e] += D1[i, n] * x1[n, j, k, e] x2ξ1[i, j, k, e] += D1[i, n] * x2[n, j, k, e] x3ξ1[i, j, k, e] += D1[i, n] * x3[n, j, k, e] end for n in 1:Nq[2] x1ξ2[i, j, k, e] += D2[j, n] * x1[i, n, k, e] x2ξ2[i, j, k, e] += D2[j, n] * x2[i, n, k, e] x3ξ2[i, j, k, e] += D2[j, n] * x3[i, n, k, e] end for n in 1:Nq[3] x1ξ3[i, j, k, e] += D3[k, n] * x1[i, j, n, e] x2ξ3[i, j, k, e] += D3[k, n] * x2[i, j, n, e] x3ξ3[i, j, k, e] += D3[k, n] * x3[i, j, n, e] end end end return vgeo end """ computemetric!(vgeo, sgeo, D) Input arguments: - vgeo::VolumeGeometry, a struct containing the volumetric geometric factors - sgeo::SurfaceGeometry, a struct 
containing the surface geometric factors
- D::NTuple{1,Int}, tuple with 1-D derivative operator on the device

Compute the 1-D metric terms from the element grid arrays `vgeo.x1`. All the
arrays are preallocated by the user and the (square) derivative matrix `D`
should be consistent with the reference grid `ξ1` used in
[`creategrid!`](@ref).

If `Nq = size(D, 1)` and `nelem = div(length(x1), Nq)` then the volume arrays
`x1`, `J`, and `ξ1x1` should all have length `Nq * nelem`. Similarly, the face
arrays `sJ` and `n1` should be of length `nface * nelem` with `nface = 2`.
"""
function computemetric!(
    vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray},
    sgeo::SurfaceGeometry{Nfp, <:AbstractArray},
    D::NTuple{1, Matrix{FT}},
) where {Nq, Nfp, FT}
    # The derivative matrix must match the number of quadrature points
    @assert Nq == map(d -> size(d, 1), D)
    nelem = div(length(vgeo.ωJ), Nq[1])
    # View the flat volume storage as (Nq1, nelem) so element endpoints can be
    # addressed when forming the face normals below. This reshape aliases
    # vgeo.ωJ, so it observes the assignment to vgeo.ωJ made below.
    ωJ = reshape(vgeo.ωJ, (Nq[1], nelem))
    # In 1-D the Jacobian reduces to the scalar ∂x1/∂ξ1; both the vertical
    # Jacobian JcV and the (weighted) Jacobian determinant ωJ coincide with it
    vgeo.JcV .= vgeo.x1ξ1
    vgeo.ωJ .= vgeo.x1ξ1
    # Inverse metric term: ∂ξ1/∂x1 = 1 / (∂x1/∂ξ1)
    vgeo.ξ1x1 .= 1 ./ vgeo.ωJ
    # Outward normals at the two element faces: face 1 points in -x1 when the
    # Jacobian is positive, face 2 in +x1
    sgeo.n1[1, 1, :] .= -sign.(ωJ[1, :])
    sgeo.n1[1, 2, :] .= sign.(ωJ[Nq[1], :])
    # Point faces have unit surface Jacobian
    sgeo.sωJ .= 1
    nothing
end

"""
    computemetric!(vgeo, sgeo, D)

Input arguments:
- vgeo::VolumeGeometry, a struct containing the volumetric geometric factors
- sgeo::SurfaceGeometry, a struct containing the surface geometric factors
- D::NTuple{2,Int}, a tuple of derivative matrices, i.e., D = (D1, D2), where:
    - D1::DAT2, 1-D derivative operator on the device in the first dimension
    - D2::DAT2, 1-D derivative operator on the device in the second dimension

Compute the 2-D metric terms from the element grid arrays `vgeo.x1` and
`vgeo.x2`. All the arrays are preallocated by the user and the (square)
derivative matrice `D1` and `D2` should be consistent with the reference grid
`ξ1` and `ξ2` used in [`creategrid!`](@ref).
If `Nq = (size(D1, 1), size(D2, 1))` and `nelem = div(length(vgeo.x1),
prod(Nq))` then the volume arrays `vgeo.x1`, `vgeo.x2`, `vgeo.ωJ`,
`vgeo.ξ1x1`, `vgeo.ξ2x1`, `vgeo.ξ1x2`, and `vgeo.ξ2x2` should all be of size
`(Nq..., nelem)`. Similarly, the face arrays `sgeo.sωJ`, `sgeo.n1`, and
`sgeo.n2` should be of size `(maximum(Nq), nface, nelem)` with `nface = 4`
"""
function computemetric!(
    vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray},
    sgeo::SurfaceGeometry{Nfp, <:AbstractArray},
    D::NTuple{2, Matrix{FT}},
) where {Nq, Nfp, FT}
    # Derivative matrices must match the quadrature point counts, and the
    # face point counts must be consistent with the volume point counts
    @assert Nq == map(d -> size(d, 1), D)
    @assert Nfp == div.(prod(Nq), Nq)
    nelem = div(length(vgeo.ωJ), prod(Nq))
    # Views of the flat volume storage as (Nq1, Nq2, nelem) arrays; these
    # reshapes alias the vgeo fields, so writes update vgeo in place
    x1 = reshape(vgeo.x1, (Nq..., nelem))
    x2 = reshape(vgeo.x2, (Nq..., nelem))
    ωJ = reshape(vgeo.ωJ, (Nq..., nelem))
    JcV = reshape(vgeo.JcV, (Nq..., nelem))
    ξ1x1 = reshape(vgeo.ξ1x1, (Nq..., nelem))
    ξ2x1 = reshape(vgeo.ξ2x1, (Nq..., nelem))
    ξ1x2 = reshape(vgeo.ξ1x2, (Nq..., nelem))
    ξ2x2 = reshape(vgeo.ξ2x2, (Nq..., nelem))
    x1ξ1 = reshape(vgeo.x1ξ1, (Nq..., nelem))
    x1ξ2 = reshape(vgeo.x1ξ2, (Nq..., nelem))
    x2ξ1 = reshape(vgeo.x2ξ1, (Nq..., nelem))
    x2ξ2 = reshape(vgeo.x2ξ2, (Nq..., nelem))
    nface = 4
    # Views of the flat surface storage as (max face points, nface, nelem)
    n1 = reshape(sgeo.n1, (maximum(Nfp), nface, nelem))
    n2 = reshape(sgeo.n2, (maximum(Nfp), nface, nelem))
    sωJ = reshape(sgeo.sωJ, (maximum(Nfp), nface, nelem))
    for e in 1:nelem
        for j in 1:Nq[2], i in 1:Nq[1]
            # Compute vertical Jacobian determinant, JcV, per quadrature point
            JcV[i, j, e] = hypot(x1ξ2[i, j, e], x2ξ2[i, j, e])
            # Compute Jacobian determinant, det(∂x/∂ξ), per quadrature point
            ωJ[i, j, e] =
                x1ξ1[i, j, e] * x2ξ2[i, j, e] - x2ξ1[i, j, e] * x1ξ2[i, j, e]
            # Inverse metric terms from the cofactors of the 2x2 Jacobian
            ξ1x1[i, j, e] = x2ξ2[i, j, e] / ωJ[i, j, e]
            ξ2x1[i, j, e] = -x2ξ1[i, j, e] / ωJ[i, j, e]
            ξ1x2[i, j, e] = -x1ξ2[i, j, e] / ωJ[i, j, e]
            ξ2x2[i, j, e] = x1ξ1[i, j, e] / ωJ[i, j, e]
        end
        # Compute surface struct field entries
        for i in 1:maximum(Nfp)
            if i <= Nfp[1]
                # Faces 1 (-ξ1 side) and 2 (+ξ1 side)
                sgeo.n1[i, 1, e] = -ωJ[1, i, e] * ξ1x1[1, i, e]
                sgeo.n2[i, 1, e] = -ωJ[1, i, e] * ξ1x2[1, i, e]
                sgeo.n1[i, 2, e] = ωJ[Nq[1], i, e] * ξ1x1[Nq[1], i, e]
                sgeo.n2[i, 2, e] = ωJ[Nq[1], i, e] * ξ1x2[Nq[1], i, e]
            else
                # Entries past this face's point count are padding
                sgeo.n1[i, 1:2, e] .= NaN
                sgeo.n2[i, 1:2, e] .= NaN
            end
            if i <= Nfp[2]
                # Faces 3 (-ξ2 side) and 4 (+ξ2 side)
                sgeo.n1[i, 3, e] = -ωJ[i, 1, e] * ξ2x1[i, 1, e]
                sgeo.n2[i, 3, e] = -ωJ[i, 1, e] * ξ2x2[i, 1, e]
                sgeo.n1[i, 4, e] = ωJ[i, Nq[2], e] * ξ2x1[i, Nq[2], e]
                sgeo.n2[i, 4, e] = ωJ[i, Nq[2], e] * ξ2x2[i, Nq[2], e]
            else
                sgeo.n1[i, 3:4, e] .= NaN
                sgeo.n2[i, 3:4, e] .= NaN
            end
            # Normalize: sωJ gets the face Jacobian (normal's magnitude) and
            # n1/n2 become the outward unit normal components
            for n in 1:nface
                sgeo.sωJ[i, n, e] = hypot(n1[i, n, e], n2[i, n, e])
                sgeo.n1[i, n, e] /= sωJ[i, n, e]
                sgeo.n2[i, n, e] /= sωJ[i, n, e]
            end
        end
    end
    nothing
end

"""
    computemetric!(vgeo, sgeo, D)

Input arguments:
- vgeo::VolumeGeometry, a struct containing the volumetric geometric factors
- sgeo::SurfaceGeometry, a struct containing the surface geometric factors
- D::NTuple{3,Int}, a tuple of derivative matrices, i.e., D = (D1, D2, D3),
  where:
    - D1::DAT2, 1-D derivative operator on the device in the first dimension
    - D2::DAT2, 1-D derivative operator on the device in the second dimension
    - D3::DAT2, 1-D derivative operator on the device in the third dimension

Compute the 3-D metric terms from the element grid arrays `vgeo.x1`,
`vgeo.x2`, and `vgeo.x3`. All the arrays are preallocated by the user and the
(square) derivative matrice `D1`, `D2`, and `D3` should be consistent with the
reference grid `ξ1`, `ξ2`, and `ξ3` used in [`creategrid!`](@ref).

If `Nq = size(D1, 1)` and `nelem = div(length(vgeo.x1), Nq^3)` then the volume
arrays `vgeo.x1`, `vgeo.x2`, `vgeo.x3`, `vgeo.ωJ`, `vgeo.ξ1x1`, `vgeo.ξ2x1`,
`vgeo.ξ3x1`, `vgeo.ξ1x2`, `vgeo.ξ2x2`, `vgeo.ξ3x2`, `vgeo.ξ1x3`,`vgeo.ξ2x3`,
and `vgeo.ξ3x3` should all be of length `Nq^3 * nelem`. Similarly, the face
arrays `sgeo.sωJ`, `sgeo.n1`, `sgeo.n2`, and `sgeo.n3` should be of size
`Nq^2 * nface * nelem` with `nface = 6`.

The curl invariant formulation of Kopriva (2006), equation 37, is used.
Reference:
 - [Kopriva2006](@cite)
"""
function computemetric!(
    vgeo::VolumeGeometry{Nq, <:AbstractArray, <:AbstractArray},
    sgeo::SurfaceGeometry{Nfp, <:AbstractArray},
    D::NTuple{3, Matrix{FT}},
) where {Nq, Nfp, FT}
    # Derivative matrices must match the quadrature point counts, and the
    # face point counts must be consistent with the volume point counts
    @assert Nq == map(d -> size(d, 1), D)
    @assert Nfp == div.(prod(Nq), Nq)
    T = eltype(vgeo.x1)
    nelem = div(length(vgeo.ωJ), prod(Nq))

    # Views of the flat volume storage as (Nq1, Nq2, Nq3, nelem) arrays;
    # these reshapes alias the vgeo fields, so writes update vgeo in place
    x1 = reshape(vgeo.x1, (Nq..., nelem))
    x2 = reshape(vgeo.x2, (Nq..., nelem))
    x3 = reshape(vgeo.x3, (Nq..., nelem))
    ωJ = reshape(vgeo.ωJ, (Nq..., nelem))
    JcV = reshape(vgeo.JcV, (Nq..., nelem))
    ξ1x1 = reshape(vgeo.ξ1x1, (Nq..., nelem))
    ξ2x1 = reshape(vgeo.ξ2x1, (Nq..., nelem))
    ξ3x1 = reshape(vgeo.ξ3x1, (Nq..., nelem))
    ξ1x2 = reshape(vgeo.ξ1x2, (Nq..., nelem))
    ξ2x2 = reshape(vgeo.ξ2x2, (Nq..., nelem))
    ξ3x2 = reshape(vgeo.ξ3x2, (Nq..., nelem))
    ξ1x3 = reshape(vgeo.ξ1x3, (Nq..., nelem))
    ξ2x3 = reshape(vgeo.ξ2x3, (Nq..., nelem))
    ξ3x3 = reshape(vgeo.ξ3x3, (Nq..., nelem))
    x1ξ1 = reshape(vgeo.x1ξ1, (Nq..., nelem))
    x1ξ2 = reshape(vgeo.x1ξ2, (Nq..., nelem))
    x1ξ3 = reshape(vgeo.x1ξ3, (Nq..., nelem))
    x2ξ1 = reshape(vgeo.x2ξ1, (Nq..., nelem))
    x2ξ2 = reshape(vgeo.x2ξ2, (Nq..., nelem))
    x2ξ3 = reshape(vgeo.x2ξ3, (Nq..., nelem))
    x3ξ1 = reshape(vgeo.x3ξ1, (Nq..., nelem))
    x3ξ2 = reshape(vgeo.x3ξ2, (Nq..., nelem))
    x3ξ3 = reshape(vgeo.x3ξ3, (Nq..., nelem))

    nface = 6
    # Views of the flat surface storage as (max face points, nface, nelem)
    n1 = reshape(sgeo.n1, maximum(Nfp), nface, nelem)
    n2 = reshape(sgeo.n2, maximum(Nfp), nface, nelem)
    n3 = reshape(sgeo.n3, maximum(Nfp), nface, nelem)
    sωJ = reshape(sgeo.sωJ, maximum(Nfp), nface, nelem)

    # Scratch storage, one value per quadrature point of a single element.
    # JI2 holds 1/(2J); yz*/zx*/xy* hold the cross-pair terms of the
    # curl-invariant form (e.g. yzr = x2 ∂x3/∂ξ1 - x3 ∂x2/∂ξ1)
    JI2 = similar(vgeo.ωJ, Nq...)
    (yzr, yzs, yzt) = (similar(JI2), similar(JI2), similar(JI2))
    (zxr, zxs, zxt) = (similar(JI2), similar(JI2), similar(JI2))
    (xyr, xys, xyt) = (similar(JI2), similar(JI2), similar(JI2))
    # Temporary variables to compute inverse of a 3x3 matrix
    (a11, a12, a13) = (similar(JI2), similar(JI2), similar(JI2))
    (a21, a22, a23) = (similar(JI2), similar(JI2), similar(JI2))
    (a31, a32, a33) = (similar(JI2), similar(JI2), similar(JI2))

    # Zero the metric accumulators; surface arrays start as NaN so unused
    # padding entries are clearly marked
    ξ1x1 .= ξ2x1 .= ξ3x1 .= zero(T)
    ξ1x2 .= ξ2x2 .= ξ3x2 .= zero(T)
    ξ1x3 .= ξ2x3 .= ξ3x3 .= zero(T)
    fill!(n1, NaN)
    fill!(n2, NaN)
    fill!(n3, NaN)
    fill!(sωJ, NaN)

    @inbounds for e in 1:nelem
        # Pass 1: Jacobian determinant and the cross-pair scratch terms
        for k in 1:Nq[3], j in 1:Nq[2], i in 1:Nq[1]
            # Compute vertical Jacobian determinant, JcV, per quadrature point
            JcV[i, j, k, e] =
                hypot(x1ξ3[i, j, k, e], x2ξ3[i, j, k, e], x3ξ3[i, j, k, e])
            # Compute Jacobian determinant, det(∂x/∂ξ), per quadrature point
            ωJ[i, j, k, e] = (
                x1ξ1[i, j, k, e] * (
                    x2ξ2[i, j, k, e] * x3ξ3[i, j, k, e] -
                    x3ξ2[i, j, k, e] * x2ξ3[i, j, k, e]
                ) +
                x2ξ1[i, j, k, e] * (
                    x3ξ2[i, j, k, e] * x1ξ3[i, j, k, e] -
                    x1ξ2[i, j, k, e] * x3ξ3[i, j, k, e]
                ) +
                x3ξ1[i, j, k, e] * (
                    x1ξ2[i, j, k, e] * x2ξ3[i, j, k, e] -
                    x2ξ2[i, j, k, e] * x1ξ3[i, j, k, e]
                )
            )
            JI2[i, j, k] = 1 / (2 * ωJ[i, j, k, e])

            yzr[i, j, k] =
                x2[i, j, k, e] * x3ξ1[i, j, k, e] -
                x3[i, j, k, e] * x2ξ1[i, j, k, e]
            yzs[i, j, k] =
                x2[i, j, k, e] * x3ξ2[i, j, k, e] -
                x3[i, j, k, e] * x2ξ2[i, j, k, e]
            yzt[i, j, k] =
                x2[i, j, k, e] * x3ξ3[i, j, k, e] -
                x3[i, j, k, e] * x2ξ3[i, j, k, e]
            zxr[i, j, k] =
                x3[i, j, k, e] * x1ξ1[i, j, k, e] -
                x1[i, j, k, e] * x3ξ1[i, j, k, e]
            zxs[i, j, k] =
                x3[i, j, k, e] * x1ξ2[i, j, k, e] -
                x1[i, j, k, e] * x3ξ2[i, j, k, e]
            zxt[i, j, k] =
                x3[i, j, k, e] * x1ξ3[i, j, k, e] -
                x1[i, j, k, e] * x3ξ3[i, j, k, e]
            xyr[i, j, k] =
                x1[i, j, k, e] * x2ξ1[i, j, k, e] -
                x2[i, j, k, e] * x1ξ1[i, j, k, e]
            xys[i, j, k] =
                x1[i, j, k, e] * x2ξ2[i, j, k, e] -
                x2[i, j, k, e] * x1ξ2[i, j, k, e]
            xyt[i, j, k] =
                x1[i, j, k, e] * x2ξ3[i, j, k, e] -
                x2[i, j, k, e] * x1ξ3[i, j, k, e]
        end
        # Pass 2: curl-invariant metric terms (Kopriva 2006, eq. 37), then
        # invert the resulting 3x3 matrix per quadrature point
        for k in 1:Nq[3], j in 1:Nq[2], i in 1:Nq[1]
            for n in 1:Nq[1]
                ξ2x1[i, j, k, e] -= D[1][i, n] * yzt[n, j, k]
                ξ3x1[i, j, k, e] += D[1][i, n] * yzs[n, j, k]
                ξ2x2[i, j, k, e] -= D[1][i, n] * zxt[n, j, k]
                ξ3x2[i, j, k, e] += D[1][i, n] * zxs[n, j, k]
                ξ2x3[i, j, k, e] -= D[1][i, n] * xyt[n, j, k]
                ξ3x3[i, j, k, e] += D[1][i, n] * xys[n, j, k]
            end
            for n in 1:Nq[2]
                ξ1x1[i, j, k, e] += D[2][j, n] * yzt[i, n, k]
                ξ3x1[i, j, k, e] -= D[2][j, n] * yzr[i, n, k]
                ξ1x2[i, j, k, e] += D[2][j, n] * zxt[i, n, k]
                ξ3x2[i, j, k, e] -= D[2][j, n] * zxr[i, n, k]
                ξ1x3[i, j, k, e] += D[2][j, n] * xyt[i, n, k]
                ξ3x3[i, j, k, e] -= D[2][j, n] * xyr[i, n, k]
            end
            for n in 1:Nq[3]
                ξ1x1[i, j, k, e] -= D[3][k, n] * yzs[i, j, n]
                ξ2x1[i, j, k, e] += D[3][k, n] * yzr[i, j, n]
                ξ1x2[i, j, k, e] -= D[3][k, n] * zxs[i, j, n]
                ξ2x2[i, j, k, e] += D[3][k, n] * zxr[i, j, n]
                ξ1x3[i, j, k, e] -= D[3][k, n] * xys[i, j, n]
                ξ2x3[i, j, k, e] += D[3][k, n] * xyr[i, j, n]
            end
            # Scale by 1/(2J) to complete the curl-invariant form
            ξ1x1[i, j, k, e] *= JI2[i, j, k]
            ξ2x1[i, j, k, e] *= JI2[i, j, k]
            ξ3x1[i, j, k, e] *= JI2[i, j, k]
            ξ1x2[i, j, k, e] *= JI2[i, j, k]
            ξ2x2[i, j, k, e] *= JI2[i, j, k]
            ξ3x2[i, j, k, e] *= JI2[i, j, k]
            ξ1x3[i, j, k, e] *= JI2[i, j, k]
            ξ2x3[i, j, k, e] *= JI2[i, j, k]
            ξ3x3[i, j, k, e] *= JI2[i, j, k]

            # Invert ∂ξk/∂xi, since the discrete curl-invariant form that we
            # have just computed, ∂ξk/∂xi, is not equal to its inverse.
            # a** are the cofactors of the 3x3 matrix of metric terms.
            a11[i, j, k] =
                ξ2x2[i, j, k, e] * ξ3x3[i, j, k, e] -
                ξ2x3[i, j, k, e] * ξ3x2[i, j, k, e]
            a12[i, j, k] =
                ξ1x3[i, j, k, e] * ξ3x2[i, j, k, e] -
                ξ1x2[i, j, k, e] * ξ3x3[i, j, k, e]
            a13[i, j, k] =
                ξ1x2[i, j, k, e] * ξ2x3[i, j, k, e] -
                ξ1x3[i, j, k, e] * ξ2x2[i, j, k, e]
            a21[i, j, k] =
                ξ2x3[i, j, k, e] * ξ3x1[i, j, k, e] -
                ξ2x1[i, j, k, e] * ξ3x3[i, j, k, e]
            a22[i, j, k] =
                ξ1x1[i, j, k, e] * ξ3x3[i, j, k, e] -
                ξ1x3[i, j, k, e] * ξ3x1[i, j, k, e]
            a23[i, j, k] =
                ξ1x3[i, j, k, e] * ξ2x1[i, j, k, e] -
                ξ1x1[i, j, k, e] * ξ2x3[i, j, k, e]
            a31[i, j, k] =
                ξ2x1[i, j, k, e] * ξ3x2[i, j, k, e] -
                ξ2x2[i, j, k, e] * ξ3x1[i, j, k, e]
            a32[i, j, k] =
                ξ1x2[i, j, k, e] * ξ3x1[i, j, k, e] -
                ξ1x1[i, j, k, e] * ξ3x2[i, j, k, e]
            a33[i, j, k] =
                ξ1x1[i, j, k, e] * ξ2x2[i, j, k, e] -
                ξ1x2[i, j, k, e] * ξ2x1[i, j, k, e]
            det =
                ξ1x1[i, j, k, e] * a11[i, j, k] +
                ξ2x1[i, j, k, e] * a12[i, j, k] +
                ξ3x1[i, j, k, e] * a13[i, j, k]
            # Store the inverse (the x*ξ* arrays are overwritten with it)
            x1ξ1[i, j, k, e] =
                1.0 / det * (
                    a11[i, j, k] * a11[i, j, k] +
                    a12[i, j, k] * a12[i, j, k] +
                    a13[i, j, k] * a13[i, j, k]
                )
            x1ξ2[i, j, k, e] =
                1.0 / det * (
                    a11[i, j, k] * a21[i, j, k] +
                    a12[i, j, k] * a22[i, j, k] +
                    a13[i, j, k] * a23[i, j, k]
                )
            x1ξ3[i, j, k, e] =
                1.0 / det * (
                    a11[i, j, k] * a31[i, j, k] +
                    a12[i, j, k] * a32[i, j, k] +
                    a13[i, j, k] * a33[i, j, k]
                )
            x2ξ1[i, j, k, e] =
                1.0 / det * (
                    a21[i, j, k] * a11[i, j, k] +
                    a22[i, j, k] * a12[i, j, k] +
                    a23[i, j, k] * a13[i, j, k]
                )
            x2ξ2[i, j, k, e] =
                1.0 / det * (
                    a21[i, j, k] * a21[i, j, k] +
                    a22[i, j, k] * a22[i, j, k] +
                    a23[i, j, k] * a23[i, j, k]
                )
            x2ξ3[i, j, k, e] =
                1.0 / det * (
                    a21[i, j, k] * a31[i, j, k] +
                    a22[i, j, k] * a32[i, j, k] +
                    a23[i, j, k] * a33[i, j, k]
                )
            x3ξ1[i, j, k, e] =
                1.0 / det * (
                    a31[i, j, k] * a11[i, j, k] +
                    a32[i, j, k] * a12[i, j, k] +
                    a33[i, j, k] * a13[i, j, k]
                )
            x3ξ2[i, j, k, e] =
                1.0 / det * (
                    a31[i, j, k] * a21[i, j, k] +
                    a32[i, j, k] * a22[i, j, k] +
                    a33[i, j, k] * a23[i, j, k]
                )
            x3ξ3[i, j, k, e] =
                1.0 / det * (
                    a31[i, j, k] * a31[i, j, k] +
                    a32[i, j, k] * a32[i, j, k] +
                    a33[i, j, k] * a33[i, j, k]
                )
        end

        # Compute surface struct field entries
        # faces 1 & 2
        for k in 1:Nq[3], j in 1:Nq[2]
            n = j + (k - 1) * Nq[2]
            sgeo.n1[n, 1, e] = -ωJ[1, j, k, e] * ξ1x1[1, j, k, e]
            sgeo.n2[n, 1, e] = -ωJ[1, j, k, e] * ξ1x2[1, j, k, e]
            sgeo.n3[n, 1, e] = -ωJ[1, j, k, e] * ξ1x3[1, j, k, e]
            sgeo.n1[n, 2, e] = ωJ[Nq[1], j, k, e] * ξ1x1[Nq[1], j, k, e]
            sgeo.n2[n, 2, e] = ωJ[Nq[1], j, k, e] * ξ1x2[Nq[1], j, k, e]
            sgeo.n3[n, 2, e] = ωJ[Nq[1], j, k, e] * ξ1x3[Nq[1], j, k, e]
            # Normalize: sωJ gets the face Jacobian, n the outward unit normal
            for f in 1:2
                sgeo.sωJ[n, f, e] = hypot(n1[n, f, e], n2[n, f, e], n3[n, f, e])
                sgeo.n1[n, f, e] /= sωJ[n, f, e]
                sgeo.n2[n, f, e] /= sωJ[n, f, e]
                sgeo.n3[n, f, e] /= sωJ[n, f, e]
            end
        end
        # faces 3 & 4
        for k in 1:Nq[3], i in 1:Nq[1]
            n = i + (k - 1) * Nq[1]
            sgeo.n1[n, 3, e] = -ωJ[i, 1, k, e] * ξ2x1[i, 1, k, e]
            sgeo.n2[n, 3, e] = -ωJ[i, 1, k, e] * ξ2x2[i, 1, k, e]
            sgeo.n3[n, 3, e] = -ωJ[i, 1, k, e] * ξ2x3[i, 1, k, e]
            sgeo.n1[n, 4, e] = ωJ[i, Nq[2], k, e] * ξ2x1[i, Nq[2], k, e]
            sgeo.n2[n, 4, e] = ωJ[i, Nq[2], k, e] * ξ2x2[i, Nq[2], k, e]
            sgeo.n3[n, 4, e] = ωJ[i, Nq[2], k, e] * ξ2x3[i, Nq[2], k, e]
            for f in 3:4
                sgeo.sωJ[n, f, e] = hypot(n1[n, f, e], n2[n, f, e], n3[n, f, e])
                sgeo.n1[n, f, e] /= sωJ[n, f, e]
                sgeo.n2[n, f, e] /= sωJ[n, f, e]
                sgeo.n3[n, f, e] /= sωJ[n, f, e]
            end
        end
        # faces 5 & 6
        for j in 1:Nq[2], i in 1:Nq[1]
            n = i + (j - 1) * Nq[1]
            sgeo.n1[n, 5, e] = -ωJ[i, j, 1, e] * ξ3x1[i, j, 1, e]
            sgeo.n2[n, 5, e] = -ωJ[i, j, 1, e] * ξ3x2[i, j, 1, e]
            sgeo.n3[n, 5, e] = -ωJ[i, j, 1, e] * ξ3x3[i, j, 1, e]
            sgeo.n1[n, 6, e] = ωJ[i, j, Nq[3], e] * ξ3x1[i, j, Nq[3], e]
            sgeo.n2[n, 6, e] = ωJ[i, j, Nq[3], e] * ξ3x2[i, j, Nq[3], e]
            sgeo.n3[n, 6, e] = ωJ[i, j, Nq[3], e] * ξ3x3[i, j, Nq[3], e]
            for f in 5:6
                sgeo.sωJ[n, f, e] = hypot(n1[n, f, e], n2[n, f, e], n3[n, f, e])
                sgeo.n1[n, f, e] /= sωJ[n, f, e]
                sgeo.n2[n, f, e] /= sωJ[n, f, e]
                sgeo.n3[n, f, e] /= sωJ[n, f, e]
            end
        end
    end
    nothing
end

end # module

================================================ FILE: src/Numerics/Mesh/Topologies.jl ================================================

module Topologies

using ClimateMachine
using CubedSphere, Rotations
import ..BrickMesh
import MPI
using CUDA
using DocStringExtensions

export AbstractTopology,
    BrickTopology,
    StackedBrickTopology,
    CubedShellTopology,
    StackedCubedSphereTopology,
    AnalyticalTopography,
    NoTopography,
    DCMIPMountain,
    EquidistantCubedSphere,
    EquiangularCubedSphere,
    isstacked,
    cubed_sphere_topo_warp,
    compute_lat_long,
    compute_analytical_topography,
    cubed_sphere_warp,
    cubed_sphere_unwarp,
    equiangular_cubed_sphere_warp,
    equiangular_cubed_sphere_unwarp,
    equidistant_cubed_sphere_warp,
    equidistant_cubed_sphere_unwarp,
    conformal_cubed_sphere_warp,
    conformal_cubed_sphere_unwarp

export grid1d, SingleExponentialStretching, InteriorStretching
export basic_topology_info

"""
    AbstractTopology{dim, T, nb}

Represents the connectivity of individual elements, with local dimension `dim`
with `nb` boundary conditions types. The element coordinates are of type `T`.
"""
abstract type AbstractTopology{dim, T, nb} end

"""
    BoxElementTopology{dim, T, nb} <: AbstractTopology{dim,T,nb}

The local topology of a larger MPI-distributed topology, represented by
`dim`-dimensional box elements, with `nb` boundary conditions.

This contains the necessary information for the connectivity elements of the
elements on the local process, along with "ghost" elements from neighbouring
processes.

# Fields
$(DocStringExtensions.FIELDS)
"""
struct BoxElementTopology{dim, T, nb} <: AbstractTopology{dim, T, nb}
    """
    MPI communicator for communicating with neighbouring processes.
    """
    mpicomm::MPI.Comm

    """
    Range of element indices
    """
    elems::UnitRange{Int64}

    """
    Range of real (aka nonghost) element indices
    """
    realelems::UnitRange{Int64}

    """
    Range of ghost element indices
    """
    ghostelems::UnitRange{Int64}

    """
    Ghost element to face is received; `ghostfaces[f,ge] == true` if face `f`
    of ghost element `ge` is received.
    """
    ghostfaces::BitArray{2}

    """
    Array of send element indices
    """
    sendelems::Array{Int64, 1}

    """
    Send element to face is sent; `sendfaces[f,se] == true` if face `f` of
    send element `se` is sent.
    """
    sendfaces::BitArray{2}

    """
    Array of real elements that do not have a ghost element as a neighbor.
    """
    interiorelems::Array{Int64, 1}

    """
    Array of real elements that have at least one ghost element as a neighbor.

    Note that this is different from `sendelems` because `sendelems` duplicates
    elements that need to be sent to multiple neighboring processes.
    """
    exteriorelems::Array{Int64, 1}

    """
    Element to vertex coordinates; `elemtocoord[d,i,e]` is the `d`th coordinate
    of corner `i` of element `e`

    !!! note
        currently coordinates always are of size 3 for `(x1, x2, x3)`
    """
    elemtocoord::Array{T, 3}

    """
    Element to neighboring element; `elemtoelem[f,e]` is the number of the
    element neighboring element `e` across face `f`.  If it is a boundary
    face, then it is the boundary element index.
    """
    elemtoelem::Array{Int64, 2}

    """
    Element to neighboring element face; `elemtoface[f,e]` is the face number
    of the element neighboring element `e` across face `f`.  If there is no
    neighboring element then `elemtoface[f,e] == f`.
    """
    elemtoface::Array{Int64, 2}

    """
    element to neighboring element order; `elemtoordr[f,e]` is the ordering
    number of the element neighboring element `e` across face `f`.  If there
    is no neighboring element then `elemtoordr[f,e] == 1`.
    """
    elemtoordr::Array{Int64, 2}

    """
    Element to boundary number; `elemtobndy[f,e]` is the boundary number of
    face `f` of element `e`.  If there is a neighboring element then
    `elemtobndy[f,e] == 0`.
    """
    elemtobndy::Array{Int64, 2}

    """
    List of the MPI ranks for the neighboring processes
    """
    nabrtorank::Array{Int64, 1}

    """
    Range in ghost elements to receive for each neighbor
    """
    nabrtorecv::Array{UnitRange{Int64}, 1}

    """
    Range in `sendelems` to send for each neighbor
    """
    nabrtosend::Array{UnitRange{Int64}, 1}

    """
    original order in partitioning
    """
    origsendorder::Array{Int64, 1}

    """
    Tuple of boundary to element. `bndytoelem[b][i]` is the element which
    faces the `i`th boundary element of boundary `b`.
    """
    bndytoelem::NTuple{nb, Array{Int64, 1}}

    """
    Tuple of boundary to element face. `bndytoface[b][i]` is the face number
    of the element which faces the `i`th boundary element of boundary `b`.
    """
    bndytoface::NTuple{nb, Array{Int64, 1}}

    """
    Element to locally unique vertex number; `elemtouvert[v,e]` is the `v`th
    vertex of element `e`
    """
    elemtouvert::Union{Array{Int64, 2}, Nothing}

    """
    Vertex connectivity information for direct stiffness summation;
    `vtconn[vtconnoff[i]:vtconnoff[i+1]-1] ==
    vtconn[lvt1, lelem1, lvt2, lelem2, ....]` for each rank-local unique
    vertex number, where `lvt1` is the element-local vertex number of element
    `lelem1`, etc. Vertices, not shared by multiple elements, are ignored.
    """
    vtconn::Union{AbstractArray{Int64, 1}, Nothing}

    """
    Vertex offset for vertex connectivity information
    """
    vtconnoff::Union{AbstractArray{Int64, 1}, Nothing}

    """
    Face connectivity information for direct stiffness summation;
    `fcconn[ufc,:] = [lfc1, lelem1, lfc2, lelem2, ordr]` where `ufc` is the
    rank-local unique face number, and `lfc1` is the element-local face number
    of element `lelem1`, etc., and ordr is the relative orientation. Faces,
    not shared by multiple elements are ignored.
    """
    fcconn::Union{AbstractArray{Int64, 2}, Nothing}

    """
    Edge connectivity information for direct stiffness summation;
    `edgconn[edgconnoff[i]:edgconnoff[i+1]-1] ==
    edgconn[ledg1, orient, lelem1, ledg2, orient, lelem2, ...]` for each
    rank-local unique edge number, where `ledg1` is the element-local edge
    number, `orient1` is orientation (forward/reverse) of dof along edge.
    Edges, not shared by multiple elements, are ignored.
    """
    edgconn::Union{AbstractArray{Int64, 1}, Nothing}

    """
    Edge offset for edge connectivity information
    """
    edgconnoff::Union{AbstractArray{Int64, 1}, Nothing}

    # Inner constructor: derives interior/exterior element lists from
    # `sendelems` and moves the optional DSS connectivity arrays to the
    # configured device array type.
    function BoxElementTopology{dim, T, nb}(
        mpicomm,
        elems,
        realelems,
        ghostelems,
        ghostfaces,
        sendelems,
        sendfaces,
        elemtocoord,
        elemtoelem,
        elemtoface,
        elemtoordr,
        elemtobndy,
        nabrtorank,
        nabrtorecv,
        nabrtosend,
        origsendorder,
        bndytoelem,
        bndytoface;
        device_array = ClimateMachine.array_type(),
        elemtouvert = nothing,
        vtconn = nothing,
        vtconnoff = nothing,
        fcconn = nothing,
        edgconn = nothing,
        edgconnoff = nothing,
    ) where {dim, T, nb}
        # sendelems may contain duplicates (one entry per destination rank);
        # exteriorelems is the deduplicated, sorted version
        exteriorelems = sort(unique(sendelems))
        interiorelems = sort(setdiff(realelems, exteriorelems))
        # Move DSS connectivity to the device when provided as host arrays
        if vtconn isa Array && vtconnoff isa Array
            vtconn = device_array(vtconn)
            vtconnoff = device_array(vtconnoff)
        end
        if fcconn isa Array
            fcconn = device_array(fcconn)
        end
        if edgconn isa Array && edgconnoff isa Array
            edgconn = device_array(edgconn)
            edgconnoff = device_array(edgconnoff)
        end
        return new{dim, T, nb}(
            mpicomm,
            elems,
            realelems,
            ghostelems,
            ghostfaces,
            sendelems,
            sendfaces,
            interiorelems,
            exteriorelems,
            elemtocoord,
            elemtoelem,
            elemtoface,
            elemtoordr,
            elemtobndy,
            nabrtorank,
            nabrtorecv,
            nabrtosend,
            origsendorder,
            bndytoelem,
            bndytoface,
            elemtouvert,
            vtconn,
            vtconnoff,
            fcconn,
            edgconn,
            edgconnoff,
        )
    end
end

"""
    hasboundary(topology::AbstractTopology)

query function to check whether a topology has a boundary (i.e., not fully
periodic)
"""
hasboundary(topology::AbstractTopology{dim, T, nb}) where {dim, T, nb} =
    nb != 0

# `hasfield` is only available from Julia 1.2; fall back to `fieldindex`
if VERSION >= v"1.2-"
    isstacked(::T) where {T <: AbstractTopology} = hasfield(T, :stacksize)
else
    isstacked(::T) where {T <: AbstractTopology} =
        Base.fieldindex(T, :stacksize, false) > 0
end

"""
    BrickTopology{dim, T, nb} <: AbstractTopology{dim, T, nb}

A simple grid-based topology. This is a convenience wrapper around
[`BoxElementTopology`](@ref).
""" struct BrickTopology{dim, T, nb} <: AbstractTopology{dim, T, nb} topology::BoxElementTopology{dim, T, nb} end Base.getproperty(a::BrickTopology, p::Symbol) = getproperty(getfield(a, :topology), p) """ CubedShellTopology{T} <: AbstractTopology{2, T, 0} A cube-shell topology. This is a convenience wrapper around [`BoxElementTopology`](@ref). """ struct CubedShellTopology{T} <: AbstractTopology{2, T, 0} topology::BoxElementTopology{2, T, 0} end Base.getproperty(a::CubedShellTopology, p::Symbol) = getproperty(getfield(a, :topology), p) abstract type AbstractStackedTopology{dim, T, nb} <: AbstractTopology{dim, T, nb} end """ StackedBrickTopology{dim, T, nb} <: AbstractStackedTopology{dim} A simple grid-based topology, where all elements on the trailing dimension are stacked to be contiguous. This is a convenience wrapper around [`BoxElementTopology`](@ref). """ struct StackedBrickTopology{dim, T, nb} <: AbstractStackedTopology{dim, T, nb} topology::BoxElementTopology{dim, T, nb} stacksize::Int64 periodicstack::Bool end function Base.getproperty(a::StackedBrickTopology, p::Symbol) return p in (:stacksize, :periodicstack) ? getfield(a, p) : getproperty(getfield(a, :topology), p) end """ StackedCubedSphereTopology{T, nb} <: AbstractStackedTopology{3, T, nb} A cube-sphere topology. All elements on the same "vertical" dimension are stacked to be contiguous. This is a convenience wrapper around [`BoxElementTopology`](@ref). """ struct StackedCubedSphereTopology{T, nb} <: AbstractStackedTopology{3, T, nb} topology::BoxElementTopology{3, T, nb} stacksize::Int64 end function Base.getproperty(a::StackedCubedSphereTopology, p::Symbol) if p == :periodicstack return false else return p == :stacksize ? getfield(a, p) : getproperty(getfield(a, :topology), p) end end """ A wrapper for the BrickTopology """ BrickTopology(mpicomm, Nelems::NTuple{N, Integer}; kw...) where {N} = BrickTopology(mpicomm, map(Ne -> 0:Ne, Nelems); kw...) 
""" BrickTopology{dim, T}(mpicomm, elemrange; boundary, periodicity) Generate a brick mesh topology with coordinates given by the tuple `elemrange` and the periodic dimensions given by the `periodicity` tuple. The elements of the brick are partitioned equally across the MPI ranks based on a space-filling curve. By default boundary faces will be marked with a one and other faces with a zero. Specific boundary numbers can also be passed for each face of the brick in `boundary`. This will mark the nonperiodic brick faces with the given boundary number. # Examples We can build a 3 by 2 element two-dimensional mesh that is periodic in the \$x2\$-direction with ```jldoctest brickmesh using ClimateMachine.Topologies using MPI MPI.Init() topology = BrickTopology(MPI.COMM_SELF, (2:5,4:6); periodicity=(false,true), boundary=((1,2),(3,4))) ``` This returns the mesh structure for x2 ^ | 6- +-----+-----+-----+ | | | | | | | 3 | 4 | 5 | | | | | | 5- +-----+-----+-----+ | | | | | | | 1 | 2 | 6 | | | | | | 4- +-----+-----+-----+ | +--|-----|-----|-----|--> x1 2 3 4 5 For example, the (dimension by number of corners by number of elements) array `elemtocoord` gives the coordinates of the corners of each element. ```jldoctest brickmesh julia> topology.elemtocoord 2×4×6 Array{Int64,3}: [:, :, 1] = 2 3 2 3 4 4 5 5 [:, :, 2] = 3 4 3 4 4 4 5 5 [:, :, 3] = 2 3 2 3 5 5 6 6 [:, :, 4] = 3 4 3 4 5 5 6 6 [:, :, 5] = 4 5 4 5 5 5 6 6 [:, :, 6] = 4 5 4 5 4 4 5 5 ``` Note that the corners are listed in Cartesian order. The (number of faces by number of elements) array `elemtobndy` gives the boundary number for each face of each element. A zero will be given for connected faces. ```jldoctest brickmesh julia> topology.elemtobndy 4×6 Array{Int64,2}: 1 0 1 0 0 0 0 0 0 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 ``` Note that the faces are listed in Cartesian order. 
""" function BrickTopology( mpicomm, elemrange; boundary = ntuple(j -> (1, 1), length(elemrange)), periodicity = ntuple(j -> false, length(elemrange)), connectivity = :face, ghostsize = 1, ) if boundary isa Matrix boundary = tuple(mapslices(x -> tuple(x...), boundary, dims = 1)...) end # We cannot handle anything else right now... @assert ghostsize == 1 mpirank = MPI.Comm_rank(mpicomm) mpisize = MPI.Comm_size(mpicomm) topology = BrickMesh.brickmesh( elemrange, periodicity, part = mpirank + 1, numparts = mpisize, boundary = boundary, ) topology = BrickMesh.partition(mpicomm, topology...) origsendorder = topology[5] topology = connectivity == :face ? BrickMesh.connectmesh(mpicomm, topology[1:4]...) : BrickMesh.connectmeshfull(mpicomm, topology[1:4]...) bndytoelem, bndytoface = BrickMesh.enumerateboundaryfaces!( topology.elemtoelem, topology.elemtobndy, periodicity, boundary, ) nb = length(bndytoelem) dim = length(elemrange) T = eltype(topology.elemtocoord) return BrickTopology{dim, T, nb}(BoxElementTopology{dim, T, nb}( mpicomm, topology.elems, topology.realelems, topology.ghostelems, topology.ghostfaces, topology.sendelems, topology.sendfaces, topology.elemtocoord, topology.elemtoelem, topology.elemtoface, topology.elemtoordr, topology.elemtobndy, topology.nabrtorank, topology.nabrtorecv, topology.nabrtosend, origsendorder, bndytoelem, bndytoface, elemtouvert = topology.elemtovert, )) end """ A wrapper for the StackedBrickTopology """ StackedBrickTopology(mpicomm, Nelems::NTuple{N, Integer}; kw...) where {N} = StackedBrickTopology(mpicomm, map(Ne -> 0:Ne, Nelems); kw...) """ StackedBrickTopology{dim, T}(mpicomm, elemrange; boundary, periodicity) Generate a stacked brick mesh topology with coordinates given by the tuple `elemrange` and the periodic dimensions given by the `periodicity` tuple. The elements are stacked such that the elements associated with range `elemrange[dim]` are contiguous in the element ordering. 
The elements of the brick are partitioned equally across the MPI ranks based on a space-filling curve. Further, stacks are not split at MPI boundaries. By default boundary faces will be marked with a one and other faces with a zero. Specific boundary numbers can also be passed for each face of the brick in `boundary`. This will mark the nonperiodic brick faces with the given boundary number. # Examples We can build a 3 by 2 element two-dimensional mesh that is periodic in the \$x2\$-direction with ```jldoctest brickmesh using ClimateMachine.Topologies using MPI MPI.Init() topology = StackedBrickTopology(MPI.COMM_SELF, (2:5,4:6); periodicity=(false,true), boundary=((1,2),(3,4))) ``` This returns the mesh structure stacked in the \$x2\$-direction for x2 ^ | 6- +-----+-----+-----+ | | | | | | | 2 | 4 | 6 | | | | | | 5- +-----+-----+-----+ | | | | | | | 1 | 3 | 5 | | | | | | 4- +-----+-----+-----+ | +--|-----|-----|-----|--> x1 2 3 4 5 For example, the (dimension by number of corners by number of elements) array `elemtocoord` gives the coordinates of the corners of each element. ```jldoctest brickmesh julia> topology.elemtocoord 2×4×6 Array{Int64,3}: [:, :, 1] = 2 3 2 3 4 4 5 5 [:, :, 2] = 2 3 2 3 5 5 6 6 [:, :, 3] = 3 4 3 4 4 4 5 5 [:, :, 4] = 3 4 3 4 5 5 6 6 [:, :, 5] = 4 5 4 5 4 4 5 5 [:, :, 6] = 4 5 4 5 5 5 6 6 ``` Note that the corners are listed in Cartesian order. The (number of faces by number of elements) array `elemtobndy` gives the boundary number for each face of each element. A zero will be given for connected faces. ```jldoctest brickmesh julia> topology.elemtobndy 4×6 Array{Int64,2}: 1 0 1 0 0 0 0 0 0 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 ``` Note that the faces are listed in Cartesian order. 
""" function StackedBrickTopology( mpicomm, elemrange; boundary = ntuple(j -> (1, 1), length(elemrange)), periodicity = ntuple(j -> false, length(elemrange)), connectivity = :full, ghostsize = 1, ) if boundary isa Matrix boundary = tuple(mapslices(x -> tuple(x...), boundary, dims = 1)...) end dim = length(elemrange) dim <= 1 && error("Stacked brick topology works for 2D and 3D") # Build the base topology basetopo = BrickTopology( mpicomm, elemrange[1:(dim - 1)]; boundary = boundary[1:(dim - 1)], periodicity = periodicity[1:(dim - 1)], connectivity = connectivity, ghostsize = ghostsize, ) # Use the base topology to build the stacked topology stack = elemrange[dim] stacksize = length(stack) - 1 nvert = 2^dim nface = 2dim nreal = length(basetopo.realelems) * stacksize nghost = length(basetopo.ghostelems) * stacksize elems = 1:(nreal + nghost) realelems = 1:nreal ghostelems = nreal .+ (1:nghost) sendelems = similar(basetopo.sendelems, length(basetopo.sendelems) * stacksize) for i in 1:length(basetopo.sendelems), j in 1:stacksize sendelems[stacksize * (i - 1) + j] = stacksize * (basetopo.sendelems[i] - 1) + j end ghostfaces = similar(basetopo.ghostfaces, nface, length(ghostelems)) ghostfaces .= false for i in 1:length(basetopo.ghostelems), j in 1:stacksize e = stacksize * (i - 1) + j for f in 1:(2 * (dim - 1)) ghostfaces[f, e] = basetopo.ghostfaces[f, i] end end sendfaces = similar(basetopo.sendfaces, nface, length(sendelems)) sendfaces .= false for i in 1:length(basetopo.sendelems), j in 1:stacksize e = stacksize * (i - 1) + j for f in 1:(2 * (dim - 1)) sendfaces[f, e] = basetopo.sendfaces[f, i] end end elemtocoord = similar(basetopo.elemtocoord, dim, nvert, length(elems)) for i in 1:length(basetopo.elems), j in 1:stacksize e = stacksize * (i - 1) + j for v in 1:(2^(dim - 1)) for d in 1:(dim - 1) elemtocoord[d, v, e] = basetopo.elemtocoord[d, v, i] elemtocoord[d, 2^(dim - 1) + v, e] = basetopo.elemtocoord[d, v, i] end elemtocoord[dim, v, e] = stack[j] elemtocoord[dim, 
2^(dim - 1) + v, e] = stack[j + 1] end end elemtoelem = similar(basetopo.elemtoelem, nface, length(elems)) elemtoface = similar(basetopo.elemtoface, nface, length(elems)) elemtoordr = similar(basetopo.elemtoordr, nface, length(elems)) elemtobndy = similar(basetopo.elemtobndy, nface, length(elems)) for e in 1:(length(basetopo.elems) * stacksize), f in 1:nface elemtoelem[f, e] = e elemtoface[f, e] = f elemtoordr[f, e] = 1 elemtobndy[f, e] = 0 end for i in 1:length(basetopo.realelems), j in 1:stacksize e1 = stacksize * (i - 1) + j for f in 1:(2 * (dim - 1)) e2 = stacksize * (basetopo.elemtoelem[f, i] - 1) + j elemtoelem[f, e1] = e2 elemtoface[f, e1] = basetopo.elemtoface[f, i] # We assume a simple orientation right now @assert basetopo.elemtoordr[f, i] == 1 elemtoordr[f, e1] = basetopo.elemtoordr[f, i] end et = stacksize * (i - 1) + j + 1 eb = stacksize * (i - 1) + j - 1 ft = 2 * (dim - 1) + 1 fb = 2 * (dim - 1) + 2 ot = 1 ob = 1 if j == stacksize et = periodicity[dim] ? stacksize * (i - 1) + 1 : e1 ft = periodicity[dim] ? ft : 2 * (dim - 1) + 2 end if j == 1 eb = periodicity[dim] ? stacksize * (i - 1) + stacksize : e1 fb = periodicity[dim] ? fb : 2 * (dim - 1) + 1 end elemtoelem[2 * (dim - 1) + 1, e1] = eb elemtoelem[2 * (dim - 1) + 2, e1] = et elemtoface[2 * (dim - 1) + 1, e1] = fb elemtoface[2 * (dim - 1) + 2, e1] = ft elemtoordr[2 * (dim - 1) + 1, e1] = ob elemtoordr[2 * (dim - 1) + 2, e1] = ot end for i in 1:length(basetopo.elems), j in 1:stacksize e1 = stacksize * (i - 1) + j for f in 1:(2 * (dim - 1)) elemtobndy[f, e1] = basetopo.elemtobndy[f, i] end bt = bb = 0 if j == stacksize bt = periodicity[dim] ? bt : boundary[dim][2] end if j == 1 bb = periodicity[dim] ? 
bb : boundary[dim][1] end elemtobndy[2 * (dim - 1) + 1, e1] = bb elemtobndy[2 * (dim - 1) + 2, e1] = bt end nabrtorank = basetopo.nabrtorank nabrtorecv = UnitRange{Int}[ UnitRange( stacksize * (first(basetopo.nabrtorecv[n]) - 1) + 1, stacksize * last(basetopo.nabrtorecv[n]), ) for n in 1:length(nabrtorank) ] nabrtosend = UnitRange{Int}[ UnitRange( stacksize * (first(basetopo.nabrtosend[n]) - 1) + 1, stacksize * last(basetopo.nabrtosend[n]), ) for n in 1:length(nabrtorank) ] bndytoelem, bndytoface = BrickMesh.enumerateboundaryfaces!( elemtoelem, elemtobndy, periodicity, boundary, ) nb = length(bndytoelem) T = eltype(basetopo.elemtocoord) #----setting up DSS---------- if basetopo.elemtouvert isa Array #---setup vertex DSS nvertby2 = div(nvert, 2) elemtouvert = similar(basetopo.elemtouvert, nvert, length(elems)) # base level for i in 1:length(basetopo.elems) e = stacksize * (i - 1) + 1 for vt in 1:nvertby2 elemtouvert[vt, e] = (basetopo.elemtouvert[vt, i] - 1) * (stacksize + 1) + 1 end end # interior levels for i in 1:length(basetopo.elems), j in 1:(stacksize - 1) e = stacksize * (i - 1) + j for vt in 1:nvertby2 elemtouvert[nvertby2 + vt, e] = elemtouvert[vt, e] + 1 elemtouvert[vt, e + 1] = elemtouvert[nvertby2 + vt, e] end end # top level if periodicity[dim] for i in 1:length(basetopo.elems) e = stacksize * i for vt in 1:nvertby2 elemtouvert[nvertby2 + vt, e] = (basetopo.elemtouvert[vt, i] - 1) * (stacksize + 1) + 1 end end else for i in 1:length(basetopo.elems) e = stacksize * i for vt in 1:nvertby2 elemtouvert[nvertby2 + vt, e] = elemtouvert[vt, e] + 1 end end end vtmax = maximum(unique(elemtouvert)) vtconn = map(j -> zeros(Int, j), zeros(Int, vtmax)) for el in elems, lvt in 1:nvert vt = elemtouvert[lvt, el] push!(vtconn[vt], lvt) # local vertex number push!(vtconn[vt], el) # local elem number end # building the vertex connectivity device array vtconnoff = zeros(Int, vtmax + 1) vtconnoff[1] = 1 temp = Int64[] for vt in 1:vtmax nconn = length(vtconn[vt]) if nconn > 
2 vtconnoff[vt + 1] = vtconnoff[vt] + nconn append!(temp, vtconn[vt]) else vtconnoff[vt + 1] = vtconnoff[vt] end end vtconn = temp #---setup face DSS--------------------- fcmarker = -ones(Int, nface, length(elems)) fcno = 0 for el in realelems for fc in 1:nface if elemtobndy[fc, el] == 0 nabrel = elemtoelem[fc, el] nabrfc = elemtoface[fc, el] if fcmarker[fc, el] == -1 && fcmarker[nabrfc, nabrel] == -1 fcno += 1 fcmarker[fc, el] = fcno fcmarker[nabrfc, nabrel] = fcno end end end end fcconn = -ones(Int, fcno, 5) for el in realelems for fc in 1:nface fcno = fcmarker[fc, el] ordr = elemtoordr[fc, el] if fcno ≠ -1 nabrel = elemtoelem[fc, el] nabrfc = elemtoface[fc, el] fcconn[fcno, 1] = fc fcconn[fcno, 2] = el fcconn[fcno, 3] = nabrfc fcconn[fcno, 4] = nabrel fcconn[fcno, 5] = ordr fcmarker[fc, el] = -1 fcmarker[nabrfc, nabrel] = -1 end end end #---setup edge DSS--------------------- if dim == 3 nedge = 12 edgemask = [ 1 3 5 7 1 2 5 6 1 2 3 4 2 4 6 8 3 4 7 8 5 6 7 8 ] edges = Array{Int64}(undef, 6, nedge * length(elems)) ledge = zeros(Int64, 2) uedge = Dict{Array{Int64, 1}, Int64}() orient = 1 uedgno = Int64(0) ctr = 1 for el in elems for edg in 1:nedge orient = 1 for i in 1:2 ledge[i] = elemtouvert[edgemask[i, edg], el] edges[i, ctr] = ledge[i] # edge vertices [1:2] end if ledge[1] > ledge[2] sort!(ledge) orient = 2 end edges[3, ctr] = edg # local edge number edges[4, ctr] = orient # edge orientation edges[5, ctr] = el # element number if haskey(uedge, ledge[:]) edges[6, ctr] = uedge[ledge[:]] # unique edge number else uedgno += 1 uedge[ledge[:]] = uedgno edges[6, ctr] = uedgno end ctr += 1 end end edgconn = map(j -> zeros(Int, j), zeros(Int, uedgno)) ctr = 1 for el in elems for edg in 1:nedge uedg = edges[6, ctr] push!(edgconn[uedg], edg) # local edge number push!(edgconn[uedg], edges[4, ctr]) # orientation push!(edgconn[uedg], el) # element number ctr += 1 end end # remove edges belonging to a single element and edges # belonging exclusively to ghost elements # and 
build edge connectivity device array edgconnoff = Int64[] temp = Int64[] push!(edgconnoff, 1) for i in 1:uedgno elen = length(edgconn[i]) encls = Int(elen / 3) edgmrk = true if encls > 1 for j in 1:encls if edgconn[i][3 * j] ≤ nreal edgmrk = false end end end if edgmrk edgconn[i] = [] else shift = edgconnoff[end] push!(edgconnoff, (shift + length(edgconn[i]))) append!(temp, edgconn[i][:]) end end edgconn = temp else edgconn = nothing edgconnoff = nothing end else elemtouvert = nothing vtconn = nothing vtconnoff = nothing fcconn = nothing edgconn = nothing edgconnoff = nothing end #--------------- StackedBrickTopology{dim, T, nb}( BoxElementTopology{dim, T, nb}( mpicomm, elems, realelems, ghostelems, ghostfaces, sendelems, sendfaces, elemtocoord, elemtoelem, elemtoface, elemtoordr, elemtobndy, nabrtorank, nabrtorecv, nabrtosend, basetopo.origsendorder, bndytoelem, bndytoface, elemtouvert = elemtouvert, vtconn = vtconn, vtconnoff = vtconnoff, fcconn = fcconn, edgconn = edgconn, edgconnoff = edgconnoff, ), stacksize, periodicity[end], ) end """ CubedShellTopology(mpicomm, Nelem, T) <: AbstractTopology{dim,T,nb} Generate a cubed shell mesh with the number of elements along each dimension of the cubes being `Nelem`. This topology actually creates a cube mesh, and the warping should be done after the grid is created using the `cubed_sphere_warp` function. The coordinates of the points will be of type `T`. The elements of the shell are partitioned equally across the MPI ranks based on a space-filling curve. Note that this topology is logically 2-D but embedded in a 3-D space # Examples We can build a cubed shell mesh with 10*10 elements on each cube face, i.e., the total number of elements is `10 * 10 * 6 = 600`, with ```jldoctest brickmesh using ClimateMachine.Topologies using MPI MPI.Init() topology = CubedShellTopology(MPI.COMM_SELF, 10, Float64) # Typically the warping would be done after the grid is created, but the cell # corners could be warped with... 
# Shell radius = 1
x1, x2, x3 = ntuple(j->topology.elemtocoord[j, :, :], 3)
for n = 1:length(x1)
   x1[n], x2[n], x3[n] = Topologies.cubed_sphere_warp(EquiangularCubedSphere(), x1[n], x2[n], x3[n])
end
# in case a unitary equiangular cubed sphere is desired, or

# Shell radius = 10
x1, x2, x3 = ntuple(j->topology.elemtocoord[j, :, :], 3)
for n = 1:length(x1)
   x1[n], x2[n], x3[n] = Topologies.cubed_sphere_warp(EquidistantCubedSphere(), x1[n], x2[n], x3[n], 10)
end
# in case an equidistant cubed sphere of radius 10 is desired.
```
"""
function CubedShellTopology(
    mpicomm,
    Neside,
    T;
    connectivity = :full,
    ghostsize = 1,
)

    # We cannot handle anything else right now...
    @assert ghostsize == 1

    mpirank = MPI.Comm_rank(mpicomm)
    mpisize = MPI.Comm_size(mpicomm)

    # Generate this rank's portion of the (unconnected) shell mesh, then
    # repartition it across ranks with a space-filling curve.
    topology = cubedshellmesh(Neside, part = mpirank + 1, numparts = mpisize)

    topology = BrickMesh.partition(mpicomm, topology...)
    origsendorder = topology[5]

    dim, nvert = 3, 4
    elemtovert = topology[1]
    nelem = size(elemtovert, 2)

    # Map each element corner from its linear vertex id back to (i, j, k)
    # lattice indices and then to coordinates in [-1, 1]^3 on the cube.
    elemtocoord = Array{T}(undef, dim, nvert, nelem)
    ind2vert = CartesianIndices((Neside + 1, Neside + 1, Neside + 1))
    for e in 1:nelem
        for n in 1:nvert
            v = elemtovert[n, e]
            i, j, k = Tuple(ind2vert[v])
            elemtocoord[:, n, e] =
                (2 * [i - 1, j - 1, k - 1] .- Neside) / Neside
        end
    end

    # Establish face-only or full (vertex/edge/face) connectivity, as requested.
    topology = connectivity == :face ?
        BrickMesh.connectmesh(
            mpicomm,
            topology[1],
            elemtocoord,
            topology[3],
            topology[4],
            dim = 2,
        ) :
        BrickMesh.connectmeshfull(
            mpicomm,
            topology[1],
            elemtocoord,
            topology[3],
            topology[4],
            dim = 2,
        )

    CubedShellTopology{T}(BoxElementTopology{2, T, 0}(
        mpicomm,
        topology.elems,
        topology.realelems,
        topology.ghostelems,
        topology.ghostfaces,
        topology.sendelems,
        topology.sendfaces,
        topology.elemtocoord,
        topology.elemtoelem,
        topology.elemtoface,
        topology.elemtoordr,
        topology.elemtobndy,
        topology.nabrtorank,
        topology.nabrtorecv,
        topology.nabrtosend,
        origsendorder,
        (),
        (),
        elemtouvert = topology.elemtovert,
    ))
end

"""
    cubedshellmesh(Ne; part = 1, numparts = 1)

Generate a cubed mesh where each of the "cubes" has an `Ne X Ne` grid of
elements. The mesh can optionally be partitioned into `numparts` and this
returns partition `part`. This is a simple Cartesian partition and further
partitioning (e.g, based on a space-filling curve) should be done before the
mesh is used for computation.

This mesh returns the cubed sphere in a flattened fashion for the vertex values,
and a remapping is needed to embed the mesh in a 3-D space.

The mesh structures for the cubes is as follows:

```
x2
   ^
   |
4Ne-           +-------+
   |           |       |
   |           |   6   |
   |           |       |
3Ne-           +-------+
   |           |       |
   |           |   5   |
   |           |       |
2Ne-           +-------+
   |           |       |
   |           |   4   |
   |           |       |
 Ne-   +-------+-------+-------+
   |   |       |       |       |
   |   |   1   |   2   |   3   |
   |   |       |       |       |
  0-   +-------+-------+-------+
   |
   +---|-------|-------|------|-> x1
       0      Ne      2Ne    3Ne
```
"""
function cubedshellmesh(Ne; part = 1, numparts = 1)
    dim = 2
    @assert 1 <= part <= numparts

    globalnelems = 6 * Ne^2

    # How many vertices and faces per element
    nvert = 2^dim # 4
    nface = 2dim  # 4

    # linearly partition to figure out which elements we own
    elemlocal = BrickMesh.linearpartition(prod(globalnelems), part, numparts)

    # element-to-vertex maps which we own
    elemtovert = Array{Int}(undef, nvert, length(elemlocal))
    elemtocoord = Array{Int}(undef, dim, nvert, length(elemlocal))

    nelemcube = Ne^dim # Ne^2

    # (i, j) position within a face and face number for each global element,
    # plus per-face offsets into the developed-net coordinates of the diagram.
    etoijb = CartesianIndices((Ne, Ne, 6))
    bx = [0 Ne 2Ne Ne Ne Ne]
    by = [0 0 0 Ne 2Ne 3Ne]

    vertmap = LinearIndices((Ne + 1, Ne + 1, Ne + 1))
    for (le, e) in enumerate(elemlocal)
        i, j, blck = Tuple(etoijb[e])
        elemtocoord[1, :, le] = bx[blck] .+ [i - 1 i i - 1 i]
        elemtocoord[2, :, le] = by[blck] .+ [j - 1 j - 1 j j]

        for n in 1:4
            ix = i + mod(n - 1, 2)
            jx = j + div(n - 1, 2)
            # set the vertices like they are the face vertices of a cube
            if blck == 1
                elemtovert[n, le] = vertmap[1, Ne + 2 - ix, jx]
            elseif blck == 2
                elemtovert[n, le] = vertmap[ix, 1, jx]
            elseif blck == 3
                elemtovert[n, le] = vertmap[Ne + 1, ix, jx]
            elseif blck == 4
                elemtovert[n, le] = vertmap[ix, jx, Ne + 1]
            elseif blck == 5
                elemtovert[n, le] = vertmap[ix, Ne + 1, Ne + 2 - jx]
            elseif blck == 6
                elemtovert[n, le] = vertmap[ix, Ne + 2 - jx, 1]
            end
        end
    end

    # no boundaries for a shell
    elemtobndy = zeros(Int, nface, length(elemlocal))

    # no faceconnections for a shell
    faceconnections = Array{Array{Int, 1}}(undef, 0)

    (elemtovert, elemtocoord, elemtobndy, faceconnections, collect(elemlocal))
end

abstract type AbstractCubedSphere end
struct EquiangularCubedSphere <: AbstractCubedSphere end
struct
EquidistantCubedSphere <: AbstractCubedSphere end
struct ConformalCubedSphere <: AbstractCubedSphere end

"""
    cubed_sphere_warp(::EquiangularCubedSphere, a, b, c, R = max(abs(a), abs(b), abs(c)))

Given points `(a, b, c)` on the surface of a cube, warp the points out to a
spherical shell of radius `R` based on the equiangular gnomonic grid proposed by
[Ronchi1996](@cite)
"""
function cubed_sphere_warp(
    ::EquiangularCubedSphere,
    a,
    b,
    c,
    R = max(abs(a), abs(b), abs(c)),
)

    # Equiangular map for one face: (ξ, η) ∈ [-1, 1]^2 → point on the sphere
    # of radius |sR|, with sR carrying the sign of the face normal.
    function f(sR, ξ, η)
        X, Y = tan(π * ξ / 4), tan(π * η / 4)
        ζ1 = sR / sqrt(X^2 + Y^2 + 1)
        ζ2, ζ3 = X * ζ1, Y * ζ1
        ζ1, ζ2, ζ3
    end

    # Select the face by the coordinate of largest magnitude and its sign.
    fdim = argmax(abs.((a, b, c)))
    if fdim == 1 && a < 0
        # (-R, *, *) : formulas for Face I from Ronchi, Iacono, Paolucci (1996)
        #              but for us face II of the developed net of the cube
        x1, x2, x3 = f(-R, b / a, c / a)
    elseif fdim == 2 && b < 0
        # ( *,-R, *) : formulas for Face II from Ronchi, Iacono, Paolucci (1996)
        #              but for us face III of the developed net of the cube
        x2, x1, x3 = f(-R, a / b, c / b)
    elseif fdim == 1 && a > 0
        # ( R, *, *) : formulas for Face III from Ronchi, Iacono, Paolucci (1996)
        #              but for us face IV of the developed net of the cube
        x1, x2, x3 = f(R, b / a, c / a)
    elseif fdim == 2 && b > 0
        # ( *, R, *) : formulas for Face IV from Ronchi, Iacono, Paolucci (1996)
        #              but for us face I of the developed net of the cube
        x2, x1, x3 = f(R, a / b, c / b)
    elseif fdim == 3 && c > 0
        # ( *, *, R) : formulas for Face V from Ronchi, Iacono, Paolucci (1996)
        #              and the same for us on the developed net of the cube
        x3, x2, x1 = f(R, b / c, a / c)
    elseif fdim == 3 && c < 0
        # ( *, *,-R) : formulas for Face VI from Ronchi, Iacono, Paolucci (1996)
        #              and the same for us on the developed net of the cube
        x3, x2, x1 = f(-R, b / c, a / c)
    else
        error("invalid case for cubed_sphere_warp(::EquiangularCubedSphere): $a, $b, $c")
    end

    return x1, x2, x3
end

"""
    equiangular_cubed_sphere_warp(a, b, c, R = max(abs(a), abs(b), abs(c)))

A wrapper function for the cubed_sphere_warp function, when called with the
EquiangularCubedSphere type
"""
equiangular_cubed_sphere_warp(a, b, c, R = max(abs(a), abs(b), abs(c))) =
    cubed_sphere_warp(EquiangularCubedSphere(), a, b, c, R)

"""
    cubed_sphere_unwarp(::EquiangularCubedSphere, x1, x2, x3)

The inverse of [`cubed_sphere_warp`](@ref). This function projects a given point
`(x_1, x_2, x_3)` from the surface of a sphere onto a cube
"""
function cubed_sphere_unwarp(::EquiangularCubedSphere, x1, x2, x3)

    # Inverse of the per-face equiangular map `f` above.
    function g(R, X, Y)
        ξ = atan(X) * 4 / pi
        η = atan(Y) * 4 / pi
        R, R * ξ, R * η
    end

    R = hypot(x1, x2, x3)
    fdim = argmax(abs.((x1, x2, x3)))
    if fdim == 1 && x1 < 0
        # (-R, *, *) : formulas for Face I from Ronchi, Iacono, Paolucci (1996)
        #              but for us face II of the developed net of the cube
        a, b, c = g(-R, x2 / x1, x3 / x1)
    elseif fdim == 2 && x2 < 0
        # ( *,-R, *) : formulas for Face II from Ronchi, Iacono, Paolucci (1996)
        #              but for us face III of the developed net of the cube
        b, a, c = g(-R, x1 / x2, x3 / x2)
    elseif fdim == 1 && x1 > 0
        # ( R, *, *) : formulas for Face III from Ronchi, Iacono, Paolucci (1996)
        #              but for us face IV of the developed net of the cube
        a, b, c = g(R, x2 / x1, x3 / x1)
    elseif fdim == 2 && x2 > 0
        # ( *, R, *) : formulas for Face IV from Ronchi, Iacono, Paolucci (1996)
        #              but for us face I of the developed net of the cube
        b, a, c = g(R, x1 / x2, x3 / x2)
    elseif fdim == 3 && x3 > 0
        # ( *, *, R) : formulas for Face V from Ronchi, Iacono, Paolucci (1996)
        #              and the same for us on the developed net of the cube
        c, b, a = g(R, x2 / x3, x1 / x3)
    elseif fdim == 3 && x3 < 0
        # ( *, *,-R) : formulas for Face VI from Ronchi, Iacono, Paolucci (1996)
        #              and the same for us on the developed net of the cube
        c, b, a = g(-R, x2 / x3, x1 / x3)
    else
        # BUGFIX: this branch previously interpolated `a`, `b`, `c`, which are
        # undefined here, raising UndefVarError instead of the intended message.
        error("invalid case for cubed_sphere_unwarp(::EquiangularCubedSphere): $x1, $x2, $x3")
    end

    return a, b, c
end

"""
    equiangular_cubed_sphere_unwarp(x1, x2, x3)

A wrapper function for the cubed_sphere_unwarp function, when called with the
EquiangularCubedSphere type
"""
equiangular_cubed_sphere_unwarp(x1, x2, x3) =
    cubed_sphere_unwarp(EquiangularCubedSphere(), x1, x2, x3)

"""
    cubed_sphere_warp(::EquidistantCubedSphere, a, b, c, R = max(abs(a), abs(b), abs(c)))

Given points `(a, b, c)` on the surface of a cube, warp the points out to a
spherical shell of radius `R` based on the equidistant gnomonic grid outlined in
[Rancic1996](@cite) and [Nair2005](@cite)
"""
function cubed_sphere_warp(
    ::EquidistantCubedSphere,
    a,
    b,
    c,
    R = max(abs(a), abs(b), abs(c)),
)
    # Radial projection of the cube point onto the sphere of radius R.
    r = hypot(a, b, c)
    x1 = R * a / r
    x2 = R * b / r
    x3 = R * c / r

    return x1, x2, x3
end

"""
    equidistant_cubed_sphere_warp(a, b, c, R = max(abs(a), abs(b), abs(c)))

A wrapper function for the cubed_sphere_warp function, when called with the
EquidistantCubedSphere type
"""
equidistant_cubed_sphere_warp(a, b, c, R = max(abs(a), abs(b), abs(c))) =
    cubed_sphere_warp(EquidistantCubedSphere(), a, b, c, R)

"""
    cubed_sphere_unwarp(::EquidistantCubedSphere, x1, x2, x3)

The inverse of [`cubed_sphere_warp`](@ref). This function projects a given point
`(x_1, x_2, x_3)` from the surface of a sphere onto a cube
"""
function cubed_sphere_unwarp(::EquidistantCubedSphere, x1, x2, x3)
    # The equidistant (gnomonic) warp preserves the direction of the position
    # vector, so unwarping is a pure radial rescaling onto the cube whose
    # half-width equals the sphere radius R = |x|.
    #
    # BUGFIX: the previous implementation divided by `x1`, which produced NaN
    # results for valid inputs with `x1 == 0`; this form is algebraically
    # identical for `x1 != 0` and well defined for any nonzero point.
    R = hypot(x1, x2, x3)
    m = max(abs(x1), abs(x2), abs(x3))

    return x1 * R / m, x2 * R / m, x3 * R / m
end

"""
    equidistant_cubed_sphere_unwarp(x1, x2, x3)

A wrapper function for the cubed_sphere_unwarp function, when called with the
EquidistantCubedSphere type
"""
equidistant_cubed_sphere_unwarp(x1, x2, x3) =
    cubed_sphere_unwarp(EquidistantCubedSphere(), x1, x2, x3)

"""
    cubed_sphere_warp(::ConformalCubedSphere, a, b, c, R = max(abs(a), abs(b), abs(c)))

Given points `(a, b, c)` on the surface of a cube, warp the points out to a
spherical shell of radius `R` based on the conformal grid proposed by
[Rancic1996](@cite)
"""
function cubed_sphere_warp(
    ::ConformalCubedSphere,
    a,
    b,
    c,
    R = max(abs(a), abs(b), abs(c)),
)
    fdim = argmax(abs.((a, b, c)))
    M = max(abs.((a, b, c))...)
if fdim == 1 && a < 0
        # left face
        x1, x2, x3 = conformal_cubed_sphere_mapping(-b / M, c / M)
        x1, x2, x3 = RotX(π / 2) * RotY(-π / 2) * [x1, x2, x3]
    elseif fdim == 2 && b < 0
        # front face
        x1, x2, x3 = conformal_cubed_sphere_mapping(a / M, c / M)
        x1, x2, x3 = RotX(π / 2) * [x1, x2, x3]
    elseif fdim == 1 && a > 0
        # right face
        x1, x2, x3 = conformal_cubed_sphere_mapping(b / M, c / M)
        x1, x2, x3 = RotX(π / 2) * RotY(π / 2) * [x1, x2, x3]
    elseif fdim == 2 && b > 0
        # back face
        x1, x2, x3 = conformal_cubed_sphere_mapping(a / M, -c / M)
        x1, x2, x3 = RotX(-π / 2) * [x1, x2, x3]
    elseif fdim == 3 && c > 0
        # top face
        x1, x2, x3 = conformal_cubed_sphere_mapping(a / M, b / M)
    elseif fdim == 3 && c < 0
        # bottom face
        x1, x2, x3 = conformal_cubed_sphere_mapping(a / M, -b / M)
        x1, x2, x3 = RotX(π) * [x1, x2, x3]
    else
        error("invalid case for cubed_sphere_warp(::ConformalCubedSphere): $a, $b, $c")
    end

    return x1 * R, x2 * R, x3 * R
end

"""
    conformal_cubed_sphere_warp(a, b, c, R = max(abs(a), abs(b), abs(c)))

A wrapper function for the cubed_sphere_warp function, when called with the
ConformalCubedSphere type
"""
conformal_cubed_sphere_warp(a, b, c, R = max(abs(a), abs(b), abs(c))) =
    cubed_sphere_warp(ConformalCubedSphere(), a, b, c, R)

"""
    cubed_sphere_unwarp(::ConformalCubedSphere, x1, x2, x3)

The inverse of [`cubed_sphere_warp`](@ref). This function projects a given point
`(x_1, x_2, x_3)` from the surface of a sphere onto a cube [Rancic1996](@cite)
"""
function cubed_sphere_unwarp(::ConformalCubedSphere, x1, x2, x3)

    # Auxiliary function that flips coordinates, if needed, to prepare input
    # arguments in the correct quadrant for the
    # `conformal_cubed_sphere_inverse_mapping` function. Then, flips the output
    # of `conformal_cubed_sphere_inverse_mapping` back to the original face and
    # scales the coordinates so that the result is on the cube.
    function flip_unwarp_scale(x1, x2, x3)
        R = hypot(x1, x2, x3)
        flipx1, flipx2 = false, false
        if x1 < 0
            # flip the point around x2 axis
            x1 = -x1
            flipx1 = true
        end
        if x2 < 0
            # flip the point around x1 axis
            x2 = -x2
            flipx2 = true
        end
        a, b = conformal_cubed_sphere_inverse_mapping(x1 / R, x2 / R, x3 / R)
        if flipx1
            a = -a
        end
        if flipx2
            b = -b
        end
        # Rescale to desired length
        a *= R
        b *= R
        # Since we were treating coordinates on the top face of the cube, the c
        # coordinate must have the top-face z value (z = R)
        c = R
        return a, b, c
    end

    fdim = argmax(abs.((x1, x2, x3)))
    if fdim == 1 && x1 < 0
        # left face
        # rotate to align with top face
        x1, x2, x3 = RotY(π / 2) * RotX(-π / 2) * [x1, x2, x3]
        # call the unwarp function, with appropriate flipping and scaling
        a, b, c = flip_unwarp_scale(x1, x2, x3)
        # rotate back
        a, b, c = RotX(π / 2) * RotY(-π / 2) * [a, b, c]
    elseif fdim == 2 && x2 < 0
        # front face
        # rotate to align with top face
        x1, x2, x3 = RotX(-π / 2) * [x1, x2, x3]
        # call the unwarp function, with appropriate flipping and scaling
        a, b, c = flip_unwarp_scale(x1, x2, x3)
        # rotate back
        a, b, c = RotX(π / 2) * [a, b, c]
    elseif fdim == 1 && x1 > 0
        # right face
        # rotate to align with top face
        x1, x2, x3 = RotZ(-π / 2) * RotY(-π / 2) * [x1, x2, x3]
        # call the unwarp function, with appropriate flipping and scaling
        a, b, c = flip_unwarp_scale(x1, x2, x3)
        # rotate back
        a, b, c = RotY(π / 2) * RotZ(π / 2) * [a, b, c]
    elseif fdim == 2 && x2 > 0
        # back face
        # rotate to align with top face
        x1, x2, x3 = RotZ(π) * RotX(π / 2) * [x1, x2, x3]
        # call the unwarp function, with appropriate flipping and scaling
        a, b, c = flip_unwarp_scale(x1, x2, x3)
        # rotate back
        a, b, c = RotX(-π / 2) * RotZ(-π) * [a, b, c]
    elseif fdim == 3 && x3 > 0
        # top face
        # already on top face, no need to rotate
        a, b, c = flip_unwarp_scale(x1, x2, x3)
    elseif fdim == 3 && x3 < 0
        # bottom face
        # rotate to align with top face
        x1, x2, x3 = RotX(π) * [x1, x2, x3]
        # call the unwarp function, with appropriate flipping and scaling
        a, b, c = flip_unwarp_scale(x1, x2, x3)
        # rotate back
        a, b, c = RotX(-π) * [a, b, c]
    else
        # BUGFIX: this branch previously interpolated `a`, `b`, `c`, which are
        # undefined here, raising UndefVarError instead of the intended message.
        error("invalid case for cubed_sphere_unwarp(::ConformalCubedSphere): $x1, $x2, $x3")
    end

    return a, b, c
end

"""
    conformal_cubed_sphere_unwarp(x1, x2, x3)

A wrapper function for the cubed_sphere_unwarp function, when called with the
ConformalCubedSphere type
"""
conformal_cubed_sphere_unwarp(x1, x2, x3) =
    cubed_sphere_unwarp(ConformalCubedSphere(), x1, x2, x3)

"""
    StackedCubedSphereTopology(mpicomm, Nhorz, Rrange; boundary=(1,1)) <: AbstractTopology{3}

Generate a stacked cubed sphere topology with `Nhorz` by `Nhorz` cells for each
horizontal face and `Rrange` is the radius edges of the stacked elements. This
topology actually creates a cube mesh, and the warping should be done after the
grid is created using the `cubed_sphere_warp` function. The coordinates of the
points will be of type `eltype(Rrange)`. The inner boundary condition type is
`boundary[1]` and the outer boundary condition type is `boundary[2]`.

The elements are stacked such that the vertical elements are contiguous in the
element ordering.

The elements of the brick are partitioned equally across the MPI ranks based
on a space-filling curve. Further, stacks are not split at MPI boundaries.
# Examples

We can build a cubed sphere mesh with 10 x 10 x 5 elements on each cube face,
i.e., the total number of elements is `10 * 10 * 5 * 6 = 3000`, with

```jldoctest brickmesh
using ClimateMachine.Topologies
using MPI
MPI.Init()
Nhorz = 10
Nstack = 5
Rrange = Float64.(accumulate(+,1:Nstack+1))
topology = StackedCubedSphereTopology(MPI.COMM_SELF, Nhorz, Rrange)

x1, x2, x3 = ntuple(j->reshape(topology.elemtocoord[j, :, :], 2, 2, 2, length(topology.elems)), 3)
for n = 1:length(x1)
   x1[n], x2[n], x3[n] = Topologies.cubed_sphere_warp(EquiangularCubedSphere(),x1[n], x2[n], x3[n])
end
```
Note that the faces are listed in Cartesian order.
"""
function StackedCubedSphereTopology(
    mpicomm,
    Nhorz,
    Rrange;
    boundary = (1, 1),
    connectivity = :full,
    ghostsize = 1,
)
    T = eltype(Rrange)

    # Horizontal (shell) base topology; the radial direction is stacked on top.
    basetopo = CubedShellTopology(
        mpicomm,
        Nhorz,
        T;
        connectivity = connectivity,
        ghostsize = ghostsize,
    )

    dim = 3
    nvert = 2^dim
    nface = 2dim
    stacksize = length(Rrange) - 1

    nreal = length(basetopo.realelems) * stacksize
    nghost = length(basetopo.ghostelems) * stacksize

    elems = 1:(nreal + nghost)
    realelems = 1:nreal
    ghostelems = nreal .+ (1:nghost)

    # Throughout, stacked element `e = stacksize * (i - 1) + j` is radial
    # level `j` of the stack sitting on base element `i`, so each vertical
    # stack is contiguous in the element numbering.
    sendelems =
        similar(basetopo.sendelems, length(basetopo.sendelems) * stacksize)

    for i in 1:length(basetopo.sendelems), j in 1:stacksize
        sendelems[stacksize * (i - 1) + j] =
            stacksize * (basetopo.sendelems[i] - 1) + j
    end

    ghostfaces = similar(basetopo.ghostfaces, nface, length(ghostelems))
    ghostfaces .= false

    # Only horizontal faces (1:2*(dim-1)) inherit ghost-face flags.
    for i in 1:length(basetopo.ghostelems), j in 1:stacksize
        e = stacksize * (i - 1) + j
        for f in 1:(2 * (dim - 1))
            ghostfaces[f, e] = basetopo.ghostfaces[f, i]
        end
    end

    sendfaces = similar(basetopo.sendfaces, nface, length(sendelems))
    sendfaces .= false

    for i in 1:length(basetopo.sendelems), j in 1:stacksize
        e = stacksize * (i - 1) + j
        for f in 1:(2 * (dim - 1))
            sendfaces[f, e] = basetopo.sendfaces[f, i]
        end
    end

    elemtocoord = similar(basetopo.elemtocoord, dim, nvert, length(elems))

    for i in 1:length(basetopo.elems), j in 1:stacksize
        # i is base element
        # e is stacked element
        e = stacksize * (i - 1) + j

        # v is base vertex
        for v in 1:(2^(dim - 1))
            for d in 1:dim # dim here since shell is embedded in 3-D
                # v is lower stacked vertex
                elemtocoord[d, v, e] =
                    basetopo.elemtocoord[d, v, i] * Rrange[j]

                # 2^(dim-1) + v is higher stacked vertex
                elemtocoord[d, 2^(dim - 1) + v, e] =
                    basetopo.elemtocoord[d, v, i] * Rrange[j + 1]
            end
        end
    end

    elemtoelem = similar(basetopo.elemtoelem, nface, length(elems))
    elemtoface = similar(basetopo.elemtoface, nface, length(elems))
    elemtoordr = similar(basetopo.elemtoordr, nface, length(elems))
    elemtobndy = similar(basetopo.elemtobndy, nface, length(elems))

    # Default initialization: self-connected faces, trivial orientation,
    # no boundary tag; real connections are filled in below.
    for e in 1:(length(basetopo.elems) * stacksize), f in 1:nface
        elemtoelem[f, e] = e
        elemtoface[f, e] = f
        elemtoordr[f, e] = 1
        elemtobndy[f, e] = 0
    end

    for i in 1:length(basetopo.realelems), j in 1:stacksize
        e1 = stacksize * (i - 1) + j

        # Horizontal neighbors: same radial level of the neighboring stack.
        for f in 1:(2 * (dim - 1))
            e2 = stacksize * (basetopo.elemtoelem[f, i] - 1) + j

            elemtoelem[f, e1] = e2
            elemtoface[f, e1] = basetopo.elemtoface[f, i]

            # since the basetopo is 2-D we only need to worry about two
            # orientations
            @assert basetopo.elemtoordr[f, i] ∈ (1, 2)
            #= orientation 1:
                  2---3     2---3
                  |   | --> |   |
                  0---1     0---1
               same:
                  (a,b) --> (a,b)

               orientation 3:
                  2---3     3---2
                  |   | --> |   |
                  0---1     1---0
               reverse first index:
                  (a,b) --> (N+1-a,b)
            =#
            elemtoordr[f, e1] = basetopo.elemtoordr[f, i] == 1 ? 1 : 3
        end

        # If top or bottom of stack set neighbor to self on respective face
        elemtoelem[2 * (dim - 1) + 1, e1] =
            j == 1 ? e1 : stacksize * (i - 1) + j - 1
        elemtoelem[2 * (dim - 1) + 2, e1] =
            j == stacksize ? e1 : stacksize * (i - 1) + j + 1
        elemtoface[2 * (dim - 1) + 1, e1] =
            j == 1 ? 2 * (dim - 1) + 1 : 2 * (dim - 1) + 2
        elemtoface[2 * (dim - 1) + 2, e1] =
            j == stacksize ? 2 * (dim - 1) + 2 : 2 * (dim - 1) + 1
        elemtoordr[2 * (dim - 1) + 1, e1] = 1
        elemtoordr[2 * (dim - 1) + 2, e1] = 1
    end

    # Set the top and bottom boundary condition
    for i in 1:length(basetopo.elems)
        eb = stacksize * (i - 1) + 1
        et = stacksize * (i - 1) + stacksize
        elemtobndy[2 * (dim - 1) + 1, eb] = boundary[1]
        elemtobndy[2 * (dim - 1) + 2, et] = boundary[2]
    end

    # MPI neighbor exchange ranges scale by `stacksize` since whole stacks
    # are communicated together.
    nabrtorank = basetopo.nabrtorank
    nabrtorecv = UnitRange{Int}[
        UnitRange(
            stacksize * (first(basetopo.nabrtorecv[n]) - 1) + 1,
            stacksize * last(basetopo.nabrtorecv[n]),
        ) for n in 1:length(nabrtorank)
    ]
    nabrtosend = UnitRange{Int}[
        UnitRange(
            stacksize * (first(basetopo.nabrtosend[n]) - 1) + 1,
            stacksize * last(basetopo.nabrtosend[n]),
        ) for n in 1:length(nabrtorank)
    ]

    bndytoelem, bndytoface = BrickMesh.enumerateboundaryfaces!(
        elemtoelem,
        elemtobndy,
        (false,),
        (boundary,),
    )
    nb = length(bndytoelem)

    #----setting up DSS (only when the base topology carries the unique-vertex
    #    map; presumably DSS = direct stiffness summation — confirm)----------
    if basetopo.elemtouvert isa Array
        #---setup vertex DSS
        # Unique stacked-vertex numbering: base vertex `v` expands into the
        # column (v - 1) * (stacksize + 1) + 1 : v * (stacksize + 1).
        nvertby2 = div(nvert, 2)
        elemtouvert = similar(basetopo.elemtouvert, nvert, length(elems))
        # base level
        for i in 1:length(basetopo.elems)
            e = stacksize * (i - 1) + 1
            for vt in 1:nvertby2
                elemtouvert[vt, e] =
                    (basetopo.elemtouvert[vt, i] - 1) * (stacksize + 1) + 1
            end
        end
        # interior levels
        for i in 1:length(basetopo.elems), j in 1:(stacksize - 1)
            e = stacksize * (i - 1) + j
            for vt in 1:nvertby2
                elemtouvert[nvertby2 + vt, e] = elemtouvert[vt, e] + 1
                elemtouvert[vt, e + 1] = elemtouvert[nvertby2 + vt, e]
            end
        end
        # top level (never periodic in the radial direction)
        for i in 1:length(basetopo.elems)
            e = stacksize * i
            for vt in 1:nvertby2
                elemtouvert[nvertby2 + vt, e] = elemtouvert[vt, e] + 1
            end
        end

        vtmax = maximum(unique(elemtouvert))
        vtconn = map(j -> zeros(Int, j), zeros(Int, vtmax))

        for el in elems, lvt in 1:nvert
            vt = elemtouvert[lvt, el]
            push!(vtconn[vt], lvt) # local vertex number
            push!(vtconn[vt], el)  # local elem number
        end

        # building the vertex connectivity device array; vertices touched by
        # only one element (nconn == 2 entries) are dropped
        vtconnoff = zeros(Int, vtmax + 1)
        vtconnoff[1] = 1
        temp = Int64[]

        for vt in 1:vtmax
            nconn = length(vtconn[vt])
            if nconn > 2
                vtconnoff[vt + 1] = vtconnoff[vt] + nconn
                append!(temp, vtconn[vt])
            else
                vtconnoff[vt + 1] = vtconnoff[vt]
            end
        end
        vtconn = temp

        #---setup face DSS---------------------
        # First pass assigns a unique number to each interior face pair;
        # second pass records (face, elem, nbrface, nbrelem, orientation).
        fcmarker = -ones(Int, nface, length(elems))
        fcno = 0
        for el in realelems
            for fc in 1:nface
                if elemtobndy[fc, el] == 0
                    nabrel = elemtoelem[fc, el]
                    nabrfc = elemtoface[fc, el]
                    if fcmarker[fc, el] == -1 && fcmarker[nabrfc, nabrel] == -1
                        fcno += 1
                        fcmarker[fc, el] = fcno
                        fcmarker[nabrfc, nabrel] = fcno
                    end
                end
            end
        end

        fcconn = -ones(Int, fcno, 5)
        for el in realelems
            for fc in 1:nface
                fcno = fcmarker[fc, el]
                ordr = elemtoordr[fc, el]
                if fcno ≠ -1
                    nabrel = elemtoelem[fc, el]
                    nabrfc = elemtoface[fc, el]
                    fcconn[fcno, 1] = fc
                    fcconn[fcno, 2] = el
                    fcconn[fcno, 3] = nabrfc
                    fcconn[fcno, 4] = nabrel
                    fcconn[fcno, 5] = ordr
                    fcmarker[fc, el] = -1
                    fcmarker[nabrfc, nabrel] = -1
                end
            end
        end

        #---setup edge DSS---------------------
        if dim == 3
            nedge = 12
            # edgemask[1:2, edg] are the two local vertex numbers of edge edg
            edgemask = [
                1 3 5 7 1 2 5 6 1 2 3 4
                2 4 6 8 3 4 7 8 5 6 7 8
            ]
            edges = Array{Int64}(undef, 6, nedge * length(elems))
            ledge = zeros(Int64, 2)
            uedge = Dict{Array{Int64, 1}, Int64}()
            orient = 1
            uedgno = Int64(0)
            ctr = 1

            for el in elems
                for edg in 1:nedge
                    orient = 1
                    for i in 1:2
                        ledge[i] = elemtouvert[edgemask[i, edg], el]
                        edges[i, ctr] = ledge[i] # edge vertices [1:2]
                    end
                    # canonical (sorted) vertex pair keys the edge; orient = 2
                    # marks a local edge opposite to the key order
                    if ledge[1] > ledge[2]
                        sort!(ledge)
                        orient = 2
                    end
                    edges[3, ctr] = edg    # local edge number
                    edges[4, ctr] = orient # edge orientation
                    edges[5, ctr] = el     # element number
                    if haskey(uedge, ledge[:])
                        edges[6, ctr] = uedge[ledge[:]] # unique edge number
                    else
                        uedgno += 1
                        uedge[ledge[:]] = uedgno
                        edges[6, ctr] = uedgno
                    end
                    ctr += 1
                end
            end

            edgconn = map(j -> zeros(Int, j), zeros(Int, uedgno))
            ctr = 1
            for el in elems
                for edg in 1:nedge
                    uedg = edges[6, ctr]
                    push!(edgconn[uedg], edg)           # local edge number
                    push!(edgconn[uedg], edges[4, ctr]) # orientation
                    push!(edgconn[uedg], el)            # element number
                    ctr += 1
                end
            end

            # remove edges belonging to a single element and edges
            # belonging exclusively to ghost elements
            # and build edge connectivity device array
            edgconnoff = Int64[]
            temp = Int64[]
            push!(edgconnoff, 1)

            for i in 1:uedgno
                elen = length(edgconn[i])
                encls = Int(elen / 3) # entries come in (edge, orient, elem) triples
                edgmrk = true
                if encls > 1
                    for j in 1:encls
                        if edgconn[i][3 * j] ≤ nreal
                            edgmrk = false
                        end
                    end
                end
                if edgmrk
                    edgconn[i] = []
                else
                    shift = edgconnoff[end]
                    push!(edgconnoff, (shift + length(edgconn[i])))
                    append!(temp, edgconn[i][:])
                end
            end
            edgconn = temp
        else
            edgconn = nothing
            edgconnoff = nothing
        end
        #-------------------------------------
    else
        elemtouvert = nothing
        vtconn = nothing
        vtconnoff = nothing
        fcconn = nothing
        edgconn = nothing
        edgconnoff = nothing
    end
    #-----------------------------

    StackedCubedSphereTopology{T, nb}(
        BoxElementTopology{3, T, nb}(
            mpicomm,
            elems,
            realelems,
            ghostelems,
            ghostfaces,
            sendelems,
            sendfaces,
            elemtocoord,
            elemtoelem,
            elemtoface,
            elemtoordr,
            elemtobndy,
            nabrtorank,
            nabrtorecv,
            nabrtosend,
            basetopo.origsendorder,
            bndytoelem,
            bndytoface,
            elemtouvert = elemtouvert,
            vtconn = vtconn,
            vtconnoff = vtconnoff,
            fcconn = fcconn,
            edgconn = edgconn,
            edgconnoff = edgconnoff,
        ),
        stacksize,
    )
end

"""
    grid1d(a, b[, stretch::AbstractGridStretching]; elemsize, nelem)

Discretize the 1D interval [`a`,`b`] into elements.
Exactly one of the following keyword arguments must be provided:
- `elemsize`: the average element size, or
- `nelem`: the number of elements.

The optional `stretch` argument allows stretching, otherwise the element sizes
will be uniform.

Returns either a range object or a vector containing the element boundaries.
""" function grid1d(a, b, stretch = nothing; elemsize = nothing, nelem = nothing) xor(nelem === nothing, elemsize === nothing) || error("Either `elemsize` or `nelem` arguments must be provided") if elemsize !== nothing nelem = round(Int, abs(b - a) / elemsize) end grid1d(a, b, stretch, nelem) end function grid1d(a, b, ::Nothing, nelem) range(a, stop = b, length = nelem + 1) end # TODO: document these abstract type AbstractGridStretching end """ SingleExponentialStretching(A) Apply single-exponential stretching: `A > 0` will increase the density of points at the lower boundary, `A < 0` will increase the density at the upper boundary. # Reference * "Handbook of Grid Generation" J. F. Thompson, B. K. Soni, N. P. Weatherill (Editors) RCR Press 1999, §3.6.1 Single-Exponential Function """ struct SingleExponentialStretching{T} <: AbstractGridStretching A::T end function grid1d( a::A, b::B, stretch::SingleExponentialStretching, nelem, ) where {A, B} F = float(promote_type(A, B)) s = range(zero(F), stop = one(F), length = nelem + 1) a .+ (b - a) .* expm1.(stretch.A .* s) ./ expm1(stretch.A) end struct InteriorStretching{T} <: AbstractGridStretching attractor::T end function grid1d(a::A, b::B, stretch::InteriorStretching, nelem) where {A, B} F = float(promote_type(A, B)) coe = F(2.5) s = range(zero(F), stop = one(F), length = nelem + 1) range(a, stop = b, length = nelem + 1) .+ coe .* (stretch.attractor .- (b - a) .* s) .* (1 .- s) .* s end function basic_topology_info(topology::AbstractStackedTopology) nelem = length(topology.elems) nvertelem = topology.stacksize nhorzelem = div(nelem, nvertelem) nrealelem = length(topology.realelems) nhorzrealelem = div(nrealelem, nvertelem) return ( nelem = nelem, nvertelem = nvertelem, nhorzelem = nhorzelem, nrealelem = nrealelem, nhorzrealelem = nhorzrealelem, ) end function basic_topology_info(topology::AbstractTopology) return ( nelem = length(topology.elems), nrealelem = length(topology.realelems), ) end ### Helper Functions for 
# Topography Calculations

"""
    compute_lat_long(X,Y,δ,faceid)

Helper function to allow computation of latitute and longitude coordinates
given the cubed sphere coordinates X, Y, δ, faceid
"""
function compute_lat_long(X, Y, δ, faceid)
    # Each of the six cube faces maps (X, Y) to (λ, ϕ) with its own longitude
    # offset; faces 5/6 are the polar caps, where λ comes from the two-argument
    # atan and ϕ from δ = 1 + X^2 + Y^2.
    if faceid == 1
        λ = atan(X)          # longitude
        ϕ = atan(cos(λ) * Y) # latitude
    elseif faceid == 2
        λ = atan(X) + π / 2
        ϕ = atan(Y * cos(atan(X)))
    elseif faceid == 3
        λ = atan(X) + π
        ϕ = atan(Y * cos(atan(X)))
    elseif faceid == 4
        λ = atan(X) + (3 / 2) * π
        ϕ = atan(Y * cos(atan(X)))
    elseif faceid == 5
        # North polar cap
        λ = atan(X, -Y) + π
        ϕ = atan(1 / sqrt(δ - 1))
    elseif faceid == 6
        # South polar cap (negative latitude)
        λ = atan(X, Y)
        ϕ = -atan(1 / sqrt(δ - 1))
    end
    # NOTE(review): λ is left undefined if faceid ∉ 1:6 — callers must pass a
    # valid face index.
    return λ, ϕ
end

"""
    AnalyticalTopography

Abstract type to allow dispatch over different analytical topography
prescriptions in experiments.
"""
abstract type AnalyticalTopography end

# Fallback: no topography modification, return the signed radius unchanged.
function compute_analytical_topography(
    ::AnalyticalTopography,
    λ,
    ϕ,
    sR,
    r_inner,
    r_outer,
)
    return sR
end

"""
    NoTopography <: AnalyticalTopography

Allows definition of fallback methods in case cubed_sphere_topo_warp is used
with no prescribed topography function.
"""
struct NoTopography <: AnalyticalTopography end

### DCMIP Mountain
"""
    DCMIPMountain <: AnalyticalTopography

Topography description based on standard DCMIP experiments.
""" struct DCMIPMountain <: AnalyticalTopography end function compute_analytical_topography( ::DCMIPMountain, λ, ϕ, sR, r_inner, r_outer, ) #User specified warp parameters R_m = π * 3 / 4 h0 = 2000 ζ_m = π / 16 φ_m = 0 λ_m = π * 3 / 2 r_m = acos(sin(φ_m) * sin(ϕ) + cos(φ_m) * cos(ϕ) * cos(λ - λ_m)) # Define mesh decay profile Δ = (r_outer - abs(sR)) / (r_outer - r_inner) if r_m < R_m zs = 0.5 * h0 * (1 + cos(π * r_m / R_m)) * cos(π * r_m / ζ_m) * cos(π * r_m / ζ_m) else zs = 0.0 end mR = sign(sR) * (abs(sR) + zs * Δ) return mR end """ cubed_sphere_topo_warp(a, b, c, R = max(abs(a), abs(b), abs(c)); r_inner = _planet_radius, r_outer = _planet_radius + domain_height, topography = NoTopography()) Given points `(a, b, c)` on the surface of a cube, warp the points out to a spherical shell of radius `R` based on the equiangular gnomonic grid proposed by [Ronchi1996](@cite). Assumes a user specified modified radius using the compute_analytical_topography function. Defaults to smooth cubed sphere unless otherwise specified via the AnalyticalTopography type. 
""" function cubed_sphere_topo_warp( a, b, c, R = max(abs(a), abs(b), abs(c)); r_inner = _planet_radius, r_outer = _planet_radius + domain_height, topography::AnalyticalTopography = NoTopography(), ) function f(sR, ξ, η, faceid) X, Y = tan(π * ξ / 4), tan(π * η / 4) δ = 1 + X^2 + Y^2 λ, ϕ = compute_lat_long(X, Y, δ, faceid) mR = compute_analytical_topography( topography, λ, ϕ, sR, r_inner, r_outer, ) x1 = mR / sqrt(δ) x2, x3 = X * x1, Y * x1 x1, x2, x3 end fdim = argmax(abs.((a, b, c))) if fdim == 1 && a < 0 faceid = 1 # (-R, *, *) : Face I from Ronchi, Iacono, Paolucci (1996) x1, x2, x3 = f(-R, b / a, c / a, faceid) elseif fdim == 2 && b < 0 faceid = 2 # ( *,-R, *) : Face II from Ronchi, Iacono, Paolucci (1996) x2, x1, x3 = f(-R, a / b, c / b, faceid) elseif fdim == 1 && a > 0 faceid = 3 # ( R, *, *) : Face III from Ronchi, Iacono, Paolucci (1996) x1, x2, x3 = f(R, b / a, c / a, faceid) elseif fdim == 2 && b > 0 faceid = 4 # ( *, R, *) : Face IV from Ronchi, Iacono, Paolucci (1996) x2, x1, x3 = f(R, a / b, c / b, faceid) elseif fdim == 3 && c > 0 faceid = 5 # ( *, *, R) : Face V from Ronchi, Iacono, Paolucci (1996) x3, x2, x1 = f(R, b / c, a / c, faceid) elseif fdim == 3 && c < 0 faceid = 6 # ( *, *,-R) : Face VI from Ronchi, Iacono, Paolucci (1996) x3, x2, x1 = f(-R, b / c, a / c, faceid) else error("invalid case for cubed_sphere_warp(::EquiangularCubedSphere): $a, $b, $c") end return x1, x2, x3 end end ================================================ FILE: src/Numerics/ODESolvers/AdditiveRungeKuttaMethod.jl ================================================ export AbstractAdditiveRungeKutta export LowStorageVariant, NaiveVariant export AdditiveRungeKutta export ARK1ForwardBackwardEuler export ARK2ImplicitExplicitMidpoint export ARK2GiraldoKellyConstantinescu export ARK548L2SA2KennedyCarpenter, ARK437L2SA1KennedyCarpenter export Trap2LockWoodWeller export DBM453VoglEtAl # Naive formulation that uses equation 3.8 from Giraldo, Kelly, and # Constantinescu (2013) 
# directly. Seems to cut the number of solver iterations
# by half but requires Nstages - 1 additional storage.
struct NaiveVariant end

# Naive variant stores one implicit-tendency vector per stage.
additional_storage(::NaiveVariant, Q, Nstages) =
    (Lstages = ntuple(i -> similar(Q), Nstages),)

# Formulation that does things exactly as in Giraldo, Kelly, and Constantinescu
# (2013). Uses only one additional vector of storage regardless of the number
# of stages.
struct LowStorageVariant end

# Low-storage variant only needs a single scratch vector.
additional_storage(::LowStorageVariant, Q, Nstages) = (Qtt = similar(Q),)

abstract type AbstractAdditiveRungeKutta <: AbstractODESolver end

"""
    AdditiveRungeKutta(f, l, backward_euler_solver, RKAe, RKAi, RKB, RKC, Q;
                       split_explicit_implicit, variant, dt, t0 = 0)

This is a time stepping object for implicit-explicit time stepping of a
decomposed differential equation. When `split_explicit_implicit == false`
the equation is assumed to be decomposed as

```math
\\dot{Q} = [l(Q, t)] + [f(Q, t) - l(Q, t)]
```

where `Q` is the state, `f` is the full tendency and `l` is the chosen implicit
operator. When `split_explicit_implicit == true` the assumed decomposition is

```math
\\dot{Q} = [l(Q, t)] + [f(Q, t)]
```

where `f` is now only the nonlinear tendency. For both decompositions the
implicit operator `l` is integrated implicitly whereas the remaining part is
integrated explicitly. Other arguments are the required time step size `dt` and
the optional initial time `t0`. The resulting backward Euler type systems are
solved using the provided `backward_euler_solver`.

This time stepping object is intended to be passed to the `solve!` command.

The constructor builds an additive Runge--Kutta scheme based on the provided
`RKAe`, `RKAi`, `RKB` and `RKC` coefficient arrays. Additionally `variant`
specifies which of the analytically equivalent but numerically different
formulations of the scheme is used.
The available concrete implementations are:

  - [`ARK1ForwardBackwardEuler`](@ref)
  - [`ARK2ImplicitExplicitMidpoint`](@ref)
  - [`ARK2GiraldoKellyConstantinescu`](@ref)
  - [`ARK548L2SA2KennedyCarpenter`](@ref)
  - [`ARK437L2SA1KennedyCarpenter`](@ref)
  - [`Trap2LockWoodWeller`](@ref)
  - [`DBM453VoglEtAl`](@ref)
"""
mutable struct AdditiveRungeKutta{
    T,
    RT,
    AT,
    V,
    VS,
    IST,
    Nstages,
    Nstages_sq,
    Nstagesm1,
} <: AbstractAdditiveRungeKutta
    "time step"
    dt::RT
    "time"
    t::RT
    "elapsed time steps"
    steps::Int
    "rhs function"
    rhs!::Any
    "rhs linear operator"
    rhs_implicit!::Any
    "a dictionary of backward Euler solvers"
    implicit_solvers::IST
    "An integer which is updated to determine the appropriate cached implicit solver (used in substepping)"
    substep_stage::Int
    "Storage for solution during the AdditiveRungeKutta update"
    Qstages::NTuple{Nstagesm1, AT}
    "Storage for RHS during the AdditiveRungeKutta update"
    Rstages::NTuple{Nstages, AT}
    "Storage for the linear solver rhs vector"
    Qhat::AT
    "RK coefficient matrix A for the explicit scheme"
    RKA_explicit::SArray{NTuple{2, Nstages}, RT, 2, Nstages_sq}
    "RK coefficient matrix A for the implicit scheme"
    RKA_implicit::SArray{NTuple{2, Nstages}, RT, 2, Nstages_sq}
    "RK coefficient vector B for the explicit scheme (rhs add in scaling)"
    RKB_explicit::SArray{Tuple{Nstages}, RT, 1, Nstages}
    "RK coefficient vector B for the implicit scheme (rhs add in scaling)"
    RKB_implicit::SArray{Tuple{Nstages}, RT, 1, Nstages}
    "RK_explicit coefficient vector C for the explicit scheme (time scaling)"
    RKC_explicit::SArray{Tuple{Nstages}, RT, 1, Nstages}
    "RK_implicit coefficient vector C for the implicit scheme (time scaling)"
    RKC_implicit::SArray{Tuple{Nstages}, RT, 1, Nstages}
    split_explicit_implicit::Bool
    "Variant of the ARK scheme"
    variant::V
    "Storage dependent on the variant of the ARK scheme"
    variant_storage::VS

    function AdditiveRungeKutta(
        rhs!,
        rhs_implicit!,
        backward_euler_solver,
        RKA_explicit,
        RKA_implicit,
        RKB_explicit,
        RKB_implicit,
        RKC_explicit,
        RKC_implicit,
        split_explicit_implicit,
        variant,
        Q::AT;
        dt = nothing,
        t0 = 0,
        nsubsteps = [],
    ) where {AT <: AbstractArray}
        @assert dt !== nothing

        T = eltype(Q)
        RT = real(T)

        Nstages = length(RKB_explicit)

        # Stage storage; the first stage aliases Q at use sites, hence
        # Nstages - 1 extra vectors.
        Qstages = ntuple(i -> similar(Q), Nstages - 1)
        Rstages = ntuple(i -> similar(Q), Nstages)
        Qhat = similar(Q)

        V = typeof(variant)
        variant_storage = additional_storage(variant, Q, Nstages)
        VS = typeof(variant_storage)

        # One backward Euler solver is cached per distinct nonzero diagonal
        # coefficient of the implicit Butcher table.
        implicit_solvers = Dict()
        rk_diag = unique(diag(RKA_implicit))
        # Remove all zero entries from `rk_diag`
        # so we build all unique implicit solvers (parameterized by the
        # corresponding RK coefficient)
        filter!(c -> !iszero(c), rk_diag)

        # LowStorageVariant ARK methods assume that both the explicit and
        # implicit B and C vectors are the same. Additionally, the diagonal
        # of the implicit Butcher table A is assumed to have the form:
        # [0, c, ... c ], where c is some non-zero constant.
        if variant isa LowStorageVariant
            @assert RKB_explicit == RKB_implicit
            @assert RKC_explicit == RKC_implicit
            # rk_diag here has been filtered of all non-unique and zero values.
            # So [0, c, ... c ] filters to [c]. We error if its length is not 1
            @assert length(rk_diag) == 1
        end

        if isempty(nsubsteps)
            # No substepping: one solver per coefficient, for the given dt.
            for rk_coeff in rk_diag
                α = dt * rk_coeff
                besolver! = setup_backward_Euler_solver(
                    backward_euler_solver,
                    Q,
                    α,
                    rhs_implicit!,
                )
                @assert besolver! isa AbstractBackwardEulerSolver
                implicit_solvers[rk_coeff] = (besolver!,)
            end
        else
            # Substepping: one solver per (substep size, coefficient) pair.
            nsteps = length(nsubsteps)
            for rk_coeff in rk_diag
                solvers = ntuple(
                    i -> setup_backward_Euler_solver(
                        backward_euler_solver,
                        Q,
                        dt * nsubsteps[i] * rk_coeff,
                        rhs_implicit!,
                    ),
                    nsteps,
                )
                @assert(all(isa.(solvers, AbstractBackwardEulerSolver)))
                implicit_solvers[rk_coeff] = solvers
            end
        end
        IST = typeof(implicit_solvers)

        new{T, RT, AT, V, VS, IST, Nstages, Nstages^2, Nstages - 1}(
            RT(dt),
            RT(t0),
            0,
            rhs!,
            rhs_implicit!,
            implicit_solvers,
            # By default (no substepping, this parameter is simply set to 1)
            1,
            Qstages,
            Rstages,
            Qhat,
            RKA_explicit,
            RKA_implicit,
            RKB_explicit,
            RKB_implicit,
            RKC_explicit,
            RKC_implicit,
            split_explicit_implicit,
            variant,
            variant_storage,
        )
    end
end

# Convenience constructor: unpack a two-part TimeScaledRHS into explicit and
# implicit tendencies and forward to the ARK constructor `ark`.
function AdditiveRungeKutta(
    ark,
    op::TimeScaledRHS{2, RT} where {RT},
    backward_euler_solver,
    Q::AT;
    dt = 0,
    t0 = 0,
    nsubsteps = [],
    split_explicit_implicit = true,
    variant = NaiveVariant(),
) where {AT <: AbstractArray}
    return ark(
        op.rhs![1],
        op.rhs![2],
        backward_euler_solver,
        Q;
        dt = dt,
        t0 = t0,
        nsubsteps = nsubsteps,
        split_explicit_implicit = split_explicit_implicit,
        variant = variant,
    )
end

# this will only work for iterative solves
# direct solvers use prefactorization
function updatedt!(ark::AdditiveRungeKutta, dt)
    # Refresh every cached backward Euler solver with the new time step.
    for (rk_coeff, implicit_solvers) in ark.implicit_solvers
        implicit_solver! = implicit_solvers[ark.substep_stage]
        @assert Δt_is_adjustable(implicit_solver!)
        # New coefficient
        α = dt * rk_coeff
        # Update with new dt and implicit coefficient
        ark.dt = dt
        update_backward_Euler_solver!(implicit_solver!, ark.Qstages[1], α)
    end
end

function dostep!(
    Q,
    ark::AdditiveRungeKutta,
    p,
    time,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    slow_scaling = nothing,
)
    # Dispatch on the variant stored in the solver object.
    dostep!(Q, ark, ark.variant, p, time, slow_δ, slow_rv_dQ, slow_scaling)
end

function dostep!(
    Q,
    ark::AdditiveRungeKutta,
    p,
    time::Real,
    nsubsteps::Int,
    iStage::Int,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    slow_scaling = nothing,
)
    # Select the cached implicit solver for this substep stage, then take
    # `nsubsteps` steps of size ark.dt.
    ark.substep_stage = iStage
    for i in 1:nsubsteps
        dostep!(Q, ark, ark.variant, p, time, slow_δ, slow_rv_dQ, slow_scaling)
        time += ark.dt
    end
end

function dostep!(
    Q,
    ark::AdditiveRungeKutta,
    variant::NaiveVariant,
    p,
    time::Real,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    slow_scaling = nothing,
)
    dt = ark.dt

    RKA_explicit, RKA_implicit = ark.RKA_explicit, ark.RKA_implicit
    RKB_explicit, RKC_explicit = ark.RKB_explicit, ark.RKC_explicit
    RKB_implicit, RKC_implicit = ark.RKB_implicit, ark.RKC_implicit
    rhs!, rhs_implicit! = ark.rhs!, ark.rhs_implicit!
    # First stage aliases Q itself; the remaining stages use scratch storage.
    Qstages, Rstages = (Q, ark.Qstages...), ark.Rstages
    Qhat = ark.Qhat
    split_explicit_implicit = ark.split_explicit_implicit
    Lstages = ark.variant_storage.Lstages

    rv_Q = realview(Q)
    rv_Qstages = realview.(Qstages)
    rv_Lstages = realview.(Lstages)
    rv_Rstages = realview.(Rstages)
    rv_Qhat = realview(Qhat)

    Nstages = length(RKB_explicit)

    groupsize = 256

    # calculate the rhs at first stage to initialize the stage loop
    rhs!(
        Rstages[1],
        Qstages[1],
        p,
        time + RKC_explicit[1] * dt,
        increment = false,
    )
    rhs_implicit!(
        Lstages[1],
        Qstages[1],
        p,
        time + RKC_implicit[1] * dt,
        increment = false,
    )

    # note that it is important that this loop does not modify Q!
    for istage in 2:Nstages
        stagetime_implicit = time + RKC_implicit[istage] * dt
        stagetime_explicit = time + RKC_explicit[istage] * dt

        # this kernel also initializes Qstages[istage] with an initial guess
        # for the linear solver
        event = Event(array_device(Q))
        event = stage_update!(array_device(Q), groupsize)(
            variant,
            rv_Q,
            rv_Qstages,
            rv_Lstages,
            rv_Rstages,
            rv_Qhat,
            RKA_explicit,
            RKA_implicit,
            dt,
            Val(istage),
            Val(split_explicit_implicit),
            slow_δ,
            slow_rv_dQ;
            ndrange = length(rv_Q),
            dependencies = (event,),
        )
        wait(array_device(Q), event)

        # solves
        # Qs = Qhat + dt * RKA_implicit[istage, istage] * rhs_implicit!(Qs)
        rk_coeff = RKA_implicit[istage, istage]
        if !iszero(rk_coeff)
            α = rk_coeff * dt
            besolver! = ark.implicit_solvers[rk_coeff][ark.substep_stage]
            besolver!(Qstages[istage], Qhat, α, p, stagetime_implicit)
        end

        rhs!(
            Rstages[istage],
            Qstages[istage],
            p,
            stagetime_explicit,
            increment = false,
        )
        rhs_implicit!(
            Lstages[istage],
            Qstages[istage],
            p,
            stagetime_implicit,
            increment = false,
        )
    end

    # compose the final solution
    event = Event(array_device(Q))
    event = solution_update!(array_device(Q), groupsize)(
        variant,
        rv_Q,
        rv_Lstages,
        rv_Rstages,
        RKB_explicit,
        RKB_implicit,
        dt,
        Val(Nstages),
        Val(split_explicit_implicit),
        slow_δ,
        slow_rv_dQ,
        slow_scaling;
        ndrange = length(rv_Q),
        dependencies = (event,),
    )
    wait(array_device(Q), event)
end

function dostep!(
    Q,
    ark::AdditiveRungeKutta,
    variant::LowStorageVariant,
    p,
    time::Real,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    slow_scaling = nothing,
)
    dt = ark.dt

    RKA_explicit, RKA_implicit = ark.RKA_explicit, ark.RKA_implicit
    # LowStorageVariant ARK methods assumes that the implicit
    # Butcher table has an SDIRK form; meaning explicit first step (no
    # implicit solve at the first stage) and all non-zero diaognal
    # coefficients are the same.
    rk_coeff = RKA_implicit[2, 2]
    besolver! = ark.implicit_solvers[rk_coeff][ark.substep_stage]
    # NOTE: Using low-storage variant assumes that the butcher tables
    # for both the explicit and implicit parts have the same B and C
    # vectors
    RKB, RKC = ark.RKB_explicit, ark.RKC_explicit
    rhs!, rhs_implicit! = ark.rhs!, ark.rhs_implicit!
    Qstages, Rstages = (Q, ark.Qstages...), ark.Rstages
    Qhat = ark.Qhat
    split_explicit_implicit = ark.split_explicit_implicit
    Qtt = ark.variant_storage.Qtt

    rv_Q = realview(Q)
    rv_Qstages = realview.(Qstages)
    rv_Rstages = realview.(Rstages)
    rv_Qhat = realview(Qhat)
    rv_Qtt = realview(Qtt)

    Nstages = length(RKB)

    groupsize = 256

    # calculate the rhs at first stage to initialize the stage loop
    rhs!(Rstages[1], Qstages[1], p, time + RKC[1] * dt, increment = false)

    # note that it is important that this loop does not modify Q!
    for istage in 2:Nstages
        stagetime = time + RKC[istage] * dt

        # this kernel also initializes Qtt for the linear solver
        event = Event(array_device(Q))
        event = stage_update!(array_device(Q), groupsize)(
            variant,
            rv_Q,
            rv_Qstages,
            rv_Rstages,
            rv_Qhat,
            rv_Qtt,
            RKA_explicit,
            RKA_implicit,
            dt,
            Val(istage),
            Val(split_explicit_implicit),
            slow_δ,
            slow_rv_dQ;
            ndrange = length(rv_Q),
            dependencies = (event,),
        )
        wait(array_device(Q), event)

        # solves
        # Q_tt = Qhat + dt * RKA_implicit[istage, istage] * rhs_implicit!(Q_tt)
        α = dt * RKA_implicit[istage, istage]
        besolver!(Qtt, Qhat, α, p, stagetime)

        # update Qstages
        Qstages[istage] .+= Qtt

        rhs!(Rstages[istage], Qstages[istage], p, stagetime, increment = false)
    end

    if split_explicit_implicit
        # Fold the implicit tendency into the stage RHS before the final update.
        for istage in 1:Nstages
            stagetime = time + RKC[istage] * dt
            rhs_implicit!(
                Rstages[istage],
                Qstages[istage],
                p,
                stagetime,
                increment = true,
            )
        end
    end

    # compose the final solution
    event = Event(array_device(Q))
    event = solution_update!(array_device(Q), groupsize)(
        variant,
        rv_Q,
        rv_Rstages,
        RKB,
        dt,
        Val(Nstages),
        slow_δ,
        slow_rv_dQ,
        slow_scaling;
        ndrange = length(rv_Q),
        dependencies = (event,),
    )
    wait(array_device(Q), event)
end

@kernel function stage_update!(
    ::NaiveVariant,
    Q,
    Qstages,
    Lstages,
    Rstages,
    Qhat,
    RKA_explicit,
    RKA_implicit,
    dt,
    ::Val{is},
    ::Val{split_explicit_implicit},
    slow_δ,
    slow_dQ,
) where {is, split_explicit_implicit}
    i = @index(Global, Linear)
    @inbounds begin
        Qhat_i = Q[i]
        Qstages_is_i = Q[i]

        if slow_δ !== nothing
            Rstages[is - 1][i] += slow_δ * slow_dQ[i]
        end

        # Accumulate contributions of all previous stages.
        @unroll for js in 1:(is - 1)
            R_explicit = dt * RKA_explicit[is, js] * Rstages[js][i]
            L_explicit = dt * RKA_explicit[is, js] * Lstages[js][i]
            L_implicit = dt * RKA_implicit[is, js] * Lstages[js][i]
            Qhat_i += (R_explicit + L_implicit)
            Qstages_is_i += R_explicit
            if split_explicit_implicit
                Qstages_is_i += L_explicit
            else
                Qhat_i -= L_explicit
            end
        end
        Qstages[is][i] = Qstages_is_i
        Qhat[i] = Qhat_i
    end
end

@kernel function stage_update!(
    ::LowStorageVariant,
    Q,
    Qstages,
    Rstages,
    Qhat,
    Qtt,
    RKA_explicit,
    RKA_implicit,
    dt,
    ::Val{is},
    ::Val{split_explicit_implicit},
    slow_δ,
    slow_dQ,
) where {is, split_explicit_implicit}
    i = @index(Global, Linear)
    @inbounds begin
        Qhat_i = Q[i]
        Qstages_is_i = -zero(eltype(Q))

        if slow_δ !== nothing
            Rstages[is - 1][i] += slow_δ * slow_dQ[i]
        end

        @unroll for js in 1:(is - 1)
            # Normalize by the (constant) SDIRK diagonal coefficient.
            if split_explicit_implicit
                rkcoeff = RKA_implicit[is, js] / RKA_implicit[is, is]
            else
                rkcoeff =
                    (RKA_implicit[is, js] - RKA_explicit[is, js]) /
                    RKA_implicit[is, is]
            end
            commonterm = rkcoeff * Qstages[js][i]
            Qhat_i += commonterm + dt * RKA_explicit[is, js] * Rstages[js][i]
            Qstages_is_i -= commonterm
        end
        Qstages[is][i] = Qstages_is_i
        Qhat[i] = Qhat_i
        # Qtt doubles as the initial guess for the backward Euler solve.
        Qtt[i] = Qhat_i
    end
end

@kernel function solution_update!(
    ::NaiveVariant,
    Q,
    Lstages,
    Rstages,
    RKB_explicit,
    RKB_implicit,
    dt,
    ::Val{Nstages},
    ::Val{split_explicit_implicit},
    slow_δ,
    slow_dQ,
    slow_scaling,
) where {Nstages, split_explicit_implicit}
    i = @index(Global, Linear)
    @inbounds begin
        if slow_δ !== nothing
            Rstages[Nstages][i] += slow_δ * slow_dQ[i]
        end
        if slow_scaling !== nothing
            slow_dQ[i] *= slow_scaling
        end

        @unroll for is in 1:Nstages
            Q[i] += RKB_explicit[is] * dt * Rstages[is][i]
            if split_explicit_implicit
                Q[i] += RKB_implicit[is] * dt * Lstages[is][i]
            end
        end
    end
end

@kernel function solution_update!(
    ::LowStorageVariant,
    Q,
    Rstages,
    RKB,
    dt,
    ::Val{Nstages},
    slow_δ,
    slow_dQ,
    slow_scaling,
) where {Nstages}
    i = @index(Global, Linear)
    @inbounds begin
        if slow_δ !== nothing
            Rstages[Nstages][i] += slow_δ * slow_dQ[i]
        end
        if slow_scaling !== nothing
            slow_dQ[i] *= slow_scaling
        end

        @unroll for is in 1:Nstages
            Q[i] += RKB[is] * dt * Rstages[is][i]
        end
    end
end

"""
    ARK1ForwardBackwardEuler(f, l, backward_euler_solver, Q; dt, t0,
                             split_explicit_implicit, variant)

This function returns an [`AdditiveRungeKutta`](@ref) time stepping object,
see the documentation of [`AdditiveRungeKutta`](@ref) for arguments definitions.
This time stepping object is intended to be passed to the `solve!` command.

This uses a first-order-accurate two-stage additive Runge--Kutta scheme
by combining a forward Euler explicit step with a backward Euler implicit
correction.

### References
    @article{Ascher1997,
      title = {Implicit-explicit Runge-Kutta methods for time-dependent
               partial differential equations},
      author = {Uri M. Ascher and Steven J. Ruuth and Raymond J.
Spiteri},
      volume = {25},
      number = {2-3},
      pages = {151--167},
      year = {1997},
      journal = {Applied Numerical Mathematics},
      publisher = {Elsevier {BV}}
    }
"""
function ARK1ForwardBackwardEuler(
    F,
    L,
    backward_euler_solver,
    Q::AT;
    dt = nothing,
    t0 = 0,
    nsubsteps = [],
    split_explicit_implicit = false,
    variant = LowStorageVariant(),
) where {AT <: AbstractArray}
    @assert dt !== nothing

    T = eltype(Q)
    RT = real(T)

    # Butcher tables: forward Euler (explicit) paired with backward Euler
    # (implicit).
    RKA_explicit = [
        RT(0) RT(0)
        RT(1) RT(0)
    ]
    RKA_implicit = [
        RT(0) RT(0)
        RT(0) RT(1)
    ]

    RKB_explicit = [RT(0), RT(1)]
    RKC_explicit = [RT(0), RT(1)]

    # For this ARK method, both RK methods share the same
    # B and C vectors in the Butcher table
    RKB_implicit = RKB_explicit
    RKC_implicit = RKC_explicit

    # (Removed a dead `Nstages = length(RKB_explicit)` local that was never
    # used.)
    AdditiveRungeKutta(
        F,
        L,
        backward_euler_solver,
        RKA_explicit,
        RKA_implicit,
        RKB_explicit,
        RKB_implicit,
        RKC_explicit,
        RKC_implicit,
        split_explicit_implicit,
        variant,
        Q;
        dt = dt,
        t0 = t0,
        nsubsteps = nsubsteps,
    )
end

"""
    ARK2ImplicitExplicitMidpoint(f, l, backward_euler_solver, Q; dt, t0,
                                 split_explicit_implicit, variant)

This function returns an [`AdditiveRungeKutta`](@ref) time stepping object,
see the documentation of [`AdditiveRungeKutta`](@ref) for arguments definitions.
This time stepping object is intended to be passed to the `solve!` command.

This uses a second-order-accurate two-stage additive Runge--Kutta scheme
by combining the implicit and explicit midpoint methods.

### References
    @article{Ascher1997,
      title = {Implicit-explicit Runge-Kutta methods for time-dependent
               partial differential equations},
      author = {Uri M. Ascher and Steven J. Ruuth and Raymond J.
Spiteri},
      volume = {25},
      number = {2-3},
      pages = {151--167},
      year = {1997},
      journal = {Applied Numerical Mathematics},
      publisher = {Elsevier {BV}}
    }
"""
function ARK2ImplicitExplicitMidpoint(
    F,
    L,
    backward_euler_solver,
    Q::AT;
    dt = nothing,
    t0 = 0,
    nsubsteps = [],
    split_explicit_implicit = false,
    variant = LowStorageVariant(),
) where {AT <: AbstractArray}
    @assert dt !== nothing

    T = eltype(Q)
    RT = real(T)

    # Butcher tables: explicit midpoint paired with implicit midpoint.
    RKA_explicit = [
        RT(0) RT(0)
        RT(1 / 2) RT(0)
    ]
    RKA_implicit = [
        RT(0) RT(0)
        RT(0) RT(1 / 2)
    ]

    RKB_explicit = [RT(0), RT(1)]
    RKC_explicit = [RT(0), RT(1 / 2)]

    # For this ARK method, both RK methods share the same
    # B and C vectors in the Butcher table
    RKB_implicit = RKB_explicit
    RKC_implicit = RKC_explicit

    # (Removed a dead `Nstages = length(RKB_explicit)` local that was never
    # used.)
    AdditiveRungeKutta(
        F,
        L,
        backward_euler_solver,
        RKA_explicit,
        RKA_implicit,
        RKB_explicit,
        RKB_implicit,
        RKC_explicit,
        RKC_implicit,
        split_explicit_implicit,
        variant,
        Q;
        dt = dt,
        t0 = t0,
        nsubsteps = nsubsteps,
    )
end

"""
    ARK2GiraldoKellyConstantinescu(f, l, backward_euler_solver, Q; dt, t0,
                                   split_explicit_implicit, variant,
                                   paperversion)

This function returns an [`AdditiveRungeKutta`](@ref) time stepping object,
see the documentation of [`AdditiveRungeKutta`](@ref) for arguments definitions.
This time stepping object is intended to be passed to the `solve!` command.

`paperversion=true` uses the coefficients from the paper, `paperversion=false`
uses coefficients that make the scheme (much) more stable but less accurate

This uses the second-order-accurate 3-stage additive Runge--Kutta scheme of
Giraldo, Kelly and Constantinescu (2013).

### References
 - [Giraldo2013](@cite)
"""
function ARK2GiraldoKellyConstantinescu(
    F,
    L,
    backward_euler_solver,
    Q::AT;
    dt = nothing,
    t0 = 0,
    nsubsteps = [],
    split_explicit_implicit = false,
    variant = LowStorageVariant(),
    paperversion = false,
) where {AT <: AbstractArray}
    @assert dt !== nothing

    T = eltype(Q)
    RT = real(T)

    # a32 from the paper, or the more stable (less accurate) alternative.
    a32 = RT(paperversion ? (3 + 2 * sqrt(2)) / 6 : 1 // 2)
    RKA_explicit = [
        RT(0) RT(0) RT(0)
        RT(2 - sqrt(2)) RT(0) RT(0)
        RT(1 - a32) RT(a32) RT(0)
    ]
    RKA_implicit = [
        RT(0) RT(0) RT(0)
        RT(1 - 1 / sqrt(2)) RT(1 - 1 / sqrt(2)) RT(0)
        RT(1 / (2 * sqrt(2))) RT(1 / (2 * sqrt(2))) RT(1 - 1 / sqrt(2))
    ]

    RKB_explicit =
        [RT(1 / (2 * sqrt(2))), RT(1 / (2 * sqrt(2))), RT(1 - 1 / sqrt(2))]
    RKC_explicit = [RT(0), RT(2 - sqrt(2)), RT(1)]

    # For this ARK method, both RK methods share the same
    # B and C vectors in the Butcher table
    RKB_implicit = RKB_explicit
    RKC_implicit = RKC_explicit

    # (Removed a dead `Nstages = length(RKB_explicit)` local that was never
    # used.)
    AdditiveRungeKutta(
        F,
        L,
        backward_euler_solver,
        RKA_explicit,
        RKA_implicit,
        RKB_explicit,
        RKB_implicit,
        RKC_explicit,
        RKC_implicit,
        split_explicit_implicit,
        variant,
        Q;
        dt = dt,
        t0 = t0,
        nsubsteps = nsubsteps,
    )
end

"""
    Trap2LockWoodWeller(F, L, backward_euler_solver, Q; dt, t0, nsubsteps,
                        split_explicit_implicit, variant)

This function returns an [`AdditiveRungeKutta`](@ref) time stepping object,
see the documentation of [`AdditiveRungeKutta`](@ref) for arguments definitions.
This time stepping object is intended to be passed to the `solve!` command.

The time integrator scheme used is Trap2(2,3,2) with δ_s = 1, δ_f = 0, from
the following reference

### References
    @article{Lock2014,
      title = {Numerical analyses of Runge–Kutta implicit–explicit schemes
               for horizontally explicit, vertically implicit solutions of
               atmospheric models},
      author = {S.-J. Lock and N. Wood and H.
Weller},
      volume = {140},
      number = {682},
      pages = {1654-1669},
      year = {2014},
      journal = {Quarterly Journal of the Royal Meteorological Society},
      publisher = {{RMetS}}
    }
"""
function Trap2LockWoodWeller(
    F,
    L,
    backward_euler_solver,
    Q::AT;
    dt = nothing,
    t0 = 0,
    nsubsteps = [],
    split_explicit_implicit = false,
    variant = NaiveVariant(),
    δ_s = 1,   # slow (explicit) off-centering parameter from Lock et al.
    δ_f = 0,   # fast (implicit) off-centering parameter
    α = 0,     # weighting of the second-stage implicit coupling
) where {AT <: AbstractArray}
    @assert dt !== nothing

    # In this scheme B and C vectors do not coincide,
    # hence we can't use the LowStorageVariant optimization
    @assert variant isa NaiveVariant

    T = eltype(Q)
    RT = real(T)

    #! format: off
    RKA_explicit = [
        RT(0)     RT(0)     RT(0)     RT(0)
        RT(δ_s)   RT(0)     RT(0)     RT(0)
        RT(1 / 2) RT(1 / 2) RT(0)     RT(0)
        RT(1 / 2) RT(0)     RT(1 / 2) RT(0)
    ]
    RKA_implicit = [
        RT(0)                 RT(0)                 RT(0)     RT(0)
        RT(δ_f * (1 - α) / 2) RT(δ_f * (1 + α) / 2) RT(0)     RT(0)
        RT(1 / 2)             RT(0)                 RT(1 / 2) RT(0)
        RT(1 / 2)             RT(0)                 RT(0)     RT(1 / 2)
    ]
    #! format: on

    RKB_explicit = [RT(1 / 2), RT(0), RT(1 / 2), RT(0)]
    RKB_implicit = [RT(1 / 2), RT(0), RT(0), RT(1 / 2)]

    RKC_explicit = [RT(0), RT(δ_s), RT(1), RT(1)]
    RKC_implicit = [RT(0), RT(δ_f), RT(1), RT(1)]

    # (Removed a dead `Nstages = length(RKB_explicit)` local that was never
    # used.)
    AdditiveRungeKutta(
        F,
        L,
        backward_euler_solver,
        RKA_explicit,
        RKA_implicit,
        RKB_explicit,
        RKB_implicit,
        RKC_explicit,
        RKC_implicit,
        split_explicit_implicit,
        variant,
        Q;
        dt = dt,
        t0 = t0,
        nsubsteps = nsubsteps,
    )
end

"""
    ARK548L2SA2KennedyCarpenter(f, l, backward_euler_solver, Q; dt, t0,
                                split_explicit_implicit, variant)

This function returns an [`AdditiveRungeKutta`](@ref) time stepping object,
see the documentation of [`AdditiveRungeKutta`](@ref) for arguments definitions.
This time stepping object is intended to be passed to the `solve!` command.

This uses the fifth-order-accurate 8-stage additive Runge--Kutta scheme of
Kennedy and Carpenter (2013).
### References
 - [Kennedy2019](@cite)
"""
function ARK548L2SA2KennedyCarpenter(
    F,
    L,
    backward_euler_solver,
    Q::AT;
    dt = nothing,
    t0 = 0,
    nsubsteps = [],
    split_explicit_implicit = false,
    variant = LowStorageVariant(),
) where {AT <: AbstractArray}
    @assert dt !== nothing

    T = eltype(Q)
    RT = real(T)

    Nstages = 8
    # SDIRK diagonal coefficient of the implicit table.
    gamma = RT(2 // 9)

    # declared as Arrays for mutability, later these will be converted to static
    # arrays
    RKA_explicit = zeros(RT, Nstages, Nstages)
    RKA_implicit = zeros(RT, Nstages, Nstages)
    RKB_explicit = zeros(RT, Nstages)
    RKC_explicit = zeros(RT, Nstages)

    # the main diagonal
    for is in 2:Nstages
        RKA_implicit[is, is] = gamma
    end

    RKA_implicit[3, 2] = RT(2366667076620 // 8822750406821)
    RKA_implicit[4, 2] = RT(-257962897183 // 4451812247028)
    RKA_implicit[4, 3] = RT(128530224461 // 14379561246022)
    RKA_implicit[5, 2] = RT(-486229321650 // 11227943450093)
    RKA_implicit[5, 3] = RT(-225633144460 // 6633558740617)
    RKA_implicit[5, 4] = RT(1741320951451 // 6824444397158)
    RKA_implicit[6, 2] = RT(621307788657 // 4714163060173)
    RKA_implicit[6, 3] = RT(-125196015625 // 3866852212004)
    RKA_implicit[6, 4] = RT(940440206406 // 7593089888465)
    RKA_implicit[6, 5] = RT(961109811699 // 6734810228204)
    RKA_implicit[7, 2] = RT(2036305566805 // 6583108094622)
    RKA_implicit[7, 3] = RT(-3039402635899 // 4450598839912)
    RKA_implicit[7, 4] = RT(-1829510709469 // 31102090912115)
    RKA_implicit[7, 5] = RT(-286320471013 // 6931253422520)
    RKA_implicit[7, 6] = RT(8651533662697 // 9642993110008)

    RKA_explicit[3, 1] = RT(1 // 9)
    RKA_explicit[3, 2] = RT(1183333538310 // 1827251437969)
    RKA_explicit[4, 1] = RT(895379019517 // 9750411845327)
    RKA_explicit[4, 2] = RT(477606656805 // 13473228687314)
    RKA_explicit[4, 3] = RT(-112564739183 // 9373365219272)
    RKA_explicit[5, 1] = RT(-4458043123994 // 13015289567637)
    RKA_explicit[5, 2] = RT(-2500665203865 // 9342069639922)
    RKA_explicit[5, 3] = RT(983347055801 // 8893519644487)
    RKA_explicit[5, 4] = RT(2185051477207 // 2551468980502)
    RKA_explicit[6, 1] = RT(-167316361917 // 17121522574472)
    RKA_explicit[6, 2] = RT(1605541814917 // 7619724128744)
    RKA_explicit[6, 3] = RT(991021770328 // 13052792161721)
    RKA_explicit[6, 4] = RT(2342280609577 // 11279663441611)
    RKA_explicit[6, 5] = RT(3012424348531 // 12792462456678)
    RKA_explicit[7, 1] = RT(6680998715867 // 14310383562358)
    RKA_explicit[7, 2] = RT(5029118570809 // 3897454228471)
    RKA_explicit[7, 3] = RT(2415062538259 // 6382199904604)
    RKA_explicit[7, 4] = RT(-3924368632305 // 6964820224454)
    RKA_explicit[7, 5] = RT(-4331110370267 // 15021686902756)
    RKA_explicit[7, 6] = RT(-3944303808049 // 11994238218192)
    RKA_explicit[8, 1] = RT(2193717860234 // 3570523412979)
    RKA_explicit[8, 2] = RKA_explicit[8, 1]
    RKA_explicit[8, 3] = RT(5952760925747 // 18750164281544)
    RKA_explicit[8, 4] = RT(-4412967128996 // 6196664114337)
    RKA_explicit[8, 5] = RT(4151782504231 // 36106512998704)
    RKA_explicit[8, 6] = RT(572599549169 // 6265429158920)
    RKA_explicit[8, 7] = RT(-457874356192 // 11306498036315)

    RKB_explicit[2] = 0
    RKB_explicit[3] = RT(3517720773327 // 20256071687669)
    RKB_explicit[4] = RT(4569610470461 // 17934693873752)
    RKB_explicit[5] = RT(2819471173109 // 11655438449929)
    RKB_explicit[6] = RT(3296210113763 // 10722700128969)
    RKB_explicit[7] = RT(-1142099968913 // 5710983926999)

    RKC_explicit[2] = RT(4 // 9)
    RKC_explicit[3] = RT(6456083330201 // 8509243623797)
    RKC_explicit[4] = RT(1632083962415 // 14158861528103)
    RKC_explicit[5] = RT(6365430648612 // 17842476412687)
    RKC_explicit[6] = RT(18 // 25)
    RKC_explicit[7] = RT(191 // 200)

    # Fill in the remaining entries from the table's structural identities:
    # first-column implicit entries mirror the second column.
    for is in 2:Nstages
        RKA_implicit[is, 1] = RKA_implicit[is, 2]
    end

    # Last implicit row equals the (shared) B vector (stiffly-accurate form).
    for is in 1:(Nstages - 1)
        RKA_implicit[Nstages, is] = RKB_explicit[is]
    end

    RKB_explicit[1] = RKB_explicit[2]
    RKB_explicit[8] = gamma

    RKA_explicit[2, 1] = RKC_explicit[2]
    RKA_explicit[Nstages, 1] = RKA_explicit[Nstages, 2]

    RKC_explicit[1] = 0
    RKC_explicit[Nstages] = 1

    # For this ARK method, both RK methods share the same
    # B and C vectors in the Butcher table
    RKB_implicit = RKB_explicit
    RKC_implicit = RKC_explicit

    ark = AdditiveRungeKutta(
        F,
        L,
        backward_euler_solver,
        RKA_explicit,
        RKA_implicit,
        RKB_explicit,
        RKB_implicit,
        RKC_explicit,
        RKC_implicit,
        split_explicit_implicit,
        variant,
        Q;
        dt = dt,
        t0 = t0,
        nsubsteps = nsubsteps,
    )
end

"""
    ARK437L2SA1KennedyCarpenter(f, l, backward_euler_solver, Q; dt, t0,
                                split_explicit_implicit, variant)

This function returns an [`AdditiveRungeKutta`](@ref) time stepping object,
see the documentation of [`AdditiveRungeKutta`](@ref) for arguments definitions.
This time stepping object is intended to be passed to the `solve!` command.

This uses the fourth-order-accurate 7-stage additive Runge--Kutta scheme of
Kennedy and Carpenter (2013).

### References
 - [Kennedy2019](@cite)
"""
function ARK437L2SA1KennedyCarpenter(
    F,
    L,
    backward_euler_solver,
    Q::AT;
    dt = nothing,
    t0 = 0,
    nsubsteps = [],
    split_explicit_implicit = false,
    variant = LowStorageVariant(),
) where {AT <: AbstractArray}
    @assert dt !== nothing

    T = eltype(Q)
    RT = real(T)

    Nstages = 7
    # SDIRK diagonal coefficient of the implicit table.
    gamma = RT(1235 // 10000)

    # declared as Arrays for mutability, later these will be converted to static
    # arrays
    RKA_explicit = zeros(RT, Nstages, Nstages)
    RKA_implicit = zeros(RT, Nstages, Nstages)
    RKB_explicit = zeros(RT, Nstages)
    RKC_explicit = zeros(RT, Nstages)

    # the main diagonal
    for is in 2:Nstages
        RKA_implicit[is, is] = gamma
    end

    RKA_implicit[3, 2] = RT(624185399699 // 4186980696204)
    RKA_implicit[4, 2] = RT(1258591069120 // 10082082980243)
    RKA_implicit[4, 3] = RT(-322722984531 // 8455138723562)
    RKA_implicit[5, 2] = RT(-436103496990 // 5971407786587)
    RKA_implicit[5, 3] = RT(-2689175662187 // 11046760208243)
    RKA_implicit[5, 4] = RT(4431412449334 // 12995360898505)
    RKA_implicit[6, 2] = RT(-2207373168298 // 14430576638973)
    RKA_implicit[6, 3] = RT(242511121179 // 3358618340039)
    RKA_implicit[6, 4] = RT(3145666661981 // 7780404714551)
    RKA_implicit[6, 5] = RT(5882073923981 // 14490790706663)
    RKA_implicit[7, 2] = 0
    RKA_implicit[7, 3] = RT(9164257142617 // 17756377923965)
    RKA_implicit[7, 4] = RT(-10812980402763 // 74029279521829)
    RKA_implicit[7, 5] = RT(1335994250573 // 5691609445217)
    RKA_implicit[7, 6] = RT(2273837961795 // 8368240463276)

    RKA_explicit[3, 1] = RT(247 // 4000)
    RKA_explicit[3, 2] = RT(2694949928731 // 7487940209513)
    RKA_explicit[4, 1] = RT(464650059369 // 8764239774964)
    RKA_explicit[4, 2] = RT(878889893998 // 2444806327765)
    RKA_explicit[4, 3] = RT(-952945855348 // 12294611323341)
    RKA_explicit[5, 1] = RT(476636172619 // 8159180917465)
    RKA_explicit[5, 2] = RT(-1271469283451 // 7793814740893)
    RKA_explicit[5, 3] = RT(-859560642026 // 4356155882851)
    RKA_explicit[5, 4] = RT(1723805262919 // 4571918432560)
    RKA_explicit[6, 1] = RT(6338158500785 // 11769362343261)
    RKA_explicit[6, 2] = RT(-4970555480458 // 10924838743837)
    RKA_explicit[6, 3] = RT(3326578051521 // 2647936831840)
    RKA_explicit[6, 4] = RT(-880713585975 // 1841400956686)
    RKA_explicit[6, 5] = RT(-1428733748635 // 8843423958496)
    RKA_explicit[7, 2] = RT(760814592956 // 3276306540349)
    RKA_explicit[7, 3] = RT(-47223648122716 // 6934462133451)
    RKA_explicit[7, 4] = RT(71187472546993 // 9669769126921)
    RKA_explicit[7, 5] = RT(-13330509492149 // 9695768672337)
    RKA_explicit[7, 6] = RT(11565764226357 // 8513123442827)

    RKB_explicit[2] = 0
    RKB_explicit[3] = RT(9164257142617 // 17756377923965)
    RKB_explicit[4] = RT(-10812980402763 // 74029279521829)
    RKB_explicit[5] = RT(1335994250573 // 5691609445217)
    RKB_explicit[6] = RT(2273837961795 // 8368240463276)
    RKB_explicit[7] = RT(247 // 2000)

    RKC_explicit[2] = RT(247 // 1000)
    RKC_explicit[3] = RT(4276536705230 // 10142255878289)
    RKC_explicit[4] = RT(67 // 200)
    RKC_explicit[5] = RT(3 // 40)
    RKC_explicit[6] = RT(7 // 10)

    # Fill in the remaining entries from the table's structural identities:
    # first-column implicit entries mirror the second column.
    for is in 2:Nstages
        RKA_implicit[is, 1] = RKA_implicit[is, 2]
    end

    # Last implicit row equals the (shared) B vector (stiffly-accurate form).
    for is in 1:(Nstages - 1)
        RKA_implicit[Nstages, is] = RKB_explicit[is]
    end

    RKB_explicit[1] = RKB_explicit[2]

    RKA_explicit[2, 1] = RKC_explicit[2]
    RKA_explicit[Nstages, 1] = RKA_explicit[Nstages, 2]

    RKC_explicit[1] = 0
    RKC_explicit[Nstages] = 1

    # For this ARK methods share the same
    # B and C vectors in the Butcher table
    RKB_implicit = RKB_explicit
    RKC_implicit = RKC_explicit

    ark = AdditiveRungeKutta(
        F,
        L,
        backward_euler_solver,
        RKA_explicit,
        RKA_implicit,
        RKB_explicit,
        RKB_implicit,
        RKC_explicit,
        RKC_implicit,
        split_explicit_implicit,
        variant,
        Q;
        dt = dt,
        t0 = t0,
        nsubsteps = nsubsteps,
    )
end

"""
    DBM453VoglEtAl(f, l, backward_euler_solver, Q; dt, t0,
                   split_explicit_implicit, variant)

This function returns an [`AdditiveRungeKutta`](@ref) time stepping object,
see the documentation of [`AdditiveRungeKutta`](@ref) for arguments definitions.
This time stepping object is intended to be passed to the `solve!` command.

This uses the third-order-accurate 5-stage additive Runge--Kutta scheme of
Vogl et al. (2019).

### References
 - [Vogl2019](@cite)
"""
function DBM453VoglEtAl(
    F,
    L,
    backward_euler_solver,
    Q::AT;
    dt = nothing,
    t0 = 0,
    nsubsteps = [],
    split_explicit_implicit = false,
    variant = LowStorageVariant(),
) where {AT <: AbstractArray}
    @assert dt !== nothing

    T = eltype(Q)
    RT = real(T)

    Nstages = 5
    gamma = RT(0.32591194130117247)

    # declared as Arrays for mutability, later these will be converted to static
    # arrays
    RKA_explicit = zeros(RT, Nstages, Nstages)
    RKA_implicit = zeros(RT, Nstages, Nstages)
    RKB_explicit = zeros(RT, Nstages)
    RKC_explicit = zeros(RT, Nstages)

    # the main diagonal
    for is in 2:Nstages
        RKA_implicit[is, is] = gamma
    end

    RKA_implicit[2, 1] = -0.22284985318525410
    RKA_implicit[3, 1] = -0.46801347074080545
    RKA_implicit[3, 2] = 0.86349284225716961
    RKA_implicit[4, 1] = -0.46509906651927421
    RKA_implicit[4, 2] = 0.81063103116959553
    RKA_implicit[4, 3] = 0.61036726756832357
    RKA_implicit[5, 1] = 0.87795339639076675
    RKA_implicit[5, 2] = -0.72692641526151547
    RKA_implicit[5, 3] = 0.75204137157372720
    RKA_implicit[5, 4] = -0.22898029400415088

    RKA_explicit[2, 1] = 0.10306208811591838
    RKA_explicit[3, 1] = -0.94124866143519894
    RKA_explicit[3, 2] = 1.66263997425273560
    RKA_explicit[4, 1] = -1.36709752014377650
    RKA_explicit[4,
2] = 1.38158529110168730 RKA_explicit[4, 3] = 1.26732340256190650 RKA_explicit[5, 1] = -0.81287582068772448 RKA_explicit[5, 2] = 0.81223739060505738 RKA_explicit[5, 3] = 0.90644429603699305 RKA_explicit[5, 4] = 0.094194134045674111 RKB_explicit[1] = 0.87795339639076672 RKB_explicit[2] = -0.72692641526151549 RKB_explicit[3] = 0.7520413715737272 RKB_explicit[4] = -0.22898029400415090 RKB_explicit[5] = 0.32591194130117247 RKC_explicit[1] = 0 RKC_explicit[2] = 0.1030620881159184 RKC_explicit[3] = 0.72139131281753662 RKC_explicit[4] = 1.28181117351981733 RKC_explicit[5] = 1 # For this ARK method, both RK methods share the same # B and C vectors in the Butcher table RKB_implicit = RKB_explicit RKC_implicit = RKC_explicit ark = AdditiveRungeKutta( F, L, backward_euler_solver, RKA_explicit, RKA_implicit, RKB_explicit, RKB_implicit, RKC_explicit, RKC_implicit, split_explicit_implicit, variant, Q; dt = dt, t0 = t0, nsubsteps = nsubsteps, ) end ================================================ FILE: src/Numerics/ODESolvers/BackwardEulerSolvers.jl ================================================ export LinearBackwardEulerSolver, AbstractBackwardEulerSolver export NonLinearBackwardEulerSolver abstract type AbstractImplicitOperator end """ op! = EulerOperator(f!, ϵ) Construct a linear operator which performs an explicit Euler step ``Q + α f(Q)``, where `f!` and `op!` both operate inplace, with extra arguments passed through, i.e. ``` op!(LQ, Q, args...) ``` is equivalent to ``` f!(dQ, Q, args...) LQ .= Q .+ ϵ .* dQ ``` """ mutable struct EulerOperator{F, FT} <: AbstractImplicitOperator f!::F ϵ::FT end function (op::EulerOperator)(LQ, Q, args...) op.f!(LQ, Q, args..., increment = false) @. 
LQ = Q + op.ϵ * LQ end """ AbstractBackwardEulerSolver An abstract backward Euler method """ abstract type AbstractBackwardEulerSolver end """ (be::AbstractBackwardEulerSolver)(Q, Qhat, α, param, time) Each concrete implementations of `AbstractBackwardEulerSolver` should provide a callable version which solves the following system for `Q` ``` Q = Qhat + α f(Q, param, time) ``` where `f` is the ODE tendency function, `param` are the ODE parameters, and `time` is the current ODE time. The arguments `Q` should be modified in place and should not be assumed to be initialized to any value. """ (be::AbstractBackwardEulerSolver)(Q, Qhat, α, p, t) = throw(MethodError(be, (Q, Qhat, α, p, t))) """ Δt_is_adjustable(::AbstractBackwardEulerSolver) Return `Bool` for whether this backward Euler solver can be updated. default is `false`. """ Δt_is_adjustable(::AbstractBackwardEulerSolver) = false """ update_backward_Euler_solver!(::AbstractBackwardEulerSolver, α) Update the given backward Euler solver for the parameter `α`; see ['AbstractBackwardEulerSolver'](@ref). Default behavior is no change to the solver. """ update_backward_Euler_solver!(::AbstractBackwardEulerSolver, Q, α) = nothing """ setup_backward_Euler_solver(solver, Q, α, tendency!) Returns a concrete implementation of an `AbstractBackwardEulerSolver` that will solve for `Q` in systems of the form of ``` Q = Qhat + α f(Q, param, time) ``` where `tendency!` is the in-place tendency function. Not the array `Q` is just passed in for type information, e.g., `Q` the same `Q` will not be used for all calls to the solver. """ setup_backward_Euler_solver(solver::AbstractBackwardEulerSolver, _...) = solver """ LinearBackwardEulerSolver(::AbstractSystemSolver; isadjustable = false) Helper type for specifying building a backward Euler solver with a linear solver. If `isadjustable == true` then the solver can be updated with a new time step size. 
""" struct LinearBackwardEulerSolver{LS} solver::LS isadjustable::Bool preconditioner_update_freq::Int LinearBackwardEulerSolver( solver; isadjustable = false, preconditioner_update_freq = -1, ) = new{typeof(solver)}(solver, isadjustable, preconditioner_update_freq) end """ LinBESolver Concrete implementation of an `AbstractBackwardEulerSolver` to use linear solvers of type `AbstractSystemSolver`. See helper type [`LinearBackwardEulerSolver`](@ref) ``` Q = Qhat + α f(Q, param, time) ``` """ mutable struct LinBESolver{FT, F, LS} <: AbstractBackwardEulerSolver α::FT f_imp!::F solver::LS isadjustable::Bool # used only for iterative solver preconditioner::AbstractPreconditioner # used only for direct solver factors::Any end Δt_is_adjustable(lin::LinBESolver) = lin.isadjustable function setup_backward_Euler_solver( lin::LinearBackwardEulerSolver, Q, α, f_imp!, ) FT = eltype(α) rhs! = EulerOperator(f_imp!, -α) factors = prefactorize(rhs!, lin.solver, Q, nothing, FT(NaN)) # when direct solver is applied preconditioner_update_freq <= 0 @assert( typeof(lin.solver) <: AbstractIterativeSystemSolver || lin.preconditioner_update_freq <= 0 ) preconditioner_update_freq = lin.preconditioner_update_freq # construct an empty preconditioner preconditioner = ( preconditioner_update_freq > 0 ? 
ColumnwiseLUPreconditioner(f_imp!, Q, preconditioner_update_freq) : NoPreconditioner() ) LinBESolver( α, f_imp!, lin.solver, lin.isadjustable, preconditioner, factors, ) end function update_backward_Euler_solver!(lin::LinBESolver, Q, α) lin.α = α FT = eltype(Q) # for direct solver, update factors # for iterative solver, set factors to Nothing (TODO optimize) lin.factors = prefactorize( EulerOperator(lin.f_imp!, -α), lin.solver, Q, nothing, FT(NaN), ) end function (lin::LinBESolver)(Q, Qhat, α, p, t) # If α is not the same as the already assembled operator # with its previous value of α (lin.α), then we need to # first check that the solver CAN be rebuilt (lin.isadjustable) # followed by recalling the set up routine by updating the # coefficient with the new version of α if lin.α != α @assert lin.isadjustable update_backward_Euler_solver!(lin, Q, α) end rhs! = EulerOperator(lin.f_imp!, -α) if typeof(lin.solver) <: AbstractIterativeSystemSolver FT = eltype(α) preconditioner_update!(rhs!, rhs!.f!, lin.preconditioner, p, t) linearsolve!(rhs!, lin.preconditioner, lin.solver, Q, Qhat, p, t) preconditioner_counter_update!(lin.preconditioner) else linearsolve!(rhs!, lin.factors, lin.solver, Q, Qhat, p, t) end end """ struct NonLinearBackwardEulerSolver{NLS} nlsolver::NLS isadjustable::Bool preconditioner_update_freq::Int64 end Helper type for specifying building a nonlinear backward Euler solver with a nonlinear solver. 
# Arguments
- `nlsolver`: iterative nonlinear solver, i.e., JacobianFreeNewtonKrylovSolver
- `isadjustable`: TODO not used, might use for updating preconditioner
- `preconditioner_update_freq`: relevant to Jacobian free
    -1: no preconditioner;
    positive number, update every freq times
"""
struct NonLinearBackwardEulerSolver{NLS}
    nlsolver::NLS
    isadjustable::Bool
    # preconditioner_update_freq, -1: no preconditioner;
    # positive number, update every freq times
    preconditioner_update_freq::Int
    function NonLinearBackwardEulerSolver(
        nlsolver;
        isadjustable = false,
        preconditioner_update_freq = -1,
    )
        NLS = typeof(nlsolver)
        return new{NLS}(nlsolver, isadjustable, preconditioner_update_freq)
    end
end

"""
    NonLinBESolver

Concrete implementation of an `AbstractBackwardEulerSolver` to use nonlinear
solvers of type `NLS`. See helper type
[`NonLinearBackwardEulerSolver`](@ref)
```
Q = Qhat + α f_imp(Q, param, time)
```
"""
mutable struct NonLinBESolver{FT, F, NLS} <: AbstractBackwardEulerSolver
    # Solve Q - α f_imp(Q) = Qrhs
    α::FT
    # implicit operator
    f_imp!::F
    # jacobian action, which approximates drhs!/dQ⋅ΔQ , here rhs!(Q) = Q - α f_imp(Q)
    jvp!::JacobianAction
    # nonlinear solver
    nlsolver::NLS
    # whether adjust the time step or not
    isadjustable::Bool
    # preconditioner, approximation of drhs!/dQ
    preconditioner::AbstractPreconditioner
end

Δt_is_adjustable(nlsolver::NonLinBESolver) = nlsolver.isadjustable

"""
    setup_backward_Euler_solver(solver::NonLinearBackwardEulerSolver, Q, α, tendency!)

Returns a concrete implementation of an `AbstractBackwardEulerSolver` that
will solve for `Q` in nonlinear systems of the form of
```
Q = Qhat + α f(Q, param, time)
```

Create an empty JacobianAction
Create an empty preconditioner if preconditioner_update_freq > 0
"""
function setup_backward_Euler_solver(
    nlbesolver::NonLinearBackwardEulerSolver,
    Q,
    α,
    f_imp!,
)
    # Create an empty JacobianAction (without operator)
    jvp! = JacobianAction(nothing, Q, nlbesolver.nlsolver.ϵ)
    # Create an empty preconditioner if preconditioner_update_freq > 0
    preconditioner_update_freq = nlbesolver.preconditioner_update_freq
    # construct an empty preconditioner
    preconditioner = (
        preconditioner_update_freq > 0 ?
        ColumnwiseLUPreconditioner(f_imp!, Q, preconditioner_update_freq) :
        NoPreconditioner()
    )
    NonLinBESolver(
        α,
        f_imp!,
        jvp!,
        nlbesolver.nlsolver,
        nlbesolver.isadjustable,
        preconditioner,
    )
end

"""
Nonlinear solve
    Update rhs! with α
    Update the rhs! in the jacobian action jvp!
"""
function (nlbesolver::NonLinBESolver)(Q, Qhat, α, p, t)
    rhs! = EulerOperator(nlbesolver.f_imp!, -α)
    nlbesolver.jvp!.rhs! = rhs!
    nonlinearsolve!(
        rhs!,
        nlbesolver.jvp!,
        nlbesolver.preconditioner,
        nlbesolver.nlsolver,
        Q,
        Qhat,
        p,
        t;
        max_newton_iters = nlbesolver.nlsolver.M,
    )
end


================================================
FILE: src/Numerics/ODESolvers/DifferentialEquations.jl
================================================

import DiffEqBase

export DiffEqJLSolver, DiffEqJLIMEXSolver

abstract type AbstractDiffEqJLSolver <: AbstractODESolver end

"""
    DiffEqJLSolver(f, alg, Q, args...; t0 = 0, p = nothing, kwargs...)

This is a time stepping object for explicitly time stepping the differential
equation given by the right-hand-side function `f` with the state `Q`, i.e.,

```math
\\dot{Q} = f(Q, t)
```

via a DifferentialEquations.jl DEAlgorithm, which includes support for
OrdinaryDiffEq.jl, Sundials.jl, and more.
""" mutable struct DiffEqJLSolver{I} <: AbstractDiffEqJLSolver integ::I steps::Int function DiffEqJLSolver( rhs!, alg, Q, args...; t0 = 0, p = nothing, kwargs..., ) prob = DiffEqBase.ODEProblem( (du, u, p, t) -> rhs!(du, u, p, t; increment = false), Q, (float(t0), typemax(typeof(float(t0)))), p, ) integ = DiffEqBase.init( prob, alg, args...; adaptive = false, save_everystep = false, save_start = false, save_end = false, kwargs..., ) new{typeof(integ)}(integ, 0) end end """ DiffEqJLSolver(f, RKA, RKB, RKC, Q; dt, t0 = 0) This is a time stepping object for explicitly time stepping the differential equation given by the right-hand-side function `f` with the state `Q`, i.e., ```math \\dot{Q} = f_I(Q, t) + f_E(Q, t) ``` via a DifferentialEquations.jl DEAlgorithm, which includes support for OrdinaryDiffEq.jl, Sundials.jl, and more. """ mutable struct DiffEqJLIMEXSolver{I} <: AbstractDiffEqJLSolver integ::I steps::Int function DiffEqJLIMEXSolver( rhs!, rhs_implicit!, alg, Q, args...; t0 = 0, p = nothing, kwargs..., ) prob = DiffEqBase.SplitODEProblem( (du, u, p, t) -> rhs_implicit!(du, u, p, t; increment = false), (du, u, p, t) -> rhs!(du, u, p, t; increment = false), Q, (float(t0), typemax(typeof(float(t0)))), p, ) integ = DiffEqBase.init( prob, alg, args...; adaptive = false, save_everystep = false, save_start = false, save_end = false, kwargs..., ) new{typeof(integ)}(integ, 0) end end gettime(solver::AbstractDiffEqJLSolver) = solver.integ.t getdt(solver::AbstractDiffEqJLSolver) = solver.integ.dt updatedt!(solver::AbstractDiffEqJLSolver, dt) = DiffEqBase.set_proposed_dt!(solver.integ, dt) updatetime!(solver::AbstractDiffEqJLSolver, t) = DiffEqBase.set_t!(solver.integ, t) isadjustable(solver::AbstractDiffEqJLSolver) = true # Is this isadaptive? Or something different? """ ODESolvers.general_dostep!(Q, solver::AbstractODESolver, p, timeend::Real, adjustfinalstep::Bool) Use the solver to step `Q` forward in time from the current time, to the time `timeend`. 
If `adjustfinalstep == true` then `dt` is adjusted so that the step does not
take the solution beyond the `timeend`.
"""
function general_dostep!(
    Q,
    solver::AbstractDiffEqJLSolver,
    p,
    timeend::Real;
    adjustfinalstep::Bool,
)
    integ = solver.integ
    # Register `timeend` as a stopping point if it is not already the next
    # one. Guard against an empty tstops collection, where `first` would
    # throw instead of returning a sentinel.
    if isempty(integ.opts.tstops) || first(integ.opts.tstops) !== timeend
        DiffEqBase.add_tstop!(integ, timeend)
    end
    # BUGFIX: previously this passed the bare name `time`, which resolves to
    # the `Base.time` *function* (no local `time` exists in this scope); the
    # target time for this step is `timeend`.
    dostep!(Q, solver, p, timeend)
    solver.integ.t
end

# Advance the wrapped DifferentialEquations.jl integrator by one step and
# copy the updated state back into `Q`. The trailing `slow_*` arguments exist
# only for signature compatibility with the other `dostep!` methods and are
# not used by this backend.
function dostep!(
    Q,
    solver::AbstractDiffEqJLSolver,
    p,
    time,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    in_slow_scaling = nothing,
)
    integ = solver.integ
    integ.p = p # Can this change?

    rv_Q = realview(Q)
    # Only push `Q` into the integrator if the caller mutated it externally,
    # and tell the integrator its cached state is stale.
    if integ.u != Q
        integ.u .= Q
        DiffEqBase.u_modified!(integ, true)
        # Will time always be correct?
    end

    DiffEqBase.step!(integ)

    rv_Q .= solver.integ.u
end

"""
    DiffEqJLConstructor(alg)

Return a closure `(F, Q; dt, t0) -> DiffEqJLSolver(...)` that builds a
[`DiffEqJLSolver`](@ref) for the algorithm `alg`; the `dt` keyword is
forwarded to `DiffEqBase.init` through the solver constructor.
"""
function DiffEqJLConstructor(alg)
    constructor =
        (F, Q; dt = 0, t0 = 0) -> DiffEqJLSolver(F, alg, Q; t0 = t0, dt = dt)
    return constructor
end


================================================
FILE: src/Numerics/ODESolvers/GenericCallbacks.jl
================================================

"""
    GenericCallbacks

This module defines interfaces and wrappers for callbacks to be used with an
`AbstractODESolver`.

A callback `cb` defines three methods:

- `GenericCallbacks.init!(cb, solver, Q, param, t)`, to be called at solver
  initialization.

- `GenericCallbacks.call!(cb, solver, Q, param, t)`, to be called after each
  time step: the return value dictates what action should be taken:

   * `0` or `nothing`: continue time stepping as usual
   * `1`: stop time stepping after all callbacks have been executed
   * `2`: stop time stepping immediately

- `GenericCallbacks.fini!(cb, solver, Q, param, t)`, to be called at solver
  finish.
Additionally, _wrapper_ callbacks can be used to execute the callbacks under
certain conditions:

 - [`AtInit`](@ref)
 - [`AtInitAndFini`](@ref)
 - [`EveryXWallTimeSeconds`](@ref)
 - [`EveryXSimulationTime`](@ref)
 - [`EveryXSimulationSteps`](@ref)

For convenience, the following objects can also be used as callbacks:

- A `Function` object `f`, `init!` and `fini!` are no-ops, and `call!` will
  call `f()`, and ignore the return value.
- A `Tuple` object will call `init!`, `call!` and `fini!` on each element
  of the tuple.
"""
module GenericCallbacks

export AtInit,
    AtInitAndFini,
    EveryXWallTimeSeconds,
    EveryXSimulationTime,
    EveryXSimulationSteps

using MPI

# Plain functions are valid callbacks: init!/fini! are no-ops and call!
# invokes the function with no arguments, discarding its return value.
init!(f::Function, solver, Q, param, t) = nothing
function call!(f::Function, solver, Q, param, t)
    f()
    return nothing
end
fini!(f::Function, solver, Q, param, t) = nothing

# Tuples of callbacks fan out to each element in order.
function init!(callbacks::Tuple, solver, Q, param, t)
    for cb in callbacks
        GenericCallbacks.init!(cb, solver, Q, param, t)
    end
end
function call!(callbacks::Tuple, solver, Q, param, t)
    # Aggregate the per-callback return codes: the maximum wins, and a `2`
    # (stop immediately) short-circuits the remaining callbacks.
    val = 0
    for cb in callbacks
        val_i = GenericCallbacks.call!(cb, solver, Q, param, t)
        # normalize `nothing` to 0 before taking the maximum
        val_i = (val_i === nothing) ? 0 : val_i
        val = max(val, val_i)
        if val == 2
            return val
        end
    end
    return val
end
function fini!(callbacks::Tuple, solver, Q, param, t)
    for cb in callbacks
        GenericCallbacks.fini!(cb, solver, Q, param, t)
    end
end

abstract type AbstractCallback end

"""
    AtInit(callback) <: AbstractCallback

A wrapper callback to execute `callback` at initialization as well as
after each interval.
""" struct AtInit <: AbstractCallback callback::Any end function init!(cb::AtInit, solver, Q, param, t) init!(cb.callback, solver, Q, param, t) call!(cb.callback, solver, Q, param, t) end function call!(cb::AtInit, solver, Q, param, t) call!(cb.callback, solver, Q, param, t) end function fini!(cb::AtInit, solver, Q, param, t) fini!(cb.callback, solver, Q, param, t) end """ AtInitAndFini(callback) <: AbstractCallback A wrapper callback to execute `callback` at initialization and at finish as well as after each interval. """ struct AtInitAndFini <: AbstractCallback callback::Any end function init!(cb::AtInitAndFini, solver, Q, param, t) init!(cb.callback, solver, Q, param, t) call!(cb.callback, solver, Q, param, t) end function call!(cb::AtInitAndFini, solver, Q, param, t) call!(cb.callback, solver, Q, param, t) end function fini!(cb::AtInitAndFini, solver, Q, param, t) call!(cb.callback, solver, Q, param, t) fini!(cb.callback, solver, Q, param, t) end """ EveryXWallTimeSeconds(callback, Δtime, mpicomm) A wrapper callback to execute `callback` every `Δtime` wallclock time seconds. `mpicomm` is used to syncronize runtime across MPI ranks. 
""" mutable struct EveryXWallTimeSeconds <: AbstractCallback "callback to wrap" callback::Any "wall time seconds between callbacks" Δtime::Real "MPI communicator" mpicomm::MPI.Comm "time of the last callback" lastcbtime_ns::UInt64 function EveryXWallTimeSeconds(callback, Δtime, mpicomm) lastcbtime_ns = zero(UInt64) new(callback, Δtime, mpicomm, lastcbtime_ns) end end function init!(cb::EveryXWallTimeSeconds, solver, Q, param, t) cb.lastcbtime_ns = time_ns() init!(cb.callback, solver, Q, param, t) end function call!(cb::EveryXWallTimeSeconds, solver, Q, param, t) # Check whether we should do a callback currtime_ns = time_ns() runtime = (currtime_ns - cb.lastcbtime_ns) * 1e-9 runtime = MPI.Allreduce(runtime, max, cb.mpicomm) if runtime < cb.Δtime return 0 else # Compute the next time to do a callback cb.lastcbtime_ns = currtime_ns return call!(cb.callback, solver, Q, param, t) end end function fini!(cb::EveryXWallTimeSeconds, solver, Q, param, t) fini!(cb.callback, solver, Q, param, t) end """ EveryXSimulationTime(f, Δtime) A wrapper callback to execute `callback` every `time` simulation time seconds. 
""" mutable struct EveryXSimulationTime <: AbstractCallback "callback to wrap" callback::Any "simulation time seconds between callbacks" Δtime::Real "time of the last callback" lastcbtime::Real function EveryXSimulationTime(callback, Δtime) new(callback, Δtime, 0) end end function init!(cb::EveryXSimulationTime, solver, Q, param, t) cb.lastcbtime = t init!(cb.callback, solver, Q, param, t) end function call!(cb::EveryXSimulationTime, solver, Q, param, t) # Check whether we should do a callback if (t - cb.lastcbtime) < cb.Δtime return 0 else # Compute the next time to do a callback cb.lastcbtime = t return call!(cb.callback, solver, Q, param, t) end end function fini!(cb::EveryXSimulationTime, solver, Q, param, t) fini!(cb.callback, solver, Q, param, t) end """ EveryXSimulationSteps(callback, Δsteps) A wrapper callback to execute `callback` every `nsteps` of the time stepper. """ mutable struct EveryXSimulationSteps <: AbstractCallback "callback to wrap" callback::Any "number of steps between callbacks" Δsteps::Int "number of steps since last callback" steps::Int function EveryXSimulationSteps(callback, Δsteps) new(callback, Δsteps, 0) end end function init!(cb::EveryXSimulationSteps, solver, Q, param, t) cb.steps = 0 init!(cb.callback, solver, Q, param, t) end function call!(cb::EveryXSimulationSteps, solver, Q, param, t) cb.steps += 1 if cb.steps < cb.Δsteps return 0 else cb.steps = 0 return call!(cb.callback, solver, Q, param, t) end end function fini!(cb::EveryXSimulationSteps, solver, Q, param, t) fini!(cb.callback, solver, Q, param, t) end end ================================================ FILE: src/Numerics/ODESolvers/LowStorageRungeKutta3NMethod.jl ================================================ export LowStorageRungeKutta3N export LS3NRK44Classic, LS3NRK33Heuns """ LowStorageRungeKutta3N(f, RKA, RKB, RKC, RKW, Q; dt, t0 = 0) This is a time stepping object for explicitly time stepping the differential equation given by the right-hand-side function `f` 
with the state `Q`, i.e.,

```math
\\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`. This
time stepping object is intended to be passed to the `solve!` command.

The constructor builds a low-storage Runge--Kutta scheme using 3N storage
based on the provided `RKA`, `RKB` and `RKC` coefficient arrays.
`RKC` (vector of length the number of stages `ns`) set nodal points position;
`RKA` and `RKB` (size: ns x 2) set weight for tendency and stage-state;
`RKW` (unused) provides RK weight (last row in Butcher's tableau).

The 3-N storage formulation from Fyfe (1966) is applicable to any 4-stage,
fourth-order RK scheme. It is implemented here as:
```math
\\hspace{-20mm} for ~~ j ~~ in ~ [1:ns]: \\hspace{10mm}
t_j = t^n + \\Delta t ~ rkC_j
```
```math
dQ_j = dQ^*_j + f(Q_j,t_j)
```
```math
Q_{j+1} = Q_{j} + \\Delta t \\{ rkB_{j,1} ~ dQ_j + rkB_{j,2} ~ dR_j \\}
```
```math
dR_{j+1} = dR_j + rkA_{j+1,2} ~ dQ_j
```
```math
dQ^*_{j+1} = rkA_{j+1,1} ~ dQ_j
```

The available concrete implementations are:

  - [`LS3NRK44Classic`](@ref)
  - [`LS3NRK33Heuns`](@ref)

### References

    @article{Fyfe1966,
      title = {Economical Evaluation of Runge-Kutta Formulae},
      author = {Fyfe, David J.},
      journal = {Mathematics of Computation},
      volume = {20},
      pages = {392--398},
      year = {1966}
    }
"""
mutable struct LowStorageRungeKutta3N{T, RT, AT, Nstages} <: AbstractODESolver
    "time step"
    dt::RT
    "time"
    t::RT
    "elapsed time steps"
    steps::Int
    "rhs function"
    rhs!::Any
    "Storage for RHS during the `LowStorageRungeKutta3N` update"
    dQ::AT
    "Secondary Storage for RHS during the `LowStorageRungeKutta3N` update"
    dR::AT
    "low storage RK coefficient array A (rhs scaling)"
    RKA::Array{RT, 2}
    "low storage RK coefficient array B (rhs add in scaling)"
    RKB::Array{RT, 2}
    "low storage RK coefficient vector C (time scaling)"
    RKC::Array{RT, 1}
    "RK weight coefficient vector W (last row in Butcher's tableau)"
    RKW::Array{RT, 1}

    function LowStorageRungeKutta3N(
        rhs!,
        RKA,
        RKB,
        RKC,
        RKW,
        Q::AT;
        dt = 0,
        t0 = 0,
    ) where {AT <: AbstractArray}
        T = eltype(Q)
        RT = real(T)
        # allocate and zero the two RHS storage arrays
        dQ = similar(Q)
        dR = similar(Q)
        fill!(dQ, 0)
        fill!(dR, 0)
        new{T, RT, AT, length(RKC)}(
            RT(dt),
            RT(t0),
            0,
            rhs!,
            dQ,
            dR,
            RKA,
            RKB,
            RKC,
            RKW,
        )
    end
end

"""
    dostep!(Q, lsrk3n::LowStorageRungeKutta3N, p, time::Real,
            nsubsteps::Int, iStage::Int,
            [slow_δ, slow_rv_dQ, slow_scaling])

Wrapper function to use the 3N low storage Runge--Kutta method `lsrk3n` as the
fast solver for a Multirate Infinitesimal Step method by calling

    dostep!(Q, lsrk3n::LowStorageRungeKutta3N, p, time::Real,
            [slow_δ, slow_rv_dQ, slow_scaling])

nsubsteps times.
"""
function dostep!(
    Q,
    lsrk3n::LowStorageRungeKutta3N,
    p,
    time::Real,
    nsubsteps::Int,
    iStage::Int,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    slow_scaling = nothing,
)
    # take `nsubsteps` fast steps, advancing the local time by dt each time
    for i in 1:nsubsteps
        dostep!(Q, lsrk3n, p, time, slow_δ, slow_rv_dQ, slow_scaling)
        time += lsrk3n.dt
    end
end

"""
    dostep!(Q, lsrk3n::LowStorageRungeKutta3N, p, time::Real,
            [slow_δ, slow_rv_dQ, slow_scaling])

Use the 3N low storage Runge--Kutta method `lsrk3n` to step `Q` forward in
time from the current time `time` to final time `time + getdt(lsrk3n)`.

If the optional parameter `slow_δ !== nothing` then `slow_rv_dQ * slow_δ` is
added as an additional ODE right-hand side source. If the optional parameter
`slow_scaling !== nothing` then after the final stage update the scaling
`slow_rv_dQ *= slow_scaling` is performed.
""" function dostep!( Q, lsrk3n::LowStorageRungeKutta3N, p, time, slow_δ = nothing, slow_rv_dQ = nothing, in_slow_scaling = nothing, ) dt = lsrk3n.dt RKA, RKB, RKC = lsrk3n.RKA, lsrk3n.RKB, lsrk3n.RKC rhs!, dQ, dR = lsrk3n.rhs!, lsrk3n.dQ, lsrk3n.dR rv_Q = realview(Q) rv_dQ = realview(dQ) rv_dR = realview(dR) groupsize = 256 rv_dR .= -0 for s in 1:length(RKC) rhs!(dQ, Q, p, time + RKC[s] * dt, increment = true) slow_scaling = nothing if s == length(RKC) slow_scaling = in_slow_scaling end # update solution and scale RHS event = Event(array_device(Q)) event = update!(array_device(Q), groupsize)( rv_dQ, rv_dR, rv_Q, RKA[s % length(RKC) + 1, 1], RKA[s % length(RKC) + 1, 2], RKB[s, 1], RKB[s, 2], dt, slow_δ, slow_rv_dQ, slow_scaling; ndrange = length(rv_Q), dependencies = (event,), ) wait(array_device(Q), event) end end @kernel function update!( dQ, dR, Q, rka1, rka2, rkb1, rkb2, dt, slow_δ, slow_dQ, slow_scaling, ) i = @index(Global, Linear) @inbounds begin if slow_δ !== nothing dQ[i] += slow_δ * slow_dQ[i] end Q[i] += rkb1 * dt * dQ[i] + rkb2 * dt * dR[i] dR[i] += rka2 * dQ[i] dQ[i] *= rka1 if slow_scaling !== nothing slow_dQ[i] *= slow_scaling end end end """ LS3NRK44Classic(f, Q; dt, t0 = 0) This function returns a [`LowStorageRungeKutta3N`](@ref) time stepping object for explicitly time stepping the differential equation given by the right-hand-side function `f` with the state `Q`, i.e., ```math \\dot{Q} = f(Q, t) ``` with the required time step size `dt` and optional initial time `t0`. This time stepping object is intended to be passed to the `solve!` command. This uses the classic 4-stage, fourth-order Runge--Kutta scheme in the low-storage implementation of Blum (1962) ### References @article {Blum1962, title = {A Modification of the Runge-Kutta Fourth-Order Method} author = {Blum, E. 
K.},
      journal = {Mathematics of Computation},
      volume = {16},
      pages = {176-187},
      year = {1962}
    }
"""
function LS3NRK44Classic(F, Q::AT; dt = 0, t0 = 0) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)

    # 4-stage coefficient tables, one row per stage, two columns:
    # column 1 scales dQ, column 2 scales the secondary storage dR
    RKA = [
        RT(0) RT(0)
        RT(0) RT(1)
        RT(-1 // 2) RT(0)
        RT(2) RT(-6)
    ]
    RKB = [
        RT(1 // 2) RT(0)
        RT(1 // 2) RT(-1 // 2)
        RT(1) RT(0)
        RT(1 // 6) RT(1 // 6)
    ]
    # stage nodal points
    RKC = [RT(0), RT(1 // 2), RT(1 // 2), RT(1)]
    # final RK weights (unused by the update; kept for reference)
    RKW = [RT(1 // 6), RT(1 // 3), RT(1 // 3), RT(1 // 6)]

    LowStorageRungeKutta3N(F, RKA, RKB, RKC, RKW, Q; dt = dt, t0 = t0)
end

"""
    LS3NRK33Heuns(f, Q; dt, t0 = 0)

This function returns a [`LowStorageRungeKutta3N`](@ref) time stepping object
for explicitly time stepping the differential equation given by the
right-hand-side function `f` with the state `Q`, i.e.,

```math
\\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`. This
time stepping object is intended to be passed to the `solve!` command.

This method uses the 3-stage, third-order Heun's Runge--Kutta scheme.

### References

    @article {Heun1900,
      title = {Neue Methoden zur approximativen Integration der
               Differentialgleichungen einer unabh\"{a}ngigen
               Ver\"{a}nderlichen}
      author = {Heun, Karl},
      journal = {Z. Math.
Phys},
      volume = {45},
      pages = {23--38},
      year = {1900}
    }
"""
function LS3NRK33Heuns(
    F,
    Q::AT;
    # NOTE(review): the default `dt = nothing` would fail in the constructor's
    # `RT(dt)` conversion; the sibling `LS3NRK44Classic` defaults to `dt = 0`.
    # Callers appear expected to always pass `dt` — confirm before changing.
    dt = nothing,
    t0 = 0,
) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)

    # 3-stage coefficient tables, one row per stage, two columns:
    # column 1 scales dQ, column 2 scales the secondary storage dR
    RKA = [
        RT(0) RT(0)
        RT(0) RT(1)
        RT(-1) RT(1 // 3)
    ]
    RKB = [
        RT(1 // 3) RT(0)
        RT(2 // 3) RT(-1 // 3)
        RT(3 // 4) RT(1 // 4)
    ]
    # stage nodal points
    RKC = [RT(0), RT(1 // 3), RT(2 // 3)]
    # final RK weights (unused by the update; kept for reference)
    RKW = [RT(1 // 4), RT(0), RT(3 // 4)]

    LowStorageRungeKutta3N(F, RKA, RKB, RKC, RKW, Q; dt = dt, t0 = t0)
end

================================================
FILE: src/Numerics/ODESolvers/LowStorageRungeKuttaMethod.jl
================================================

export LowStorageRungeKutta2N
export LSRK54CarpenterKennedy, LSRK144NiegemannDiehlBusch, LSRKEulerMethod

"""
    LowStorageRungeKutta2N(f, RKA, RKB, RKC, Q; dt, t0 = 0)

This is a time stepping object for explicitly time stepping the differential
equation given by the right-hand-side function `f` with the state `Q`, i.e.,

```math
\\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`. This
time stepping object is intended to be passed to the `solve!` command.

The constructor builds a low-storage Runge-Kutta scheme using 2N storage based
on the provided `RKA`, `RKB` and `RKC` coefficient arrays.
The available concrete implementations are:

  - [`LSRK54CarpenterKennedy`](@ref)
  - [`LSRK144NiegemannDiehlBusch`](@ref)
"""
mutable struct LowStorageRungeKutta2N{T, RT, AT, Nstages} <: AbstractODESolver
    "time step"
    dt::RT
    "time"
    t::RT
    "elapsed time steps"
    steps::Int
    "rhs function"
    rhs!::Any
    "Storage for RHS during the LowStorageRungeKutta update"
    dQ::AT
    "low storage RK coefficient vector A (rhs scaling)"
    RKA::NTuple{Nstages, RT}
    "low storage RK coefficient vector B (rhs add in scaling)"
    RKB::NTuple{Nstages, RT}
    "low storage RK coefficient vector C (time scaling)"
    RKC::NTuple{Nstages, RT}

    function LowStorageRungeKutta2N(
        rhs!,
        RKA,
        RKB,
        RKC,
        Q::AT;
        dt = 0,
        t0 = 0,
    ) where {AT <: AbstractArray}
        T = eltype(Q)
        RT = real(T)
        # allocate and zero the single RHS storage array
        dQ = similar(Q)
        fill!(dQ, 0)
        new{T, RT, AT, length(RKA)}(RT(dt), RT(t0), 0, rhs!, dQ, RKA, RKB, RKC)
    end
end

"""
    dostep!(Q, lsrk::LowStorageRungeKutta2N, p, time::Real,
            nsubsteps::Int, iStage::Int,
            [slow_δ, slow_rv_dQ, slow_scaling])

Wrapper function to use the 2N low storage Runge--Kutta method `lsrk` as the
fast solver for a Multirate Infinitesimal Step method by calling

    dostep!(Q, lsrk::LowStorageRungeKutta2N, p, time::Real,
            [slow_δ, slow_rv_dQ, slow_scaling])

nsubsteps times.
"""
function dostep!(
    Q,
    lsrk::LowStorageRungeKutta2N,
    p,
    time::Real,
    nsubsteps::Int,
    iStage::Int,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    slow_scaling = nothing,
)
    # take `nsubsteps` fast steps, advancing the local time by dt each time
    for i in 1:nsubsteps
        dostep!(Q, lsrk, p, time, slow_δ, slow_rv_dQ, slow_scaling)
        time += lsrk.dt
    end
end

"""
    dostep!(Q, lsrk::LowStorageRungeKutta2N, p, time::Real,
            [slow_δ, slow_rv_dQ, slow_scaling])

Use the 2N low storage Runge--Kutta method `lsrk` to step `Q` forward in time
from the current time `time` to final time `time + getdt(lsrk)`.

If the optional parameter `slow_δ !== nothing` then `slow_rv_dQ * slow_δ` is
added as an additional ODE right-hand side source. If the optional parameter
`slow_scaling !== nothing` then after the final stage update the scaling
`slow_rv_dQ *= slow_scaling` is performed.
"""
function dostep!(
    Q,
    lsrk::LowStorageRungeKutta2N,
    p,
    time,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    in_slow_scaling = nothing,
)
    dt = lsrk.dt

    RKA, RKB, RKC = lsrk.RKA, lsrk.RKB, lsrk.RKC
    rhs!, dQ = lsrk.rhs!, lsrk.dQ

    rv_Q = realview(Q)
    rv_dQ = realview(dQ)
    groupsize = 256

    for s in 1:length(RKA)
        # Accumulate the stage RHS on top of the (already rka-scaled) RHS of
        # the previous stage — the 2N-storage trick.
        rhs!(dQ, Q, p, time + RKC[s] * dt, increment = true)

        # The slow-rate scaling is only applied after the final stage.
        slow_scaling = nothing
        if s == length(RKA)
            slow_scaling = in_slow_scaling
        end
        # update solution and scale RHS
        event = Event(array_device(Q))
        event = update!(array_device(Q), groupsize)(
            rv_dQ,
            rv_Q,
            RKA[s % length(RKA) + 1],
            RKB[s],
            dt,
            slow_δ,
            slow_rv_dQ,
            slow_scaling;
            ndrange = length(rv_Q),
            dependencies = (event,),
        )
        wait(array_device(Q), event)
    end
end

# Elementwise LSRK2N stage update: optionally add the slow source, advance Q,
# then rescale the stored RHS for the next stage.
@kernel function update!(dQ, Q, rka, rkb, dt, slow_δ, slow_dQ, slow_scaling)
    i = @index(Global, Linear)
    @inbounds begin
        if slow_δ !== nothing
            dQ[i] += slow_δ * slow_dQ[i]
        end
        Q[i] += rkb * dt * dQ[i]
        dQ[i] *= rka
        if slow_scaling !== nothing
            slow_dQ[i] *= slow_scaling
        end
    end
end

"""
    dostep!(Q, lsrk::LowStorageRungeKutta2N, p::MRIParam, time::Real,
            dt::Real)

Use the 2N low storage Runge--Kutta method `lsrk` to step `Q` forward in time
from the current time `time` to final time `time + dt`.

If the optional parameter `slow_δ !== nothing` then `slow_rv_dQ * slow_δ` is
added as an additional ODE right-hand side source. If the optional parameter
`slow_scaling !== nothing` then after the final stage update the scaling
`slow_rv_dQ *= slow_scaling` is performed.
"""
function dostep!(Q, lsrk::LowStorageRungeKutta2N, mrip::MRIParam, time::Real)
    dt = lsrk.dt

    RKA, RKB, RKC = lsrk.RKA, lsrk.RKB, lsrk.RKC
    rhs!, dQ = lsrk.rhs!, lsrk.dQ

    rv_Q = realview(Q)
    rv_dQ = realview(dQ)
    groupsize = 256

    for s in 1:length(RKA)
        stage_time = time + RKC[s] * dt
        rhs!(dQ, Q, mrip.p, stage_time, increment = true)

        # update solution and scale RHS
        # τ is the fraction of the slow stage interval covered so far; it is
        # the argument of the polynomial MRI coupling coefficients.
        τ = (stage_time - mrip.ts) / mrip.Δts
        event = Event(array_device(Q))
        event = lsrk_mri_update!(array_device(Q), groupsize)(
            rv_dQ,
            rv_Q,
            RKA[s % length(RKA) + 1],
            RKB[s],
            τ,
            dt,
            mrip.γs,
            mrip.Rs;
            ndrange = length(rv_Q),
            dependencies = (event,),
        )
        wait(array_device(Q), event)
    end
end

# LSRK2N stage update with the MRI slow-stage forcing added:
# dQ += Σ_s γ̄_s(τ) * R_s before the usual update/rescale.
@kernel function lsrk_mri_update!(dQ, Q, rka, rkb, τ, dt, γs, Rs)
    i = @index(Global, Linear)
    @inbounds begin
        NΓ = length(γs)
        Ns = length(γs[1])
        dqi = dQ[i]
        for s in 1:Ns
            ri = Rs[s][i]
            # Horner evaluation of the degree-(NΓ-1) polynomial in τ.
            sc = γs[NΓ][s]
            for k in (NΓ - 1):-1:1
                sc = sc * τ + γs[k][s]
            end
            dqi += sc * ri
        end
        Q[i] += rkb * dt * dqi
        dQ[i] = rka * dqi
    end
end

"""
    LSRKEulerMethod(f, Q; dt, t0 = 0)

This function returns a [`LowStorageRungeKutta2N`](@ref) time stepping object
for explicitly time stepping the differential equation given by the
right-hand-side function `f` with the state `Q`, i.e.,

```math
\\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`.  This
time stepping object is intended to be passed to the `solve!` command.

This method uses the LSRK2N framework to implement a simple Eulerian forward
time stepping scheme for the use of debugging.
### References
"""
function LSRKEulerMethod(
    F,
    Q::AT;
    dt = nothing,
    t0 = 0,
) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)

    # A single forward-Euler stage expressed as a (trivial) 2N tableau.
    RKA = (RT(0),)
    RKB = (RT(1),)
    RKC = (RT(0),)

    LowStorageRungeKutta2N(F, RKA, RKB, RKC, Q; dt = dt, t0 = t0)
end

"""
    LSRK54CarpenterKennedy(f, Q; dt, t0 = 0)

This function returns a [`LowStorageRungeKutta2N`](@ref) time stepping object
for explicitly time stepping the differential equation given by the
right-hand-side function `f` with the state `Q`, i.e.,

```math
\\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`.  This
time stepping object is intended to be passed to the `solve!` command.

This uses the fourth-order, low-storage, Runge--Kutta scheme of Carpenter
and Kennedy (1994) (in their notation (5,4) 2N-Storage RK scheme).

### References

    @TECHREPORT{CarpenterKennedy1994,
      author = {M.~H. Carpenter and C.~A. Kennedy},
      title = {Fourth-order {2N-storage} {Runge-Kutta} schemes},
      institution = {National Aeronautics and Space Administration},
      year = {1994},
      number = {NASA TM-109112},
      address = {Langley Research Center, Hampton, VA},
    }
"""
function LSRK54CarpenterKennedy(
    F,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)

    RKA = (
        RT(0),
        RT(-567301805773 // 1357537059087),
        RT(-2404267990393 // 2016746695238),
        RT(-3550918686646 // 2091501179385),
        RT(-1275806237668 // 842570457699),
    )

    RKB = (
        RT(1432997174477 // 9575080441755),
        RT(5161836677717 // 13612068292357),
        RT(1720146321549 // 2090206949498),
        RT(3134564353537 // 4481467310338),
        RT(2277821191437 // 14882151754819),
    )

    RKC = (
        RT(0),
        RT(1432997174477 // 9575080441755),
        RT(2526269341429 // 6820363962896),
        RT(2006345519317 // 3224310063776),
        RT(2802321613138 // 2924317926251),
    )

    LowStorageRungeKutta2N(F, RKA, RKB, RKC, Q; dt = dt, t0 = t0)
end

"""
    LSRK144NiegemannDiehlBusch(f, Q; dt, t0 = 0)

This function returns a [`LowStorageRungeKutta2N`](@ref) time stepping object
for explicitly time stepping the differential
equation given by the right-hand-side function `f` with the state `Q`, i.e.,

```math
\\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`.  This
time stepping object is intended to be passed to the `solve!` command.

This uses the fourth-order, 14-stage, low-storage, Runge--Kutta scheme of
Niegemann, Diehl, and Busch (2012) with optimized stability region

### References

 - [Niegemann2012](@cite)
"""
function LSRK144NiegemannDiehlBusch(
    F,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)

    RKA = (
        RT(0),
        RT(-0.7188012108672410),
        RT(-0.7785331173421570),
        RT(-0.0053282796654044),
        RT(-0.8552979934029281),
        RT(-3.9564138245774565),
        RT(-1.5780575380587385),
        RT(-2.0837094552574054),
        RT(-0.7483334182761610),
        RT(-0.7032861106563359),
        RT(0.0013917096117681),
        RT(-0.0932075369637460),
        RT(-0.9514200470875948),
        RT(-7.1151571693922548),
    )

    RKB = (
        RT(0.0367762454319673),
        RT(0.3136296607553959),
        RT(0.1531848691869027),
        RT(0.0030097086818182),
        RT(0.3326293790646110),
        RT(0.2440251405350864),
        RT(0.3718879239592277),
        RT(0.6204126221582444),
        RT(0.1524043173028741),
        RT(0.0760894927419266),
        RT(0.0077604214040978),
        RT(0.0024647284755382),
        RT(0.0780348340049386),
        RT(5.5059777270269628),
    )

    RKC = (
        RT(0),
        RT(0.0367762454319673),
        RT(0.1249685262725025),
        RT(0.2446177702277698),
        RT(0.2476149531070420),
        RT(0.2969311120382472),
        RT(0.3978149645802642),
        RT(0.5270854589440328),
        RT(0.6981269994175695),
        RT(0.8190890835352128),
        RT(0.8527059887098624),
        RT(0.8604711817462826),
        RT(0.8627060376969976),
        RT(0.8734213127600976),
    )

    LowStorageRungeKutta2N(F, RKA, RKB, RKC, Q; dt = dt, t0 = t0)
end

================================================
FILE: src/Numerics/ODESolvers/MultirateInfinitesimalGARKDecoupledImplicit.jl
================================================

export MRIGARKDecoupledImplicit
export MRIGARKIRK21aSandu,
    MRIGARKESDIRK34aSandu,
    MRIGARKESDIRK46aSandu,
    MRIGARKESDIRK23LSA,
    MRIGARKESDIRK24LSA

"""
    MRIGARKDecoupledImplicit(f!,
                             backward_euler_solver, fastsolver, Γs, γ̂s, Q,
                             Δt, t0)

Construct a decoupled implicit MultiRate Infinitesimal General-structure
Additive Runge--Kutta (MRI-GARK) scheme to solve

```math
\\dot{y} = f(y, t) + g(y, t)
```

where `f` is the slow tendency function and `g` is the fast tendency
function; see Sandu (2019).

The fast tendency is integrated using the `fastsolver` and the slow tendency
using the MRI-GARK scheme. Since this is a decoupled, implicit MRI-GARK there
is no implicit coupling between the fast and slow tendencies.

The `backward_euler_solver` should be of type `AbstractBackwardEulerSolver`
or `LinearBackwardEulerSolver`, and is used to perform the backward Euler
solves for `y` given the slow tendency function, namely

```math
y = z + α f(y, t; p)
```

Currently only [`LowStorageRungeKutta2N`](@ref) schemes are supported for
`fastsolver`

The coefficients defined by `γ̂s` can be used for an embedded scheme (only
the last stage is different).

The available concrete implementations are:

  - [`MRIGARKIRK21aSandu`](@ref)
  - [`MRIGARKESDIRK34aSandu`](@ref)
  - [`MRIGARKESDIRK46aSandu`](@ref)

### References

 - [Sandu2019](@cite)
"""
mutable struct MRIGARKDecoupledImplicit{
    T,
    RT,
    AT,
    Nstages,
    NΓ,
    FS,
    Nx,
    Ny,
    Nx_Ny,
    BE,
} <: AbstractODESolver
    "time step"
    dt::RT
    "time"
    t::RT
    "elapsed time steps"
    steps::Int
    "rhs function"
    slowrhs!::Any
    "backward Euler solver"
    besolver!::BE
    "Storage for RHS during the `MRIGARKDecoupledImplicit` update"
    Rstages::NTuple{Nstages, AT}
    "Storage for the implicit solver data vector"
    Qhat::AT
    "RK coefficient matrices for coupling coefficients"
    Γs::NTuple{NΓ, SArray{Tuple{Nx, Ny}, RT, 2, Nx_Ny}}
    "RK coefficient matrices for embedded scheme"
    γ̂s::NTuple{NΓ, SArray{NTuple{1, Ny}, RT, 1, Ny}}
    "RK coefficient vector C (time scaling)"
    Δc::SArray{NTuple{1, Nstages}, RT, 1, Nstages}
    "fast solver"
    fastsolver::FS

    function MRIGARKDecoupledImplicit(
        slowrhs!,
        backward_euler_solver,
        fastsolver,
        Γs,
        γ̂s,
        Q::AT,
        dt,
        t0,
    ) where {AT <: AbstractArray}
        NΓ = length(Γs)
        T = eltype(Q)
        RT = real(T)

        # Compute the Δc coefficients (only explicit values kept)
        Δc = sum(Γs[1], dims = 2)

        # Couple of sanity checks on the assumptions of coefficients being of
        # the decoupled implicit structure of Sandu (2019): even rows are
        # implicit corrections with zero time increment.
        @assert all(isapprox.(Δc[2:2:end], 0; atol = 2 * eps(RT)))
        Δc = Δc[1:2:(end - 1)]

        # number of slow RHS values we need to keep
        Nstages = length(Δc)

        # Couple more sanity checks on the decoupled implicit structure
        @assert Nstages == size(Γs[1], 2) - 1
        @assert Nstages == div(size(Γs[1], 1), 2)

        # Scale in the Δc to the Γ and γ̂, and convert to real type
        Γs = ntuple(k -> RT.(Γs[k]), NΓ)
        γ̂s = ntuple(k -> RT.(γ̂s[k]), NΓ)

        # Convert to real type
        Δc = RT.(Δc)

        # create storage for the stage values
        Rstages = ntuple(i -> similar(Q), Nstages)
        Qhat = similar(Q)

        FS = typeof(fastsolver)
        Nx, Ny = size(Γs[1])

        # Set up the backward Euler solver with the initial value of α
        α = dt * Γs[1][2, 2]
        besolver! =
            setup_backward_Euler_solver(backward_euler_solver, Q, α, slowrhs!)
        @assert besolver! isa AbstractBackwardEulerSolver
        BE = typeof(besolver!)

        new{T, RT, AT, Nstages, NΓ, FS, Nx, Ny, Nx * Ny, BE}(
            RT(dt),
            RT(t0),
            0,
            slowrhs!,
            besolver!,
            Rstages,
            Qhat,
            Γs,
            γ̂s,
            Δc,
            fastsolver,
        )
    end
end

# Update the time step; the backward Euler solver's α depends on dt, so it
# must be reconfigured as well.
function updatedt!(mrigark::MRIGARKDecoupledImplicit, dt)
    @assert Δt_is_adjustable(mrigark.besolver!)
    α = dt * mrigark.Γs[1][2, 2]
    update_backward_Euler_solver!(mrigark.besolver!, mrigark.Qhat, α)
    mrigark.dt = dt
end

function dostep!(Q, mrigark::MRIGARKDecoupledImplicit, param, time::Real)
    dt = mrigark.dt
    fast = mrigark.fastsolver
    Rs = mrigark.Rstages
    Δc = mrigark.Δc
    Nstages = length(Δc)
    groupsize = 256
    slowrhs! = mrigark.slowrhs!
    Γs = mrigark.Γs
    NΓ = length(Γs)

    ts = time
    groupsize = 256

    besolver! = mrigark.besolver!
    Qhat = mrigark.Qhat

    # Since decoupled implicit methods are being used, there is a purely
    # explicit stage followed by an implicit correction stage, hence the
    # divide by two
    for s in 1:Nstages
        # Stage dt
        dts = Δc[s] * dt
        stage_end_time = ts + dts

        # initialize the slow tendency stage value
        slowrhs!(Rs[s], Q, param, ts, increment = false)

        # advance fast solution to time stage_end_time
        γs = ntuple(k -> ntuple(j -> Γs[k][2s - 1, j] / Δc[s], s), NΓ)
        mriparam = MRIParam(param, γs, realview.(Rs[1:s]), ts, dts)
        updatetime!(mrigark.fastsolver, ts)
        solve!(Q, mrigark.fastsolver, mriparam; timeend = stage_end_time)

        # correct with implicit slow solve
        # Qhat = Q + ∑_j Σ_k Γ_{sjk} dt Rs[j] / k
        # (Divide by k arises from the integration γ_{ij}(τ) in Sandu (2019);
        # see Equation (2.2b) and Definition 2.2
        γs = ntuple(k -> ntuple(j -> dt * Γs[k][2s, j] / k, s), NΓ)
        event = Event(array_device(Q))
        event = mri_create_Qhat!(array_device(Q), groupsize)(
            realview(Qhat),
            realview(Q),
            γs,
            mriparam.Rs;
            ndrange = length(realview(Q)),
            dependencies = (event,),
        )
        wait(array_device(Q), event)

        # Solve: Q = Qhat + α fslow(Q, stage_end_time)
        α = dt * Γs[1][2s, s + 1]
        besolver!(Q, Qhat, α, param, stage_end_time)

        # update time
        ts += dts
    end
end

# Compute: Qhat = Q + ∑_j Σ_k Γ_{sjk} dt Rs[j] / k
@kernel function mri_create_Qhat!(Qhat, Q, γs, Rs)
    i = @index(Global, Linear)
    @inbounds begin
        NΓ = length(γs)
        Ns = length(γs[1])
        qhat = Q[i]
        for s in 1:Ns
            ri = Rs[s][i]
            # The γs passed in are already divided by k, so a plain sum over
            # the coefficient sets suffices here.
            sc = γs[1][s]
            for k in 2:NΓ
                sc += γs[k][s]
            end
            qhat += sc * ri
        end
        Qhat[i] = qhat
    end
end

"""
    MRIGARKIRK21aSandu(f!, fastsolver, Q; dt, t0 = 0)

The 2nd order, 2 stage implicit scheme from Sandu (2019).
"""
function MRIGARKIRK21aSandu(
    slowrhs!,
    backward_euler_solver,
    fastsolver,
    Q;
    dt,
    t0 = 0,
)
    #! format: off
    Γ0 = [
         1 // 1 0 // 1
        -1 // 2 1 // 2
    ]
    γ̂0 = [-1 // 2 1 // 2]
    #! format: on
    MRIGARKDecoupledImplicit(
        slowrhs!,
        backward_euler_solver,
        fastsolver,
        (Γ0,),
        (γ̂0,),
        Q,
        dt,
        t0,
    )
end

"""
    MRIGARKESDIRK34aSandu(f!, fastsolver, Q; dt, t0=0)

The 3rd order, 4 stage decoupled implicit scheme from Sandu (2019).
"""
function MRIGARKESDIRK34aSandu(
    slowrhs!,
    backward_euler_solver,
    fastsolver,
    Q;
    dt,
    t0 = 0,
)
    T = real(eltype(Q))
    # λ is the implicit diagonal; it must satisfy the cubic checked below.
    μ = acot(2 * sqrt(T(2))) / 3
    λ = 1 - cos(μ) / sqrt(T(2)) + sqrt(T(3 // 2)) * sin(μ)
    @assert isapprox(-1 + 9λ - 18 * λ^2 + 6 * λ^3, 0, atol = 2 * eps(T))
    #! format: off
    Γ0 = [
        T(1 // 3) 0 0 0
        -λ λ 0 0
        (3-10λ) / (24λ-6) (5-18λ) / (6-24λ) 0 0
        (-24λ^2+6λ+1) / (6-24λ) (-48λ^2+12λ+1) / (24λ-6) λ 0
        (3-16λ) / (12-48λ) (48λ^2-21λ+2) / (12λ-3) (3-16λ) / 4 0
        -λ 0 0 λ
    ]
    γ̂0 = [0 0 0 0]
    #! format: on
    MRIGARKDecoupledImplicit(
        slowrhs!,
        backward_euler_solver,
        fastsolver,
        (Γ0,),
        (γ̂0,),
        Q,
        dt,
        t0,
    )
end

"""
    MRIGARKESDIRK46aSandu(f!, fastsolver, Q; dt, t0=0)

The 4th order, 6 stage decoupled implicit scheme from Sandu (2019).
"""
function MRIGARKESDIRK46aSandu(
    slowrhs!,
    implicitsolve!,
    fastsolver,
    Q;
    dt,
    t0 = 0,
)
    T = real(eltype(Q))
    μ = acot(2 * sqrt(T(2))) / 3
    λ = 1 - cos(μ) / sqrt(T(2)) + sqrt(T(3 // 2)) * sin(μ)
    @assert isapprox(-1 + 9λ - 18 * λ^2 + 6 * λ^3, 0, atol = 2 * eps(T))
    #! format: off
    # 10×6 coupling tableaux: odd rows are explicit steps, even rows the
    # implicit corrections (decoupled implicit structure of Sandu 2019).
    # Row breaks reconstructed to 10×6 consistent with the constructor's
    # dimension assertions.
    Γ0 = [
        1 // 5 0 // 1 0 // 1 0 // 1 0 // 1 0 // 1
        -1 // 4 1 // 4 0 // 1 0 // 1 0 // 1 0 // 1
        1771023115159 // 1929363690800 -1385150376999 // 1929363690800 0 // 1 0 // 1 0 // 1 0 // 1
        914009 // 345800 -1000459 // 345800 1 // 4 0 // 1 0 // 1 0 // 1
        18386293581909 // 36657910125200 5506531089 // 80566835440 -178423463189 // 482340922700 0 // 1 0 // 1 0 // 1
        36036097 // 8299200 4621 // 118560 -38434367 // 8299200 1 // 4 0 // 1 0 // 1
        -247809665162987 // 146631640500800 10604946373579 // 14663164050080 10838126175385 // 5865265620032 -24966656214317 // 36657910125200 0 // 1 0 // 1
        38519701 // 11618880 10517363 // 9682400 -23284701 // 19364800 -10018609 // 2904720 1 // 4 0 // 1
        -52907807977903 // 33838070884800 74846944529257 // 73315820250400 365022522318171 // 146631640500800 -20513210406809 // 109973730375600 -2918009798 // 1870301537 0 // 1
        19 // 100 -73 // 300 127 // 300 127 // 300 -313 // 300 1 // 4
    ]
    Γ1 = [
        0 // 1 0 // 1 0 // 1 0 // 1 0 // 1 0 // 1
        0 // 1 0 // 1 0 // 1 0 // 1 0 // 1 0 // 1
        -1674554930619 // 964681845400 1674554930619 // 964681845400 0 // 1 0 // 1 0 // 1 0 // 1
        -1007739 // 172900 1007739 // 172900 0 // 1 0 // 1 0 // 1 0 // 1
        -8450070574289 // 18328955062600 -39429409169 // 40283417720 173621393067 // 120585230675 0 // 1 0 // 1 0 // 1
        -122894383 // 16598400 14501 // 237120 121879313 // 16598400 0 // 1 0 // 1 0 // 1
        32410002731287 // 15434909526400 -46499276605921 // 29326328100160 -34914135774643 // 11730531240064 45128506783177 // 18328955062600 0 // 1 0 // 1
        -128357303 // 23237760 -35433927 // 19364800 71038479 // 38729600 8015933 // 1452360 0 // 1 0 // 1
        136721604296777 // 67676141769600 -349632444539303 // 146631640500800 -1292744859249609 // 293263281001600 8356250416309 // 54986865187800 17282943803 // 3740603074 0 // 1
        3 // 25 -29 // 300 71 // 300 71 // 300 -149 // 300 0 // 1
    ]
    # Embedded-scheme coefficients (only the last stage differs).
    γ̂0 = [-1 // 4 5595 // 8804 -2445 // 8804 -4225 // 8804 2205 // 4402 -567 // 4402]
    γ̂1 = [0 // 1 0 // 1 0 // 1 0 // 1 0 // 1 0 // 1]
    #! format: on
    MRIGARKDecoupledImplicit(
        slowrhs!,
        implicitsolve!,
        fastsolver,
        (Γ0, Γ1),
        (γ̂0, γ̂1),
        Q,
        dt,
        t0,
    )
end

"""
    MRIGARKESDIRK23LSA(f!, fastsolver, Q; dt, t0 = 0, δ = 0)

A 2nd order, 3 stage decoupled implicit scheme. It is based on L-Stable,
stiffly-accurate ESDIRK scheme of Bank et al (1985); see also Kennedy and
Carpenter (2016). The free parameter `δ` can take any values for accuracy.

### References

 - [Bank1985](@cite)
 - [KennedyCarpenter2016](@cite)
"""
function MRIGARKESDIRK23LSA(
    slowrhs!,
    implicitsolve!,
    fastsolver,
    Q;
    dt,
    t0 = 0,
    δ = 0,
)
    T = real(eltype(Q))
    rt2 = sqrt(T(2))
    #! format: off
    Γ0 = [
        2 - rt2 0 0
        (1 - rt2) / rt2 (rt2 - 1) / rt2 0
        δ rt2 - 1 - δ 0
        (3 - 2rt2 * (1 + δ)) / 2rt2 (δ * 2rt2 - 1) / 2rt2 (rt2 - 1) / rt2
    ]
    γ̂0 = [0 0 0]
    #! format: on
    Δc = sum(Γ0, dims = 2)

    # Check that the explicit steps match the Δc values:
    @assert Γ0[1, 1] ≈ Δc[1]
    @assert Γ0[3, 1] + Γ0[3, 2] ≈ Δc[3]

    # Check the implicit stages have no Δc
    @assert isapprox(Γ0[2, 1] + Γ0[2, 2], 0, atol = eps(T))
    @assert isapprox(Γ0[4, 1] + Γ0[4, 2] + Γ0[4, 3], 0, atol = eps(T))

    # Check consistency with the original scheme
    @assert Γ0[1, 1] + Γ0[2, 1] ≈ 1 - 1 / rt2
    @assert Γ0[2, 2] ≈ 1 - 1 / rt2
    @assert Γ0[1, 1] + Γ0[2, 1] + Γ0[3, 1] + Γ0[4, 1] ≈ 1 / (2 * rt2)
    @assert Γ0[2, 2] + Γ0[3, 2] + Γ0[4, 2] ≈ 1 / (2 * rt2)
    @assert Γ0[4, 3] ≈ 1 - 1 / rt2

    MRIGARKDecoupledImplicit(
        slowrhs!,
        implicitsolve!,
        fastsolver,
        (Γ0,),
        (γ̂0,),
        Q,
        dt,
        t0,
    )
end

"""
    MRIGARKESDIRK24LSA(f!, fastsolver, Q; dt, t0 = 0,
                       γ = 0.2,
                       c3 = (2γ + 1) / 2,
                       a32 = 0.2,
                       α = -0.1,
                       β1 = c3 / 10,
                       β2 = c3 / 10,
    )

A 2nd order, 4 stage decoupled implicit scheme. It is based on an L-Stable,
stiffly-accurate ESDIRK.
"""
function MRIGARKESDIRK24LSA(
    slowrhs!,
    implicitsolve!,
    fastsolver,
    Q;
    dt,
    t0 = 0,
    γ = 0.2,
    c3 = (2γ + 1) / 2,
    a32 = 0.2,
    α = -0.1,
    β1 = c3 / 10,
    β2 = c3 / 10,
)
    T = real(eltype(Q))

    # Check L-Stability constraint; bound comes from Kennedy and Carpenter
    # (2016) Table 5.
    @assert 0.1804253064293985641345831 ≤ γ < 1 // 2

    # check the stage times are increasing
    @assert 2γ < c3 < 1

    # Original RK scheme
    # Enforce L-Stability
    b3 = (2 * (1 - γ)^2 - 1) / 4 / a32
    # Enforce 2nd order accuracy
    b2 = (1 - 2γ - 2b3 * c3) / 4γ
    A = [
        0 0 0 0
        γ γ 0 0
        c3 - a32-γ a32 γ 0
        1 - b2 - b3-γ b2 b3 γ
    ]
    c = sum(A, dims = 2)
    b = A[end, :]

    # Check 2nd order accuracy
    @assert sum(b) ≈ 1
    @assert 2 * sum(A' * b) ≈ 1

    # Setup the GARK Tableau: odd rows are the explicit increments, even rows
    # the implicit corrections; rows telescope so partial sums recover A.
    Δc = [c[2], 0, c[3] - c[2], 0, c[4] - c[3], 0]
    Γ0 = zeros(T, 6, 4)
    Γ0[1, 1] = Δc[1]
    Γ0[2, 1] = A[2, 1] - Γ0[1, 1]
    Γ0[2, 2] = A[2, 2]
    Γ0[3, 1] = α
    Γ0[3, 2] = Δc[3] - Γ0[3, 1]
    Γ0[4, 1] = A[3, 1] - Γ0[1, 1] - Γ0[2, 1] - Γ0[3, 1]
    Γ0[4, 2] = A[3, 2] - Γ0[1, 2] - Γ0[2, 2] - Γ0[3, 2]
    Γ0[4, 3] = A[3, 3]
    Γ0[5, 1] = β1
    Γ0[5, 2] = β2
    Γ0[5, 3] = Δc[5] - Γ0[5, 1] - Γ0[5, 2]
    Γ0[6, 1] = A[4, 1] - Γ0[1, 1] - Γ0[2, 1] - Γ0[3, 1] - Γ0[4, 1] - Γ0[5, 1]
    Γ0[6, 2] = A[4, 2] - Γ0[1, 2] - Γ0[2, 2] - Γ0[3, 2] - Γ0[4, 2] - Γ0[5, 2]
    Γ0[6, 3] = A[4, 3] - Γ0[1, 3] - Γ0[2, 3] - Γ0[3, 3] - Γ0[4, 3] - Γ0[5, 3]
    Γ0[6, 4] = A[4, 4]
    γ̂0 = [0 0 0 0]

    # Check consistency with original scheme
    @assert all(A ≈ [0 0 0 0; accumulate(+, Γ0, dims = 1)[2:2:end, :]])
    @assert all(Δc ≈ sum(Γ0, dims = 2))

    MRIGARKDecoupledImplicit(
        slowrhs!,
        implicitsolve!,
        fastsolver,
        (Γ0,),
        (γ̂0,),
        Q,
        dt,
        t0,
    )
end

================================================
FILE: src/Numerics/ODESolvers/MultirateInfinitesimalGARKExplicit.jl
================================================

export MRIGARKExplicit
export MRIGARKERK33aSandu, MRIGARKERK45aSandu

"""
    MRIParam(p, γs, Rs, ts, Δts)

Construct a type for passing the data around for the `MRIGARKExplicit`
explicit time stepper to follow on methods. `p` is the original user defined
ODE parameters, `γs` and `Rs` are the MRI parameters and stage values,
respectively. `ts` and `Δts` are the stage time and stage time step.
"""
struct MRIParam{P, T, AT, N, M}
    # User parameter object (possibly itself an MRIParam when nested).
    p::P
    # One SArray of per-stage coupling coefficients per polynomial order.
    γs::NTuple{M, SArray{NTuple{1, N}, T, 1, N}}
    # Slow-stage tendencies computed so far.
    Rs::NTuple{N, AT}
    # Slow stage time and slow stage time step.
    ts::T
    Δts::T
    function MRIParam(
        p::P,
        γs::NTuple{M},
        Rs::NTuple{N, AT},
        ts,
        Δts,
    ) where {P, M, N, AT}
        T = eltype(γs[1])
        new{P, T, AT, N, M}(p, γs, Rs, ts, Δts)
    end
end

# We overload get property to access the original param
function Base.getproperty(mriparam::MRIParam, s::Symbol)
    if s === :p
        # Unwrap nested MRIParam so user code always sees its own params.
        p = getfield(mriparam, :p)
        return p isa MRIParam ? p.p : p
    else
        getfield(mriparam, s)
    end
end

"""
    MRIGARKExplicit(f!, fastsolver, Γs, γ̂s, Q, Δt, t0)

Construct an explicit MultiRate Infinitesimal General-structure Additive
Runge--Kutta (MRI-GARK) scheme to solve

```math
\\dot{y} = f_{slow}(y, t) + f_{fast}(y, t)
```

where `f_{slow}` is the slow tendency function and `f_{fast}` is the fast
tendency function; see Sandu (2019).

The fast tendency is integrated using the `fastsolver` and the slow tendency
using the MRI-GARK scheme. Namely, at each stage the scheme solves

```math
\\begin{aligned}
v(T_i) &= Y_i \\\\
\\dot{v} &= f(v, t) + \\sum_{j=1}^{i} \\bar{γ}_{ij}(t) R_j \\\\
\\bar{γ}_{ijk}(t) &= \\sum_{k=0}^{NΓ-1} γ_{ijk} τ(t)^k / Δc_s \\\\
τ(t) &= (t - t_s) / Δt \\\\
Y_{i+1} &= v(T_i + c_s * Δt)
\\end{aligned}
```

where ``Y_1 = y_n`` and ``y_{n+1} = Y_{Nstages+1}``.

Here ``R_j = g(Y_j, t_0 + c_j * Δt)`` is the tendency for stage ``j``,
``γ_{ijk}`` are the GARK coupling coefficients, ``NΓ`` is the number of sets
of GARK coupling coefficients there are ``Δc_s = \\sum_{j=1}^{Nstages}
γ_{sj1} = c_{s+1} - c_s`` is the scaling increment between stage times. The
ODE for ``v(t)`` is solved using the `fastsolver`. Note that this form of
the scheme is based on Definition 2.2 of Sandu (2019), but ODE for ``v(t)``
is written to go from ``t_s`` to ``T_i + c_s * Δt`` as opposed to ``0`` to
``1``.

Currently only [`LowStorageRungeKutta2N`](@ref) schemes are supported for
`fastsolver`

The coefficients defined by `γ̂s` can be used for an embedded scheme (only
the last stage is different).
The available concrete implementations are:

  - [`MRIGARKERK33aSandu`](@ref)
  - [`MRIGARKERK45aSandu`](@ref)

### References

 - [Sandu2019](@cite)
"""
mutable struct MRIGARKExplicit{T, RT, AT, Nstages, NΓ, FS, Nstages_sq} <:
               AbstractODESolver
    "time step"
    dt::RT
    "time"
    t::RT
    "elapsed time steps"
    steps::Int
    "rhs function"
    slowrhs!::Any
    "Storage for RHS during the `MRIGARKExplicit` update"
    Rstages::NTuple{Nstages, AT}
    "RK coefficient matrices for coupling coefficients"
    Γs::NTuple{NΓ, SArray{NTuple{2, Nstages}, RT, 2, Nstages_sq}}
    "RK coefficient matrices for embedded scheme"
    γ̂s::NTuple{NΓ, SArray{NTuple{1, Nstages}, RT, 1, Nstages}}
    "RK coefficient vector C (time scaling)"
    Δc::SArray{NTuple{1, Nstages}, RT, 1, Nstages}
    "fast solver"
    fastsolver::FS

    function MRIGARKExplicit(
        slowrhs!,
        fastsolver,
        Γs,
        γ̂s,
        Q::AT,
        dt,
        t0,
    ) where {AT <: AbstractArray}
        NΓ = length(Γs)
        Nstages = size(Γs[1], 1)
        T = eltype(Q)
        RT = real(T)

        # Compute the Δc coefficients
        Δc = sum(Γs[1], dims = 2)[:]

        # Scale in the Δc to the Γ and γ̂, and convert to real type
        Γs = ntuple(k -> RT.(Γs[k] ./ Δc), NΓ)
        γ̂s = ntuple(k -> RT.(γ̂s[k] / Δc[Nstages]), NΓ)

        # Convert to real type
        Δc = RT.(Δc)

        # create storage for the stage values
        Rstages = ntuple(i -> similar(Q), Nstages)

        FS = typeof(fastsolver)
        new{T, RT, AT, Nstages, NΓ, FS, Nstages^2}(
            RT(dt),
            RT(t0),
            0,
            slowrhs!,
            Rstages,
            Γs,
            γ̂s,
            Δc,
            fastsolver,
        )
    end
end

function dostep!(Q, mrigark::MRIGARKExplicit, param, time::Real)
    dt = mrigark.dt
    fast = mrigark.fastsolver
    Rs = mrigark.Rstages
    Δc = mrigark.Δc
    Nstages = length(Δc)
    slowrhs! = mrigark.slowrhs!
    Γs = mrigark.Γs
    NΓ = length(Γs)

    ts = time
    groupsize = 256
    for s in 1:Nstages
        # Stage dt
        dts = Δc[s] * dt

        # Unwrap the user parameters if we are nested inside another MRI step.
        p = param isa MRIParam ? param.p : param
        slowrhs!(Rs[s], Q, p, ts, increment = false)
        if param isa MRIParam
            # fraction of the step slower stage increment we are on
            τ = (ts - param.ts) / param.Δts
            event = Event(array_device(Q))
            event = mri_update_rate!(array_device(Q), groupsize)(
                realview(Rs[s]),
                τ,
                param.γs,
                param.Rs;
                ndrange = length(realview(Rs[s])),
                dependencies = (event,),
            )
            wait(array_device(Q), event)
        end

        γs = ntuple(k -> ntuple(j -> Γs[k][s, j], s), NΓ)
        mriparam = MRIParam(param, γs, realview.(Rs[1:s]), ts, dts)
        updatetime!(mrigark.fastsolver, ts)
        solve!(Q, mrigark.fastsolver, mriparam; timeend = ts + dts)

        # update time
        ts += dts
    end
end

# Add the outer MRI method's polynomial-in-τ coupling terms to the stage rate.
@kernel function mri_update_rate!(dQ, τ, γs, Rs)
    i = @index(Global, Linear)
    @inbounds begin
        NΓ = length(γs)
        Ns = length(γs[1])
        dqi = dQ[i]
        for s in 1:Ns
            ri = Rs[s][i]
            # Horner evaluation of the degree-(NΓ-1) polynomial in τ.
            sc = γs[NΓ][s]
            for k in (NΓ - 1):-1:1
                sc = sc * τ + γs[k][s]
            end
            dqi += sc * ri
        end
        dQ[i] = dqi
    end
end

"""
    MRIGARKERK33aSandu(f!, fastsolver, Q; dt, t0 = 0, δ = -1 // 2)

The 3rd order, 3 stage scheme from Sandu (2019). The parameter `δ` defaults
to the value suggested by Sandu, but can be varied.
"""
function MRIGARKERK33aSandu(slowrhs!, fastsolver, Q; dt, t0 = 0, δ = -1 // 2)
    T = eltype(Q)
    RT = real(T)
    #! format: off
    Γ0 = [
        1 // 3 0 // 1 0 // 1
        (-6δ - 7) // 12 (6δ + 11) // 12 0 // 1
        0 // 1 (6δ - 5) // 12 (3 - 2δ) // 4
    ]
    γ̂0 = [1 // 12 -1 // 3 7 // 12]
    Γ1 = [
        0 // 1 0 // 1 0 // 1
        (2δ + 1) // 2 -(2δ + 1) // 2 0 // 1
        1 // 2 -(2δ + 1) // 2 δ // 1
    ]
    γ̂1 = [0 // 1 0 // 1 0 // 1]
    #! format: on
    MRIGARKExplicit(slowrhs!, fastsolver, (Γ0, Γ1), (γ̂0, γ̂1), Q, dt, t0)
end

"""
    MRIGARKERK45aSandu(f!, fastsolver, Q; dt, t0 = 0)

The 4th order, 5 stage scheme from Sandu (2019).
"""
function MRIGARKERK45aSandu(slowrhs!, fastsolver, Q; dt, t0 = 0)
    T = eltype(Q)
    RT = real(T)
    #! format: off
    # 5×5 coupling tableaux; row breaks reconstructed to the square shape
    # that the MRIGARKExplicit constructor requires.
    Γ0 = [
        1 // 5 0 // 1 0 // 1 0 // 1 0 // 1
        -53 // 16 281 // 80 0 // 1 0 // 1 0 // 1
        -36562993 // 71394880 34903117 // 17848720 -88770499 // 71394880 0 // 1 0 // 1
        -7631593 // 71394880 -166232021 // 35697440 6068517 // 1519040 8644289 // 8924360 0 // 1
        277061 // 303808 -209323 // 1139280 -1360217 // 1139280 -148789 // 56964 147889 // 45120
    ]
    γ̂0 = [-1482837 // 759520 175781 // 71205 -790577 // 1139280 -6379 // 56964 47 // 96]
    Γ1 = [
        0 // 1 0 // 1 0 // 1 0 // 1 0 // 1
        503 // 80 -503 // 80 0 // 1 0 // 1 0 // 1
        -1365537 // 35697440 4963773 // 7139488 -1465833 // 2231090 0 // 1 0 // 1
        66974357 // 35697440 21445367 // 7139488 -3 // 1 -8388609 // 4462180 0 // 1
        -18227 // 7520 2 // 1 1 // 1 5 // 1 -41933 // 7520
    ]
    γ̂1 = [6213 // 1880 -6213 // 1880 0 // 1 0 // 1 0 // 1]
    #! format: on
    MRIGARKExplicit(slowrhs!, fastsolver, (Γ0, Γ1), (γ̂0, γ̂1), Q, dt, t0)
end

================================================
FILE: src/Numerics/ODESolvers/MultirateInfinitesimalStepMethod.jl
================================================

export MultirateInfinitesimalStep,
    TimeScaledRHS,
    MISRK1,
    MIS2,
    MISRK2a,
    MISRK2b,
    MIS3C,
    MISRK3,
    MIS4,
    MIS4a,
    MISKWRK43,
    TVDMISA,
    TVDMISB,
    getnsubsteps

"""
    TimeScaledRHS(a, b, rhs!)

When evaluated at time `t`, evaluates `rhs!` at time `a + bt`.
"""
mutable struct TimeScaledRHS{N, RT}
    a::RT
    b::RT
    rhs!::Any
    function TimeScaledRHS(a, b, rhs!)
        RT = typeof(a)
        # N distinguishes a single RHS function (N = 1) from a tuple of them.
        if isa(rhs!, Tuple)
            N = length(rhs!)
        else
            N = 1
        end
        new{N, RT}(a, b, rhs!)
    end
end

# Single-RHS variant: evaluate rhs! at the rescaled time a + b*tau.
function (o::TimeScaledRHS{1, RT} where {RT})(dQ, Q, params, tau; increment)
    o.rhs!(dQ, Q, params, o.a + o.b * tau; increment = increment)
end

# Tuple variant: evaluate the i-th rhs! at the rescaled time a + b*tau.
function (o::TimeScaledRHS{2, RT} where {RT})(dQ, Q, params, tau, i; increment)
    o.rhs![i](dQ, Q, params, o.a + o.b * tau; increment = increment)
end

# Wraps a rhs! and adds a fixed `offset` tendency after each evaluation.
mutable struct OffsetRHS{AT}
    offset::AT
    rhs!::Any
    function OffsetRHS(offset, rhs!)
        AT = typeof(offset)
        new{AT}(offset, rhs!)
    end
end

# Evaluate the wrapped rhs! and add the stored constant offset tendency.
function (o::OffsetRHS{AT} where {AT})(dQ, Q, params, tau; increment)
    o.rhs!(dQ, Q, params, tau; increment = increment)
    dQ .+= o.offset
end

"""
    MultirateInfinitesimalStep(slowrhs!, fastrhs!, fastmethod,
                               α, β, γ,
                               Q::AT; dt=0, t0=0) where {AT<:AbstractArray}

This is a time stepping object for explicitly time stepping the partitioned
differential equation given by right-hand-side functions `f_fast` and
`f_slow` with the state `Q`, i.e.,

```math
\\dot{Q} = f_{fast}(Q, t) + f_{slow}(Q, t)
```

with the required time step size `dt` and optional initial time `t0`.  This
time stepping object is intended to be passed to the `solve!` command.

The constructor builds a multirate infinitesimal step Runge-Kutta scheme
based on the provided `α`, `β` and `γ` tableaux and `fastmethod` for solving
the fast modes.

The available concrete implementations are:

  - [`MISRK1`](@ref)
  - [`MIS2`](@ref)
  - [`MISRK2a`](@ref)
  - [`MISRK2b`](@ref)
  - [`MIS3C`](@ref)
  - [`MISRK3`](@ref)
  - [`MIS4`](@ref)
  - [`MIS4a`](@ref)
  - [`MISKWRK43`](@ref)
  - [`TVDMISA`](@ref)
  - [`TVDMISB`](@ref)

### References

 - [KnothWensch2014](@cite)
 - [WickerSkamarock2002](@cite)
 - [KnothWolke1998](@cite)
"""
mutable struct MultirateInfinitesimalStep{
    T,
    RT,
    AT,
    FS,
    Nstages,
    Nstagesm1,
    Nstagesm2,
    Nstages_sq,
} <: AbstractODESolver
    "time step"
    dt::RT
    "time"
    t::RT
    "elapsed time steps"
    steps::Int
    "storage for y_n"
    yn::AT
    "Storage for ``Y_nj - y_n``"
    ΔYnj::NTuple{Nstagesm2, AT}
    "Storage for ``f(Y_nj)``"
    fYnj::NTuple{Nstagesm1, AT}
    "Storage for offset"
    offset::AT
    "slow rhs function"
    slowrhs!::Any
    "RHS for fast solver"
    tsfastrhs!::TimeScaledRHS{N, RT} where {N}
    "fast rhs method"
    fastsolver::FS
    "number of substeps per stage"
    nsubsteps::Int
    # MIS tableaux (α, β, γ), β row sums d, stage times c, and fast-solver
    # stage offsets c̃ = α * c.
    α::SArray{NTuple{2, Nstages}, RT, 2, Nstages_sq}
    β::SArray{NTuple{2, Nstages}, RT, 2, Nstages_sq}
    γ::SArray{NTuple{2, Nstages}, RT, 2, Nstages_sq}
    d::SArray{NTuple{1, Nstages}, RT, 1, Nstages}
    c::SArray{NTuple{1, Nstages}, RT, 1, Nstages}
    c̃::SArray{NTuple{1, Nstages}, RT, 1, Nstages}

    function MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q::AT;
        dt = 0,
        t0 = 0,
    ) where {AT <: AbstractArray}
        T = eltype(Q)
        RT = real(T)

        Nstages = size(α, 1)

        yn = similar(Q)
        ΔYnj = ntuple(_ -> similar(Q), Nstages - 2)
        fYnj = ntuple(_ -> similar(Q), Nstages - 1)
        offset = similar(Q)
        tsfastrhs! = TimeScaledRHS(RT(0), RT(0), fastrhs!)
        fastsolver = fastmethod(tsfastrhs!, Q)

        d = sum(β, dims = 2)

        c = similar(d)
        for i in eachindex(c)
            c[i] = d[i]
            if i > 1
                c[i] += sum(j -> (α[i, j] + γ[i, j]) * c[j], 1:(i - 1))
            end
            # When d[i] = 0, we do not perform fast substepping, therefore
            # we do not need to scale the β, γ coefficients
            if !(abs(d[i]) < 1.e-10)
                β[i, :] ./= d[i]
                γ[i, :] ./= d[i]
            end
        end
        c̃ = α * c

        new{
            T,
            RT,
            AT,
            typeof(fastsolver),
            Nstages,
            Nstages - 1,
            Nstages - 2,
            Nstages^2,
        }(
            RT(dt),
            RT(t0),
            0,
            yn,
            ΔYnj,
            fYnj,
            offset,
            slowrhs!,
            tsfastrhs!,
            fastsolver,
            nsubsteps,
            α,
            β,
            γ,
            d,
            c,
            c̃,
        )
    end
end

# Convenience constructor: split a two-function TimeScaledRHS into the slow
# and fast tendencies of the given MIS scheme constructor `mis`.
function MultirateInfinitesimalStep(
    mis,
    op::TimeScaledRHS{2, RT} where {RT},
    fastmethod,
    Q = nothing;
    dt = 0,
    t0 = 0,
    nsubsteps = 1,
) where {AT <: AbstractArray}
    # NOTE(review): the `where {AT <: AbstractArray}` clause is unused in this
    # signature — confirm whether it can be removed.
    return mis(
        op.rhs![1],
        op.rhs![2],
        fastmethod,
        nsubsteps,
        Q;
        dt = dt,
        t0 = t0,
    )
end

function dostep!(
    Q,
    mis::MultirateInfinitesimalStep,
    p,
    time::Real,
    nsubsteps::Int,
    iStage::Int,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    slow_scaling = nothing,
)
    # Wrap the slow RHS (once) so the MIS substeps see the outer multirate
    # method's slow tendency as a constant offset.
    if isa(mis.slowrhs!, OffsetRHS{AT} where {AT})
        mis.slowrhs!.offset = slow_rv_dQ
    else
        mis.slowrhs! = OffsetRHS(slow_rv_dQ, mis.slowrhs!)
    end

    for i in 1:nsubsteps
        dostep!(Q, mis, p, time)
        time += mis.fastsolver.dt
    end
end

function dostep!(Q, mis::MultirateInfinitesimalStep, p, time)
    dt = mis.dt
    FT = eltype(dt)
    α = mis.α
    β = mis.β
    γ = mis.γ
    yn = mis.yn
    ΔYnj = mis.ΔYnj
    fYnj = mis.fYnj
    offset = mis.offset
    d = mis.d
    c = mis.c
    c̃ = mis.c̃
    slowrhs! = mis.slowrhs!
    fastsolver = mis.fastsolver
    fastrhs! = mis.tsfastrhs!
    nsubsteps = mis.nsubsteps

    nstages = size(α, 1)

    copyto!(yn, Q) # first stage
    for i in 2:nstages
        slowrhs!(fYnj[i - 1], Q, p, time + c[i - 1] * dt, increment = false)

        groupsize = 256
        event = Event(array_device(Q))
        event = update!(array_device(Q), groupsize)(
            realview(Q),
            realview(offset),
            Val(i),
            realview(yn),
            map(realview, ΔYnj[1:(i - 2)]),
            map(realview, fYnj[1:(i - 1)]),
            α[i, :],
            β[i, :],
            γ[i, :],
            dt;
            ndrange = length(realview(Q)),
            dependencies = (event,),
        )
        wait(array_device(Q), event)

        # When d[i] = 0, we do not perform fast substepping;
        # instead we just update the slow tendency
        if iszero(d[i])
            Q .+= dt .* offset
        else
            # Rescale the fast RHS so the substep runs over τ ∈ [0, d[i]*dt].
            fastrhs!.a = time + c̃[i] * dt
            fastrhs!.b = (c[i] - c̃[i]) / d[i]

            τ = zero(FT)
            nsubstepsLoc = ceil(Int, nsubsteps * d[i])
            dτ = d[i] * dt / nsubstepsLoc

            updatetime!(fastsolver, τ)
            updatedt!(fastsolver, dτ)
            # TODO: we want to be able to write
            #   solve!(Q, fastsolver, p; numberofsteps = mis.nsubsteps) #(1c)
            # especially if we want to use StormerVerlet, but need some way
            # to pass in `offset`
            dostep!(
                Q,
                fastsolver,
                p,
                τ,
                nsubstepsLoc,
                i,
                FT(1),
                realview(offset),
                nothing,
            ) #(1c)
        end
    end
end

# Elementwise MIS stage initialization: reset Q to yn plus the α-weighted
# stage increments, and build the constant slow-tendency offset for the fast
# substeps.
@kernel function update!(
    Q,
    offset,
    ::Val{i},
    yn,
    ΔYnj,
    fYnj,
    αi,
    βi,
    γi,
    dt,
) where {i}
    e = @index(Global, Linear)
    @inbounds begin
        if i > 2
            ΔYnj[i - 2][e] = Q[e] - yn[e] # is 0 for i == 2
        end
        Q[e] = yn[e] # (1a)
        offset[e] = (βi[1]) .* fYnj[1][e] # (1b)
        @unroll for j in 2:(i - 1)
            Q[e] += αi[j] .* ΔYnj[j - 1][e] # (1a cont.)
            offset[e] += (γi[j] / dt) * ΔYnj[j - 1][e] + βi[j] * fYnj[j][e] # (1b cont.)
        end
    end
end

"""
    MISRK1(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MISRK1` method is a 1st-order accurate MIS method based on the RK1
(explicit Euler) method.
### References

 - [KnothWensch2014](@cite)
"""
function MISRK1(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    # Build the tableaux with eltype RT so single-precision states do not
    # promote to Float64 (matches the explicit RT(...) entries used by the
    # other MIS constructors in this file).
    α = zeros(RT, 2, 2)
    β = beta(MISRK1, RT)
    γ = zeros(RT, 2, 2)
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MISRK1, returned with eltype RT for consistency with the
# other `beta` methods (an Int matrix would be mutated in place by the
# `MultirateInfinitesimalStep` constructor, which divides β rows by d[i]).
function beta(::typeof(MISRK1), RT::DataType)
    β = RT[
        0 0
        1 0
    ]
end

"""
    MIS2(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MIS2` method is a 2nd-order accurate, 3-stage MIS method whose
construction is summarized in Table 1 of [KnothWensch2014](@cite).

### References

 - [KnothWensch2014](@cite)
"""
function MIS2(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0 0
        0 0 0 0
        0 RT(0.536946566710) 0 0
        0 RT(0.480892968551) RT(0.500561163566) 0
    ]
    β = beta(MIS2, RT)
    γ = [
        0 0 0 0
        0 0 0 0
        0 RT(0.652465126004) 0 0
        0 RT(-0.0732769849457) RT(0.144902430420) 0
    ]
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MIS2 (Table 1 of KnothWensch2014).
function beta(::typeof(MIS2), RT::DataType)
    β = [
        0 0 0 0
        RT(0.126848494553) 0 0 0
        RT(-0.784838278826) RT(1.37442675268) 0 0
        RT(-0.0456727081749) RT(-0.00875082271190) RT(0.524775788629) 0
    ]
end

"""
    MISRK2a(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MISRK2a` method is a 2nd-order accurate, 2-stage MIS method based on the
approach detailed by Wicker and Skamarock in [WickerSkamarock2002](@cite).
### References

 - [WickerSkamarock2002](@cite)
"""
function MISRK2a(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0
        0 0 0
        0 1 0
    ]
    β = beta(MISRK2a, RT)
    # Use RT-typed zeros so single-precision states do not promote to Float64
    γ = zeros(RT, 3, 3)
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MISRK2a (Wicker & Skamarock RK2 analog).
function beta(::typeof(MISRK2a), RT::DataType)
    β = [
        0 0 0
        RT(0.5) 0 0
        RT(-0.5) 1 0
    ]
end

"""
    MISRK2b(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MISRK2b` method is a 2nd-order accurate, 2-stage MIS method and a variant
of the `MISRK2a` method based on the approach detailed by Wicker and Skamarock
in [WickerSkamarock2002](@cite).

### References

 - [WickerSkamarock2002](@cite)
"""
function MISRK2b(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0
        0 0 0
        0 1 0
    ]
    β = beta(MISRK2b, RT)
    # Use RT-typed zeros so single-precision states do not promote to Float64
    γ = zeros(RT, 3, 3)
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MISRK2b (variant of MISRK2a).
function beta(::typeof(MISRK2b), RT::DataType)
    β = [
        0 0 0
        1 0 0
        RT(-0.5) RT(0.5) 0
    ]
end

"""
    MIS3C(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MIS3C` method is a 3rd-order accurate, 3-stage MIS method whose
construction is summarized in Table 2 of [KnothWensch2014](@cite).
### References

 - [KnothWensch2014](@cite)
"""
function MIS3C(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0 0
        0 0 0 0
        0 RT(0.589557277145) 0 0
        0 RT(0.544036601551) RT(0.565511042564) 0
    ]
    β = beta(MIS3C, RT)
    γ = [
        0 0 0 0
        0 0 0 0
        0 RT(0.142798786398) 0 0
        0 RT(-0.0428918957402) RT(0.0202720980282) 0
    ]
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MIS3C (Table 2 of KnothWensch2014).
function beta(::typeof(MIS3C), RT::DataType)
    β = [
        0 0 0 0
        RT(0.397525189225) 0 0 0
        RT(-0.227036463644) RT(0.624528794618) 0 0
        RT(-0.00295238076840) RT(-0.270971764284) RT(0.671323159437) 0
    ]
end

"""
    MISRK3(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MISRK3` method is a 3rd-order accurate, 3-stage MIS method based on the
approach detailed by Wicker and Skamarock in [WickerSkamarock2002](@cite).

### References

 - [WickerSkamarock2002](@cite)
"""
function MISRK3(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    # Build the tableaux with eltype RT so single-precision states do not
    # promote to Float64 (matches the explicit RT(...) entries used by the
    # other MIS constructors in this file).
    α = zeros(RT, 4, 4)
    β = beta(MISRK3, RT)
    γ = zeros(RT, 4, 4)
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MISRK3 (Wicker & Skamarock RK3 analog).
function beta(::typeof(MISRK3), RT::DataType)
    β = [
        0 0 0 0
        RT(0.3333333333333333) 0 0 0
        0 RT(0.5) 0 0
        0 0 1 0
    ]
end

"""
    MIS4(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MIS4` method is a 3rd-order accurate, 4-stage MIS method whose
construction is summarized in Table 3 of [KnothWensch2014](@cite).
### References

 - [KnothWensch2014](@cite)
"""
function MIS4(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0 0 0
        0 0 0 0 0
        0 RT(0.914092810304) 0 0 0
        0 RT(1.14274417397) RT(-0.295211246188) 0 0
        0 RT(0.112965282231) RT(0.337369411296) RT(0.503747183119) 0
    ]
    β = beta(MIS4, RT)
    γ = [
        0 0 0 0 0
        0 0 0 0 0
        0 RT(0.678951983291) 0 0 0
        0 RT(-1.38974164070) RT(0.503864576302) 0 0
        0 RT(-0.375328608282) RT(0.320925021109) RT(-0.158259688945) 0
    ]
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MIS4 (Table 3 of KnothWensch2014).
function beta(::typeof(MIS4), RT::DataType)
    β = [
        0 0 0 0 0
        RT(0.136296478423) 0 0 0 0
        RT(0.280462398979) RT(-0.0160351333596) 0 0 0
        RT(0.904713355208) RT(-1.04011183154) RT(0.652337563489) 0 0
        RT(0.0671969845546) RT(-0.365621862610) RT(-0.154861470835) RT(0.970362444469) 0
    ]
end

"""
    MIS4a(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MIS4a` method is a 3rd-order accurate, 4-stage MIS method whose
construction is summarized in Table 4 of [KnothWensch2014](@cite).
### References

 - [KnothWensch2014](@cite)
"""
function MIS4a(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0 0 0
        0 0 0 0 0
        0 RT(0.52349249922385610) 0 0 0
        0 RT(1.1683374366893629) RT(-0.75762080241712637) 0 0
        0 RT(-0.036477233846797109) RT(0.56936148730740477) RT(0.47746263002599681) 0
    ]
    # β[5,1] in the paper is incorrect
    # the correct value is used below (from authors)
    β = beta(MIS4a, RT)
    γ = [
        0 0 0 0 0
        0 0 0 0 0
        0 RT(0.13145089796226542) 0 0 0
        0 RT(-0.36855857648747881) RT(0.33159232636600550) 0 0
        0 RT(-0.065767130537473045) RT(0.040591093109036858) RT(0.064902111640806712) 0
    ]
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MIS4a (Table 4 of KnothWensch2014, with the corrected
# β[5,1] entry noted above).
function beta(::typeof(MIS4a), RT::DataType)
    β = [
        0 0 0 0 0
        RT(0.38758444641450318) 0 0 0 0
        RT(-0.025318448354142823) RT(0.38668943087310403) 0 0 0
        RT(0.20899983523553325) RT(-0.45856648476371231) RT(0.43423187573425748) 0 0
        RT(-0.10048822195663100) RT(-0.46186171956333327) RT(0.83045062122462809) RT(0.27014914900250392) 0
    ]
end

"""
    MISKWRK43(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `MISKWRK43` method is a 3rd-order accurate, 4-stage MIS method. It is the
MIS analog of an RK43 method, based on the approach detailed in
[KnothWolke1998](@cite).
### References

 - [KnothWolke1998](@cite)
"""
function MISKWRK43(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0 0 0
        0 0 0 0 0
        0 1 0 0 0
        0 0 1 0 0
        0 0 0 1 0
    ]
    β = beta(MISKWRK43, RT)
    # Use RT-typed zeros so single-precision states do not promote to Float64
    γ = zeros(RT, 5, 5)
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for MISKWRK43 (RK43 analog). The [2,1] entry is wrapped in RT
# like the other nonzero entries — a bare Float64 literal would promote the
# whole matrix to Float64 when RT is Float32.
function beta(::typeof(MISKWRK43), RT::DataType)
    β = [
        0 0 0 0 0
        RT(0.5) 0 0 0 0
        -RT(2 // 3) RT(2 // 3) 0 0 0
        RT(0.5) -1 1 0 0
        -RT(1 // 6) RT(2 // 3) -RT(2 // 3) RT(1 // 6) 0
    ]
end

"""
    TVDMISA(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `TVDMISA` method is a 3rd-order accurate, 3-stage MIS method whose
construction is summarized in Table 6 of [KnothWensch2014](@cite).

### References

 - [KnothWensch2014](@cite)
"""
function TVDMISA(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0 0
        0 0 0 0
        0 RT(0.1946360605647457) 0 0
        0 RT(0.3971200136786614) RT(0.2609434606211801) 0
    ]
    β = beta(TVDMISA, RT)
    γ = [
        0 0 0 0
        0 0 0 0
        0 RT(0.5624048933209129) 0 0
        0 RT(0.4408467475713277) RT(-0.2459300561692391) 0
    ]
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for TVDMISA (Table 6 of KnothWensch2014).
function beta(::typeof(TVDMISA), RT::DataType)
    β = [
        0 0 0 0
        RT(2 // 3) 0 0 0
        RT(-0.28247174703488398) RT(4 // 9) 0 0
        RT(-0.31198081960042401) RT(0.18082737579913699) RT(9 // 16) 0
    ]
end

"""
    TVDMISB(slowrhs!, fastrhs!, fastmethod, nsubsteps, Q; dt = 0, t0 = 0)

The `TVDMISB` method is a 3rd-order accurate, 3-stage MIS method whose
construction is summarized in Table 7 of [KnothWensch2014](@cite).
### References

 - [KnothWensch2014](@cite)
"""
function TVDMISB(
    slowrhs!,
    fastrhs!,
    fastmethod,
    nsubsteps,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    FT = eltype(Q)
    RT = real(FT)
    α = [
        0 0 0 0
        0 0 0 0
        0 RT(0.42668232863311001) 0 0
        0 RT(0.26570779016173801) RT(0.41489966891866698) 0
    ]
    β = beta(TVDMISB, RT)
    γ = [
        0 0 0 0
        0 0 0 0
        0 RT(0.28904389120139701) 0 0
        0 RT(0.45113560071334202) RT(-0.25006656847591002) 0
    ]
    MultirateInfinitesimalStep(
        slowrhs!,
        fastrhs!,
        fastmethod,
        nsubsteps,
        α,
        β,
        γ,
        Q;
        dt = dt,
        t0 = t0,
    )
end

# β tableau for TVDMISB (Table 7 of KnothWensch2014).
function beta(::typeof(TVDMISB), RT::DataType)
    β = [
        0 0 0 0
        RT(2 // 3) 0 0 0
        RT(-0.25492859100078202) RT(4 // 9) 0 0
        RT(-0.26452517179288798) RT(0.11424084424766399) RT(9 // 16) 0
    ]
end

# Returns the β row sums of method `mis` divided by `ns`; the row sums d
# set the fast-substep interval lengths in MultirateInfinitesimalStep.
function getnsubsteps(mis, ns::Int, RT::DataType)
    d = sum(beta(mis, RT), dims = 2)
    return d ./ ns
end

================================================
FILE: src/Numerics/ODESolvers/MultirateRungeKuttaMethod.jl
================================================

export MultirateRungeKutta

LSRK2N = LowStorageRungeKutta2N

"""
    MultirateRungeKutta(slow_solver, fast_solver; dt, t0 = 0)

This is a time stepping object for explicitly time stepping the differential
equation given by the right-hand-side function `f` with the state `Q`, i.e.,

```math
  \\dot{Q} = f_fast(Q, t) + f_slow(Q, t)
```

with the required time step size `dt` and optional initial time `t0`. This
time stepping object is intended to be passed to the `solve!` command.

The constructor builds a multirate Runge-Kutta scheme using two different RK
solvers.
This is based on [Schlegel2012](@cite).

Currently only the low storage RK methods can be used as slow solvers.

### References

 - [Schlegel2012](@cite)
"""
mutable struct MultirateRungeKutta{SS, FS, RT} <: AbstractODESolver
    "slow solver"
    slow_solver::SS
    "fast solver"
    fast_solver::FS
    "time step"
    dt::RT
    "time"
    t::RT
    "elapsed time steps"
    steps::Int

    function MultirateRungeKutta(
        slow_solver::LSRK2N,
        fast_solver,
        Q = nothing;
        dt = getdt(slow_solver),
        t0 = slow_solver.t,
    ) where {AT <: AbstractArray}
        SS = typeof(slow_solver)
        FS = typeof(fast_solver)
        RT = real(eltype(slow_solver.dQ))
        new{SS, FS, RT}(slow_solver, fast_solver, RT(dt), RT(t0), 0)
    end
end

# Build a (possibly nested) multirate solver from a tuple of solvers ordered
# slowest to fastest; solvers[2:end] are recursively nested as the fast solver.
function MultirateRungeKutta(
    solvers::Tuple,
    Q = nothing;
    dt = getdt(solvers[1]),
    t0 = solvers[1].t,
) where {AT <: AbstractArray}
    if length(solvers) < 2
        error("Must specify at least two solvers")
    elseif length(solvers) == 2
        fast_solver = solvers[2]
    else
        fast_solver = MultirateRungeKutta(solvers[2:end], Q; dt = dt, t0 = t0)
    end
    slow_solver = solvers[1]
    MultirateRungeKutta(slow_solver, fast_solver, Q; dt = dt, t0 = t0)
end

# Build a multirate solver of concrete type `mrk` from a two-component
# time-scaled RHS: op.rhs![1] is the slow tendency, op.rhs![2] the fast one.
function MultirateRungeKutta(
    mrk,
    op::TimeScaledRHS{2, RT} where {RT},
    Q = nothing;
    dt = 0,
    t0 = 0,
    steps = 0,
) where {AT <: AbstractArray}
    slow_solver = mrk(op.rhs![1], Q, dt = dt, t0 = t0)
    fast_solver = mrk(op.rhs![2], Q, dt = dt, t0 = t0)
    MultirateRungeKutta(slow_solver, fast_solver, Q; dt = dt, t0 = t0)
end

# Wrapper used when this multirate method is itself driven as the fast solver
# of an outer scheme: take `nsteps` full steps, advancing `time` by the fast
# solver's dt each step.
function dostep!(
    Q,
    mrrk::MultirateRungeKutta{SS},
    param,
    time::Real,
    nsteps::Int,
    iStage::Int,
    slow_δ = nothing,
    slow_rv_dQ = nothing,
    slow_scaling = nothing,
) where {SS <: LSRK2N}
    for i in 1:nsteps
        dostep!(Q, mrrk, param, time, slow_δ, slow_rv_dQ, slow_scaling)
        time += mrrk.fast_solver.dt
    end
end

# Advance `Q` by one full multirate step of size `mrrk.dt`.
function dostep!(
    Q,
    mrrk::MultirateRungeKutta{SS},
    param,
    time,
    in_slow_δ = nothing,
    in_slow_rv_dQ = nothing,
    in_slow_scaling = nothing,
) where {SS <: LSRK2N}
    dt = mrrk.dt

    slow = mrrk.slow_solver
    fast = mrrk.fast_solver

    slow_rv_dQ = realview(slow.dQ)

    groupsize = 256

    fast_dt_in = getdt(fast)

    for slow_s in 1:length(slow.RKA)
        #
Currnent slow state time slow_stage_time = time + slow.RKC[slow_s] * dt # Evaluate the slow mode slow.rhs!(slow.dQ, Q, param, slow_stage_time, increment = true) if in_slow_δ !== nothing slow_scaling = nothing if slow_s == length(slow.RKA) slow_scaling = in_slow_scaling end # update solution and scale RHS event = Event(array_device(Q)) event = update!(array_device(Q), groupsize)( slow_rv_dQ, in_slow_rv_dQ, in_slow_δ, slow_scaling; ndrange = length(realview(Q)), dependencies = (event,), ) wait(array_device(Q), event) end # Fractional time for slow stage if slow_s == length(slow.RKA) γ = 1 - slow.RKC[slow_s] else γ = slow.RKC[slow_s + 1] - slow.RKC[slow_s] end # RKB for the slow with fractional time factor remove (since full # integration of fast will result in scaling by γ) slow_δ = slow.RKB[slow_s] / (γ) # RKB for the slow with fractional time factor remove (since full # integration of fast will result in scaling by γ) nsubsteps = fast_dt_in > 0 ? ceil(Int, γ * dt / fast_dt_in) : 1 fast_dt = γ * dt / nsubsteps updatedt!(fast, fast_dt) for substep in 1:nsubsteps slow_rka = nothing if substep == nsubsteps slow_rka = slow.RKA[slow_s % length(slow.RKA) + 1] end fast_time = slow_stage_time + (substep - 1) * fast_dt dostep!(Q, fast, param, fast_time, slow_δ, slow_rv_dQ, slow_rka) end end updatedt!(fast, fast_dt_in) end @kernel function update!(fast_dQ, slow_dQ, δ, slow_rka = nothing) i = @index(Global, Linear) @inbounds begin fast_dQ[i] += δ * slow_dQ[i] if slow_rka !== nothing slow_dQ[i] *= slow_rka end end end ================================================ FILE: src/Numerics/ODESolvers/ODESolvers.jl ================================================ """ ODESolvers Ordinary differential equation solvers """ module ODESolvers using LinearAlgebra using KernelAbstractions using KernelAbstractions.Extras: @unroll using StaticArrays using ..SystemSolvers using ..MPIStateArrays: array_device, realview using ..GenericCallbacks export AbstractODESolver, solve!, updatedt!, 
    gettime, getsteps

abstract type AbstractODESolver end

"""
    gettime(solver::AbstractODESolver)

Returns the current simulation time of the ODE solver `solver`
"""
gettime(solver::AbstractODESolver) = solver.t

"""
    getdt(solver::AbstractODESolver)

Returns the current simulation time step of the ODE solver `solver`
"""
getdt(solver::AbstractODESolver) = solver.dt

"""
    getsteps(solver::AbstractODESolver)

Returns the number of completed time steps of the ODE solver `solver`
"""
getsteps(solver::AbstractODESolver) = solver.steps

"""
    ODESolvers.general_dostep!(Q, solver::AbstractODESolver, p, timeend::Real;
                               adjustfinalstep::Bool)

Use the solver to step `Q` forward in time from the current time, to the time
`timeend`. If `adjustfinalstep == true` then `dt` is adjusted so that the step
does not take the solution beyond the `timeend`.

Returns the solver time after the step.
"""
function general_dostep!(
    Q,
    solver::AbstractODESolver,
    p,
    timeend::Real;
    adjustfinalstep::Bool,
)
    time, dt = gettime(solver), getdt(solver)
    final_step = false
    if adjustfinalstep && time + dt > timeend
        # Shrink dt to land exactly on timeend; restore it after the step
        orig_dt = dt
        dt = timeend - time
        updatedt!(solver, dt)
        final_step = true
    end
    @assert dt > 0

    dostep!(Q, solver, p, time)

    if !final_step
        updatetime!(solver, time + dt)
    else
        # orig_dt is always defined here: final_step is only set in the
        # branch above that saves it
        updatedt!(solver, orig_dt)
        updatetime!(solver, timeend)
    end
end

"""
    updatetime!(solver::AbstractODESolver, time)

Change the current time to `time` for the ODE solver `solver`.
"""
updatetime!(solver::AbstractODESolver, time) = (solver.t = time)

"""
    updatedt!(solver::AbstractODESolver, dt)

Change the time step size to `dt` for the ODE solver `solver`.
"""
updatedt!(solver::AbstractODESolver, dt) = (solver.dt = dt)

"""
    updatesteps!(solver::AbstractODESolver, steps)

Set the number of elapsed time steps for the ODE solver `solver`.
"""
updatesteps!(solver::AbstractODESolver, steps) = (solver.steps = steps)

# Whether the solver supports time step adjustment; overridden by solvers
# that do not.
isadjustable(solver::AbstractODESolver) = true

# {{{ run!
""" solve!(Q, solver::AbstractODESolver; timeend, stopaftertimeend=true, numberofsteps, callbacks) Solves an ODE using the `solver` starting from a state `Q`. The state `Q` is updated inplace. The final time `timeend` or `numberofsteps` must be specified. A series of optional callback functions can be specified using the tuple `callbacks`; see the `GenericCallbacks` module. """ function solve!( Q, solver::AbstractODESolver, param = nothing; timeend::Real = Inf, adjustfinalstep = true, numberofsteps::Integer = 0, callbacks = (), ) @assert isfinite(timeend) || numberofsteps > 0 if adjustfinalstep && !isadjustable(solver) error("$solver does not support time step adjustments. Can only be used with `adjustfinalstep=false`.") end t0 = gettime(solver) # Loop through an initialize callbacks (if they need it) GenericCallbacks.init!(callbacks, solver, Q, param, t0) step = 0 time = t0 while time < timeend step += 1 updatesteps!(solver, step) time = general_dostep!( Q, solver, param, timeend; adjustfinalstep = adjustfinalstep, ) val = GenericCallbacks.call!(callbacks, solver, Q, param, time) if val !== nothing && val > 0 return gettime(solver) end # Figure out if we should stop if step == numberofsteps break end end # Loop through to fini callbacks GenericCallbacks.fini!(callbacks, solver, Q, param, time) return gettime(solver) end # }}} include("BackwardEulerSolvers.jl") include("MultirateInfinitesimalGARKExplicit.jl") include("MultirateInfinitesimalGARKDecoupledImplicit.jl") include("MultirateInfinitesimalStepMethod.jl") include("LowStorageRungeKuttaMethod.jl") include("LowStorageRungeKutta3NMethod.jl") include("StrongStabilityPreservingRungeKuttaMethod.jl") include("AdditiveRungeKuttaMethod.jl") include("MultirateRungeKuttaMethod.jl") include("SplitExplicitMethod.jl") include("DifferentialEquations.jl") end # module ================================================ FILE: src/Numerics/ODESolvers/SplitExplicitMethod.jl ================================================ export 
SplitExplicitSolver using ..BalanceLaws: initialize_states!, tendency_from_slow_to_fast!, cummulate_fast_solution!, reconcile_from_fast_to_slow! LSRK2N = LowStorageRungeKutta2N @doc """ SplitExplicitSolver(slow_solver, fast_solver; dt, t0 = 0, coupled = true) This is a time stepping object for explicitly time stepping the differential equation given by the right-hand-side function `f` with the state `Q`, i.e., ```math \\dot{Q_{fast}} = f_{fast}(Q_{fast}, Q_{slow}, t) \\dot{Q_{slow}} = f_{slow}(Q_{slow}, Q_{fast}, t) ``` with the required time step size `dt` and optional initial time `t0`. This time stepping object is intended to be passed to the `solve!` command. This method performs an operator splitting to timestep the vertical average of the model at a faster rate than the full model. This results in a first- order time stepper. """ SplitExplicitSolver mutable struct SplitExplicitSolver{SS, FS, RT, MSA} <: AbstractODESolver "slow solver" slow_solver::SS "fast solver" fast_solver::FS "time step" dt::RT "time" t::RT "elapsed time steps" steps::Int "storage for transfer tendency" dQ2fast::MSA function SplitExplicitSolver( slow_solver::LSRK2N, fast_solver, Q = nothing; dt = getdt(slow_solver), t0 = slow_solver.t, ) where {AT <: AbstractArray} SS = typeof(slow_solver) FS = typeof(fast_solver) RT = real(eltype(slow_solver.dQ)) dQ2fast = similar(slow_solver.dQ) dQ2fast .= -0 MSA = typeof(dQ2fast) return new{SS, FS, RT, MSA}( slow_solver, fast_solver, RT(dt), RT(t0), 0, dQ2fast, ) end end function dostep!( Qslow, split::SplitExplicitSolver{SS}, param, time::Real, ) where {SS <: LSRK2N} slow = split.slow_solver fast = split.fast_solver Qfast = slow.rhs!.modeldata.Q_2D dQslow = slow.dQ dQ2fast = split.dQ2fast slow_bl = slow.rhs!.balance_law fast_bl = fast.rhs!.balance_law groupsize = 256 slow_dt = getdt(slow) fast_dt_in = getdt(fast) for slow_s in 1:length(slow.RKA) # Current slow state time slow_stage_time = time + slow.RKC[slow_s] * slow_dt # Initialize fast model and 
tendency adjustment # before evalution of slow mode initialize_states!(slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast) # Evaluate the slow mode # --> save tendency for the fast slow.rhs!(dQ2fast, Qslow, param, slow_stage_time, increment = false) # vertically integrate slow tendency to advance fast equation # and use vertical mean for slow model (negative source) # ---> work with dQ2fast as input tendency_from_slow_to_fast!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, dQ2fast, ) # Compute (and RK update) slow tendency slow.rhs!(dQslow, Qslow, param, slow_stage_time, increment = true) # Fractional time for slow stage if slow_s == length(slow.RKA) γ = 1 - slow.RKC[slow_s] else γ = slow.RKC[slow_s + 1] - slow.RKC[slow_s] end # RKB for the slow with fractional time factor remove (since full # integration of fast will result in scaling by γ) nsubsteps = fast_dt_in > 0 ? ceil(Int, γ * slow_dt / fast_dt_in) : 1 fast_dt = γ * slow_dt / nsubsteps updatedt!(fast, fast_dt) for substep in 1:nsubsteps fast_time = slow_stage_time + (substep - 1) * fast_dt dostep!(Qfast, fast, param, fast_time) cummulate_fast_solution!( slow_bl, fast_bl, fast.rhs!, Qfast, fast_time, fast_dt, substep, ) end # Update (RK-stage) slow state event = Event(array_device(Qslow)) event = update!(array_device(Qslow), groupsize)( realview(dQslow), realview(Qslow), slow.RKA[slow_s % length(slow.RKA) + 1], slow.RKB[slow_s], slow_dt, nothing, nothing, nothing; ndrange = length(realview(Qslow)), dependencies = (event,), ) wait(array_device(Qslow), event) # reconcile slow equation using fast equation reconcile_from_fast_to_slow!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, ) end updatedt!(fast, fast_dt_in) return nothing end ================================================ FILE: src/Numerics/ODESolvers/StrongStabilityPreservingRungeKuttaMethod.jl ================================================ export StrongStabilityPreservingRungeKutta export SSPRK22Heuns, SSPRK22Ralstons, 
SSPRK33ShuOsher, SSPRK34SpiteriRuuth """ StrongStabilityPreservingRungeKutta(f, RKA, RKB, RKC, Q; dt, t0 = 0) This is a time stepping object for explicitly time stepping the differential equation given by the right-hand-side function `f` with the state `Q`, i.e., ```math \\dot{Q} = f(Q, t) ``` with the required time step size `dt` and optional initial time `t0`. This time stepping object is intended to be passed to the `solve!` command. The constructor builds a strong-stability-preserving Runge--Kutta scheme based on the provided `RKA`, `RKB` and `RKC` coefficient arrays. The available concrete implementations are: - [`SSPRK33ShuOsher`](@ref) - [`SSPRK34SpiteriRuuth`](@ref) """ mutable struct StrongStabilityPreservingRungeKutta{T, RT, AT, Nstages} <: AbstractODESolver "time step" dt::RT "time" t::RT "elapsed time steps" steps::Int "rhs function" rhs!::Any "Storage for RHS during the `StrongStabilityPreservingRungeKutta` update" Rstage::AT "Storage for the stage state during the `StrongStabilityPreservingRungeKutta` update" Qstage::AT "RK coefficient vector A (rhs scaling)" RKA::Array{RT, 2} "RK coefficient vector B (rhs add in scaling)" RKB::Array{RT, 1} "RK coefficient vector C (time scaling)" RKC::Array{RT, 1} function StrongStabilityPreservingRungeKutta( rhs!, RKA, RKB, RKC, Q::AT; dt = 0, t0 = 0, ) where {AT <: AbstractArray} T = eltype(Q) RT = real(T) new{T, RT, AT, length(RKB)}( RT(dt), RT(t0), 0, rhs!, similar(Q), similar(Q), RKA, RKB, RKC, ) end end """ dostep!(Q, ssp::StrongStabilityPreservingRungeKutta, p, time::Real, nsteps::Int, iStage::Int, [slow_δ, slow_rv_dQ, slow_scaling]) Wrapper function to use the strong stability preserving Runge--Kutta method `ssp` as the fast solver for a Multirate Infinitesimal Step method by calling dostep!(Q, ssp::StrongStabilityPreservingRungeKutta, p, time::Real, [slow_δ, slow_rv_dQ, slow_scaling]) nsubsteps times. 
""" function dostep!( Q, ssp::StrongStabilityPreservingRungeKutta, p, time::Real, nsteps::Int, iStage::Int, slow_δ = nothing, slow_rv_dQ = nothing, slow_scaling = nothing, ) for i in 1:nsteps dostep!(Q, ssp, p, time, slow_δ, slow_rv_dQ, slow_scaling) time += ssp.dt end end """ ODESolvers.dostep!(Q, ssp::StrongStabilityPreservingRungeKutta, p, time::Real, [slow_δ, slow_rv_dQ, slow_scaling]) Use the strong stability preserving Runge--Kutta method `ssp` to step `Q` forward in time from the current time `time` to final time `time + getdt(ssp)`. If the optional parameter `slow_δ !== nothing` then `slow_rv_dQ * slow_δ` is added as an additional ODE right-hand side source. If the optional parameter `slow_scaling !== nothing` then after the final stage update the scaling `slow_rv_dQ *= slow_scaling` is performed. """ function dostep!( Q, ssp::StrongStabilityPreservingRungeKutta, p, time, slow_δ = nothing, slow_rv_dQ = nothing, in_slow_scaling = nothing, ) dt = ssp.dt RKA, RKB, RKC = ssp.RKA, ssp.RKB, ssp.RKC rhs! = ssp.rhs! 
Rstage, Qstage = ssp.Rstage, ssp.Qstage rv_Q = realview(Q) rv_Rstage = realview(Rstage) rv_Qstage = realview(Qstage) groupsize = 256 rv_Qstage .= rv_Q for s in 1:length(RKB) rhs!(Rstage, Qstage, p, time + RKC[s] * dt, increment = false) slow_scaling = nothing if s == length(RKB) slow_scaling = in_slow_scaling end event = Event(array_device(Q)) event = update!(array_device(Q), groupsize)( rv_Rstage, rv_Q, rv_Qstage, RKA[s, 1], RKA[s, 2], RKB[s], dt, slow_δ, slow_rv_dQ, slow_scaling; ndrange = length(rv_Q), dependencies = (event,), ) wait(array_device(Q), event) end rv_Q .= rv_Qstage end @kernel function update!( dQ, Q, Qstage, rka1, rka2, rkb, dt, slow_δ, slow_dQ, slow_scaling, ) i = @index(Global, Linear) @inbounds begin if slow_δ !== nothing dQ[i] += slow_δ * slow_dQ[i] end Qstage[i] = rka1 * Q[i] + rka2 * Qstage[i] + dt * rkb * dQ[i] if slow_scaling !== nothing slow_dQ[i] *= slow_scaling end end end """ SSPRK22Heuns(f, Q; dt, t0 = 0) This function returns a [`StrongStabilityPreservingRungeKutta`](@ref) time stepping object for explicitly time stepping the differential equation given by the right-hand-side function `f` with the state `Q`, i.e., ```math \\dot{Q} = f(Q, t) ``` with the required time step size `dt` and optional initial time `t0`. This time stepping object is intended to be passed to the `solve!` command. This uses the second-order, 2-stage, strong-stability-preserving, Runge--Kutta scheme of Shu and Osher (1988) (also known as Heun's method.) 
Exact choice of coefficients from wikipedia page for Heun's method :)

### References

 - [Shu1988](@cite)
 - [Heun1900](@cite)
"""
function SSPRK22Heuns(F, Q::AT; dt = 0, t0 = 0) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)
    RKA = [RT(1) RT(0); RT(1 // 2) RT(1 // 2)]
    RKB = [RT(1), RT(1 // 2)]
    RKC = [RT(0), RT(1)]
    StrongStabilityPreservingRungeKutta(F, RKA, RKB, RKC, Q; dt = dt, t0 = t0)
end

"""
    SSPRK22Ralstons(f, Q; dt, t0 = 0)

This function returns a [`StrongStabilityPreservingRungeKutta`](@ref) time
stepping object for explicitly time stepping the differential equation given
by the right-hand-side function `f` with the state `Q`, i.e.,

```math
  \\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`. This
time stepping object is intended to be passed to the `solve!` command.

This uses the second-order, 2-stage, strong-stability-preserving, Runge--Kutta
scheme of Shu and Osher (1988) (also known as Ralston's method.)

Exact choice of coefficients from wikipedia page for Ralston's method :)

### References

 - [Shu1988](@cite)
 - [Ralston1962](@cite)
"""
function SSPRK22Ralstons(F, Q::AT; dt = 0, t0 = 0) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)
    RKA = [RT(1) RT(0); RT(5 // 8) RT(3 // 8)]
    RKB = [RT(2 // 3), RT(3 // 4)]
    RKC = [RT(0), RT(2 // 3)]
    StrongStabilityPreservingRungeKutta(F, RKA, RKB, RKC, Q; dt = dt, t0 = t0)
end

"""
    SSPRK33ShuOsher(f, Q; dt, t0 = 0)

This function returns a [`StrongStabilityPreservingRungeKutta`](@ref) time
stepping object for explicitly time stepping the differential equation given
by the right-hand-side function `f` with the state `Q`, i.e.,

```math
  \\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`. This
time stepping object is intended to be passed to the `solve!` command.
This uses the third-order, 3-stage, strong-stability-preserving, Runge--Kutta
scheme of Shu and Osher (1988)

### References

 - [Shu1988](@cite)
"""
function SSPRK33ShuOsher(F, Q::AT; dt = 0, t0 = 0) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)
    RKA = [RT(1) RT(0); RT(3 // 4) RT(1 // 4); RT(1 // 3) RT(2 // 3)]
    RKB = [RT(1), RT(1 // 4), RT(2 // 3)]
    RKC = [RT(0), RT(1), RT(1 // 2)]
    StrongStabilityPreservingRungeKutta(F, RKA, RKB, RKC, Q; dt = dt, t0 = t0)
end

"""
    SSPRK34SpiteriRuuth(f, Q; dt, t0 = 0)

This function returns a [`StrongStabilityPreservingRungeKutta`](@ref) time
stepping object for explicitly time stepping the differential equation given
by the right-hand-side function `f` with the state `Q`, i.e.,

```math
  \\dot{Q} = f(Q, t)
```

with the required time step size `dt` and optional initial time `t0`. This
time stepping object is intended to be passed to the `solve!` command.

This uses the third-order, 4-stage, strong-stability-preserving, Runge--Kutta
scheme of Spiteri and Ruuth (2002)

### References

 - [Spiteri2002](@cite)
"""
function SSPRK34SpiteriRuuth(
    F,
    Q::AT;
    dt = 0,
    t0 = 0,
) where {AT <: AbstractArray}
    T = eltype(Q)
    RT = real(T)
    RKA = [RT(1) RT(0); RT(0) RT(1); RT(2 // 3) RT(1 // 3); RT(0) RT(1)]
    RKB = [RT(1 // 2); RT(1 // 2); RT(1 // 6); RT(1 // 2)]
    RKC = [RT(0); RT(1 // 2); RT(1); RT(1 // 2)]
    StrongStabilityPreservingRungeKutta(F, RKA, RKB, RKC, Q; dt = dt, t0 = t0)
end

================================================
FILE: src/Numerics/SystemSolvers/SystemSolvers.jl
================================================

module SystemSolvers

using ..MPIStateArrays
using ..MPIStateArrays: array_device, realview
using ..Mesh.Grids
import ..Mesh.Grids: polynomialorders, dimensionality
using ..Mesh.Topologies
using ..DGMethods
using ..DGMethods: DGModel, DGFVModel, SpaceDiscretization
import ..DGMethods.FVReconstructions: width
using ..BalanceLaws

using Adapt
using CUDA
using LinearAlgebra
using LazyArrays
using StaticArrays
using KernelAbstractions
const weighted_norm = false

# just for testing SystemSolvers
# These overloads accept (and ignore) a trailing `weighted` flag so that
# callers can uniformly pass `weighted_norm` regardless of the vector type.
LinearAlgebra.norm(A::MVector, p::Real, weighted::Bool) = norm(A, p)
LinearAlgebra.norm(A::MVector, weighted::Bool) = norm(A, 2, weighted)
LinearAlgebra.dot(A::MVector, B::MVector, weighted) = dot(A, B)
LinearAlgebra.norm(A::AbstractVector, p::Real, weighted::Bool) = norm(A, p)
LinearAlgebra.norm(A::AbstractVector, weighted::Bool) = norm(A, 2, weighted)
LinearAlgebra.dot(A::AbstractVector, B::AbstractVector, weighted) = dot(A, B)

export linearsolve!,
    settolerance!, prefactorize, construct_preconditioner, preconditioner_solve!
export AbstractSystemSolver,
    AbstractIterativeSystemSolver, AbstractNonlinearSolver
export nonlinearsolve!

"""
    AbstractSystemSolver

This is an abstract type representing a generic linear solver.
"""
abstract type AbstractSystemSolver end

"""
    AbstractNonlinearSolver

This is an abstract type representing a generic nonlinear solver.
"""
abstract type AbstractNonlinearSolver <: AbstractSystemSolver end

"""
    LSOnly

Only applies the linear solver (no Newton solver)
"""
struct LSOnly <: AbstractNonlinearSolver
    linearsolver::Any
end

# "Newton iteration" for LSOnly: a single linear solve with the wrapped
# linear solver; no Jacobian update or outer Newton loop is performed.
function donewtoniteration!(
    rhs!,
    linearoperator!,
    preconditioner,
    Q,
    Qrhs,
    solver::LSOnly,
    args...,
)
    @info "donewtoniteration! linearsolve!", args...
    linearsolve!(
        linearoperator!,
        preconditioner,
        solver.linearsolver,
        Q,
        Qrhs,
        args...;
        max_iters = getmaxiterations(solver.linearsolver),
    )
end

"""
Solving rhs!(Q) = Qrhs via Newton,
where `F = rhs!(Q) - Qrhs`

    dF/dQ(Q^n) ΔQ ≈ jvp!(ΔQ; Q^n, F(Q^n))
    preconditioner ≈ dF/dQ(Q)

Returns the number of Newton iterations taken; convergence status is written
into the `cvg` `Ref`.
"""
function nonlinearsolve!(
    rhs!,
    jvp!,
    preconditioner,
    solver::AbstractNonlinearSolver,
    Q::AT,
    Qrhs,
    args...;
    max_newton_iters = 10,
    cvg = Ref{Bool}(),
) where {AT}

    FT = eltype(Q)
    tol = solver.tol
    converged = false
    iters = 0

    if preconditioner === nothing
        preconditioner = NoPreconditioner()
    end

    # Initialize NLSolver, compute initial residual
    initial_residual_norm = initialize!(rhs!, Q, Qrhs, solver, args...)
    if initial_residual_norm < tol
        converged = true
    end
    converged && return iters

    while !converged && iters < max_newton_iters
        # dF/dQ(Q^n) ΔQ ≈ jvp!(ΔQ; Q^n, F(Q^n)), update Q^n in jvp!
        update_Q!(jvp!, Q, args...)
        # update preconditioner based on finite difference, with jvp!
        preconditioner_update!(jvp!, rhs!.f!, preconditioner, args...)

        # do newton iteration with Q^{n+1} = Q^{n} - dF/dQ(Q^n)⁻¹ (rhs!(Q) - Qrhs)
        residual_norm, linear_iterations = donewtoniteration!(
            rhs!,
            jvp!,
            preconditioner,
            Q,
            Qrhs,
            solver,
            args...,
        )
        # @info "Linear solver converged in $linear_iterations iterations"
        iters += 1
        preconditioner_counter_update!(preconditioner)

        if !isfinite(residual_norm)
            error("norm of residual is not finite after $iters iterations of `donewtoniteration!`")
        end

        # Check residual_norm / norm(R0)
        # Comment: Should we check "correction" magnitude?
        # ||Delta Q|| / ||Q|| ?
        relresidual = residual_norm / initial_residual_norm
        if relresidual < tol || residual_norm < tol
            # @info "Newton converged in $iters iterations!"
            converged = true
        end
    end

    converged ||
        @warn "Nonlinear solver did not converge after $iters iterations"
    cvg[] = converged

    iters
end

"""
    AbstractIterativeSystemSolver

This is an abstract type representing a generic iterative
linear solver.

The available concrete implementations are:

  - [`GeneralizedConjugateResidual`](@ref)
  - [`GeneralizedMinimalResidual`](@ref)
"""
abstract type AbstractIterativeSystemSolver <: AbstractSystemSolver end

"""
    settolerance!(solver::AbstractIterativeSystemSolver, tolerance, relative)

Sets the relative or absolute tolerance of the iterative linear solver
`solver` to `tolerance`.
"""
settolerance!(
    solver::AbstractIterativeSystemSolver,
    tolerance,
    relative = true,
) = (relative ?
(solver.rtol = tolerance) : (solver.atol = tolerance))

# Interface stub: concrete iterative solvers must implement `doiteration!`.
# Throws a `MethodError` (previously this referenced the undefined name
# `tolerance`, which raised `UndefVarError` instead of the intended
# `MethodError`; the signature's argument is `threshold`).
doiteration!(
    linearoperator!,
    preconditioner,
    Q,
    Qrhs,
    solver::AbstractIterativeSystemSolver,
    threshold,
    args...,
) = throw(MethodError(
    doiteration!,
    (linearoperator!, preconditioner, Q, Qrhs, solver, threshold, args...),
))

# Interface stub: concrete iterative solvers must implement `initialize!`,
# returning `(converged, threshold)`.
initialize!(
    linearoperator!,
    Q,
    Qrhs,
    solver::AbstractIterativeSystemSolver,
    args...,
) = throw(MethodError(initialize!, (linearoperator!, Q, Qrhs, solver, args...)))

"""
    prefactorize(linop!, linearsolver, args...)

Prefactorize the in-place linear operator `linop!` for use with `linearsolver`.

The default for iterative solvers is a no-op (`nothing`); factorizing solvers
(e.g. the columnwise LU solver) specialize this function.
"""
function prefactorize(
    linop!,
    linearsolver::AbstractIterativeSystemSolver,
    args...,
)
    return nothing
end

"""
    linearsolve!(linearoperator!, solver::AbstractIterativeSystemSolver, Q, Qrhs, args...)

Solves a linear problem defined by the `linearoperator!` function and the state
`Qrhs`, i.e.,

```math
L(Q) = Q_{rhs}
```

using the `solver` and the initial guess `Q`. After the call `Q` contains the
solution. The arguments `args` is passed to `linearoperator!` when it is
called.

Returns the number of iterations taken; convergence status is written into the
`cvg` `Ref`.
"""
function linearsolve!(
    linearoperator!,
    preconditioner,
    solver::AbstractIterativeSystemSolver,
    Q,
    Qrhs,
    args...;
    max_iters = length(Q),
    cvg = Ref{Bool}(),
)
    converged = false
    iters = 0

    if preconditioner === nothing
        preconditioner = NoPreconditioner()
    end

    # `initialize!` computes the initial residual and the convergence
    # threshold used by `doiteration!`.
    converged, threshold =
        initialize!(linearoperator!, Q, Qrhs, solver, args...)
    converged && return iters

    while !converged && iters < max_iters
        # Each call may perform several inner iterations (e.g. a GMRES
        # restart cycle); convergence is checked inside `doiteration!`
        # against `threshold`.
        converged, inner_iters, residual_norm = doiteration!(
            linearoperator!,
            preconditioner,
            Q,
            Qrhs,
            solver,
            threshold,
            args...,
        )

        iters += inner_iters

        if !isfinite(residual_norm)
            error("norm of residual is not finite after $iters iterations of `doiteration!`")
        end
        # NOTE: a dead local (`achieved_tolerance = residual_norm / threshold
        # * solver.rtol`) was removed here; it was computed every iteration
        # and never read.
    end

    converged ||
        @warn "Solver did not attain convergence after $iters iterations"
    cvg[] = converged

    iters
end

# Computes Q .= (increment ? Q : 0) .+ sum_j cs[j] * Xs[j] elementwise.
@kernel function linearcombination!(Q, cs, Xs, increment::Bool)
    i = @index(Global, Linear)
    if !increment
        @inbounds Q[i] = -zero(eltype(Q))
    end
    @inbounds for j in 1:length(cs)
        Q[i] += cs[j] * Xs[j][i]
    end
end

include("generalized_minimal_residual_solver.jl")
include("generalized_conjugate_residual_solver.jl")
include("conjugate_gradient_solver.jl")
include("columnwise_lu_solver.jl")
include("preconditioners.jl")
include("batched_generalized_minimal_residual_solver.jl")
include("jacobian_free_newton_krylov_solver.jl")

end

================================================
FILE: src/Numerics/SystemSolvers/batched_generalized_minimal_residual_solver.jl
================================================
using CUDA

export BatchedGeneralizedMinimalResidual

"""
    BatchedGeneralizedMinimalResidual(
        Q,
        dofperbatch,
        Nbatch;
        M = min(20, length(Q)),
        rtol = √eps(eltype(AT)),
        atol = eps(eltype(AT)),
        forward_reshape = size(Q),
        forward_permute = Tuple(1:length(size(Q))),
    )

# BGMRES
This is an object for solving batched linear systems using the GMRES algorithm.
The constructor parameter `M` is the number of steps after which the algorithm
is restarted (if it has not converged), `Q` is a reference state used only
to allocate the solver internal state, `dofperbatch` is the size of each
batched system (assumed to be the same throughout), `Nbatch` is the total
number of independent linear systems, and `rtol` specifies the convergence
criterion based on the relative residual norm (max across all batched systems).
The argument `forward_reshape` is a tuple of integers denoting the reshaping
(if required) of the solution vectors for batching the Arnoldi routines. The
argument `forward_permute` describes precisely which indices of the array `Q`
to permute.

This object is intended to be passed to the [`linearsolve!`](@ref) command.

This uses a batched-version of the restarted Generalized Minimal Residual
method of Saad and Schultz (1986).

# Note
Eventually, we'll want to do something like this:

    i = @index(Global)
    linearoperator!(Q[:, :, :, i], args...)

This will help stop the need for constantly reshaping the work arrays.
It would also potentially save us some memory.
"""
mutable struct BatchedGeneralizedMinimalResidual{
    I,
    T,
    AT,
    BKT,
    OmT,
    HT,
    gT,
    sT,
    resT,
    res0T,
    FRS,
    FPR,
    BRS,
    BPR,
} <: AbstractIterativeSystemSolver
    "global Krylov basis at present step"
    krylov_basis::AT
    "global Krylov basis at previous step"
    krylov_basis_prev::AT
    "global batched Krylov basis"
    batched_krylov_basis::BKT
    "Storage for the Givens rotation matrices"
    Ω::OmT
    "Hessenberg matrix in each column"
    H::HT
    "rhs of the least squares problem in each column"
    g0::gT
    "The GMRES iterate in each batched column"
    sol::sT
    "Relative tolerance"
    rtol::T
    "Absolute tolerance"
    atol::T
    "Maximum number of GMRES iterations (global across all columns)"
    max_iter::I
    "total number of batched columns"
    batch_size::I
    "total number of dofs per batched column"
    dofperbatch::I
    "residual norm in each column"
    resnorms::resT
    "initial residual norm in each column"
    initial_resnorms::res0T
    # Reshape/permute tuples mapping between the global layout of `Q` and the
    # batched (column-wise) layout, and their inverses.
    forward_reshape::FRS
    forward_permute::FPR
    backward_reshape::BRS
    backward_permute::BPR

    function BatchedGeneralizedMinimalResidual(
        Q::AT,
        dofperbatch,
        Nbatch;
        M = min(20, length(Q)),
        rtol = √eps(eltype(AT)),
        atol = eps(eltype(AT)),
        forward_reshape = size(Q),
        forward_permute = Tuple(1:length(size(Q))),
    ) where {AT}
        # Get ArrayType information
        if isa(array_device(Q), CPU)
            ArrayType = Array
        else
            # Sanity check since we don't support anything else
            @assert isa(array_device(Q), CUDADevice)
            ArrayType = CuArray
        end

        # FIXME: If we can batch the application of linearoperator!, then we
        # dont need these two temporary work vectors (unpermuted/reshaped)
        krylov_basis = similar(Q)
        krylov_basis_prev = similar(Q)

        FT = eltype(AT)

        # Create storage for holding the batched Krylov basis
        batched_krylov_basis =
            fill!(ArrayType{FT}(undef, M + 1, dofperbatch, Nbatch), 0)

        # Create storage for doing the batched Arnoldi process
        Ω = fill!(ArrayType{FT}(undef, Nbatch, 2 * M), 0)
        H = fill!(ArrayType{FT}(undef, Nbatch, M + 1, M), 0)
        g0 = fill!(ArrayType{FT}(undef, Nbatch, M + 1), 0)

        # Create storage for constructing the global gmres iterate
        # and recording column-norms
        sol = fill!(ArrayType{FT}(undef, dofperbatch, Nbatch), 0)
        resnorms = fill!(ArrayType{FT}(undef, Nbatch), 0)
        initial_resnorms = fill!(ArrayType{FT}(undef, Nbatch), 0)

        @assert dofperbatch * Nbatch == length(Q)

        # Define the back permutation and reshape
        backward_permute = Tuple(sortperm([forward_permute...]))
        tmp_reshape_tuple_b = [forward_reshape...]
        permute!(tmp_reshape_tuple_b, [forward_permute...])
        backward_reshape = Tuple(tmp_reshape_tuple_b)

        # FIXME: Is there a better way of doing this?
        BKT = typeof(batched_krylov_basis)
        OmT = typeof(Ω)
        HT = typeof(H)
        gT = typeof(g0)
        sT = typeof(sol)
        resT = typeof(resnorms)
        res0T = typeof(initial_resnorms)
        FRS = typeof(forward_reshape)
        FPR = typeof(forward_permute)
        BRS = typeof(backward_reshape)
        BPR = typeof(backward_permute)

        return new{
            typeof(Nbatch),
            eltype(Q),
            AT,
            BKT,
            OmT,
            HT,
            gT,
            sT,
            resT,
            res0T,
            FRS,
            FPR,
            BRS,
            BPR,
        }(
            krylov_basis,
            krylov_basis_prev,
            batched_krylov_basis,
            Ω,
            H,
            g0,
            sol,
            rtol,
            atol,
            M,
            Nbatch,
            dofperbatch,
            resnorms,
            initial_resnorms,
            forward_reshape,
            forward_permute,
            backward_reshape,
            backward_permute,
        )
    end
end

"""
    BatchedGeneralizedMinimalResidual(
        dg::SpaceDiscretization,
        Q::MPIStateArray;
        atol = sqrt(eps(eltype(Q))),
        rtol = sqrt(eps(eltype(Q))),
        max_subspace_size = nothing,
        independent_states = false,
    )

# Description
Specialized constructor for `BatchedGeneralizedMinimalResidual` struct, using
a `SpaceDiscretization` to infer state-information and determine appropriate
reshaping and permutations.

# Arguments
- `dg`: (SpaceDiscretization) A `SpaceDiscretization` containing all
  relevant grid and topology information.
- `Q` : (MPIStateArray) An `MPIStateArray` containing field information.

# Keyword Arguments
- `atol`: (float) absolute tolerance. `DEFAULT = sqrt(eps(eltype(Q)))`
- `rtol`: (float) relative tolerance. `DEFAULT = sqrt(eps(eltype(Q)))`
- `max_subspace_size` : (Int).    Maximal dimension of each (batched)
  Krylov subspace.
  DEFAULT = nothing
- `independent_states`: (boolean) An optional flag indicating whether
  or not degrees of freedom are coupled internally (within a column).
  `DEFAULT = false`

# Return
instance of `BatchedGeneralizedMinimalResidual` struct
"""
function BatchedGeneralizedMinimalResidual(
    dg::SpaceDiscretization,
    Q::MPIStateArray;
    atol = sqrt(eps(eltype(Q))),
    rtol = sqrt(eps(eltype(Q))),
    max_subspace_size = nothing,
    independent_states = false,
)
    grid = dg.grid
    topology = grid.topology
    dim = dimensionality(grid)

    N = polynomialorders(grid)
    # Number of Gauss-Lobatto quadrature points in each direction
    Nq = N .+ 1

    # Number of states and elements (in vertical and horizontal directions)
    num_states = size(Q)[2]
    nelem = length(topology.realelems)
    nvertelem = topology.stacksize
    nhorzelem = div(nelem, nvertelem)

    # Definition of a "column" here is a vertical stack of degrees
    # of freedom. For example, consider a mesh consisting of a single
    # linear element:
    #    o----------o
    #    |\ d1   d2 |\
    #    | \        | \
    #    |  \ d3    d4 \
    #    |   o----------o
    #    o--d5---d6-o   |
    #     \  |       \  |
    #      \ |        \ |
    #       \|d7    d8 \|
    #        o----------o
    # There are 4 total 1-D columns, each containing two
    # degrees of freedom. In general, a mesh of stacked elements will
    # have `Nq[1] * Nq[2] * nhorzelem` total 1-D columns.
    # A single 1-D column has `Nq[3] * nvertelem * num_states`
    # degrees of freedom.
    #
    # nql = length(Nq)
    # indices:  (1...nql, nql + 1 , nql + 2, nql + 3)
    # for 3d case, this is [ni, nj, nk, num_states, nvertelem, nhorzelem]
    # here ni, nj, nk are number of Gauss quadrature points in each element
    # in x-y-z directions
    # Q = reshape(Q, reshaping_tup), leads to the column-wise fashion Q
    reshaping_tup = (Nq..., num_states, nvertelem, nhorzelem)

    @inbounds Nqv = Nq[dim]
    @inbounds Nqh = dim == 2 ? Nq[1] : Nq[1] * Nq[2]
    # `m` is the size of each batched system; `n` is the number of batches.
    if independent_states
        m = Nqv * nvertelem
        n = Nqh * nhorzelem * num_states
    else
        m = Nqv * nvertelem * num_states
        n = Nqh * nhorzelem
    end

    # By default the Krylov subspace is as large as a full column system,
    # so (batched) GMRES can solve exactly without restarting.
    if max_subspace_size === nothing
        max_subspace_size = m
    end

    # permute [ni, nj, nk, num_states, nvertelem, nhorzelem]
    # to [nvertelem, nk, num_states, ni, nj, nhorzelem]
    permute_size = length(reshaping_tup)
    permute_tuple_f = (dim + 1, dim, dim + 2, (1:(dim - 1))..., permute_size)

    return BatchedGeneralizedMinimalResidual(
        Q,
        m,
        n;
        M = max_subspace_size,
        atol = atol,
        rtol = rtol,
        forward_reshape = reshaping_tup,
        forward_permute = permute_tuple_f,
    )
end

function initialize!(
    linearoperator!,
    Q,
    Qrhs,
    solver::BatchedGeneralizedMinimalResidual,
    args...;
    restart = false,
)
    g0 = solver.g0
    krylov_basis = solver.krylov_basis
    rtol, atol = solver.rtol, solver.atol
    batched_krylov_basis = solver.batched_krylov_basis
    Ndof = solver.dofperbatch
    forward_reshape = solver.forward_reshape
    forward_permute = solver.forward_permute
    resnorms = solver.resnorms
    initial_resnorms = solver.initial_resnorms
    max_iter = solver.max_iter

    # Device and groupsize information
    device = array_device(Q)
    groupsize = 256

    @assert size(Q) == size(krylov_basis)

    # PRECONDITIONER: PQ0 -> P*Q0,
    # the first basis is (J Pinv)PQ0 = b,  kry1 = b - J Q0
    linearoperator!(krylov_basis, Q, args...)
    krylov_basis .= Qrhs .- krylov_basis

    # Convert into a batched Krylov basis vector
    # REMARK: Ugly hack on the GPU. Can we fix this?
    tmp_array = similar(batched_krylov_basis, size(batched_krylov_basis)[2:3])
    convert_structure!(
        tmp_array,
        krylov_basis,
        forward_reshape,
        forward_permute,
    )
    batched_krylov_basis[1, :, :] .= tmp_array

    # Now we initialize across all columns (solver.batch_size).
    # This function also computes the residual norm in each column
    event = Event(device)
    event = batched_initialize!(device, groupsize)(
        resnorms,
        g0,
        batched_krylov_basis,
        Ndof,
        max_iter;
        ndrange = solver.batch_size,
        dependencies = (event,),
    )
    wait(device, event)

    # When restarting, we do not want to overwrite the initial threshold,
    # otherwise we may not get an accurate indication that we have
    # sufficiently reduced the GMRES residual.
    if !restart
        initial_resnorms .= resnorms
    end

    residual_norm = maximum(resnorms)
    initial_residual_norm = maximum(initial_resnorms)
    converged =
        check_convergence(residual_norm, initial_residual_norm, atol, rtol)

    converged, residual_norm
end

function doiteration!(
    linearoperator!,
    preconditioner,
    Q,
    Qrhs,
    solver::BatchedGeneralizedMinimalResidual,
    threshold,
    args...,
)
    FT = eltype(Q)
    krylov_basis = solver.krylov_basis
    krylov_basis_prev = solver.krylov_basis_prev
    Hs = solver.H
    g0s = solver.g0
    Ωs = solver.Ω
    sols = solver.sol
    batched_krylov_basis = solver.batched_krylov_basis
    Ndof = solver.dofperbatch
    rtol, atol = solver.rtol, solver.atol
    max_iter = solver.max_iter
    forward_reshape = solver.forward_reshape
    forward_permute = solver.forward_permute
    backward_reshape = solver.backward_reshape
    backward_permute = solver.backward_permute
    resnorms = solver.resnorms
    initial_resnorms = solver.initial_resnorms
    initial_residual_norm = maximum(initial_resnorms)

    # Device and groupsize information
    device = array_device(Q)
    groupsize = 256

    # Main batched-GMRES iteration cycle
    converged = false
    residual_norm = typemax(FT)
    j = 1
    for outer j in 1:max_iter
        # FIXME: Remove this back-and-forth reshaping by exploiting the
        # data layout in a similar way that the ColumnwiseLU solver does
        convert_structure!(
            krylov_basis_prev,
            view(batched_krylov_basis, j, :, :),
            backward_reshape,
            backward_permute,
        )

        # PRECONDITIONER:
        # batched_krylov_basis[j+1] = J P^{-1} batched_krylov_basis[j]
        # set krylov_basis_prev = P^{-1} batched_krylov_basis[j]
        preconditioner_solve!(preconditioner,
            krylov_basis_prev)

        # Global operator application to get new Krylov basis vector
        linearoperator!(krylov_basis, krylov_basis_prev, args...)

        # Now that we have a global Krylov vector, we reshape and batch
        # the Arnoldi iterations across all columns
        convert_structure!(
            view(batched_krylov_basis, j + 1, :, :),
            krylov_basis,
            forward_reshape,
            forward_permute,
        )

        event = Event(device)
        event = batched_arnoldi_process!(device, groupsize)(
            resnorms,
            g0s,
            Hs,
            Ωs,
            batched_krylov_basis,
            j,
            Ndof;
            ndrange = solver.batch_size,
            dependencies = (event,),
        )
        wait(device, event)

        # Current stopping criteria is based on the maximal column norm
        # TODO: Once we are able to batch the operator application, we
        # should revisit the termination criteria.
        residual_norm = maximum(resnorms)
        converged =
            check_convergence(residual_norm, initial_residual_norm, atol, rtol)
        if converged
            break
        end
    end

    # Reshape the solution vector to construct the new GMRES iterate
    # PRECONDITIONER Q = Q0 + Pinv PΔQ = Q0 + Pinv (Kry * y)
    # sol = PΔQ = Kry * y
    sols .= 0
    # Solve the triangular system (minimization problem for optimal linear
    # coefficients in the GMRES iterate) and construct the current iterate
    # in each column
    event = Event(device)
    event = construct_batched_gmres_iterate!(device, groupsize)(
        batched_krylov_basis,
        Hs,
        g0s,
        sols,
        j,
        Ndof;
        ndrange = solver.batch_size,
        dependencies = (event,),
    )
    wait(device, event)

    # Use krylov_basis_prev as container for ΔQ
    ΔQ = krylov_basis_prev
    # Unwind reshaping and return solution in standard format
    convert_structure!(ΔQ, sols, backward_reshape, backward_permute)

    # PRECONDITIONER: Q -> Pinv Q
    preconditioner_solve!(preconditioner, ΔQ)
    Q .+= ΔQ

    # if not converged, then restart
    converged ||
        initialize!(linearoperator!, Q, Qrhs, solver, args...; restart = true)

    (converged, j, residual_norm)
end

# Per-column GMRES initialization: zero the least-squares RHS `g0`, set its
# first entry to the column residual norm, and normalize the first Krylov
# basis vector of that column.
@kernel function batched_initialize!(
    resnorms,
    g0,
    batched_krylov_basis,
    Ndof,
    M,
)
    cidx = @index(Global)
    FT = eltype(batched_krylov_basis)

    # Initialize entire RHS storage
    @inbounds for j in 1:(M + 1)
        g0[cidx, j] = FT(0.0)
    end

    # Now we compute the first element of g0[cidx, :],
    # which is determined by the column norm of the initial residual ∥r0∥_2:
    # g0 = ∥r0∥_2 e1
    @inbounds for j in 1:Ndof
        g0[cidx, 1] +=
            batched_krylov_basis[1, j, cidx] * batched_krylov_basis[1, j, cidx]
    end
    @inbounds g0[cidx, 1] = sqrt(g0[cidx, 1])

    # Normalize the batched_krylov_basis by the (local) residual norm
    @inbounds for j in 1:Ndof
        batched_krylov_basis[1, j, cidx] /= g0[cidx, 1]
    end

    # Record initialize residual norm in the column
    @inbounds resnorms[cidx] = g0[cidx, 1]

    nothing
end

# One Arnoldi step (iteration `j`) per column: modified Gram-Schmidt
# orthogonalization, normalization of the new basis vector, then Givens
# rotations to keep the Hessenberg matrix upper triangular.
@kernel function batched_arnoldi_process!(
    resnorms,
    g0,
    H,
    Ω,
    batched_krylov_basis,
    j,
    Ndof,
)
    cidx = @index(Global)
    FT = eltype(batched_krylov_basis)

    # Arnoldi process in the local column `cidx`
    @inbounds for i in 1:j
        H[cidx, i, j] = FT(0.0)
        # Modified Gram-Schmidt procedure to generate the Hessenberg matrix
        for k in 1:Ndof
            H[cidx, i, j] +=
                batched_krylov_basis[j + 1, k, cidx] *
                batched_krylov_basis[i, k, cidx]
        end
        # Orthogonalize new Krylov vector against previous one
        for k in 1:Ndof
            batched_krylov_basis[j + 1, k, cidx] -=
                H[cidx, i, j] * batched_krylov_basis[i, k, cidx]
        end
    end

    # And finally, normalize latest Krylov basis vector
    local_norm = FT(0.0)
    @inbounds for i in 1:Ndof
        local_norm +=
            batched_krylov_basis[j + 1, i, cidx] *
            batched_krylov_basis[j + 1, i, cidx]
    end
    @inbounds H[cidx, j + 1, j] = sqrt(local_norm)

    @inbounds for i in 1:Ndof
        batched_krylov_basis[j + 1, i, cidx] /= H[cidx, j + 1, j]
    end

    # Loop over previously computed Krylov basis vectors
    # and apply the Givens rotations
    @inbounds for i in 1:(j - 1)
        cos_tmp = Ω[cidx, 2 * i - 1]
        sin_tmp = Ω[cidx, 2 * i]
        # Apply the Givens rotations
        # | cos   sin | | hi   |
        # | -sin  cos | | hi+1 |
        tmp1 = cos_tmp * H[cidx, i, j] + sin_tmp * H[cidx, i + 1, j]
        H[cidx, i + 1, j] =
            -sin_tmp * H[cidx, i, j] + cos_tmp * H[cidx, i + 1, j]
        H[cidx, i, j] = tmp1
    end

    # Eliminate the last element hj+1 and update the rotation matrix
    # | cos   sin | | hj   | = | hj'|
    # | -sin  cos | | hj+1 | = | 0  |
    # where cos, sin = hj/sqrt(hj^2 + hj+1^2), hj+1/sqrt(hj^2 + hj+1^2),
    # and update for next iteration
    @inbounds begin
        Ω[cidx, 2 * j - 1] = H[cidx, j, j]
        Ω[cidx, 2 * j] = H[cidx, j + 1, j]
        H[cidx, j, j] = sqrt(Ω[cidx, 2 * j - 1]^2 + Ω[cidx, 2 * j]^2)
        H[cidx, j + 1, j] = FT(0.0)
        Ω[cidx, 2 * j - 1] /= H[cidx, j, j]
        Ω[cidx, 2 * j] /= H[cidx, j, j]

        # And now to the rhs g0
        cos_tmp = Ω[cidx, 2 * j - 1]
        sin_tmp = Ω[cidx, 2 * j]
        tmp1 = cos_tmp * g0[cidx, j] + sin_tmp * g0[cidx, j + 1]
        g0[cidx, j + 1] = -sin_tmp * g0[cidx, j] + cos_tmp * g0[cidx, j + 1]
        g0[cidx, j] = tmp1

        # Record estimate for the gmres residual
        resnorms[cidx] = abs(g0[cidx, j + 1])
    end

    nothing
end

@kernel function construct_batched_gmres_iterate!(
    batched_krylov_basis,
    Hs,
    g0s,
    sols,
    j,
    Ndof,
)
    # Solve for the GMRES coefficients (yⱼ) at the `j`-th
    # iteration that minimizes ∥ b - A xⱼ ∥_2, where
    # xⱼ = ∑ᵢ yᵢ Ψᵢ, with Ψᵢ denoting the Krylov basis vectors
    cidx = @index(Global)

    # Do the upper-triangular backsolve
    @inbounds for i in j:-1:1
        g0s[cidx, i] /= Hs[cidx, i, i]
        for k in 1:(i - 1)
            g0s[cidx, k] -= Hs[cidx, k, i] * g0s[cidx, i]
        end
    end

    # Having determined yᵢ, we now construct the GMRES solution
    # in each column: xⱼ = ∑ᵢ yᵢ Ψᵢ
    @inbounds for i in 1:j
        for k in 1:Ndof
            sols[k, cidx] += g0s[cidx, i] * batched_krylov_basis[i, k, cidx]
        end
    end

    nothing
end

"""
    convert_structure!(
        x,
        y,
        reshape_tuple,
        permute_tuple,
    )

Computes a tensor transpose and stores result in x

# Arguments
- `x`: (array) [OVERWRITTEN]. target destination for storing the y data
- `y`: (array).
  data that we want to copy
- `reshape_tuple`: (tuple) reshapes y to be like that of x, up to a
  permutation
- `permute_tuple`: (tuple) permutes the reshaped array into the correct
  structure
"""
@inline function convert_structure!(x, y, reshape_tuple, permute_tuple)
    # Reshape is a view; permutedims materializes the transposed data, which
    # is then copied into `x` with `x`'s own shape.
    alias_y = reshape(y, reshape_tuple)
    permute_y = permutedims(alias_y, permute_tuple)
    copyto!(x, reshape(permute_y, size(x)))
    nothing
end

# MPIStateArray wrappers: operate on the underlying `realdata` storage.
@inline convert_structure!(x, y::MPIStateArray, reshape_tuple, permute_tuple) =
    convert_structure!(x, y.realdata, reshape_tuple, permute_tuple)
@inline convert_structure!(x::MPIStateArray, y, reshape_tuple, permute_tuple) =
    convert_structure!(x.realdata, y, reshape_tuple, permute_tuple)

# Converged when the absolute residual is below `atol` OR the residual has
# been reduced relative to the initial residual by `rtol`.
function check_convergence(residual_norm, initial_residual_norm, atol, rtol)
    relative_residual = residual_norm / initial_residual_norm
    converged = false
    if (residual_norm ≤ atol || relative_residual ≤ rtol)
        converged = true
    end
    return converged
end

================================================
FILE: src/Numerics/SystemSolvers/columnwise_lu_solver.jl
================================================
#### Columnwise LU Solver

export ManyColumnLU, SingleColumnLU

abstract type AbstractColumnLUSolver <: AbstractSystemSolver end

"""
    ManyColumnLU()

This solver is used for systems that are block diagonal where each block is
associated with a column of the mesh.  The systems are solved using a
non-pivoted LU factorization.
"""
struct ManyColumnLU <: AbstractColumnLUSolver end

"""
    SingleColumnLU()

This solver is used for systems that are block diagonal where each block is
associated with a column of the mesh.  Moreover, each block is assumed to be
the same.  The systems are solved using a non-pivoted LU factorization.
"""
struct SingleColumnLU <: AbstractColumnLUSolver end

# Container for the prefactorized columnwise LU data (see `prefactorize`).
struct ColumnwiseLU{AT}
    A::AT
end

# Banded storage of the columnwise operator; type parameters carry the grid
# metadata: D = dimensionality, P = polynomial orders (tuple), NS = number of
# states, EH/EV = horizontal/vertical element counts, EB = element bandwidth,
# SC = single-column flag.
struct DGColumnBandedMatrix{D, P, NS, EH, EV, EB, SC, AT}
    data::AT
end

DGColumnBandedMatrix(
    A::DGColumnBandedMatrix{D, P, NS, EH, EV, EB, SC},
    data,
) where {D, P, NS, EH, EV, EB, SC} =
    DGColumnBandedMatrix{D, P, NS, EH, EV, EB, SC, typeof(data)}(data)

Base.eltype(A::DGColumnBandedMatrix) = eltype(A.data)
Base.size(A::DGColumnBandedMatrix) = size(A.data)
dimensionality(::DGColumnBandedMatrix{D}) where {D} = D
polynomialorders(::DGColumnBandedMatrix{D, P}) where {D, P} = P
# polynomialorders is polynomial orders P, which is a tuple,
# vertical_polynomialorder is the vertical polynomial order
vertical_polynomialorder(::DGColumnBandedMatrix{D, P}) where {D, P} = P[end]
num_state(::DGColumnBandedMatrix{D, P, NS}) where {D, P, NS} = NS
num_horz_elem(::DGColumnBandedMatrix{D, P, NS, EH}) where {D, P, NS, EH} = EH
num_vert_elem(
    ::DGColumnBandedMatrix{D, P, NS, EH, EV},
) where {D, P, NS, EH, EV} = EV
elem_band(
    ::DGColumnBandedMatrix{D, P, NS, EH, EV, EB},
) where {D, P, NS, EH, EV, EB} = EB
single_column(
    ::DGColumnBandedMatrix{D, P, NS, EH, EV, EB, SC},
) where {D, P, NS, EH, EV, EB, SC} = SC

# DG: lower_bandwidth is Nq_v*nstate * eband - 1, (does not include itself)
#     eband = 1 for inviscid, since the nodal point at the face communicates
#             with the overlapping point of its neighbor and other points
#             only communicate with points in the same element
#     eband = 2 for viscous
#
# FV: lower_bandwidth is nstate * (stencil_width + 1 + 1) - 1,
#     since the reconstruction states depend on stencil_width points on each
#     side, and the flux depends on stencil_width + 1 points on each side
#     eband = (stencil_width + 2) for inviscid
#     eband = max{ (stencil_width + 2), 3} for viscous,
#     since the viscous flux is computed by applying first order FD twice
#
# The lower_bandwidth is formulated as Nq_v*nstate * eband - 1
lower_bandwidth(N, nstate, eband) = (N + 1) * nstate * eband - 1
lower_bandwidth(A::DGColumnBandedMatrix) =
    lower_bandwidth(vertical_polynomialorder(A), num_state(A), elem_band(A))

# The column operator's band structure is symmetric: upper bandwidth equals
# the lower bandwidth.
upper_bandwidth(N, nstate, eband) = lower_bandwidth(N, nstate, eband)
upper_bandwidth(A::DGColumnBandedMatrix) =
    upper_bandwidth(vertical_polynomialorder(A), num_state(A), elem_band(A))

# Reshape/adapt operate on the wrapped data while preserving the metadata
# carried in the type parameters.
Base.reshape(A::DGColumnBandedMatrix, args...) =
    DGColumnBandedMatrix(A, reshape(A.data, args...))

Adapt.adapt_structure(to, A::DGColumnBandedMatrix) =
    DGColumnBandedMatrix(A, adapt(to, A.data))

Base.@propagate_inbounds function Base.getindex(A::DGColumnBandedMatrix, I...)
    return A.data[I...]
end

Base.@propagate_inbounds function Base.setindex!(
    A::DGColumnBandedMatrix,
    val,
    I...,
)
    A.data[I...] = val
end

# Build the banded columnwise matrix for the operator `op` and LU factorize
# it in place; the returned `ColumnwiseLU` is consumed by `linearsolve!`.
function prefactorize(op, solver::AbstractColumnLUSolver, Q, args...)
    dg = op.f!

    # TODO: can we get away with just passing the grid?
    A = banded_matrix(
        op,
        dg,
        similar(Q),
        similar(Q),
        args...;
        single_column = typeof(solver) <: SingleColumnLU,
    )

    band_lu!(A)

    ColumnwiseLU(A)
end

# Direct solve with the prefactorized LU: forward substitution then back
# substitution, overwriting `Q` (initialized with `Qrhs`).
function linearsolve!(
    linop,
    clu::ColumnwiseLU,
    ::AbstractColumnLUSolver,
    Q,
    Qrhs,
    args...,
)
    A = clu.A
    Q .= Qrhs

    band_forward!(Q, A)
    band_back!(Q, A)
end

"""
    band_lu!(A)

In-place, non-pivoted LU factorization of the banded matrix (or matrices)
`A`, performed columnwise on the device via `band_lu_kernel!`.
"""
function band_lu!(A)
    device = array_device(A.data)
    nstate = num_state(A)
    Nq = polynomialorders(A) .+ 1
    @inbounds Nq_h = Nq[1]
    @inbounds Nqj = dimensionality(A) == 2 ? 1 : Nq[2]
    nhorzelem = num_horz_elem(A)

    groupsize = (Nq_h, Nqj)
    ndrange = (nhorzelem * Nq_h, Nqj)

    if single_column(A)
        # single column case
        #
        # TODO Would it be faster to copy the matrix to the host and
        # factorize it there?
        groupsize = (1, 1)
        ndrange = groupsize
        A = reshape(A, 1, 1, size(A)..., 1)
    end

    event = Event(device)
    event = band_lu_kernel!(device, groupsize)(
        A,
        ndrange = ndrange,
        dependencies = (event,),
    )
    wait(device, event)
end

# Forward substitution with the (unit lower) factor of the banded LU.
function band_forward!(Q, A)
    device = array_device(Q)
    Nq = polynomialorders(A) .+ 1
    @inbounds Nq_h = Nq[1]
    @inbounds Nqj = dimensionality(A) == 2 ? 1 : Nq[2]
    nhorzelem = num_horz_elem(A)

    event = Event(device)
    event = band_forward_kernel!(device, (Nq_h, Nqj))(
        Q.data,
        A,
        ndrange = (nhorzelem * Nq_h, Nqj),
        dependencies = (event,),
    )
    wait(device, event)
end

# Back substitution with the upper factor of the banded LU.
function band_back!(Q, A)
    device = array_device(Q)
    Nq = polynomialorders(A) .+ 1
    @inbounds Nq_h = Nq[1]
    @inbounds Nqj = dimensionality(A) == 2 ? 1 : Nq[2]
    nhorzelem = num_horz_elem(A)

    event = Event(device)
    event = band_back_kernel!(device, (Nq_h, Nqj))(
        Q.data,
        A,
        ndrange = (nhorzelem * Nq_h, Nqj),
        dependencies = (event,),
    )
    wait(device, event)
end

"""
    banded_matrix(
        dg::SpaceDiscretization,
        Q::MPIStateArray = MPIStateArray(dg),
        dQ::MPIStateArray = MPIStateArray(dg);
        single_column = false,
    )

Forms the banded matrices for each the column operator defined by the
`SpaceDiscretization` dg.  If `single_column=false` then a banded matrix is
stored for each column and if `single_column=true` only the banded matrix
associated with the first column of the first element is stored. The bandwidth
of the DG column banded matrix is
`p = q = (vertical_polynomial + 1) * nstate * eband - 1` with `p` and `q`
being the upper and lower bandwidths.

The banded matrices are stored in the LAPACK band storage format.

The banded matrices are returned as an arrays where the array type matches
that of `Q`. If `single_column=false` then the returned array has 5
dimensions, which are:
- first horizontal column index
- second horizontal column index
- band index (-q:p)
- vertical DOF index with state `s`, vertical DOF index `k`, and vertical
  element `ev` mapping to `s + nstate * (k - 1) + nstate * nvertelem * (ev - 1)`
- horizontal element index

If the `single_column=true` then the returned array has 2 dimensions which
are the band index and the vertical DOF index.
""" function banded_matrix( dg::SpaceDiscretization, Q::MPIStateArray = MPIStateArray(dg), dQ::MPIStateArray = MPIStateArray(dg); single_column = false, ) banded_matrix( (dQ, Q) -> dg(dQ, Q, nothing, 0; increment = false), dg, Q, dQ; single_column = single_column, ) end """ banded_matrix( f!, dg::SpaceDiscretization, Q::MPIStateArray = MPIStateArray(dg), dQ::MPIStateArray = MPIStateArray(dg), args...; single_column = false, ) Forms the banded matrices for each the column operator defined by the linear operator `f!` which is assumed to have the same banded structure as the `SpaceDiscretization` dg. If `single_column=false` then a banded matrix is stored for each column and if `single_column=true` only the banded matrix associated with the first column of the first element is stored. The bandwidth of the DG column banded matrix is `p = q = (vertical_polynomial + 1) * nstate * eband - 1` with `p` and `q` being the upper and lower bandwidths. The banded matrices are stored in the LAPACK band storage format . The banded matrices are returned as an arrays where the array type matches that of `Q`. If `single_column=false` then the returned array has 5 dimensions, which are: - first horizontal column index - second horizontal column index - band index (-q:p) - vertical DOF index with state `s`, vertical DOF index `k`, and vertical element `ev` mapping to `s + nstate * (k - 1) + nstate * nvertelem * (ev - 1)` - horizontal element index If the `single_column=true` then the returned array has 2 dimensions which are the band index and the vertical DOF index. Here `args` are passed to `f!`. 
""" function banded_matrix( f!, dg::SpaceDiscretization, Q::MPIStateArray = MPIStateArray(dg), dQ::MPIStateArray = MPIStateArray(dg), args...; single_column = false, ) # Initialize banded matrix data structure A = empty_banded_matrix(dg, Q; single_column = single_column) # Populate matrix with data update_banded_matrix!( A, f!, dg, Q, dQ, args...; single_column = single_column, ) A end """ empty_banded_matrix( dg::SpaceDiscretization, Q::MPIStateArray; single_column = false, ) Initializes an empty banded matrix stored in the LAPACK band storage format . """ function empty_banded_matrix( dg::SpaceDiscretization, Q::MPIStateArray; single_column = false, ) bl = dg.balance_law grid = dg.grid topology = grid.topology @assert isstacked(topology) @assert typeof(dg.direction) <: VerticalDirection FT = eltype(Q.data) device = array_device(Q) nstate = number_states(bl, Prognostic()) N = polynomialorders(grid) dim = dimensionality(grid) Nq = N .+ 1 @inbounds begin Nq_h = Nq[1] Nqj = dim == 2 ? 1 : Nq[2] Nq_v = Nq[dim] end eband = (typeof(dg) <: DGModel) ? (number_states(bl, GradientFlux()) == 0 ? 1 : 2) : ( number_states(bl, GradientFlux()) == 0 ? 
width(dg.fv_reconstruction) + 2 : max(width(dg.fv_reconstruction) + 2, 3) ) # else: DGFVModel p = lower_bandwidth(N[dim], nstate, eband) q = upper_bandwidth(N[dim], nstate, eband) nrealelem = length(topology.realelems) nvertelem = topology.stacksize nhorzelem = div(nrealelem, nvertelem) # first horizontal DOF index # second horizontal DOF index # band index -q:p # vertical DOF index # horizontal element index A = if single_column similar(Q.data, p + q + 1, Nq_v * nstate * nvertelem) else similar(Q.data, Nq_h, Nqj, p + q + 1, Nq_v * nstate * nvertelem, nhorzelem) end fill!(A, zero(FT)) A = DGColumnBandedMatrix{ dim, N, nstate, nhorzelem, nvertelem, eband, single_column, typeof(A), }( A, ) A end """ update_banded_matrix!( A::DGColumnBandedMatrix, f!, dg::SpaceDiscretization, Q::MPIStateArray = MPIStateArray(dg), dQ::MPIStateArray = MPIStateArray(dg), args...; single_column = false, ) Updates the banded matrices for each the column operator defined by the linear operator `f!` which is assumed to have the same banded structure as the `SpaceDiscretization` dg. If `single_column=false` then a banded matrix is stored for each column and if `single_column=true` only the banded matrix associated with the first column of the first element is stored. The bandwidth of the DG column banded matrix is `p = q = (vertical_polynomial + 1) * nstate * eband - 1` with `p` and `q` being the upper and lower bandwidths. Here `args` are passed to `f!`. """ function update_banded_matrix!( A::DGColumnBandedMatrix, f!, dg::SpaceDiscretization, Q::MPIStateArray = MPIStateArray(dg), dQ::MPIStateArray = MPIStateArray(dg), args...; single_column = false, ) bl = dg.balance_law grid = dg.grid topology = grid.topology @assert isstacked(topology) @assert typeof(dg.direction) <: VerticalDirection FT = eltype(Q.data) device = array_device(Q) nstate = number_states(bl, Prognostic()) N = polynomialorders(grid) dim = dimensionality(grid) Nq = N .+ 1 @inbounds begin Nq_h = Nq[1] Nqj = dim == 2 ? 
1 : Nq[2] Nq_v = Nq[dim] end eband = elem_band(A) nrealelem = length(topology.realelems) nvertelem = topology.stacksize nhorzelem = div(nrealelem, nvertelem) # loop through all DOFs in a column and compute the matrix column # loop only the first min(nvertelem, 2eband+1) elements # in each element loop, updating these columns correspond # to elements (ev :2eband+1 : nvertelem) for ev in 1:min(nvertelem, 2eband + 1) for s in 1:nstate for k in 1:Nq_v # Set a single 1 per column and rest 0 event = Event(device) event = kernel_set_banded_data!(device, (Nq_h, Nqj, Nq_v))( Q.data, A, k, s, ev, 1:nhorzelem, 1:nvertelem; ndrange = (nvertelem * Nq_h, nhorzelem * Nqj, Nq_v), dependencies = (event,), ) wait(device, event) # Get the matrix column f!(dQ, Q, args...) # Store the banded matrix event = Event(device) event = kernel_set_banded_matrix!(device, (Nq_h, Nqj, Nq_v))( A, dQ.data, k, s, ev, 1:nhorzelem, (-eband):eband; ndrange = ((2eband + 1) * Nq_h, nhorzelem * Nqj, Nq_v), dependencies = (event,), ) wait(device, event) end end end end """ banded_matrix_vector_product!( A, dQ::MPIStateArray, Q::MPIStateArray ) Compute a matrix vector product `dQ = A * Q` where `A` is assumed to be a matrix created using the `banded_matrix` function. This function is primarily for testing purposes. """ function banded_matrix_vector_product!(A, dQ::MPIStateArray, Q::MPIStateArray) device = array_device(Q) Nq = polynomialorders(A) .+ 1 @inbounds begin Nq_h = Nq[1] Nqj = dimensionality(A) == 2 ? 1 : Nq[2] Nq_v = Nq[end] end nvertelem = num_vert_elem(A) nhorzelem = num_horz_elem(A) event = Event(device) event = kernel_banded_matrix_vector_product!(device, (Nq_h, Nqj, Nq_v))( dQ.data, A, Q.data, 1:nhorzelem, 1:nvertelem; ndrange = (nvertelem * Nq_h, nhorzelem * Nqj, Nq_v), dependencies = (event,), ) wait(device, event) end using StaticArrays using KernelAbstractions.Extras: @unroll @doc """ band_lu_kernel!(A) This performs Band Gaussian Elimination (Algorithm 4.3.1 of Golub and Van Loan). 
The array `A` contains a band matrix for each vertical column. For example,
`A[i, j, :, :, h]`, is the band matrix associated with the `(i, j)`th degree
of freedom in the horizontal element `h`.

Each `n` by `n` band matrix is assumed to have upper bandwidth `q` and lower
bandwidth `p` where `n = nstate * Nq * nvertelem` and
`p = q = (vertical_polynomial + 1) * nstate * eband - 1`

Each band matrix is stored in the
[LAPACK band storage](https://www.netlib.org/lapack/lug/node124.html).

For example the band matrix

    B = [b₁₁ b₁₂ 0   0   0
         b₂₁ b₂₂ b₂₃ 0   0
         b₃₁ b₃₂ b₃₃ b₃₄ 0
         0   b₄₂ b₄₃ b₄₄ b₄₅
         0   0   b₅₃ b₅₄ b₅₅]

is stored as

    B = [0   b₁₂ b₂₃ b₃₄ b₄₅
         b₁₁ b₂₂ b₃₃ b₄₄ b₅₅
         b₂₁ b₃₂ b₄₃ b₅₄ 0
         b₃₁ b₄₂ b₅₃ 0   0]

### Reference

 - [GolubVanLoan2013](@cite)
""" band_lu_kernel!
@kernel function band_lu_kernel!(A)
    @uniform begin
        Nq = polynomialorders(A) .+ 1
        @inbounds Nq_h = Nq[1]
        @inbounds Nq_v = Nq[end]
        nstate = num_state(A)
        nvertelem = num_vert_elem(A)
        n = nstate * Nq_v * nvertelem
        p, q = lower_bandwidth(A), upper_bandwidth(A)
    end

    # One workgroup per horizontal element; one thread per (i, j) column.
    h = @index(Group, Linear)
    i, j = @index(Local, NTuple)

    @inbounds begin
        # In-place LU without pivoting: for each pivot column kk, scale the
        # subdiagonal entries and eliminate into the trailing band.
        for v in 1:nvertelem
            for k in 1:Nq_v
                for s in 1:nstate
                    kk = s + (k - 1) * nstate + (v - 1) * nstate * Nq_v
                    # Row q + 1 of the band storage holds the diagonal.
                    Aq = A[i, j, q + 1, kk, h]
                    for ii in 1:p
                        A[i, j, q + ii + 1, kk, h] /= Aq
                    end
                    for jj in 1:q
                        if jj + kk ≤ n
                            Ajj = A[i, j, q - jj + 1, jj + kk, h]
                            for ii in 1:p
                                A[i, j, q + ii - jj + 1, jj + kk, h] -=
                                    A[i, j, q + ii + 1, kk, h] * Ajj
                            end
                        end
                    end
                end
            end
        end
    end
end

@doc """
    band_forward_kernel!(b, LU)

This performs Band Forward Substitution (Algorithm 4.3.2 of Golub and Van
Loan), i.e., the right-hand side `b` is replaced with the solution of
`L*x=b`.

The array `b` is of the size `(Nq * Nqj * Nq, nstate, nvertelem *
nhorzelem)`.

The LU-factorization array `LU` contains a single band matrix or one for
each vertical column, see [`band_lu!`](@ref).

Each `n` by `n` band matrix is assumed to have upper bandwidth `q` and lower
bandwidth `p` where `n = nstate * Nq * nvertelem` and
`p = q = (vertical_polynomial + 1) * nstate * eband - 1`

### Reference

 - [GolubVanLoan2013](@cite)
""" band_forward_kernel!
@kernel function band_forward_kernel!(b, LU)
    @uniform begin
        FT = eltype(b)
        nstate = num_state(LU)
        Nq = polynomialorders(LU) .+ 1
        @inbounds begin
            Nq_h = Nq[1]
            Nqj = dimensionality(LU) == 2 ? 1 : Nq[2]
            Nq_v = Nq[end]
        end
        nvertelem = num_vert_elem(LU)
        n = nstate * Nq_v * nvertelem
        eband = elem_band(LU)
        p, q = lower_bandwidth(LU), upper_bandwidth(LU)
        # Sliding window of the p + 1 right-hand-side entries currently in
        # the active band.
        l_b = MArray{Tuple{p + 1}, FT}(undef)
    end

    h = @index(Group, Linear)
    i, j = @index(Local, NTuple)

    @inbounds begin
        # Preload the window from the first `eband` vertical elements.
        @unroll for v in 1:eband
            @unroll for k in 1:Nq_v
                @unroll for s in 1:nstate
                    ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (k - 1)
                    ee = v + nvertelem * (h - 1)
                    ii = s + (k - 1) * nstate + (v - 1) * nstate * Nq_v
                    l_b[ii] = nvertelem ≥ v ? b[ijk, s, ee] : zero(FT)
                end
            end
        end

        for v in 1:nvertelem
            @unroll for k in 1:Nq_v
                @unroll for s in 1:nstate
                    jj = s + (k - 1) * nstate + (v - 1) * nstate * Nq_v

                    # Eliminate column jj of unit-lower-triangular L from the
                    # window.
                    @unroll for ii in 2:(p + 1)
                        Lii = single_column(LU) ? LU[ii + q, jj] :
                            LU[i, j, ii + q, jj, h]
                        l_b[ii] -= Lii * l_b[1]
                    end

                    ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (k - 1)
                    ee = v + nvertelem * (h - 1)
                    b[ijk, s, ee] = l_b[1]

                    # Shift the window down and pull in the next entry.
                    @unroll for ii in 1:p
                        l_b[ii] = l_b[ii + 1]
                    end

                    if jj + p < n
                        (idx, si) = fldmod1(jj + p + 1, nstate)
                        (vi, ki) = fldmod1(idx, Nq_v)
                        ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (ki - 1)
                        ee = vi + nvertelem * (h - 1)
                        l_b[p + 1] = b[ijk, si, ee]
                    end
                end
            end
        end
    end
end

@doc """
    band_back_kernel!(b, LU)

This performs Band Back Substitution (Algorithm 4.3.3 of Golub and Van
Loan), i.e., the right-hand side `b` is replaced with the solution of
`U*x=b`.

The array `b` is of the size `(Nq * Nqj * Nq, nstate, nvertelem *
nhorzelem)`.

The LU-factorization array `LU` contains a single band matrix or one for
each vertical column, see [`band_lu!`](@ref).

Each `n` by `n` band matrix is assumed to have upper bandwidth `q` and lower
bandwidth `p` where `n = nstate * Nq * nvertelem` and
`p = q = (vertical_polynomial + 1) * nstate * eband - 1`

### Reference

 - [GolubVanLoan2013](@cite)
""" band_back_kernel!
@kernel function band_back_kernel!(b, LU)
    @uniform begin
        FT = eltype(b)
        nstate = num_state(LU)
        Nq = polynomialorders(LU) .+ 1
        @inbounds begin
            Nq_h = Nq[1]
            Nqj = dimensionality(LU) == 2 ? 1 : Nq[2]
            Nq_v = Nq[end]
        end
        nvertelem = num_vert_elem(LU)
        # NOTE(review): this uses Nq_h where the forward kernel uses Nq_v;
        # `n` is not referenced below, so it is harmless, but confirm intent.
        n = nstate * Nq_h * nvertelem
        q = upper_bandwidth(LU)
        eband = elem_band(LU)
        # Sliding window of the q + 1 right-hand-side entries in the band.
        l_b = MArray{Tuple{q + 1}, FT}(undef)
    end

    h = @index(Group, Linear)
    i, j = @index(Local, NTuple)

    @inbounds begin
        # Preload the window from the last `eband` vertical elements.
        @unroll for v in nvertelem:-1:(nvertelem - eband + 1)
            @unroll for k in Nq_v:-1:1
                @unroll for s in nstate:-1:1
                    vi = eband - nvertelem + v
                    ii = s + (k - 1) * nstate + (vi - 1) * nstate * Nq_v
                    ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (k - 1)
                    ee = v + nvertelem * (h - 1)
                    l_b[ii] = b[ijk, s, ee]
                end
            end
        end

        for v in nvertelem:-1:1
            @unroll for k in Nq_v:-1:1
                @unroll for s in nstate:-1:1
                    jj = s + (k - 1) * nstate + (v - 1) * nstate * Nq_v

                    # Divide by the diagonal of U (band row q + 1).
                    l_b[q + 1] /= single_column(LU) ? LU[q + 1, jj] :
                        LU[i, j, q + 1, jj, h]

                    @unroll for ii in 1:q
                        Uii = single_column(LU) ? LU[ii, jj] :
                            LU[i, j, ii, jj, h]
                        l_b[ii] -= Uii * l_b[q + 1]
                    end

                    ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (k - 1)
                    ee = v + nvertelem * (h - 1)
                    b[ijk, s, ee] = l_b[q + 1]

                    # Shift the window up and pull in the next entry.
                    @unroll for ii in q:-1:1
                        l_b[ii + 1] = l_b[ii]
                    end

                    if jj - q > 1
                        (idx, si) = fldmod1(jj - q - 1, nstate)
                        (vi, ki) = fldmod1(idx, Nq_v)
                        ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (ki - 1)
                        ee = vi + nvertelem * (h - 1)
                        l_b[1] = b[ijk, si, ee]
                    end
                end
            end
        end
    end
end

# Writes the probe vector for `update_banded_matrix!`: sets `Q` to 1 at the
# (kin, sin) degree of freedom of every vertical element congruent to
# `evin0` modulo `2eband + 1`, and to 0 everywhere else, so a single operator
# application yields several well-separated matrix columns at once.
@kernel function kernel_set_banded_data!(
    Q,
    A::DGColumnBandedMatrix,
    kin,
    sin,
    evin0,
    helems,
    velems,
)
    @uniform begin
        FT = eltype(Q)
        nstate = num_state(A)
        Nq = polynomialorders(A) .+ 1
        @inbounds begin
            Nq_h = Nq[1]
            Nq_v = Nq[end]
            Nqj = dimensionality(A) == 2 ? 1 : Nq[2]
        end
        nvertelem = num_vert_elem(A)
        eband = elem_band(A)
    end

    ev, eh = @index(Group, NTuple)
    i, j, k = @index(Local, NTuple)

    @inbounds begin
        e = ev + (eh - 1) * nvertelem
        ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (k - 1)
        @unroll for s in 1:nstate
            if k == kin && s == sin && ((ev - evin0) % (2eband + 1) == 0)
                Q[ijk, s, e] = 1
            else
                Q[ijk, s, e] = 0
            end
        end
    end
end

# Scatters the operator response `dQ` (from one probe of
# `kernel_set_banded_data!`) into the LAPACK-band storage of `A`.
@kernel function kernel_set_banded_matrix!(
    A,
    dQ,
    kin,
    sin,
    evin0,
    helems,
    vpelems,
)
    @uniform begin
        FT = eltype(A)
        nstate = num_state(A)
        Nq = polynomialorders(A) .+ 1
        @inbounds begin
            Nq_h = Nq[1]
            Nqj = dimensionality(A) == 2 ? 1 : Nq[2]
            Nq_v = Nq[end]
        end
        nvertelem = num_vert_elem(A)
        p = lower_bandwidth(A)
        q = upper_bandwidth(A)
        eband = elem_band(A)
        eshift = elem_band(A) + 1
    end

    ep, eh = @index(Group, NTuple)
    ep = ep - eshift
    i, j, k = @index(Local, NTuple)

    for evin in evin0:(2eband + 1):nvertelem
        # sin, kin, evin are the state, vertical dof, and vert element we are
        # handling

        # column index of matrix
        jj = sin + (kin - 1) * nstate + (evin - 1) * nstate * Nq_v

        # one thread is launch for dof that might contribute to column jj's
        # band
        @inbounds begin
            # ep is the shift we need to add to evin to get the element we
            # need to consider
            ev = ep + evin
            if 1 ≤ ev ≤ nvertelem
                e = ev + (eh - 1) * nvertelem
                ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (k - 1)
                @unroll for s in 1:nstate
                    # row index of matrix
                    ii = s + (k - 1) * nstate + (ev - 1) * nstate * Nq_v
                    # row band index
                    bb = ii - jj
                    # make sure we're in the bandwidth
                    if -q ≤ bb ≤ p
                        if !single_column(A)
                            A[i, j, bb + q + 1, jj, eh] = dQ[ijk, s, e]
                        else
                            # Only the (1, 1) column of element 1 is stored.
                            if (i, j, eh) == (1, 1, 1)
                                A[bb + q + 1, jj] = dQ[ijk, s, e]
                            end
                        end
                    end
                end
            end
        end
    end
end

# Computes dQ = A * Q per column directly in band storage; used by
# `banded_matrix_vector_product!` (testing helper).
@kernel function kernel_banded_matrix_vector_product!(dQ, A, Q, helems, velems)
    @uniform begin
        FT = eltype(A)
        nstate = num_state(A)
        Nq = polynomialorders(A) .+ 1
        @inbounds begin
            Nq_h = Nq[1]
            Nq_v = Nq[end]
            Nqj = dimensionality(A) == 2 ?
1 : Nq[2] end eband = elem_band(A) nvertelem = num_vert_elem(A) p = lower_bandwidth(A) q = upper_bandwidth(A) elo = eband - 1 eup = eband - 1 end ev, eh = @index(Group, NTuple) i, j, k = @index(Local, NTuple) # matrix row loops @inbounds begin e = ev + nvertelem * (eh - 1) @unroll for s in 1:nstate Ax = -zero(FT) ii = s + (k - 1) * nstate + (ev - 1) * nstate * Nq_v # banded matrix column loops @unroll for evv in max(1, ev - elo):min(nvertelem, ev + eup) ee = evv + nvertelem * (eh - 1) @unroll for kk in 1:Nq_v ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (kk - 1) @unroll for ss in 1:nstate jj = ss + (kk - 1) * nstate + (evv - 1) * nstate * Nq_v bb = ii - jj if -q ≤ bb ≤ p if !single_column(A) Ax += A[i, j, bb + q + 1, jj, eh] * Q[ijk, ss, ee] else Ax += A[bb + q + 1, jj] * Q[ijk, ss, ee] end end end end end ijk = i + Nqj * (j - 1) + Nq_h * Nqj * (k - 1) dQ[ijk, s, e] = Ax end end end ================================================ FILE: src/Numerics/SystemSolvers/conjugate_gradient_solver.jl ================================================ #### Conjugate Gradient solver export ConjugateGradient struct ConjugateGradient{AT1, AT2, FT, RD, RT, IT} <: AbstractIterativeSystemSolver # tolerances (2) rtol::FT atol::FT # arrays of size reshape_tuple (7) r0::AT1 z0::AT1 p0::AT1 r1::AT1 z1::AT1 p1::AT1 Lp::AT1 # arrays of size(MPIStateArray) which are aliased to two of the previous dimensions (2) alias_p0::AT2 alias_Lp::AT2 # reduction dimension (1) dims::RD # reshape dimension (1) reshape_tuple::RT # maximum number of iterations (1) max_iter::IT end # Define the outer constructor for the ConjugateGradient struct """ ConjugateGradient( Q::AT; rtol = eps(eltype(Q)), atol = eps(eltype(Q)), max_iter = length(Q), dims = :, reshape_tuple = size(Q), ) where {AT} # ConjugateGradient # Description - Outer constructor for the ConjugateGradient struct # Arguments - `Q`:(array). The kind of object that linearoperator! acts on. # Keyword Arguments - `rtol`: (float). 
relative tolerance
- `atol`: (float). absolute tolerance
- `dims`: (tuple or : ). the dimensions to reduce over
- `reshape_tuple`: (tuple). the dimensions that the conjugate gradient solver
  operators over

# Comment
- The reshape tuple is necessary in case the linearoperator! is defined over
  vectors of a different size as compared to what plays nicely with the
  dimension reduction in the ConjugateGradient. It also allows the user to
  define preconditioners over arrays that are more conveniently shaped.

# Return
- ConjugateGradient struct
"""
function ConjugateGradient(
    Q::AT;
    rtol = eps(eltype(Q)),
    atol = eps(eltype(Q)),
    max_iter = length(Q),
    dims = :,
    reshape_tuple = size(Q),
) where {AT}

    # allocate arrays (5)
    r0 = reshape(similar(Q), reshape_tuple)
    z0 = reshape(similar(Q), reshape_tuple)
    r1 = reshape(similar(Q), reshape_tuple)
    z1 = reshape(similar(Q), reshape_tuple)
    p1 = reshape(similar(Q), reshape_tuple)

    # allocate array of different shape (2)
    alias_p0 = similar(Q)
    alias_Lp = similar(Q)

    # allocate create aliased arrays (2): p0/Lp share storage with
    # alias_p0/alias_Lp so the linear operator can write through either view
    p0 = reshape(alias_p0, reshape_tuple)
    Lp = reshape(alias_Lp, reshape_tuple)

    container = [
        rtol,
        atol,
        r0,
        z0,
        p0,
        r1,
        z1,
        p1,
        Lp,
        alias_p0,
        alias_Lp,
        dims,
        reshape_tuple,
        max_iter,
    ]

    # create struct instance by splatting the container into the default
    # constructor
    return ConjugateGradient{
        typeof(z0),
        typeof(Q),
        eltype(Q),
        typeof(dims),
        typeof(reshape_tuple),
        typeof(max_iter),
    }(container...)
end

# Define the outer constructor for the ConjugateGradient struct
"""
    ConjugateGradient(
        Q::MPIStateArray;
        rtol = eps(eltype(Q)),
        atol = eps(eltype(Q)),
        max_iter = length(Q),
        dims = :,
        reshape_tuple = size(Q),
    )

# Description
Outer constructor for the ConjugateGradient struct with MPIStateArrays. THIS
IS A HACK DUE TO RESHAPE FUNCTIONALITY ON MPISTATEARRAYS.

# Arguments
- `Q`:(array). The kind of object that linearoperator! acts on.

# Keyword Arguments
- `rtol`: (float). relative tolerance
- `atol`: (float). absolute tolerance
- `dims`: (tuple or : ). the dimensions to reduce over
- `reshape_tuple`: (tuple). the dimensions that the conjugate gradient solver
  operators over

# Comment
- The reshape tuple is necessary in case the linearoperator! is defined over
  vectors of a different size as compared to what plays nicely with the
  dimension reduction in the ConjugateGradient. It also allows the user to
  define preconditioners over arrays that are more conveniently shaped.

# Return
- ConjugateGradient struct
"""
function ConjugateGradient(
    Q::MPIStateArray;
    rtol = eps(eltype(Q)),
    atol = eps(eltype(Q)),
    max_iter = length(Q),
    dims = :,
    reshape_tuple = size(Q),
)
    # create empty container for pushing struct objects

    # allocate arrays (5); reshape Q.data since MPIStateArray itself cannot
    # be reshaped directly
    r0 = reshape(similar(Q.data), reshape_tuple)
    z0 = reshape(similar(Q.data), reshape_tuple)
    r1 = reshape(similar(Q.data), reshape_tuple)
    z1 = reshape(similar(Q.data), reshape_tuple)
    p1 = reshape(similar(Q.data), reshape_tuple)

    # allocate array of different shape (2)
    alias_p0 = similar(Q)
    alias_Lp = similar(Q)

    # allocate create aliased arrays (2)
    p0 = reshape(alias_p0.data, reshape_tuple)
    Lp = reshape(alias_Lp.data, reshape_tuple)

    container = [
        rtol,
        atol,
        r0,
        z0,
        p0,
        r1,
        z1,
        p1,
        Lp,
        alias_p0,
        alias_Lp,
        dims,
        reshape_tuple,
        max_iter,
    ]

    # create struct instance by splatting the container into the default
    # constructor
    return ConjugateGradient{
        typeof(z0),
        typeof(Q),
        eltype(Q),
        typeof(dims),
        typeof(reshape_tuple),
        typeof(max_iter),
    }(container...)
end

"""
    initialize!(
        linearoperator!,
        Q,
        Qrhs,
        solver::ConjugateGradient,
        args...,
    )

# Description
- This function initializes the iterative solver. It is called as part of the
  AbstractIterativeSystemSolver routine. SEE CODEREF for documentation on
  AbstractIterativeSystemSolver

# Arguments
- `linearoperator!`: (function). This applies the predefined linear operator
  on an array. Applies a linear operator to object "y" and overwrites object
  "z". The function argument is linearoperator!(z, y, args...) and it returns
  nothing.
- `Q`: (array).
This is an object that linearoperator! outputs
- `Qrhs`: (array). This is an object that linearoperator! acts on
- `solver`: (struct). This is a struct for dispatch, in this case for
  ColumnwisePreconditionedConjugateGradient
- `args...`: (arbitrary). This is optional arguments that can be passed into
  linearoperator! function.

# Keyword Arguments
- There are no keyword arguments

# Return
- `converged`: (bool). A boolean to say whether or not the iterative solver
  has converged.
- `threshold`: (float). The value of the residual for the first timestep

# Comment
- This function does nothing for conjugate gradient
"""
function initialize!(
    linearoperator!,
    Q,
    Qrhs,
    solver::ConjugateGradient,
    args...,
)
    # CG needs no setup work; report "not converged" with an infinite
    # initial threshold so that doiteration! always runs.
    return false, Inf
end

"""
    doiteration!(
        linearoperator!,
        preconditioner,
        Q,
        Qrhs,
        solver::ConjugateGradient,
        threshold,
        args...;
        applyPC! = (x, y) -> x .= y,
    )

# Description
- This function enacts the iterative solver. It is called as part of the
  AbstractIterativeSystemSolver routine. SEE CODEREF for documentation on
  AbstractIterativeSystemSolver

# Arguments
- `linearoperator!`: (function). This applies the predefined linear operator
  on an array. Applies a linear operator to object "y" and overwrites object
  "z". It is a function with arguments linearoperator!(z, y, args...), where
  "z" gets overwritten by "y" and "args..." are additional arguments passed
  to the linear operator. The linear operator is assumed to return nothing.
- `Q`: (array). This is an object that linearoperator! overwrites
- `Qrhs`: (array). This is an object that linearoperator! acts on. This is
  the rhs to the linear system
- `solver`: (struct). This is a struct for dispatch, in this case for
  ConjugateGradient
- `threshold`: (float). Either an absolute or relative tolerance
- `applyPC!`: (function). Applies a preconditioner to object "y" and
  overwrites object "z". applyPC!(z, y)
- `args...`: (arbitrary). This is necessary for the linearoperator! function
  which has a signature linearoperator!(b, x, args....)

# Keyword Arguments
- There are no keyword arguments

# Return
- `converged`: (bool). A boolean to say whether or not the iterative solver
  has converged.
- `iteration`: (int). Iteration number for the iterative solver
- `threshold`: (float). The value of the residual for the first timestep

# Comment
- This function does conjugate gradient
"""
function doiteration!(
    linearoperator!,
    preconditioner,
    Q,
    Qrhs,
    solver::ConjugateGradient,
    threshold,
    args...;
    applyPC! = (x, y) -> x .= y,
)

    # unroll names for convenience
    rtol = solver.rtol
    atol = solver.atol
    residual_norm = typemax(eltype(Q))
    dims = solver.dims
    converged = false
    max_iter = solver.max_iter

    r0 = solver.r0
    z0 = solver.z0
    p0 = solver.p0
    r1 = solver.r1
    z1 = solver.z1
    p1 = solver.p1
    Lp = solver.Lp
    alias_p0 = solver.alias_p0
    alias_Lp = solver.alias_Lp
    alias_Q = reshape(Q, solver.reshape_tuple)

    # Smack residual by linear operator
    linearoperator!(alias_Lp, Q, args...)
    # make sure that arrays are of the appropriate size
    alias_p0 .= Qrhs
    # initial residual r0 = b - A x (p0 aliases alias_p0, Lp aliases alias_Lp)
    r0 .= p0 - Lp
    # apply the preconditioner
    applyPC!(z0, r0)
    # update p0
    p0 .= z0

    # TODO: FIX THIS
    absolute_residual = maximum(sqrt.(sum(r0 .* r0, dims = dims)))
    relative_residual =
        absolute_residual / maximum(sqrt.(sum(Qrhs .* Qrhs, dims = :)))
    # TODO: FIX THIS
    if (absolute_residual <= atol) || (relative_residual <= rtol)
        # wow! what a great guess
        converged = true
        return converged, 1, absolute_residual
    end

    for j in 1:max_iter
        linearoperator!(alias_Lp, alias_p0, args...)
        α = sum(r0 .* z0, dims = dims) ./ sum(p0 .* Lp, dims = dims)
        # Update along preconditioned direction, (note that broadcast will
        # indeed work as expected)
        @. alias_Q += α * p0
        @. r1 = r0 - α * Lp

        # TODO: FIX THIS
        absolute_residual = maximum(sqrt.(sum(r1 .* r1, dims = dims)))
        relative_residual =
            absolute_residual / maximum(sqrt.(sum(Qrhs .* Qrhs, dims = :)))
        # TODO: FIX THIS
        converged = false
        if (absolute_residual <= atol) || (relative_residual <= rtol)
            converged = true
            return converged, j, absolute_residual
        end

        applyPC!(z1, r1)
        β = sum(z1 .* r1, dims = dims) ./ sum(z0 .* r0, dims = dims)

        # Update
        @. p0 = z1 + β * p0
        @. z0 = z1
        @. r0 = r1
    end
    # TODO: FIX THIS
    converged = true
    return converged, max_iter, absolute_residual
end

"""
    doiteration!(
        linearoperator!,
        preconditioner,
        Q::MPIStateArray,
        Qrhs::MPIStateArray,
        solver::ConjugateGradient,
        threshold,
        args...;
        applyPC! = (x, y) -> x .= y,
    )

# Description
This function enacts the iterative solver. It is called as part of the
AbstractIterativeSystemSolver routine. SEE CODEREF for documentation on
AbstractIterativeSystemSolver. THIS IS A HACK TO WORK WITH MPISTATEARRAYS.
THE ISSUE IS WITH RESHAPE.

# Arguments
- `linearoperator!`: (function). This applies the predefined linear operator
  on an array. Applies a linear operator to object "y" and overwrites object
  "z". It is a function with arguments linearoperator!(z, y, args...), where
  "z" gets overwritten by "y" and "args..." are additional arguments passed
  to the linear operator. The linear operator is assumed to return nothing.
- `Q`: (array). This is an object that linearoperator! overwrites
- `Qrhs`: (array). This is an object that linearoperator! acts on. This is
  the rhs to the linear system
- `solver`: (struct). This is a struct for dispatch, in this case for
  ConjugateGradient
- `threshold`: (float). Either an absolute or relative tolerance
- `applyPC!`: (function). Applies a preconditioner to object "y" and
  overwrites object "z". applyPC!(z, y)
- `args...`: (arbitrary). This is necessary for the linearoperator! function
  which has a signature linearoperator!(b, x, args....)
# Keyword Arguments
- There are no keyword arguments

# Return
- `converged`: (bool). A boolean to say whether or not the iterative solver
  has converged.
- `iteration`: (int). Iteration number for the iterative solver
- `threshold`: (float). The value of the residual for the first timestep

# Comment
- This function does conjugate gradient
"""
function doiteration!(
    linearoperator!,
    preconditioner,
    Q::MPIStateArray,
    Qrhs::MPIStateArray,
    solver::ConjugateGradient,
    threshold,
    args...;
    applyPC! = (x, y) -> x .= y,
)

    # unroll names for convenience
    rtol = solver.rtol
    atol = solver.atol
    residual_norm = typemax(eltype(Q))
    dims = solver.dims
    converged = false
    max_iter = solver.max_iter

    r0 = solver.r0
    z0 = solver.z0
    p0 = solver.p0
    r1 = solver.r1
    z1 = solver.z1
    p1 = solver.p1
    Lp = solver.Lp
    alias_p0 = solver.alias_p0
    alias_Lp = solver.alias_Lp
    # reshape the underlying data (MPIStateArray itself cannot be reshaped)
    alias_Q = reshape(Q.data, solver.reshape_tuple)

    # Smack residual by linear operator
    linearoperator!(alias_Lp, Q, args...)
    # make sure that arrays are of the appropriate size
    alias_p0 .= Qrhs.data
    # initial residual r0 = b - A x (p0/Lp alias alias_p0/alias_Lp)
    r0 .= p0 - Lp
    # apply the preconditioner
    applyPC!(z0, r0)
    # update p0
    p0 .= z0

    # TODO: FIX THIS
    absolute_residual = maximum(sqrt.(sum(r0 .* r0, dims = dims)))
    relative_residual =
        absolute_residual / maximum(sqrt.(sum(Qrhs .* Qrhs, dims = :)))
    # TODO: FIX THIS
    if (absolute_residual <= atol) || (relative_residual <= rtol)
        # wow! what a great guess
        converged = true
        return converged, 1, absolute_residual
    end

    for j in 1:max_iter
        linearoperator!(alias_Lp, alias_p0, args...)
        α = sum(r0 .* z0, dims = dims) ./ sum(p0 .* Lp, dims = dims)
        # Update along preconditioned direction, (note that broadcast will
        # indeed work as expected)
        @. alias_Q += α * p0
        @. r1 = r0 - α * Lp

        # TODO: FIX THIS
        absolute_residual = maximum(sqrt.(sum(r1 .* r1, dims = dims)))
        relative_residual =
            absolute_residual / maximum(sqrt.(sum(Qrhs .* Qrhs, dims = :)))
        # TODO: FIX THIS
        converged = false
        if (absolute_residual <= atol) || (relative_residual <= rtol)
            converged = true
            return converged, j, absolute_residual
        end

        applyPC!(z1, r1)
        β = sum(z1 .* r1, dims = dims) ./ sum(z0 .* r0, dims = dims)

        # Update
        @. p0 = z1 + β * p0
        @. z0 = z1
        @. r0 = r1
    end
    # TODO: FIX THIS
    converged = true
    return converged, max_iter, absolute_residual
end


================================================
FILE: src/Numerics/SystemSolvers/generalized_conjugate_residual_solver.jl
================================================

#### Generalized Conjugate Residual Solver

export GeneralizedConjugateResidual

"""
    GeneralizedConjugateResidual(K, Q; rtol, atol)

# Conjugate Residual
This is an object for solving linear systems using an iterative Krylov
method. The constructor parameter `K` is the number of steps after which the
algorithm is restarted (if it has not converged), `Q` is a reference state
used only to allocate the solver internal state, and `tolerance` specifies
the convergence criterion based on the relative residual norm. The amount of
memory required by the solver state is roughly `(2K + 2) * size(Q)`. This
object is intended to be passed to the [`linearsolve!`](@ref) command.

This uses the restarted Generalized Conjugate Residual method of Eisenstat
(1983).
## References

 - [Eisenstat1983](@cite)
"""
mutable struct GeneralizedConjugateResidual{K, T, AT} <:
               AbstractIterativeSystemSolver
    # current residual r = A Q - Qrhs
    residual::AT
    # scratch for A applied to the residual
    L_residual::AT
    # K search directions
    p::NTuple{K, AT}
    # A applied to each search direction
    L_p::NTuple{K, AT}
    # orthogonalization coefficients
    alpha::MArray{Tuple{K}, T, 1, K}
    # squared norms of L_p
    normsq::MArray{Tuple{K}, T, 1, K}
    rtol::T
    atol::T

    function GeneralizedConjugateResidual(
        K,
        Q::AT;
        rtol = √eps(eltype(AT)),
        atol = eps(eltype(AT)),
    ) where {AT}
        T = eltype(Q)

        residual = similar(Q)
        L_residual = similar(Q)
        p = ntuple(i -> similar(Q), K)
        L_p = ntuple(i -> similar(Q), K)
        alpha = @MArray zeros(K)
        normsq = @MArray zeros(K)

        new{K, T, AT}(residual, L_residual, p, L_p, alpha, normsq, rtol, atol)
    end
end

function initialize!(
    linearoperator!,
    Q,
    Qrhs,
    solver::GeneralizedConjugateResidual,
    args...,
)
    residual = solver.residual
    p = solver.p
    L_p = solver.L_p

    @assert size(Q) == size(residual)

    rtol, atol = solver.rtol, solver.atol
    # relative threshold scaled by the rhs norm
    threshold = rtol * norm(Qrhs, weighted_norm)

    linearoperator!(residual, Q, args...)
    residual .-= Qrhs

    converged = false
    residual_norm = norm(residual, weighted_norm)
    if residual_norm < threshold
        converged = true
        return converged, threshold
    end

    # seed the first search direction with the residual
    p[1] .= residual
    linearoperator!(L_p[1], p[1], args...)

    threshold = max(atol, threshold)
    converged, threshold
end

function doiteration!(
    linearoperator!,
    preconditioner,
    Q,
    Qrhs,
    solver::GeneralizedConjugateResidual{K},
    threshold,
    args...,
) where {K}

    residual = solver.residual
    p = solver.p
    L_residual = solver.L_residual
    L_p = solver.L_p
    normsq = solver.normsq
    alpha = solver.alpha

    residual_norm = typemax(eltype(Q))

    for k in 1:K
        normsq[k] = norm(L_p[k], weighted_norm)^2
        beta = -dot(residual, L_p[k], weighted_norm) / normsq[k]

        Q .+= beta * p[k]
        residual .+= beta * L_p[k]

        residual_norm = norm(residual, weighted_norm)

        if residual_norm <= threshold
            return (true, k, residual_norm)
        end

        linearoperator!(L_residual, residual, args...)

        for l in 1:k
            alpha[l] = -dot(L_residual, L_p[l], weighted_norm) / normsq[l]
        end

        # if not converged, make the next direction (cycling back to slot 1
        # on restart)
        if k < K
            rv_nextp = realview(p[k + 1])
            rv_L_nextp = realview(L_p[k + 1])
        else # restart
            rv_nextp = realview(p[1])
            rv_L_nextp = realview(L_p[1])
        end

        rv_residual = realview(residual)
        rv_p = realview.(p)
        rv_L_p = realview.(L_p)
        rv_L_residual = realview(L_residual)

        groupsize = 256
        T = eltype(alpha)

        event = Event(array_device(Q))
        event = linearcombination!(array_device(Q), groupsize)(
            rv_nextp,
            (one(T), alpha[1:k]...),
            (rv_residual, rv_p[1:k]...),
            false;
            ndrange = length(rv_nextp),
            dependencies = (event,),
        )
        event = linearcombination!(array_device(Q), groupsize)(
            rv_L_nextp,
            (one(T), alpha[1:k]...),
            (rv_L_residual, rv_L_p[1:k]...),
            false;
            ndrange = length(rv_nextp),
            dependencies = (event,),
        )
        wait(array_device(Q), event)
    end

    (false, K, residual_norm)
end


================================================
FILE: src/Numerics/SystemSolvers/generalized_minimal_residual_solver.jl
================================================

#### Generalized Minimal Residual Solver

export GeneralizedMinimalResidual

"""
    GeneralizedMinimalResidual(Q; M, rtol, atol)

# GMRES
This is an object for solving linear systems using an iterative Krylov
method. The constructor parameter `M` is the number of steps after which the
algorithm is restarted (if it has not converged), `Q` is a reference state
used only to allocate the solver internal state, and `rtol` specifies the
convergence criterion based on the relative residual norm. The amount of
memory required for the solver state is roughly `(M + 1) * size(Q)`. This
object is intended to be passed to the [`linearsolve!`](@ref) command.

This uses the restarted Generalized Minimal Residual method of Saad and
Schultz (1986).
## References

 - [Saad1986](@cite)
"""
mutable struct GeneralizedMinimalResidual{M, MP1, MMP1, T, AT} <:
               AbstractIterativeSystemSolver
    # M + 1 Krylov basis vectors
    krylov_basis::NTuple{MP1, AT}
    "Hessenberg matrix"
    H::MArray{Tuple{MP1, M}, T, 2, MMP1}
    "rhs of the least squares problem"
    g0::MArray{Tuple{MP1, 1}, T, 2, MP1}
    rtol::T
    atol::T

    function GeneralizedMinimalResidual(
        Q::AT;
        # BUGFIX: was `min(20, eltype(Q))`, which compares an Int with a
        # *type* and throws a MethodError whenever the default is used; the
        # restart size should be capped by the problem size instead.
        M = min(20, length(Q)),
        rtol = √eps(eltype(AT)),
        atol = eps(eltype(AT)),
    ) where {AT}
        krylov_basis = ntuple(i -> similar(Q), M + 1)
        H = @MArray zeros(M + 1, M)
        g0 = @MArray zeros(M + 1)

        new{M, M + 1, M * (M + 1), eltype(Q), AT}(
            krylov_basis,
            H,
            g0,
            rtol,
            atol,
        )
    end
end

function initialize!(
    linearoperator!,
    Q,
    Qrhs,
    solver::GeneralizedMinimalResidual,
    args...,
)
    g0 = solver.g0
    krylov_basis = solver.krylov_basis
    rtol, atol = solver.rtol, solver.atol

    @assert size(Q) == size(krylov_basis[1])

    # store the initial residual in krylov_basis[1]
    linearoperator!(krylov_basis[1], Q, args...)
    @. krylov_basis[1] = Qrhs - krylov_basis[1]

    threshold = rtol * norm(krylov_basis[1], weighted_norm)
    residual_norm = norm(krylov_basis[1], weighted_norm)

    converged = false
    # FIXME: Should only be true for threshold zero
    if threshold < atol
        converged = true
        return converged, threshold
    end

    # seed the least-squares rhs and normalize the first basis vector
    fill!(g0, 0)
    g0[1] = residual_norm
    krylov_basis[1] ./= residual_norm

    converged, max(threshold, atol)
end

function doiteration!(
    linearoperator!,
    preconditioner,
    Q,
    Qrhs,
    solver::GeneralizedMinimalResidual{M},
    threshold,
    args...,
) where {M}

    krylov_basis = solver.krylov_basis
    H = solver.H
    g0 = solver.g0

    converged = false
    residual_norm = typemax(eltype(Q))

    # accumulated Givens rotations applied to H and g0
    Ω = LinearAlgebra.Rotation{eltype(Q)}([])

    j = 1
    for outer j in 1:M

        # Arnoldi using the Modified Gram Schmidt orthonormalization
        linearoperator!(krylov_basis[j + 1], krylov_basis[j], args...)

        for i in 1:j
            H[i, j] = dot(krylov_basis[j + 1], krylov_basis[i], weighted_norm)
            @.
"""
mutable struct JacobianAction{FT, AT}
    # nonlinear operator F(Q); stored untyped so any callable works
    rhs!::Any
    # base finite-difference parameter; the actual step e is derived from it
    ϵ::FT
    # cache for the linearization point Q
    Q::AT
    # scratch container for Q + ϵΔQ
    Qdq::AT
    # cache for F(Q) at the linearization point
    Fq::AT
    # scratch container for F(Q + ϵΔQ)
    Fqdq::AT
end

# Construct a JacobianAction with freshly-allocated (uninitialized) caches;
# `update_Q!` must be called before the operator is applied.
function JacobianAction(rhs!, Q, ϵ)
    return JacobianAction(
        rhs!,
        ϵ,
        similar(Q),
        similar(Q),
        similar(Q),
        similar(Q),
    )
end

"""
Approximates the action of the Jacobian of a nonlinear form on a vector `ΔQ`
using the difference quotient:

            ∂F(Q)      F(Q + e ΔQ) - F(Q)
    JΔQ =   ---- ΔQ ≈ -------------------
             ∂Q                e

Compute JΔQ with cached Q and F(Q), and the direction dQ
"""
function (op::JacobianAction)(JΔQ, dQ, args...)
    rhs! = op.rhs!
    Q = op.Q
    Qdq = op.Qdq
    ϵ = op.ϵ
    Fq = op.Fq
    Fqdq = op.Fqdq
    FT = eltype(dQ)
    n = length(dQ)
    normdQ = norm(dQ, weighted_norm)

    β = √ϵ
    if normdQ > ϵ
        # for preconditioner reconstruction, it goes into this part
        # e depends only on the active freedoms in the preconditioner
        # reconstruction (entries where dQ is nonzero)
        factor = FT(1 / (n * normdQ))
        Qdq .= Q .* (abs.(dQ) .> 0)
        e = factor * β * norm(Qdq, 1, false) + β
    else
        factor = FT(1 / n)
        e = factor * β * norm(Q, 1, false) + β
    end

    Qdq .= Q .+ e .* dQ

    rhs!(Fqdq, Qdq, args...)

    # forward difference quotient using the cached F(Q)
    JΔQ .= (Fqdq .- Fq) ./ e
end

"""
update cached Q and F(Q) before each Newton iteration
"""
function update_Q!(op::JacobianAction, Q, args...)
    op.Q .= Q
    Fq = op.Fq
    op.rhs!(Fq, Q, args...)
end

"""
Solve for Frhs = F(Q), by finite difference

    ∂F(Q)      F(Q + eΔQ) - F(Q)
    ---- ΔQ ≈ -------------------
     ∂Q               e

    Q^n+1 = Q^n - dF/dQ(Q^{n})⁻¹ (F(Q^n) - Frhs)
    set ΔQ = F(Q^n) - Frhs
"""
mutable struct JacobianFreeNewtonKrylovSolver{FT, AT} <: AbstractNonlinearSolver
    # small number used for finite difference
    ϵ::FT
    # tolerances for convergence
    tol::FT
    # Max number of Newton iterations
    M::Int
    # Linear solver for the Jacobian system
    linearsolver::Any
    # container for unknowns ΔQ, which is updated for the linear solver
    ΔQ::AT
    # container for F(Q)
    residual::AT
end

"""
JacobianFreeNewtonKrylovSolver constructor
"""
function JacobianFreeNewtonKrylovSolver(
    Q,
    linearsolver;
    ϵ = 1.e-8,
    tol = 1.e-6,
    M = 30,
)
    FT = eltype(Q)
    residual = similar(Q)
    ΔQ = similar(Q)
    return JacobianFreeNewtonKrylovSolver(
        FT(ϵ),
        FT(tol),
        M,
        linearsolver,
        ΔQ,
        residual,
    )
end

"""
JacobianFreeNewtonKrylovSolver initialize the residual
"""
function initialize!(
    rhs!,
    Q,
    Qrhs,
    solver::JacobianFreeNewtonKrylovSolver,
    args...,
)
    # where R = Qrhs - F(Q)
    R = solver.residual
    # Computes F(Q) and stores in R
    rhs!(R, Q, args...)
    # Computes R = R - Qrhs
    R .-= Qrhs
    return norm(R, weighted_norm)
end

"""
Solve for Frhs = F(Q), by finite difference

    Q^n+1 = Q^n - dF/dQ(Q^{n})⁻¹ (F(Q^n) - Frhs)
    set ΔQ = F(Q^n) - Frhs

...
# Arguments
- `rhs!`: functor rhs!(Q) = F(Q)
- `jvp!`: Jacobian action jvp!(ΔQ) = dF/dQ(Q) ⋅ ΔQ
- `preconditioner`: approximation of dF/dQ(Q)
- `Q` : Q^n
- `Qrhs` : Frhs
- `solver`: linear solver
...
"""
function donewtoniteration!(
    rhs!,
    jvp!,
    preconditioner::AbstractPreconditioner,
    Q,
    Qrhs,
    solver::JacobianFreeNewtonKrylovSolver,
    args...,
)

    FT = eltype(Q)
    ΔQ = solver.ΔQ
    ΔQ .= FT(0.0)
    # R(Q) == 0, R = F(Q) - Qrhs, where F = rhs!
    # Compute right-hand side for Jacobian system:
    # J(Q)ΔQ = -R
    # where R = Qrhs - F(Q), which is computed at the end of last step or in
    # the initialize function
    R = solver.residual

    # R = F(Q^n) - Frhs
    # ΔQ = dF/dQ(Q^{n})⁻¹ (Frhs - F(Q^n)) = -dF/dQ(Q^{n})⁻¹ R
    iters =
        linearsolve!(jvp!, preconditioner, solver.linearsolver, ΔQ, -R, args...)

    # Newton correction Q^{n+1} = Q^n + dF/dQ(Q^{n})⁻¹ (Frhs - F(Q^n))
    Q .+= ΔQ

    # Compute residual norm and residual for next step
    rhs!(R, Q, args...)
    R .-= Qrhs
    resnorm = norm(R, weighted_norm)

    return resnorm, iters
end

================================================
FILE: src/Numerics/SystemSolvers/preconditioners.jl
================================================

export AbstractPreconditioner,
    ColumnwiseLUPreconditioner,
    NoPreconditioner,
    preconditioner_update!,
    preconditioner_solve!,
    preconditioner_counter_update!

"""
Abstract base type for all preconditioners.
"""
abstract type AbstractPreconditioner end

"""
    mutable struct NoPreconditioner end

Do nothing
"""
mutable struct NoPreconditioner <: AbstractPreconditioner end

"""
Do nothing, when there is no preconditioner, preconditioner = NoPreconditioner
"""
function preconditioner_update!(
    op,
    dg,
    preconditioner::NoPreconditioner,
    args...,
) end

"""
Do nothing, when there is no preconditioner, preconditioner = NoPreconditioner
"""
function preconditioner_solve!(preconditioner::NoPreconditioner, Q) end

"""
Do nothing, when there is no preconditioner, preconditioner = NoPreconditioner
"""
function preconditioner_counter_update!(preconditioner::NoPreconditioner) end

"""
    mutable struct ColumnwiseLUPreconditioner{AT}
        A::DGColumnBandedMatrix
        Q::AT
        PQ::AT
        counter::Int
        update_freq::Int
    end

...
# Arguments
- `A`: the lu factor of the preconditioner (approximated Jacobian), in the
  DGColumnBandedMatrix format
- `Q`: MPIArray container, used to update A
- `PQ`: MPIArray container, used to update A
- `counter`: count the number of Newton iterations; when
  counter > update_freq or counter < 0, update the preconditioner
- `update_freq`: preconditioner update frequency
...
"""
mutable struct ColumnwiseLUPreconditioner{AT} <: AbstractPreconditioner
    A::DGColumnBandedMatrix
    Q::AT
    PQ::AT
    counter::Int
    update_freq::Int
end

"""
ColumnwiseLUPreconditioner constructor
build an empty ColumnwiseLUPreconditioner

...
# Arguments
- `dg`: DG model, use only the grid information
- `Q0`: MPIArray, use only its size
- `counter`: = -1, which indicates the preconditioner is empty
- `update_freq`: preconditioner update frequency
...
"""
function ColumnwiseLUPreconditioner(dg, Q0, update_freq = 100)
    single_column = false
    Q = similar(Q0)
    PQ = similar(Q0)

    A = empty_banded_matrix(dg, Q; single_column = single_column)

    # counter = -1, which indicates the preconditioner is empty
    ColumnwiseLUPreconditioner(A, Q, PQ, -1, update_freq)
end

"""
update the DGColumnBandedMatrix by the finite difference approximation
...
# Arguments
- `op`: operator used to compute the finite difference information
- `dg`: the DG model, use only the grid information
...
"""
function preconditioner_update!(
    op,
    dg,
    preconditioner::ColumnwiseLUPreconditioner,
    args...,
)

    # preconditioner.counter < 0, means newly constructed empty preconditioner
    if preconditioner.counter >= 0 &&
       (preconditioner.counter < preconditioner.update_freq)
        return
    end

    A = preconditioner.A
    Q = preconditioner.Q
    PQ = preconditioner.PQ

    update_banded_matrix!(A, op, dg, Q, PQ, args...)
    # factorize in place and reset the staleness counter
    band_lu!(A)

    preconditioner.counter = 0
end

"""
Inplace applying the preconditioner

    Q = P⁻¹ * Q
"""
function preconditioner_solve!(preconditioner::ColumnwiseLUPreconditioner, Q)
    A = preconditioner.A
    # banded LU solve: forward substitution then back substitution, in place
    band_forward!(Q, A)
    band_back!(Q, A)
end

"""
Update the preconditioner counter, after each Newton iteration
"""
function preconditioner_counter_update!(
    preconditioner::ColumnwiseLUPreconditioner,
)
    preconditioner.counter += 1
end

================================================
FILE: src/Ocean/HydrostaticBoussinesq/Courant.jl
================================================

using Logging, Printf
using LinearAlgebra: norm

using ...Mesh.Grids:
    VerticalDirection, HorizontalDirection, EveryDirection, min_node_distance

using ...DGMethods: courant

import ...Courant:
    advective_courant, nondiffusive_courant, diffusive_courant, viscous_courant

import ...DGMethods: calculate_dt

"""
    advective_courant(::HBModel)

calculates the CFL condition due to advection
"""
@inline function advective_courant(
    m::HBModel,
    Q::Vars,
    A::Vars,
    D::Vars,
    Δx,
    Δt,
    t,
    direction = VerticalDirection(),
)
    # pick the velocity magnitude relevant to the requested direction:
    # vertical -> w only, horizontal -> (u, v) only, otherwise the full
    # three-dimensional velocity vector
    if direction isa VerticalDirection
        ū = norm(A.w)
    elseif direction isa HorizontalDirection
        ū = norm(Q.u)
    else
        v = @SVector [Q.u[1], Q.u[2], A.w]
        ū = norm(v)
    end

    return Δt * ū / Δx
end

"""
    nondiffusive_courant(::HBModel)

calculates the CFL condition due to gravity waves
"""
@inline function nondiffusive_courant(
    m::HBModel,
    Q::Vars,
    A::Vars,
    D::Vars,
    Δx,
    Δt,
    t,
    direction = HorizontalDirection(),
)
    # cʰ is the model's maximum horizontal wave speed
    return Δt * m.cʰ / Δx
end

"""
    viscous_courant(::HBModel)

calculates the CFL condition due to viscosity
"""
@inline function viscous_courant(
    m::HBModel,
    Q::Vars,
    A::Vars,
    D::Vars,
    Δx,
    Δt,
    t,
    direction = VerticalDirection(),
)
    ν̄ = norm_viscosity(m, direction)
    return Δt * ν̄ / Δx^2
end

# direction-dependent viscosity magnitude used in the viscous CFL estimate
@inline norm_viscosity(m::HBModel, ::VerticalDirection) = m.νᶻ
@inline norm_viscosity(m::HBModel, ::HorizontalDirection) = sqrt(2) * m.νʰ
@inline norm_viscosity(m::HBModel, ::EveryDirection) = sqrt(2 * m.νʰ^2 + m.νᶻ^2)

"""
    diffusive_courant(::HBModel)

calculates the CFL condition due to temperature diffusivity
factor of 1000 is for convective adjustment
"""
@inline function diffusive_courant(
    m::HBModel,
    Q::Vars,
    A::Vars,
    D::Vars,
    Δx,
    Δt,
    t,
    direction = VerticalDirection(),
)
    κ̄ = norm_diffusivity(m, direction)
    return Δt * κ̄ / Δx^2
end

# direction-dependent diffusivity magnitude; the vertical value is inflated
# by 1000 to account for convective adjustment
@inline norm_diffusivity(m::HBModel, ::VerticalDirection) = 1000 * m.κᶻ
@inline norm_diffusivity(m::HBModel, ::HorizontalDirection) = sqrt(2) * m.κʰ
@inline norm_diffusivity(m::HBModel, ::EveryDirection) =
    sqrt(2 * m.κʰ^2 + (1000 * m.κᶻ)^2)

"""
    calculate_dt(dg, model::HBModel, Q, Courant_number, direction::EveryDirection, t)

calculates the time step based on grid spacing and model parameters
takes minimum of advective, gravity wave, diffusive, and viscous CFL
"""
@inline function calculate_dt(
    dg,
    model::HBModel,
    Q,
    Courant_number,
    t,
    ::EveryDirection,
)
    # evaluate each CFL number with a unit time step, then rescale
    Δt = one(eltype(Q))

    CFL_advective =
        courant(advective_courant, dg, model, Q, Δt, t, VerticalDirection())
    CFL_gravity = courant(
        nondiffusive_courant,
        dg,
        model,
        Q,
        Δt,
        t,
        HorizontalDirection(),
    )
    CFL_viscous =
        courant(viscous_courant, dg, model, Q, Δt, t, VerticalDirection())
    CFL_diffusive =
        courant(diffusive_courant, dg, model, Q, Δt, t, VerticalDirection())

    CFLs = [CFL_advective, CFL_gravity, CFL_viscous, CFL_diffusive]
    dts = [Courant_number / CFL for CFL in CFLs]
    dt = min(dts...)
@info @sprintf( """Calculating timestep Advective Constraint = %.1f seconds Nondiffusive Constraint = %.1f seconds Viscous Constraint = %.1f seconds Diffusive Constrait = %.1f seconds Timestep = %.1f seconds""", dts..., dt ) return dt end """ calculate_dt(dg, bl::LinearHBModel, Q, Courant_number, direction::EveryDirection) calculates the time step based on grid spacing and model parameters takes minimum of gravity wave, diffusive, and viscous CFL """ @inline function calculate_dt( dg, model::LinearHBModel, Q, Courant_number, t, ::EveryDirection, ) Δt = one(eltype(Q)) ocean = model.ocean CFL_advective = courant(advective_courant, dg, ocean, Q, Δt, t, VerticalDirection()) CFL_gravity = courant( nondiffusive_courant, dg, ocean, Q, Δt, t, HorizontalDirection(), ) CFL_viscous = courant(viscous_courant, dg, ocean, Q, Δt, t, HorizontalDirection()) CFL_diffusive = courant(diffusive_courant, dg, ocean, Q, Δt, t, HorizontalDirection()) CFLs = [CFL_advective, CFL_gravity, CFL_viscous, CFL_diffusive] dts = [Courant_number / CFL for CFL in CFLs] dt = min(dts...) @info @sprintf( """Calculating timestep Advective Constraint = %.1f seconds Nondiffusive Constraint = %.1f seconds Viscous Constraint = %.1f seconds Diffusive Constrait = %.1f seconds Timestep = %.1f seconds""", dts..., dt ) return dt end ================================================ FILE: src/Ocean/HydrostaticBoussinesq/HydrostaticBoussinesq.jl ================================================ module HydrostaticBoussinesq export HydrostaticBoussinesqModel, Forcing using StaticArrays using LinearAlgebra: dot, Diagonal using CLIMAParameters.Planet: grav using ..Ocean using ...VariableTemplates using ...MPIStateArrays using ...Mesh.Filters: apply! using ...Mesh.Grids: VerticalDirection using ...Mesh.Geometry using ...DGMethods using ...DGMethods: init_state_auxiliary! 
using ...DGMethods.NumericalFluxes
using ...DGMethods.NumericalFluxes: RusanovNumericalFlux
using ...BalanceLaws

import ..Ocean: coriolis_parameter
import ...DGMethods.NumericalFluxes: update_penalty!
import ...BalanceLaws:
    vars_state,
    init_state_prognostic!,
    init_state_auxiliary!,
    compute_gradient_argument!,
    compute_gradient_flux!,
    flux_first_order!,
    flux_second_order!,
    source!,
    wavespeed,
    boundary_state!,
    boundary_conditions,
    update_auxiliary_state!,
    update_auxiliary_state_gradient!,
    integral_load_auxiliary_state!,
    integral_set_auxiliary_state!,
    indefinite_stack_integral!,
    reverse_indefinite_stack_integral!,
    reverse_integral_load_auxiliary_state!,
    reverse_integral_set_auxiliary_state!

import ..Ocean:
    ocean_init_state!,
    ocean_init_aux!,
    ocean_boundary_state!,
    _ocean_boundary_state!

# convenience infix operators on static vectors used throughout this module
×(a::SVector, b::SVector) = StaticArrays.cross(a, b)
⋅(a::SVector, b::SVector) = StaticArrays.dot(a, b)
⊗(a::SVector, b::SVector) = a * b'

include("hydrostatic_boussinesq_model.jl")
include("bc_velocity.jl")
include("bc_temperature.jl")
include("LinearHBModel.jl")
include("Courant.jl")

end

================================================
FILE: src/Ocean/HydrostaticBoussinesq/LinearHBModel.jl
================================================

export LinearHBModel

# Linear model for 1D IMEX
"""
    LinearHBModel <: BalanceLaw

A `BalanceLaw` for modeling vertical diffusion implicitly.

write out the equations here

# Usage

    model = HydrostaticBoussinesqModel(problem)
    linear = LinearHBModel(model)

"""
struct LinearHBModel{M} <: BalanceLaw
    # the wrapped full HydrostaticBoussinesqModel providing all parameters
    ocean::M

    function LinearHBModel(ocean::M) where {M}
        return new{M}(ocean)
    end
end

"""
Copy over state, aux, and diff variables from HBModel
"""
vars_state(lm::LinearHBModel, ::Prognostic, FT) =
    vars_state(lm.ocean, Prognostic(), FT)
vars_state(lm::LinearHBModel, st::Gradient, FT) = vars_state(lm.ocean, st, FT)
vars_state(lm::LinearHBModel, ::GradientFlux, FT) =
    vars_state(lm.ocean, GradientFlux(), FT)
vars_state(lm::LinearHBModel, st::Auxiliary, FT) = vars_state(lm.ocean, st, FT)
vars_state(lm::LinearHBModel, ::UpwardIntegrals, FT) = @vars()

"""
No integration, hyperbolic flux, or source terms
"""
@inline integrate_aux!(::LinearHBModel, _...) = nothing
@inline flux_first_order!(::LinearHBModel, _...) = nothing
@inline source!(::LinearHBModel, _...) = nothing

"""
No need to init, initialize by full model
"""
init_state_auxiliary!(
    lm::LinearHBModel,
    state_auxiliary::MPIStateArray,
    grid,
    direction,
) = nothing
init_state_prognostic!(lm::LinearHBModel, Q::Vars, A::Vars, localgeo, t) =
    nothing

"""
    compute_gradient_argument!(::LinearHBModel)

copy u and θ to var_gradient
this computation is done pointwise at each nodal point

# arguments:
- `m`: model in this case HBModel
- `G`: array of gradient variables
- `Q`: array of state variables
- `A`: array of aux variables
- `t`: time, not used
"""
@inline function compute_gradient_argument!(
    m::LinearHBModel,
    G::Vars,
    Q::Vars,
    A,
    t,
)
    G.∇u = Q.u
    G.∇θ = Q.θ

    return nothing
end

"""
    compute_gradient_flux!(::LinearHBModel)

copy ν∇u and κ∇θ to var_diffusive
this computation is done pointwise at each nodal point

# arguments:
- `m`: model in this case HBModel
- `D`: array of diffusive variables
- `G`: array of gradient variables
- `Q`: array of state variables
- `A`: array of aux variables
- `t`: time, not used
"""
@inline function compute_gradient_flux!(
    lm::LinearHBModel,
    D::Vars,
    G::Grad,
    Q::Vars,
    A::Vars,
    t,
)
    # sign convention: stored gradient fluxes carry a minus sign so that
    # flux_second_order! can simply add them to the fluxes
    ν = viscosity_tensor(lm.ocean)
    D.ν∇u = -ν * G.∇u

    κ = diffusivity_tensor(lm.ocean, G.∇θ[3])
    D.κ∇θ = -κ * G.∇θ

    return nothing
end

"""
    flux_second_order!(::LinearHBModel)

calculates the parabolic flux contribution to state variables
this computation is done pointwise at each nodal point

# arguments:
- `m`: model in this case HBModel
- `F`: array of fluxes for each state variable
- `Q`: array of state variables
- `D`: array of diff variables
- `A`: array of aux variables
- `t`: time, not used

# computations
∂ᵗu = -∇⋅(ν∇u)
∂ᵗθ = -∇⋅(κ∇θ)
"""
@inline function flux_second_order!(
    lm::LinearHBModel,
    F::Grad,
    Q::Vars,
    D::Vars,
    HD::Vars,
    A::Vars,
    t::Real,
)
    F.u += D.ν∇u
    F.θ += D.κ∇θ

    return nothing
end

"""
    wavespeed(::LinearHBModel)

calculates the wavespeed for rusanov flux
"""
function wavespeed(lm::LinearHBModel, n⁻, _...)
    C = abs(SVector(lm.ocean.cʰ, lm.ocean.cʰ, lm.ocean.cᶻ)' * n⁻)
    return C
end

boundary_conditions(linear::LinearHBModel) = boundary_conditions(linear.ocean)

"""
    boundary_state!(nf, ::LinearHBModel, args...)

applies boundary conditions for the hyperbolic fluxes
dispatches to a function in OceanBoundaryConditions.jl based on bytype
defined by a problem such as SimpleBoxProblem.jl
"""
@inline function boundary_state!(nf, bc, linear::LinearHBModel, args...)
    return _ocean_boundary_state!(nf, bc, linear.ocean, args...)
end

================================================
FILE: src/Ocean/HydrostaticBoussinesq/bc_temperature.jl
================================================

using ..Ocean: surface_flux

"""
    ocean_temperature_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Insulating, ::HBModel)

apply insulating boundary condition for temperature
sets transmissive ghost point
"""
function ocean_temperature_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    bc_temperature::Insulating,
    ocean,
    Q⁺,
    A⁺,
    n,
    Q⁻,
    A⁻,
    t,
)
    Q⁺.θ = Q⁻.θ

    return nothing
end

"""
    ocean_temperature_boundary_state!(::NumericalFluxSecondOrder, ::Insulating, ::HBModel)

apply insulating boundary condition for temperature
sets ghost point to have no numerical flux on the boundary for κ∇θ
"""
@inline function ocean_temperature_boundary_state!(
    nf::NumericalFluxSecondOrder,
    bc_temperature::Insulating,
    ocean,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    Q⁺.θ = Q⁻.θ
    # zero diffusive flux through the boundary (n⁻ * -0 keeps the sign of zero)
    D⁺.κ∇θ = n⁻ * -0

    return nothing
end

"""
    ocean_temperature_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::TemperatureFlux, ::HBModel)

apply temperature flux boundary condition for temperature
applies insulating conditions for first-order and gradient fluxes
"""
function ocean_temperature_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    bc_velocity::TemperatureFlux,
    ocean,
    args...,
)
    return ocean_temperature_boundary_state!(nf, Insulating(), ocean, args...)
end

"""
    ocean_temperature_boundary_state!(::NumericalFluxSecondOrder, ::TemperatureFlux, ::HBModel)

apply temperature flux boundary condition for temperature
sets ghost point to have specified flux on the boundary for κ∇θ
"""
@inline function ocean_temperature_boundary_state!(
    nf::NumericalFluxSecondOrder,
    bc_temperature::TemperatureFlux,
    ocean,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    Q⁺.θ = Q⁻.θ
    # surface_flux supplies the prescribed temperature flux magnitude
    D⁺.κ∇θ = n⁻ * surface_flux(ocean.problem, A⁻.y, Q⁻.θ)

    return nothing
end

================================================
FILE: src/Ocean/HydrostaticBoussinesq/bc_velocity.jl
================================================

using ..Ocean: kinematic_stress

"""
    ocean_velocity_boundary_state!(::NumericalFluxFirstOrder, ::Impenetrable{NoSlip}, ::HBModel)

apply no slip boundary condition for velocity
sets reflective ghost point
"""
function ocean_velocity_boundary_state!(
    nf::NumericalFluxFirstOrder,
    bc_velocity::Impenetrable{NoSlip},
    ocean,
    Q⁺,
    A⁺,
    n⁻,
    Q⁻,
    A⁻,
    t,
)
    Q⁺.u = -Q⁻.u
    A⁺.w = -A⁻.w

    return nothing
end

"""
    ocean_velocity_boundary_state!(::NumericalFluxGradient, ::Impenetrable{NoSlip}, ::HBModel)

apply no slip boundary condition for velocity
set numerical flux to zero for u
"""
function ocean_velocity_boundary_state!(
    nf::NumericalFluxGradient,
    bc_velocity::Impenetrable{NoSlip},
    ocean,
    Q⁺,
    A⁺,
    n⁻,
    Q⁻,
    A⁻,
    t,
)
    FT = eltype(Q⁺)
    Q⁺.u = SVector(-zero(FT), -zero(FT))
    A⁺.w = -zero(FT)

    return nothing
end

"""
    ocean_velocity_boundary_state!(::NumericalFluxSecondOrder, ::Impenetrable{NoSlip}, ::HBModel)

apply no slip boundary condition for velocity
sets ghost point to have no numerical flux on the boundary for u
"""
@inline function ocean_velocity_boundary_state!(
    nf::NumericalFluxSecondOrder,
    bc_velocity::Impenetrable{NoSlip},
    ocean,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    Q⁺.u = -Q⁻.u
    A⁺.w = -A⁻.w
    D⁺.ν∇u = D⁻.ν∇u

    return nothing
end

"""
    ocean_velocity_boundary_state!(::NumericalFluxFirstOrder, ::Impenetrable{FreeSlip}, ::HBModel)

apply free slip boundary condition for velocity
sets reflective ghost point
"""
function ocean_velocity_boundary_state!(
    nf::NumericalFluxFirstOrder,
    bc_velocity::Impenetrable{FreeSlip},
    ocean,
    Q⁺,
    A⁺,
    n⁻,
    Q⁻,
    A⁻,
    t,
)
    # reflect the normal component of the 3-D velocity: v⁺ = v⁻ - 2(n⋅v⁻)n
    v⁻ = @SVector [Q⁻.u[1], Q⁻.u[2], A⁻.w]
    v⁺ = v⁻ - 2 * n⁻ ⋅ v⁻ .* SVector(n⁻)
    Q⁺.u = @SVector [v⁺[1], v⁺[2]]
    A⁺.w = v⁺[3]

    return nothing
end

"""
    ocean_velocity_boundary_state!(::NumericalFluxGradient, ::Impenetrable{FreeSlip}, ::HBModel)

apply free slip boundary condition for velocity
sets non-reflective ghost point
"""
function ocean_velocity_boundary_state!(
    nf::NumericalFluxGradient,
    bc_velocity::Impenetrable{FreeSlip},
    ocean,
    Q⁺,
    A⁺,
    n⁻,
    Q⁻,
    A⁻,
    t,
)
    # remove the normal component of the 3-D velocity: v⁺ = v⁻ - (n⋅v⁻)n
    v⁻ = @SVector [Q⁻.u[1], Q⁻.u[2], A⁻.w]
    v⁺ = v⁻ - n⁻ ⋅ v⁻ .* SVector(n⁻)
    Q⁺.u = @SVector [v⁺[1], v⁺[2]]
    A⁺.w = v⁺[3]

    return nothing
end

"""
    ocean_velocity_normal_boundary_flux_second_order!(::NumericalFluxSecondOrder, ::Impenetrable{FreeSlip}, ::HBModel)

apply free slip boundary condition for velocity
apply zero numerical flux in the normal direction
"""
function ocean_velocity_boundary_state!(
    nf::NumericalFluxSecondOrder,
    bc_velocity::Impenetrable{FreeSlip},
    ocean,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    Q⁺.u = Q⁻.u
    A⁺.w = A⁻.w
    D⁺.ν∇u = n⁻ * (@SVector [-0, -0])'

    return nothing
end

"""
    ocean_velocity_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Penetrable{FreeSlip}, ::HBModel)

apply free slip boundary condition for velocity
sets non-reflective ghost point
"""
function ocean_velocity_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    bc_velocity::Penetrable{FreeSlip},
    ocean,
    args...,
)
    return nothing
end

"""
    ocean_velocity_boundary_state!(::NumericalFluxSecondOrder, ::Penetrable{FreeSlip}, ::HBModel)

apply free slip boundary condition for velocity
sets non-reflective ghost point
"""
function ocean_velocity_boundary_state!(
    nf::NumericalFluxSecondOrder,
    bc_velocity::Penetrable{FreeSlip},
    ocean,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    Q⁺.u = Q⁻.u
    A⁺.w = A⁻.w
    D⁺.ν∇u = n⁻ * (@SVector [-0, -0])'

    return nothing
end

"""
    ocean_velocity_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Impenetrable{KinematicStress}, ::HBModel)

apply kinematic stress boundary condition for velocity
applies free slip conditions for first-order and gradient fluxes
"""
function ocean_velocity_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    bc_velocity::Impenetrable{<:KinematicStress},
    ocean,
    args...,
)
    return ocean_velocity_boundary_state!(
        nf,
        Impenetrable(FreeSlip()),
        ocean,
        args...,
    )
end

"""
    ocean_velocity_boundary_state!(::NumericalFluxSecondOrder, ::Impenetrable{KinematicStress}, ::HBModel)

apply kinematic stress boundary condition for velocity
sets ghost point to have specified flux on the boundary for ν∇u
"""
@inline function ocean_velocity_boundary_state!(
    nf::NumericalFluxSecondOrder,
    bc_velocity::Impenetrable{<:KinematicStress},
    ocean,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    Q⁺.u = Q⁻.u
    D⁺.ν∇u =
        n⁻ * kinematic_stress(ocean.problem, A⁻.y, ocean.ρₒ, bc_velocity.drag)'

    return nothing
end

"""
    ocean_velocity_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Penetrable{KinematicStress}, ::HBModel)

apply kinematic stress boundary condition for velocity
applies free slip conditions for first-order and gradient fluxes
"""
function ocean_velocity_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    bc_velocity::Penetrable{<:KinematicStress},
    ocean,
    args...,
)
    return ocean_velocity_boundary_state!(
        nf,
        Penetrable(FreeSlip()),
        ocean,
        args...,
    )
end

"""
    ocean_velocity_boundary_state!(::NumericalFluxSecondOrder, ::Penetrable{KinematicStress}, ::HBModel)

apply kinematic stress boundary condition for velocity
sets ghost point to have specified flux on the boundary for ν∇u
"""
@inline function ocean_velocity_boundary_state!(
    nf::NumericalFluxSecondOrder,
    bc_velocity::Penetrable{<:KinematicStress},
    ocean,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    Q⁺.u = Q⁻.u
    # NOTE(review): unlike the Impenetrable variant above, this passes
    # `bc_velocity.drag.stress` rather than `bc_velocity.drag` — confirm the
    # two drag types intentionally differ
    D⁺.ν∇u =
        n⁻ *
        kinematic_stress(
            ocean.problem,
            A⁻.y,
            ocean.ρₒ,
bc_velocity.drag.stress, )' return nothing end ================================================ FILE: src/Ocean/HydrostaticBoussinesq/hydrostatic_boussinesq_model.jl ================================================ """ HydrostaticBoussinesqModel <: BalanceLaw A `BalanceLaw` for ocean modeling. write out the equations here ρₒ = reference density of sea water cʰ = maximum horizontal wave speed cᶻ = maximum vertical wave speed αᵀ = thermal expansitivity coefficient νʰ = horizontal viscosity νᶻ = vertical viscosity κʰ = horizontal diffusivity κᶻ = vertical diffusivity fₒ = first coriolis parameter (constant term) β = second coriolis parameter (linear term) # Usage HydrostaticBoussinesqModel(problem) """ struct HydrostaticBoussinesqModel{C, PS, P, MA, TA, F, FT, I} <: BalanceLaw param_set::PS problem::P coupling::C momentum_advection::MA tracer_advection::TA forcing::F state_filter::I ρₒ::FT cʰ::FT cᶻ::FT αᵀ::FT νʰ::FT νᶻ::FT κʰ::FT κᶻ::FT κᶜ::FT fₒ::FT β::FT function HydrostaticBoussinesqModel{FT}( param_set::PS, problem::P; coupling::C = Uncoupled(), momentum_advection::MA = nothing, tracer_advection::TA = NonLinearAdvectionTerm(), forcing::F = Forcing(), state_filter::I = nothing, ρₒ = FT(1000), # kg / m^3 cʰ = FT(0), # m/s cᶻ = FT(0), # m/s αᵀ = FT(2e-4), # (m/s)^2 / K νʰ = FT(5e3), # m^2 / s νᶻ = FT(5e-3), # m^2 / s κʰ = FT(1e3), # m^2 / s # horizontal diffusivity κᶻ = FT(1e-4), # m^2 / s # background vertical diffusivity κᶜ = FT(1e-1), # m^2 / s # diffusivity for convective adjustment fₒ = FT(1e-4), # Hz β = FT(1e-11), # Hz / m ) where {FT <: AbstractFloat, PS, P, C, MA, TA, F, I} return new{C, PS, P, MA, TA, F, FT, I}( param_set, problem, coupling, momentum_advection, tracer_advection, forcing, state_filter, ρₒ, cʰ, cᶻ, αᵀ, νʰ, νᶻ, κʰ, κᶻ, κᶜ, fₒ, β, ) end end HBModel = HydrostaticBoussinesqModel boundary_conditions(ocean::HBModel) = ocean.problem.boundary_conditions @inline noforcing(args...) 
= 0 function Forcing(; u = noforcing, v = noforcing, η = noforcing, θ = noforcing) return (u = u, v = v, η = η, θ = θ) end """ vars_state(::HBModel, ::Prognostic) prognostic variables evolved forward in time u = (u,v) = (zonal velocity, meridional velocity) η = sea surface height θ = temperature """ function vars_state(m::HBModel, ::Prognostic, T) @vars begin u::SVector{2, T} η::T # real a 2-D variable TODO: should be 2D θ::T end end """ init_state_prognostic!(::HBModel) sets the initial value for state variables dispatches to ocean_init_state! which is defined in a problem file such as SimpleBoxProblem.jl """ function init_state_prognostic!(m::HBModel, Q::Vars, A::Vars, local_geometry, t) return ocean_init_state!(m, m.problem, Q, A, local_geometry, t) end """ vars_state(::HBModel, ::Auxiliary) helper variables for computation second half is because there is no dedicated integral kernels these variables are used to compute vertical integrals w = vertical velocity wz0 = w at z = 0 pkin = bulk hydrostatic pressure contribution first half of these are fields that are used for computation y = north-south coordinate """ function vars_state(m::HBModel, ::Auxiliary, T) @vars begin y::T # y-coordinate of the box w::T # ∫(-∇⋅u) pkin::T # ∫(-αᵀθ) wz0::T # w at z=0 uᵈ::SVector{2, T} # velocity deviation from vertical mean ΔGᵘ::SVector{2, T} # vertically averaged tendency end end function ocean_init_aux! end """ init_state_auxiliary!(::HBModel) sets the initial value for auxiliary variables (those that aren't related to vertical integrals) dispatches to ocean_init_aux! 
which is defined in a problem file such as SimpleBoxProblem.jl """ function init_state_auxiliary!( m::HBModel, state_auxiliary::MPIStateArray, grid, direction, ) init_state_auxiliary!( m, (m, A, tmp, geom) -> ocean_init_aux!(m, m.problem, A, geom), state_auxiliary, grid, direction, ) end """ vars_state(::HBModel, ::Gradient) variables that you want to take a gradient of these are just copies in our model """ function vars_state(m::HBModel, ::Gradient, T) @vars begin ∇u::SVector{2, T} ∇uᵈ::SVector{2, T} ∇θ::T end end """ compute_gradient_argument!(::HBModel) copy u and θ to var_gradient this computation is done pointwise at each nodal point # arguments: - `m`: model in this case HBModel - `G`: array of gradient variables - `Q`: array of state variables - `A`: array of aux variables - `t`: time, not used """ @inline function compute_gradient_argument!(m::HBModel, G::Vars, Q::Vars, A, t) G.∇θ = Q.θ velocity_gradient_argument!(m, m.coupling, G, Q, A, t) return nothing end @inline function velocity_gradient_argument!( m::HBModel, ::Uncoupled, G, Q, A, t, ) G.∇u = Q.u return nothing end """ vars_state(::HBModel, ::GradientFlux, FT) the output of the gradient computations multiplies ∇u by viscosity tensor and ∇θ by the diffusivity tensor """ function vars_state(m::HBModel, ::GradientFlux, T) @vars begin ∇ʰu::T ν∇u::SMatrix{3, 2, T, 6} κ∇θ::SVector{3, T} end end """ compute_gradient_flux!(::HBModel) copy ∇u and ∇θ to var_diffusive this computation is done pointwise at each nodal point # arguments: - `m`: model in this case HBModel - `D`: array of diffusive variables - `G`: array of gradient variables - `Q`: array of state variables - `A`: array of aux variables - `t`: time, not used """ @inline function compute_gradient_flux!( m::HBModel, D::Vars, G::Grad, Q::Vars, A::Vars, t, ) # store ∇ʰu for continuity equation (convert gradient to divergence) D.∇ʰu = G.∇u[1, 1] + G.∇u[2, 2] velocity_gradient_flux!(m, m.coupling, D, G, Q, A, t) κ = diffusivity_tensor(m, G.∇θ[3]) D.κ∇θ = 
-κ * G.∇θ return nothing end @inline function velocity_gradient_flux!(m::HBModel, ::Uncoupled, D, G, Q, A, t) ν = viscosity_tensor(m) D.ν∇u = -ν * G.∇u return nothing end """ viscosity_tensor(::HBModel) uniform viscosity with different values for horizontal and vertical directions # Arguments - `m`: model object to dispatch on and get viscosity parameters """ @inline viscosity_tensor(m::HBModel) = Diagonal(@SVector [m.νʰ, m.νʰ, m.νᶻ]) """ diffusivity_tensor(::HBModel) uniform diffusivity in the horizontal direction applies convective adjustment in the vertical, bump by 1000 if ∂θ∂z < 0 # Arguments - `m`: model object to dispatch on and get diffusivity parameters - `∂θ∂z`: value of the derivative of temperature in the z-direction """ @inline function diffusivity_tensor(m::HBModel, ∂θ∂z) ∂θ∂z < 0 ? κ = m.κᶜ : κ = m.κᶻ return Diagonal(@SVector [m.κʰ, m.κʰ, κ]) end """ vars_integral(::HBModel) location to store integrands for bottom up integrals ∇hu = the horizontal divegence of u, e.g. dw/dz """ function vars_state(m::HBModel, ::UpwardIntegrals, T) @vars begin ∇ʰu::T αᵀθ::T end end """ integral_load_auxiliary_state!(::HBModel) copy w to var_integral this computation is done pointwise at each nodal point arguments: m -> model in this case HBModel I -> array of integrand variables Q -> array of state variables A -> array of aux variables """ @inline function integral_load_auxiliary_state!( m::HBModel, I::Vars, Q::Vars, A::Vars, ) I.∇ʰu = A.w # borrow the w value from A... 
I.αᵀθ = -m.αᵀ * Q.θ # integral will be reversed below return nothing end """ integral_set_auxiliary_state!(::HBModel) copy integral results back out to aux this computation is done pointwise at each nodal point arguments: m -> model in this case HBModel A -> array of aux variables I -> array of integrand variables """ @inline function integral_set_auxiliary_state!(m::HBModel, A::Vars, I::Vars) A.w = I.∇ʰu A.pkin = I.αᵀθ return nothing end """ vars_reverse_integral(::HBModel) location to store integrands for top down integrals αᵀθ = density perturbation """ function vars_state(m::HBModel, ::DownwardIntegrals, T) @vars begin αᵀθ::T end end """ reverse_integral_load_auxiliary_state!(::HBModel) copy αᵀθ to var_reverse_integral this computation is done pointwise at each nodal point arguments: m -> model in this case HBModel I -> array of integrand variables A -> array of aux variables """ @inline function reverse_integral_load_auxiliary_state!( m::HBModel, I::Vars, Q::Vars, A::Vars, ) I.αᵀθ = A.pkin return nothing end """ reverse_integral_set_auxiliary_state!(::HBModel) copy reverse integral results back out to aux this computation is done pointwise at each nodal point arguments: m -> model in this case HBModel A -> array of aux variables I -> array of integrand variables """ @inline function reverse_integral_set_auxiliary_state!( m::HBModel, A::Vars, I::Vars, ) A.pkin = I.αᵀθ return nothing end """ flux_first_order!(::HBModel) calculates the hyperbolic flux contribution to state variables this computation is done pointwise at each nodal point # arguments: m -> model in this case HBModel F -> array of fluxes for each state variable Q -> array of state variables A -> array of aux variables t -> time, not used # computations ∂ᵗu = ∇⋅(g*η + g∫αᵀθdz + v⋅u) ∂ᵗθ = ∇⋅(vθ) where v = (u,v,w) """ @inline function flux_first_order!( m::HBModel, F::Grad, Q::Vars, A::Vars, t::Real, direction, ) # ∇ʰ • (g η) hydrostatic_pressure!(m, m.coupling, F, Q, A, t) # ∇ʰ • (- ∫(αᵀ θ)) 
kinematic_pressure!(m, F, Q, A, t) # ∇ʰ • (v ⊗ u) momentum_advection!(m, m.momentum_advection, F, Q, A, t) # ∇ • (u θ) tracer_advection!(m, m.tracer_advection, F, Q, A, t) return nothing end @inline function hydrostatic_pressure!(m::HBModel, ::Uncoupled, F, Q, A, t) η = Q.η Iʰ = @SMatrix [ 1 -0 -0 1 -0 -0 ] F.u += grav(parameter_set(m)) * η * Iʰ return nothing end @inline function kinematic_pressure!(m::HBModel, F, Q, A, t) pkin = A.pkin Iʰ = @SMatrix [ 1 -0 -0 1 -0 -0 ] F.u += grav(parameter_set(m)) * pkin * Iʰ return nothing end momentum_advection!(::HBModel, ::Nothing, _...) = nothing @inline function momentum_advection!( ::HBModel, ::NonLinearAdvectionTerm, F, Q, A, t, ) u = Q.u @inbounds v = @SVector [Q.u[1], Q.u[2], A.w] F.u += v * u' return nothing end tracer_advection!(::HBModel, ::Nothing, _...) = nothing @inline function tracer_advection!( ::HBModel, ::NonLinearAdvectionTerm, F, Q, A, t, ) θ = Q.θ @inbounds v = @SVector [Q.u[1], Q.u[2], A.w] F.θ += v * θ return nothing end """ flux_second_order!(::HBModel) calculates the parabolic flux contribution to state variables this computation is done pointwise at each nodal point # arguments: - `m`: model in this case HBModel - `F`: array of fluxes for each state variable - `Q`: array of state variables - `D`: array of diff variables - `A`: array of aux variables - `t`: time, not used # computations ∂ᵗu = -∇⋅(ν∇u) ∂ᵗθ = -∇⋅(κ∇θ) """ @inline function flux_second_order!( m::HBModel, F::Grad, Q::Vars, D::Vars, HD::Vars, A::Vars, t::Real, ) F.u += D.ν∇u F.θ += D.κ∇θ return nothing end """ source!(::HBModel) Calculates the source term contribution to state variables. This computation is done pointwise at each nodal point. 
Arguments: m -> model in this case HBModel F -> array of fluxes for each state variable Q -> array of state variables A -> array of aux variables t -> time, not used Computations: ∂ᵗu = -f × u ∂ᵗη = w|(z=0) """ @inline function source!( m::HBModel, S::Vars, Q::Vars, D::Vars, A::Vars, t::Real, direction, ) # explicit forcing for SSH wz0 = A.wz0 S.η += wz0 coriolis_force!(m, m.coupling, S, Q, A, t) # Arguments for forcing functions # args = y, t, u, v, w, η, θ args = tuple(A.y, t, Q.u[1], Q.u[2], A.w, Q.η, Q.θ) Su = m.forcing.u(args...) Sv = m.forcing.v(args...) S.u += @SVector [Su, Sv] S.η += m.forcing.η(args...) S.θ += m.forcing.θ(args...) return nothing end @inline function coriolis_force!(m::HBModel, ::Uncoupled, S, Q, A, t) # f × u f = coriolis_parameter(m, m.problem, A.y) u, v = Q.u # Horizontal components of velocity S.u -= @SVector [-f * v, f * u] return nothing end """ wavespeed(::HBModel) calculates the wavespeed for rusanov flux """ @inline wavespeed(m::HBModel, n⁻, _...) = abs(SVector(m.cʰ, m.cʰ, m.cᶻ)' * n⁻) """ update_penalty(::HBModel) set Δη = 0 when computing numerical fluxes """ # We want not have jump penalties on η (since not a flux variable) function update_penalty!( ::RusanovNumericalFlux, ::HBModel, n⁻, λ, ΔQ::Vars, Q⁻, A⁻, Q⁺, A⁺, t, ) ΔQ.η = -0 return nothing end filter_state!(Q, filter::Nothing, grid) = nothing filter_state!(Q, filter, grid) = apply!(Q, UnitRange(1, size(Q, 2)), grid) """ update_auxiliary_state!(::HBModel) Applies the vertical filter to the zonal and meridional velocities to preserve numerical incompressibility Applies an exponential filter to θ to anti-alias the non-linear advective term Doesn't actually touch the aux variables any more, but we need a better filter interface than this anyways """ function update_auxiliary_state!( dg::DGModel, m::HBModel, Q::MPIStateArray, t::Real, elems::UnitRange, ) FT = eltype(Q) MD = dg.modeldata # `update_aux!` gets called twice, once for the real elements and once for # the ghost 
elements. Only apply the filters to the real elems. if elems == dg.grid.topology.realelems # required to ensure that after integration velocity field is divergence free vert_filter = MD.vert_filter apply!(Q, (:u,), dg.grid, vert_filter, direction = VerticalDirection()) exp_filter = MD.exp_filter apply!(Q, (:θ,), dg.grid, exp_filter, direction = VerticalDirection()) filter_state!(Q, m.state_filter, dg.grid) end compute_flow_deviation!(dg, m, m.coupling, Q, t) return true end @inline compute_flow_deviation!(dg, ::HBModel, ::Uncoupled, _...) = nothing """ update_auxiliary_state_gradient!(::HBModel) ∇hu to w for integration performs integration for w and pkin (should be moved to its own integral kernels) copies down w and wz0 because we don't have 2D structures """ function update_auxiliary_state_gradient!( dg::DGModel, m::HBModel, Q::MPIStateArray, t::Real, elems::UnitRange, ) FT = eltype(Q) A = dg.state_auxiliary D = dg.state_gradient_flux # load -∇ʰu as ∂ᶻw index_w = varsindex(vars_state(m, Auxiliary(), FT), :w) index_∇ʰu = varsindex(vars_state(m, GradientFlux(), FT), :∇ʰu) @views @. A.data[:, index_w, elems] = -D.data[:, index_∇ʰu, elems] # compute integrals for w and pkin indefinite_stack_integral!(dg, m, Q, A, t, elems) # bottom -> top reverse_indefinite_stack_integral!(dg, m, Q, A, t, elems) # top -> bottom # We are unable to use vars (ie A.w) for this because this operation will # return a SubArray, and adapt (used for broadcasting along reshaped arrays) # has a limited recursion depth for the types allowed. 
number_aux = number_states(m, Auxiliary()) index_wz0 = varsindex(vars_state(m, Auxiliary(), FT), :wz0) info = basic_grid_info(dg) Nqh, Nqk = info.Nqh, info.Nqk nelemv, nelemh = info.nvertelem, info.nhorzelem nrealelemh = info.nhorzrealelem # project w(z=0) down the stack data = reshape(A.data, Nqh, Nqk, number_aux, nelemv, nelemh) flat_wz0 = @view data[:, end:end, index_w, end:end, 1:nrealelemh] boxy_wz0 = @view data[:, :, index_wz0, :, 1:nrealelemh] boxy_wz0 .= flat_wz0 return true end """ boundary_state!(nf, bc, ::HBModel, args...) applies boundary conditions for the hyperbolic fluxes dispatches to a function in OceanBoundaryConditions.jl based on bytype defined by a problem such as SimpleBoxProblem.jl """ @inline boundary_state!(nf, bc, ocean::HBModel, args...) = _ocean_boundary_state!(nf, bc, ocean, args...) #= """ boundary_state!(nf, ::HBModel, args...) applies boundary conditions for the hyperbolic fluxes dispatches to a function in OceanBoundaryConditions.jl based on bytype defined by a problem such as SimpleBoxProblem.jl """ @inline function boundary_state!(nf, ocean::HBModel, args...) boundary_conditions = ocean.problem.boundary_conditions return ocean_boundary_state!(nf, boundary_conditions, ocean, args...) end =# """ ocean_boundary_state!(nf, bc::OceanBC, ::HBModel, args...) splits boundary condition application into velocity and temperature conditions """ @inline function ocean_boundary_state!(nf, bc::OceanBC, ocean::HBModel, args...) ocean_velocity_boundary_state!(nf, bc.velocity, ocean, args...) ocean_temperature_boundary_state!(nf, bc.temperature, ocean, args...) 
return nothing end """ ocean_boundary_state!(nf, boundaries::Tuple, ::HBModel, Q⁺, A⁺, n, Q⁻, A⁻, bctype) applies boundary conditions for the first-order and gradient fluxes dispatches to a function in OceanBoundaryConditions.jl based on bytype defined by a problem such as SimpleBoxProblem.jl """ @generated function ocean_boundary_state!( nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, boundaries::Tuple, ocean, Q⁺, A⁺, n, Q⁻, A⁻, bctype, t, args..., ) N = fieldcount(boundaries) return quote Base.Cartesian.@nif( $(N + 1), i -> bctype == i, # conditionexpr i -> ocean_boundary_state!( nf, boundaries[i], ocean, Q⁺, A⁺, n, Q⁻, A⁻, t, ), # expr i -> error("Invalid boundary tag") ) # elseexpr return nothing end end """ ocean_boundary_state!(nf, boundaries::Tuple, ::HBModel, Q⁺, A⁺, D⁺, n, Q⁻, A⁻, D⁻, bctype) applies boundary conditions for the second-order fluxes dispatches to a function in OceanBoundaryConditions.jl based on bytype defined by a problem such as SimpleBoxProblem.jl """ @generated function ocean_boundary_state!( nf::NumericalFluxSecondOrder, boundaries::Tuple, ocean, Q⁺, D⁺, HD⁺, A⁺, n, Q⁻, D⁻, HD⁻, A⁻, bctype, t, args..., ) N = fieldcount(boundaries) return quote Base.Cartesian.@nif( $(N + 1), i -> bctype == i, # conditionexpr i -> ocean_boundary_state!( nf, boundaries[i], ocean, Q⁺, D⁺, A⁺, n, Q⁻, D⁻, A⁻, t, ), # expr i -> error("Invalid boundary tag") ) # elseexpr return nothing end end ================================================ FILE: src/Ocean/JLD2Writer.jl ================================================ module JLD2Writers using JLD2 using ..CartesianDomains: DiscontinuousSpectralElementGrid using ..Ocean: current_step, current_time using ..CartesianFields: SpectralElementField struct JLD2Writer{A, F, M, O} filepath::F model::M outputs::O array_type::A end function Base.show(io::IO, writer::JLD2Writer{A}) where {A} header = "JLD2Writer{$A}" filepath = " ├── filepath: $(writer.filepath)" outputs = " └── $(length(writer.outputs)) 
outputs: $(keys(writer.outputs))"
    print(io, header, '\n', filepath, '\n', outputs)
    return nothing
end

"""
    JLD2Writer(model, outputs=model.fields; filepath, array_type=Array, overwrite_existing=true)

Returns a utility for writing field output to a JLD2 file at `filepath`,
removing any existing file first when `overwrite_existing = true`.

`write!(jld2_writer::JLD2Writer)` writes `outputs` to `filepath`, where `outputs`
is either a `NamedTuple` or `Dict`ionary of `fields` or functions of the form
`output(model)`.

Field data is converted to `array_type` before outputting.
"""
function JLD2Writer(
    model,
    outputs = model.fields;
    filepath,
    array_type = Array,
    overwrite_existing = true,
)
    # Convert the grid to the requested (typically CPU) array type so that it
    # can be serialized to disk.
    cpu_grid =
        DiscontinuousSpectralElementGrid(model.domain, array_type = array_type)

    # Initialize output: start from a fresh file when requested.
    overwrite_existing && isfile(filepath) && rm(filepath; force = true)

    # Write time-independent metadata once, up front.
    file = jldopen(filepath, "a+")
    file["domain"] = model.domain
    file["grid"] = cpu_grid
    close(file)

    writer = JLD2Writer(filepath, model, outputs, array_type)

    # Record the initial state as output index 0.
    write!(writer, first = true)

    return writer
end

# Fallback: non-field outputs need no per-output metadata.
initialize_output!(file, args...) = nothing

# Fields store their real-element indices so they can be reconstructed on read.
initialize_output!(file, field::SpectralElementField, name) =
    file["$name/meta/realelems"] = field.realelems

"""
    write!(writer::JLD2Writer; first=false)

Append the current model time, step, and every output to the writer's file.
When `first = true`, per-output metadata is also written and the output index
starts at 0.
"""
function write!(writer::JLD2Writer; first = false)
    model = writer.model
    filepath = writer.filepath
    outputs = writer.outputs

    # Re-open in append mode; the file is closed between writes.
    file = jldopen(filepath, "a+")

    # Next output index = number of previously recorded times.
    N_output = first ? 0 : length(keys(file["times"]))

    step = current_step(model)
    time = current_time(model)

    file["times/$N_output"] = time
    file["steps/$N_output"] = step

    for (name, output) in zip(keys(outputs), values(outputs))
        first && initialize_output!(file, output, name)
        write_single_output!(file, output, name, N_output, writer)
    end

    close(file)

    return nothing
end

# Convert field data to the writer's array type (e.g. GPU -> CPU) and store it.
function write_field!(file, field, name, N_output, writer)
    data = convert(writer.array_type, field.data)
    file["$name/$N_output"] = data
    return nothing
end

write_single_output!(file, field::SpectralElementField, args...) =
    write_field!(file, field, args...)
# Generic output: call `output(model)` and store the result directly.
function write_single_output!(file, output, name, N_output, writer)
    data = output(writer.model)
    file["$name/$N_output"] = data
    return nothing
end

"""
    OutputTimeSeries

Lazy handle to a single named output stored in a JLD2 file, holding the file
path, output name, the serialized domain and grid, and the recorded times
and steps. Field data itself is loaded on demand via `getindex`.
"""
struct OutputTimeSeries{F, N, D, G, T, S}
    filepath::F # path to the JLD2 file
    name::N # name of the output within the file
    domain::D
    grid::G
    times::T # times at which outputs were recorded
    steps::S # model steps at which outputs were recorded
end

# Build an OutputTimeSeries for `name` by reading metadata from `filepath`.
# On any read error the metadata stays `nothing` and a warning is emitted;
# the file handle is always closed.
function OutputTimeSeries(name, filepath)
    file = jldopen(filepath)
    domain, grid, times, steps = [nothing for i in 1:4]
    try
        domain = file["domain"]
        grid = file["grid"]
        output_indices = keys(file["times"])
        times = [file["times/$i"] for i in output_indices]
        steps = [file["steps/$i"] for i in output_indices]
    catch err
        @warn "Could not build time series of $name from $filepath because $(sprint(showerror, err))"
    finally
        close(file)
    end
    return OutputTimeSeries(filepath, name, domain, grid, times, steps)
end

# Number of recorded outputs = number of stored time entries.
function Base.length(timeseries::OutputTimeSeries)
    file = jldopen(timeseries.filepath)
    timeseries_length = length(keys(file["times"]))
    close(file)
    return timeseries_length
end

# 1-based indexing into the series; entries are stored 0-indexed in the
# file, hence `i - 1`.
function Base.getindex(timeseries::OutputTimeSeries, i)
    name = timeseries.name
    domain = timeseries.domain
    grid = timeseries.grid
    file = jldopen(timeseries.filepath)
    data = file["$name/$(i-1)"]
    realelems = file["$name/meta/realelems"]
    close(file)
    # View restricted to the real (non-ghost) elements.
    realdata = view(data, :, realelems)
    return SpectralElementField(domain, grid, realdata, data, realelems)
end

end # module

================================================
FILE: src/Ocean/Ocean.jl
================================================

module Ocean

using ..BalanceLaws
using ..CartesianDomains
using ..CartesianFields
using ..Problems

export AbstractOceanModel,
    AbstractOceanProblem,
    AbstractOceanCoupling,
    Uncoupled,
    Coupled,
    AdvectionTerm,
    NonLinearAdvectionTerm,
    InitialConditions

# Root abstract types for ocean balance laws and problem definitions.
abstract type AbstractOceanModel <: BalanceLaw end
abstract type AbstractOceanProblem <: AbstractProblem end

# Trait marking whether a model exchanges state with another component.
abstract type AbstractOceanCoupling end
struct Uncoupled <: AbstractOceanCoupling end
struct Coupled <: AbstractOceanCoupling end

# Advection treatment; models dispatch on this (or `Nothing` to disable).
abstract type AdvectionTerm end
struct NonLinearAdvectionTerm <: AdvectionTerm end

function
ocean_init_state! end function ocean_init_aux! end function ocean_boundary_state! end function coriolis_parameter end function kinematic_stress end function surface_flux end include("OceanBC.jl") include("HydrostaticBoussinesq/HydrostaticBoussinesq.jl") using .HydrostaticBoussinesq: HydrostaticBoussinesqModel, Forcing include("ShallowWater/ShallowWaterModel.jl") include("SplitExplicit/SplitExplicitModel.jl") include("SplitExplicit01/SplitExplicitModel.jl") include("OceanProblems/OceanProblems.jl") include("SuperModels.jl") using .OceanProblems: InitialConditions using .SuperModels: HydrostaticBoussinesqSuperModel, current_time, current_step, Δt include("JLD2Writer.jl") using .JLD2Writers: JLD2Writer, OutputTimeSeries, write! end ================================================ FILE: src/Ocean/OceanBC.jl ================================================ export OceanBC, VelocityBC, VelocityDragBC, TemperatureBC, Impenetrable, Penetrable, NoSlip, FreeSlip, KinematicStress, Insulating, TemperatureFlux using StaticArrays using ..BalanceLaws using ..DGMethods.NumericalFluxes """ OceanBC(velocity = Impenetrable(NoSlip()) temperature = Insulating()) The standard boundary condition for OceanModel. The default options imply a "no flux" boundary condition. """ Base.@kwdef struct OceanBC{M, T} velocity::M = Impenetrable(NoSlip()) temperature::T = Insulating() end abstract type VelocityBC end abstract type VelocityDragBC end abstract type TemperatureBC end """ Impenetrable(drag::VelocityDragBC) :: VelocityBC Defines an impenetrable wall model for velocity. This implies: - no flow in the direction normal to the boundary, and - flow parallel to the boundary is subject to the `drag` condition. """ struct Impenetrable{D <: VelocityDragBC} <: VelocityBC drag::D end """ Penetrable(drag::VelocityDragBC) :: VelocityBC Defines an penetrable wall model for velocity. 
This implies: - no constraint on flow in the direction normal to the boundary, and - flow parallel to the boundary is subject to the `drag` condition. """ struct Penetrable{D <: VelocityDragBC} <: VelocityBC drag::D end """ NoSlip() :: VelocityDragBC Zero velocity at the boundary. """ struct NoSlip <: VelocityDragBC end """ FreeSlip() :: VelocityDragBC No surface drag on velocity parallel to the boundary. """ struct FreeSlip <: VelocityDragBC end """ KinematicStress(stress) :: VelocityDragBC Applies the specified kinematic stress on velocity normal to the boundary. Prescribe the net inward kinematic stress across the boundary by `stress`, a function with signature `stress(problem, state, aux, t)`, returning the flux (in m²/s²). """ struct KinematicStress{S} <: VelocityDragBC stress::S function KinematicStress(stress::S = nothing) where {S} new{S}(stress) end end kinematic_stress(problem, y, ρ₀) = @SVector [0, 0] # fallback for generic problems kinematic_stress(problem, y, ρ₀, ::Nothing) = kinematic_stress(problem, y, ρ₀) kinematic_stress(problem, y, ρ₀, stress) = stress(y) """ Insulating() :: TemperatureBC No temperature flux across the boundary """ struct Insulating <: TemperatureBC end """ TemperatureFlux(flux) :: TemperatureBC Prescribe the net inward temperature flux across the boundary by `flux`, a function with signature `flux(problem, state, aux, t)`, returning the flux (in m⋅K/s). 
""" struct TemperatureFlux{T} <: TemperatureBC flux::T function TemperatureFlux(flux::T = nothing) where {T} new{T}(flux) end end # these functions just trim off the extra arguments function _ocean_boundary_state!( nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, bc, ocean, Q⁺, A⁺, n, Q⁻, A⁻, t, _..., ) return ocean_boundary_state!(nf, bc, ocean, Q⁺, A⁺, n, Q⁻, A⁻, t) end function _ocean_boundary_state!( nf::NumericalFluxSecondOrder, bc, ocean, Q⁺, D⁺, HD⁺, A⁺, n, Q⁻, D⁻, HD⁻, A⁻, t, _..., ) return ocean_boundary_state!(nf, bc, ocean, Q⁺, D⁺, A⁺, n, Q⁻, D⁻, A⁻, t) end ================================================ FILE: src/Ocean/OceanProblems/OceanProblems.jl ================================================ module OceanProblems export SimpleBox, Fixed, Rotating, HomogeneousBox, OceanGyre using StaticArrays using CLIMAParameters.Planet: grav using ...Problems using ..Ocean using ..BalanceLaws: parameter_set using ..HydrostaticBoussinesq using ..ShallowWater using ..SplitExplicit01 import ..Ocean: ocean_init_state!, ocean_init_aux!, kinematic_stress, surface_flux, coriolis_parameter HBModel = HydrostaticBoussinesqModel SWModel = ShallowWaterModel include("simple_box_problem.jl") include("homogeneous_box.jl") include("shallow_water_initial_states.jl") function ocean_init_state!( m::SWModel, p::HomogeneousBox, Q, A, local_geometry, t, ) if t == 0 null_init_state!(p, m.turbulence, Q, A, local_geometry, 0) else gyre_init_state!(m, p, m.turbulence, Q, A, local_geometry, t) end end @inline coriolis_parameter(m::SWModel, p::HomogeneousBox, y) = m.fₒ + m.β * (y - p.Lʸ / 2) include("ocean_gyre.jl") include("initial_value_problem.jl") end # module ================================================ FILE: src/Ocean/OceanProblems/homogeneous_box.jl ================================================ ########################## # Homogenous wind stress # # Constant temperature # ########################## """ HomogeneousBox <: AbstractSimpleBoxProblem Container structure 
for a simple box problem with wind-stress. Lˣ = zonal (east-west) length Lʸ = meridional (north-south) length H = height of the ocean τₒ = maximum value of wind-stress (amplitude) """ struct HomogeneousBox{T, BC} <: AbstractSimpleBoxProblem Lˣ::T Lʸ::T H::T τₒ::T boundary_conditions::BC function HomogeneousBox{FT}( Lˣ, # m Lʸ, # m H; # m τₒ = FT(1e-1), # N/m² BC = ( OceanBC(Impenetrable(NoSlip()), Insulating()), OceanBC(Impenetrable(NoSlip()), Insulating()), OceanBC(Penetrable(KinematicStress()), Insulating()), ), ) where {FT <: AbstractFloat} return new{FT, typeof(BC)}(Lˣ, Lʸ, H, τₒ, BC) end end """ ocean_init_state!(::HomogeneousBox) initialize u,v with random values, η with 0, and θ with a constant (20) # Arguments - `p`: HomogeneousBox problem object, used to dispatch on - `Q`: state vector - `A`: auxiliary state vector, not used - `localgeo`: the local geometry, not used - `t`: time to evaluate at, not used """ function ocean_init_state!(m::HBModel, p::HomogeneousBox, Q, A, localgeo, t) Q.u = @SVector [0, 0] Q.η = 0 Q.θ = 20 return nothing end """ kinematic_stress(::HomogeneousBox) jet stream like windstress # Arguments - `p`: problem object to dispatch on and get additional parameters - `y`: y-coordinate in the box """ @inline kinematic_stress(p::HomogeneousBox, y, ρ) = @SVector [(p.τₒ / ρ) * cos(y * π / p.Lʸ), -0] @inline kinematic_stress(p::HomogeneousBox, y) = @SVector [-p.τₒ * cos(π * y / p.Lʸ), -0] ================================================ FILE: src/Ocean/OceanProblems/initial_value_problem.jl ================================================ ##### ##### Initial value problem ##### struct InitialValueProblem{FT, IC, BC} <: AbstractSimpleBoxProblem Lˣ::FT Lʸ::FT H::FT initial_conditions::IC boundary_conditions::BC """ InitialValueProblem{FT}(; dimensions, initial_conditions=InitialConditions(), boundary_conditions = (OceanBC(Impenetrable(FreeSlip()), Insulating()), OceanBC(Penetrable(FreeSlip()), Insulating()))) Returns an `InitialValueProblem` with 
`dimensions = (Lˣ, Lʸ, H)`, `initial_conditions`, and `boundary_conditions`. The default `initial_conditions` are resting with no temperature perturbation; the default list of `boundary_conditions` provide implementations for an impenetrable, insulating boundary, and a penetrable, insulating boundary. """ function InitialValueProblem{FT}(; dimensions, initial_conditions = InitialConditions(), boundary_conditions = ( OceanBC(Impenetrable(FreeSlip()), Insulating()), OceanBC(Penetrable(FreeSlip()), Insulating()), ), ) where {FT} return new{FT, typeof(initial_conditions), typeof(boundary_conditions)}( FT.(dimensions)..., initial_conditions, boundary_conditions, ) end end ##### ##### Initial conditions ##### resting(x, y, z) = 0 struct InitialConditions{U, V, T, E} u::U v::V θ::T η::E end """ InitialConditions(; u=resting, v=resting, θ=resting, η=resting) Stores initial conditions for each prognostic variable provided as functions of `x, y, z`. Example ======= # A Gaussian surface perturbation a = 0.1 # m, amplitude L = 1e5 # m, horizontal scale of the perturbation ηᵢ(x, y, z) = a * exp(-(x^2 + y^2) / 2L^2) ics = InitialConditions(η=ηᵢ) """ InitialConditions(; u = resting, v = resting, θ = resting, η = resting) = InitialConditions(u, v, θ, η) """ ocean_init_state!(::HydrostaticBoussinesqModel, ic::InitialCondition, state, aux, local_geometry, time) Initialize the state variables `u = (u, v)` (a vector), `θ`, and `η`. Mutates `state`. This function is called by `init_state_prognostic!(::HydrostaticBoussinesqModel, ...)`. 
"""
function ocean_init_state!(
    ::HydrostaticBoussinesqModel,
    ivp::InitialValueProblem,
    state,
    aux,
    local_geometry,
    time,
)
    ics = ivp.initial_conditions

    x, y, z = local_geometry.coord

    # Evaluate the user-provided initial-condition functions at this node.
    state.u = @SVector [ics.u(x, y, z), ics.v(x, y, z)]
    state.θ = ics.θ(x, y, z)
    state.η = ics.η(x, y, z)

    return nothing
end

================================================
FILE: src/Ocean/OceanProblems/ocean_gyre.jl
================================================

"""
    OceanGyre <: AbstractSimpleBoxProblem

Container structure for a simple box problem with wind-stress, coriolis
force, and temperature forcing.
Lˣ = zonal (east-west) length
Lʸ = meridional (north-south) length
H = height of the ocean
τₒ = maximum value of wind-stress (amplitude)
λʳ = temperature relaxation penetration constant (meters / second)
θᴱ = maximum surface temperature
"""
struct OceanGyre{T, BC} <: AbstractSimpleBoxProblem
    Lˣ::T
    Lʸ::T
    H::T
    τₒ::T
    λʳ::T
    θᴱ::T
    boundary_conditions::BC
    function OceanGyre{FT}(
        Lˣ, # m
        Lʸ, # m
        H; # m
        τₒ = FT(1e-1), # N/m²
        λʳ = FT(4 // 86400), # m/s
        θᴱ = FT(10), # K
        BC = (
            OceanBC(Impenetrable(NoSlip()), Insulating()),
            OceanBC(Impenetrable(NoSlip()), Insulating()),
            OceanBC(Penetrable(KinematicStress()), TemperatureFlux()),
        ),
    ) where {FT <: AbstractFloat}
        return new{FT, typeof(BC)}(Lˣ, Lʸ, H, τₒ, λʳ, θᴱ, BC)
    end
end

"""
    ocean_init_state!(::OceanGyre)

initialize `u`, `v`, and `η` with 0; `θ` is set to
`(5 + 4 cos(πy/Lʸ)) * (1 + z/H)`, i.e. a meridionally varying surface value
between 1 and 9 at z = 0
(assuming z ∈ [-H, 0], this decays linearly to 0 at z = -H — TODO confirm
the vertical coordinate convention)

# Arguments
- `p`: OceanGyre problem object, used to dispatch on and obtain ocean height H
- `Q`: state vector
- `A`: auxiliary state vector, not used
- `localgeo`: the local geometry information
- `t`: time to evaluate at, not used
"""
function ocean_init_state!(
    ::Union{HBModel, OceanModel},
    p::OceanGyre,
    Q,
    A,
    localgeo,
    t,
)
    coords = localgeo.coord
    @inbounds y = coords[2]
    @inbounds z = coords[3]
    @inbounds H = p.H

    Q.u = @SVector [-0, -0]
    Q.η = -0
    Q.θ = (5 + 4 * cos(y * π / p.Lʸ)) * (1 + z / H)

    return nothing
end

function ocean_init_state!(
    ::Union{SWModel,
BarotropicModel}, ::OceanGyre, Q, A, localgeo, t, ) Q.U = @SVector [-0, -0] Q.η = -0 return nothing end """ kinematic_stress(::OceanGyre) jet stream like windstress # Arguments - `p`: problem object to dispatch on and get additional parameters - `y`: y-coordinate in the box """ @inline kinematic_stress(p::OceanGyre, y, ρ) = @SVector [(p.τₒ / ρ) * cos(y * π / p.Lʸ), -0] @inline kinematic_stress(p::OceanGyre, y) = @SVector [-p.τₒ * cos(π * y / p.Lʸ), -0] """ surface_flux(::OceanGyre) cool-warm north-south linear temperature gradient # Arguments - `p`: problem object to dispatch on and get additional parameters - `y`: y-coordinate in the box - `θ`: temperature within element on boundary """ @inline function surface_flux(p::OceanGyre, y, θ) Lʸ = p.Lʸ θᴱ = p.θᴱ λʳ = p.λʳ θʳ = θᴱ * (1 - y / Lʸ) return λʳ * (θ - θʳ) end ================================================ FILE: src/Ocean/OceanProblems/shallow_water_initial_states.jl ================================================ using ..ShallowWater: TurbulenceClosure, LinearDrag, ConstantViscosity function null_init_state!( ::HomogeneousBox, ::TurbulenceClosure, state, aux, local_geometry, t, ) T = eltype(state.U) state.U = @SVector zeros(T, 2) state.η = 0 return nothing end η_lsw(x, y, t) = cos(π * x) * cos(π * y) * cos(√2 * π * t) u_lsw(x, y, t) = 2^(-0.5) * sin(π * x) * cos(π * y) * sin(√2 * π * t) v_lsw(x, y, t) = 2^(-0.5) * cos(π * x) * sin(π * y) * sin(√2 * π * t) function lsw_init_state!( m::ShallowWaterModel, p::HomogeneousBox, state, aux, local_geometry, t, ) coords = local_geometry.coord state.U = @SVector [ u_lsw(coords[1], coords[2], t), v_lsw(coords[1], coords[2], t), ] state.η = η_lsw(coords[1], coords[2], t) return nothing end v_lkw(x, y, t) = 0 u_lkw(x, y, t) = exp(-0.5 * y^2) * exp(-0.5 * (x - t + 5)^2) η_lkw(x, y, t) = 1 + u_lkw(x, y, t) function lkw_init_state!( m::ShallowWaterModel, p::HomogeneousBox, state, aux, local_geometry, t, ) coords = local_geometry.coord state.U = @SVector [ u_lkw(coords[1], 
coords[2], t), v_lkw(coords[1], coords[2], t), ] state.η = η_lkw(coords[1], coords[2], t) return nothing end R₋(ϵ) = (-1 - sqrt(1 + (2 * π * ϵ)^2)) / (2ϵ) R₊(ϵ) = (-1 + sqrt(1 + (2 * π * ϵ)^2)) / (2ϵ) D(ϵ) = (R₊(ϵ) * (exp(R₋(ϵ)) - 1) + R₋(ϵ) * (1 - exp(R₊(ϵ)))) / (exp(R₊(ϵ)) - exp(R₋(ϵ))) R₂(x₁, ϵ) = (1 / D(ϵ)) * ( ( (R₊(ϵ) * (exp(R₋(ϵ)) - 1)) * exp(R₊(ϵ) * x₁) + (R₋(ϵ) * (1 - exp(R₊(ϵ)))) * exp(R₋(ϵ) * x₁) ) / (exp(R₊(ϵ)) - exp(R₋(ϵ))) ) R₁(x₁, ϵ) = (π / D(ϵ)) * ( 1 .+ ( (exp(R₋(ϵ)) - 1) * exp(R₊(ϵ) * x₁) .+ (1 - exp(R₊(ϵ))) * exp(R₋(ϵ) * x₁) ) / (exp(R₊(ϵ)) - exp(R₋(ϵ))) ) 𝒱(x₁, y₁, ϵ) = R₂(x₁, ϵ) * sin.(π * y₁) 𝒰(x₁, y₁, ϵ) = -R₁(x₁, ϵ) * cos.(π * y₁) ℋ(x₁, y₁, ϵ, βᵖ, fₒ, γ) = (R₂(x₁, ϵ) / (π * fₒ)) * γ * cos(π * y₁) + (R₁(x₁, ϵ) / π) * (sin(π * y₁) * (1.0 + βᵖ * (y₁ - 0.5)) + (βᵖ / π) * cos(π * y₁)) function gyre_init_state!( m::SWModel, p::HomogeneousBox, T::LinearDrag, state, aux, local_geometry, t, ) coords = local_geometry.coord FT = eltype(state) τₒ = p.τₒ fₒ = m.fₒ β = m.β Lˣ = p.Lˣ Lʸ = p.Lʸ H = p.H γ = T.λ βᵖ = β * Lʸ / fₒ ϵ = γ / (Lˣ * β) _grav::FT = grav(parameter_set(m)) uˢ(ϵ) = (τₒ * D(ϵ)) / (H * γ * π) hˢ(ϵ) = (fₒ * Lˣ * uˢ(ϵ)) / _grav u = uˢ(ϵ) * 𝒰(coords[1] / Lˣ, coords[2] / Lʸ, ϵ) v = uˢ(ϵ) * 𝒱(coords[1] / Lˣ, coords[2] / Lʸ, ϵ) h = hˢ(ϵ) * ℋ(coords[1] / Lˣ, coords[2] / Lʸ, ϵ, βᵖ, fₒ, γ) state.U = @SVector [H * u, H * v] state.η = h return nothing end t1(x, δᵐ) = cos((√3 * x) / (2 * δᵐ)) + (√3^-1) * sin((√3 * x) / (2 * δᵐ)) t2(x, δᵐ) = 1 - exp((-x) / (2 * δᵐ)) * t1(x, δᵐ) t3(y, Lʸ) = π * sin(π * y / Lʸ) t4(x, Lˣ, C) = C * (1 - x / Lˣ) η_munk(x, y, Lˣ, Lʸ, δᵐ, C) = t4(x, Lˣ, C) * t3(y, Lʸ) * t2(x, δᵐ) function gyre_init_state!( m::SWModel, p::HomogeneousBox, V::ConstantViscosity, state, aux, local_geometry, t, ) coords = local_geometry.coord FT = eltype(state.U) _grav::FT = grav(parameter_set(m)) τₒ = p.τₒ fₒ = m.fₒ β = m.β Lˣ = p.Lˣ Lʸ = p.Lʸ H = p.H ν = V.ν δᵐ = (ν / β)^(1 / 3) C = τₒ / (_grav * H) * (fₒ / β) state.η = η_munk(coords[1], 
coords[2], Lˣ, Lʸ, δᵐ, C) state.U = @SVector zeros(FT, 2) return nothing end ================================================ FILE: src/Ocean/OceanProblems/simple_box_problem.jl ================================================ abstract type AbstractSimpleBoxProblem <: AbstractOceanProblem end """ ocean_init_aux!(::HBModel, ::AbstractSimpleBoxProblem) save y coordinate for computing coriolis, wind stress, and sea surface temperature # Arguments - `m`: model object to dispatch on and get viscosities and diffusivities - `p`: problem object to dispatch on and get additional parameters - `A`: auxiliary state vector - `geom`: geometry stuff """ function ocean_init_aux!(::HBModel, ::AbstractSimpleBoxProblem, A, geom) FT = eltype(A) @inbounds A.y = geom.coord[2] # needed for proper CFL condition calculation A.w = -0 A.pkin = -0 A.wz0 = -0 A.uᵈ = @SVector [-0, -0] A.ΔGᵘ = @SVector [-0, -0] return nothing end function ocean_init_aux!(::OceanModel, ::AbstractSimpleBoxProblem, A, geom) FT = eltype(A) @inbounds A.y = geom.coord[2] # needed for proper CFL condition calculation A.w = -0 A.pkin = -0 A.wz0 = -0 A.u_d = @SVector [-0, -0] A.ΔGu = @SVector [-0, -0] return nothing end function ocean_init_aux!(::SWModel, ::AbstractSimpleBoxProblem, A, geom) @inbounds A.y = geom.coord[2] A.Gᵁ = @SVector [-0, -0] A.Δu = @SVector [-0, -0] return nothing end function ocean_init_aux!(::BarotropicModel, ::AbstractSimpleBoxProblem, A, geom) @inbounds A.y = geom.coord[2] A.Gᵁ = @SVector [-0, -0] A.U_c = @SVector [-0, -0] A.η_c = -0 A.U_s = @SVector [-0, -0] A.η_s = -0 A.Δu = @SVector [-0, -0] A.η_diag = -0 A.Δη = -0 return nothing end """ coriolis_parameter northern hemisphere coriolis # Arguments - `m`: model object to dispatch on and get coriolis parameters - `y`: y-coordinate in the box """ @inline coriolis_parameter( m::Union{HBModel, OceanModel}, ::AbstractSimpleBoxProblem, y, ) = m.fₒ + m.β * y @inline coriolis_parameter( m::Union{SWModel, BarotropicModel}, ::AbstractSimpleBoxProblem, y, 
) = m.fₒ + m.β * y ############################ # Basic box problem # # Set up dimensions of box # ############################ abstract type AbstractRotation end struct Rotating <: AbstractRotation end struct Fixed <: AbstractRotation end """ SimpleBoxProblem <: AbstractSimpleBoxProblem Stub structure with the dimensions of the box. Lˣ = zonal (east-west) length Lʸ = meridional (north-south) length H = height of the ocean """ struct SimpleBox{R, T, BC} <: AbstractSimpleBoxProblem rotation::R Lˣ::T Lʸ::T H::T boundary_conditions::BC function SimpleBox{FT}( Lˣ, # m Lʸ, # m H; # m rotation = Fixed(), BC = ( OceanBC(Impenetrable(FreeSlip()), Insulating()), OceanBC(Penetrable(FreeSlip()), Insulating()), ), ) where {FT <: AbstractFloat} return new{typeof(rotation), FT, typeof(BC)}(rotation, Lˣ, Lʸ, H, BC) end end @inline coriolis_parameter( m::Union{HBModel, OceanModel}, ::SimpleBox{R}, y, ) where {R <: Fixed} = -0 @inline coriolis_parameter( m::Union{SWModel, BarotropicModel}, ::SimpleBox{R}, y, ) where {R <: Fixed} = -0 @inline coriolis_parameter( m::Union{HBModel, OceanModel}, ::SimpleBox{R}, y, ) where {R <: Rotating} = m.fₒ @inline coriolis_parameter( m::Union{SWModel, BarotropicModel}, ::SimpleBox{R}, y, ) where {R <: Rotating} = m.fₒ function ocean_init_state!( m::Union{SWModel, BarotropicModel}, p::SimpleBox, Q, A, local_geometry, t, ) coords = local_geometry.coord k = (2π / p.Lˣ, 2π / p.Lʸ, 2π / p.H) ν = viscosity(m) gH = gravity_speed(m) @inbounds f = coriolis_parameter(m, p, coords[2]) U, V, η = barotropic_state!(p.rotation, (coords..., t), ν, k, (gH, f)) Q.U = @SVector [U, V] Q.η = η return nothing end viscosity(m::SWModel) = (m.turbulence.ν, m.turbulence.ν, -0) viscosity(m::BarotropicModel) = (m.baroclinic.νʰ, m.baroclinic.νʰ, -0) gravity_speed(m::SWModel) = grav(parameter_set(m)) * m.problem.H gravity_speed(m::BarotropicModel) = grav(parameter_set(m)) * m.baroclinic.problem.H function ocean_init_state!( m::Union{HBModel, OceanModel}, p::SimpleBox, Q, A, 
local_geometry, t, ) coords = local_geometry.coord k = (2π / p.Lˣ, 2π / p.Lʸ, 2π / p.H) ν = (m.νʰ, m.νʰ, m.νᶻ) gH = grav(parameter_set(m)) * p.H @inbounds f = coriolis_parameter(m, p, coords[2]) U, V, η = barotropic_state!(p.rotation, (coords..., t), ν, k, (gH, f)) u°, v° = baroclinic_deviation(p.rotation, (coords..., t), ν, k, f) u = u° + U / p.H v = v° + V / p.H Q.u = @SVector [u, v] Q.η = η Q.θ = -0 return nothing end function barotropic_state!( ::Fixed, (x, y, z, t), (νˣ, νʸ, νᶻ), (kˣ, kʸ, kᶻ), params, ) gH, _ = params M = @SMatrix [-νˣ*kˣ^2 gH*kˣ; -kˣ 0] A = exp(M * t) * @SVector [1, 1] U = A[1] * sin(kˣ * x) V = -0 η = A[2] * cos(kˣ * x) return (U = U, V = V, η = η) end function baroclinic_deviation( ::Fixed, (x, y, z, t), (νˣ, νʸ, νᶻ), (kˣ, kʸ, kᶻ), f, ) λ = νˣ * kˣ^2 + νᶻ * kᶻ^2 u° = exp(-λ * t) * cos(kᶻ * z) * sin(kˣ * x) v° = -0 return (u° = u°, v° = v°) end function barotropic_state!( ::Rotating, (x, y, z, t), (νˣ, νʸ, νᶻ), (kˣ, kʸ, kᶻ), params, ) gH, f = params M = @SMatrix [-νˣ*kˣ^2 f gH*kˣ; -f -νˣ*kˣ^2 0; -kˣ 0 0] A = exp(M * t) * @SVector [1, 1, 1] U = A[1] * sin(kˣ * x) V = A[2] * sin(kˣ * x) η = A[3] * cos(kˣ * x) return (U = U, V = V, η = η) end function baroclinic_deviation( ::Rotating, (x, y, z, t), (νˣ, νʸ, νᶻ), (kˣ, kʸ, kᶻ), f, ) λ = νˣ * kˣ^2 + νᶻ * kᶻ^2 M = @SMatrix[-λ f; -f -λ] A = exp(M * t) * @SVector[1, 1] u° = A[1] * cos(kᶻ * z) * sin(kˣ * x) v° = A[2] * cos(kᶻ * z) * sin(kˣ * x) return (u° = u°, v° = v°) end @inline kinematic_stress(p::SimpleBox, y) = @SVector [-0, -0] ================================================ FILE: src/Ocean/README.md ================================================ Code for shallow water problem configurations and for specializing ClimateMachine core tools to ocean scenarios. 
# Code structure (aspirational) # Equations - ExplicitHydrostaticBoussinesq - SplitExplicitHydrostaticBoussinesq - ShallowWater # Models - ExplicitHydrostaticBoussinesq - SplitExplicitHydrostaticBoussinesq - ShallowWater ================================================ FILE: src/Ocean/ShallowWater/ShallowWaterModel.jl ================================================ module ShallowWater export ShallowWaterModel using StaticArrays using ...MPIStateArrays: MPIStateArray using LinearAlgebra: dot, Diagonal using CLIMAParameters.Planet: grav using ..Ocean using ...VariableTemplates using ...Mesh.Geometry using ...DGMethods using ...DGMethods.NumericalFluxes using ...BalanceLaws using ..Ocean: kinematic_stress, coriolis_parameter import ...DGMethods.NumericalFluxes: update_penalty! import ...BalanceLaws: vars_state, init_state_prognostic!, init_state_auxiliary!, compute_gradient_argument!, compute_gradient_flux!, flux_first_order!, flux_second_order!, source!, wavespeed, boundary_conditions, boundary_state! import ..Ocean: ocean_init_state!, ocean_init_aux!, ocean_boundary_state!, _ocean_boundary_state! using ...Mesh.Geometry: LocalGeometry ×(a::SVector, b::SVector) = StaticArrays.cross(a, b) ⋅(a::SVector, b::SVector) = StaticArrays.dot(a, b) ⊗(a::SVector, b::SVector) = a * b' abstract type TurbulenceClosure end struct LinearDrag{L} <: TurbulenceClosure λ::L end struct ConstantViscosity{L} <: TurbulenceClosure ν::L end """ ShallowWaterModel <: BalanceLaw A `BalanceLaw` for shallow water modeling. 
write out the equations here # Usage ShallowWaterModel(problem) """ struct ShallowWaterModel{C, PS, P, T, A, FT} <: BalanceLaw param_set::PS problem::P coupling::C turbulence::T advection::A c::FT fₒ::FT β::FT function ShallowWaterModel{FT}( param_set::PS, problem::P, turbulence::T, advection::A; coupling::C = Uncoupled(), c = FT(0), # m/s fₒ = FT(1e-4), # Hz β = FT(1e-11), # Hz / m ) where {FT <: AbstractFloat, PS, P, T, A, C} return new{C, PS, P, T, A, FT}( param_set, problem, coupling, turbulence, advection, c, fₒ, β, ) end end SWModel = ShallowWaterModel function vars_state(m::SWModel, ::Prognostic, T) @vars begin η::T U::SVector{2, T} end end function init_state_prognostic!(m::SWModel, state::Vars, aux::Vars, localgeo, t) ocean_init_state!(m, m.problem, state, aux, localgeo, t) end function vars_state(m::SWModel, ::Auxiliary, T) @vars begin y::T Gᵁ::SVector{2, T} # integral of baroclinic tendency Δu::SVector{2, T} # reconciliation Δu = 1/H * (Ū - ∫u) end end function init_state_auxiliary!( m::SWModel, state_auxiliary::MPIStateArray, grid, direction, ) init_state_auxiliary!( m, (m, A, tmp, geom) -> ocean_init_aux!(m, m.problem, A, geom), state_auxiliary, grid, direction, ) end function vars_state(m::SWModel, ::Gradient, T) @vars begin ∇U::SVector{2, T} end end function compute_gradient_argument!( m::SWModel, f::Vars, q::Vars, α::Vars, t::Real, ) compute_gradient_argument!(m.turbulence, f, q, α, t) end compute_gradient_argument!(::LinearDrag, _...) = nothing @inline function compute_gradient_argument!( T::ConstantViscosity, f::Vars, q::Vars, α::Vars, t::Real, ) f.∇U = q.U return nothing end function vars_state(m::SWModel, ::GradientFlux, T) @vars begin ν∇U::SMatrix{3, 2, T, 6} end end function compute_gradient_flux!( m::SWModel, σ::Vars, δ::Grad, q::Vars, α::Vars, t::Real, ) compute_gradient_flux!(m, m.turbulence, σ, δ, q, α, t) end compute_gradient_flux!(::SWModel, ::LinearDrag, _...) 
= nothing @inline function compute_gradient_flux!( ::SWModel, T::ConstantViscosity, σ::Vars, δ::Grad, q::Vars, α::Vars, t::Real, ) ν = Diagonal(@SVector [T.ν, T.ν, -0]) ∇U = δ.∇U σ.ν∇U = -ν * ∇U return nothing end @inline function flux_first_order!( m::SWModel, F::Grad, q::Vars, α::Vars, t::Real, direction, ) U = @SVector [q.U[1], q.U[2], -0] η = q.η H = m.problem.H Iʰ = @SMatrix [ 1 -0 -0 1 -0 -0 ] F.η += U F.U += grav(parameter_set(m)) * H * η * Iʰ advective_flux!(m, m.advection, F, q, α, t) return nothing end advective_flux!(::SWModel, ::Nothing, _...) = nothing @inline function advective_flux!( m::SWModel, ::NonLinearAdvectionTerm, F::Grad, q::Vars, α::Vars, t::Real, ) U = q.U H = m.problem.H V = @SVector [U[1], U[2], -0] F.U += 1 / H * V ⊗ U return nothing end function flux_second_order!( m::SWModel, G::Grad, q::Vars, σ::Vars, ::Vars, α::Vars, t::Real, ) flux_second_order!(m, m.turbulence, G, q, σ, α, t) end flux_second_order!(::SWModel, ::LinearDrag, _...) = nothing @inline function flux_second_order!( ::SWModel, ::ConstantViscosity, G::Grad, q::Vars, σ::Vars, α::Vars, t::Real, ) G.U += σ.ν∇U return nothing end @inline wavespeed(m::SWModel, n⁻, q::Vars, α::Vars, t::Real, direction) = m.c @inline function source!( m::SWModel{P}, S::Vars, q::Vars, d::Vars, α::Vars, t::Real, direction, ) where {P} # f × u U, V = q.U f = coriolis_parameter(m, m.problem, α.y) S.U -= @SVector [-f * V, f * U] forcing_term!(m, m.coupling, S, q, α, t) linear_drag!(m.turbulence, S, q, α, t) return nothing end @inline function forcing_term!(m::SWModel, ::Uncoupled, S, Q, A, t) S.U += kinematic_stress(m.problem, A.y) return nothing end linear_drag!(::ConstantViscosity, _...) = nothing @inline function linear_drag!(T::LinearDrag, S::Vars, q::Vars, α::Vars, t::Real) λ = T.λ U = q.U S.U -= λ * U return nothing end boundary_conditions(shallow::SWModel) = shallow.problem.boundary_conditions """ boundary_state!(nf, ::SWModel, args...) 
applies boundary conditions for the hyperbolic fluxes dispatches to a function in OceanBoundaryConditions.jl based on bytype defined by a problem such as SimpleBoxProblem.jl """ @inline function boundary_state!(nf, bc, shallow::SWModel, args...) return _ocean_boundary_state!(nf, bc, shallow, args...) end """ ocean_boundary_state!(nf, bc::OceanBC, ::SWModel) splits boundary condition application into velocity """ @inline function ocean_boundary_state!(nf, bc::OceanBC, m::SWModel, args...) return ocean_boundary_state!(nf, bc.velocity, m, m.turbulence, args...) end include("bc_velocity.jl") end ================================================ FILE: src/Ocean/ShallowWater/bc_velocity.jl ================================================ """ ocean_boundary_state!(::NumericalFluxFirstOrder, ::Impenetrable{FreeSlip}, ::SWModel) apply free slip boundary condition for velocity sets reflective ghost point """ @inline function ocean_boundary_state!( ::NumericalFluxFirstOrder, ::Impenetrable{FreeSlip}, ::SWModel, ::TurbulenceClosure, q⁺, a⁺, n⁻, q⁻, a⁻, t, args..., ) q⁺.η = q⁻.η V⁻ = @SVector [q⁻.U[1], q⁻.U[2], -0] V⁺ = V⁻ - 2 * n⁻ ⋅ V⁻ .* SVector(n⁻) q⁺.U = @SVector [V⁺[1], V⁺[2]] return nothing end """ ocean_boundary_state!(::Union{NumericalFluxGradient, NumericalFluxSecondOrder}, ::Impenetrable{FreeSlip}, ::SWModel) no second order flux computed for linear drag """ ocean_boundary_state!( ::Union{NumericalFluxGradient, NumericalFluxSecondOrder}, ::VelocityBC, ::SWModel, ::LinearDrag, _..., ) = nothing """ ocean_boundary_state!(::NumericalFluxGradient, ::Impenetrable{FreeSlip}, ::SWModel) apply free slip boundary condition for velocity sets non-reflective ghost point """ function ocean_boundary_state!( ::NumericalFluxGradient, ::Impenetrable{FreeSlip}, ::SWModel, ::ConstantViscosity, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, args..., ) V⁻ = @SVector [Q⁻.U[1], Q⁻.U[2], -0] V⁺ = V⁻ - n⁻ ⋅ V⁻ .* SVector(n⁻) Q⁺.U = @SVector [V⁺[1], V⁺[2]] return nothing end """ 
shallow_normal_boundary_flux_second_order!(::NumericalFluxSecondOrder, ::Impenetrable{FreeSlip}, ::SWModel) apply free slip boundary condition for velocity apply zero numerical flux in the normal direction """ function ocean_boundary_state!( ::NumericalFluxSecondOrder, ::Impenetrable{FreeSlip}, ::SWModel, ::ConstantViscosity, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, args..., ) Q⁺.U = Q⁻.U D⁺.ν∇U = n⁻ * (@SVector [-0, -0])' return nothing end """ ocean_boundary_state!(::NumericalFluxFirstOrder, ::Impenetrable{NoSlip}, ::SWModel) apply no slip boundary condition for velocity sets reflective ghost point """ @inline function ocean_boundary_state!( ::NumericalFluxFirstOrder, ::Impenetrable{NoSlip}, ::SWModel, ::TurbulenceClosure, q⁺, α⁺, n⁻, q⁻, α⁻, t, args..., ) q⁺.η = q⁻.η q⁺.U = -q⁻.U return nothing end """ ocean_boundary_state!(::NumericalFluxGradient, ::Impenetrable{NoSlip}, ::SWModel) apply no slip boundary condition for velocity set numerical flux to zero for U """ @inline function ocean_boundary_state!( ::NumericalFluxGradient, ::Impenetrable{NoSlip}, ::SWModel, ::ConstantViscosity, q⁺, α⁺, n⁻, q⁻, α⁻, t, args..., ) FT = eltype(q⁺) q⁺.U = @SVector zeros(FT, 2) return nothing end """ ocean_boundary_state!(::NumericalFluxSecondOrder, ::Impenetrable{NoSlip}, ::SWModel) apply no slip boundary condition for velocity sets ghost point to have no numerical flux on the boundary for U """ @inline function ocean_boundary_state!( ::NumericalFluxSecondOrder, ::Impenetrable{NoSlip}, ::SWModel, ::ConstantViscosity, q⁺, σ⁺, α⁺, n⁻, q⁻, σ⁻, α⁻, t, args..., ) q⁺.U = -q⁻.U σ⁺.ν∇U = σ⁻.ν∇U return nothing end """ ocean_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Penetrable{FreeSlip}, ::SWModel) no mass boundary condition for penetrable """ ocean_boundary_state!( ::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Penetrable{FreeSlip}, ::SWModel, ::ConstantViscosity, _..., ) = nothing """ ocean_boundary_state!(::NumericalFluxSecondOrder, 
::Penetrable{FreeSlip}, ::SWModel)

apply free slip boundary condition for velocity
apply zero numerical flux in the normal direction
"""
function ocean_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Penetrable{FreeSlip},
    ::SWModel,
    ::ConstantViscosity,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
    args...,
)
    Q⁺.U = Q⁻.U
    # zero viscous flux through the boundary
    D⁺.ν∇U = n⁻ * (@SVector [-0, -0])'

    return nothing
end

"""
    ocean_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
                          ::Impenetrable{KinematicStress}, ::SWModel)

apply kinematic stress boundary condition for velocity
applies free slip conditions for first-order and gradient fluxes
"""
function ocean_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::Impenetrable{<:KinematicStress},
    shallow::SWModel,
    turb::TurbulenceClosure,
    args...,
)
    # the stress only enters the second-order flux; these fluxes are free slip
    return ocean_boundary_state!(
        nf,
        Impenetrable(FreeSlip()),
        shallow,
        turb,
        args...,
    )
end

"""
    ocean_boundary_state!(::NumericalFluxSecondOrder,
                          ::Impenetrable{KinematicStress}, ::SWModel)

apply kinematic stress boundary condition for velocity
sets ghost point to have specified flux on the boundary for ν∇U
"""
@inline function ocean_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Impenetrable{<:KinematicStress},
    shallow::SWModel,
    ::TurbulenceClosure,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    # Fix: accept the turbulence-closure argument. The dispatcher
    # `ocean_boundary_state!(nf, bc.velocity, m, m.turbulence, args...)`
    # always passes `m.turbulence` in this slot (as every other
    # second-order method here expects), so without it this method
    # could never be called.
    Q⁺.U = Q⁻.U
    D⁺.ν∇U = n⁻ * kinematic_stress(shallow.problem, A⁻.y, 1000)'
    # applies windstress for now, will be fixed in a later PR
    # NOTE(review): 3-argument `kinematic_stress` (1000 presumably a density)
    # is not defined for `SimpleBox` in this file — confirm against the
    # problem types used with this BC.

    return nothing
end

"""
    ocean_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
                          ::Penetrable{KinematicStress}, ::SWModel)

apply kinematic stress boundary condition for velocity
applies free slip conditions for first-order and gradient fluxes
"""
function ocean_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::Penetrable{<:KinematicStress},
    shallow::SWModel,
    turb::TurbulenceClosure,
    args...,
)
    return ocean_boundary_state!(
        nf,
        Penetrable(FreeSlip()),
        shallow,
        turb,
        args...,
    )
end

"""
    ocean_boundary_state!(::NumericalFluxSecondOrder,
::Penetrable{KinematicStress}, ::SWModel)

apply kinematic stress boundary condition for velocity
sets ghost point to have specified flux on the boundary for ν∇U
"""
@inline function ocean_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Penetrable{<:KinematicStress},
    shallow::SWModel,
    ::TurbulenceClosure,
    Q⁺,
    D⁺,
    A⁺,
    n⁻,
    Q⁻,
    D⁻,
    A⁻,
    t,
)
    # Fix: the shallow water model's prognostic velocity is `U` and its
    # gradient flux is `ν∇U` (see vars_state(::SWModel, ...)); the lowercase
    # `u` / `ν∇u` fields copied from the HBModel version do not exist here.
    Q⁺.U = Q⁻.U
    D⁺.ν∇U = n⁻ * kinematic_stress(shallow.problem, A⁻.y, 1000)'
    # applies windstress for now, will be fixed in a later PR

    return nothing
end

================================================
FILE: src/Ocean/SplitExplicit/Communication.jl
================================================

@inline function initialize_states!(
    baroclinic::HBModel{C},
    barotropic::SWModel{C},
    model_bc,
    model_bt,
    state_bc,
    state_bt,
) where {C <: Coupled}
    model_bc.state_auxiliary.ΔGᵘ .= -0

    return nothing
end

@inline function tendency_from_slow_to_fast!(
    baroclinic::HBModel{C},
    barotropic::SWModel{C},
    model_bc,
    model_bt,
    state_bc,
    state_bt,
    forcing_tendency,
) where {C <: Coupled}
    FT = eltype(state_bc)

    info = basic_grid_info(model_bc)
    Nqh, Nqk = info.Nqh, info.Nqk
    nelemv, nelemh = info.nvertelem, info.nhorzelem
    nrealelemh = info.nhorzrealelem

    #### integrate the tendency
    model_int = model_bc.modeldata.integral_model
    integral = model_int.balance_law
    update_auxiliary_state!(model_int, integral, forcing_tendency, 0)

    ### properly shape MPIStateArrays
    num_aux_int = number_states(integral, Auxiliary())
    data_int = model_int.state_auxiliary.data
    data_int = reshape(data_int, Nqh, Nqk, num_aux_int, nelemv, nelemh)

    num_aux_bt = number_states(barotropic, Auxiliary())
    data_bt = model_bt.state_auxiliary.data
    data_bt = reshape(data_bt, Nqh, num_aux_bt, nelemh)

    num_aux_bc = number_states(baroclinic, Auxiliary())
    data_bc = model_bc.state_auxiliary.data
    data_bc = reshape(data_bc, Nqh, Nqk, num_aux_bc, nelemv, nelemh)

    ### get vars indices
    index_∫du = varsindex(vars_state(integral, Auxiliary(), FT), :(∫x))
    index_Gᵁ = varsindex(vars_state(barotropic, Auxiliary(), FT), :Gᵁ)
index_ΔGᵘ = varsindex(vars_state(baroclinic, Auxiliary(), FT), :ΔGᵘ) ### get top value (=integral over full depth) of ∫du ∫du = @view data_int[:, end, index_∫du, end, 1:nrealelemh] ### copy into Gᵁ of barotropic model Gᵁ = @view data_bt[:, index_Gᵁ, 1:nrealelemh] Gᵁ .= ∫du ### get top value (=integral over full depth) of ∫du ∫du = @view data_int[:, end:end, index_∫du, end:end, 1:nrealelemh] ### save vertically averaged tendency to remove from 3D tendency ### need to reshape for the broadcast ΔGᵘ = @view data_bc[:, :, index_ΔGᵘ, :, 1:nrealelemh] ΔGᵘ .-= ∫du / baroclinic.problem.H return nothing end @inline function cummulate_fast_solution!( baroclinic::HBModel{C}, barotropic::SWModel{C}, model_bt, state_bt, fast_time, fast_dt, substep, ) where {C <: Coupled} return nothing end @inline function reconcile_from_fast_to_slow!( baroclinic::HBModel{C}, barotropic::SWModel{C}, model_bc, model_bt, state_bc, state_bt, ) where {C <: Coupled} FT = eltype(state_bc) info = basic_grid_info(model_bc) Nqh, Nqk = info.Nqh, info.Nqk nelemv, nelemh = info.nvertelem, info.nhorzelem nrealelemh = info.nhorzrealelem ### integrate the horizontal velocity model_int = model_bc.modeldata.integral_model integral = model_int.balance_law update_auxiliary_state!(model_int, integral, state_bc, 0) ### properly shape MPIStateArrays num_aux_int = number_states(integral, Auxiliary()) data_int = model_int.state_auxiliary.data data_int = reshape(data_int, Nqh, Nqk, num_aux_int, nelemv, nelemh) num_aux_bt = number_states(barotropic, Auxiliary()) data_bt_aux = model_bt.state_auxiliary.data data_bt_aux = reshape(data_bt_aux, Nqh, num_aux_bt, nelemh) num_state_bt = number_states(barotropic, Prognostic()) data_bt_state = state_bt.data data_bt_state = reshape(data_bt_state, Nqh, num_state_bt, nelemh) num_state_bc = number_states(baroclinic, Prognostic()) data_bc_state = state_bc.data data_bc_state = reshape(data_bc_state, Nqh, Nqk, num_state_bc, nelemv, nelemh) ### get vars indices index_∫u = 
varsindex(vars_state(integral, Auxiliary(), FT), :(∫x)) index_Δu = varsindex(vars_state(barotropic, Auxiliary(), FT), :Δu) index_U = varsindex(vars_state(barotropic, Prognostic(), FT), :U) index_u = varsindex(vars_state(baroclinic, Prognostic(), FT), :u) index_η_3D = varsindex(vars_state(baroclinic, Prognostic(), FT), :η) index_η_2D = varsindex(vars_state(barotropic, Prognostic(), FT), :η) ### get top value (=integral over full depth) ∫u = @view data_int[:, end, index_∫u, end, 1:nrealelemh] ### Δu is a place holder for 1/H * (Ū - ∫u) Δu = @view data_bt_aux[:, index_Δu, 1:nrealelemh] U = @view data_bt_state[:, index_U, 1:nrealelemh] Δu .= 1 / baroclinic.problem.H * (U - ∫u) ### copy the 2D contribution down the 3D solution ### need to reshape for the broadcast data_bt_aux = reshape(data_bt_aux, Nqh, 1, num_aux_bt, 1, nelemh) Δu = @view data_bt_aux[:, :, index_Δu, :, 1:nrealelemh] u = @view data_bc_state[:, :, index_u, :, 1:nrealelemh] u .+= Δu ### copy η from barotropic mode to baroclinic mode ### need to reshape for the broadcast data_bt_state = reshape(data_bt_state, Nqh, 1, num_state_bt, 1, nelemh) η_2D = @view data_bt_state[:, :, index_η_2D, :, 1:nrealelemh] η_3D = @view data_bc_state[:, :, index_η_3D, :, 1:nrealelemh] η_3D .= η_2D return nothing end ================================================ FILE: src/Ocean/SplitExplicit/HydrostaticBoussinesqCoupling.jl ================================================ using ..Ocean: coriolis_parameter using ..HydrostaticBoussinesq using ...DGMethods import ...BalanceLaws import ..HydrostaticBoussinesq: viscosity_tensor, coriolis_force!, velocity_gradient_argument!, velocity_gradient_flux!, hydrostatic_pressure!, compute_flow_deviation! 
@inline function velocity_gradient_argument!(m::HBModel, ::Coupled, G, Q, A, t) G.∇u = Q.u G.∇uᵈ = A.uᵈ return nothing end @inline function velocity_gradient_flux!(m::HBModel, ::Coupled, D, G, Q, A, t) ν = viscosity_tensor(m) ∇u = @SMatrix [ G.∇uᵈ[1, 1] G.∇uᵈ[1, 2] G.∇uᵈ[2, 1] G.∇uᵈ[2, 2] G.∇u[3, 1] G.∇u[3, 2] ] D.ν∇u = -ν * ∇u return nothing end @inline hydrostatic_pressure!(::HBModel, ::Coupled, _...) = nothing @inline function coriolis_force!(m::HBModel, ::Coupled, S, Q, A, t) # f × u f = coriolis_parameter(m, m.problem, A.y) uᵈ, vᵈ = A.uᵈ # Horizontal components of velocity S.u -= @SVector [-f * vᵈ, f * uᵈ] return nothing end # Compute Horizontal Flow deviation from vertical mean @inline function compute_flow_deviation!(dg, m::HBModel, ::Coupled, Q, t) FT = eltype(Q) info = basic_grid_info(dg) Nqh, Nqk = info.Nqh, info.Nqk nelemv, nelemh = info.nvertelem, info.nhorzelem nrealelemh = info.nhorzrealelem #### integrate the tendency model_int = dg.modeldata.integral_model integral = model_int.balance_law update_auxiliary_state!(model_int, integral, Q, 0) ### properly shape MPIStateArrays num_int = number_states(integral, Auxiliary()) data_int = model_int.state_auxiliary.data data_int = reshape(data_int, Nqh, Nqk, num_int, nelemv, nelemh) num_aux = number_states(m, Auxiliary()) data_aux = dg.state_auxiliary.data data_aux = reshape(data_aux, Nqh, Nqk, num_aux, nelemv, nelemh) num_state = number_states(m, Prognostic()) data_state = reshape(Q.data, Nqh, Nqk, num_state, nelemv, nelemh) ### get vars indices index_∫u = varsindex(vars_state(integral, Auxiliary(), FT), :(∫x)) index_uᵈ = varsindex(vars_state(m, Auxiliary(), FT), :uᵈ) index_u = varsindex(vars_state(m, Prognostic(), FT), :u) ### get top value (=integral over full depth) ∫u = @view data_int[:, end:end, index_∫u, end:end, 1:nrealelemh] uᵈ = @view data_aux[:, :, index_uᵈ, :, 1:nrealelemh] u = @view data_state[:, :, index_u, :, 1:nrealelemh] ## make a copy of horizontal velocity ## and remove vertical mean 
velocity uᵈ .= u uᵈ .-= ∫u / m.problem.H return nothing end ================================================ FILE: src/Ocean/SplitExplicit/ShallowWaterCoupling.jl ================================================ import ..ShallowWater: forcing_term! @inline function forcing_term!(::SWModel, ::Coupled, S, Q, A, t) S.U += A.Gᵁ return nothing end ================================================ FILE: src/Ocean/SplitExplicit/SplitExplicitModel.jl ================================================ module SplitExplicit using StaticArrays using ..Ocean using ..HydrostaticBoussinesq using ..ShallowWater using ...VariableTemplates using ...MPIStateArrays using ...Mesh.Geometry using ...DGMethods using ...BalanceLaws import ...BalanceLaws: initialize_states!, tendency_from_slow_to_fast!, cummulate_fast_solution!, reconcile_from_fast_to_slow!, boundary_conditions HBModel = HydrostaticBoussinesqModel SWModel = ShallowWaterModel function initialize_states!( ::HBModel{C}, ::SWModel{C}, _..., ) where {C <: Uncoupled} return nothing end function tendency_from_slow_to_fast!( ::HBModel{C}, ::SWModel{C}, _..., ) where {C <: Uncoupled} return nothing end function cummulate_fast_solution!( ::HBModel{C}, ::SWModel{C}, _..., ) where {C <: Uncoupled} return nothing end function reconcile_from_fast_to_slow!( ::HBModel{C}, ::SWModel{C}, _..., ) where {C <: Uncoupled} return nothing end include("VerticalIntegralModel.jl") include("Communication.jl") include("ShallowWaterCoupling.jl") include("HydrostaticBoussinesqCoupling.jl") end ================================================ FILE: src/Ocean/SplitExplicit/VerticalIntegralModel.jl ================================================ import ...BalanceLaws: vars_state, init_state_prognostic!, init_state_auxiliary!, update_auxiliary_state!, integral_load_auxiliary_state!, integral_set_auxiliary_state! 
struct VerticalIntegralModel{M} <: BalanceLaw ocean::M function VerticalIntegralModel(ocean::M) where {M} return new{M}(ocean) end end vars_state(tm::VerticalIntegralModel, st::Prognostic, FT) = vars_state(tm.ocean, st, FT) function vars_state(m::VerticalIntegralModel, ::Auxiliary, T) @vars begin ∫x::SVector{2, T} end end init_state_auxiliary!( tm::VerticalIntegralModel, A::MPIStateArray, grid, direction, ) = nothing function vars_state(m::VerticalIntegralModel, ::UpwardIntegrals, T) @vars begin ∫x::SVector{2, T} end end @inline function integral_load_auxiliary_state!( m::VerticalIntegralModel, I::Vars, Q::Vars, A::Vars, ) I.∫x = A.∫x return nothing end @inline function integral_set_auxiliary_state!( m::VerticalIntegralModel, A::Vars, I::Vars, ) A.∫x = I.∫x return nothing end function update_auxiliary_state!( dg::DGModel, tm::VerticalIntegralModel, x::MPIStateArray, t::Real, ) A = dg.state_auxiliary # copy tendency vector to aux state for integration function f!(::VerticalIntegralModel, x, A, t) @inbounds begin A.∫x = @SVector [x.u[1], x.u[2]] end return nothing end update_auxiliary_state!(f!, dg, tm, x, t) # compute integral for Gᵁ indefinite_stack_integral!(dg, tm, x, A, t) # bottom -> top return true end ================================================ FILE: src/Ocean/SplitExplicit01/BarotropicModel.jl ================================================ struct BarotropicModel{M} <: AbstractOceanModel baroclinic::M function BarotropicModel(baroclinic::M) where {M} return new{M}(baroclinic) end end parameter_set(m::BarotropicModel) = parameter_set(m.baroclinic) function vars_state(m::BarotropicModel, ::Prognostic, T) @vars begin U::SVector{2, T} η::T end end function init_state_prognostic!( m::BarotropicModel, Q::Vars, A::Vars, localgeo, t, ) Q.U = @SVector [-0, -0] Q.η = -0 return nothing end function vars_state(m::BarotropicModel, ::Auxiliary, T) @vars begin Gᵁ::SVector{2, T} # integral of baroclinic tendency U_c::SVector{2, T} # cumulate U value over fast 
time-steps η_c::T # cumulate η value over fast time-steps U_s::SVector{2, T} # starting U field value η_s::T # starting η field value Δu::SVector{2, T} # reconciliation adjustment to u, Δu = 1/H * (U_averaged - ∫u) η_diag::T # η from baroclinic model (for diagnostic) Δη::T # diagnostic difference: η_barotropic - η_baroclinic y::T # y-coordinate of grid end end function init_state_auxiliary!( m::BarotropicModel, state_aux::MPIStateArray, grid, direction, ) init_state_auxiliary!( m, (m, A, tmp, geom) -> ocean_init_aux!(m, m.baroclinic.problem, A, geom), state_aux, grid, direction, ) end function vars_state(m::BarotropicModel, ::Gradient, T) @vars begin U::SVector{2, T} end end @inline function compute_gradient_argument!( m::BarotropicModel, G::Vars, Q::Vars, A, t, ) G.U = Q.U return nothing end function vars_state(m::BarotropicModel, ::GradientFlux, T) @vars begin ν∇U::SMatrix{3, 2, T, 6} end end @inline function compute_gradient_flux!( m::BarotropicModel, D::Vars, G::Grad, Q::Vars, A::Vars, t, ) ν = viscosity_tensor(m) D.ν∇U = -ν * G.U return nothing end @inline function viscosity_tensor(bm::BarotropicModel) m = bm.baroclinic return Diagonal(@SVector [m.νʰ, m.νʰ, 0]) end vars_state(m::BarotropicModel, ::UpwardIntegrals, T) = @vars() vars_state(m::BarotropicModel, ::DownwardIntegrals, T) = @vars() @inline function flux_first_order!( m::BarotropicModel, F::Grad, Q::Vars, A::Vars, t::Real, direction, ) @inbounds begin U = @SVector [Q.U[1], Q.U[2], 0] η = Q.η H = m.baroclinic.problem.H g = m.baroclinic.grav Iʰ = @SMatrix [ 1 0 0 1 0 0 ] F.η += U F.U += g * H * η * Iʰ end end @inline function flux_second_order!( m::BarotropicModel, F::Grad, Q::Vars, D::Vars, HD::Vars, A::Vars, t::Real, ) # numerical diffusivity for stability F.U += D.ν∇U return nothing end @inline function source!( m::BarotropicModel, S::Vars, Q::Vars, D::Vars, A::Vars, t::Real, direction, ) @inbounds begin U = Q.U # f × u f = coriolis_force(m.baroclinic, A.y) S.U -= @SVector [-f * U[2], f * U[1]] # 
vertically integrated baroclinic model tendency S.U += A.Gᵁ end end @inline wavespeed(m::BarotropicModel, n⁻, _...) = abs(SVector(m.baroclinic.cʰ, m.baroclinic.cʰ, m.baroclinic.cᶻ)' * n⁻) # We want not have jump penalties on η (since not a flux variable) function update_penalty!( ::RusanovNumericalFlux, ::BarotropicModel, n⁻, λ, ΔQ::Vars, Q⁻, A⁻, Q⁺, A⁺, t, ) ΔQ.η = -0 return nothing end boundary_conditions(bm::BarotropicModel) = (bm.baroclinic.problem.boundary_conditions[1],) """ boundary_state!(nf, bc, ::BarotropicModel, args...) applies boundary conditions for this model dispatches to a function in OceanBoundaryConditions.jl based on BC type defined by a problem such as SimpleBoxProblem.jl """ @inline function boundary_state!(nf, bc, bm::BarotropicModel, args...) return ocean_model_boundary!(bm, bc, nf, args...) end ================================================ FILE: src/Ocean/SplitExplicit01/Communication.jl ================================================ import ...BalanceLaws: tendency_from_slow_to_fast!, cummulate_fast_solution!, reconcile_from_fast_to_slow! using Printf @inline function set_fast_for_stepping!( slow::OceanModel, fast::BarotropicModel, dgFast, Qfast, S_fast, slow_dt, rkC, rkW, s, nStages, fast_time_rec, fast_steps, ) FT = typeof(slow_dt) #- inverse ratio of additional fast time steps (for weighted average) # --> do 1/add more time-steps and average from: 1 - 1/add up to: 1 + 1/add add = slow.add_fast_substeps #- set time-step # Warning: only make sense for LS3NRK33Heuns # where 12 is lowest common mutiple (LCM) of all RK-Coeff inverse fast_dt = fast_time_rec[1] steps = fast_dt > 0 ? ceil(Int, slow_dt / fast_dt / FT(12)) : 1 ntsFull = 12 * steps fast_dt = slow_dt / ntsFull add = add > 0 ? 
floor(Int, ntsFull / add) : 0 #- time to start fast time-stepping (fast_time_rec[3]) for this stage: if s == nStages # Warning: only works with few RK-scheme such as LS3NRK33Heuns fast_time_rec[3] = rkW[1] fract_dt = (1 - fast_time_rec[3]) fast_time_rec[3] *= slow_dt fast_steps[2] = 1 else fast_time_rec[3] = 0.0 fract_dt = rkC[s + 1] - fast_time_rec[3] fast_steps[2] = 0 end #- set number of sub-steps we need # will time-average fast over: fast_steps[1] , fast_steps[3] # centered on fract_dt*slow_dt which corresponds to advance in time of slow steps = ceil(Int, fract_dt * slow_dt / fast_dt) add = min(add, steps - 1) fast_steps[1] = steps - add fast_steps[3] = steps + add fast_time_rec[1] = fract_dt * slow_dt / steps #- select which fast time-step (fast_steps[2]) solution to save for next time-step # Warning: only works with few RK-scheme such as LS3NRK33Heuns fast_steps[2] *= steps if s == 1 fast_steps[2] = round(Int, ntsFull * rkW[1]) end # @printf("Update @ s= %i : frac_dt = %.6f , dt_fast = %.1f , steps= %i , add= %i\n", # s, fract_dt, fast_time_rec[1], steps, add) # println(" fast_time_rec = ",fast_time_rec) # println(" fast_steps = ",fast_steps) # set starting point for fast-state solution # Warning: only works with few RK-scheme such as LS3NRK33Heuns if s == 1 S_fast.η .= Qfast.η S_fast.U .= Qfast.U elseif s == nStages Qfast.η .= dgFast.state_auxiliary.η_s Qfast.U .= dgFast.state_auxiliary.U_s else Qfast.η .= S_fast.η Qfast.U .= S_fast.U end # initialise cumulative arrays fast_time_rec[2] = 0.0 dgFast.state_auxiliary.η_c .= -0 dgFast.state_auxiliary.U_c .= (@SVector [-0, -0])' return nothing end @inline function initialize_fast_state!( slow::OceanModel, fast::BarotropicModel, dgSlow, dgFast, Qslow, Qfast, slow_dt, fast_time_rec, fast_steps; firstStage = false, ) #- inverse ratio of additional fast time steps (for weighted average) # --> do 1/add more time-steps and average from: 1 - 1/add up to: 1 + 1/add add = slow.add_fast_substeps #- set time-step and 
number of sub-steps we need # will time-average fast over: fast_steps[1] , fast_steps[3] # centered on fast_steps[2] which corresponds to advance in time of slow fast_dt = fast_time_rec[1] if add == 0 steps = fast_dt > 0 ? ceil(Int, slow_dt / fast_dt) : 1 fast_steps[1:3] = [1 1 1] * steps else steps = fast_dt > 0 ? ceil(Int, slow_dt / fast_dt / add) : 1 fast_steps[2] = add * steps fast_steps[1] = (add - 1) * steps fast_steps[3] = (add + 1) * steps end fast_time_rec[1] = slow_dt / fast_steps[2] fast_time_rec[2] = 0.0 # @printf("Update: frac_dt = %.1f , dt_fast = %.1f , nsubsteps= %i\n", # slow_dt,fast_time_rec[1],fast_steps[3]) # println(" fast_steps = ",fast_steps) dgFast.state_auxiliary.η_c .= -0 dgFast.state_auxiliary.U_c .= (@SVector [-0, -0])' # set fast-state to previously stored value if !firstStage Qfast.η .= dgFast.state_auxiliary.η_s Qfast.U .= dgFast.state_auxiliary.U_s end return nothing end @inline function initialize_adjustment!( slow::OceanModel, fast::BarotropicModel, dgSlow, dgFast, Qslow, Qfast, ) ## reset tendency adjustment before calling Baroclinic Model dgSlow.state_auxiliary.ΔGu .= 0 return nothing end @inline function tendency_from_slow_to_fast!( slow::OceanModel, fast::BarotropicModel, dgSlow, dgFast, Qslow, Qfast, dQslow2fast, ) FT = eltype(Qslow) # integrate the tendency tendency_dg = dgSlow.modeldata.tendency_dg tend = tendency_dg.balance_law grid = dgSlow.grid elems = grid.topology.elems update_auxiliary_state!(tendency_dg, tend, dQslow2fast, 0, elems) info = basic_grid_info(dgSlow) Nqh, Nqk = info.Nqh, info.Nqk nelemv, nelemh = info.nvertelem, info.nhorzelem nrealelemh = info.nhorzrealelem ## get top value (=integral over full depth) of ∫du nb_aux_tnd = number_states(tend, Auxiliary()) data_tnd = reshape( tendency_dg.state_auxiliary.data, Nqh, Nqk, nb_aux_tnd, nelemv, nelemh, ) index_∫du = varsindex(vars_state(tend, Auxiliary(), FT), :∫du) flat_∫du = @view data_tnd[:, end, index_∫du, end, 1:nrealelemh] ## copy into Gᵁ of dgFast 
nb_aux_fst = number_states(fast, Auxiliary()) data_fst = reshape(dgFast.state_auxiliary.data, Nqh, nb_aux_fst, nelemh) index_Gᵁ = varsindex(vars_state(fast, Auxiliary(), FT), :Gᵁ) boxy_Gᵁ = @view data_fst[:, index_Gᵁ, 1:nrealelemh] boxy_Gᵁ .= flat_∫du ## scale by -1/H and copy back to ΔGu # note: since tendency_dg.state_auxiliary.∫du is not used after this, could be # re-used to store a 3-D copy of "-Gu" nb_aux_slw = number_states(slow, Auxiliary()) data_slw = reshape( dgSlow.state_auxiliary.data, Nqh, Nqk, nb_aux_slw, nelemv, nelemh, ) index_ΔGu = varsindex(vars_state(slow, Auxiliary(), FT), :ΔGu) boxy_ΔGu = @view data_slw[:, :, index_ΔGu, :, 1:nrealelemh] boxy_∫du = @view data_tnd[:, end:end, index_∫du, end:end, 1:nrealelemh] boxy_ΔGu .= -boxy_∫du / slow.problem.H return nothing end @inline function cummulate_fast_solution!( fast::BarotropicModel, dgFast, Qfast, fast_time, fast_dt, substep, fast_steps, fast_time_rec, ) #- might want to use some of the weighting factors: weights_η & weights_U #- should account for case where fast_dt < fast.param.dt # cumulate Fast solution: if substep >= fast_steps[1] dgFast.state_auxiliary.U_c .+= Qfast.U dgFast.state_auxiliary.η_c .+= Qfast.η fast_time_rec[2] += 1.0 end # save mid-point solution to start from the next time-step if substep == fast_steps[2] dgFast.state_auxiliary.U_s .= Qfast.U dgFast.state_auxiliary.η_s .= Qfast.η end return nothing end @inline function reconcile_from_fast_to_slow!( slow::OceanModel, fast::BarotropicModel, dgSlow, dgFast, Qslow, Qfast, fast_time_rec; lastStage = false, ) FT = eltype(Qslow) info = basic_grid_info(dgSlow) Nqh, Nqk = info.Nqh, info.Nqk nelemv, nelemh = info.nvertelem, info.nhorzelem nrealelemh = info.nhorzrealelem grid = dgSlow.grid elems = grid.topology.elems # need to calculate int_u using integral kernels # u_slow := u_slow + (1/H) * (u_fast - \int_{-H}^{0} u_slow) ## get time weighted averaged out of cumulative arrays dgFast.state_auxiliary.U_c .*= 1 / fast_time_rec[2] 
dgFast.state_auxiliary.η_c .*= 1 / fast_time_rec[2] # @printf(" reconcile_from_fast_to_slow! @ s= %i : time_Count = %6.3f\n", # 0, fast_time_rec[2]) # # s, fast_time_rec[2]) ## Compute: \int_{-H}^{0} u_slow # integrate vertically horizontal velocity flowintegral_dg = dgSlow.modeldata.flowintegral_dg flowint = flowintegral_dg.balance_law update_auxiliary_state!(flowintegral_dg, flowint, Qslow, 0, elems) # get top value (=integral over full depth) nb_aux_flw = number_states(flowint, Auxiliary()) data_flw = reshape( flowintegral_dg.state_auxiliary.data, Nqh, Nqk, nb_aux_flw, nelemv, nelemh, ) index_∫u = varsindex(vars_state(flowint, Auxiliary(), FT), :∫u) flat_∫u = @view data_flw[:, end, index_∫u, end, 1:nrealelemh] ## substract ∫u from U and divide by H # Δu is a place holder for 1/H * (Ū - ∫u) Δu = dgFast.state_auxiliary.Δu Δu .= dgFast.state_auxiliary.U_c nb_aux_fst = number_states(fast, Auxiliary()) data_fst = reshape(dgFast.state_auxiliary.data, Nqh, nb_aux_fst, nelemh) index_Δu = varsindex(vars_state(fast, Auxiliary(), FT), :Δu) boxy_Δu = @view data_fst[:, index_Δu, 1:nrealelemh] boxy_Δu .-= flat_∫u boxy_Δu ./= slow.problem.H ## apply the 2D correction to the 3D solution nb_cons_slw = number_states(slow, Prognostic()) data_slw = reshape(Qslow.data, Nqh, Nqk, nb_cons_slw, nelemv, nelemh) index_u = varsindex(vars_state(slow, Prognostic(), FT), :u) boxy_u = @view data_slw[:, :, index_u, :, 1:nrealelemh] boxy_u .+= reshape(boxy_Δu, Nqh, 1, 2, 1, nrealelemh) ## save Eta from 3D model into η_diag (aux var of 2D model) ## and store difference between η from Barotropic Model and η_diag ## Note: since 3D η is not used (just in output), only do this at last stage ## (save computation and get Δη diagnose over full time-step) if lastStage index_η = varsindex(vars_state(slow, Prognostic(), FT), :η) boxy_η_3D = @view data_slw[:, :, index_η, :, 1:nrealelemh] flat_η = @view data_slw[:, end, index_η, end, 1:nrealelemh] index_η_diag = varsindex(vars_state(fast, Auxiliary(), FT), 
:η_diag) boxy_η_diag = @view data_fst[:, index_η_diag, 1:nrealelemh] boxy_η_diag .= flat_η dgFast.state_auxiliary.Δη .= dgFast.state_auxiliary.η_c - dgFast.state_auxiliary.η_diag ## copy 2D model Eta over to 3D model index_η_c = varsindex(vars_state(fast, Auxiliary(), FT), :η_c) boxy_η_2D = @view data_fst[:, index_η_c, 1:nrealelemh] boxy_η_3D .= reshape(boxy_η_2D, Nqh, 1, 1, 1, nrealelemh) # reset fast-state to end of time-step value Qfast.η .= dgFast.state_auxiliary.η_s Qfast.U .= dgFast.state_auxiliary.U_s end return nothing end ================================================ FILE: src/Ocean/SplitExplicit01/Continuity3dModel.jl ================================================ struct Continuity3dModel{M} <: AbstractOceanModel ocean::M function Continuity3dModel(ocean::M) where {M} return new{M}(ocean) end end vars_state(cm::Continuity3dModel, ::Prognostic, FT) = vars_state(cm.ocean, Prognostic(), FT) # Continuity3dModel is used to compute the horizontal divergence of u vars_state(cm::Continuity3dModel, ::Auxiliary, T) = @vars() vars_state(cm::Continuity3dModel, ::Gradient, T) = @vars() vars_state(cm::Continuity3dModel, ::GradientFlux, T) = @vars() vars_state(cm::Continuity3dModel, ::UpwardIntegrals, T) = @vars() init_state_auxiliary!(cm::Continuity3dModel, _...) = nothing init_state_prognostic!(cm::Continuity3dModel, _...) = nothing @inline flux_second_order!(cm::Continuity3dModel, _...) = nothing @inline source!(cm::Continuity3dModel, _...) = nothing @inline update_penalty!(::RusanovNumericalFlux, ::Continuity3dModel, _...) 
= nothing # This allows the balance law framework to compute the horizontal gradient of u # (which will be stored back in the field θ) @inline function flux_first_order!( m::Continuity3dModel, flux::Grad, state::Vars, aux::Vars, t::Real, direction, ) @inbounds begin u = state.u # Horizontal components of velocity v = @SVector [u[1], u[2], -0] # ∇ • (v) # Just using θ to store w = ∇h • u flux.θ += v end return nothing end # This is zero because when taking the horizontal gradient we're piggy-backing # on θ and want to ensure we do not use it's jump @inline wavespeed(cm::Continuity3dModel, n⁻, _...) = -zero(eltype(n⁻)) boundary_conditions(cm::Continuity3dModel) = ( cm.ocean.problem.boundary_conditions[1], cm.ocean.problem.boundary_conditions[1], cm.ocean.problem.boundary_conditions[1], ) boundary_state!( ::Union{NumericalFluxGradient, NumericalFluxSecondOrder}, bc, cm::Continuity3dModel, _..., ) = nothing """ boundary_state!(nf, bc, ::Continuity3dModel, args...) applies boundary conditions for the hyperbolic fluxes dispatches to a function in OceanBoundaryConditions.jl based on BC type defined by a problem such as SimpleBoxProblem.jl """ @inline function boundary_state!( nf::NumericalFluxFirstOrder, bc, cm::Continuity3dModel, args..., ) return ocean_model_boundary!(cm, bc, nf, args...) end ================================================ FILE: src/Ocean/SplitExplicit01/IVDCModel.jl ================================================ # Linear model equations, for split-explicit ocean model implicit vertical diffusion # convective adjustment step. # # In this version the operator is tweked to be the indentity for testing """ IVDCModel{M} <: BalanceLaw This code defines DG `BalanceLaw` terms for an operator, L, that is evaluated from iterative implicit solver to solve an equation of the form (L + 1/Δt) ϕ^{n+1} = ϕ^{n}/Δt where L is a vertical diffusion operator with a spatially varying diffusion coefficient. # Usage parent_model = OceanModel{FT}(prob...) 
linear_model = IVDCModel( parent_model ) """ # Create a new child linear model instance, attached to whatever parent # BalanceLaw instantiates this. # (Not sure we need parent, but maybe we will get some parameters from it) struct IVDCModel{M} <: AbstractOceanModel parent_om::M function IVDCModel(parent_om::M;) where {M} return new{M}(parent_om) end end """ Set model state variables and operators """ # State variable and initial value, just one for now, θ vars_state(m::IVDCModel, ::Prognostic, FT) = @vars(θ::FT) function init_state_prognostic!(m::IVDCModel, Q::Vars, A::Vars, localgeo, t) @inbounds begin Q.θ = -0 end return nothing end vars_state(m::IVDCModel, ::Auxiliary, FT) = @vars(θ_init::FT) function init_state_auxiliary!(m::IVDCModel, A::Vars, _...) @inbounds begin A.θ_init = -0 end return nothing end # Variables and operations used in differentiating first derivatives vars_state(m::IVDCModel, ::Gradient, FT) = @vars(∇θ::FT, ∇θ_init::FT,) @inline function compute_gradient_argument!( m::IVDCModel, G::Vars, Q::Vars, A, t, ) G.∇θ = Q.θ G.∇θ_init = A.θ_init return nothing end # Variables and operations used in differentiating second derivatives vars_state(m::IVDCModel, ::GradientFlux, FT) = @vars(κ∇θ::SVector{3, FT}) @inline function compute_gradient_flux!( m::IVDCModel, D::Vars, G::Grad, Q::Vars, A::Vars, t, ) κ = diffusivity_tensor(m, G.∇θ_init[3]) D.κ∇θ = -κ * G.∇θ return nothing end @inline function diffusivity_tensor(m::IVDCModel, ∂θ∂z) κᶻ = m.parent_om.κᶻ * 0.5 κᶜ = m.parent_om.κᶜ ∂θ∂z < 0 ? κ = (@SVector [0, 0, κᶜ]) : κ = (@SVector [0, 0, κᶻ]) # ∂θ∂z <= 1e-10 ? 
κ = (@SVector [0, 0, κᶜ]) : κ = (@SVector [0, 0, κᶻ]) return Diagonal(-κ) end # Function to apply I to state variable @inline function source!( m::IVDCModel, S::Vars, Q::Vars, D::Vars, A::Vars, t, direction, ) #ivdc_dt = m.ivdc_dt ivdc_dt = m.parent_om.ivdc_dt @inbounds begin S.θ = Q.θ / ivdc_dt # S.θ = 0 end return nothing end ## Numerical fluxes and boundaries function flux_first_order!(::IVDCModel, _...) end function flux_second_order!( ::IVDCModel, F::Grad, S::Vars, D::Vars, H::Vars, A::Vars, t, ) F.θ += D.κ∇θ # F.θ = 0 end function wavespeed(m::IVDCModel, n⁻, _...) C = abs(SVector(m.parent_om.cʰ, m.parent_om.cʰ, m.parent_om.cᶻ)' * n⁻) # C = abs(SVector(m.parent_om.cʰ, m.parent_om.cʰ, 50)' * n⁻) # C = abs(SVector(1, 1, 1)' * n⁻) ### C = abs(SVector(10, 10, 10)' * n⁻) # C = abs(SVector(50, 50, 50)' * n⁻) # C = abs(SVector( 0, 0, 0)' * n⁻) return C end boundary_conditions(m::IVDCModel) = boundary_conditions(m.parent_om) function boundary_state!( nf::Union{ NumericalFluxFirstOrder, NumericalFluxGradient, CentralNumericalFluxGradient, }, bc, m::IVDCModel, Q⁺, A⁺, n, Q⁻, A⁻, t, _..., ) Q⁺.θ = Q⁻.θ return nothing end ### From - function numerical_boundary_flux_gradient! 
, DGMethods/NumericalFluxes.jl ### boundary_state!( ### numerical_flux, ### balance_law, ### state_conservative⁺, ### state_auxiliary⁺, ### normal_vector, ### state_conservative⁻, ### state_auxiliary⁻, ### bctype, ### t, ### state1⁻, ### aux1⁻, ### ) function boundary_state!( nf::Union{NumericalFluxSecondOrder, CentralNumericalFluxSecondOrder}, bctype, m::IVDCModel, Q⁺, D⁺, HD⁺, A⁺, n⁻, Q⁻, D⁻, HD⁻, A⁻, t, _..., ) Q⁺.θ = Q⁻.θ D⁺.κ∇θ = n⁻ * -0 # D⁺.κ∇θ = n⁻ * -0 + 7000 # D⁺.κ∇θ = -D⁻.κ∇θ return nothing end ### boundary_state!( ### numerical_flux, ### balance_law, ### state_conservative⁺, ### state_gradient_flux⁺, ### state_auxiliary⁺, ### normal_vector, ### state_conservative⁻, ### state_gradient_flux⁻, ### state_auxiliary⁻, ### bctype, ### t, ### state1⁻, ### diff1⁻, ### aux1⁻, ### ) ### boundary_flux_second_order!( ### numerical_flux, ### balance_law, ### Grad{S}(flux), ### state_conservative⁺, ### state_gradient_flux⁺, ### state_hyperdiffusive⁺, ### state_auxiliary⁺, ### normal_vector, ### state_conservative⁻, ### state_gradient_flux⁻, ### state_hyperdiffusive⁻, ### state_auxiliary⁻, ### bctype, ### t, ### state1⁻, ### diff1⁻, ### aux1⁻, ### ) ================================================ FILE: src/Ocean/SplitExplicit01/OceanBoundaryConditions.jl ================================================ abstract type OceanBoundaryCondition end """ Defining dummy structs to dispatch on for boundary conditions. 
""" struct CoastlineFreeSlip <: OceanBoundaryCondition end struct CoastlineNoSlip <: OceanBoundaryCondition end struct OceanFloorFreeSlip <: OceanBoundaryCondition end struct OceanFloorNoSlip <: OceanBoundaryCondition end struct OceanFloorLinearDrag <: OceanBoundaryCondition end struct OceanSurfaceNoStressNoForcing <: OceanBoundaryCondition end struct OceanSurfaceStressNoForcing <: OceanBoundaryCondition end struct OceanSurfaceNoStressForcing <: OceanBoundaryCondition end struct OceanSurfaceStressForcing <: OceanBoundaryCondition end # these functions just trim off the extra arguments function ocean_model_boundary!( model::AbstractOceanModel, bc, nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, Q⁺, A⁺, n, Q⁻, A⁻, t, _..., ) return ocean_boundary_state!(model, bc, nf, Q⁺, A⁺, n, Q⁻, A⁻, t) end function ocean_model_boundary!( model::AbstractOceanModel, bc, nf::NumericalFluxSecondOrder, Q⁺, D⁺, HD⁺, A⁺, n, Q⁻, D⁻, HD⁻, A⁻, t, _..., ) return ocean_boundary_state!(model, bc, nf, Q⁺, D⁺, A⁺, n, Q⁻, D⁻, A⁻, t) end """ CoastlineFreeSlip applies boundary condition ∇u = 0 and ∇θ = 0 """ """ ocean_boundary_state!(::AbstractOceanModel, ::CoastlineFreeSlip, ::NumericalFluxFirstOrder) apply free slip boundary conditions for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::CoastlineFreeSlip, ::NumericalFluxFirstOrder, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) u⁻ = Q⁻.u n = @SVector [n⁻[1], n⁻[2]] # Q⁺.u = u⁻ - 2 * (n⋅u⁻) * n Q⁺.u = u⁻ - 2 * (n ∘ u⁻) * n return nothing end @inline function ocean_boundary_state!( ::BarotropicModel, ::CoastlineFreeSlip, ::NumericalFluxFirstOrder, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) U⁻ = Q⁻.U n = @SVector [n⁻[1], n⁻[2]] # Q⁺.U = U⁻ - 2 * (n⋅U⁻) * n Q⁺.U = U⁻ - 2 * (n ∘ U⁻) * n return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::CoastlineFreeSlip, ::NumericalFluxGradient) apply free slip boundary conditions for velocity apply no penetration boundary for temperature """ @inline 
function ocean_boundary_state!( ::AbstractOceanModel, ::CoastlineFreeSlip, ::NumericalFluxGradient, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) u⁻ = Q⁻.u ud⁻ = A⁻.u_d n = @SVector [n⁻[1], n⁻[2]] # Q⁺.u = u⁻ - (n⋅u⁻) * n Q⁺.u = u⁻ - (n ∘ u⁻) * n A⁺.u_d = ud⁻ - (n ∘ ud⁻) * n return nothing end @inline function ocean_boundary_state!( ::BarotropicModel, ::CoastlineFreeSlip, ::NumericalFluxGradient, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) U⁻ = Q⁻.U n = @SVector [n⁻[1], n⁻[2]] # Q⁺.U = U⁻ - (n⋅U⁻) * n Q⁺.U = U⁻ - (n ∘ U⁻) * n return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::CoastlineFreeSlip, ::NumericalFluxSecondOrder) apply free slip boundary conditions for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::CoastlineFreeSlip, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) D⁺.ν∇u = n⁻ * (@SVector [-0, -0])' D⁺.κ∇θ = n⁻ * -0 return nothing end @inline function ocean_boundary_state!( ::BarotropicModel, ::CoastlineFreeSlip, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) D⁺.ν∇U = n⁻ * (@SVector [-0, -0])' return nothing end """ CoastlineNoSlip applies boundary condition u = 0 and ∇θ = 0 """ """ ocean_boundary_state!(::AbstractOceanModel, ::CoastlineNoSlip, ::NumericalFluxFirstOrder) apply no slip boundary condition for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::CoastlineNoSlip, ::NumericalFluxFirstOrder, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) Q⁺.u = -Q⁻.u return nothing end @inline function ocean_boundary_state!( ::BarotropicModel, ::CoastlineNoSlip, ::NumericalFluxFirstOrder, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) Q⁺.U = -Q⁻.U return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::CoastlineNoSlip, ::NumericalFluxGradient) apply no slip boundary condition for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::CoastlineNoSlip, ::NumericalFluxGradient, Q⁺, 
A⁺, n⁻, Q⁻, A⁻, t, ) FT = eltype(Q⁺) Q⁺.u = SVector(-zero(FT), -zero(FT)) A⁺.u_d = SVector(-zero(FT), -zero(FT)) return nothing end @inline function ocean_boundary_state!( ::BarotropicModel, ::CoastlineNoSlip, ::NumericalFluxGradient, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) FT = eltype(Q⁺) Q⁺.U = SVector(-zero(FT), -zero(FT)) return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::CoastlineNoSlip, ::NumericalFluxSecondOrder) apply no slip boundary condition for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::CoastlineNoSlip, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) D⁺.ν∇u = D⁻.ν∇u D⁺.κ∇θ = n⁻ * -0 return nothing end @inline function ocean_boundary_state!( ::BarotropicModel, ::CoastlineNoSlip, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) D⁺.ν∇U = D⁻.ν∇U return nothing end """ OceanFloorFreeSlip applies boundary condition ∇u = 0 and ∇θ = 0 """ """ ocean_boundary_state!(::AbstractOceanModel, ::OceanFloorFreeSlip, ::NumericalFluxFirstOrder) apply free slip boundary conditions for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::OceanFloorFreeSlip, ::NumericalFluxFirstOrder, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) A⁺.w = -A⁻.w return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::OceanFloorFreeSlip, ::NumericalFluxGradient) apply free slip boundary condition for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::OceanFloorFreeSlip, ::NumericalFluxGradient, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) FT = eltype(Q⁺) A⁺.w = -zero(FT) return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::OceanFloorFreeSlip, ::NumericalFluxSecondOrder) apply free slip boundary conditions for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::OceanFloorFreeSlip, 
::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) D⁺.ν∇u = n⁻ * (@SVector [-0, -0])' D⁺.κ∇θ = n⁻ * -0 return nothing end """ OceanFloorNoSlip applies boundary condition u = 0 and ∇θ = 0 """ """ ocean_boundary_state!(::AbstractOceanModel, ::Union{OceanFloorNoSlip, OceanFloorLinearDrag}, ::NumericalFluxFirstOrder) apply no slip boundary condition for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::Union{OceanFloorNoSlip, OceanFloorLinearDrag}, ::NumericalFluxFirstOrder, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) Q⁺.u = -Q⁻.u A⁺.w = -A⁻.w return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::Union{OceanFloorNoSlip, OceanFloorLinearDrag}, ::NumericalFluxGradient) apply no slip boundary condition for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::Union{OceanFloorNoSlip, OceanFloorLinearDrag}, ::NumericalFluxGradient, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) FT = eltype(Q⁺) Q⁺.u = SVector(-zero(FT), -zero(FT)) A⁺.w = -zero(FT) return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::OceanFloorNoSlip, ::NumericalFluxSecondOrder) apply no slip boundary condition for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::OceanFloorNoSlip, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) D⁺.ν∇u = D⁻.ν∇u D⁺.κ∇θ = n⁻ * -0 return nothing end """ OceanFloorLinearDrag applies boundary condition u = 0 with linear drag on viscous-flux and ∇θ = 0 """ """ ocean_boundary_state!(::AbstractOceanModel, ::OceanFloorLinearDrag, ::NumericalFluxSecondOrder) apply linear drag boundary condition for velocity apply no penetration boundary for temperature """ @inline function ocean_boundary_state!( m::AbstractOceanModel, ::OceanFloorLinearDrag, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) u, v = Q⁻.u D⁺.ν∇u = -m.problem.Cd_lin * @SMatrix [-0 -0; -0 
-0; u v] D⁺.κ∇θ = n⁻ * -0 return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::Union{OceanSurface*}, ::Union{NumericalFluxFirstOrder, NumericalFluxGradient}) applying neumann boundary conditions, so don't need to do anything for these numerical fluxes """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::Union{ OceanSurfaceNoStressNoForcing, OceanSurfaceStressNoForcing, OceanSurfaceNoStressForcing, OceanSurfaceStressForcing, }, ::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, Q⁺, A⁺, n⁻, Q⁻, A⁻, t, ) return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::OceanSurfaceNoStressNoForcing, ::NumericalFluxSecondOrder) apply no flux boundary condition for velocity apply no flux boundary condition for temperature """ @inline function ocean_boundary_state!( ::AbstractOceanModel, ::OceanSurfaceNoStressNoForcing, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) D⁺.ν∇u = n⁻ * (@SVector [-0, -0])' D⁺.κ∇θ = n⁻ * -0 return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::OceanSurfaceStressNoForcing, ::NumericalFluxSecondOrder) apply wind-stress boundary condition for velocity apply no flux boundary condition for temperature """ @inline function ocean_boundary_state!( m::AbstractOceanModel, ::OceanSurfaceStressNoForcing, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) τᶻ = velocity_flux(m.problem, A⁻.y, m.ρₒ) D⁺.ν∇u = n⁻ * (@SVector [-τᶻ, -0])' D⁺.κ∇θ = n⁻ * -0 return nothing end """ ocean_boundary_state!(::AbstractOceanModel, ::OceanSurfaceNoStressForcing, ::NumericalFluxSecondOrder) apply no flux boundary condition for velocity apply forcing boundary condition for temperature """ @inline function ocean_boundary_state!( m::AbstractOceanModel, ::OceanSurfaceNoStressForcing, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) σᶻ = temperature_flux(m.problem, A⁻.y, Q⁻.θ) D⁺.ν∇u = n⁻ * (@SVector [-0, -0])' D⁺.κ∇θ = -n⁻ * σᶻ return nothing end """ 
ocean_boundary_state!(::AbstractOceanModel, ::OceanSurfaceStressForcing, ::NumericalFluxSecondOrder) apply wind-stress boundary condition for velocity apply forcing boundary condition for temperature """ @inline function ocean_boundary_state!( m::AbstractOceanModel, ::OceanSurfaceStressForcing, ::NumericalFluxSecondOrder, Q⁺, D⁺, A⁺, n⁻, Q⁻, D⁻, A⁻, t, ) τᶻ = velocity_flux(m.problem, A⁻.y, m.ρₒ) σᶻ = temperature_flux(m.problem, A⁻.y, Q⁻.θ) D⁺.ν∇u = n⁻ * (@SVector [-τᶻ, -0])' D⁺.κ∇θ = -n⁻ * σᶻ return nothing end @inline velocity_flux(p::AbstractOceanProblem, y, ρ) = -(p.τₒ / ρ) * cos(y * π / p.Lʸ) @inline function temperature_flux(p::AbstractOceanProblem, y, θ) θʳ = p.θᴱ * (1 - y / p.Lʸ) return p.λʳ * (θʳ - θ) end ================================================ FILE: src/Ocean/SplitExplicit01/OceanModel.jl ================================================ struct OceanModel{P, T} <: AbstractOceanModel problem::P grav::T ρₒ::T cʰ::T cᶻ::T add_fast_substeps::T numImplSteps::T ivdc_dt::T αᵀ::T νʰ::T νᶻ::T κʰ::T κᶻ::T κᶜ::T fₒ::T β::T function OceanModel{FT}( problem; grav = FT(10), # m/s^2 ρₒ = FT(1000), # kg/m^3 cʰ = FT(0), # m/s cᶻ = FT(0), # m/s add_fast_substeps = 0, numImplSteps = 0, ivdc_dt = FT(1), αᵀ = FT(2e-4), # 1/K νʰ = FT(5e3), # m^2/s νᶻ = FT(5e-3), # m^2/s κʰ = FT(1e3), # m^2/s κᶻ = FT(1e-4), # vertical diffusivity, m^2/s κᶜ = FT(1e-4), # convective adjustment vertical diffusivity, m^2/s fₒ = FT(1e-4), # Hz β = FT(1e-11), # Hz/m ) where {FT <: AbstractFloat} return new{typeof(problem), FT}( problem, grav, ρₒ, cʰ, cᶻ, add_fast_substeps, numImplSteps, ivdc_dt, αᵀ, νʰ, νᶻ, κʰ, κᶻ, κᶜ, fₒ, β, ) end end function calculate_dt(grid, ::OceanModel, _...) 
#= minΔx = min_node_distance(grid, HorizontalDirection()) minΔz = min_node_distance(grid, VerticalDirection()) CFL_gravity = minΔx / model.cʰ CFL_diffusive = minΔz^2 / (1000 * model.κᶻ) CFL_viscous = minΔz^2 / model.νᶻ dt = 1 // 2 * minimum([CFL_gravity, CFL_diffusive, CFL_viscous]) =# # FT = eltype(grid) # dt = FT(1) return nothing end """ OceanDGModel() helper function to add required filtering not used in the Driver+Config setup """ function OceanDGModel( bl::OceanModel, grid, numfluxnondiff, numfluxdiff, gradnumflux; kwargs..., ) N = polynomialorders(grid) Nvert = N[end] vert_filter = CutoffFilter(grid, Nvert - 1) exp_filter = ExponentialFilter(grid, 1, 8) flowintegral_dg = DGModel( FlowIntegralModel(bl), grid, numfluxnondiff, numfluxdiff, gradnumflux, ) tendency_dg = DGModel( TendencyIntegralModel(bl), grid, numfluxnondiff, numfluxdiff, gradnumflux, ) conti3d_dg = DGModel( Continuity3dModel(bl), grid, numfluxnondiff, numfluxdiff, gradnumflux, ) FT = eltype(grid) conti3d_Q = init_ode_state(conti3d_dg, FT(0); init_on_cpu = true) ivdc_dg = DGModel( IVDCModel(bl), grid, numfluxnondiff, numfluxdiff, gradnumflux; direction = VerticalDirection(), ) ivdc_Q = init_ode_state(ivdc_dg, FT(0); init_on_cpu = true) # Not sure this is needed since we set values later, # but we'll do it just in case! ivdc_RHS = init_ode_state(ivdc_dg, FT(0); init_on_cpu = true) # Not sure this is needed since we set values later, # but we'll do it just in case! 
ivdc_bgm_solver = BatchedGeneralizedMinimalResidual( ivdc_dg, ivdc_Q; max_subspace_size = 10, ) modeldata = ( vert_filter = vert_filter, exp_filter = exp_filter, flowintegral_dg = flowintegral_dg, tendency_dg = tendency_dg, conti3d_dg = conti3d_dg, conti3d_Q = conti3d_Q, ivdc_dg = ivdc_dg, ivdc_Q = ivdc_Q, ivdc_RHS = ivdc_RHS, ivdc_bgm_solver = ivdc_bgm_solver, ) return DGModel( bl, grid, numfluxnondiff, numfluxdiff, gradnumflux; kwargs..., modeldata = modeldata, ) end function vars_state(m::OceanModel, ::Prognostic, T) @vars begin u::SVector{2, T} η::T θ::T end end function init_state_prognostic!(m::OceanModel, Q::Vars, A::Vars, localgeo, t) return ocean_init_state!(m.problem, Q, A, localgeo, t) end function vars_state(m::OceanModel, ::Auxiliary, T) @vars begin w::T pkin::T # kinematic pressure: ∫(-g αᵀ θ) wz0::T # w at z=0 u_d::SVector{2, T} # velocity deviation from vertical mean ΔGu::SVector{2, T} y::T # y-coordinate of the box end end function init_state_auxiliary!( m::OceanModel, state_aux::MPIStateArray, grid, direction, ) init_state_auxiliary!( m, (m, A, tmp, geom) -> ocean_init_aux!(m, m.problem, A, geom), state_aux, grid, direction, ) end function vars_state(m::OceanModel, ::Gradient, T) @vars begin u::SVector{2, T} ud::SVector{2, T} θ::T end end @inline function compute_gradient_argument!( m::OceanModel, G::Vars, Q::Vars, A, t, ) G.u = Q.u G.ud = A.u_d G.θ = Q.θ return nothing end function vars_state(m::OceanModel, ::GradientFlux, T) @vars begin ν∇u::SMatrix{3, 2, T, 6} κ∇θ::SVector{3, T} end end @inline function compute_gradient_flux!( m::OceanModel, D::Vars, G::Grad, Q::Vars, A::Vars, t, ) ν = viscosity_tensor(m) # D.ν∇u = ν * G.u D.ν∇u = -@SMatrix [ m.νʰ*G.ud[1, 1] m.νʰ*G.ud[1, 2] m.νʰ*G.ud[2, 1] m.νʰ*G.ud[2, 2] m.νᶻ*G.u[3, 1] m.νᶻ*G.u[3, 2] ] κ = diffusivity_tensor(m, G.θ[3]) D.κ∇θ = -κ * G.θ return nothing end @inline viscosity_tensor(m::OceanModel) = Diagonal(@SVector [m.νʰ, m.νʰ, m.νᶻ]) @inline function diffusivity_tensor(m::OceanModel, ∂θ∂z) if 
m.numImplSteps > 0 κ = (@SVector [m.κʰ, m.κʰ, m.κᶻ * 0.5]) else ∂θ∂z < 0 ? κ = (@SVector [m.κʰ, m.κʰ, m.κᶜ]) : κ = (@SVector [m.κʰ, m.κʰ, m.κᶻ]) end return Diagonal(κ) end """ vars_integral(::OceanModel) location to store integrands for bottom up integrals ∇hu = the horizontal divegence of u, e.g. dw/dz """ function vars_state(m::OceanModel, ::UpwardIntegrals, T) @vars begin ∇hu::T buoy::T # ∫u::SVector{2, T} end end """ integral_load_auxiliary_state!(::OceanModel) copy w to var_integral this computation is done pointwise at each nodal point arguments: m -> model in this case OceanModel I -> array of integrand variables Q -> array of state variables A -> array of aux variables """ @inline function integral_load_auxiliary_state!( m::OceanModel, I::Vars, Q::Vars, A::Vars, ) I.∇hu = A.w # borrow the w value from A... I.buoy = m.grav * m.αᵀ * Q.θ # buoyancy to integrate vertically from top (=reverse) # I.∫u = Q.u return nothing end """ integral_set_auxiliary_state!(::OceanModel) copy integral results back out to aux this computation is done pointwise at each nodal point arguments: m -> model in this case OceanModel A -> array of aux variables I -> array of integrand variables """ @inline function integral_set_auxiliary_state!(m::OceanModel, A::Vars, I::Vars) A.w = I.∇hu A.pkin = -I.buoy # A.∫u = I.∫u return nothing end """ vars_reverse_integral(::OceanModel) location to store integrands for top down integrals αᵀθ = density perturbation """ function vars_state(m::OceanModel, ::DownwardIntegrals, T) @vars begin buoy::T end end """ reverse_integral_load_auxiliary_state!(::OceanModel) copy αᵀθ to var_reverse_integral this computation is done pointwise at each nodal point arguments: m -> model in this case OceanModel I -> array of integrand variables A -> array of aux variables """ @inline function reverse_integral_load_auxiliary_state!( m::OceanModel, I::Vars, Q::Vars, A::Vars, ) I.buoy = A.pkin return nothing end """ reverse_integral_set_auxiliary_state!(::OceanModel) 
copy reverse integral results back out to aux this computation is done pointwise at each nodal point arguments: m -> model in this case OceanModel A -> array of aux variables I -> array of integrand variables """ @inline function reverse_integral_set_auxiliary_state!( m::OceanModel, A::Vars, I::Vars, ) A.pkin = I.buoy return nothing end @inline function flux_first_order!( m::OceanModel, F::Grad, Q::Vars, A::Vars, t::Real, direction, ) @inbounds begin u = Q.u # Horizontal components of velocity θ = Q.θ w = A.w # vertical velocity pkin = A.pkin v = @SVector [u[1], u[2], w] Iʰ = @SMatrix [ 1 -0 -0 1 -0 -0 ] # ∇h • (g η) #- jmc: put back this term to check # η = Q.η # F.u += m.grav * η * Iʰ # ∇ • (u θ) F.θ += v * θ # ∇h • pkin F.u += pkin * Iʰ # ∇h • (v ⊗ u) # F.u += v * u' end return nothing end @inline function flux_second_order!( m::OceanModel, F::Grad, Q::Vars, D::Vars, HD::Vars, A::Vars, t::Real, ) #- vertical viscosity only (horizontal fluxes in horizontal model) # F.u -= @SVector([0, 0, 1]) * D.ν∇u[3, :]' #- all 3 direction viscous flux for horizontal momentum tendency F.u += D.ν∇u F.θ += D.κ∇θ return nothing end @inline function source!( m::OceanModel, S::Vars, Q::Vars, D::Vars, A::Vars, t::Real, direction, ) @inbounds begin u = Q.u # Horizontal components of velocity ud = A.u_d # Horizontal velocity deviation from vertical mean # f × u f = coriolis_force(m, A.y) # S.u -= @SVector [-f * u[2], f * u[1]] S.u -= @SVector [-f * ud[2], f * ud[1]] #- borotropic tendency adjustment S.u += A.ΔGu # switch this to S.η if you comment out the fast mode in MultistateMultirateRungeKutta S.η += A.wz0 end return nothing end @inline coriolis_force(m::OceanModel, y) = m.fₒ + m.β * y function update_auxiliary_state!( dg::DGModel, m::OceanModel, Q::MPIStateArray, t::Real, elems::UnitRange, ) FT = eltype(Q) A = dg.state_auxiliary MD = dg.modeldata # `update_auxiliary_state!` gets called twice, once for the real elements # and once for the ghost elements. 
Only apply the filters to the real elems. if elems == dg.grid.topology.realelems # required to ensure that after integration velocity field is divergence free vert_filter = MD.vert_filter # Q[1] = u[1] = u, Q[2] = u[2] = v apply!(Q, (1, 2), dg.grid, vert_filter, direction = VerticalDirection()) exp_filter = MD.exp_filter # Q[4] = θ apply!(Q, (4,), dg.grid, exp_filter, direction = VerticalDirection()) end #---------- # Compute Divergence of Horizontal Flow field using "conti3d_dg" DGmodel conti3d_dg = dg.modeldata.conti3d_dg # ct3d_Q = dg.modeldata.conti3d_Q # ct3d_dQ = similar(ct3d_Q) # fill!(ct3d_dQ, 0) #- Instead, use directly conti3d_Q to store dQ (since we will not update the state) ct3d_dQ = dg.modeldata.conti3d_Q ct3d_bl = conti3d_dg.balance_law # call "conti3d_dg" DGmodel # note: with "increment = false", just return tendency (no state update) p = nothing conti3d_dg(ct3d_dQ, Q, p, t; increment = false) # Copy from ct3d_dQ.θ which is realy ∇h•u into A.w (which will be integrated) function f!(::OceanModel, dQ, A, t) @inbounds begin A.w = dQ.θ end end update_auxiliary_state!(f!, dg, m, ct3d_dQ, t, elems) #---------- info = basic_grid_info(dg) Nqh, Nqk = info.Nqh, info.Nqk nelemv, nelemh = info.nvertelem, info.nhorzelem nrealelemh = info.nhorzrealelem # compute integrals for w and pkin indefinite_stack_integral!(dg, m, Q, A, t, elems) # bottom -> top reverse_indefinite_stack_integral!(dg, m, Q, A, t, elems) # top -> bottom # copy down wz0 # We are unable to use vars (ie A.w) for this because this operation will # return a SubArray, and adapt (used for broadcasting along reshaped arrays) # has a limited recursion depth for the types allowed. 
nb_aux_m = number_states(m, Auxiliary()) data_m = reshape(A.data, Nqh, Nqk, nb_aux_m, nelemv, nelemh) # project w(z=0) down the stack index_w = varsindex(vars_state(m, Auxiliary(), FT), :w) index_wz0 = varsindex(vars_state(m, Auxiliary(), FT), :wz0) flat_wz0 = @view data_m[:, end:end, index_w, end:end, 1:nrealelemh] boxy_wz0 = @view data_m[:, :, index_wz0, :, 1:nrealelemh] boxy_wz0 .= flat_wz0 # Compute Horizontal Flow deviation from vertical mean flowintegral_dg = dg.modeldata.flowintegral_dg flowint = flowintegral_dg.balance_law update_auxiliary_state!(flowintegral_dg, flowint, Q, 0, elems) ## get top value (=integral over full depth) nb_aux_flw = number_states(flowint, Auxiliary()) data_flw = reshape( flowintegral_dg.state_auxiliary.data, Nqh, Nqk, nb_aux_flw, nelemv, nelemh, ) index_∫u = varsindex(vars_state(flowint, Auxiliary(), FT), :∫u) flat_∫u = @view data_flw[:, end:end, index_∫u, end:end, 1:nrealelemh] ## make a copy of horizontal velocity A.u_d .= Q.u ## and remove vertical mean velocity index_ud = varsindex(vars_state(m, Auxiliary(), FT), :u_d) boxy_ud = @view data_m[:, :, index_ud, :, 1:nrealelemh] boxy_ud .-= flat_∫u / m.problem.H return true end @inline wavespeed(m::OceanModel, n⁻, _...) = abs(SVector(m.cʰ, m.cʰ, m.cᶻ)' * n⁻) # We want not have jump penalties on η (since not a flux variable) function update_penalty!( ::RusanovNumericalFlux, ::OceanModel, n⁻, λ, ΔQ::Vars, Q⁻, A⁻, Q⁺, A⁺, t, ) ΔQ.η = -0 return nothing end boundary_conditions(ocean::OceanModel) = ocean.problem.boundary_conditions """ boundary_state!(nf, bc, ::OceanModel, args...) applies boundary conditions for this model dispatches to a function in OceanBoundaryConditions.jl based on BC type defined by a problem such as SimpleBoxProblem.jl """ @inline function boundary_state!(nf, bc, ocean::OceanModel, args...) return ocean_model_boundary!(ocean, bc, nf, args...) 
end ================================================ FILE: src/Ocean/SplitExplicit01/SplitExplicitLSRK2nMethod.jl ================================================ export SplitExplicitLSRK2nSolver using KernelAbstractions using KernelAbstractions.Extras: @unroll using StaticArrays using ...SystemSolvers using ...MPIStateArrays: array_device, realview using ...GenericCallbacks using ...ODESolvers: AbstractODESolver, LowStorageRungeKutta2N, update!, updatedt!, getdt import ...ODESolvers: dostep! using ...BalanceLaws: # initialize_fast_state!, # initialize_adjustment!, tendency_from_slow_to_fast!, cummulate_fast_solution!, reconcile_from_fast_to_slow! LSRK2N = LowStorageRungeKutta2N @doc """ SplitExplicitLSRK2nSolver(slow_solver, fast_solver; dt, t0 = 0, coupled = true) This is a time stepping object for explicitly time stepping the differential equation given by the right-hand-side function `f` with the state `Q`, i.e., ```math \\dot{Q_fast} = f_fast(Q_fast, Q_slow, t) \\dot{Q_slow} = f_slow(Q_slow, Q_fast, t) ``` with the required time step size `dt` and optional initial time `t0`. This time stepping object is intended to be passed to the `solve!` command. This method performs an operator splitting to timestep the Sea-Surface elevation and vertically averaged horizontal velocity of the model at a faster rate than the full model, using LowStorageRungeKutta2N time-stepping. 
""" SplitExplicitLSRK2nSolver mutable struct SplitExplicitLSRK2nSolver{SS, FS, RT, MSA} <: AbstractODESolver "slow solver" slow_solver::SS "fast solver" fast_solver::FS "time step" dt::RT "time" t::RT "elapsed time steps" steps::Int "storage for transfer tendency" dQ2fast::MSA function SplitExplicitLSRK2nSolver( slow_solver::LSRK2N, fast_solver, Q = nothing; dt = getdt(slow_solver), t0 = slow_solver.t, ) where {AT <: AbstractArray} SS = typeof(slow_solver) FS = typeof(fast_solver) RT = real(eltype(slow_solver.dQ)) dQ2fast = similar(slow_solver.dQ) dQ2fast .= -0.0 MSA = typeof(dQ2fast) return new{SS, FS, RT, MSA}( slow_solver, fast_solver, RT(dt), RT(t0), 0, dQ2fast, ) end end function dostep!( Qvec, split::SplitExplicitLSRK2nSolver{SS}, param, time::Real, ) where {SS <: LSRK2N} slow = split.slow_solver fast = split.fast_solver Qslow = Qvec.slow Qfast = Qvec.fast dQslow = slow.dQ dQ2fast = split.dQ2fast slow_bl = slow.rhs!.balance_law fast_bl = fast.rhs!.balance_law groupsize = 256 slow_dt = getdt(slow) fast_dt_in = getdt(fast) for slow_s in 1:length(slow.RKA) # Current slow state time slow_stage_time = time + slow.RKC[slow_s] * slow_dt # Fractional time for slow stage if slow_s == length(slow.RKA) fract_dt = (1 - slow.RKC[slow_s]) * slow_dt else fract_dt = (slow.RKC[slow_s + 1] - slow.RKC[slow_s]) * slow_dt end # Initialize fast model and set time-step and number of substeps we need # Note: to reproduce previous Fast output, 1) remove "firstStage = ..." 
line fast_steps = [0 0 0] FT = typeof(slow_dt) fast_time_rec = [fast_dt_in FT(0)] initialize_fast_state!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, fract_dt, fast_time_rec, fast_steps; firstStage = (slow_s == 1), ) # Initialize tentency adjustment before evaluation of slow mode initialize_adjustment!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, ) # Evaluate the slow mode # --> save tendency for the fast slow.rhs!(dQ2fast, Qslow, param, slow_stage_time, increment = false) # vertically integrate slow tendency to advance fast equation # and use vertical mean for slow model (negative source) # ---> work with dQ2fast as input tendency_from_slow_to_fast!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, dQ2fast, ) # Compute (and RK update) slow tendency slow.rhs!(dQslow, Qslow, param, slow_stage_time, increment = true) # Update (RK-stage) slow state event = Event(array_device(Qslow)) event = update!(array_device(Qslow), groupsize)( realview(dQslow), realview(Qslow), slow.RKA[slow_s % length(slow.RKA) + 1], slow.RKB[slow_s], slow_dt, nothing, nothing, nothing; ndrange = length(realview(Qslow)), dependencies = (event,), ) wait(array_device(Qslow), event) # Determine number of substeps we need fast_dt = fast_time_rec[1] nsubsteps = fast_steps[3] updatedt!(fast, fast_dt) for substep in 1:nsubsteps fast_time = slow_stage_time + (substep - 1) * fast_dt dostep!(Qfast, fast, param, fast_time) # cumulate fast solution cummulate_fast_solution!( fast_bl, fast.rhs!, Qfast, fast_time, fast_dt, substep, fast_steps, fast_time_rec, ) end # Reconcile slow equation using fast equation # Note: to reproduce previous Fast output, 2) remove "lastStage = ..." line reconcile_from_fast_to_slow!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, fast_time_rec; lastStage = (slow_s == length(slow.RKA)), ) end # reset fast time-step to original value updatedt!(fast, fast_dt_in) # now do implicit mixing step nImplSteps = slow_bl.numImplSteps if nImplSteps > 0 # 1. 
get implicit mising model, model state variable array and solver handles ivdc_dg = slow.rhs!.modeldata.ivdc_dg ivdc_bl = ivdc_dg.balance_law ivdc_Q = slow.rhs!.modeldata.ivdc_Q ivdc_solver = slow.rhs!.modeldata.ivdc_bgm_solver # ivdc_solver_dt = getdt(ivdc_solver) # would work if solver time-step was set # FT = typeof(slow_dt) # ivdc_solver_dt = slow_dt / FT(nImplSteps) # just recompute time-step ivdc_solver_dt = ivdc_bl.parent_om.ivdc_dt # println("ivdc_solver_dt = ",ivdc_solver_dt ) # 2. setup start RHS, initial guess and values for computing mixing coeff ivdc_Q.θ .= Qslow.θ ivdc_RHS = slow.rhs!.modeldata.ivdc_RHS ivdc_RHS.θ .= Qslow.θ ivdc_RHS.θ .= ivdc_RHS.θ ./ ivdc_solver_dt ivdc_dg.state_auxiliary.θ_init .= ivdc_Q.θ # 3. Invoke iterative solver println("BEFORE maximum(ivdc_Q.θ[:]): ", maximum(ivdc_Q.realdata[:])) println("BEFORE minimum(ivdc_Q.θ[:]): ", minimum(ivdc_Q.realdata[:])) lm!(y, x) = ivdc_dg(y, x, nothing, 0; increment = false) solve_tot = 0 iter_tot = 0 for i in 1:nImplSteps solve_time = @elapsed iters = linearsolve!(lm!, nothing, ivdc_solver, ivdc_Q, ivdc_RHS) solve_tot = solve_tot + solve_time iter_tot = iter_tot + iters # Set new RHS and initial values ivdc_RHS.θ .= ivdc_Q.θ ./ ivdc_solver_dt ivdc_dg.state_auxiliary.θ_init .= ivdc_Q.θ end println("solver iters, time: ", iter_tot, ", ", solve_tot) println("AFTER maximum(ivdc_Q.θ[:]): ", maximum(ivdc_Q.realdata[:])) println("AFTER minimum(ivdc_Q.θ[:]): ", minimum(ivdc_Q.realdata[:])) # exit() # Now update Qslow.θ .= ivdc_Q.θ end return nothing end ================================================ FILE: src/Ocean/SplitExplicit01/SplitExplicitLSRK3nMethod.jl ================================================ export SplitExplicitLSRK3nSolver using KernelAbstractions using KernelAbstractions.Extras: @unroll using StaticArrays using ...SystemSolvers using ...MPIStateArrays: array_device, realview using ...GenericCallbacks #using Printf using ...ODESolvers: AbstractODESolver, LowStorageRungeKutta3N, 
update!, updatedt!, getdt import ...ODESolvers: dostep! using ...BalanceLaws: tendency_from_slow_to_fast!, cummulate_fast_solution!, reconcile_from_fast_to_slow! LSRK3N = LowStorageRungeKutta3N @doc """ SplitExplicitLSRK3nSolver(slow_solver, fast_solver; dt, t0 = 0, coupled = true) This is a time stepping object for explicitly time stepping the differential equation given by the right-hand-side function `f` with the state `Q`, i.e., ```math \\dot{Q_fast} = f_fast(Q_fast, Q_slow, t) \\dot{Q_slow} = f_slow(Q_slow, Q_fast, t) ``` with the required time step size `dt` and optional initial time `t0`. This time stepping object is intended to be passed to the `solve!` command. This method performs an operator splitting to timestep the Sea-Surface elevation and vertically averaged horizontal velocity of the model at a faster rate than the full model, using LowStorageRungeKutta3N time-stepping. """ SplitExplicitLSRK3nSolver mutable struct SplitExplicitLSRK3nSolver{SS, FS, RT, MSA, MSB} <: AbstractODESolver "slow solver" slow_solver::SS "fast solver" fast_solver::FS "time step" dt::RT "time" t::RT "elapsed time steps" steps::Int "storage for transfer tendency" dQ2fast::MSA "saving original fast state" S_fast::MSB function SplitExplicitLSRK3nSolver( slow_solver::LSRK3N, fast_solver, Q = nothing; dt = getdt(slow_solver), t0 = slow_solver.t, ) where {AT <: AbstractArray} SS = typeof(slow_solver) FS = typeof(fast_solver) RT = real(eltype(slow_solver.dQ)) dQ2fast = similar(slow_solver.dQ) dQ2fast .= -0.0 #- Warning: Number of fast-solution to save (here only 1, in S_fast) should be as # large as number of non zero Butcher Coeff (including Weight "b") below 2 diagonal, # i.e., with ns= Number of Stages and a(ns+1,:) = b(:), all non zero a(i,j)|_{i > j+1}. # Saving only 1 fast-solution workd for LS3NRK33Heuns. 
S_fast = similar(fast_solver.dQ) S_fast .= -0.0 MSA = typeof(dQ2fast) MSB = typeof(S_fast) return new{SS, FS, RT, MSA, MSB}( slow_solver, fast_solver, RT(dt), RT(t0), 0, dQ2fast, S_fast, ) end end function dostep!( Qvec, split::SplitExplicitLSRK3nSolver{SS}, param, time::Real, ) where {SS <: LSRK3N} slow = split.slow_solver fast = split.fast_solver Qslow = Qvec.slow Qfast = Qvec.fast dQslow = slow.dQ dRslow = slow.dR dQ2fast = split.dQ2fast S_fast = split.S_fast slow_bl = slow.rhs!.balance_law fast_bl = fast.rhs!.balance_law rv_Q = realview(Qslow) rv_dQ = realview(dQslow) rv_dR = realview(dRslow) groupsize = 256 slow_dt = getdt(slow) fast_dt_in = getdt(fast) rkA = slow.RKA rkB = slow.RKB rkC = slow.RKC rkW = slow.RKW nStages = length(rkC) rv_dR .= -0 for s in 1:nStages # Current slow state time slow_stage_time = time + rkC[s] * slow_dt # @printf("-- main dostep! stage s=%3i , t= %10.2f\n",s,slow_stage_time) # Initialize fast model and set time-step and number of substeps we need fast_steps = [0 0 0] FT = typeof(slow_dt) fast_time_rec = [fast_dt_in FT(0) FT(0)] set_fast_for_stepping!( slow_bl, fast_bl, fast.rhs!, Qfast, S_fast, slow_dt, rkC, rkW, s, nStages, fast_time_rec, fast_steps, ) # Initialize tentency adjustment before evaluation of slow mode initialize_adjustment!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, ) # Evaluate the slow mode # --> save tendency for the fast slow.rhs!(dQ2fast, Qslow, param, slow_stage_time, increment = false) # vertically integrate slow tendency to advance fast equation # and use vertical mean for slow model (negative source) # ---> work with dQ2fast as input tendency_from_slow_to_fast!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, dQ2fast, ) # Compute (and RK update) slow tendency slow.rhs!(dQslow, Qslow, param, slow_stage_time, increment = true) # Update (RK-stage) slow state event = Event(array_device(Qslow)) event = update!(array_device(Qslow), groupsize)( rv_dQ, rv_dR, rv_Q, rkA[s % nStages + 1, 1], rkA[s % 
nStages + 1, 2], rkB[s, 1], rkB[s, 2], slow_dt, nothing, nothing, nothing; ndrange = length(rv_Q), dependencies = (event,), ) wait(array_device(Qslow), event) # Determine number of substeps we need fast_dt = fast_time_rec[1] nsubsteps = fast_steps[3] updatedt!(fast, fast_dt) for substep in 1:nsubsteps fast_time = time + fast_time_rec[3] + (substep - 1) * fast_dt # @printf("-- main dostep! substep=%3i , t= %10.2f\n",substep,fast_time) dostep!(Qfast, fast, param, fast_time) # cumulate fast solution cummulate_fast_solution!( fast_bl, fast.rhs!, Qfast, fast_time, fast_dt, substep, fast_steps, fast_time_rec, ) end # reconcile slow equation using fast equation reconcile_from_fast_to_slow!( slow_bl, fast_bl, slow.rhs!, fast.rhs!, Qslow, Qfast, fast_time_rec; lastStage = (s == nStages), ) end # reset fast time-step to original value updatedt!(fast, fast_dt_in) # now do implicit mixing step nImplSteps = slow_bl.numImplSteps if nImplSteps > 0 # 1. get implicit mising model, model state variable array and solver handles ivdc_dg = slow.rhs!.modeldata.ivdc_dg ivdc_bl = ivdc_dg.balance_law ivdc_Q = slow.rhs!.modeldata.ivdc_Q ivdc_solver = slow.rhs!.modeldata.ivdc_bgm_solver # ivdc_solver_dt = getdt(ivdc_solver) # would work if solver time-step was set # FT = typeof(slow_dt) # ivdc_solver_dt = slow_dt / FT(nImplSteps) # just recompute time-step ivdc_solver_dt = ivdc_bl.parent_om.ivdc_dt # println("ivdc_solver_dt = ",ivdc_solver_dt ) # 2. setup start RHS, initial guess and values for computing mixing coeff ivdc_Q.θ .= Qslow.θ ivdc_RHS = slow.rhs!.modeldata.ivdc_RHS ivdc_RHS.θ .= Qslow.θ ivdc_RHS.θ .= ivdc_RHS.θ ./ ivdc_solver_dt ivdc_dg.state_auxiliary.θ_init .= ivdc_Q.θ # 3. 
Invoke iterative solver println("BEFORE maximum(ivdc_Q.θ[:]): ", maximum(ivdc_Q.realdata[:])) println("BEFORE minimum(ivdc_Q.θ[:]): ", minimum(ivdc_Q.realdata[:])) lm!(y, x) = ivdc_dg(y, x, nothing, 0; increment = false) solve_tot = 0 iter_tot = 0 for i in 1:nImplSteps solve_time = @elapsed iters = linearsolve!(lm!, nothing, ivdc_solver, ivdc_Q, ivdc_RHS) solve_tot = solve_tot + solve_time iter_tot = iter_tot + iters # Set new RHS and initial values ivdc_RHS.θ .= ivdc_Q.θ ./ ivdc_solver_dt ivdc_dg.state_auxiliary.θ_init .= ivdc_Q.θ end println("solver iters, time: ", iter_tot, ", ", solve_tot) println("AFTER maximum(ivdc_Q.θ[:]): ", maximum(ivdc_Q.realdata[:])) println("AFTER minimum(ivdc_Q.θ[:]): ", minimum(ivdc_Q.realdata[:])) # exit() # Now update Qslow.θ .= ivdc_Q.θ end return nothing end ================================================ FILE: src/Ocean/SplitExplicit01/SplitExplicitModel.jl ================================================ module SplitExplicit01 export OceanDGModel, OceanModel, Continuity3dModel, HorizontalModel, BarotropicModel, AbstractOceanProblem #using Printf using StaticArrays using LinearAlgebra: I, dot, Diagonal using ...VariableTemplates using ...MPIStateArrays using ...DGMethods: init_ode_state, basic_grid_info using ...Mesh.Filters: CutoffFilter, apply!, ExponentialFilter using ...Mesh.Grids: polynomialorders, dimensionality, dofs_per_element, VerticalDirection, HorizontalDirection, min_node_distance using ...BalanceLaws #import ...BalanceLaws: nodal_update_auxiliary_state! 
using ...DGMethods.NumericalFluxes: NumericalFluxFirstOrder, NumericalFluxGradient, NumericalFluxSecondOrder, RusanovNumericalFlux, CentralNumericalFluxFirstOrder, CentralNumericalFluxGradient, CentralNumericalFluxSecondOrder using ..Ocean: AbstractOceanProblem import ...DGMethods.NumericalFluxes: update_penalty!, numerical_flux_second_order!, NumericalFluxFirstOrder import ...DGMethods: LocalGeometry, DGModel, calculate_dt import ...BalanceLaws: vars_state, flux_first_order!, flux_second_order!, source!, wavespeed, parameter_set, boundary_conditions, boundary_state!, update_auxiliary_state!, update_auxiliary_state_gradient!, compute_gradient_argument!, init_state_auxiliary!, init_state_prognostic!, compute_gradient_flux!, indefinite_stack_integral!, reverse_indefinite_stack_integral!, integral_load_auxiliary_state!, integral_set_auxiliary_state!, reverse_integral_load_auxiliary_state!, reverse_integral_set_auxiliary_state! import ...SystemSolvers: BatchedGeneralizedMinimalResidual, linearsolve! ×(a::SVector, b::SVector) = StaticArrays.cross(a, b) ∘(a::SVector, b::SVector) = StaticArrays.dot(a, b) abstract type AbstractOceanModel <: BalanceLaw end function ocean_init_aux! end function ocean_init_state! end function ocean_model_boundary! end function set_fast_for_stepping! end function initialize_fast_state! end function initialize_adjustment! 
end include("SplitExplicitLSRK2nMethod.jl") include("SplitExplicitLSRK3nMethod.jl") include("OceanModel.jl") include("Continuity3dModel.jl") include("VerticalIntegralModel.jl") include("BarotropicModel.jl") include("IVDCModel.jl") include("Communication.jl") include("OceanBoundaryConditions.jl") end ================================================ FILE: src/Ocean/SplitExplicit01/VerticalIntegralModel.jl ================================================ struct TendencyIntegralModel{M} <: AbstractOceanModel ocean::M function TendencyIntegralModel(ocean::M) where {M} return new{M}(ocean) end end vars_state(tm::TendencyIntegralModel, ::Prognostic, FT) = vars_state(tm.ocean, Prognostic(), FT) vars_state(tm::TendencyIntegralModel, ::GradientFlux, FT) = @vars() function vars_state(m::TendencyIntegralModel, ::Auxiliary, T) @vars begin ∫du::SVector{2, T} end end function vars_state(m::TendencyIntegralModel, ::UpwardIntegrals, T) @vars begin ∫du::SVector{2, T} end end @inline function integral_load_auxiliary_state!( m::TendencyIntegralModel, I::Vars, Q::Vars, A::Vars, ) I.∫du = A.∫du return nothing end @inline function integral_set_auxiliary_state!( m::TendencyIntegralModel, A::Vars, I::Vars, ) A.∫du = I.∫du return nothing end init_state_auxiliary!(tm::TendencyIntegralModel, A::Vars, _...) 
= nothing function update_auxiliary_state!( dg::DGModel, tm::TendencyIntegralModel, dQ::MPIStateArray, t::Real, elems::UnitRange, ) A = dg.state_auxiliary # copy tendency vector to aux state for integration function f!(::TendencyIntegralModel, dQ, A, t) @inbounds begin A.∫du = @SVector [dQ.u[1], dQ.u[2]] end return nothing end update_auxiliary_state!(f!, dg, tm, dQ, t) # compute integral for Gᵁ indefinite_stack_integral!(dg, tm, dQ, A, t, elems) # bottom -> top return true end #------------------------------------------------------------------------------- struct FlowIntegralModel{M} <: AbstractOceanModel ocean::M function FlowIntegralModel(ocean::M) where {M} return new{M}(ocean) end end vars_state(fm::FlowIntegralModel, ::Prognostic, FT) = vars_state(fm.ocean, Prognostic(), FT) vars_state(fm::FlowIntegralModel, ::GradientFlux, FT) = @vars() function vars_state(m::FlowIntegralModel, ::Auxiliary, T) @vars begin ∫u::SVector{2, T} end end function vars_state(m::FlowIntegralModel, ::UpwardIntegrals, T) @vars begin ∫u::SVector{2, T} end end @inline function integral_load_auxiliary_state!( m::FlowIntegralModel, I::Vars, Q::Vars, A::Vars, ) I.∫u = Q.u return nothing end @inline function integral_set_auxiliary_state!( m::FlowIntegralModel, A::Vars, I::Vars, ) A.∫u = I.∫u return nothing end init_state_auxiliary!(fm::FlowIntegralModel, A::Vars, _...) 
= nothing function update_auxiliary_state!( dg::DGModel, fm::FlowIntegralModel, Q::MPIStateArray, t::Real, elems::UnitRange, ) A = dg.state_auxiliary # compute vertical integral of u indefinite_stack_integral!(dg, fm, Q, A, t, elems) # bottom -> top return true end ================================================ FILE: src/Ocean/SuperModels.jl ================================================ module SuperModels using MPI using ClimateMachine using ClimateMachine: Settings using ...BalanceLaws: parameter_set using ...DGMethods.NumericalFluxes using ..HydrostaticBoussinesq: HydrostaticBoussinesqModel, NonLinearAdvectionTerm, Forcing using ..OceanProblems: InitialValueProblem, InitialConditions using ..Ocean: FreeSlip, Impenetrable, Insulating, OceanBC, Penetrable using ..CartesianFields: SpectralElementField using ...Mesh.Filters: CutoffFilter, ExponentialFilter using ...Mesh.Grids: polynomialorders, DiscontinuousSpectralElementGrid using ClimateMachine: LS3NRK33Heuns, OceanBoxGCMConfigType, OceanBoxGCMSpecificInfo, DriverConfiguration import ClimateMachine: SolverConfiguration ##### ##### It's super good ##### struct HydrostaticBoussinesqSuperModel{D, G, E, S, F, N, T, C} domain::D grid::G equations::E state::S fields::F numerical_fluxes::N timestepper::T solver_configuration::C end """ HydrostaticBoussinesqSuperModel(; domain, time_step, parameters, initial_conditions = InitialConditions(), advection = (momentum = NonLinearAdvectionTerm(), tracers = NonLinearAdvectionTerm()), turbulence_closure = (νʰ=0, νᶻ=0, κʰ=0, κᶻ=0), coriolis = (f₀=0, β=0), rusanov_wave_speeds = (cʰ=0, cᶻ=0), buoyancy = (αᵀ=0,) numerical_fluxes = ( first_order = RusanovNumericalFlux(), second_order = CentralNumericalFluxSecondOrder(), gradient = CentralNumericalFluxGradient()) timestepper = ClimateMachine.ExplicitSolverType(solver_method=LS3NRK33Heuns), ) Builds a `SuperModel` that solves the Hydrostatic Boussinesq equations. 
""" function HydrostaticBoussinesqSuperModel(; domain, parameters, time_step, # We don't want to have to provide this here, but neverthless it's required. initial_conditions = InitialConditions(), advection = ( momentum = NonLinearAdvectionTerm(), tracers = NonLinearAdvectionTerm(), ), turbulence_closure = (νʰ = 0, νᶻ = 0, κʰ = 0, κᶻ = 0), coriolis = (f₀ = 0, β = 0), rusanov_wave_speeds = (cʰ = 0, cᶻ = 0), buoyancy = (αᵀ = 0,), forcing = Forcing(), numerical_fluxes = ( first_order = RusanovNumericalFlux(), second_order = CentralNumericalFluxSecondOrder(), gradient = CentralNumericalFluxGradient(), ), timestepper = ClimateMachine.ExplicitSolverType( solver_method = LS3NRK33Heuns, ), filters = nothing, modeldata = NamedTuple(), array_type = Settings.array_type, mpicomm = MPI.COMM_WORLD, init_on_cpu = true, boundary_tags = ((0, 0), (0, 0), (1, 2)), boundary_conditions = ( OceanBC(Impenetrable(FreeSlip()), Insulating()), OceanBC(Penetrable(FreeSlip()), Insulating()), ), ) ##### ##### Build the grid ##### # Change global setting if its set here Settings.array_type = array_type grid = DiscontinuousSpectralElementGrid( domain; boundary_tags = boundary_tags, mpicomm = mpicomm, array_type = array_type, ) FT = eltype(domain) ##### ##### Construct generic problem type InitialValueProblem ##### problem = InitialValueProblem{FT}( dimensions = (domain.L.x, domain.L.y, domain.L.z), initial_conditions = initial_conditions, boundary_conditions = boundary_conditions, ) ##### ##### Build HydrostaticBoussinesqModel/Equations ##### equations = HydrostaticBoussinesqModel{eltype(domain)}( parameters, problem, momentum_advection = advection.momentum, tracer_advection = advection.tracers, forcing = forcing, cʰ = convert(FT, rusanov_wave_speeds.cʰ), cᶻ = convert(FT, rusanov_wave_speeds.cᶻ), αᵀ = convert(FT, buoyancy.αᵀ), νʰ = convert(FT, turbulence_closure.νʰ), νᶻ = convert(FT, turbulence_closure.νᶻ), κʰ = convert(FT, turbulence_closure.κʰ), κᶻ = convert(FT, turbulence_closure.κᶻ), fₒ = 
convert(FT, coriolis.f₀), β = FT(coriolis.β), ) #### #### "modeldata" #### #### OceanModels require filters (?). If one was not provided, we build a default. #### # Default vertical filter and horizontal exponential filter: if isnothing(filters) filters = ( vert_filter = CutoffFilter(grid, polynomialorders(grid)), exp_filter = ExponentialFilter(grid, 1, 8), ) end modeldata = merge(modeldata, filters) #### #### We build a DriverConfiguration here for the purposes of building #### a SolverConfiguration. Then we throw it away. #### driver_configuration = DriverConfiguration( OceanBoxGCMConfigType(), "", (domain.Np, domain.Np), eltype(domain), array_type, parameter_set(equations), equations, MPI.COMM_WORLD, grid, numerical_fluxes.first_order, numerical_fluxes.second_order, numerical_fluxes.gradient, nothing, nothing, # filter OceanBoxGCMSpecificInfo(), ) #### #### Pass through the SolverConfiguration interface so that we use #### the checkpointing infrastructure #### solver_configuration = ClimateMachine.SolverConfiguration( zero(FT), convert(FT, time_step), driver_configuration, init_on_cpu = init_on_cpu, ode_dt = convert(FT, time_step), ode_solver_type = timestepper, Courant_number = 0.4, modeldata = modeldata, ) state = solver_configuration.Q u = SpectralElementField(domain, grid, state, 1) v = SpectralElementField(domain, grid, state, 2) η = SpectralElementField(domain, grid, state, 3) θ = SpectralElementField(domain, grid, state, 4) fields = (u = u, v = v, η = η, θ = θ) return HydrostaticBoussinesqSuperModel( domain, grid, equations, state, fields, numerical_fluxes, timestepper, solver_configuration, ) end current_time(model::HydrostaticBoussinesqSuperModel) = model.solver_configuration.solver.t Δt(model::HydrostaticBoussinesqSuperModel) = model.solver_configuration.solver.dt current_step(model::HydrostaticBoussinesqSuperModel) = model.solver_configuration.solver.steps end # module ================================================ FILE: 
src/Utilities/SingleStackUtils/SingleStackUtils.jl ================================================ module SingleStackUtils export get_vars_from_nodal_stack, get_vars_from_element_stack, get_horizontal_variance, get_horizontal_mean, reduce_nodal_stack, reduce_element_stack, horizontally_average!, dict_of_nodal_states, NodalStack, single_stack_diagnostics using OrderedCollections using UnPack using StaticArrays import KernelAbstractions: CPU using ..BalanceLaws using ..DGMethods using ..DGMethods.Grids using ..MPIStateArrays using ..VariableTemplates """ get_vars_from_nodal_stack( grid::DiscontinuousSpectralElementGrid{T, dim, N}, Q::MPIStateArray, vars; vrange::UnitRange = 1:size(Q, 3), i::Int = 1, j::Int = 1, exclude::Vector{String} = String[], interp = false, ) where {T, dim, N} Return a dictionary whose keys are the `flattenednames()` of the variables specified in `vars` (as returned by e.g. `vars_state`), and whose values are arrays of the values for that variable along the vertical dimension in `Q`. Only a single element is expected in the horizontal as this is intended for the single stack configuration and `i` and `j` identify the horizontal nodal coordinates. Variables listed in `exclude` are skipped. """ function get_vars_from_nodal_stack( grid::DiscontinuousSpectralElementGrid{T, dim, N}, Q::MPIStateArray, vars; vrange::UnitRange = 1:size(Q, 3), i::Int = 1, j::Int = 1, exclude::Vector{String} = String[], interp = false, ) where {T, dim, N} # extract grid information and bring `Q` to the host if needed FT = eltype(Q) Nq = N .+ 1 # Code assumes the same polynomial order in all horizontal directions @inbounds begin Nq1 = Nq[1] Nq2 = Nq[2] Nqk = dim == 2 ? 1 : Nq[dim] end Np = dofs_per_element(grid) state_data = array_device(Q) isa CPU ? 
Q.realdata : Array(Q.realdata) # set up the dictionary to be returned var_names = flattenednames(vars) stack_vals = OrderedDict() num_vars = varsize(vars) vars_wanted = Int[] @inbounds for vi in 1:num_vars if !(var_names[vi] in exclude) stack_vals[var_names[vi]] = FT[] push!(vars_wanted, vi) end end elemtobndy = convert(Array, grid.elemtobndy) vmap⁻ = convert(Array, grid.vmap⁻) vmap⁺ = convert(Array, grid.vmap⁺) vgeo = convert(Array, grid.vgeo) # extract values from `state_data` @inbounds for ev in vrange, k in 1:Nqk, v in vars_wanted if interp && k == 1 && elemtobndy[5, ev] == 0 # Get face degree of freedom number n = i + Nq1 * ((j - 1)) # get the element numbers ev⁻ = ev # Get neighboring id data id⁻, id⁺ = vmap⁻[n, 5, ev⁻], vmap⁺[n, 5, ev⁻] ev⁺ = ((id⁺ - 1) ÷ Np) + 1 # get the volume degree of freedom numbers vid⁻, vid⁺ = ((id⁻ - 1) % Np) + 1, ((id⁺ - 1) % Np) + 1 J⁻, J⁺ = vgeo[vid⁻, Grids._M, ev⁻], vgeo[vid⁺, Grids._M, ev⁺] state_local = J⁻ * state_data[vid⁻, v, ev⁻] state_local += J⁺ * state_data[vid⁺, v, ev⁺] state_local /= (J⁻ + J⁺) push!(stack_vals[var_names[v]], state_local) elseif interp && k == Nqk && elemtobndy[6, ev] == 0 # Get face degree of freedom number n = i + Nq1 * ((j - 1)) # get the element numbers ev⁻ = ev # Get neighboring id data id⁻, id⁺ = vmap⁻[n, 6, ev⁻], vmap⁺[n, 6, ev⁻] # periodic and need to handle this point (otherwise handled above) if id⁺ == id⁻ vid⁻ = ((id⁻ - 1) % Np) + 1 state_local = state_data[vid⁻, v, ev⁻] push!(stack_vals[var_names[v]], state_local) end else ijk = i + Nq1 * ((j - 1) + Nq2 * (k - 1)) state_local = state_data[ijk, v, ev] push!(stack_vals[var_names[v]], state_local) end end return stack_vals end """ get_vars_from_element_stack( grid::DiscontinuousSpectralElementGrid{T, dim, N}, Q::MPIStateArray, vars; vrange::UnitRange = 1:size(Q, 3), exclude::Vector{String} = String[], interp = false, ) where {T, dim, N} Return an array of [`get_vars_from_nodal_stack()`](@ref)s whose dimensions are the number of nodal points per 
element in the horizontal plane. Variables listed in `exclude` are skipped.
"""
function get_vars_from_element_stack(
    grid::DiscontinuousSpectralElementGrid{T, dim, N},
    Q::MPIStateArray,
    vars;
    vrange::UnitRange = 1:size(Q, 3),
    exclude::Vector{String} = String[],
    interp = false,
) where {T, dim, N}
    # Build one nodal-stack dictionary per horizontal node (ii, jj) of
    # the element, forwarding every keyword unchanged.
    Nq = N .+ 1
    @inbounds Nq1, Nq2 = Nq[1], Nq[2]
    stacks = [
        get_vars_from_nodal_stack(
            grid,
            Q,
            vars,
            vrange = vrange,
            i = ii,
            j = jj,
            exclude = exclude,
            interp = interp,
        ) for ii in 1:Nq1, jj in 1:Nq2
    ]
    return stacks
end

"""
    get_horizontal_mean(
        grid::DiscontinuousSpectralElementGrid{T, dim, N},
        Q::MPIStateArray,
        vars;
        vrange::UnitRange = 1:size(Q, 3),
        exclude::Vector{String} = String[],
        interp = false,
    ) where {T, dim, N}

Return a dictionary whose keys are the `flattenednames()` of the variables
specified in `vars` (as returned by e.g. `vars_state`), and whose values are
arrays of the horizontal averages for that variable along the vertical
dimension in `Q`. Only a single element is expected in the horizontal as this
is intended for the single stack configuration.

Variables listed in `exclude` are skipped.
""" function get_horizontal_mean( grid::DiscontinuousSpectralElementGrid{T, dim, N}, Q::MPIStateArray, vars; vrange::UnitRange = 1:size(Q, 3), exclude::Vector{String} = String[], interp = false, ) where {T, dim, N} Nq = N .+ 1 @inbounds Nq1 = Nq[1] @inbounds Nq2 = Nq[2] vars_avg = OrderedDict() vars_sq = OrderedDict() for i in 1:Nq1 for j in 1:Nq2 vars_nodal = get_vars_from_nodal_stack( grid, Q, vars, vrange = vrange, i = i, j = j, exclude = exclude, interp = interp, ) vars_avg = merge(+, vars_avg, vars_nodal) end end map!(x -> x ./ Nq1 / Nq1, values(vars_avg)) return vars_avg end """ get_horizontal_variance( grid::DiscontinuousSpectralElementGrid{T, dim, N}, Q::MPIStateArray, vars; vrange::UnitRange = 1:size(Q, 3), exclude::Vector{String} = String[], interp = false, ) where {T, dim, N} Return a dictionary whose keys are the `flattenednames()` of the variables specified in `vars` (as returned by e.g. `vars_state`), and whose values are arrays of the horizontal variance for that variable along the vertical dimension in `Q`. Only a single element is expected in the horizontal as this is intended for the single stack configuration. Variables listed in `exclude` are skipped. 
""" function get_horizontal_variance( grid::DiscontinuousSpectralElementGrid{T, dim, N}, Q::MPIStateArray, vars; vrange::UnitRange = 1:size(Q, 3), exclude::Vector{String} = String[], interp = false, ) where {T, dim, N} Nq = N .+ 1 @inbounds Nq1 = Nq[1] @inbounds Nq2 = Nq[2] vars_avg = OrderedDict() vars_sq = OrderedDict() for i in 1:Nq1 for j in 1:Nq2 vars_nodal = get_vars_from_nodal_stack( grid, Q, vars, vrange = vrange, i = i, j = j, exclude = exclude, interp = interp, ) vars_nodal_sq = OrderedDict(vars_nodal) map!(x -> x .^ 2, values(vars_nodal_sq)) vars_avg = merge(+, vars_avg, vars_nodal) vars_sq = merge(+, vars_sq, vars_nodal_sq) end end map!(x -> (x ./ Nq1 / Nq1) .^ 2, values(vars_avg)) map!(x -> x ./ Nq1 / Nq1, values(vars_sq)) vars_var = merge(-, vars_sq, vars_avg) return vars_var end """ reduce_nodal_stack( op::Function, grid::DiscontinuousSpectralElementGrid{T, dim, N}, Q::MPIStateArray, vars::NamedTuple, var::String; vrange::UnitRange = 1:size(Q, 3), ) where {T, dim, N} Reduce `var` from `vars` within `Q` over all nodal points in the specified `vrange` of elements with `op`. Return a tuple `(result, z)` where `result` is the final value returned by `op` and `z` is the index within `vrange` where the `result` was determined. """ function reduce_nodal_stack( op::Function, grid::DiscontinuousSpectralElementGrid{T, dim, N}, Q::MPIStateArray, vars::Type, var::String; vrange::UnitRange = 1:size(Q, 3), i::Int = 1, j::Int = 1, ) where {T, dim, N} Nq = N .+ 1 @inbounds begin Nq1 = Nq[1] Nq2 = Nq[2] Nqk = dim == 2 ? 1 : Nq[dim] end var_names = flattenednames(vars) var_ind = findfirst(s -> s == var, var_names) if var_ind === nothing return end state_data = array_device(Q) isa CPU ? 
Q.realdata : Array(Q.realdata)
    z = vrange.start
    FT = eltype(state_data)
    # Initialize result with identity operation for operator
    result = if op isa typeof(+)
        zero(FT)
    elseif op isa typeof(*)
        one(FT)
    elseif op isa typeof(min)
        floatmax(FT)
    elseif op isa typeof(max)
        -floatmax(FT)
    else
        error("unknown operator: $op")
    end
    # Fold `op` over every vertical node of the (i, j) stack; `z` records the
    # element index at which the running result last changed (meaningful
    # mainly for min/max reductions).
    for ev in vrange
        for k in 1:Nqk
            ijk = i + Nq1 * ((j - 1) + Nq2 * (k - 1))
            new_result = op(result, state_data[ijk, var_ind, ev])
            if !isequal(new_result, result)
                result = new_result
                z = ev
            end
        end
    end
    return (result, z)
end

"""
    reduce_element_stack(
        op::Function,
        grid::DiscontinuousSpectralElementGrid{T, dim, N},
        Q::MPIStateArray,
        vars::NamedTuple,
        var::String;
        vrange::UnitRange = 1:size(Q, 3),
    ) where {T, dim, N}

Reduce `var` from `vars` within `Q` over all nodal points in the specified
`vrange` of elements with `op`. Return a tuple `(result, z)` where `result` is
the final value returned by `op` and `z` is the index within `vrange` where the
`result` was determined.
"""
function reduce_element_stack(
    op::Function,
    grid::DiscontinuousSpectralElementGrid{T, dim, N},
    Q::MPIStateArray,
    vars::Type,
    var::String;
    vrange::UnitRange = 1:size(Q, 3),
) where {T, dim, N}
    Nq = N .+ 1
    @inbounds Nq1 = Nq[1]
    @inbounds Nq2 = Nq[2]
    # One nodal-stack reduction per horizontal node (i, j) of the element.
    return [
        reduce_nodal_stack(
            op,
            grid,
            Q,
            vars,
            var,
            vrange = vrange,
            i = i,
            j = j,
        ) for i in 1:Nq1, j in 1:Nq2
    ]
end

"""
    horizontally_average!(
        grid::DiscontinuousSpectralElementGrid{T, dim, N},
        Q::MPIStateArray,
        i_vars,
    ) where {T, dim, N}

Horizontally average variables, from variable indexes `i_vars`,
in `MPIStateArray` `Q`.

!!! note
    These are not proper horizontal averages-- the main purpose of
    this method is to ensure that there are no horizontal fluxes for a
    single stack configuration.
"""
function horizontally_average!(
    grid::DiscontinuousSpectralElementGrid{T, dim, N},
    Q::MPIStateArray,
    i_vars,
) where {T, dim, N}

    Nq = N .+ 1
    @inbounds begin
        Nq1 = Nq[1]
        Nq2 = Nq[2]
        Nqk = dim == 2 ?
1 : Nq[dim]
    end
    ArrType = typeof(Q.data)
    # Bring the state to the host when it lives on the device.
    state_data = array_device(Q) isa CPU ? Q.realdata : Array(Q.realdata)
    for ev in 1:size(state_data, 3), k in 1:Nqk, i_v in i_vars
        # Accumulate in the state's element type so the sum is
        # type-stable (previously initialized with the integer 0).
        Q_sum = zero(eltype(state_data))
        for i in 1:Nq1, j in 1:Nq2
            Q_sum += state_data[i + Nq1 * ((j - 1) + Nq2 * (k - 1)), i_v, ev]
        end
        Q_ave = Q_sum / (Nq1 * Nq2)
        # Overwrite every horizontal node at this (k, ev) with the average.
        for i in 1:Nq1, j in 1:Nq2
            ijk = i + Nq1 * ((j - 1) + Nq2 * (k - 1))
            state_data[ijk, i_v, ev] = Q_ave
        end
    end
    # Copy the averaged data back (converting to the device array type
    # when the computation was done on a host copy).
    Q.realdata .= ArrType(state_data)
end

# Map a state type to the corresponding global field in the solver config.
get_data(solver_config, ::Prognostic) = solver_config.Q
get_data(solver_config, ::Auxiliary) = solver_config.dg.state_auxiliary
get_data(solver_config, ::GradientFlux) = solver_config.dg.state_gradient_flux

"""
    dict_of_nodal_states(
        solver_config,
        state_types = (Prognostic(), Auxiliary());
        aux_excludes = String[],
        interp = false,
    )

A dictionary of single stack prognostic and auxiliary variables at the
`i=1`,`j=1` node given

 - `solver_config` a `SolverConfiguration`
 - `aux_excludes` a vector of strings containing the variables to exclude
   from the auxiliary state.
"""
function dict_of_nodal_states(
    solver_config,
    state_types = (Prognostic(), Auxiliary());
    aux_excludes = String[],
    interp = false,
)
    FT = eltype(solver_config.Q)
    all_state_vars = []
    for st in state_types
        state_vars = get_vars_from_nodal_stack(
            solver_config.dg.grid,
            get_data(solver_config, st),
            vars_state(solver_config.dg.balance_law, st, FT),
            # Exclusions only apply to the auxiliary state.
            exclude = st isa Auxiliary ? aux_excludes : String[],
            interp = interp,
        )
        push!(all_state_vars, state_vars...)
    end
    return OrderedDict(all_state_vars...)
end

# A container for holding various
# global or point-wise states:
struct States{P, A, D, HD}
    prog::P
    aux::A
    diffusive::D
    hyperdiffusive::HD
end

"""
    NodalStack(
        bl::BalanceLaw,
        grid::DiscontinuousSpectralElementGrid;
        prognostic,
        auxiliary,
        diffusive,
        hyperdiffusive,
        i = 1,
        j = 1,
        interp = true,
    )

A struct whose `iterate(::NodalStack)` traverses the
nodal stack and returns a NamedTuple of point-wise
fields (`Vars`).
# Example ```julia for state_local in NodalStack( bl, grid, prognostic, # global field along nodal stack auxiliary, diffusive, hyperdiffusive ) prog = state_local.prog # point-wise field along nodal stack end ``` ## TODO: Make `prognostic`, `auxiliary`, `diffusive`, `hyperdiffusive` optional # Arguments - `bl` the balance law - `grid` the discontinuous spectral element grid - `prognostic` the global prognostic state - `auxiliary` the global auxiliary state - `diffusive` the global diffusive state (gradient-flux) - `hyperdiffusive` the global hyperdiffusive state - `i,j` the `i,j`'th nodal stack (in the horizontal directions) - `interp` a bool indicating whether to interpolate the duplicate Gauss-Lebotto points at the element faces. !!! warn Before iterating, the data is transferred from the device (GPU) to the host (CPU), as this is intended for debugging / diagnostics usage. """ struct NodalStack{N, BL, G, S, VR, TI, TJ, CI, IN} bl::BL grid::G states::S vrange::VR i::TI j::TJ cart_ind::CI interp::IN function NodalStack( bl::BalanceLaw, grid::DiscontinuousSpectralElementGrid; prognostic, auxiliary, diffusive, hyperdiffusive, i = 1, j = 1, interp = true, ) states = States(prognostic, auxiliary, diffusive, hyperdiffusive) vrange = 1:size(prognostic, 3) grid_info = basic_grid_info(grid) @unpack Nqk = grid_info if last(polynomialorders(grid)) == 0 interp = false end # Store cartesian indices, so we can map the iter_state # to the cartesian space `Q[i, var, j]` if interp cart_ind = CartesianIndices(((Nqk - 1), size(prognostic, 3))) else cart_ind = CartesianIndices((Nqk, size(prognostic, 3))) end args = (bl, grid, states, vrange, i, j, cart_ind, interp) BL, G, S, VR, TI, TJ, CI, IN = typeof.(args) if interp len = size(prognostic, 3) * (Nqk - 1) + 1 else len = size(prognostic, 3) * Nqk end new{len, BL, G, S, VR, TI, TJ, CI, IN}(args...) 
end end Base.length(gs::NodalStack{N}) where {N} = N # Helper function get_state(v, state, vid⁻, vid⁺, ev⁻, ev⁺, J⁻, J⁺) = (J⁻ * state[vid⁻, v, ev⁻] + J⁺ * state[vid⁺, v, ev⁺]) / (J⁻ + J⁺) to_cpu(state) = array_device(state) isa CPU ? state.realdata : Array(state.realdata) function interp_top(state, args, n_vars, bl, st) vs = Vars{vars_state(bl, st, eltype(state))} if n_vars ≠ 0 return vs(map(v -> get_state(v, state, args...), 1:n_vars)) else return nothing end end function interp_bot(state, vid⁻, ev⁻, n_vars, bl, st) vs = Vars{vars_state(bl, st, eltype(state))} if n_vars ≠ 0 return vs(map(v -> state[vid⁻, v, ev⁻], 1:n_vars)) else return nothing end end function no_interp(state, ijk, ev, n_vars, bl, st) vs = Vars{vars_state(bl, st, eltype(state))} if n_vars ≠ 0 return vs(map(v -> state[ijk, v, ev], 1:n_vars)) else return nothing end end function Base.iterate(gs::NodalStack, iter_state = 1) iter_state > length(gs) && return nothing # extract grid information grid = gs.grid FT = eltype(grid) grid_info = basic_grid_info(grid) @unpack N, Nq, Np, Nqk = grid_info @inbounds Nq1, Nq2 = Nq[1], Nq[2] states = gs.states bl = gs.bl interp = gs.interp # bring `Q` to the host if needed prognostic = to_cpu(states.prog) auxiliary = to_cpu(states.aux) diffusive = to_cpu(states.diffusive) hyperdiffusive = to_cpu(states.hyperdiffusive) n_vars_prog = size(prognostic, 2) n_vars_aux = size(auxiliary, 2) n_vars_diff = size(diffusive, 2) n_vars_hd = size(hyperdiffusive, 2) elemtobndy = convert(Array, grid.elemtobndy) vmap⁻ = convert(Array, grid.vmap⁻) vmap⁺ = convert(Array, grid.vmap⁺) vgeo = convert(Array, grid.vgeo) i, j = gs.i, gs.j if iter_state == length(gs) ijk_cart = (Nqk, size(states.prog, 3)) else ijk_cart = Tuple(gs.cart_ind[iter_state]) end ev = ijk_cart[2] k = ijk_cart[1] iter_state⁺ = iter_state + 1 if interp && k == 1 && elemtobndy[5, ev] == 0 # Get face degree of freedom number n = i + Nq1 * ((j - 1)) # get the element numbers ev⁻ = ev # Get neighboring id data id⁻, id⁺ = 
vmap⁻[n, 5, ev⁻], vmap⁺[n, 5, ev⁻] ev⁺ = ((id⁺ - 1) ÷ Np) + 1 # get the volume degree of freedom numbers vid⁻, vid⁺ = ((id⁻ - 1) % Np) + 1, ((id⁺ - 1) % Np) + 1 J⁻, J⁺ = vgeo[vid⁻, Grids._M, ev⁻], vgeo[vid⁺, Grids._M, ev⁺] args = (vid⁻, vid⁺, ev⁻, ev⁺, J⁻, J⁺) #! format: off prog = interp_top(prognostic, args, n_vars_prog, bl, Prognostic()) aux = interp_top(auxiliary, args, n_vars_aux, bl, Auxiliary()) ∇flux = interp_top(diffusive, args, n_vars_diff, bl, GradientFlux()) hyperdiff = interp_top(hyperdiffusive, args, n_vars_hd, bl, Hyperdiffusive()) #! format: on return ((; prog, aux, ∇flux, hyperdiff), iter_state⁺) elseif interp && k == Nqk && elemtobndy[6, ev] == 0 # Get face degree of freedom number n = i + Nq1 * ((j - 1)) # get the element numbers ev⁻ = ev # Get neighboring id data id⁻, id⁺ = vmap⁻[n, 6, ev⁻], vmap⁺[n, 6, ev⁻] # periodic and need to handle this point (otherwise handled above) if id⁺ == id⁻ vid⁻ = ((id⁻ - 1) % Np) + 1 #! format: off prog = interp_bot(prognostic, vid⁻, ev⁻, n_vars_prog, bl, Prognostic()) aux = interp_bot(auxiliary, vid⁻, ev⁻, n_vars_aux, bl, Auxiliary()) ∇flux = interp_bot(diffusive, vid⁻, ev⁻, n_vars_diff, bl, GradientFlux()) hyperdiff = interp_bot( hyperdiffusive, vid⁻, ev⁻, n_vars_hd, bl, Hyperdiffusive()) #! format: on return ((; prog, aux, ∇flux, hyperdiff), iter_state⁺) else error("uncaught case in iterate(::NodalStack)") end else ijk = i + Nq1 * ((j - 1) + Nq2 * (k - 1)) #! format: off prog = no_interp(prognostic, ijk, ev, n_vars_prog, bl, Prognostic()) aux = no_interp(auxiliary, ijk, ev, n_vars_aux, bl, Auxiliary()) ∇flux = no_interp(diffusive, ijk, ev, n_vars_diff, bl, GradientFlux()) hyperdiff = no_interp(hyperdiffusive, ijk, ev, n_vars_hd, bl, Hyperdiffusive()) #! 
format: on return ((; prog, aux, ∇flux, hyperdiff), iter_state⁺) end end include("single_stack_diagnostics.jl") end # module ================================================ FILE: src/Utilities/SingleStackUtils/single_stack_diagnostics.jl ================================================ using ..Orientations import ..VariableTemplates: flattened_named_tuple using ..VariableTemplates # Sometimes `NodalStack` returns local states # that is `nothing`. Here, we return `nothing` # to preserve the keys (e.g., `hyperdiff`) # when misssing. flattened_named_tuple(v::Nothing, ft::FlattenType = FlattenArr()) = nothing """ single_stack_diagnostics( grid::DiscontinuousSpectralElementGrid, bl::BalanceLaw, t::Real, direction; kwargs..., ) # Arguments - `grid` the grid - `bl` the balance law - `t` time - `direction` direction - `kwargs` keyword arguments, passed to [`NodalStack`](@ref). An array of nested NamedTuples, containing results of - `z` - altitude - `prog` - the prognostic state - `aux` - the auxiliary state - `∇flux` - the gradient-flux (diffusive) state - `hyperdiff` - the hyperdiffusive state and all the nested NamedTuples, merged together, from the `precompute` methods. 
""" function single_stack_diagnostics( grid::DiscontinuousSpectralElementGrid, bl::BalanceLaw, t::Real, direction; kwargs..., ) return [ begin @unpack prog, aux, ∇flux, hyperdiff = local_states diffusive = ∇flux state = prog hyperdiffusive = hyperdiff _args_fx1 = (; state, aux, t, direction) _args_fx2 = (; state, aux, t, diffusive, hyperdiffusive) _args_src = (; state, aux, t, direction, diffusive) cache_fx1 = precompute(bl, _args_fx1, Flux{FirstOrder}()) cache_fx2 = precompute(bl, _args_fx2, Flux{SecondOrder}()) cache_src = precompute(bl, _args_src, Source()) z = altitude(bl, aux) nt = (; z = altitude(bl, aux), prog = flattened_named_tuple(prog), # Vars -> flattened NamedTuples aux = flattened_named_tuple(aux), # Vars -> flattened NamedTuples ∇flux = flattened_named_tuple(∇flux), # Vars -> flattened NamedTuples hyperdiff = flattened_named_tuple(hyperdiff), # Vars -> flattened NamedTuples cache_fx1, cache_fx2, cache_src, ) # Flatten top level: flattened_named_tuple(nt) end for local_states in NodalStack(bl, grid; kwargs...) ] end ================================================ FILE: src/Utilities/TicToc/TicToc.jl ================================================ """ TicToc -- timing measurement Low-overhead time interval measurement via minimally invasive macros. 
"""
module TicToc

using Printf

export @tic, @toc, tictoc

# explicitly enable due to issues with pre-compilation
const tictoc_enabled = false

# disable to reduce overhead
const tictoc_track_memory = true

if tictoc_track_memory
    # Per-interval accumulator, including GC/allocation tracking.
    mutable struct TimingInfo
        ncalls::Int
        time::UInt64
        allocd::Int64
        gctime::UInt64
        curr::UInt64
        mem::Base.GC_Num
    end
    TimingInfo() = TimingInfo(
        0,
        0,
        0,
        0,
        0,
        Base.GC_Num(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
    )
else # !tictoc_track_memory
    # Per-interval accumulator, timing only.
    mutable struct TimingInfo
        ncalls::Int
        time::UInt64
        curr::UInt64
    end
    TimingInfo() = TimingInfo(0, 0, 0)
end # if tictoc_track_memory

const timing_infos = TimingInfo[]
const timing_info_names = Symbol[]
const atexit_function_registered = Ref(false)

# `@tic` helper
function _tic(nm)
    @static if !tictoc_enabled
        return quote end
    end
    exti = Symbol("tictoc__", nm)
    global timing_info_names
    if exti in timing_info_names
        err_ex = quote
            throw(ArgumentError("$(nm) already used in @tic"))
        end
    else
        err_ex = quote end
        # Record the name only on first use. Pushing unconditionally (as
        # before) let duplicate `@tic` names desynchronize
        # `timing_info_names` from `timing_infos` (which `tictoc()` only
        # populates once per name), causing a BoundsError in
        # `print_timing_info`.
        push!(timing_info_names, exti)
    end
    @static if tictoc_track_memory
        quote
            $(err_ex)
            global $(exti)
            $(exti).curr = time_ns()
            $(exti).mem = Base.gc_num()
        end
    else
        quote
            $(err_ex)
            global $(exti)
            $(exti).curr = time_ns()
        end
    end
end

"""
    @tic nm

Indicate the start of the interval `nm`.
"""
macro tic(args...)
    na = length(args)
    if na != 1
        throw(ArgumentError("wrong number of arguments in @tic"))
    end
    ex = args[1]
    if !isa(ex, Symbol)
        throw(ArgumentError("need a name argument to @tic"))
    end
    return _tic(ex)
end

# `@toc` helper
function _toc(nm)
    @static if !tictoc_enabled
        return quote end
    end
    exti = Symbol("tictoc__", nm)
    @static if tictoc_track_memory
        quote
            global $(exti)
            $(exti).time += time_ns() - $(exti).curr
            $(exti).ncalls += 1
            local diff = Base.GC_Diff(Base.gc_num(), $(exti).mem)
            $(exti).allocd += diff.allocd
            $(exti).gctime += diff.total_time
        end
    else
        quote
            global $(exti)
            $(exti).time += time_ns() - $(exti).curr
            $(exti).ncalls += 1
        end
    end
end

"""
    @toc nm

Indicate the end of the interval `nm`.
"""
macro toc(args...)
    na = length(args)
    if na != 1
        throw(ArgumentError("wrong number of arguments in @toc"))
    end
    ex = args[1]
    if !isa(ex, Symbol)
        throw(ArgumentError("need a name argument to @toc"))
    end
    return _toc(ex)
end

"""
    print_timing_info()

`atexit()` function, writes all information about every interval to
`stdout`.
"""
function print_timing_info()
    println("TicToc timing information")
    @static if tictoc_track_memory
        println("name,ncalls,tottime(ns),allocbytes,gctime")
    else
        println("name,ncalls,tottime(ns)")
    end
    for i in 1:length(timing_info_names)
        @static if tictoc_track_memory
            s = @sprintf(
                "%s,%d,%d,%d,%d",
                timing_info_names[i],
                timing_infos[i].ncalls,
                timing_infos[i].time,
                timing_infos[i].allocd,
                timing_infos[i].gctime
            )
        else
            s = @sprintf(
                "%s,%d,%d",
                timing_info_names[i],
                timing_infos[i].ncalls,
                timing_infos[i].time
            )
        end
        println(s)
    end
end

"""
    tictoc()

Call at program start (only once!) to set up the globals used by the
macros and to register the at-exit callback.
"""
function tictoc()
    @static if !tictoc_enabled
        return 0
    end
    global timing_info_names
    for nm in timing_info_names
        exti = Symbol(nm)
        # Skip names whose `TimingInfo` global was already created.
        isdefined(@__MODULE__, exti) && continue
        expr = quote
            const $exti = $TimingInfo()
        end
        eval(Expr(:toplevel, expr))
        push!(timing_infos, getfield(@__MODULE__, exti))
    end
    if parse(Int, get(ENV, "TICTOC_PRINT_RESULTS", "0")) == 1
        if !atexit_function_registered[]
            atexit(print_timing_info)
            atexit_function_registered[] = true
        end
    end
    return length(timing_info_names)
end

end # module

================================================
FILE: src/Utilities/VariableTemplates/VariableTemplates.jl
================================================

module VariableTemplates

export varsize, Vars, Grad, @vars, varsindex, varsindices

using StaticArrays
using LinearAlgebra

"""
    varsindex(S, p::Symbol, [sp::Symbol...])

Return a range of indices corresponding to the property `p` and
(optionally) its subproperties `sp` based on the template type `S`.
# Examples ```julia-repl julia> S = @vars(x::Float64, y::Float64) julia> varsindex(S, :y) 2:2 julia> S = @vars(x::Float64, y::@vars(α::Float64, β::SVector{3, Float64})) julia> varsindex(S, :y, :β) 3:5 ``` """ function varsindex(::Type{S}, insym::Symbol) where {S <: NamedTuple} offset = 0 for varsym in fieldnames(S) T = fieldtype(S, varsym) if T <: Real offset += 1 varrange = offset:offset elseif T <: SHermitianCompact LT = StaticArrays.lowertriangletype(T) N = length(LT) varrange = offset .+ (1:N) offset += N elseif T <: StaticArray N = length(T) varrange = offset .+ (1:N) offset += N else varrange = offset .+ (1:varsize(T)) offset += varsize(T) end if insym == varsym return varrange end end error("symbol '$insym' not found") end # return `Symbol`s unchanged. wrap_val(sym) = sym # wrap integer in `Val` wrap_val(i::Int) = Val(i) # We enforce that calls to `varsindex` on # an `NTuple` must be unrapped in `Val`. # This is enforced to synchronize failures # on the CPU and GPU, rather than allowing # CPU-working and GPU-breaking versions. # This means that users _must_ wrap `sym` # in `Val`, which can be done with `wrap_val` # above. Base.@propagate_inbounds function varsindex( ::Type{S}, sym::Symbol, rest..., ) where {S <: Union{NamedTuple, Tuple}} vi = varsindex(fieldtype(S, sym), rest...) return varsindex(S, sym)[vi] end Base.@propagate_inbounds function varsindex( ::Type{S}, ::Val{i}, rest..., ) where {i, S <: Union{NamedTuple, Tuple}} et = eltype(S) offset = (i - 1) * varsize(et) vi = varsindex(et, rest...) return (vi.start + offset):(vi.stop + offset) end Base.@propagate_inbounds function varsindex( ::Type{S}, ::Val{i}, ) where {i, S <: SArray} return i:i end """ varsindices(S, ps::Tuple) varsindices(S, ps...) Return a tuple of indices corresponding to the properties specified by `ps` based on the template type `S`. Properties can be specified using either symbols or strings. 
# Examples ```julia-repl julia> S = @vars(x::Float64, y::Float64, z::Float64) julia> varsindices(S, (:x, :z)) (1, 3) julia> S = @vars(x::Float64, y::@vars(α::Float64, β::SVector{3, Float64})) julia> varsindices(S, "x", "y.β") (1, 3, 4, 5) ``` """ function varsindices(::Type{S}, vars::Tuple) where {S <: NamedTuple} indices = Int[] for var in vars splitvar = split(string(var), '.') append!(indices, collect(varsindex(S, map(Symbol, splitvar)...))) end Tuple(indices) end varsindices(::Type{S}, vars...) where {S <: NamedTuple} = varsindices(S, vars) """ varsize(S) The number of elements specified by the template type `S`. """ varsize(::Type{T}) where {T <: Real} = 1 varsize(::Type{Tuple{}}) = 0 varsize(::Type{NamedTuple{(), Tuple{}}}) = 0 varsize(::Type{SArray{S, T, N} where L}) where {S, T, N} = prod(S.parameters) include("var_names.jl") # TODO: should be possible to get rid of @generated @generated function varsize(::Type{S}) where {S} types = fieldtypes(S) isempty(types) ? 0 : sum(varsize, types) end function process_vars!(syms, typs, expr) if expr isa LineNumberNode return elseif expr isa Expr && expr.head == :block for arg in expr.args process_vars!(syms, typs, arg) end return elseif expr.head == :(::) push!(syms, expr.args[1]) push!(typs, expr.args[2]) return else error("Invalid expression") end end """ @vars(var1::Type1, var2::Type2) A convenient syntax for describing a `NamedTuple` type. # Example ```julia julia> @vars(a::Float64, b::Float64) NamedTuple{(:a, :b),Tuple{Float64,Float64}} ``` """ macro vars(args...) syms = Any[] typs = Any[] for arg in args process_vars!(syms, typs, arg) end :(NamedTuple{$(tuple(syms...)), Tuple{$(esc.(typs)...)}}) end struct GetVarError <: Exception sym::Symbol end struct SetVarError <: Exception sym::Symbol end abstract type AbstractVars{S, A, offset} end """ Vars{S,A,offset}(array::A) Defines property overloading for `array` using the type `S` as a template. `offset` is used to shift the starting element of the array. 
""" struct Vars{S, A, offset} <: AbstractVars{S, A, offset} array::A end Vars{S}(array) where {S} = Vars{S, typeof(array), 0}(array) Base.parent(v::AbstractVars) = getfield(v, :array) Base.eltype(v::AbstractVars) = eltype(parent(v)) Base.propertynames(::AbstractVars{S}) where {S} = fieldnames(S) Base.similar(v::AbstractVars) = typeof(v)(similar(parent(v))) @generated function Base.getproperty( v::Vars{S, A, offset}, sym::Symbol, ) where {S, A, offset} expr = quote Base.@_inline_meta array = parent(v) end for k in fieldnames(S) T = fieldtype(S, k) if T <: Real retexpr = :($T(array[$(offset + 1)])) offset += 1 elseif T <: SHermitianCompact LT = StaticArrays.lowertriangletype(T) N = length(LT) retexpr = :($T($LT($([:(array[$(offset + i)]) for i in 1:N]...)))) offset += N elseif T <: StaticArray N = length(T) retexpr = :($T($([:(array[$(offset + i)]) for i in 1:N]...))) offset += N else retexpr = :(Vars{$T, A, $offset}(array)) offset += varsize(T) end push!(expr.args, :( if sym == $(QuoteNode(k)) return @inbounds $retexpr end )) end push!(expr.args, :(throw(GetVarError(sym)))) expr end @generated function Base.setproperty!( v::Vars{S, A, offset}, sym::Symbol, val, ) where {S, A, offset} expr = quote Base.@_inline_meta array = parent(v) end for k in fieldnames(S) T = fieldtype(S, k) if T <: Real retexpr = :(array[$(offset + 1)] = val) offset += 1 elseif T <: SHermitianCompact LT = StaticArrays.lowertriangletype(T) N = length(LT) retexpr = :( array[($(offset + 1)):($(offset + N))] .= $T(val).lowertriangle ) offset += N elseif T <: StaticArray N = length(T) retexpr = :(array[($(offset + 1)):($(offset + N))] .= val[:]) offset += N else offset += varsize(T) continue end push!(expr.args, :( if sym == $(QuoteNode(k)) return @inbounds $retexpr end )) end push!(expr.args, :(throw(SetVarError(sym)))) expr end """ Grad{S,A,offset}(array::A) Defines property overloading along slices of the second dimension of `array` using the type `S` as a template. 
`offset` is used to shift the starting element of the array. """ struct Grad{S, A, offset} <: AbstractVars{S, A, offset} array::A end Grad{S}(array) where {S} = Grad{S, typeof(array), 0}(array) @generated function Base.getproperty( v::Grad{S, A, offset}, sym::Symbol, ) where {S, A, offset} if A <: SubArray M = size(fieldtype(A, 1), 1) else M = size(A, 1) end expr = quote Base.@_inline_meta array = parent(v) end for k in fieldnames(S) T = fieldtype(S, k) if T <: Real retexpr = :(SVector{$M, $T}( $([:(array[$i, $(offset + 1)]) for i in 1:M]...), )) offset += 1 elseif T <: StaticArray N = length(T) retexpr = :(SMatrix{$M, $N, $(eltype(T))}( $([:(array[$i, $(offset + j)]) for i in 1:M, j in 1:N]...), )) offset += N else retexpr = :(Grad{$T, A, $offset}(array)) offset += varsize(T) end push!(expr.args, :( if sym == $(QuoteNode(k)) return @inbounds $retexpr end )) end push!(expr.args, :(throw(GetVarError(sym)))) expr end @generated function Base.setproperty!( v::Grad{S, A, offset}, sym::Symbol, val::AbstractArray, ) where {S, A, offset} if A <: SubArray M = size(fieldtype(A, 1), 1) else M = size(A, 1) end expr = quote Base.@_inline_meta array = parent(v) end for k in fieldnames(S) T = fieldtype(S, k) if T <: Real retexpr = :(array[:, $(offset + 1)] = val) offset += 1 elseif T <: StaticArray N = length(T) retexpr = :( array[ :, # static range is used here to force dispatch to # StaticArrays setindex! because generic setindex! is slow StaticArrays.SUnitRange($(offset + 1), $(offset + N)), ] = val ) offset += N else offset += varsize(T) continue end push!(expr.args, :( if sym == $(QuoteNode(k)) return @inbounds $retexpr end )) end push!(expr.args, :(throw(SetVarError(sym)))) expr end export unroll_map, @unroll_map """ @unroll_map(f::F, N::Int, args...) where {F} unroll_map(f::F, N::Int, args...) where {F} Unroll N-expressions and wrap arguments in `Val`. """ @generated function unroll_map(f::F, ::Val{N}, args...) 
where {F, N} quote Base.@_inline_meta Base.Cartesian.@nexprs $N i -> f(Val(i), args...) end end macro unroll_map(func, N, args...) @assert func.head == :(->) body = func.args[2] pushfirst!(body.args, :(Base.@_inline_meta)) quote $unroll_map($(esc(func)), Val($(esc(N))), $(esc(args))...) end end export vuntuple """ vuntuple(f::F, N::Int) Val-Unroll ntuple: wrap `ntuple` arguments in `Val` for unrolling. """ vuntuple(f::F, N::Int) where {F} = ntuple(i -> f(Val(i)), Val(N)) # Inside unroll_map expressions, all indexes `i` # are wrapped in `Val`, so we must redirect # these methods: Base.@propagate_inbounds Base.getindex(t::Tuple, ::Val{i}) where {i} = Base.getindex(t, i) Base.@propagate_inbounds Base.getindex(a::SArray, ::Val{i}) where {i} = Base.getindex(a, i) Base.@propagate_inbounds function Base.getindex( v::Vars{NTuple{N, T}, A, offset}, ::Val{i}, ) where {N, T, A, offset, i} # 1 <= i <= N return Vars{T, A, offset + (i - 1) * varsize(T)}(parent(v)) end Base.@propagate_inbounds function Base.getindex( v::Grad{NTuple{N, T}, A, offset}, ::Val{i}, ) where {N, T, A, offset, i} # 1 <= i <= N return Grad{T, A, offset + (i - 1) * varsize(T)}(parent(v)) end """ getpropertyorindex An interchangeably and nested-friendly `getproperty`/`getindex`. 
""" function getpropertyorindex end # Redirect to Base getproperty/getindex: Base.@propagate_inbounds getpropertyorindex(t::Tuple, ::Val{i}) where {i} = Base.getindex(t, i) Base.@propagate_inbounds getpropertyorindex( a::AbstractArray, ::Val{i}, ) where {i} = Base.getindex(a, i) Base.@propagate_inbounds getpropertyorindex(v::AbstractVars, s::Symbol) = Base.getproperty(v, s) Base.@propagate_inbounds getpropertyorindex( v::AbstractVars, ::Val{i}, ) where {i} = Base.getindex(v, Val(i)) # Only one element left: Base.@propagate_inbounds getpropertyorindex( v::AbstractVars, t::Tuple{A}, ) where {A} = getpropertyorindex(v, t[1]) Base.@propagate_inbounds getpropertyorindex( a::AbstractArray, t::Tuple{A}, ) where {A} = getpropertyorindex(a, t[1]) # Peel first element from tuple and recurse: Base.@propagate_inbounds getpropertyorindex(v::AbstractVars, t::Tuple) = getpropertyorindex(getpropertyorindex(v, t[1]), Tuple(t[2:end])) # Redirect to getpropertyorindex: Base.@propagate_inbounds Base.getproperty(v::AbstractVars, tup_chain::Tuple) = getpropertyorindex(v, tup_chain) Base.@propagate_inbounds Base.getindex(v::AbstractVars, tup_chain::Tuple) = getpropertyorindex(v, tup_chain) include("flattened_tup_chain.jl") function Base.show(io::IO, v::AbstractVars) s = "$(nameof(typeof(v))) object:\n" for tup in flattened_tup_chain(v, RetainArr()) name = join(tup, "_") val = getproperty(v, wrap_val.(tup)) s *= " $name = $val\n" end print(io, s) end end # module ================================================ FILE: src/Utilities/VariableTemplates/flattened_tup_chain.jl ================================================ using LinearAlgebra export flattened_tup_chain, flattened_named_tuple, flattened_tuple export FlattenType, FlattenArr, RetainArr abstract type FlattenType end """ FlattenArr Flatten arrays in `flattened_tup_chain` and `flattened_named_tuple`. """ struct FlattenArr <: FlattenType end """ RetainArr Do _not_ flatten arrays in `flattened_tup_chain` and `flattened_named_tuple`. 
""" struct RetainArr <: FlattenType end # The Vars instance has many empty entries. # Keeping all of the keys results in many # duplicated values. So, it's best we # "prune" the tree by removing the keys: flattened_tup_chain( ::Type{NamedTuple{(), Tuple{}}}, ::FlattenType = FlattenArr(); prefix = (Symbol(),), ) = () flattened_tup_chain( ::Type{T}, ::FlattenType; prefix = (Symbol(),), ) where {T <: Real} = (prefix,) flattened_tup_chain( ::Type{T}, ::RetainArr; prefix = (Symbol(),), ) where {T <: SArray} = (prefix,) flattened_tup_chain( ::Type{T}, ::FlattenArr; prefix = (Symbol(),), ) where {T <: SArray} = ntuple(i -> (prefix..., i), length(T)) flattened_tup_chain( ::Type{T}, ::RetainArr; prefix = (Symbol(),), ) where {T <: SHermitianCompact} = (prefix,) flattened_tup_chain( ::Type{T}, ::FlattenType; prefix = (Symbol(),), ) where {T <: SHermitianCompact} = ntuple(i -> (prefix..., i), length(StaticArrays.lowertriangletype(T))) flattened_tup_chain( ::Type{T}, ::RetainArr; prefix = (Symbol(),), ) where {N, TA, T <: Diagonal{N, TA}} = (prefix,) flattened_tup_chain( ::Type{T}, ::FlattenArr; prefix = (Symbol(),), ) where {N, TA, T <: Diagonal{N, TA}} = ntuple(i -> (prefix..., i), length(TA)) flattened_tup_chain(::Type{T}, ::FlattenType; prefix = (Symbol(),)) where {T} = (prefix,) """ flattened_tup_chain(::Type{T}) where {T <: Union{NamedTuple,NTuple}} An array of tuples, containing symbols and integers for every combination of each field in the `Vars` array. """ function flattened_tup_chain( ::Type{T}, ft::FlattenType = FlattenArr(); prefix = (Symbol(),), ) where {T <: Union{NamedTuple, NTuple}} map(1:fieldcount(T)) do i Ti = fieldtype(T, i) name = fieldname(T, i) sname = name isa Int ? name : Symbol(name) flattened_tup_chain( Ti, ft; prefix = prefix == (Symbol(),) ? 
(sname,) : (prefix..., sname), ) end |> Iterators.flatten |> collect end flattened_tup_chain( ::AbstractVars{S}, ft::FlattenType = FlattenArr(), ) where {S} = flattened_tup_chain(S, ft) """ flattened_named_tuple A flattened NamedTuple, given a `Vars` or nested `NamedTuple` instance. # Example: ```julia using Test using ClimateMachine.VariableTemplates nt = (x = 1, a = (y = 2, z = 3, b = ((a = 1,), (a = 2,), (a = 3,)))); fnt = flattened_named_tuple(nt); @test keys(fnt) == (:x, :a_y, :a_z, :a_b_1_a, :a_b_2_a, :a_b_3_a) @test length(fnt) == 6 @test fnt.x == 1 @test fnt.a_y == 2 @test fnt.a_z == 3 @test fnt.a_b_1_a == 1 @test fnt.a_b_2_a == 2 @test fnt.a_b_3_a == 3 ``` """ function flattened_named_tuple end function flattened_named_tuple(v::AbstractVars, ft::FlattenType = FlattenArr()) ftc = flattened_tup_chain(v, ft) keys_ = Symbol.(join.(ftc, :_)) vals = map(x -> getproperty(v, wrap_val.(x)), ftc) length(keys_) == length(vals) || error("key-value mismatch") return (; zip(keys_, vals)...) end function flattened_named_tuple(nt::NamedTuple, ft::FlattenType = FlattenArr()) ftc = flattened_tup_chain(typeof(nt), ft) keys_ = Symbol.(join.(ftc, :_)) vals = flattened_tuple(ft, nt) length(keys_) == length(vals) || error("key-value mismatch") return (; zip(keys_, vals)...) end flattened_tuple(::FlattenArr, a::AbstractArray) = tuple(a...) flattened_tuple(::RetainArr, a::AbstractArray) = tuple(a) flattened_tuple(::FlattenArr, a::Diagonal) = tuple(a.diag...) flattened_tuple(::RetainArr, a::Diagonal) = tuple(a.diag) flattened_tuple(::FlattenArr, a::SHermitianCompact) = tuple(a.lowertriangle...) flattened_tuple(::RetainArr, a::SHermitianCompact) = tuple(a.lowertriangle) # when we splat an empty tuple `b` into `flattened_tuple(ft, b...)` flattened_tuple(::FlattenType) = () # for structs flattened_tuple(::FlattenType, a) = (a,) # Divide and conquer: flattened_tuple(ft::FlattenType, a, b...) = tuple(flattened_tuple(ft, a)..., flattened_tuple(ft, b...)...) 
# Tuples and NamedTuples are flattened by splatting into the
# divide-and-conquer method above.
flattened_tuple(ft::FlattenType, a::Tuple) = flattened_tuple(ft, a...)
flattened_tuple(ft::FlattenType, a::NamedTuple) = flattened_tuple(ft, Tuple(a))

================================================
FILE: src/Utilities/VariableTemplates/var_names.jl
================================================

export flattenednames

# NTuple fields are named by bracketed index, e.g. "prefix[3]".
flattenednames(nt::Type{NTuple{N, T}}; prefix = "") where {N, T} =
    Iterators.flatten([
        flattenednames(T; prefix = "$(prefix)[$i]") for i in 1:N
    ]) |> collect

# An empty NamedTuple contributes no names.
flattenednames(::Type{NamedTuple{(), Tuple{}}}; prefix = "") = ()

# A scalar leaf: the accumulated prefix is its full name.
flattenednames(::Type{T}; prefix = "") where {T <: Real} = (prefix,)

# Static arrays get one bracketed name per linear index.
flattenednames(::Type{T}; prefix = "") where {T <: SArray} =
    ntuple(i -> "$prefix[$i]", length(T))

# Symmetric compact storage: only lower-triangle entries are named,
# column by column, as "prefix[i,j]" with i >= j.
function flattenednames(::Type{T}; prefix = "") where {T <: SHermitianCompact}
    N = size(T, 1)
    [["$prefix[$i,$j]" for i in j:N] for j in 1:N] |>
    Iterators.flatten |>
    collect
end

# NamedTuples recurse into each field, joining names with '.'
# (top-level fields use the bare field name).
function flattenednames(::Type{T}; prefix = "") where {T <: NamedTuple}
    map(1:fieldcount(T)) do i
        Ti = fieldtype(T, i)
        name = fieldname(T, i)
        flattenednames(
            Ti,
            prefix = prefix == "" ?
string(name) : string(prefix, '.', name), ) end |> Iterators.flatten |> collect end ================================================ FILE: test/Arrays/basics.jl ================================================ using Test, MPI using ClimateMachine using ClimateMachine.MPIStateArrays ClimateMachine.init() const ArrayType = ClimateMachine.array_type() const mpicomm = MPI.COMM_WORLD @testset "MPIStateArray basics" begin Q = MPIStateArray{Float32}(mpicomm, ArrayType, 4, 6, 8) @test eltype(Q) == Float32 @test size(Q) == (4, 6, 8) fillval = 0.5f0 fill!(Q, fillval) ClimateMachine.gpu_allowscalar(true) @test Q[1] == fillval @test Q[2, 3, 4] == fillval @test Q[end] == fillval @test Array(Q) == fill(fillval, 4, 6, 8) Q[2, 3, 4] = 2fillval @test Q[2, 3, 4] != fillval ClimateMachine.gpu_allowscalar(false) Qp = copy(Q) @test typeof(Qp) == typeof(Q) @test eltype(Qp) == eltype(Q) @test size(Qp) == size(Q) @test Array(Qp) == Array(Q) Qp = similar(Q) @test typeof(Qp) == typeof(Q) @test eltype(Qp) == eltype(Q) @test size(Qp) == size(Q) copyto!(Qp, Q) @test Array(Qp) == Array(Q) end ================================================ FILE: test/Arrays/broadcasting.jl ================================================ using Test, MPI using ClimateMachine using ClimateMachine.MPIStateArrays ClimateMachine.init() const ArrayType = ClimateMachine.array_type() const mpicomm = MPI.COMM_WORLD @testset "MPIStateArray broadcasting" begin let localsize = (4, 6, 8) A = rand(Float32, localsize) B = rand(Float32, localsize) QA = MPIStateArray{Float32}(mpicomm, ArrayType, localsize...) 
QB = similar(QA) QA .= A QB .= B @test Array(QA) == A @test Array(QB) == B QC = QA .+ QB @test typeof(QC) == typeof(QA) C = Array(QC) @test C == A .+ B QC = QA .+ sqrt.(QB) C = Array(QC) @test C ≈ A .+ sqrt.(B) QC = QA .+ sqrt.(QB) .* exp.(QA .- QB .^ 2) C = Array(QC) @test C ≈ A .+ sqrt.(B) .* exp.(A .- B .^ 2) # writing to an existing array instead of creating a new one fill!(QC, 0) QC .= QA .+ sqrt.(QB) .* exp.(QA .- QB .^ 2) C = Array(QC) @test C ≈ A .+ sqrt.(B) .* exp.(A .- B .^ 2) end let numelems = 12 realelems = 1:7 ghostelems = 8:12 QA = MPIStateArray{Int}( mpicomm, ArrayType, 1, 1, numelems, realelems = realelems, ghostelems = ghostelems, ) QB = similar(QA) fill!(QA, 1) fill!(QB, 3) QB .= QA .+ QB @test all(Array(QB)[realelems] .== 4) @test all(Array(QB)[ghostelems] .== 3) end end ================================================ FILE: test/Arrays/mpi_comm.jl ================================================ using Test using MPI using ClimateMachine using ClimateMachine.MPIStateArrays using ClimateMachine.Mesh.BrickMesh using Pkg using KernelAbstractions ClimateMachine.init() const ArrayType = ClimateMachine.array_type() const comm = MPI.COMM_WORLD function main() crank = MPI.Comm_rank(comm) csize = MPI.Comm_size(comm) @assert csize == 3 if crank == 0 numreal = 4 numghost = 3 nabrtorank = [1, 2] sendelems = [1, 2, 3, 4, 1, 4] nabrtorecv = [1:2, 3:3] nabrtosend = [1:4, 5:6] vmaprecv = [ 37, 38, 39, 40, 42, 43, 44, 45, 46, 49, 52, 53, 54, 57, 60, 61, 62, 63, ] vmapsend = [3, 6, 9, 10, 11, 12, 19, 22, 25, 34, 35, 36, 1, 2, 3, 28, 31, 34] nabrtovmaprecv = [1:13, 14:18] nabrtovmapsend = [1:12, 13:18] expectedghostdata = [ 1001, 1002, 1003, 1004, 1006, 1007, 1008, 1009, 1010, 1013, 1016, 1017, 1018, 2003, 2006, 2007, 2008, 2009, ] elseif crank == 1 numreal = 2 numghost = 4 nabrtorank = [0] sendelems = [1, 2] nabrtorecv = [1:4] nabrtosend = [1:2] vmaprecv = [21, 24, 27, 28, 29, 30, 37, 40, 43, 52, 53, 54] vmapsend = [1, 2, 3, 4, 6, 7, 8, 9, 10, 13, 16, 17, 18] 
nabrtovmaprecv = [1:length(vmaprecv)] nabrtovmapsend = [1:length(vmapsend)] expectedghostdata = [3, 6, 9, 10, 11, 12, 19, 22, 25, 34, 35, 36] elseif crank == 2 numreal = 1 numghost = 2 nabrtorank = [0] sendelems = [1] nabrtorecv = [1:2] nabrtosend = [1:1] vmaprecv = [10, 11, 12, 19, 22, 25] vmapsend = [3, 6, 7, 8, 9] nabrtovmaprecv = [1:length(vmaprecv)] nabrtovmapsend = [1:length(vmapsend)] expectedghostdata = [1, 2, 3, 28, 31, 34] end numelem = numreal + numghost realelems = 1:numreal ghostelems = numreal .+ (1:numghost) weights = Array{Int64}(undef, (0, 0, 0)) A = MPIStateArray{Int64}( comm, ArrayType, 9, 2, numelem, realelems, ghostelems, ArrayType(vmaprecv), ArrayType(vmapsend), nabrtorank, nabrtovmaprecv, nabrtovmapsend, ArrayType(weights), ) Q = Array(A.data) Q .= -1 shift = 100 Q[:, 1, realelems] .= reshape((crank * 1000) .+ (1:(9 * numreal)), 9, numreal) Q[:, 2, realelems] .= reshape((crank * 1000) .+ shift .+ (1:(9 * numreal)), 9, numreal) copyto!(A.data, Q) event = Event(array_device(A)) event = MPIStateArrays.begin_ghost_exchange!(A; dependencies = event) event = MPIStateArrays.end_ghost_exchange!(A; dependencies = event) wait(array_device(A), event) Q = Array(A.data) @test all(expectedghostdata .== Q[:, 1, :][:][vmaprecv]) @test all(shift .+ expectedghostdata .== Q[:, 2, :][:][vmaprecv]) end main() ================================================ FILE: test/Arrays/reductions.jl ================================================ using Test, MPI using LinearAlgebra using ClimateMachine using ClimateMachine.MPIStateArrays ClimateMachine.init() const ArrayType = ClimateMachine.array_type() const mpicomm = MPI.COMM_WORLD mpisize = MPI.Comm_size(mpicomm) mpirank = MPI.Comm_rank(mpicomm) @testset "MPIStateArray reductions" begin localsize = (4, 6, 8) A = Array{Float32}(reshape(1:prod(localsize), localsize)) globalA = vcat([A for _ in 1:mpisize]...) QA = MPIStateArray{Float32}(mpicomm, ArrayType, localsize...) 
QA .= A

    @test norm(QA, 1) ≈ norm(globalA, 1)
    @test norm(QA) ≈ norm(globalA)
    @test norm(QA, Inf) ≈ norm(globalA, Inf)
    @test norm(QA; dims = (1, 3)) ≈ mapslices(norm, globalA; dims = (1, 3))
    @test norm(QA, 1; dims = (1, 3)) ≈
          mapslices(S -> norm(S, 1), globalA, dims = (1, 3))
    @test norm(QA, Inf; dims = (1, 3)) ≈
          mapslices(S -> norm(S, Inf), globalA, dims = (1, 3))

    B = Array{Float32}(reshape(reverse(1:prod(localsize)), localsize))
    globalB = vcat([B for _ in 1:mpisize]...)
    QB = similar(QA)
    QB .= B

    @test isapprox(euclidean_distance(QA, QB), norm(globalA .- globalB))
    @test isapprox(dot(QA, QB), dot(globalA, globalB))

    C = fill(Float32(mpirank + 1), localsize)
    globalC = vcat([fill(i, localsize) for i in 1:mpisize]...)
    QC = similar(QA)
    QC .= C

    @test sum(QC) == sum(globalC)
    @test Array(sum(QC; dims = (1, 3))) == sum(globalC; dims = (1, 3))
    @test maximum(QC) == maximum(globalC)
    @test Array(maximum(QC; dims = (1, 3))) == maximum(globalC; dims = (1, 3))
    @test minimum(QC) == minimum(globalC)
    @test Array(minimum(QC; dims = (1, 3))) == minimum(globalC; dims = (1, 3))
end

================================================
FILE: test/Arrays/runtests.jl
================================================
using Test

include(joinpath("..", "testhelpers.jl"))

# This driver launches all of the MPIStateArray unit tests (basics,
# broadcasting, reductions, varsindex), so the testset is labeled
# "MPIStateArrays" rather than "MPIStateArrays reductions".
# NOTE(review): mpi_comm.jl (ghost exchange; asserts exactly 3 MPI ranks)
# is not launched from here — confirm it is run by another test driver.
@testset "MPIStateArrays" begin
    runmpi(joinpath(@__DIR__, "basics.jl"))
    runmpi(joinpath(@__DIR__, "broadcasting.jl"))
    runmpi(joinpath(@__DIR__, "reductions.jl"))
    runmpi(joinpath(@__DIR__, "reductions.jl"), ntasks = 3)
    runmpi(joinpath(@__DIR__, "varsindex.jl"))
end

================================================
FILE: test/Arrays/varsindex.jl
================================================
using Test, MPI
using ClimateMachine
using ClimateMachine.MPIStateArrays
using ClimateMachine.MPIStateArrays: getstateview
using ClimateMachine.VariableTemplates: @vars, varsindex, varsindices
using StaticArrays

ClimateMachine.init()
const ArrayType = ClimateMachine.array_type()
const mpicomm = MPI.COMM_WORLD

const V = @vars begin
a::Float32 b::SVector{3, Float32} c::SMatrix{3, 8, Float32} d::Float32 e::@vars begin a::Float32 b::SVector{3, Float32} d::Float32 end end const VNT = @vars begin a::Float32 e::Tuple{ntuple(3) do i @vars(b::SVector{3, Float32}) end...} end @testset "MPIStateArray varsindex" begin # check with invalid vars size @test_throws ErrorException MPIStateArray{Float32, V}( mpicomm, ArrayType, 4, 1, 8, ) Q = MPIStateArray{Float32, V}(mpicomm, ArrayType, 4, 34, 8) @test Q.a === view(MPIStateArrays.realview(Q), :, 1:1, :) @test Q.b === view(MPIStateArrays.realview(Q), :, 2:4, :) @test Q.c === view(MPIStateArrays.realview(Q), :, 5:28, :) @test Q.d === view(MPIStateArrays.realview(Q), :, 29:29, :) @test Q.e === view(MPIStateArrays.realview(Q), :, 30:34, :) @test getstateview(Q, "a") === view(MPIStateArrays.realview(Q), :, 1:1, :) @test getstateview(Q, "b") === view(MPIStateArrays.realview(Q), :, 2:4, :) @test getstateview(Q, "c") === view(MPIStateArrays.realview(Q), :, 5:28, :) @test getstateview(Q, "d") === view(MPIStateArrays.realview(Q), :, 29:29, :) @test getstateview(Q, "e") === view(MPIStateArrays.realview(Q), :, 30:34, :) @test getstateview(Q, "e.a") === view(MPIStateArrays.realview(Q), :, 30:30, :) @test getstateview(Q, "e.b") === view(MPIStateArrays.realview(Q), :, 31:33, :) @test getstateview(Q, "e.d") === view(MPIStateArrays.realview(Q), :, 34:34, :) @test getstateview(Q, :(a)) === view(MPIStateArrays.realview(Q), :, 1:1, :) @test getstateview(Q, :(b)) === view(MPIStateArrays.realview(Q), :, 2:4, :) @test getstateview(Q, :(c)) === view(MPIStateArrays.realview(Q), :, 5:28, :) @test getstateview(Q, :(d)) === view(MPIStateArrays.realview(Q), :, 29:29, :) @test getstateview(Q, :(e)) === view(MPIStateArrays.realview(Q), :, 30:34, :) @test getstateview(Q, :(e.a)) === view(MPIStateArrays.realview(Q), :, 30:30, :) @test getstateview(Q, :(e.b)) === view(MPIStateArrays.realview(Q), :, 31:33, :) @test getstateview(Q, :(e.d)) === view(MPIStateArrays.realview(Q), :, 34:34, :) 
@test_throws ErrorException Q.aa @test_throws ErrorException getstateview(Q, "aa") P = similar(Q) @test P.a === view(MPIStateArrays.realview(P), :, 1:1, :) @test P.b === view(MPIStateArrays.realview(P), :, 2:4, :) @test P.c === view(MPIStateArrays.realview(P), :, 5:28, :) @test P.d === view(MPIStateArrays.realview(P), :, 29:29, :) @test P.e === view(MPIStateArrays.realview(P), :, 30:34, :) @test getstateview(P, "a") === view(MPIStateArrays.realview(P), :, 1:1, :) @test getstateview(P, "b") === view(MPIStateArrays.realview(P), :, 2:4, :) @test getstateview(P, "c") === view(MPIStateArrays.realview(P), :, 5:28, :) @test getstateview(P, "d") === view(MPIStateArrays.realview(P), :, 29:29, :) @test getstateview(P, "e") === view(MPIStateArrays.realview(P), :, 30:34, :) @test getstateview(P, "e.a") === view(MPIStateArrays.realview(P), :, 30:30, :) @test getstateview(P, "e.b") === view(MPIStateArrays.realview(P), :, 31:33, :) @test getstateview(P, "e.d") === view(MPIStateArrays.realview(P), :, 34:34, :) @test getstateview(P, :(a)) === view(MPIStateArrays.realview(P), :, 1:1, :) @test getstateview(P, :(b)) === view(MPIStateArrays.realview(P), :, 2:4, :) @test getstateview(P, :(c)) === view(MPIStateArrays.realview(P), :, 5:28, :) @test getstateview(P, :(d)) === view(MPIStateArrays.realview(P), :, 29:29, :) @test getstateview(P, :(e)) === view(MPIStateArrays.realview(P), :, 30:34, :) @test getstateview(P, :(e.a)) === view(MPIStateArrays.realview(P), :, 30:30, :) @test getstateview(P, :(e.b)) === view(MPIStateArrays.realview(P), :, 31:33, :) @test getstateview(P, :(e.d)) === view(MPIStateArrays.realview(P), :, 34:34, :) A = MPIStateArray{Float32}(mpicomm, ArrayType, 4, 29, 8) @test_throws ErrorException A.a @test_throws ErrorException getstateview(A, "a") end @testset "MPIStateArray show_not_finite_fields" begin post_msg = "are not finite (has NaNs or Inf)" Q = MPIStateArray{Float32, V}(mpicomm, ArrayType, 4, 34, 8) Q .= 1 Qv = view(MPIStateArrays.realview(Q), :, 1:1, :) Qv .= 
NaN msg = "Field(s) (a) " * post_msg @test_logs (:warn, msg) show_not_finite_fields(Q) Qv = view(MPIStateArrays.realview(Q), :, 2:2, :) Qv .= NaN msg = "Field(s) (a, and b[1]) " * post_msg @test_logs (:warn, msg) show_not_finite_fields(Q) Qv = view(MPIStateArrays.realview(Q), :, 31:31, :) Qv .= NaN msg = "Field(s) (a, b[1], and e.b[1]) " * post_msg @test_logs (:warn, msg) show_not_finite_fields(Q) Q .= 1 @test show_not_finite_fields(Q) == nothing end @testset "MPIStateArray show_not_finite_fields - ntuple vars" begin post_msg = "are not finite (has NaNs or Inf)" Q = MPIStateArray{Float32, VNT}(mpicomm, ArrayType, 4, 10, 8) Q .= 1 Qv = view(MPIStateArrays.realview(Q), :, 1:1, :) Qv .= NaN msg = "Field(s) (a) " * post_msg @test_logs (:warn, msg) show_not_finite_fields(Q) Qv = view(MPIStateArrays.realview(Q), :, 2:2, :) Qv .= NaN msg = "Field(s) (a, and e[1].b[1]) " * post_msg @test_logs (:warn, msg) show_not_finite_fields(Q) Qv = view(MPIStateArrays.realview(Q), :, 6:6, :) Qv .= NaN msg = "Field(s) (a, e[1].b[1], and e[2].b[2]) " * post_msg @test_logs (:warn, msg) show_not_finite_fields(Q) end ================================================ FILE: test/Atmos/EDMF/Artifacts.toml ================================================ [PyCLES_output] git-tree-sha1 = "61af161b398cb0daabeb2eb1d3c57c7ba3629514" ================================================ FILE: test/Atmos/EDMF/bomex_edmf.jl ================================================ using ClimateMachine using ClimateMachine.SingleStackUtils using ClimateMachine.Checkpoint using ClimateMachine.DGMethods using ClimateMachine.SystemSolvers import ClimateMachine.DGMethods: custom_filter! using ClimateMachine.Mesh.Filters: apply! 
using ClimateMachine.BalanceLaws: vars_state using JLD2, FileIO const clima_dir = dirname(dirname(pathof(ClimateMachine))); include(joinpath(clima_dir, "experiments", "AtmosLES", "bomex_model.jl")) include(joinpath("helper_funcs", "diagnostics_configuration.jl")) include("edmf_model.jl") include("edmf_kernels.jl") """ init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} Initialize EDMF state variables. This method is only called at `t=0`. """ function init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} # Aliases: gm = state en = state.turbconv.environment up = state.turbconv.updraft N_up = n_updrafts(turbconv) # GCM setting - Initialize the grid mean profiles of prognostic variables (ρ,e_int,q_tot,u,v,w) z = altitude(m, aux) # SCM setting - need to have separate cases coded and called from a folder - see what LES does # a moist_thermo state is used here to convert the input θ,q_tot to e_int, q_tot profile e_int = internal_energy(m, state, aux) param_set = parameter_set(m) if moisture_model(m) isa DryModel ρq_tot = FT(0) ts = PhaseDry(param_set, e_int, state.ρ) else ρq_tot = gm.moisture.ρq_tot ts = PhaseEquil_ρeq(param_set, state.ρ, e_int, ρq_tot / state.ρ) end T = air_temperature(ts) p = air_pressure(ts) q = PhasePartition(ts) θ_liq = liquid_ice_pottemp(ts) a_min = turbconv.subdomains.a_min @unroll_map(N_up) do i up[i].ρa = gm.ρ * a_min up[i].ρaw = gm.ρu[3] * a_min up[i].ρaθ_liq = gm.ρ * a_min * θ_liq up[i].ρaq_tot = ρq_tot * a_min end # initialize environment covariance with zero for now if z <= FT(2500) en.ρatke = gm.ρ * (FT(1) - z / FT(3000)) else en.ρatke = FT(0) end en.ρaθ_liq_cv = FT(1e-5) / max(z, FT(10)) en.ρaq_tot_cv = FT(1e-5) / max(z, FT(10)) en.ρaθ_liq_q_tot_cv = FT(1e-7) / max(z, FT(10)) return nothing end; struct ZeroVerticalVelocityFilter <: AbstractCustomFilter end function custom_filter!(::ZeroVerticalVelocityFilter, 
bl, state, aux) state.ρu = SVector(state.ρu[1], state.ρu[2], 0) end function main(::Type{FT}, cl_args) where {FT} surface_flux = cl_args["surface_flux"] # DG polynomial order N = 4 nelem_vert = 20 # Prescribe domain parameters zmax = FT(3000) t0 = FT(0) # Simulation time timeend = FT(400) CFLmax = FT(1.2) config_type = SingleStackConfigType ode_solver_type = ClimateMachine.IMEXSolverType( implicit_model = AtmosAcousticGravityLinearModel, implicit_solver = SingleColumnLU, solver_method = ARK2GiraldoKellyConstantinescu, split_explicit_implicit = true, discrete_splitting = false, ) N_updrafts = 1 N_quad = 3 turbconv = EDMF(FT, N_updrafts, N_quad, param_set) model = bomex_model(FT, config_type, zmax, surface_flux; turbconv = turbconv) # Assemble configuration driver_config = ClimateMachine.SingleStackConfiguration( "BOMEX_EDMF", N, nelem_vert, zmax, param_set, model; hmax = zmax, ) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_solver_type = ode_solver_type, init_on_cpu = true, Courant_number = CFLmax, ) # --- Zero-out horizontal variations: vsp = vars_state(model, Prognostic(), FT) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :turbconv), ) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :energy, :ρe), ) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :moisture, :ρq_tot), ) vsa = vars_state(model, Auxiliary(), FT) horizontally_average!( driver_config.grid, solver_config.dg.state_auxiliary, varsindex(vsa, :turbconv), ) # --- dgn_config = config_diagnostics(driver_config, timeend; interval = "50ssecs") cbtmarfilter = GenericCallbacks.EveryXSimulationSteps(1) do Filters.apply!( solver_config.Q, ("moisture.ρq_tot", turbconv_filters(turbconv)...), solver_config.dg.grid, TMARFilter(), ) Filters.apply!( ZeroVerticalVelocityFilter(), solver_config.dg.grid, solver_config.dg.balance_law, solver_config.Q, solver_config.dg.state_auxiliary, ) nothing end diag_arr 
= [single_stack_diagnostics(solver_config)] time_data = FT[0] # Define the number of outputs from `t0` to `timeend` n_outputs = 10 # This equates to exports every ceil(Int, timeend/n_outputs) time-step: every_x_simulation_time = ceil(Int, timeend / n_outputs) cb_data_vs_time = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do diag_vs_z = single_stack_diagnostics(solver_config) nstep = getsteps(solver_config.solver) # Save to disc (for debugging): # @save "bomex_edmf_nstep=$nstep.jld2" diag_vs_z push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) nothing end check_cons = ( ClimateMachine.ConservationCheck("ρ", "3000steps", FT(0.001)), ClimateMachine.ConservationCheck("energy.ρe", "3000steps", FT(0.0025)), ) cb_print_step = GenericCallbacks.EveryXSimulationSteps(100) do @show getsteps(solver_config.solver) nothing end result = ClimateMachine.invoke!( solver_config; diagnostics_config = dgn_config, check_cons = check_cons, user_callbacks = (cbtmarfilter, cb_data_vs_time, cb_print_step), check_euclidean_distance = true, ) diag_vs_z = single_stack_diagnostics(solver_config) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) return solver_config, diag_arr, time_data end # add a command line argument to specify the kind of surface flux # TODO: this will move to the future namelist functionality bomex_args = ArgParseSettings(autofix_names = true) add_arg_group!(bomex_args, "BOMEX") @add_arg_table! 
bomex_args begin
    "--surface-flux"
    help = "specify surface flux for energy and moisture"
    metavar = "prescribed|bulk"
    arg_type = String
    default = "prescribed"
end

cl_args = ClimateMachine.init(
    parse_clargs = true,
    custom_clargs = bomex_args,
    output_dir = get(ENV, "CLIMATEMACHINE_SETTINGS_OUTPUT_DIR", "output"),
    fix_rng_seed = true,
)

solver_config, diag_arr, time_data = main(Float64, cl_args)

include(joinpath(@__DIR__, "report_mse_bomex.jl"))

nothing

================================================
FILE: test/Atmos/EDMF/closures/entr_detr.jl
================================================
#### Entrainment-Detrainment kernels

# All-updraft wrapper: evaluates the per-updraft `entr_detr` method below
# for every updraft, then transposes the resulting tuple of
# (E_dyn, Δ_dyn, E_trb) triples into three per-updraft tuples.
function entr_detr(
    bl::AtmosModel{FT},
    state::Vars,
    aux::Vars,
    ts_up,
    ts_en,
    env,
    buoy,
) where {FT}
    turbconv = turbconv_model(bl)
    EΔ_up = vuntuple(n_updrafts(turbconv)) do i
        entr_detr(bl, turbconv.entr_detr, state, aux, ts_up, ts_en, env, buoy, i)
    end
    E_dyn, Δ_dyn, E_trb = ntuple(i -> map(x -> x[i], EΔ_up), 3)
    return E_dyn, Δ_dyn, E_trb
end

"""
    entr_detr(
        m::AtmosModel{FT},
        entr::EntrainmentDetrainment,
        state::Vars,
        aux::Vars,
        ts_up,
        ts_en,
        env,
        buoy,
        i,
    ) where {FT}

Returns the dynamic entrainment and detrainment rates,
as well as the turbulent entrainment rate, following
Cohen et al. (JAMES, 2020), given:
 - `m`, an `AtmosModel`
 - `entr`, an `EntrainmentDetrainment` model
 - `state`, state variables
 - `aux`, auxiliary variables
 - `ts_up`, updraft thermodynamic states
 - `ts_en`, environment thermodynamic states
 - `env`, NamedTuple of environment variables
 - `buoy`, NamedTuple of environment and updraft buoyancies
 - `i`, index of the updraft
"""
function entr_detr(
    m::AtmosModel{FT},
    entr::EntrainmentDetrainment,
    state::Vars,
    aux::Vars,
    ts_up,
    ts_en,
    env,
    buoy,
    i,
) where {FT}
    # Alias convention:
    gm = state
    en = state.turbconv.environment
    up = state.turbconv.updraft
    en_aux = aux.turbconv.environment
    up_aux = aux.turbconv.updraft

    turbconv = turbconv_model(m)
    N_up = n_updrafts(turbconv)
    ρ_inv = 1 / gm.ρ
    a_up_i = up[i].ρa * ρ_inv
    lim_E = entr.lim_ϵ
    lim_amp = entr.lim_amp
    w_min = entr.w_min

    # precompute vars
    w_up_i = fix_void_up(up[i].ρa, up[i].ρaw / up[i].ρa)
    sqrt_tke = sqrt(max(en.ρatke, 0) * ρ_inv / env.a)
    # ensure far from zero (filter_w floors the velocity scales at w_min)
    Δw = filter_w(w_up_i - env.w, w_min)
    w_up_i = filter_w(w_up_i, w_min)

    Δb = buoy.up[i] - buoy.en

    # Non-dimensional exchange coefficients for the dynamic (D_*) and
    # moisture-deficit (M_*) entrainment/detrainment contributions.
    D_E, D_δ, M_δ, M_E = nondimensional_exchange_functions(
        m,
        entr,
        state,
        aux,
        ts_up,
        ts_en,
        env,
        buoy,
        i,
    )

    # Candidate inverse timescales, combined by a smooth minimum below:
    # Λ_w uses the buoyancy difference over the velocity difference,
    # Λ_tke uses the buoyancy difference over the environment TKE (scaled
    # by c_λ, with w_min guarding the denominator).
    Λ_w = abs(Δb / Δw)
    Λ_tke = entr.c_λ * abs(Δb / (max(en.ρatke * ρ_inv, 0) + w_min))
    λ = lamb_smooth_minimum(
        SVector(Λ_w, Λ_tke),
        turbconv.mix_len.smin_ub,
        turbconv.mix_len.smin_rm,
    )

    # compute entrainment/detrainment components
    # TO DO: Add updraft height dependency (non-local)
    E_trb = 2 * up[i].ρa * entr.c_t * sqrt_tke / turbconv.pressure.H_up_min
    E_dyn = up[i].ρa * λ * (D_E + M_E)
    Δ_dyn = up[i].ρa * λ * (D_δ + M_δ)

    # Clip to non-negative rates
    E_dyn = max(E_dyn, FT(0))
    Δ_dyn = max(Δ_dyn, FT(0))
    E_trb = max(E_trb, FT(0))
    return E_dyn, Δ_dyn, E_trb
end;

================================================
FILE: test/Atmos/EDMF/closures/mixing_length.jl
================================================
#### Mixing length model kernels

"""
    mixing_length(
        m::AtmosModel{FT},
ml::MixingLengthModel, args, Δ::Tuple, Et::Tuple, ts_gm, ts_en, env, ) where {FT} Returns the mixing length used in the diffusive turbulence closure, given: - `m`, an `AtmosModel` - `ml`, a `MixingLengthModel` - `args`, the top-level arguments - `Δ`, the detrainment rate - `Et`, the turbulent entrainment rate - `ts_gm`, grid-mean thermodynamic states - `ts_en`, environment thermodynamic states - `env`, NamedTuple of environment variables """ function mixing_length( m::AtmosModel{FT}, ml::MixingLengthModel, args, Δ::Tuple, Et::Tuple, Shear², ts_gm, ts_en, env, ) where {FT} @unpack state, aux, diffusive, t = args # TODO: use functions: obukhov_length, ustar, ϕ_m turbconv = turbconv_model(m) # Alias convention: gm = state en = state.turbconv.environment up = state.turbconv.updraft gm_aux = aux N_up = n_updrafts(turbconv) z = altitude(m, aux) param_set = parameter_set(m) _grav::FT = grav(param_set) ρinv = 1 / gm.ρ tke_en = max(en.ρatke, 0) * ρinv / env.a # buoyancy related functions # compute obukhov_length and ustar from SurfaceFlux.jl here ustar = turbconv.surface.ustar obukhov_length = turbconv.surface.obukhov_length ∂b∂z, Nˢ_eff = compute_buoyancy_gradients(m, args, ts_gm, ts_en) Grad_Ri = ∇Richardson_number(∂b∂z, Shear², 1 / ml.max_length, ml.Ri_c) Pr_t = turbulent_Prandtl_number(ml.Pr_n, Grad_Ri, ml.ω_pr) # compute L1 Nˢ_fact = (sign(Nˢ_eff - eps(FT)) + 1) / 2 coeff = min(ml.c_b * sqrt(tke_en) / Nˢ_eff, ml.max_length) L_Nˢ = coeff * Nˢ_fact + ml.max_length * (FT(1) - Nˢ_fact) # compute L2 - law of the wall # TODO: use zLL from altitude surf_vals = subdomain_surface_values(m, gm, gm_aux, turbconv.surface.zLL) L_W = ml.κ * max(z, 5) / (sqrt(turbconv.surface.κ_star²) * ml.c_m) if obukhov_length < -eps(FT) L_W *= min((FT(1) - ml.a2 * z / obukhov_length)^ml.a1, 1 / ml.κ) end # compute L3 - entrainment detrainment sources # Production/destruction terms a = ml.c_m * (Shear² - ∂b∂z / Pr_t) * sqrt(tke_en) # Dissipation term b = FT(0) a_up = vuntuple(i -> up[i].ρa * ρinv, 
N_up) w_up = vuntuple(N_up) do i fix_void_up(up[i].ρa, up[i].ρaw / up[i].ρa) end b = sum( ntuple(N_up) do i Δ[i] / gm.ρ / env.a * ((w_up[i] - env.w) * (w_up[i] - env.w) / 2 - tke_en) - (w_up[i] - env.w) * Et[i] / gm.ρ * env.w / env.a end, ) c_neg = ml.c_d * tke_en * sqrt(tke_en) if abs(a) > ml.random_minval && 4 * a * c_neg > -b^2 l_entdet = max(-b / FT(2) / a + sqrt(b^2 + 4 * a * c_neg) / 2 / a, FT(0)) elseif abs(a) < eps(FT) && abs(b) > eps(FT) l_entdet = c_neg / b else l_entdet = FT(0) end L_tke = l_entdet if L_Nˢ < eps(FT) || L_Nˢ > ml.max_length L_Nˢ = ml.max_length end if L_W < eps(FT) || L_W > ml.max_length L_W = ml.max_length end if L_tke < eps(FT) || L_tke > ml.max_length L_tke = ml.max_length end l_mix = lamb_smooth_minimum(SVector(L_Nˢ, L_W, L_tke), ml.smin_ub, ml.smin_rm) return l_mix, ∂b∂z, Pr_t end; ================================================ FILE: test/Atmos/EDMF/closures/pressure.jl ================================================ #### Pressure model kernels function perturbation_pressure(bl::AtmosModel{FT}, args, env, buoy) where {FT} dpdz = vuntuple(n_updrafts(turbconv_model(bl))) do i perturbation_pressure(bl, turbconv_model(bl).pressure, args, env, buoy, i) end return dpdz end """ perturbation_pressure( m::AtmosModel{FT}, press::PressureModel, args, env, buoy, i, ) where {FT} Returns the value of perturbation pressure gradient for updraft i following He et al. 
(JAMES, 2020), given: - `m`, an `AtmosModel` - `press`, a `PressureModel` - `args`, top-level arguments - `env`, NamedTuple of environment variables - `buoy`, NamedTuple of environment and updraft buoyancies - `i`, index of the updraft """ function perturbation_pressure( m::AtmosModel{FT}, press::PressureModel, args, env, buoy, i, ) where {FT} @unpack state, diffusive, aux = args # Alias convention: up = state.turbconv.updraft up_aux = aux.turbconv.updraft up_dif = diffusive.turbconv.updraft w_up_i = fix_void_up(up[i].ρa, up[i].ρaw / up[i].ρa) nh_press_buoy = press.α_b * buoy.up[i] nh_pressure_adv = -press.α_a * w_up_i * up_dif[i].∇w[3] # TO DO: Add updraft height dependency (non-local) nh_pressure_drag = press.α_d * (w_up_i - env.w) * abs(w_up_i - env.w) / press.H_up_min dpdz = nh_press_buoy + nh_pressure_adv + nh_pressure_drag return dpdz end; ================================================ FILE: test/Atmos/EDMF/closures/surface_functions.jl ================================================ #### Surface model kernels using Statistics """ subdomain_surface_values( atmos::AtmosModel{FT}, state::Vars, aux::Vars, zLL::FT, ) where {FT} Returns the surface values of updraft area fraction, updraft liquid water potential temperature (`θ_liq`), updraft total water specific humidity (`q_tot`), environmental variances of `θ_liq` and `q_tot`, environmental covariance of `θ_liq` with `q_tot`, and environmental TKE, given: - `atmos`, an `AtmosModel` - `state`, state variables - `aux`, auxiliary variables - `zLL`, height of the lowest nodal level """ function subdomain_surface_values( atmos::AtmosModel, state::Vars, aux::Vars, zLL, ) turbconv = turbconv_model(atmos) subdomain_surface_values(turbconv.surface, turbconv, atmos, state, aux, zLL) end function subdomain_surface_values( surf::SurfaceModel, turbconv::EDMF{FT}, atmos::AtmosModel{FT}, state::Vars, aux::Vars, zLL::FT, ) where {FT} N_up = n_updrafts(turbconv) gm = state # TODO: change to new_thermo_state ts = 
recover_thermo_state(atmos, state, aux) q = PhasePartition(ts) _cp_m = cp_m(ts) lv = latent_heat_vapor(ts) Π = exner(ts) ρ_inv = 1 / gm.ρ upd_surface_std = turbconv.surface.upd_surface_std θ_liq_surface_flux = surf.shf / Π / _cp_m q_tot_surface_flux = surf.lhf / lv # these value should be given from the SurfaceFluxes.jl once it is merged oblength = turbconv.surface.obukhov_length ustar = turbconv.surface.ustar unstable = oblength < -eps(FT) fact = unstable ? (1 - surf.ψϕ_stab * zLL / oblength)^(-FT(2 // 3)) : 1 tke_fact = unstable ? cbrt(zLL / oblength * zLL / oblength) : 0 ustar² = ustar^2 θ_liq_cv = 4 * (θ_liq_surface_flux * θ_liq_surface_flux) / (ustar²) * fact q_tot_cv = 4 * (q_tot_surface_flux * q_tot_surface_flux) / (ustar²) * fact θ_liq_q_tot_cv = 4 * (θ_liq_surface_flux * q_tot_surface_flux) / (ustar²) * fact tke = ustar² * (surf.κ_star² + tke_fact) a_up_surf = ntuple(i -> FT(surf.a / N_up), N_up) e_int = internal_energy(atmos, state, aux) ts_new = new_thermo_state(atmos, state, aux) θ_liq = liquid_ice_pottemp(ts_new) θ_liq_up_surf = ntuple(N_up) do i θ_liq + upd_surface_std[i] * sqrt(max(θ_liq_cv, 0)) end ρq_tot = moisture_model(atmos) isa DryModel ? 
FT(0) : gm.moisture.ρq_tot
    q_tot_up_surf = ntuple(N_up) do i
        ρq_tot * ρ_inv + upd_surface_std[i] * sqrt(max(q_tot_cv, 0))
    end

    return (;
        a_up_surf,
        θ_liq_up_surf,
        q_tot_up_surf,
        θ_liq_cv,
        q_tot_cv,
        θ_liq_q_tot_cv,
        tke,
    )
end;

# Surface values for the neutral, dry surface model: all surface
# (co)variances and updraft moisture vanish, and TKE is set from the
# friction velocity alone.
function subdomain_surface_values(
    surf::NeutralDrySurfaceModel,
    turbconv::EDMF{FT},
    atmos::AtmosModel{FT},
    state::Vars,
    aux::Vars,
    zLL::FT,
) where {FT}

    N_up = n_updrafts(turbconv)
    θ_liq_cv = FT(0)
    q_tot_cv = FT(0)
    θ_liq_q_tot_cv = FT(0)
    # TKE from similarity scaling: κ⋆² u⋆²
    tke = surf.κ_star² * surf.ustar * surf.ustar
    ts_new = new_thermo_state(atmos, state, aux)
    # Total updraft area is split evenly among the N_up updrafts
    a_up_surf = ntuple(i -> FT(surf.a / N_up), N_up)
    q_tot_up_surf = ntuple(i -> FT(0), N_up)
    θ_liq_up_surf = ntuple(i -> liquid_ice_pottemp(ts_new), N_up)

    return (;
        a_up_surf,
        θ_liq_up_surf,
        q_tot_up_surf,
        θ_liq_cv,
        q_tot_cv,
        θ_liq_q_tot_cv,
        tke,
    )
end;

"""
    percentile_bounds_mean_norm(
        low_percentile::FT,
        high_percentile::FT,
        n_samples::Int,
    ) where {FT <: AbstractFloat}

Returns the mean of all instances of a standard Gaussian random
variable that have a CDF higher than low_percentile and lower
than high_percentile, given a total of n_samples of the standard
Gaussian, given:
 - `low_percentile`, lower limit of the CDF
 - `high_percentile`, higher limit of the CDF
 - `n_samples`, the total number of samples drawn from the Gaussian
"""
function percentile_bounds_mean_norm(
    low_percentile::FT,
    high_percentile::FT,
    n_samples::Int,
) where {FT <: AbstractFloat}
    # NOTE(review): this is a Monte-Carlo estimate — `rand` makes the result
    # stochastic across calls, and if no sample falls strictly between the
    # two quantiles the filtered vector is empty, so `mean` returns NaN.
    # Confirm callers tolerate both (or consider seeding the RNG / using the
    # analytic truncated-normal mean).
    x = rand(Normal(), n_samples)
    xp_low = quantile(Normal(), low_percentile)
    xp_high = quantile(Normal(), high_percentile)
    filter!(y -> xp_low < y < xp_high, x)
    return Statistics.mean(x)
end

================================================
FILE: test/Atmos/EDMF/closures/turbulence_functions.jl
================================================
#### Turbulence model kernels

"""
    thermo_variables(ts::ThermodynamicState)

A NamedTuple of thermodynamic variables, computed from the given
thermodynamic state.
""" function thermo_variables(ts::ThermodynamicState) return ( θ_dry = dry_pottemp(ts), θ_liq = liquid_ice_pottemp(ts), q_tot = total_specific_humidity(ts), T = air_temperature(ts), R_m = gas_constant_air(ts), q_vap = vapor_specific_humidity(ts), q_liq = liquid_specific_humidity(ts), q_ice = ice_specific_humidity(ts), ) end """ compute_buoyancy_gradients( m::AtmosModel{FT}, args, ts_gm, ts_en ) where {FT} Returns the environmental buoyancy gradient following Tan et al. (JAMES, 2018) and the effective environmental static stability following Lopez-Gomez et al. (JAMES, 2020), given: - `m`, an `AtmosModel` - `args`, top-level arguments - `ts_gm`, grid-mean thermodynamic state - `ts_en`, environment thermodynamic state """ function compute_buoyancy_gradients( m::AtmosModel{FT}, args, ts_gm, ts_en, ) where {FT} @unpack state, aux, diffusive = args # Alias convention: gm = state en_dif = diffusive.turbconv.environment N_up = n_updrafts(turbconv_model(m)) param_set = parameter_set(m) _grav::FT = grav(param_set) _R_d::FT = R_d(param_set) _R_v::FT = R_v(param_set) ε_v::FT = 1 / molmass_ratio(param_set) p = air_pressure(ts_gm) q_tot_en = total_specific_humidity(ts_en) θ_liq_en = liquid_ice_pottemp(ts_en) lv = latent_heat_vapor(ts_en) T = air_temperature(ts_en) Π = exner(ts_en) q_liq = liquid_specific_humidity(ts_en) _cp_m = cp_m(ts_en) θ_virt = virtual_pottemp(ts_en) (ts_dry, ts_cloudy, cloud_frac) = compute_subdomain_statistics(m, args, ts_gm, ts_en) cloudy = thermo_variables(ts_cloudy) dry = thermo_variables(ts_dry) prefactor = _grav * (_R_d * gm.ρ / p * Π) ∂b∂θl_dry = prefactor * (1 + (ε_v - 1) * dry.q_tot) ∂b∂qt_dry = prefactor * dry.θ_liq * (ε_v - 1) if cloud_frac > FT(0) num = prefactor * (1 + ε_v * (1 + lv / _R_v / cloudy.T) * cloudy.q_vap - cloudy.q_tot) den = 1 + lv * lv / _cp_m / _R_v / cloudy.T / cloudy.T * cloudy.q_vap ∂b∂θl_cloudy = num / den ∂b∂qt_cloudy = (lv / _cp_m / cloudy.T * ∂b∂θl_cloudy - prefactor) * cloudy.θ_dry else ∂b∂θl_cloudy = FT(0) ∂b∂qt_cloudy = 
FT(0)
    end

    # Cloud-fraction-weighted mix of the dry and cloudy partial derivatives
    ∂b∂θl = (cloud_frac * ∂b∂θl_cloudy + (1 - cloud_frac) * ∂b∂θl_dry)
    ∂b∂qt = (cloud_frac * ∂b∂qt_cloudy + (1 - cloud_frac) * ∂b∂qt_dry)

    # Partial buoyancy gradients
    ∂b∂z_θl = en_dif.∇θ_liq[3] * ∂b∂θl
    ∂b∂z_qt = en_dif.∇q_tot[3] * ∂b∂qt
    ∂b∂z = ∂b∂z_θl + ∂b∂z_qt

    # Computation of buoyancy frequency based on θ_lv
    ∂θvl∂θ_liq = 1 + (ε_v - 1) * q_tot_en
    ∂θvl∂qt = (ε_v - 1) * θ_liq_en
    # apply chain-rule
    ∂θvl∂z = ∂θvl∂θ_liq * en_dif.∇θ_liq[3] + ∂θvl∂qt * en_dif.∇q_tot[3]
    ∂θv∂θvl = exp(lv * q_liq / _cp_m / T)
    λ_stb = cloud_frac

    Nˢ_eff =
        _grav / θ_virt *
        ((1 - λ_stb) * en_dif.∇θv[3] + λ_stb * ∂θvl∂z * ∂θv∂θvl)
    return ∂b∂z, Nˢ_eff
end;

"""
    ∇Richardson_number(
        ∂b∂z::FT,
        Shear²::FT,
        minval::FT,
        Ri_c::FT,
    ) where {FT}

Returns the gradient Richardson number, given:
 - `∂b∂z`, the vertical buoyancy gradient
 - `Shear²`, the squared vertical gradient of horizontal velocity
 - `minval`, lower bound applied to `Shear²` (guards against division by zero)
 - `Ri_c`, the critical Richardson number, used as an upper bound on the output
"""
function ∇Richardson_number(
    ∂b∂z::FT,
    Shear²::FT,
    minval::FT,
    Ri_c::FT,
) where {FT}
    return min(∂b∂z / max(Shear², minval), Ri_c)
end;

"""
    turbulent_Prandtl_number(
        Pr_n::FT,
        Grad_Ri::FT,
        ω_pr::FT,
    ) where {FT}

Returns the turbulent Prandtl number, given:
 - `Pr_n`, the turbulent Prandtl number under neutral conditions
 - `Grad_Ri`, the gradient Richardson number
 - `ω_pr`, empirical coefficient controlling the growth of the Prandtl
   number with increasing stability
"""
function turbulent_Prandtl_number(Pr_n::FT, Grad_Ri::FT, ω_pr::FT) where {FT}
    # Stable stratification (Grad_Ri > 0) increases Pr above its neutral
    # value; neutral/unstable conditions leave it at Pr_n.
    if Grad_Ri > FT(0)
        factor =
            2 * Grad_Ri /
            (1 + ω_pr * Grad_Ri - sqrt((1 + ω_pr * Grad_Ri)^2 - 4 * Grad_Ri))
    else
        factor = FT(1)
    end
    return Pr_n * factor
end;

================================================
FILE: test/Atmos/EDMF/compute_mse.jl
================================================
using ClimateMachine
if parse(Bool, get(ENV, "CLIMATEMACHINE_PLOT_EDMF_COMPARISON", "false"))
    using Plots
end
using OrderedCollections
using Test
using NCDatasets
using Dierckx
using PrettyTables
using Printf
using ArtifactWrappers

# Get PyCLES_output dataset folder:
#! format: off
PyCLES_output_dataset = ArtifactWrapper(
    @__DIR__,
    isempty(get(ENV, "CI", "")),
    "PyCLES_output",
    ArtifactFile[
    # ArtifactFile(url = "https://caltech.box.com/shared/static/johlutwhohvr66wn38cdo7a6rluvz708.nc", filename = "Rico.nc",),
    ArtifactFile(url = "https://caltech.box.com/shared/static/zraeiftuzlgmykzhppqwrym2upqsiwyb.nc", filename = "Gabls.nc",),
    # ArtifactFile(url = "https://caltech.box.com/shared/static/toyvhbwmow3nz5bfa145m5fmcb2qbfuz.nc", filename = "DYCOMS_RF01.nc",),
    # ArtifactFile(url = "https://caltech.box.com/shared/static/ivo4751camlph6u3k68ftmb1dl4z7uox.nc", filename = "TRMM_LBA.nc",),
    # ArtifactFile(url = "https://caltech.box.com/shared/static/4osqp0jpt4cny8fq2ukimgfnyi787vsy.nc", filename = "ARM_SGP.nc",),
    ArtifactFile(url = "https://caltech.box.com/shared/static/jci8l11qetlioab4cxf5myr1r492prk6.nc", filename = "Bomex.nc",),
    # ArtifactFile(url = "https://caltech.box.com/shared/static/pzuu6ii99by2s356ij69v5cb615200jq.nc", filename = "Soares.nc",),
    # ArtifactFile(url = "https://caltech.box.com/shared/static/7upt639siyc2umon8gs6qsjiqavof5cq.nc", filename = "Nieuwstadt.nc",),
    ],
)
PyCLES_output_dataset_path = get_data_folder(PyCLES_output_dataset)
#! format: on

include("variable_map.jl")

# Compare ClimateMachine EDMF single-stack diagnostics (`dons_arr`) against
# a PyCLES reference dataset (`ds`), returning a Dict of normalized
# mean-squared errors, one entry per variable in `best_mse` that has a
# `var_map` entry. Optionally writes comparison plots to `plot_dir`.
function compute_mse(
    grid,
    bl,
    time_cm,
    dons_arr,
    ds,
    experiment,
    best_mse,
    t_compare,
    plot_dir = nothing,
)
    mse = Dict()
    # Ensure domain matches:
    z_les = ds["z_half"][:]
    z_cm = get_z(grid; rm_dupes = true)
    @info "Z extent for LES vs CLIMA:"
    @show extrema(z_cm)
    @show extrema(z_les)

    time_les = ds["t"][:]
    # Find the nearest matching final time:
    t_cmp = min(time_cm[end], time_les[end])

    # Accidentally running a short simulation
    # could improve MSE. So, let's test that
    # we run for at least t_compare. We should
    # increase this as we can reach higher CFL.
@test t_cmp >= t_compare # Ensure z_cm and dons_arr fields are consistent lengths: @test length(z_cm) == length(dons_arr[1][first(keys(dons_arr[1]))]) data_cm = Dict() dons_cont = Dict() cm_variables = [] computed_mse = [] table_best_mse = [] mse_reductions = [] pycles_variables = [] data_scales = [] pycles_weight = [] for (ftc) in keys(best_mse) # Only compare fields defined for var_map tup = var_map(ftc) tup == nothing && continue # Unpack the data LES_var = tup[1] facts = tup[2] push!(cm_variables, ftc) push!(pycles_variables, LES_var) data_ds = ds.group["profiles"] data_les = data_ds[LES_var][:] # Scale the data for comparison ρ = data_ds["rho"][:] a_up = data_ds["updraft_fraction"][:] a_en = 1 .- data_ds["updraft_fraction"][:] ρa_up = ρ .* a_up ρa_en = ρ .* a_en ρa = occursin("updraft", ftc) in ftc ? ρa_up : ρa_en if :a in facts && :ρ in facts data_les .*= ρa push!(pycles_weight, "ρa") elseif :ρ in facts data_les .*= ρ push!(pycles_weight, "ρ") else push!(pycles_weight, "1") end # Interpolate data steady_data = length(size(data_les)) == 1 if steady_data data_les_cont = Spline1D(z_les, data_les) else # unsteady data data_les_cont = Spline2D(time_les, z_les, data_les') end data_cm_arr_ = [ dons_arr[i][ftc][i_z] for i in 1:length(time_cm), i_z in 1:length(z_cm) ] data_cm_arr = reshape(data_cm_arr_, (length(time_cm), length(z_cm))) data_cm[ftc] = Spline2D(time_cm, z_cm, data_cm_arr) # Compute data scale data_scale = sum(abs.(data_les)) / length(data_les) push!(data_scales, data_scale) # Plot comparison if plot_dir ≠ nothing p = plot() if steady_data data_les_cont_mapped = map(z -> data_les_cont(z), z_cm) else data_les_cont_mapped = map(z -> data_les_cont(t_cmp, z), z_cm) end plot!( data_les_cont_mapped, z_cm ./ 10^3, xlabel = ftc, ylabel = "z [km]", label = "PyCLES", ) plot!( map(z -> data_cm[ftc](t_cmp, z), z_cm), z_cm ./ 10^3, xlabel = ftc, ylabel = "z [km]", label = "CM", ) mkpath(plot_dir) ftc_name = replace(ftc, "." 
=> "_") savefig(joinpath(plot_dir, "$ftc_name.png")) end # Compute mean squared error (mse) if steady_data mse_single_var = sum(map(z_cm) do z (data_les_cont(z) - data_cm[ftc](t_cmp, z))^2 end) else mse_single_var = sum(map(z_cm) do z (data_les_cont(t_cmp, z) - data_cm[ftc](t_cmp, z))^2 end) end # Normalize by data scale mse[ftc] = mse_single_var / data_scale^2 push!(mse_reductions, (best_mse[ftc] - mse[ftc]) / best_mse[ftc] * 100) push!(computed_mse, mse[ftc]) push!(table_best_mse, best_mse[ftc]) end # Tabulate output header = [ "Variable" "Variable" "Weight" "Data scale" "MSE" "MSE" "MSE" "ClimateMachine (EDMF)" "PyCLES" "PyCLES" "" "Computed" "Best" "Reduction (%)" ] table_data = hcat( cm_variables, pycles_variables, pycles_weight, data_scales, computed_mse, table_best_mse, mse_reductions, ) @info @sprintf( "Experiment comparison: %s at time t=%s\n", experiment, t_cmp ) hl_worsened_mse = Highlighter( (data, i, j) -> !sufficient_mse(data[i, 5], data[i, 6]) && j == 5, crayon"red bold", ) hl_worsened_mse_reduction = Highlighter( (data, i, j) -> !sufficient_mse(data[i, 5], data[i, 6]) && j == 7, crayon"red bold", ) hl_improved_mse = Highlighter( (data, i, j) -> sufficient_mse(data[i, 5], data[i, 6]) && j == 7, crayon"green bold", ) pretty_table( table_data, header, formatters = ft_printf("%.16e", 5:6), header_crayon = crayon"yellow bold", subheader_crayon = crayon"green bold", highlighters = ( hl_worsened_mse, hl_improved_mse, hl_worsened_mse_reduction, ), crop = :none, ) return mse end sufficient_mse(computed_mse, best_mse) = computed_mse <= best_mse + sqrt(eps()) function test_mse(computed_mse, best_mse, key) mse_not_regressed = sufficient_mse(computed_mse[key], best_mse[key]) @test mse_not_regressed mse_not_regressed || @show key end function dons(diag_vs_z) return Dict(map(keys(first(diag_vs_z))) do k string(k) => [getproperty(ca, k) for ca in diag_vs_z] end) end get_dons_arr(diag_arr) = [dons(diag_vs_z) for diag_vs_z in diag_arr] dons_arr = 
get_dons_arr(diag_arr)



================================================
FILE: test/Atmos/EDMF/edmf_kernels.jl
================================================

#### EDMF model kernels

using CLIMAParameters.Planet: e_int_v0, grav, day, R_d, R_v, molmass_ratio

using Printf
using ClimateMachine.Atmos: nodal_update_auxiliary_state!, Advect

using ClimateMachine.BalanceLaws

using ClimateMachine.MPIStateArrays: MPIStateArray
using ClimateMachine.DGMethods: LocalGeometry, DGModel

import ClimateMachine.Mesh.Filters: vars_state_filtered

import ClimateMachine.BalanceLaws:
    vars_state,
    prognostic_vars,
    prognostic_to_primitive!,
    primitive_to_prognostic!,
    get_prog_state,
    get_specific_state,
    flux,
    precompute,
    source,
    eq_tends,
    update_auxiliary_state!,
    init_state_prognostic!,
    compute_gradient_argument!,
    compute_gradient_flux!

import ClimateMachine.TurbulenceConvection:
    init_aux_turbconv!,
    turbconv_nodal_update_auxiliary_state!,
    turbconv_boundary_state!,
    turbconv_normal_boundary_flux_second_order!

using Thermodynamics: air_pressure, air_density

include(joinpath("helper_funcs", "nondimensional_exchange_functions.jl"))
include(joinpath("helper_funcs", "lamb_smooth_minimum.jl"))
include(joinpath("helper_funcs", "utility_funcs.jl"))
include(joinpath("helper_funcs", "subdomain_statistics.jl"))
include(joinpath("helper_funcs", "diagnose_environment.jl"))
include(joinpath("helper_funcs", "subdomain_thermo_states.jl"))
include(joinpath("helper_funcs", "save_subdomain_temperature.jl"))
include(joinpath("closures", "entr_detr.jl"))
include(joinpath("closures", "pressure.jl"))
include(joinpath("closures", "mixing_length.jl"))
include(joinpath("closures", "turbulence_functions.jl"))
include(joinpath("closures", "surface_functions.jl"))

# --- State variable declarations ---
# `vars_state` methods declare which variables each EDMF subcomponent
# (Environment, each Updraft) carries for a given state type
# (Auxiliary / Prognostic / Primitive / Gradient / GradientFlux).

# A tuple of updrafts declares the per-updraft variables for each member.
function vars_state(m::NTuple{N, Updraft}, st::Auxiliary, FT) where {N}
    return Tuple{ntuple(i -> vars_state(m[i], st, FT), N)...}
end

# Both subdomains carry temperature as an auxiliary variable.
vars_state(::Updraft, ::Auxiliary, FT) = @vars(T::FT)
vars_state(::Environment, ::Auxiliary, FT) = @vars(T::FT)

function vars_state(m::EDMF, st::Auxiliary, FT)
    @vars(
        environment::vars_state(m.environment, st, FT),
        updraft::vars_state(m.updraft, st, FT)
    )
end

# Updraft prognostic variables: area-weighted density (ρa), vertical
# momentum (ρaw), liquid-ice potential temperature (ρaθ_liq) and total
# moisture (ρaq_tot).
function vars_state(::Updraft, ::Prognostic, FT)
    @vars(ρa::FT, ρaw::FT, ρaθ_liq::FT, ρaq_tot::FT,)
end

# Environment prognostic variables: TKE and second-moment covariances.
function vars_state(::Environment, ::Prognostic, FT)
    @vars(ρatke::FT, ρaθ_liq_cv::FT, ρaq_tot_cv::FT, ρaθ_liq_q_tot_cv::FT,)
end

function vars_state(::Updraft, ::Primitive, FT)
    @vars(a::FT, aw::FT, aθ_liq::FT, aq_tot::FT,)
end

function vars_state(::Environment, ::Primitive, FT)
    @vars(atke::FT, aθ_liq_cv::FT, aq_tot_cv::FT, aθ_liq_q_tot_cv::FT,)
end

function vars_state(
    m::NTuple{N, Updraft},
    st::Union{Prognostic, Primitive},
    FT,
) where {N}
    return Tuple{ntuple(i -> vars_state(m[i], st, FT), N)...}
end

function vars_state(m::EDMF, st::Union{Prognostic, Primitive}, FT)
    @vars(
        environment::vars_state(m.environment, st, FT),
        updraft::vars_state(m.updraft, st, FT)
    )
end

# Gradient (transform) variables: quantities whose gradients are needed.
function vars_state(::Updraft, ::Gradient, FT)
    @vars(w::FT,)
end

function vars_state(::Environment, ::Gradient, FT)
    @vars(
        θ_liq::FT,
        q_tot::FT,
        w::FT,
        tke::FT,
        θ_liq_cv::FT,
        q_tot_cv::FT,
        θ_liq_q_tot_cv::FT,
        θv::FT,
        h_tot::FT,
    )
end

function vars_state(m::NTuple{N, Updraft}, st::Gradient, FT) where {N}
    return Tuple{ntuple(i -> vars_state(m[i], st, FT), N)...}
end

# Grid-mean horizontal velocity components are also differentiated.
function vars_state(m::EDMF, st::Gradient, FT)
    @vars(
        environment::vars_state(m.environment, st, FT),
        updraft::vars_state(m.updraft, st, FT),
        u::FT,
        v::FT
    )
end

function vars_state(m::NTuple{N, Updraft}, st::GradientFlux, FT) where {N}
    return Tuple{ntuple(i -> vars_state(m[i], st, FT), N)...}
end

function vars_state(::Updraft, st::GradientFlux, FT)
    @vars(∇w::SVector{3, FT},)
end

function vars_state(::Environment, ::GradientFlux, FT)
    @vars(
        ∇θ_liq::SVector{3, FT},
        ∇q_tot::SVector{3, FT},
        ∇w::SVector{3, FT},
        ∇tke::SVector{3, FT},
        ∇θ_liq_cv::SVector{3, FT},
        ∇q_tot_cv::SVector{3, FT},
        ∇θ_liq_q_tot_cv::SVector{3, FT},
        ∇θv::SVector{3, FT},
        ∇h_tot::SVector{3, FT},
    )
end

function vars_state(m::EDMF, st::GradientFlux, FT)
    @vars(
        environment::vars_state(m.environment, st, FT),
        updraft::vars_state(m.updraft, st, FT),
        ∇u::SVector{3, FT},
        ∇v::SVector{3, FT}
    )
end

# Variables subject to filtering (mirror the Prognostic specific variables).
function vars_state_filtered(::Updraft, FT)
    @vars(a::FT, aw::FT, aθ_liq::FT, aq_tot::FT,)
end

function vars_state_filtered(m::NTuple{N, Updraft}, FT) where {N}
    return Tuple{ntuple(i -> vars_state_filtered(m[i], FT), N)...}
end

function vars_state_filtered(::Environment, FT)
    @vars(atke::FT, aθ_liq_cv::FT, aq_tot_cv::FT, aθ_liq_q_tot_cv::FT,)
end

function vars_state_filtered(m::EDMF, FT)
    @vars(
        environment::vars_state_filtered(m.environment, FT),
        updraft::vars_state_filtered(m.updraft, FT)
    )
end

# --- Prognostic variable tags ---
# Singleton types used for dispatch of per-variable fluxes and sources.
abstract type EDMFPrognosticVariable <: AbstractPrognosticVariable end
abstract type EnvironmentPrognosticVariable <: EDMFPrognosticVariable end
struct en_ρatke <: EnvironmentPrognosticVariable end
struct en_ρaθ_liq_cv <: EnvironmentPrognosticVariable end
struct en_ρaq_tot_cv <: EnvironmentPrognosticVariable end
struct en_ρaθ_liq_q_tot_cv <: EnvironmentPrognosticVariable end
# Updraft tags are parameterized by the updraft index `i`.
abstract type UpdraftPrognosticVariable{i} <: EDMFPrognosticVariable end
struct up_ρa{i} <: UpdraftPrognosticVariable{i} end
struct up_ρaw{i} <: UpdraftPrognosticVariable{i} end
struct up_ρaθ_liq{i} <: UpdraftPrognosticVariable{i} end
struct up_ρaq_tot{i} <: UpdraftPrognosticVariable{i} end

prognostic_vars(m::EDMF) =
    (prognostic_vars(m.environment)..., prognostic_vars(m.updraft)...)
prognostic_vars(m::Environment) =
    (en_ρatke(), en_ρaθ_liq_cv(), en_ρaq_tot_cv(), en_ρaθ_liq_q_tot_cv())

function prognostic_vars(m::NTuple{N, Updraft}) where {N}
    t_ρa = vuntuple(i -> up_ρa{i}(), N)
    t_ρaw = vuntuple(i -> up_ρaw{i}(), N)
    t_ρaθ_liq = vuntuple(i -> up_ρaθ_liq{i}(), N)
    t_ρaq_tot = vuntuple(i -> up_ρaq_tot{i}(), N)
    t = (t_ρa..., t_ρaw..., t_ρaθ_liq..., t_ρaq_tot...)
    return t
end

# Map a prognostic-variable tag to (parent state, field symbol).
get_prog_state(state, ::en_ρatke) = (state.turbconv.environment, :ρatke)
get_prog_state(state, ::en_ρaθ_liq_cv) =
    (state.turbconv.environment, :ρaθ_liq_cv)
get_prog_state(state, ::en_ρaq_tot_cv) =
    (state.turbconv.environment, :ρaq_tot_cv)
get_prog_state(state, ::en_ρaθ_liq_q_tot_cv) =
    (state.turbconv.environment, :ρaθ_liq_q_tot_cv)
get_prog_state(state, ::up_ρa{i}) where {i} = (state.turbconv.updraft[i], :ρa)
get_prog_state(state, ::up_ρaw{i}) where {i} =
    (state.turbconv.updraft[i], :ρaw)
get_prog_state(state, ::up_ρaθ_liq{i}) where {i} =
    (state.turbconv.updraft[i], :ρaθ_liq)
get_prog_state(state, ::up_ρaq_tot{i}) where {i} =
    (state.turbconv.updraft[i], :ρaq_tot)

# Map a prognostic-variable tag to the corresponding specific (per-ρ) field.
get_specific_state(state, ::en_ρatke) = (state.turbconv.environment, :atke)
get_specific_state(state, ::en_ρaθ_liq_cv) =
    (state.turbconv.environment, :aθ_liq_cv)
get_specific_state(state, ::en_ρaq_tot_cv) =
    (state.turbconv.environment, :aq_tot_cv)
get_specific_state(state, ::en_ρaθ_liq_q_tot_cv) =
    (state.turbconv.environment, :aθ_liq_q_tot_cv)
get_specific_state(state, ::up_ρa{i}) where {i} =
    (state.turbconv.updraft[i], :a)
get_specific_state(state, ::up_ρaw{i}) where {i} =
    (state.turbconv.updraft[i], :aw)
get_specific_state(state, ::up_ρaθ_liq{i}) where {i} =
    (state.turbconv.updraft[i], :aθ_liq)
get_specific_state(state, ::up_ρaq_tot{i}) where {i} =
    (state.turbconv.updraft[i], :aq_tot)

# --- Tendency definitions ---
struct EntrDetr{N_up} <: TendencyDef{Source} end
struct PressSource{N_up} <: TendencyDef{Source} end
struct BuoySource{N_up} <: TendencyDef{Source} end
struct ShearSource <: TendencyDef{Source} end
struct DissSource <: TendencyDef{Source} end
struct GradProdSource <: TendencyDef{Source} end

# NOTE(review): the updraft entries below are types (`up_ρa{i}`), while the
# environment entries are instances (`en_ρatke()`), unlike
# `prognostic_vars(::NTuple{N, Updraft})` which constructs instances —
# confirm whether the missing `()` is intentional.
prognostic_vars(::EntrDetr{N_up}) where {N_up} = (
    vuntuple(i -> up_ρa{i}, N_up)...,
    vuntuple(i -> up_ρaw{i}, N_up)...,
    vuntuple(i -> up_ρaθ_liq{i}, N_up)...,
    vuntuple(i -> up_ρaq_tot{i}, N_up)...,
    en_ρatke(),
    en_ρaθ_liq_cv(),
    en_ρaq_tot_cv(),
    en_ρaθ_liq_q_tot_cv(),
)
prognostic_vars(::PressSource{N_up}) where {N_up} =
    vuntuple(i -> up_ρaw{i}(), N_up)
prognostic_vars(::BuoySource{N_up}) where {N_up} =
    vuntuple(i -> up_ρaw{i}(), N_up)

# Convenience constructors sized by the model's number of updrafts.
EntrDetr(m::EDMF) = EntrDetr{n_updrafts(m)}()
BuoySource(m::EDMF) = BuoySource{n_updrafts(m)}()
PressSource(m::EDMF) = PressSource{n_updrafts(m)}()

# Dycore tendencies
eq_tends(
    pv::Union{Momentum, Energy, TotalMoisture},
    m::EDMF,
    flux::Flux{SecondOrder},
) = eq_tends(pv, m.coupling, flux)
eq_tends(
    pv::Union{Momentum, Energy, TotalMoisture},
    ::Decoupled,
    ::Flux{SecondOrder},
) = ()
# Only the coupled configuration feeds SGS fluxes back to the dycore.
eq_tends(
    pv::Union{Momentum, Energy, TotalMoisture},
    ::Coupled,
    ::Flux{SecondOrder},
) = (SGSFlux(),)

# Turbconv tendencies
eq_tends(pv::EDMFPrognosticVariable, m::AtmosModel, tt::Flux{O}) where {O} =
    eq_tends(pv, turbconv_model(m), tt)

eq_tends(::EDMFPrognosticVariable, m::EDMF, ::Flux{O}) where {O} = ()

eq_tends(::EnvironmentPrognosticVariable, m::EDMF, ::Flux{SecondOrder}) =
    (Diffusion(),)

eq_tends(pv::EDMFPrognosticVariable, m::EDMF, ::Flux{FirstOrder}) = (Advect(),)

eq_tends(pv::PV, m::EDMF, ::Source) where {PV} = ()

eq_tends(::EDMFPrognosticVariable, m::EDMF, ::Source) = (EntrDetr(m),)

eq_tends(pv::en_ρatke, m::EDMF, ::Source) =
    (EntrDetr(m), PressSource(m), BuoySource(m), ShearSource(), DissSource())

eq_tends(
    ::Union{en_ρaθ_liq_cv, en_ρaq_tot_cv, en_ρaθ_liq_q_tot_cv},
    m::EDMF,
    ::Source,
) = (EntrDetr(m), DissSource(), GradProdSource())

eq_tends(::up_ρaw, m::EDMF, ::Source) =
    (EntrDetr(m), PressSource(m), BuoySource(m))

struct SGSFlux <: TendencyDef{Flux{SecondOrder}} end

"""
    init_aux_turbconv!(
        turbconv::EDMF{FT},
        m::AtmosModel{FT},
        aux::Vars,
        geom::LocalGeometry,
    ) where {FT}

Initialize EDMF auxiliary variables.
""" function init_aux_turbconv!( turbconv::EDMF{FT}, m::AtmosModel{FT}, aux::Vars, geom::LocalGeometry, ) where {FT} end; function turbconv_nodal_update_auxiliary_state!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, t::Real, ) where {FT} save_subdomain_temperature!(m, state, aux) end; function prognostic_to_primitive!( turbconv::EDMF, atmos, moist::DryModel, prim::Vars, prog::Vars, ) N_up = n_updrafts(turbconv) prim_en = prim.turbconv.environment prog_en = prog.turbconv.environment prim_up = prim.turbconv.updraft prog_up = prog.turbconv.updraft ρ_inv = 1 / prog.ρ prim_en.atke = prog_en.ρatke * ρ_inv prim_en.aθ_liq_cv = prog_en.ρaθ_liq_cv * ρ_inv prim_en.aθ_liq_q_tot_cv = prog_en.ρaθ_liq_q_tot_cv * ρ_inv if moist isa DryModel prim_en.aq_tot_cv = 0 else prim_en.aq_tot_cv = prog_en.ρaq_tot_cv * ρ_inv end @unroll_map(N_up) do i prim_up[i].a = prog_up[i].ρa * ρ_inv prim_up[i].aw = prog_up[i].ρaw * ρ_inv prim_up[i].aθ_liq = prog_up[i].ρaθ_liq * ρ_inv if moist isa DryModel prim_up[i].aq_tot = 0 else prim_up[i].aq_tot = prog_up[i].ρaq_tot * ρ_inv end end end function primitive_to_prognostic!( turbconv::EDMF, atmos, moist::DryModel, prog::Vars, prim::Vars, ) N_up = n_updrafts(turbconv) prim_en = prim.turbconv.environment prog_en = prog.turbconv.environment prim_up = prim.turbconv.updraft prog_up = prog.turbconv.updraft ρ_gm = prog.ρ prog_en.ρatke = prim_en.atke * ρ_gm prog_en.ρaθ_liq_cv = prim_en.aθ_liq_cv * ρ_gm prog_en.ρaθ_liq_q_tot_cv = prim_en.aθ_liq_q_tot_cv * ρ_gm if moist isa DryModel prog_en.ρaq_tot_cv = 0 else prog_en.ρaq_tot_cv = prim_en.aq_tot_cv * ρ_gm end @unroll_map(N_up) do i prog_up[i].ρa = prim_up[i].a * ρ_gm prog_up[i].ρaw = prim_up[i].aw * ρ_gm prog_up[i].ρaθ_liq = prim_up[i].aθ_liq * ρ_gm if moist isa DryModel prog_up[i].ρaq_tot = 0 else prog_up[i].ρaq_tot = prim_up[i].aq_tot * ρ_gm end end end function compute_gradient_argument!( turbconv::EDMF{FT}, m::AtmosModel{FT}, transform::Vars, state::Vars, aux::Vars, t::Real, ) where {FT} N_up 
= n_updrafts(turbconv) z = altitude(m, aux) # Aliases: gm_tf = transform.turbconv up_tf = transform.turbconv.updraft en_tf = transform.turbconv.environment gm = state up = state.turbconv.updraft en = state.turbconv.environment # Recover thermo states ts = recover_thermo_state_all(m, state, aux) # Get environment variables env = environment_vars(state, N_up) param_set = parameter_set(m) @unroll_map(N_up) do i up_tf[i].w = fix_void_up(up[i].ρa, up[i].ρaw / up[i].ρa) end _grav::FT = grav(param_set) ρ_inv = 1 / gm.ρ θ_liq_en = liquid_ice_pottemp(ts.en) if moisture_model(m) isa DryModel q_tot_en = FT(0) else q_tot_en = total_specific_humidity(ts.en) end # populate gradient arguments en_tf.θ_liq = θ_liq_en en_tf.q_tot = q_tot_en en_tf.w = env.w en_tf.tke = en.ρatke / (env.a * gm.ρ) en_tf.θ_liq_cv = en.ρaθ_liq_cv / (env.a * gm.ρ) if moisture_model(m) isa DryModel en_tf.q_tot_cv = FT(0) en_tf.θ_liq_q_tot_cv = FT(0) else en_tf.q_tot_cv = en.ρaq_tot_cv / (env.a * gm.ρ) en_tf.θ_liq_q_tot_cv = en.ρaθ_liq_q_tot_cv / (env.a * gm.ρ) end en_tf.θv = virtual_pottemp(ts.en) en_e_kin = FT(1 // 2) * ((gm.ρu[1] * ρ_inv)^2 + (gm.ρu[2] * ρ_inv)^2 + env.w^2) # TBD: Check en_e_tot = total_energy(en_e_kin, _grav * z, ts.en) en_tf.h_tot = total_specific_enthalpy(ts.en, en_e_tot) gm_tf.u = gm.ρu[1] * ρ_inv gm_tf.v = gm.ρu[2] * ρ_inv end; function compute_gradient_flux!( turbconv::EDMF{FT}, m::AtmosModel{FT}, diffusive::Vars, ∇transform::Grad, state::Vars, aux::Vars, t::Real, ) where {FT} args = (; diffusive, state, aux, t) N_up = n_updrafts(turbconv) # Aliases: gm = state gm_dif = diffusive.turbconv gm_∇tf = ∇transform.turbconv up_dif = diffusive.turbconv.updraft up_∇tf = ∇transform.turbconv.updraft en = state.turbconv.environment en_dif = diffusive.turbconv.environment en_∇tf = ∇transform.turbconv.environment @unroll_map(N_up) do i up_dif[i].∇w = up_∇tf[i].w end env = environment_vars(state, N_up) ρa₀ = gm.ρ * env.a # first moment grid mean coming from environment gradients only en_dif.∇θ_liq 
= en_∇tf.θ_liq en_dif.∇q_tot = en_∇tf.q_tot en_dif.∇w = en_∇tf.w # second moment env cov en_dif.∇tke = en_∇tf.tke en_dif.∇θ_liq_cv = en_∇tf.θ_liq_cv en_dif.∇q_tot_cv = en_∇tf.q_tot_cv en_dif.∇θ_liq_q_tot_cv = en_∇tf.θ_liq_q_tot_cv en_dif.∇θv = en_∇tf.θv en_dif.∇h_tot = en_∇tf.h_tot gm_dif.∇u = gm_∇tf.u gm_dif.∇v = gm_∇tf.v end; function source(::up_ρa{i}, ::EntrDetr, atmos, args) where {i} @unpack E_dyn, Δ_dyn, ρa_up = args.precomputed.turbconv return fix_void_up(ρa_up[i], E_dyn[i] - Δ_dyn[i]) end function source(::up_ρaw{i}, ::EntrDetr, atmos, args) where {i} @unpack E_dyn, Δ_dyn, E_trb, env, ρa_up, w_up = args.precomputed.turbconv up = args.state.turbconv.updraft entr = fix_void_up(ρa_up[i], (E_dyn[i] + E_trb[i]) * env.w) detr = fix_void_up(ρa_up[i], (Δ_dyn[i] + E_trb[i]) * w_up[i]) return entr - detr end function source(::up_ρaθ_liq{i}, ::EntrDetr, atmos, args) where {i} @unpack E_dyn, Δ_dyn, E_trb, env, ρa_up, ts_en = args.precomputed.turbconv up = args.state.turbconv.updraft θ_liq_en = liquid_ice_pottemp(ts_en) entr = fix_void_up(ρa_up[i], (E_dyn[i] + E_trb[i]) * θ_liq_en) detr = fix_void_up(ρa_up[i], (Δ_dyn[i] + E_trb[i]) * up[i].ρaθ_liq / ρa_up[i]) return entr - detr end function source(::up_ρaq_tot{i}, ::EntrDetr, atmos, args) where {i} @unpack E_dyn, Δ_dyn, E_trb, ρa_up, ts_en = args.precomputed.turbconv up = args.state.turbconv.updraft q_tot_en = total_specific_humidity(ts_en) entr = fix_void_up(ρa_up[i], (E_dyn[i] + E_trb[i]) * q_tot_en) detr = fix_void_up(ρa_up[i], (Δ_dyn[i] + E_trb[i]) * up[i].ρaq_tot / ρa_up[i]) return entr - detr end function source(::en_ρatke, ::EntrDetr, atmos, args) @unpack E_dyn, Δ_dyn, E_trb, env, ρa_up, w_up = args.precomputed.turbconv @unpack state = args up = state.turbconv.updraft en = state.turbconv.environment gm = state N_up = n_updrafts(turbconv_model(atmos)) ρ_inv = 1 / gm.ρ tke_en = enforce_positivity(en.ρatke) * ρ_inv / env.a entr_detr = vuntuple(N_up) do i fix_void_up( ρa_up[i], E_trb[i] * (env.w - gm.ρu[3] * ρ_inv) 
* (env.w - w_up[i]) - (E_dyn[i] + E_trb[i]) * tke_en + Δ_dyn[i] * (w_up[i] - env.w) * (w_up[i] - env.w) / 2, ) end return sum(entr_detr) end function source(::en_ρaθ_liq_cv, ::EntrDetr, atmos, args) @unpack E_dyn, Δ_dyn, E_trb, ρa_up, ts_en = args.precomputed.turbconv @unpack state = args ts_gm = args.precomputed.ts up = state.turbconv.updraft en = state.turbconv.environment N_up = n_updrafts(turbconv_model(atmos)) θ_liq = liquid_ice_pottemp(ts_gm) θ_liq_en = liquid_ice_pottemp(ts_en) entr_detr = vuntuple(N_up) do i fix_void_up( ρa_up[i], Δ_dyn[i] * (up[i].ρaθ_liq / ρa_up[i] - θ_liq_en) * (up[i].ρaθ_liq / ρa_up[i] - θ_liq_en) + E_trb[i] * (θ_liq_en - θ_liq) * (θ_liq_en - up[i].ρaθ_liq / ρa_up[i]) + E_trb[i] * (θ_liq_en - θ_liq) * (θ_liq_en - up[i].ρaθ_liq / ρa_up[i]) - (E_dyn[i] + E_trb[i]) * en.ρaθ_liq_cv, ) end return sum(entr_detr) end function source(::en_ρaq_tot_cv, ::EntrDetr, atmos, args) @unpack E_dyn, Δ_dyn, E_trb, ρa_up, ts_en = args.precomputed.turbconv @unpack state = args FT = eltype(state) up = state.turbconv.updraft en = state.turbconv.environment gm = state N_up = n_updrafts(turbconv_model(atmos)) q_tot_en = total_specific_humidity(ts_en) ρ_inv = 1 / gm.ρ ρq_tot = moisture_model(atmos) isa DryModel ? 
FT(0) : gm.moisture.ρq_tot
    entr_detr = vuntuple(N_up) do i
        # Symmetric turbulent-entrainment cross term enters twice.
        fix_void_up(
            ρa_up[i],
            Δ_dyn[i] *
            (up[i].ρaq_tot / ρa_up[i] - q_tot_en) *
            (up[i].ρaq_tot / ρa_up[i] - q_tot_en) +
            E_trb[i] *
            (q_tot_en - ρq_tot * ρ_inv) *
            (q_tot_en - up[i].ρaq_tot / ρa_up[i]) +
            E_trb[i] *
            (q_tot_en - ρq_tot * ρ_inv) *
            (q_tot_en - up[i].ρaq_tot / ρa_up[i]) -
            (E_dyn[i] + E_trb[i]) * en.ρaq_tot_cv,
        )
    end
    return sum(entr_detr)
end

# Environment θ_liq–q_tot covariance exchange with all updrafts.
function source(::en_ρaθ_liq_q_tot_cv, ::EntrDetr, atmos, args)
    @unpack E_dyn, Δ_dyn, E_trb, ρa_up, ts_en = args.precomputed.turbconv
    @unpack state = args
    FT = eltype(state)
    ts_gm = args.precomputed.ts
    up = state.turbconv.updraft
    en = state.turbconv.environment
    gm = state
    N_up = n_updrafts(turbconv_model(atmos))
    q_tot_en = total_specific_humidity(ts_en)
    θ_liq = liquid_ice_pottemp(ts_gm)
    θ_liq_en = liquid_ice_pottemp(ts_en)
    ρ_inv = 1 / gm.ρ
    ρq_tot = moisture_model(atmos) isa DryModel ? FT(0) : gm.moisture.ρq_tot
    entr_detr = vuntuple(N_up) do i
        fix_void_up(
            ρa_up[i],
            Δ_dyn[i] *
            (up[i].ρaθ_liq / ρa_up[i] - θ_liq_en) *
            (up[i].ρaq_tot / ρa_up[i] - q_tot_en) +
            E_trb[i] *
            (θ_liq_en - θ_liq) *
            (q_tot_en - up[i].ρaq_tot / ρa_up[i]) +
            E_trb[i] *
            (q_tot_en - ρq_tot * ρ_inv) *
            (θ_liq_en - up[i].ρaθ_liq / ρa_up[i]) -
            (E_dyn[i] + E_trb[i]) * en.ρaθ_liq_q_tot_cv,
        )
    end
    return sum(entr_detr)
end

# TKE source from updraft pressure work, summed over updrafts.
function source(::en_ρatke, ::PressSource, atmos, args)
    @unpack env, ρa_up, dpdz, w_up = args.precomputed.turbconv
    up = args.state.turbconv.updraft
    N_up = n_updrafts(turbconv_model(atmos))
    press_tke = vuntuple(N_up) do i
        fix_void_up(ρa_up[i], ρa_up[i] * (w_up[i] - env.w) * dpdz[i])
    end
    return sum(press_tke)
end

# TKE shear production from the mean-gradient (squared shear) term.
function source(::en_ρatke, ::ShearSource, atmos, args)
    @unpack env, K_m, Shear² = args.precomputed.turbconv
    gm = args.state
    ρa₀ = gm.ρ * env.a
    # production from mean gradient and Dissipation
    return ρa₀ * K_m * Shear² # tke Shear source
end

# TKE buoyancy production/destruction from the environment buoyancy gradient.
function source(::en_ρatke, ::BuoySource, atmos, args)
    @unpack env, K_h, ∂b∂z_env = args.precomputed.turbconv
    gm = args.state
    ρa₀ = gm.ρ * env.a
    return -ρa₀ * K_h * ∂b∂z_env # tke Buoyancy source
end

# Dissipation sinks: each second-moment variable decays at rate Diss₀.
function source(::en_ρatke, ::DissSource, atmos, args)
    @unpack Diss₀ = args.precomputed.turbconv
    en = args.state.turbconv.environment
    return -Diss₀ * en.ρatke # tke Dissipation
end

function source(::en_ρaθ_liq_cv, ::DissSource, atmos, args)
    @unpack Diss₀ = args.precomputed.turbconv
    en = args.state.turbconv.environment
    return -Diss₀ * en.ρaθ_liq_cv
end

function source(::en_ρaq_tot_cv, ::DissSource, atmos, args)
    @unpack Diss₀ = args.precomputed.turbconv
    en = args.state.turbconv.environment
    return -Diss₀ * en.ρaq_tot_cv
end

function source(::en_ρaθ_liq_q_tot_cv, ::DissSource, atmos, args)
    @unpack Diss₀ = args.precomputed.turbconv
    en = args.state.turbconv.environment
    return -Diss₀ * en.ρaθ_liq_q_tot_cv
end

# Gradient-production sources: 2 K_h (∂x)(∂y) for each covariance pair.
function source(::en_ρaθ_liq_cv, ::GradProdSource, atmos, args)
    @unpack env, K_h = args.precomputed.turbconv
    gm = args.state
    en_dif = args.diffusive.turbconv.environment
    ρa₀ = gm.ρ * env.a
    return ρa₀ * (2 * K_h * en_dif.∇θ_liq[3] * en_dif.∇θ_liq[3])
end

function source(::en_ρaq_tot_cv, ::GradProdSource, atmos, args)
    @unpack env, K_h = args.precomputed.turbconv
    gm = args.state
    en_dif = args.diffusive.turbconv.environment
    ρa₀ = gm.ρ * env.a
    return ρa₀ * (2 * K_h * en_dif.∇q_tot[3] * en_dif.∇q_tot[3])
end

function source(::en_ρaθ_liq_q_tot_cv, ::GradProdSource, atmos, args)
    @unpack env, K_h = args.precomputed.turbconv
    gm = args.state
    en_dif = args.diffusive.turbconv.environment
    ρa₀ = gm.ρ * env.a
    return ρa₀ * (2 * K_h * en_dif.∇θ_liq[3] * en_dif.∇q_tot[3])
end

# Updraft momentum buoyancy source.
function source(::up_ρaw{i}, ::BuoySource, atmos, args) where {i}
    @unpack buoy = args.precomputed.turbconv
    up = args.state.turbconv.updraft
    return up[i].ρa * buoy.up[i]
end

# Updraft momentum pressure source (opposes dpdz).
function source(::up_ρaw{i}, ::PressSource, atmos, args) where {i}
    @unpack dpdz = args.precomputed.turbconv
    up = args.state.turbconv.updraft
    return -up[i].ρa * dpdz[i]
end

# Updraft ρa clipped into [a_min, a_max] (as an area fraction) to avoid
# division by vanishing updraft area downstream.
function compute_ρa_up(atmos, state, aux)
    # Aliases:
    turbconv = turbconv_model(atmos)
    gm = state
    up = state.turbconv.updraft
    N_up = n_updrafts(turbconv)
    a_min = turbconv.subdomains.a_min
    a_max = turbconv.subdomains.a_max
    # in future GCM implementations we need to think about grid mean advection
    ρa_up = vuntuple(N_up) do i
        gm.ρ * enforce_unit_bounds(up[i].ρa / gm.ρ, a_min, a_max)
    end
    return ρa_up
end

# --- First-order (advective) fluxes, all along the vertical unit vector ---

function flux(::up_ρa{i}, ::Advect, atmos, args) where {i}
    @unpack state, aux = args
    @unpack ρa_up = args.precomputed.turbconv
    up = state.turbconv.updraft
    ẑ = vertical_unit_vector(atmos, aux)
    return fix_void_up(ρa_up[i], up[i].ρaw) * ẑ
end
function flux(::up_ρaw{i}, ::Advect, atmos, args) where {i}
    @unpack state, aux = args
    @unpack ρa_up, w_up = args.precomputed.turbconv
    up = state.turbconv.updraft
    ẑ = vertical_unit_vector(atmos, aux)
    return fix_void_up(ρa_up[i], up[i].ρaw * w_up[i]) * ẑ
end
function flux(::up_ρaθ_liq{i}, ::Advect, atmos, args) where {i}
    @unpack state, aux = args
    @unpack ρa_up, w_up = args.precomputed.turbconv
    up = state.turbconv.updraft
    ẑ = vertical_unit_vector(atmos, aux)
    return fix_void_up(ρa_up[i], w_up[i] * up[i].ρaθ_liq) * ẑ
end
function flux(::up_ρaq_tot{i}, ::Advect, atmos, args) where {i}
    @unpack state, aux = args
    @unpack ρa_up, w_up = args.precomputed.turbconv
    up = state.turbconv.updraft
    ẑ = vertical_unit_vector(atmos, aux)
    return fix_void_up(ρa_up[i], w_up[i] * up[i].ρaq_tot) * ẑ
end

# Environment second moments are advected with the environment velocity.
function flux(::en_ρatke, ::Advect, atmos, args)
    @unpack state, aux = args
    @unpack env = args.precomputed.turbconv
    en = state.turbconv.environment
    ẑ = vertical_unit_vector(atmos, aux)
    return en.ρatke * env.w * ẑ
end
function flux(::en_ρaθ_liq_cv, ::Advect, atmos, args)
    @unpack state, aux = args
    @unpack env = args.precomputed.turbconv
    en = state.turbconv.environment
    ẑ = vertical_unit_vector(atmos, aux)
    return en.ρaθ_liq_cv * env.w * ẑ
end
function flux(::en_ρaq_tot_cv, ::Advect, atmos, args)
    @unpack state, aux = args
    @unpack env = args.precomputed.turbconv
    en = state.turbconv.environment
    ẑ = vertical_unit_vector(atmos, aux)
    return en.ρaq_tot_cv * env.w * ẑ
end
# Advective flux of the environment ⟨θ_liq′ q_tot′⟩ covariance, carried by the
# environment vertical velocity along the vertical unit vector.
function flux(::en_ρaθ_liq_q_tot_cv, ::Advect, atmos, args)
    @unpack state, aux = args
    @unpack env = args.precomputed.turbconv
    en = state.turbconv.environment
    ẑ = vertical_unit_vector(atmos, aux)
    return en.ρaθ_liq_q_tot_cv * env.w * ẑ
end

# Precompute subdomain quantities shared by all first-order (advective) flux
# kernels: environment variables, bounded updraft area-weighted densities
# (ρa_up), and per-updraft w, θ_liq, a and q_tot diagnosed from the prognostic
# state. `fix_void_up` substitutes a fallback value wherever an updraft has
# (numerically) vanished, avoiding division by a zero area fraction.
function precompute(::EDMF, bl, args, ts, ::Flux{FirstOrder})
    @unpack state, aux = args
    FT = eltype(state)
    turbconv = turbconv_model(bl)
    env = environment_vars(state, n_updrafts(turbconv))
    ρa_up = compute_ρa_up(bl, state, aux)
    up = state.turbconv.updraft
    gm = state
    N_up = n_updrafts(turbconv)
    ρ_inv = 1 / gm.ρ
    w_up = vuntuple(N_up) do i
        fix_void_up(ρa_up[i], up[i].ρaw / ρa_up[i])
    end
    θ_liq_up = vuntuple(N_up) do i
        fix_void_up(up[i].ρa, up[i].ρaθ_liq / up[i].ρa, liquid_ice_pottemp(ts))
    end
    a_up = vuntuple(N_up) do i
        fix_void_up(up[i].ρa, up[i].ρa * ρ_inv)
    end
    if !(moisture_model(bl) isa DryModel)
        q_tot_up = vuntuple(N_up) do i
            fix_void_up(up[i].ρa, up[i].ρaq_tot / up[i].ρa, gm.moisture.ρq_tot)
        end
    else
        q_tot_up = vuntuple(i -> FT(0), N_up)
    end
    return (; env, a_up, q_tot_up, ρa_up, θ_liq_up, w_up)
end

# Precompute quantities shared by the second-order (diffusive + mass-flux) flux
# kernels: thermodynamic states of environment/updrafts, entrainment /
# detrainment rates, mixing length, eddy diffusivities (K_m, K_h) and the
# SGS total-enthalpy flux (down-gradient diffusion plus mass-flux term).
function precompute(::EDMF, bl, args, ts, ::Flux{SecondOrder})
    @unpack state, aux, diffusive, t = args
    en_dif = diffusive.turbconv.environment
    up = state.turbconv.updraft
    gm = state
    ts_gm = ts
    FT = eltype(state)
    z = altitude(bl, aux)
    param_set = parameter_set(bl)
    turbconv = turbconv_model(bl)
    N_up = n_updrafts(turbconv)
    _grav::FT = grav(param_set)
    ρ_inv = 1 / gm.ρ
    env = environment_vars(state, N_up)
    # Recover subdomain thermodynamic states from the grid-mean state
    ts_en = new_thermo_state_en(bl, moisture_model(bl), state, aux, ts_gm)
    ts_up = new_thermo_state_up(bl, moisture_model(bl), state, aux, ts_gm)
    buoy = compute_buoyancy(bl, state, env, ts_en, ts_up, aux.ref_state)
    E_dyn, Δ_dyn, E_trb = entr_detr(bl, state, aux, ts_up, ts_en, env, buoy)
    Shear² =
        diffusive.turbconv.∇u[3]^2 +
        diffusive.turbconv.∇v[3]^2 +
        diffusive.turbconv.environment.∇w[3]^2
    l_mix, ∂b∂z_env, Pr_t = mixing_length(
        bl,
        turbconv.mix_len,
        args,
        Δ_dyn,
        E_trb,
        Shear²,
        ts_gm,
        ts_en,
        env,
    )
    ρa_up = compute_ρa_up(bl, state, aux)
    en = state.turbconv.environment
    tke_en = enforce_positivity(en.ρatke) / env.a / state.ρ
    # Eddy viscosity and diffusivity from mixing length and environment TKE
    K_m = turbconv.mix_len.c_m * l_mix * sqrt(tke_en)
    K_h = K_m / Pr_t
    ρaw_up = vuntuple(i -> up[i].ρaw, N_up)
    w_up = vuntuple(N_up) do i
        fix_void_up(ρa_up[i], up[i].ρaw / ρa_up[i])
    end
    ρu_gm_tup = Tuple(gm.ρu)
    ρa_en = gm.ρ * env.a
    # Specific kinetic energy of each subdomain, ½(u² + v² + w²), using
    # grid-mean horizontal winds and the subdomain vertical velocity.
    # TODO: Consider turbulent contribution:
    e_kin_up =
        FT(1 / 2) .* ntuple(N_up) do i
            (ρu_gm_tup[1] * ρ_inv)^2 + (ρu_gm_tup[2] * ρ_inv)^2 + w_up[i]^2
        end
    # Bug fix: the closing parenthesis previously squared the full sum
    # (u² + v² + w)² instead of adding w²; now consistent with `e_kin_up`.
    e_kin_en =
        FT(1 // 2) *
        ((gm.ρu[1] * ρ_inv)^2 + (gm.ρu[2] * ρ_inv)^2 + env.w^2)
    e_tot_up = ntuple(i -> total_energy(e_kin_up[i], _grav * z, ts_up[i]), N_up)
    h_tot_up = ntuple(i -> total_specific_enthalpy(ts_up[i], e_tot_up[i]), N_up)
    e_tot_en = total_energy(e_kin_en, _grav * z, ts_en)
    h_tot_en = total_specific_enthalpy(ts_en, e_tot_en)
    h_tot_gm = total_specific_enthalpy(ts, gm.energy.ρe * ρ_inv)
    # Mass-flux contribution to the SGS enthalpy flux: sum over updrafts plus
    # the environment term, each weighted by (h_gm - h_subdomain)(w_gm - w_subdomain).
    massflux_h_tot = sum(ntuple(N_up) do i
        fix_void_up(
            ρa_up[i],
            ρa_up[i] *
            (h_tot_gm - h_tot_up[i]) *
            (ρu_gm_tup[3] * ρ_inv - ρaw_up[i] / ρa_up[i]),
        )
    end)
    massflux_h_tot +=
        ρa_en * (h_tot_gm - h_tot_en) * (ρu_gm_tup[3] * ρ_inv - env.w)
    # Total SGS enthalpy flux = down-gradient diffusion + mass flux (vertical only)
    ρh_sgs_flux = SVector{3, FT}(
        0,
        0,
        -gm.ρ * env.a * K_h * en_dif.∇h_tot[3] + massflux_h_tot,
    )
    return (;
        env,
        ρa_up,
        ρaw_up,
        ts_en,
        ts_up,
        E_dyn,
        Δ_dyn,
        E_trb,
        l_mix,
        ∂b∂z_env,
        K_h,
        K_m,
        Pr_t,
        ρh_sgs_flux,
    )
end

"""
    compute_buoyancy(
        bl::BalanceLaw,
        state::Vars,
        env::NamedTuple,
        ts_en::ThermodynamicState,
        ts_up,
        ref_state::Vars
    )

Compute buoyancies of subdomains (updrafts and environment) relative to the
reference-state density, with the grid-mean buoyancy removed from each.
"""
function compute_buoyancy(
    bl::BalanceLaw,
    state::Vars,
    env::NamedTuple,
    ts_en::ThermodynamicState,
    ts_up,
    ref_state::Vars,
)
    FT = eltype(state)
    param_set = parameter_set(bl)
    N_up = n_updrafts(turbconv_model(bl))
    _grav::FT = grav(param_set)
    gm = state
    ρ_inv = 1 / gm.ρ
    # Absolute buoyancy of the environment w.r.t. the reference density
    buoyancy_en = -_grav * (air_density(ts_en) - ref_state.ρ) * ρ_inv
    up = state.turbconv.updraft
    a_up = vuntuple(N_up) do i
        fix_void_up(up[i].ρa, up[i].ρa * ρ_inv)
    end
    abs_buoyancy_up = vuntuple(N_up) do i
        -_grav * (air_density(ts_up[i]) - ref_state.ρ) * ρ_inv
    end
    b_gm = grid_mean_b(env, a_up, N_up, abs_buoyancy_up, buoyancy_en)
    # remove the gm_b from all subdomains
    buoyancy_up = vuntuple(N_up) do i
        abs_buoyancy_up[i] - b_gm
    end
    buoyancy_en -= b_gm
    return (; up = buoyancy_up, en = buoyancy_en)
end

# Precompute quantities shared by all EDMF source-term kernels: subdomain
# thermodynamic states, entrainment/detrainment, perturbation pressure
# gradient, mixing length, eddy coefficients, the TKE dissipation rate scale
# Diss₀ and the TKE buoyancy production term.
function precompute(::EDMF, bl, args, ts, ::Source)
    @unpack state, aux, diffusive, t = args
    ts_gm = ts
    gm = state
    up = state.turbconv.updraft
    turbconv = turbconv_model(bl)
    N_up = n_updrafts(turbconv)
    # Get environment variables
    env = environment_vars(state, N_up)
    # Recover thermo states
    ts_en = new_thermo_state_en(bl, moisture_model(bl), state, aux, ts_gm)
    ts_up = new_thermo_state_up(bl, moisture_model(bl), state, aux, ts_gm)
    ρa_up = compute_ρa_up(bl, state, aux)
    Shear² =
        diffusive.turbconv.∇u[3]^2 +
        diffusive.turbconv.∇v[3]^2 +
        diffusive.turbconv.environment.∇w[3]^2
    buoy = compute_buoyancy(bl, state, env, ts_en, ts_up, aux.ref_state)
    E_dyn, Δ_dyn, E_trb = entr_detr(bl, state, aux, ts_up, ts_en, env, buoy)
    dpdz = perturbation_pressure(bl, args, env, buoy)
    l_mix, ∂b∂z_env, Pr_t = mixing_length(
        bl,
        turbconv.mix_len,
        args,
        Δ_dyn,
        E_trb,
        Shear²,
        ts_gm,
        ts_en,
        env,
    )
    w_up = vuntuple(N_up) do i
        fix_void_up(ρa_up[i], up[i].ρaw / ρa_up[i])
    end
    en = state.turbconv.environment
    tke_en = enforce_positivity(en.ρatke) / env.a / state.ρ
    K_m = turbconv.mix_len.c_m * l_mix * sqrt(tke_en)
    K_h = K_m / Pr_t
    Diss₀ = turbconv.mix_len.c_d * sqrt(tke_en) / l_mix
    tke_buoy_prod = -gm.ρ * env.a * K_h * ∂b∂z_env # tke Buoyancy source
    return (;
        env,
        Diss₀,
        buoy,
        K_m,
        K_h,
        ρa_up,
        w_up,
        ts_en,
        ts_up,
        E_dyn,
        Δ_dyn,
        E_trb,
        dpdz,
        l_mix,
        ∂b∂z_env,
        Pr_t,
        Shear²,
        tke_buoy_prod,
    )
end

# SGS flux of total energy: return the precomputed enthalpy flux vector.
function flux(::Energy, ::SGSFlux, atmos, args)
    @unpack state, aux, diffusive = args
    @unpack ts = args.precomputed
    @unpack ρh_sgs_flux = args.precomputed.turbconv
    return ρh_sgs_flux
end

# SGS flux of total moisture: down-gradient diffusion of q_tot in the
# environment plus the subdomain mass-flux contribution (vertical only).
function flux(::TotalMoisture, ::SGSFlux, atmos, args)
    @unpack state, diffusive = args
    @unpack env, K_h, ρa_up, ρaw_up, ts_en = args.precomputed.turbconv
    FT = eltype(state)
    en_dif = diffusive.turbconv.environment
    up = state.turbconv.updraft
    gm = state
    ρ_inv = 1 / gm.ρ
    N_up = n_updrafts(turbconv_model(atmos))
    ρq_tot = moisture_model(atmos) isa DryModel ? FT(0) : gm.moisture.ρq_tot
    ρaq_tot_up = vuntuple(i -> up[i].ρaq_tot, N_up)
    ρa_en = gm.ρ * env.a
    q_tot_en = total_specific_humidity(ts_en)
    ρu_gm_tup = Tuple(gm.ρu)
    massflux_q_tot = sum(ntuple(N_up) do i
        fix_void_up(
            ρa_up[i],
            ρa_up[i] *
            (ρq_tot * ρ_inv - ρaq_tot_up[i] / ρa_up[i]) *
            (ρu_gm_tup[3] * ρ_inv - ρaw_up[i] / ρa_up[i]),
        )
    end)
    massflux_q_tot +=
        ρa_en * (ρq_tot * ρ_inv - q_tot_en) * (ρu_gm_tup[3] * ρ_inv - env.w)
    ρq_tot_sgs_flux = -gm.ρ * env.a * K_h * en_dif.∇q_tot[3] + massflux_q_tot
    return SVector{3, FT}(0, 0, ρq_tot_sgs_flux)
end

# SGS momentum flux: down-gradient diffusion of (u, v, w); the vertical
# component also carries the subdomain mass-flux term. Returned as a 3×3
# column-major SMatrix whose third row holds the vertical fluxes.
function flux(::Momentum, ::SGSFlux, atmos, args)
    @unpack state, diffusive = args
    @unpack env, K_m, ρa_up, ρaw_up = args.precomputed.turbconv
    FT = eltype(state)
    en_dif = diffusive.turbconv.environment
    gm_dif = diffusive.turbconv
    up = state.turbconv.updraft
    gm = state
    ρ_inv = 1 / gm.ρ
    N_up = n_updrafts(turbconv_model(atmos))
    ρa_en = gm.ρ * env.a
    ρu_gm_tup = Tuple(gm.ρu)
    massflux_w = sum(ntuple(N_up) do i
        fix_void_up(
            ρa_up[i],
            ρa_up[i] *
            (ρu_gm_tup[3] * ρ_inv - ρaw_up[i] / ρa_up[i]) *
            (ρu_gm_tup[3] * ρ_inv - ρaw_up[i] / ρa_up[i]),
        )
    end)
    massflux_w +=
        ρa_en *
        (ρu_gm_tup[3] * ρ_inv - env.w) *
        (ρu_gm_tup[3] * ρ_inv - env.w)
    ρw_sgs_flux = -gm.ρ * env.a * K_m * en_dif.∇w[3] + massflux_w
    ρu_sgs_flux = -gm.ρ * env.a * K_m * gm_dif.∇u[3]
    ρv_sgs_flux = -gm.ρ * env.a * K_m * gm_dif.∇v[3]
    return SMatrix{3, 3, FT, 9}(
        0,
        0,
        ρu_sgs_flux,
        0,
        0,
        ρv_sgs_flux,
        0,
        0,
        ρw_sgs_flux,
    )
end

# Down-gradient diffusive flux of the environment θ_liq covariance.
function flux(::en_ρaθ_liq_cv, ::Diffusion, atmos, args)
    @unpack state, aux, diffusive = args
    @unpack env, l_mix, Pr_t, K_h = args.precomputed.turbconv
    en_dif = diffusive.turbconv.environment
    gm = state
    ẑ = vertical_unit_vector(atmos, aux)
    return -gm.ρ * env.a * K_h * en_dif.∇θ_liq_cv[3] * ẑ
end

# Down-gradient diffusive flux of the environment q_tot covariance.
function flux(::en_ρaq_tot_cv, ::Diffusion, atmos, args)
    @unpack state, aux, diffusive = args
    @unpack env, l_mix, Pr_t, K_h = args.precomputed.turbconv
    en_dif = diffusive.turbconv.environment
    gm = state
    ẑ = vertical_unit_vector(atmos, aux)
    return -gm.ρ * env.a * K_h * en_dif.∇q_tot_cv[3] * ẑ
end

# Down-gradient diffusive flux of the environment θ_liq–q_tot covariance.
function flux(::en_ρaθ_liq_q_tot_cv, ::Diffusion, atmos, args)
    @unpack state, aux, diffusive = args
    @unpack env, l_mix, Pr_t, K_h = args.precomputed.turbconv
    en_dif = diffusive.turbconv.environment
    gm = state
    ẑ = vertical_unit_vector(atmos, aux)
    return -gm.ρ * env.a * K_h * en_dif.∇θ_liq_q_tot_cv[3] * ẑ
end

# Down-gradient diffusive flux of environment TKE (uses K_m, not K_h).
function flux(::en_ρatke, ::Diffusion, atmos, args)
    @unpack state, aux, diffusive = args
    @unpack env, K_m = args.precomputed.turbconv
    gm = state
    en_dif = diffusive.turbconv.environment
    ẑ = vertical_unit_vector(atmos, aux)
    return -gm.ρ * env.a * K_m * en_dif.∇tke[3] * ẑ
end

# First order boundary conditions
function turbconv_boundary_state!(
    nf,
    bc::EDMFBottomBC,
    atmos::AtmosModel{FT},
    state⁺::Vars,
    args,
) where {FT}
    @unpack state⁻, aux⁻, aux_int⁻ = args
    turbconv = turbconv_model(atmos)
    N_up = n_updrafts(turbconv)
    up⁺ = state⁺.turbconv.updraft
    en⁺ = state⁺.turbconv.environment
    gm⁻ = state⁻
    gm_a⁻ = aux⁻
    zLL = altitude(atmos, aux_int⁻)
    # Diagnose surface values of subdomain variables at the first interior node
    surf_vals = subdomain_surface_values(atmos, gm⁻, gm_a⁻, zLL)
    a_up_surf = surf_vals.a_up_surf
    @unroll_map(N_up) do i
        up⁺[i].ρaw = FT(0)
        up⁺[i].ρa = gm⁻.ρ * a_up_surf[i]
        up⁺[i].ρaθ_liq = gm⁻.ρ * a_up_surf[i] * surf_vals.θ_liq_up_surf[i]
        if !(moisture_model(atmos) isa DryModel)
            up⁺[i].ρaq_tot = gm⁻.ρ * a_up_surf[i] * surf_vals.q_tot_up_surf[i]
        else
            up⁺[i].ρaq_tot = FT(0)
        end
    end
    a_en = environment_area(gm⁻, N_up)
    en⁺.ρatke = gm⁻.ρ * a_en * surf_vals.tke
    en⁺.ρaθ_liq_cv = gm⁻.ρ * a_en * surf_vals.θ_liq_cv
    if !(moisture_model(atmos) isa DryModel)
        en⁺.ρaq_tot_cv = gm⁻.ρ * a_en * surf_vals.q_tot_cv
        en⁺.ρaθ_liq_q_tot_cv = gm⁻.ρ * a_en * surf_vals.θ_liq_q_tot_cv
    else
        en⁺.ρaq_tot_cv = FT(0)
        en⁺.ρaθ_liq_q_tot_cv = FT(0)
    end
end;

# At the top boundary only the updraft vertical momentum is constrained.
function turbconv_boundary_state!(
    nf,
    bc::EDMFTopBC,
    atmos::AtmosModel{FT},
    state⁺::Vars,
    args,
) where {FT}
    N_up = n_updrafts(turbconv_model(atmos))
    up⁺ = state⁺.turbconv.updraft
    @unroll_map(N_up) do i
        up⁺[i].ρaw = FT(0)
    end
end;

# The boundary conditions for second-order unknowns
# (here we prescribe a flux at state⁺ to match that at state⁻ so that the flux divergence is zero)
function turbconv_normal_boundary_flux_second_order!(
    nf,
    bc::EDMFBottomBC,
    atmos::AtmosModel,
    fluxᵀn::Vars,
    args,
)
    @unpack state⁻, aux⁻, diffusive⁻, hyperdiff⁻, t, n⁻ = args
    en_flx = fluxᵀn.turbconv.environment
    tend_type = Flux{SecondOrder}()
    _args⁻ = (;
        state = state⁻,
        aux = aux⁻,
        t,
        diffusive = diffusive⁻,
        hyperdiffusive = hyperdiff⁻,
    )
    pargs = merge(_args⁻, (precomputed = precompute(atmos, _args⁻, tend_type),))
    total_flux = Σfluxes(
        en_ρatke(),
        eq_tends(en_ρatke(), atmos, tend_type),
        atmos,
        pargs,
    )
    nd_ρatke = dot(n⁻, total_flux)
    en_flx.ρatke = nd_ρatke
    total_flux = Σfluxes(
        en_ρaθ_liq_cv(),
        eq_tends(en_ρaθ_liq_cv(), atmos, tend_type),
        atmos,
        pargs,
    )
    nd_ρaθ_liq_cv = dot(n⁻, total_flux)
    en_flx.ρaθ_liq_cv = nd_ρaθ_liq_cv
    if !(moisture_model(atmos) isa DryModel)
        total_flux = Σfluxes(
            en_ρaq_tot_cv(),
            eq_tends(en_ρaq_tot_cv(), atmos, tend_type),
            atmos,
            pargs,
        )
        nd_ρaq_tot_cv = dot(n⁻, total_flux)
        en_flx.ρaq_tot_cv = nd_ρaq_tot_cv
        total_flux = Σfluxes(
            en_ρaθ_liq_q_tot_cv(),
            eq_tends(en_ρaθ_liq_q_tot_cv(), atmos, tend_type),
            atmos,
            pargs,
        )
        nd_ρaθ_liq_q_tot_cv = dot(n⁻, total_flux)
        en_flx.ρaθ_liq_q_tot_cv = nd_ρaθ_liq_q_tot_cv
    end
end;

# Zero-flux (insulating) top boundary for all turbconv second-order unknowns.
function turbconv_normal_boundary_flux_second_order!(
    nf,
    bc::EDMFTopBC,
    atmos::AtmosModel{FT},
    fluxᵀn::Vars,
    args,
) where {FT}
    turbconv = turbconv_model(atmos)
    N_up = n_updrafts(turbconv)
    up_flx = fluxᵀn.turbconv.updraft
    en_flx = fluxᵀn.turbconv.environment
    @unroll_map(N_up) do i
        up_flx[i].ρa = FT(0)
        up_flx[i].ρaθ_liq = FT(0)
        up_flx[i].ρaq_tot = FT(0)
    end
    en_flx.ρatke = FT(0)
    en_flx.ρaθ_liq_cv = FT(0)
    en_flx.ρaq_tot_cv = FT(0)
    en_flx.ρaθ_liq_q_tot_cv = FT(0)
end;


================================================
FILE: test/Atmos/EDMF/edmf_model.jl
================================================ #### EDMF model using DocStringExtensions using CLIMAParameters: AbstractEarthParameterSet using CLIMAParameters.Atmos.EDMF using CLIMAParameters.SubgridScale """ EntrainmentDetrainment An Entrainment-Detrainment model for EDMF, containing all related model and free parameters. # Fields $(DocStringExtensions.FIELDS) """ Base.@kwdef struct EntrainmentDetrainment{FT <: AbstractFloat} "Entrainment TKE scale" c_λ::FT "Entrainment factor" c_ε::FT "Detrainment factor" c_δ::FT "Turbulent Entrainment factor" c_t::FT "Detrainment RH power" β::FT "Logistic function scale ‵[1/s]‵" μ_0::FT "Updraft mixing fraction" χ::FT "Minimum updraft velocity" w_min::FT "Exponential area limiter scale" lim_ϵ::FT "Exponential area limiter amplitude" lim_amp::FT end """ EntrainmentDetrainment{FT}(param_set) where {FT} Constructor for `EntrainmentDetrainment` for EDMF, given: - `param_set`, an AbstractEarthParameterSet """ function EntrainmentDetrainment{FT}( param_set::AbstractEarthParameterSet, ) where {FT} c_λ_ = c_λ(param_set) c_ε_ = c_ε(param_set) c_δ_ = c_δ(param_set) c_t_ = c_t(param_set) β_ = β(param_set) μ_0_ = μ_0(param_set) χ_ = χ(param_set) w_min_ = w_min(param_set) lim_ϵ_ = lim_ϵ(param_set) lim_amp_ = lim_amp(param_set) args = (c_λ_, c_ε_, c_δ_, c_t_, β_, μ_0_, χ_, w_min_, lim_ϵ_, lim_amp_) return EntrainmentDetrainment{FT}(args...) end """ SubdomainModel A subdomain model for EDMF, containing all related model and free parameters. TODO: `a_max` is valid for all subdomains, but it is insufficient to ensure `a_en` is not negative. Limits can be imposed for updrafts, but this is a limit is dictated by 1 - Σᵢ aᵢ, which must be somehow satisfied by regularizing prognostic source terms. 
# Fields $(DocStringExtensions.FIELDS) """ Base.@kwdef struct SubdomainModel{FT <: AbstractFloat} "Minimum area fraction for any subdomain" a_min::FT "Maximum area fraction for any subdomain" a_max::FT end function SubdomainModel( ::Type{FT}, N_up; a_min::FT = FT(0), a_max::FT = 1 - N_up * a_min, ) where {FT} return SubdomainModel(; a_min = a_min, a_max = a_max) end """ SurfaceModel A surface model for EDMF, containing all boundary values and parameters needed by the model. # Fields $(DocStringExtensions.FIELDS) """ Base.@kwdef struct SurfaceModel{FT <: AbstractFloat, SV} "Area" a::FT "Surface covariance stability coefficient" ψϕ_stab::FT "Square ratio of rms turbulent velocity to friction velocity" κ_star²::FT "Updraft normalized standard deviation at the surface" upd_surface_std::SV # The following will be deleted after SurfaceFlux coupling "Liquid water potential temperature ‵[k]‵" θ_liq::FT = 299.1 "Specific humidity ‵[kg/kg]‵" q_tot::FT = 22.45e-3 "Sensible heat flux ‵[w/m^2]‵" shf::FT = 9.5 "Latent heat flux ‵[w/m^2]‵" lhf::FT = 147.2 "Friction velocity" ustar::FT = 0.28 "Monin - Obukhov length" obukhov_length::FT = 0 "Height of the lowest level" zLL::FT = 60 end """ SurfaceModel{FT}(N_up, param_set) where {FT} Constructor for `SurfaceModel` for EDMF, given: - `N_up`, the number of updrafts - `param_set`, an AbstractEarthParameterSet """ function SurfaceModel{FT}(N_up, param_set::AbstractEarthParameterSet) where {FT} a_surf_ = a_surf(param_set) κ_star²_ = κ_star²(param_set) ψϕ_stab_ = ψϕ_stab(param_set) if a_surf_ > FT(0) upd_surface_std = SVector( ntuple(N_up) do i percentile_bounds_mean_norm( 1 - a_surf_ + (i - 1) * FT(a_surf_ / N_up), 1 - a_surf_ + i * FT(a_surf_ / N_up), 1000, ) end, ) else upd_surface_std = SVector(ntuple(i -> FT(0), N_up)) end SV = typeof(upd_surface_std) return SurfaceModel{FT, SV}(; upd_surface_std = upd_surface_std, a = a_surf_, κ_star² = κ_star²_, ψϕ_stab = ψϕ_stab_, ) end """ NeutralDrySurfaceModel A surface model for EDMF 
simulations in a dry, neutral environment, containing all boundary values and parameters needed by the model. # Fields $(DocStringExtensions.FIELDS) """ Base.@kwdef struct NeutralDrySurfaceModel{FT <: AbstractFloat} "Area" a::FT "Square ratio of rms turbulent velocity to friction velocity" κ_star²::FT "Friction velocity" ustar::FT = 0.3 "Height of the lowest level" zLL::FT = 60 "Monin - Obukhov length" obukhov_length::FT = 0 end """ NeutralDrySurfaceModel{FT}(N_up, param_set) where {FT} Constructor for `NeutralDrySurfaceModel` for EDMF, given: - `param_set`, an AbstractEarthParameterSet """ function NeutralDrySurfaceModel{FT}( param_set::AbstractEarthParameterSet, ) where {FT} a_surf_ = a_surf(param_set) κ_star²_ = κ_star²(param_set) return NeutralDrySurfaceModel{FT}(; a = a_surf_, κ_star² = κ_star²_) end """ PressureModel A pressure model for EDMF, containing all related model and free parameters. # Fields $(DocStringExtensions.FIELDS) """ Base.@kwdef struct PressureModel{FT <: AbstractFloat} "Pressure drag" α_d::FT "Pressure advection" α_a::FT "Pressure buoyancy" α_b::FT "Minimum diagnostic updraft height for closures" H_up_min::FT end """ PressureModel{FT}(param_set) where {FT} Constructor for `PressureModel` for EDMF, given: - `param_set`, an AbstractEarthParameterSet """ function PressureModel{FT}(param_set::AbstractEarthParameterSet) where {FT} α_d_ = α_d(param_set) α_a_ = α_a(param_set) α_b_ = α_b(param_set) H_up_min_ = H_up_min(param_set) args = (α_d_, α_a_, α_b_, H_up_min_) return PressureModel{FT}(args...) end """ MixingLengthModel A mixing length model for EDMF, containing all related model and free parameters. 
# Fields $(DocStringExtensions.FIELDS) """ Base.@kwdef struct MixingLengthModel{FT <: AbstractFloat} "dissipation coefficient" c_d::FT "Eddy Viscosity" c_m::FT "Static Stability coefficient" c_b::FT "Empirical stability function coefficient" a1::FT "Empirical stability function coefficient" a2::FT "Von Karmen constant" κ::FT "Prandtl number empirical coefficient" ω_pr::FT "Prandtl number scale" Pr_n::FT "Critical Richardson number" Ri_c::FT "smooth minimum's fractional upper bound" smin_ub::FT "smooth minimum's regularization minimum" smin_rm::FT "Maximum mixing length" max_length::FT "Random small number variable that should be addressed" random_minval::FT end """ MixingLengthModel{FT}(param_set) where {FT} Constructor for `MixingLengthModel` for EDMF, given: - `param_set`, an AbstractEarthParameterSet """ function MixingLengthModel{FT}(param_set::AbstractEarthParameterSet) where {FT} c_d_ = c_d(param_set) c_m_ = c_m(param_set) c_b_ = c_b(param_set) a1_ = a1(param_set) a2_ = a2(param_set) κ = von_karman_const(param_set) ω_pr_ = ω_pr(param_set) Pr_n_ = Pr_n(param_set) Ri_c_ = Ri_c(param_set) smin_ub_ = smin_ub(param_set) smin_rm_ = smin_rm(param_set) max_length = 1e6 random_minval = 1e-9 args = ( c_d_, c_m_, c_b_, a1_, a2_, κ, ω_pr_, Pr_n_, Ri_c_, smin_ub_, smin_rm_, max_length, random_minval, ) return MixingLengthModel{FT}(args...) end abstract type AbstractStatisticalModel end struct SubdomainMean <: AbstractStatisticalModel end struct GaussQuad <: AbstractStatisticalModel end struct LogNormalQuad <: AbstractStatisticalModel end """ MicrophysicsModel A microphysics model for EDMF, containing all related model and free parameters and assumed subdomain distributions. 
# Fields $(DocStringExtensions.FIELDS) """ Base.@kwdef struct MicrophysicsModel{FT <: AbstractFloat, SM} "Subdomain statistical model" statistical_model::SM end """ MicrophysicsModel( FT; statistical_model = SubdomainMean() ) Constructor for `MicrophysicsModel` for EDMF, given: - `FT`, the float type used - `statistical_model`, the assumed environmental distribution of thermodynamic variables. """ function MicrophysicsModel(FT; statistical_model = SubdomainMean()) args = (statistical_model,) return MicrophysicsModel{FT, typeof(statistical_model)}(args...) end """ Environment <: BalanceLaw A `BalanceLaw` for the environment subdomain arising in EDMF. """ Base.@kwdef struct Environment{FT <: AbstractFloat, N_quad} <: BalanceLaw end """ Updraft <: BalanceLaw A `BalanceLaw` for the updraft subdomains arising in EDMF. """ Base.@kwdef struct Updraft{FT <: AbstractFloat} <: BalanceLaw end abstract type Coupling end """ Decoupled <: Coupling Dispatch on decoupled model (default) - The EDMF SGS tendencies do not modify the grid-mean equations. """ struct Decoupled <: Coupling end """ Coupled <: Coupling Dispatch on coupled model - The EDMF SGS tendencies modify the grid-mean equations. """ struct Coupled <: Coupling end """ EDMF <: TurbulenceConvectionModel A turbulence convection model for the EDMF scheme, containing all closure models and free parameters. 
# Fields $(DocStringExtensions.FIELDS) """ Base.@kwdef struct EDMF{ FT <: AbstractFloat, N, UP, EN, ED, P, S, MP, ML, SD, C, } <: TurbulenceConvectionModel "Updrafts" updraft::UP "Environment" environment::EN "Entrainment-Detrainment model" entr_detr::ED "Pressure model" pressure::P "Surface model" surface::S "Microphysics model" micro_phys::MP "Mixing length model" mix_len::ML "Subdomain model" subdomains::SD "Coupling mode" coupling::C end """ EDMF( FT, N_up, N_quad, param_set; updraft = ntuple(i -> Updraft{FT}(), N_up), environment = Environment{FT, N_quad}(), entr_detr = EntrainmentDetrainment{FT}(param_set), pressure = PressureModel{FT}(param_set), surface = SurfaceModel{FT}(N_up, param_set), micro_phys = MicrophysicsModel(FT), mix_len = MixingLengthModel{FT}(param_set), subdomain = SubdomainModel(FT, N_up), ) Constructor for `EDMF` subgrid-scale scheme, given: - `FT`, the AbstractFloat type used - `N_up`, the number of updrafts - `N_quad`, the quadrature order. `N_quad^2` is the total number of quadrature points used for environmental distributions. - `updraft`, a tuple containing N_up updraft BalanceLaws - `environment`, the environment BalanceLaw - `entr_detr`, an `EntrainmentDetrainment` model - `pressure`, a `PressureModel` - `surface`, a `SurfaceModel` - `micro_phys`, a `MicrophysicsModel` - `mix_len`, a `MixingLengthModel` - `subdomain`, a `SubdomainModel` - `coupling`, a coupling type """ function EDMF( FT, N_up, N_quad, param_set; updraft = ntuple(i -> Updraft{FT}(), N_up), environment = Environment{FT, N_quad}(), entr_detr = EntrainmentDetrainment{FT}(param_set), pressure = PressureModel{FT}(param_set), surface = SurfaceModel{FT}(N_up, param_set), micro_phys = MicrophysicsModel(FT), mix_len = MixingLengthModel{FT}(param_set), subdomain = SubdomainModel(FT, N_up), coupling = Decoupled(), ) args = ( updraft, environment, entr_detr, pressure, surface, micro_phys, mix_len, subdomain, coupling, ) return EDMF{FT, N_up, typeof.(args)...}(args...) 
end import ClimateMachine.TurbulenceConvection: turbconv_sources, turbconv_bcs """ EDMFTopBC <: TurbConvBC Boundary conditions for the top of the EDMF. """ struct EDMFTopBC <: TurbConvBC end """ EDMFBottomBC <: TurbConvBC Boundary conditions for the bottom of the EDMF. """ struct EDMFBottomBC <: TurbConvBC end n_updrafts(m::EDMF{FT, N_up}) where {FT, N_up} = N_up n_updrafts(m::TurbulenceConvectionModel) = 0 turbconv_filters(m::TurbulenceConvectionModel) = () turbconv_filters(m::EDMF) = ( "turbconv.environment.ρatke", "turbconv.environment.ρaθ_liq_cv", "turbconv.environment.ρaq_tot_cv", "turbconv.updraft", ) n_quad_points(m::Environment{FT, N_quad}) where {FT, N_quad} = N_quad turbconv_sources(m::EDMF) = () turbconv_bcs(::EDMF) = (EDMFBottomBC(), EDMFTopBC()) ================================================ FILE: test/Atmos/EDMF/ekman_layer.jl ================================================ #!/usr/bin/env julia --project #= # This driver file simulates the ekman_layer_model.jl in a single column setting. # # The user may select in main() the following configurations: # - DG or FV vertical discretization by changing the boolean `finite_volume`, # - Compressible() or Anelastic1D() changing the compressibility, # - Constant kinematic viscosity, Smagorinsky-Lilly or EDMF SGS fluxes. # # The default is DG, Anelastic1D(), constant kinematic viscosity of 0.1. 
# =# using JLD2, FileIO using ClimateMachine using ClimateMachine.SingleStackUtils using ClimateMachine.Checkpoint using ClimateMachine.BalanceLaws: vars_state using ClimateMachine.Atmos const clima_dir = dirname(dirname(pathof(ClimateMachine))); import CLIMAParameters import ClimateMachine.DGMethods.FVReconstructions: FVLinear include(joinpath(clima_dir, "experiments", "AtmosLES", "ekman_layer_model.jl")) include(joinpath("helper_funcs", "diagnostics_configuration.jl")) include("edmf_model.jl") include("edmf_kernels.jl") CLIMAParameters.Planet.T_surf_ref(::EarthParameterSet) = 290.0 CLIMAParameters.Atmos.EDMF.a_surf(::EarthParameterSet) = 0.0 function set_clima_parameters(filename) eval(:(include($filename))) end """ init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} Initialize EDMF state variables if turbconv=EDMF(...) is selected. This method is only called at `t=0`. """ function init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} # Aliases: gm = state en = state.turbconv.environment up = state.turbconv.updraft N_up = n_updrafts(turbconv) z = altitude(m, aux) param_set = parameter_set(m) ts = new_thermo_state(m, state, aux) θ_liq = liquid_ice_pottemp(ts) a_min = turbconv.subdomains.a_min @unroll_map(N_up) do i up[i].ρa = gm.ρ * a_min up[i].ρaw = gm.ρu[3] * a_min up[i].ρaθ_liq = gm.ρ * a_min * θ_liq up[i].ρaq_tot = FT(0) end en.ρatke = z > FT(250) ? FT(0) : gm.ρ * FT(0.3375) * FT(1 - z / 250.0) * FT(1 - z / 250.0) * FT(1 - z / 250.0) en.ρaθ_liq_cv = FT(0) en.ρaq_tot_cv = FT(0) en.ρaθ_liq_q_tot_cv = FT(0) return nothing end; function main(::Type{FT}, cl_args) where {FT} # Change boolean to control vertical discretization type finite_volume = false # Choice of compressibility and CFL # compressibility = Compressible() compressibility = Anelastic1D() str_comp = compressibility == Compressible() ? 
"COMPRESS" : "ANELASTIC" # Choice of SGS model # turbconv = NoTurbConv() N_updrafts = 1 N_quad = 3 turbconv = EDMF( FT, N_updrafts, N_quad, param_set, surface = NeutralDrySurfaceModel{FT}(param_set), ) C_smag_ = C_smag(param_set) turbulence = ConstantKinematicViscosity(FT(0.1)) # turbulence = SmagorinskyLilly{FT}(C_smag_) # Prescribe domain parameters zmax = FT(400) # Simulation time t0 = FT(0) timeend = FT(3600 * 2) # Change to 7h for low-level jet CFLmax = compressibility == Compressible() ? FT(1) : FT(100) config_type = SingleStackConfigType ode_solver_type = ClimateMachine.ExplicitSolverType( solver_method = LSRK144NiegemannDiehlBusch, ) if finite_volume N = (1, 0) nelem_vert = 80 ref_state = HydrostaticState( DryAdiabaticProfile{FT}(param_set), ; subtract_off = false, ) output_prefix = string("EL_", str_comp, "_FVM") fv_reconstruction = FVLinear() else N = 4 nelem_vert = 20 ref_state = HydrostaticState(DryAdiabaticProfile{FT}(param_set),) output_prefix = string("EL_", str_comp, "_DG") fv_reconstruction = nothing end surface_flux = cl_args["surface_flux"] model = ekman_layer_model( FT, config_type, zmax, surface_flux; turbulence = turbulence, turbconv = turbconv, ref_state = ref_state, compressibility = compressibility, ) # Assemble configuration driver_config = ClimateMachine.SingleStackConfiguration( output_prefix, N, nelem_vert, zmax, param_set, model; hmax = FT(40), fv_reconstruction = fv_reconstruction, ) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_solver_type = ode_solver_type, init_on_cpu = true, Courant_number = CFLmax, ) # --- Zero-out horizontal variations: vsp = vars_state(model, Prognostic(), FT) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :turbconv), ) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :energy, :ρe), ) vsa = vars_state(model, Auxiliary(), FT) horizontally_average!( driver_config.grid, solver_config.dg.state_auxiliary, varsindex(vsa, 
:turbconv), ) # --- dgn_config = config_diagnostics(driver_config) # boyd vandeven filter cb_boyd = GenericCallbacks.EveryXSimulationSteps(1) do Filters.apply!( solver_config.Q, AtmosSpecificFilterPerturbations(driver_config.bl), solver_config.dg.grid, BoydVandevenFilter(solver_config.dg.grid, 1, 4); state_auxiliary = solver_config.dg.state_auxiliary, ) nothing end diag_arr = [single_stack_diagnostics(solver_config)] time_data = FT[0] # Define the number of outputs from `t0` to `timeend` n_outputs = 5 # This equates to exports every ceil(Int, timeend/n_outputs) time-step: every_x_simulation_time = ceil(Int, timeend / n_outputs) cb_data_vs_time = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do diag_vs_z = single_stack_diagnostics(solver_config) nstep = getsteps(solver_config.solver) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) nothing end # Mass tendencies = 0 for Anelastic1D model, # so mass should be completely conserved: Δρ_lim = compressibility == Compressible() ? FT(0.001) : FT(0.00000001) check_cons = (ClimateMachine.ConservationCheck("ρ", "3000steps", Δρ_lim),) cb_print_step = GenericCallbacks.EveryXSimulationSteps(100) do @show getsteps(solver_config.solver) nothing end if !isnothing(cl_args["cparam_file"]) ClimateMachine.Settings.output_dir = cl_args["cparam_file"] * ".output" end result = ClimateMachine.invoke!( solver_config; diagnostics_config = dgn_config, check_cons = check_cons, user_callbacks = (cb_boyd, cb_data_vs_time, cb_print_step), check_euclidean_distance = true, ) diag_vs_z = single_stack_diagnostics(solver_config) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) return solver_config, diag_arr, time_data end # ArgParse in global scope to modify Clima Parameters sl_args = ArgParseSettings(autofix_names = true) add_arg_group!(sl_args, "EkmanLayer") @add_arg_table! 
sl_args begin "--cparam-file" help = "specify CLIMAParameters file" arg_type = Union{String, Nothing} default = nothing "--surface-flux" help = "specify surface flux for energy and moisture" metavar = "prescribed|bulk|custom_sl" arg_type = String default = "prescribed" end cl_args = ClimateMachine.init(parse_clargs = true, custom_clargs = sl_args) if !isnothing(cl_args["cparam_file"]) filename = cl_args["cparam_file"] set_clima_parameters(filename) end solver_config, diag_arr, time_data = main(Float64, cl_args) ## Uncomment lines to save output using JLD2 # output_dir = @__DIR__; # mkpath(output_dir); # function dons(diag_vs_z) # return Dict(map(keys(first(diag_vs_z))) do k # string(k) => [getproperty(ca, k) for ca in diag_vs_z] # end) # end # get_dons_arr(diag_arr) = [dons(diag_vs_z) for diag_vs_z in diag_arr] # dons_arr = get_dons_arr(diag_arr) # println(dons_arr[1].keys) # z = get_z(solver_config.dg.grid; rm_dupes = true); # save( # string(output_dir, "/ekman.jld2"), # "dons_arr", # dons_arr, # "time_data", # time_data, # "z", # z, # ) nothing ================================================ FILE: test/Atmos/EDMF/helper_funcs/diagnose_environment.jl ================================================ #### Diagnose environment variables """ environment_vars(state::Vars, N_up::Int) A NamedTuple of environment variables """ function environment_vars(state::Vars, N_up::Int) return (a = environment_area(state, N_up), w = environment_w(state, N_up)) end """ environment_area( state::Vars, N_up::Int, ) Returns the environmental area fraction, given: - `state`, state variables - `N_up`, number of updrafts """ function environment_area(state::Vars, N_up::Int) up = state.turbconv.updraft return 1 - sum(vuntuple(i -> up[i].ρa, N_up)) / state.ρ end """ environment_w(state::Vars, N_up::Int) Returns the environmental vertical velocity, given: - `state`, state variables - `N_up`, number of updrafts """ function environment_w(state::Vars, N_up::Int) ρ_inv = 1 / state.ρ a_en = 
environment_area(state, N_up) up = state.turbconv.updraft return (state.ρu[3] - sum(vuntuple(i -> up[i].ρaw, N_up))) / a_en * ρ_inv end """ grid_mean_b(env, a_up, N_up::Int, buoyancy_up, buoyancy_en) Returns the grid-mean buoyancy with respect to the reference state, given: - `env`, environment variables - `a_up`, updraft area fractions - `N_up`, number of updrafts - `buoyancy_up`, updraft buoyancies - `buoyancy_en`, environment buoyancy """ function grid_mean_b(env, a_up, N_up::Int, buoyancy_up, buoyancy_en) ∑abuoyancy_up = sum(vuntuple(i -> buoyancy_up[i] * a_up[i], N_up)) return env.a * buoyancy_en + ∑abuoyancy_up end ================================================ FILE: test/Atmos/EDMF/helper_funcs/diagnostics_configuration.jl ================================================ """ config_diagnostics(driver_config, timeend; interval=nothing) Returns the state and tendency diagnostic groups """ function config_diagnostics(driver_config, timeend; interval = nothing) FT = eltype(driver_config.grid) info = driver_config.config_info if interval == nothing interval = "$(cld(timeend, 2) + 10)ssecs" #interval = "10steps" end boundaries = [ FT(0) FT(0) FT(0) FT(info.hmax) FT(info.hmax) FT(info.zmax) ] axes = ( [FT(1)], [FT(1)], collect(range(boundaries[1, 3], boundaries[2, 3], step = FT(50)),), ) interpol = ClimateMachine.InterpolationConfiguration( driver_config, boundaries; axes = axes, ) ds_dgngrp = setup_dump_state_diagnostics( SingleStackConfigType(), interval, driver_config.name, interpol = interpol, ) dt_dgngrp = setup_dump_tendencies_diagnostics( SingleStackConfigType(), interval, driver_config.name, interpol = interpol, ) return ClimateMachine.DiagnosticsConfiguration([ds_dgngrp, dt_dgngrp]) end ================================================ FILE: test/Atmos/EDMF/helper_funcs/lamb_smooth_minimum.jl ================================================ using LambertW """ lambertw_gpu(N) Returns `real(LambertW.lambertw(Float64(N - 1) / MathConstants.e))`, valid for `N 
= 2` and `N = 3`.

TODO: add `LambertW.lambertw` support to KernelAbstractions.
"""
function lambertw_gpu(N)
    # Only the precomputed cases N = 2 and N = 3 are supported on GPU.
    if !(N == 2 || N == 3)
        error("Bad N in lambertw_gpu")
    end
    # Tabulated lambertw((N - 1) / e) for N = 2 (first entry) and N = 3 (second).
    return (0.2784645427610738, 0.46305551336554884)[N - 1]
end

"""
    lamb_smooth_minimum(
        l::AbstractArray{FT},
        frac_upper_bound::FT,
        reg_min::FT,
    ) where {FT}

Returns the smooth minimum of the elements of an array following the
formulation of Lopez-Gomez et al. (JAMES, 2020), Appendix A, given:
 - `l`, an array of candidate elements
 - `frac_upper_bound`, defines the upper bound of the smooth minimum as
   `smin(x) = min(x)*(1+frac_upper_bound)`
 - `reg_min`, defines the minimum value of the regularizer Λ
"""
function lamb_smooth_minimum(
    l::AbstractArray{FT},
    frac_upper_bound::FT,
    reg_min::FT,
) where {FT}
    xmin = minimum(l)

    # Get regularizer for exponential weights.
    # `length(l)` must be 2 or 3 — see `lambertw_gpu` above.
    N_l = length(l)
    denom = FT(lambertw_gpu(N_l))
    Λ = max(FT(xmin) * frac_upper_bound / denom, reg_min)

    # Exponentially-weighted average, dominated by the smallest elements;
    # assumes 1-based indexing of `l` — TODO confirm for offset arrays.
    num = sum(i -> l[i] * exp(-(l[i] - xmin) / Λ), 1:N_l)
    den = sum(i -> exp(-(l[i] - xmin) / Λ), 1:N_l)
    smin = num / den
    return smin
end

================================================
FILE: test/Atmos/EDMF/helper_funcs/nondimensional_exchange_functions.jl
================================================
"""
    nondimensional_exchange_functions(
        m::AtmosModel{FT},
        entr::EntrainmentDetrainment,
        state::Vars,
        aux::Vars,
        ts_up,
        ts_en,
        env,
        buoy,
        i,
    ) where {FT}

Returns the nondimensional entrainment and detrainment functions following Cohen et al.
(JAMES, 2020), given:
 - `m`, an `AtmosModel`
 - `entr`, an `EntrainmentDetrainment` model
 - `state`, state variables
 - `aux`, auxiliary variables
 - `ts_up`, updraft thermodynamic states
 - `ts_en`, environment thermodynamic states
 - `env`, NamedTuple of environment variables
 - `buoy`, NamedTuple of environment and updraft buoyancies
 - `i`, the updraft index
"""
function nondimensional_exchange_functions(
    m::AtmosModel{FT},
    entr::EntrainmentDetrainment,
    state::Vars,
    aux::Vars,
    ts_up,
    ts_en,
    env,
    buoy,
    i,
) where {FT}

    # Alias convention:
    gm = state
    up = state.turbconv.updraft
    up_aux = aux.turbconv.updraft
    en_aux = aux.turbconv.environment

    # precompute vars
    w_min = entr.w_min
    N_up = n_updrafts(turbconv_model(m))
    ρinv = 1 / gm.ρ
    a_up_i = up[i].ρa * ρinv
    # Void updrafts (negligible ρa) fall back to w = 0; the ρaw / ρa value is
    # discarded in that case.
    w_up_i = fix_void_up(up[i].ρa, up[i].ρaw / up[i].ρa)

    # thermodynamic variables
    RH_up = relative_humidity(ts_up[i])
    RH_en = relative_humidity(ts_en)
    # filter_w bounds |Δw| away from zero by w_min, protecting the Δb / Δw
    # division below.
    Δw = filter_w(w_up_i - env.w, w_min)
    Δb = buoy.up[i] - buoy.en
    # Moist term coefficient vanishes (sign(0) = 0) when neither the updraft
    # nor the environment holds condensate.
    c_δ = sign(condensate(ts_en) + condensate(ts_up[i])) * entr.c_δ

    # compute dry and moist aux functions
    μ_ij = (entr.χ - a_up_i / (a_up_i + env.a)) * Δb / Δw
    D_ε = entr.c_ε / (1 + exp(-μ_ij / entr.μ_0))
    M_ε = c_δ * (max((RH_en^entr.β - RH_up^entr.β), 0))^(1 / entr.β)
    D_δ = entr.c_ε / (1 + exp(μ_ij / entr.μ_0))
    M_δ = c_δ * (max((RH_up^entr.β - RH_en^entr.β), 0))^(1 / entr.β)
    # NOTE: returned in the order (D_ε, D_δ, M_δ, M_ε).
    return D_ε, D_δ, M_δ, M_ε
end;

================================================
FILE: test/Atmos/EDMF/helper_funcs/save_subdomain_temperature.jl
================================================
# Convenience wrapper: dispatch on the model's moisture model.
save_subdomain_temperature!(m, state, aux) =
    save_subdomain_temperature!(m, moisture_model(m), state, aux)

using KernelAbstractions: @print

"""
    save_subdomain_temperature!(
        m::AtmosModel,
        moist::EquilMoist,
        state::Vars,
        aux::Vars,
    )

Updates the subdomain sensible temperature, given:
 - `m`, an `AtmosModel`
 - `moist`, an `EquilMoist` model
 - `state`, state variables
 - `aux`, auxiliary variables
"""
function save_subdomain_temperature!(
    m::AtmosModel,
    moist::EquilMoist,
    state::Vars,
    aux::Vars,
)
    N_up = n_updrafts(turbconv_model(m))
    ts = recover_thermo_state(m, state, aux)
    # Recompute the subdomain thermodynamic states from the grid-mean state.
    ts_up = new_thermo_state_up(m, state, aux, ts)
    ts_en = new_thermo_state_en(m, state, aux, ts)
    # Store the temperatures in the auxiliary state.
    @unroll_map(N_up) do i
        aux.turbconv.updraft[i].T = air_temperature(ts_up[i])
    end
    aux.turbconv.environment.T = air_temperature(ts_en)
    return nothing
end

# No need to save temperature for DryModel.
function save_subdomain_temperature!(
    m::AtmosModel,
    moist::DryModel,
    state::Vars,
    aux::Vars,
) end

================================================
FILE: test/Atmos/EDMF/helper_funcs/subdomain_statistics.jl
================================================
#### Subdomain statistics

# Dispatch on the turbconv model's statistical model.
function compute_subdomain_statistics(m::AtmosModel, args, ts_gm, ts_en)
    turbconv = turbconv_model(m)
    return compute_subdomain_statistics(
        turbconv.micro_phys.statistical_model,
        m,
        args,
        ts_gm,
        ts_en,
    )
end

"""
    compute_subdomain_statistics(
        statistical_model::SubdomainMean,
        m::AtmosModel{FT},
        args,
        ts_gm,
        ts_en,
    ) where {FT}

Returns a cloud fraction and cloudy and dry thermodynamic states in the subdomain.
"""
function compute_subdomain_statistics(
    statistical_model::SubdomainMean,
    m::AtmosModel{FT},
    args,
    ts_gm,
    ts_en,
) where {FT}
    # Mean-only statistics: all-or-nothing cloud fraction, and both the dry
    # and cloudy states coincide with the environment state.
    cloud_frac = has_condensate(ts_en) ? FT(1) : FT(0)
    dry = ts_en
    cloudy = ts_en
    return (dry = dry, cloudy = cloudy, cloud_frac = cloud_frac)
end

================================================
FILE: test/Atmos/EDMF/helper_funcs/subdomain_thermo_states.jl
================================================
#### thermo states for subdomains

using KernelAbstractions: @print

export new_thermo_state_up,
    new_thermo_state_en,
    recover_thermo_state_all,
    recover_thermo_state_up,
    recover_thermo_state_en

####
#### Interface
####

"""
    new_thermo_state_up(bl, state, aux)

Updraft thermodynamic states given:
 - `bl`, parent `BalanceLaw`
 - `state`, state variables
 - `aux`, auxiliary variables

!!!
note
    This method calls saturation adjustment for EquilMoist models.
"""
new_thermo_state_up(
    bl::AtmosModel,
    state::Vars,
    aux::Vars,
    ts::ThermodynamicState = recover_thermo_state(bl, state, aux),
) = new_thermo_state_up(bl, moisture_model(bl), state, aux, ts)

"""
    new_thermo_state_en(bl, state, aux)

Environment thermodynamic state given:
 - `bl`, parent `BalanceLaw`
 - `state`, state variables
 - `aux`, auxiliary variables

!!! note
    This method calls saturation adjustment for EquilMoist models.
"""
new_thermo_state_en(
    bl::AtmosModel,
    state::Vars,
    aux::Vars,
    ts::ThermodynamicState = recover_thermo_state(bl, state, aux),
) = new_thermo_state_en(bl, moisture_model(bl), state, aux, ts)

"""
    recover_thermo_state_all(bl, state, aux)

Recover NamedTuple of all thermo states

# TODO: Define/call `recover_thermo_state` when it's safely implemented
  (see https://github.com/CliMA/ClimateMachine.jl/issues/1648)
"""
function recover_thermo_state_all(bl, state, aux)
    ts = new_thermo_state(bl, state, aux)
    return (
        gm = ts,
        en = new_thermo_state_en(bl, moisture_model(bl), state, aux, ts),
        up = new_thermo_state_up(bl, moisture_model(bl), state, aux, ts),
    )
end

"""
    recover_thermo_state_up(bl, state, aux, ts = new_thermo_state(bl, state, aux))

Recover the updraft thermodynamic states given:
 - `bl`, parent `BalanceLaw`
 - `state`, state variables
 - `aux`, auxiliary variables

!!! warn
    Right now we are directly calling new_thermo_state_up to avoid
    inconsistent aux states in kernels where the aux states are
    out of sync with the boundary state.

# TODO: Define/call `recover_thermo_state` when it's safely implemented
  (see https://github.com/CliMA/ClimateMachine.jl/issues/1648)
"""
function recover_thermo_state_up(
    bl,
    state,
    aux,
    ts = new_thermo_state(bl, state, aux),
)
    return new_thermo_state_up(bl, moisture_model(bl), state, aux, ts)
end

"""
    recover_thermo_state_en(bl, state, aux, ts = new_thermo_state(bl, state, aux))

Recover the environment thermodynamic state given:
 - `bl`, parent `BalanceLaw`
 - `state`, state variables
 - `aux`, auxiliary variables

!!! warn
    Right now we are directly calling new_thermo_state_en to avoid
    inconsistent aux states in kernels where the aux states are
    out of sync with the boundary state.

# TODO: Define/call `recover_thermo_state` when it's safely implemented
  (see https://github.com/CliMA/ClimateMachine.jl/issues/1648)
"""
function recover_thermo_state_en(
    bl,
    state,
    aux,
    ts = new_thermo_state(bl, state, aux),
)
    return new_thermo_state_en(bl, moisture_model(bl), state, aux, ts)
end

####
#### Implementation
####

function new_thermo_state_up(
    m::AtmosModel{FT},
    moist::DryModel,
    state::Vars,
    aux::Vars,
    ts::ThermodynamicState,
) where {FT}
    N_up = n_updrafts(turbconv_model(m))
    up = state.turbconv.updraft
    p = air_pressure(ts)
    param_set = parameter_set(m)

    # compute thermo state for updrafts
    ts_up = vuntuple(N_up) do i
        ρa_up = up[i].ρa
        ρaθ_liq_up = up[i].ρaθ_liq
        # Void updrafts fall back to the grid-mean θ_liq.
        θ_liq_up = fix_void_up(ρa_up, ρaθ_liq_up / ρa_up, liquid_ice_pottemp(ts))

        PhaseDry_pθ(param_set, p, θ_liq_up)
    end
    return ts_up
end

function new_thermo_state_up(
    m::AtmosModel{FT},
    moist::EquilMoist,
    state::Vars,
    aux::Vars,
    ts::ThermodynamicState,
) where {FT}
    N_up = n_updrafts(turbconv_model(m))
    up = state.turbconv.updraft
    p = air_pressure(ts)
    param_set = parameter_set(m)

    # compute thermo state for updrafts
    ts_up = vuntuple(N_up) do i
        ρa_up = up[i].ρa
        ρaθ_liq_up = up[i].ρaθ_liq
        ρaq_tot_up = up[i].ρaq_tot
        # Void updrafts fall back to the grid-mean θ_liq and q_tot.
        θ_liq_up = fix_void_up(ρa_up, ρaθ_liq_up / ρa_up, liquid_ice_pottemp(ts))
        q_tot_up = fix_void_up(
            ρa_up,
            ρaq_tot_up / ρa_up,
            total_specific_humidity(ts),
        )
        # Saturation adjustment for the updraft state.
        PhaseEquil_pθq(param_set, p, θ_liq_up, q_tot_up)
    end
    return ts_up
end

function new_thermo_state_en(
    m::AtmosModel,
    moist::DryModel,
    state::Vars,
    aux::Vars,
    ts::ThermodynamicState,
)
    turbconv = turbconv_model(m)
    N_up = n_updrafts(turbconv)
    up = state.turbconv.updraft

    # diagnose environment thermo state
    ρ_inv = 1 / state.ρ
    p = air_pressure(ts)
    θ_liq = liquid_ice_pottemp(ts)
    a_en = environment_area(state, N_up)
    # Void updrafts contribute 0 to the updraft θ_liq sum.
    ρaθ_liq_up = vuntuple(N_up) do i
        fix_void_up(up[i].ρa, up[i].ρaθ_liq)
    end
    # Environment θ_liq from the grid-mean decomposition:
    # θ_liq = a_en θ_liq_en + Σ_i (ρa_i / ρ) θ_liq_i
    θ_liq_en = (θ_liq - sum(vuntuple(j -> ρaθ_liq_up[j] * ρ_inv, N_up))) / a_en
    # (a_min / a_max currently unused in this method.)
    a_min = turbconv.subdomains.a_min
    a_max = turbconv.subdomains.a_max
    param_set = parameter_set(m)
    # Sanity check: θ_liq_en must be non-negative to be physical.
    if !(0 <= θ_liq_en)
        # NOTE(review): `Val(1)` indexing prints only the first updraft —
        # confirm the container supports Val-based getindex.
        @print("ρaθ_liq_up = ", ρaθ_liq_up[Val(1)], "\n")
        @print("θ_liq = ", θ_liq, "\n")
        @print("θ_liq_en = ", θ_liq_en, "\n")
        error("Environment θ_liq_en out-of-bounds in new_thermo_state_en")
    end
    ts_en = PhaseDry_pθ(param_set, p, θ_liq_en)
    return ts_en
end

function new_thermo_state_en(
    m::AtmosModel,
    moist::EquilMoist,
    state::Vars,
    aux::Vars,
    ts::ThermodynamicState,
)
    turbconv = turbconv_model(m)
    N_up = n_updrafts(turbconv)
    up = state.turbconv.updraft

    # diagnose environment thermo state
    ρ_inv = 1 / state.ρ
    p = air_pressure(ts)
    θ_liq = liquid_ice_pottemp(ts)
    q_tot = total_specific_humidity(ts)
    a_en = environment_area(state, N_up)
    # NOTE(review): unlike the DryModel method above, the updraft sums here
    # are not guarded with `fix_void_up` — confirm void updrafts cannot
    # corrupt θ_liq_en / q_tot_en in the EquilMoist configuration.
    θ_liq_en = (θ_liq - sum(vuntuple(j -> up[j].ρaθ_liq * ρ_inv, N_up))) / a_en
    q_tot_en = (q_tot - sum(vuntuple(j -> up[j].ρaq_tot * ρ_inv, N_up))) / a_en
    # (a_min / a_max currently unused in this method.)
    a_min = turbconv.subdomains.a_min
    a_max = turbconv.subdomains.a_max
    param_set = parameter_set(m)
    # Sanity checks: environment values must be physical.
    if !(0 <= θ_liq_en)
        @print("θ_liq_en = ", θ_liq_en, "\n")
        error("Environment θ_liq_en out-of-bounds in new_thermo_state_en")
    end
    if !(0 <= q_tot_en <= 1)
        @print("q_tot_en = ", q_tot_en, "\n")
        error("Environment q_tot_en out-of-bounds in new_thermo_state_en")
    end
    # Saturation adjustment for the environment state.
    ts_en = PhaseEquil_pθq(param_set, p, θ_liq_en, q_tot_en)
    return ts_en
end

================================================
FILE:
test/Atmos/EDMF/helper_funcs/utility_funcs.jl
================================================
"""
    filter_w(w::FT, w_min::FT) where {FT}

Return velocity such that `abs(filter_w(w, w_min)) >= abs(w_min)`
while preserving the sign of `w`.
"""
filter_w(w::FT, w_min::FT) where {FT} =
    max(abs(w), abs(w_min)) * (w < 0 ? sign(w) : 1)

"""
    enforce_unit_bounds(a_up_i::FT, a_min::FT, a_max::FT) where {FT}

Clamp the updraft area fraction `a_up_i` to the interval `[a_min, a_max]`.
Ideally, this safety net will be removed once we have robust
positivity preserving methods. For now, we need this to avoid
domain error in certain circumstances.
"""
enforce_unit_bounds(a_up_i::FT, a_min::FT, a_max::FT) where {FT} =
    clamp(a_up_i, a_min, a_max)

"""
    enforce_positivity(x::FT) where {FT}

Enforce variable to be positive. Ideally, this safety net will
be removed once we have robust positivity preserving methods.
For now, we need this to avoid domain error in certain circumstances.
"""
enforce_positivity(x::FT) where {FT} = max(x, FT(0))

"""
    fix_void_up(ρa_up_i::FT, val::FT, fallback = FT(0)) where {FT}

Substitute value by a consistent fallback in case of negligible
area fraction (void updraft).
"""
function fix_void_up(ρa_up_i::FT, val::FT, fallback = FT(0)) where {FT}
    # An updraft with ρa at or below sqrt(eps) is treated as void.
    tol = sqrt(eps(FT))
    return ρa_up_i > tol ? val : fallback
end

================================================
FILE: test/Atmos/EDMF/report_mse_bomex.jl
================================================
using ClimateMachine
const clima_dir = dirname(dirname(pathof(ClimateMachine)));

# Optionally emit comparison plots against the PyCLES reference output.
if parse(Bool, get(ENV, "CLIMATEMACHINE_PLOT_EDMF_COMPARISON", "false"))
    plot_dir = joinpath(clima_dir, "output", "bomex_edmf", "pycles_comparison")
else
    plot_dir = nothing
end

include(joinpath(@__DIR__, "compute_mse.jl"))
data_file = Dataset(joinpath(PyCLES_output_dataset_path, "Bomex.nc"), "r")

#!
format: off best_mse = OrderedDict() best_mse["prog_ρ"] = 3.4936203419421122e-02 best_mse["prog_ρu_1"] = 3.0715584660655654e+03 best_mse["prog_ρu_2"] = 1.2897432819935302e-03 best_mse["prog_moisture_ρq_tot"] = 4.1572900459802775e-02 best_mse["prog_turbconv_environment_ρatke"] = 8.5590220998989253e+02 best_mse["prog_turbconv_environment_ρaθ_liq_cv"] = 8.5667227621106434e+01 best_mse["prog_turbconv_environment_ρaq_tot_cv"] = 1.6437582130429374e+02 best_mse["prog_turbconv_updraft_1_ρa"] = 8.0846471185616252e+01 best_mse["prog_turbconv_updraft_1_ρaw"] = 8.5071186636845333e-02 best_mse["prog_turbconv_updraft_1_ρaθ_liq"] = 9.0386637425608303e+00 best_mse["prog_turbconv_updraft_1_ρaq_tot"] = 1.0803377515313473e+01 #! format: on computed_mse = compute_mse( solver_config.dg.grid, solver_config.dg.balance_law, time_data, dons_arr, data_file, "Bomex", best_mse, 400, plot_dir, ) @testset "BOMEX EDMF Solution Quality Assurance (QA) tests" begin #! format: off test_mse(computed_mse, best_mse, "prog_ρ") test_mse(computed_mse, best_mse, "prog_ρu_1") test_mse(computed_mse, best_mse, "prog_ρu_2") test_mse(computed_mse, best_mse, "prog_moisture_ρq_tot") test_mse(computed_mse, best_mse, "prog_turbconv_environment_ρatke") test_mse(computed_mse, best_mse, "prog_turbconv_environment_ρaθ_liq_cv") test_mse(computed_mse, best_mse, "prog_turbconv_environment_ρaq_tot_cv") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρa") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρaw") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρaθ_liq") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρaq_tot") #! 
format: on end ================================================ FILE: test/Atmos/EDMF/report_mse_sbl_anelastic.jl ================================================ using ClimateMachine const clima_dir = dirname(dirname(pathof(ClimateMachine))); if parse(Bool, get(ENV, "CLIMATEMACHINE_PLOT_EDMF_COMPARISON", "false")) plot_dir = joinpath(clima_dir, "output", "sbl_edmf", "pycles_comparison") else plot_dir = nothing end include(joinpath(@__DIR__, "compute_mse.jl")) data_file = Dataset(joinpath(PyCLES_output_dataset_path, "Gabls.nc"), "r") #! format: off best_mse = OrderedDict() best_mse["prog_ρ"] = 9.3809207150296822e-03 best_mse["prog_ρu_1"] = 6.7269975116623837e+03 best_mse["prog_ρu_2"] = 6.8630628605220889e-01 #! format: on computed_mse = compute_mse( solver_config.dg.grid, solver_config.dg.balance_law, time_data, dons_arr, data_file, "Gabls", best_mse, 1800, plot_dir, ) @testset "SBL Anelastic Solution Quality Assurance (QA) tests" begin #! format: off test_mse(computed_mse, best_mse, "prog_ρ") test_mse(computed_mse, best_mse, "prog_ρu_1") test_mse(computed_mse, best_mse, "prog_ρu_2") #! format: on end ================================================ FILE: test/Atmos/EDMF/report_mse_sbl_coupled_edmf_an1d.jl ================================================ using ClimateMachine const clima_dir = dirname(dirname(pathof(ClimateMachine))); if parse(Bool, get(ENV, "CLIMATEMACHINE_PLOT_EDMF_COMPARISON", "false")) plot_dir = joinpath(clima_dir, "output", "sbl_edmf", "pycles_comparison") else plot_dir = nothing end include(joinpath(@__DIR__, "compute_mse.jl")) data_file = Dataset(joinpath(PyCLES_output_dataset_path, "Gabls.nc"), "r") #! 
format: off best_mse = OrderedDict() best_mse["prog_ρ"] = 9.3808142287632006e-03 best_mse["prog_ρu_1"] = 6.7427140307178524e+03 best_mse["prog_ρu_2"] = 7.2306841961112966e-01 best_mse["prog_turbconv_environment_ρatke"] = 2.9810806322235749e+02 best_mse["prog_turbconv_environment_ρaθ_liq_cv"] = 8.1270487249851797e+01 best_mse["prog_turbconv_updraft_1_ρa"] = 2.7223017351724246e+02 best_mse["prog_turbconv_updraft_1_ρaw"] = 5.5909371368198686e+02 best_mse["prog_turbconv_updraft_1_ρaθ_liq"] = 2.7933498788454813e+02 #! format: on computed_mse = compute_mse( solver_config.dg.grid, solver_config.dg.balance_law, time_data, dons_arr, data_file, "Gabls", best_mse, 1800, plot_dir, ) @testset "SBL Coupled EDMF Solution Quality Assurance (QA) tests" begin #! format: off test_mse(computed_mse, best_mse, "prog_ρ") test_mse(computed_mse, best_mse, "prog_ρu_1") test_mse(computed_mse, best_mse, "prog_ρu_2") test_mse(computed_mse, best_mse, "prog_turbconv_environment_ρatke") test_mse(computed_mse, best_mse, "prog_turbconv_environment_ρaθ_liq_cv") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρa") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρaw") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρaθ_liq") #! format: on end ================================================ FILE: test/Atmos/EDMF/report_mse_sbl_edmf.jl ================================================ using ClimateMachine const clima_dir = dirname(dirname(pathof(ClimateMachine))); if parse(Bool, get(ENV, "CLIMATEMACHINE_PLOT_EDMF_COMPARISON", "false")) plot_dir = joinpath(clima_dir, "output", "sbl_edmf", "pycles_comparison") else plot_dir = nothing end include(joinpath(@__DIR__, "compute_mse.jl")) data_file = Dataset(joinpath(PyCLES_output_dataset_path, "Gabls.nc"), "r") #! 
format: off best_mse = OrderedDict() best_mse["prog_ρ"] = 6.8041561724036673e-03 best_mse["prog_ρu_1"] = 6.2586216904022822e+03 best_mse["prog_ρu_2"] = 1.2965826326450741e-04 best_mse["prog_turbconv_environment_ρatke"] = 4.9035225536000081e+02 best_mse["prog_turbconv_environment_ρaθ_liq_cv"] = 8.7727377301948991e+01 best_mse["prog_turbconv_updraft_1_ρa"] = 1.8213581913998944e+01 best_mse["prog_turbconv_updraft_1_ρaw"] = 1.7800899665237452e-01 best_mse["prog_turbconv_updraft_1_ρaθ_liq"] = 1.3358308964295857e+01 #! format: on computed_mse = compute_mse( solver_config.dg.grid, solver_config.dg.balance_law, time_data, dons_arr, data_file, "Gabls", best_mse, 60, plot_dir, ) @testset "SBL EDMF Solution Quality Assurance (QA) tests" begin #! format: off test_mse(computed_mse, best_mse, "prog_ρ") test_mse(computed_mse, best_mse, "prog_ρu_1") test_mse(computed_mse, best_mse, "prog_ρu_2") test_mse(computed_mse, best_mse, "prog_turbconv_environment_ρatke") test_mse(computed_mse, best_mse, "prog_turbconv_environment_ρaθ_liq_cv") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρa") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρaw") test_mse(computed_mse, best_mse, "prog_turbconv_updraft_1_ρaθ_liq") #! format: on end ================================================ FILE: test/Atmos/EDMF/report_mse_sbl_ss_implicit.jl ================================================ using ClimateMachine const clima_dir = dirname(dirname(pathof(ClimateMachine))); if parse(Bool, get(ENV, "CLIMATEMACHINE_PLOT_EDMF_COMPARISON", "false")) plot_dir = joinpath(clima_dir, "output", "sbl_edmf", "pycles_comparison") else plot_dir = nothing end include(joinpath(@__DIR__, "compute_mse.jl")) data_file = Dataset(joinpath(PyCLES_output_dataset_path, "Gabls.nc"), "r") #! format: off best_mse = OrderedDict() best_mse["prog_ρ"] = 7.9878085788692155e-03 best_mse["prog_ρu_1"] = 3.0923908085978383e+03 best_mse["prog_ρu_2"] = 4.4228530597867703e+01 #! 
format: on computed_mse = compute_mse( solver_config.dg.grid, solver_config.dg.balance_law, time_data, dons_arr, data_file, "Gabls", best_mse, 3600 * 6, plot_dir, ) @testset "SBL Implicit Solution Quality Assurance (QA) tests" begin #! format: off test_mse(computed_mse, best_mse, "prog_ρ") test_mse(computed_mse, best_mse, "prog_ρu_1") test_mse(computed_mse, best_mse, "prog_ρu_2") #! format: on end ================================================ FILE: test/Atmos/EDMF/stable_bl_anelastic1d.jl ================================================ using JLD2, FileIO using ClimateMachine using ClimateMachine.SingleStackUtils using ClimateMachine.Checkpoint using ClimateMachine.BalanceLaws: vars_state import ClimateMachine.BalanceLaws: projection import ClimateMachine.DGMethods using ClimateMachine.Atmos const clima_dir = dirname(dirname(pathof(ClimateMachine))); import CLIMAParameters include(joinpath(clima_dir, "experiments", "AtmosLES", "stable_bl_model.jl")) include("edmf_model.jl") include("edmf_kernels.jl") # CLIMAParameters.Planet.T_surf_ref(::EarthParameterSet) = 290.0 # default CLIMAParameters.Planet.T_surf_ref(::EarthParameterSet) = 265 CLIMAParameters.Atmos.EDMF.a_surf(::EarthParameterSet) = 0.0 function set_clima_parameters(filename) eval(:(include($filename))) end """ init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} Initialize EDMF state variables. This method is only called at `t=0`. 
""" function init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} # Aliases: gm = state en = state.turbconv.environment up = state.turbconv.updraft N_up = n_updrafts(turbconv) # GCM setting - Initialize the grid mean profiles of prognostic variables (ρ,e_int,q_tot,u,v,w) z = altitude(m, aux) # SCM setting - need to have separate cases coded and called from a folder - see what LES does # a thermo state is used here to convert the input θ to e_int profile e_int = internal_energy(m, state, aux) param_set = parameter_set(m) ts = PhaseDry(param_set, e_int, state.ρ) T = air_temperature(ts) p = air_pressure(ts) q = PhasePartition(ts) θ_liq = liquid_ice_pottemp(ts) a_min = turbconv.subdomains.a_min @unroll_map(N_up) do i up[i].ρa = gm.ρ * a_min up[i].ρaw = gm.ρu[3] * a_min up[i].ρaθ_liq = gm.ρ * a_min * θ_liq up[i].ρaq_tot = FT(0) end # initialize environment covariance with zero for now if z <= FT(250) en.ρatke = gm.ρ * FT(0.4) * FT(1 - z / 250.0) * FT(1 - z / 250.0) * FT(1 - z / 250.0) en.ρaθ_liq_cv = gm.ρ * FT(0.4) * FT(1 - z / 250.0) * FT(1 - z / 250.0) * FT(1 - z / 250.0) else en.ρatke = FT(0) en.ρaθ_liq_cv = FT(0) end en.ρaq_tot_cv = FT(0) en.ρaθ_liq_q_tot_cv = FT(0) return nothing end; function main(::Type{FT}, cl_args) where {FT} surface_flux = cl_args["surface_flux"] # Choice of compressibility and CFL # compressibility = Compressible() compressibility = Anelastic1D() str_comp = compressibility == Compressible() ? "COMPRESS" : "ANELASTIC" # DG polynomial order N = 4 nelem_vert = 20 # Prescribe domain parameters zmax = FT(400) # Simulation time t0 = FT(0) timeend = FT(1800 * 1) CFLmax = compressibility == Compressible() ? 
FT(1) : FT(100) config_type = SingleStackConfigType ode_solver_type = ClimateMachine.ExplicitSolverType( solver_method = LSRK144NiegemannDiehlBusch, ) # Choice of SGS model N_updrafts = 1 N_quad = 3 turbconv = NoTurbConv() # turbconv = EDMF( # FT, # N_updrafts, # N_quad, # param_set, # surface = NeutralDrySurfaceModel{FT}(param_set), # ) C_smag_ = C_smag(param_set) # turbulence = ConstantKinematicViscosity(FT(0.1)) turbulence = SmagorinskyLilly{FT}(C_smag_) model = stable_bl_model( FT, config_type, zmax, surface_flux; turbulence = turbulence, turbconv = turbconv, compressibility = compressibility, ) # Assemble configuration driver_config = ClimateMachine.SingleStackConfiguration( string("SBL_", str_comp, "_1D"), N, nelem_vert, zmax, param_set, model; hmax = FT(40), ) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_solver_type = ode_solver_type, init_on_cpu = true, Courant_number = CFLmax, ) # --- Zero-out horizontal variations: vsp = vars_state(model, Prognostic(), FT) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :turbconv), ) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :energy, :ρe), ) vsa = vars_state(model, Auxiliary(), FT) horizontally_average!( driver_config.grid, solver_config.dg.state_auxiliary, varsindex(vsa, :turbconv), ) # --- dgn_config = config_diagnostics(driver_config) # boyd vandeven filter cb_boyd = GenericCallbacks.EveryXSimulationSteps(1) do Filters.apply!( solver_config.Q, ("energy.ρe",), solver_config.dg.grid, BoydVandevenFilter( solver_config.dg.grid, 1, #default=0 4, #default=32 ), ) nothing end diag_arr = [single_stack_diagnostics(solver_config)] time_data = FT[0] # Define the number of outputs from `t0` to `timeend` n_outputs = 5 # This equates to exports every ceil(Int, timeend/n_outputs) time-step: every_x_simulation_time = ceil(Int, timeend / n_outputs) cb_data_vs_time = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do 
diag_vs_z = single_stack_diagnostics(solver_config) nstep = getsteps(solver_config.solver) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) nothing end # Mass tendencies = 0 for Anelastic1D model, # so mass should be completely conserved: check_cons = (ClimateMachine.ConservationCheck("ρ", "3000steps", FT(0.00000001)),) cb_print_step = GenericCallbacks.EveryXSimulationSteps(100) do @show getsteps(solver_config.solver) nothing end if !isnothing(cl_args["cparam_file"]) ClimateMachine.Settings.output_dir = cl_args["cparam_file"] * ".output" end result = ClimateMachine.invoke!( solver_config; diagnostics_config = dgn_config, check_cons = check_cons, user_callbacks = (cb_boyd, cb_data_vs_time, cb_print_step), check_euclidean_distance = true, ) diag_vs_z = single_stack_diagnostics(solver_config) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) return solver_config, diag_arr, time_data end # ArgParse in global scope to modify Clima Parameters sbl_args = ArgParseSettings(autofix_names = true) add_arg_group!(sbl_args, "StableBoundaryLayer") @add_arg_table! 
sbl_args begin "--cparam-file" help = "specify CLIMAParameters file" arg_type = Union{String, Nothing} default = nothing "--surface-flux" help = "specify surface flux for energy and moisture" metavar = "prescribed|bulk|custom_sbl" arg_type = String default = "custom_sbl" end cl_args = ClimateMachine.init(parse_clargs = true, custom_clargs = sbl_args) if !isnothing(cl_args["cparam_file"]) filename = cl_args["cparam_file"] set_clima_parameters(filename) end solver_config, diag_arr, time_data = main(Float64, cl_args) include(joinpath(@__DIR__, "report_mse_sbl_anelastic.jl")) nothing ================================================ FILE: test/Atmos/EDMF/stable_bl_coupled_edmf_an1d.jl ================================================ using JLD2, FileIO using ClimateMachine using ClimateMachine.SingleStackUtils using ClimateMachine.Checkpoint using ClimateMachine.BalanceLaws: vars_state import ClimateMachine.BalanceLaws: projection import ClimateMachine.DGMethods using ClimateMachine.Atmos const clima_dir = dirname(dirname(pathof(ClimateMachine))); import CLIMAParameters include(joinpath(clima_dir, "experiments", "AtmosLES", "stable_bl_model.jl")) include("edmf_model.jl") include("edmf_kernels.jl") CLIMAParameters.Planet.T_surf_ref(::EarthParameterSet) = 265 CLIMAParameters.Atmos.EDMF.a_surf(::EarthParameterSet) = 0.0 function set_clima_parameters(filename) eval(:(include($filename))) end """ init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} Initialize EDMF state variables. This method is only called at `t=0`. 
""" function init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} # Aliases: gm = state en = state.turbconv.environment up = state.turbconv.updraft N_up = n_updrafts(turbconv) # GCM setting - Initialize the grid mean profiles of prognostic variables (ρ,e_int,q_tot,u,v,w) z = altitude(m, aux) # SCM setting - need to have separate cases coded and called from a folder - see what LES does # a thermo state is used here to convert the input θ to e_int profile e_int = internal_energy(m, state, aux) param_set = parameter_set(m) ts = PhaseDry(param_set, e_int, state.ρ) T = air_temperature(ts) p = air_pressure(ts) q = PhasePartition(ts) θ_liq = liquid_ice_pottemp(ts) a_min = turbconv.subdomains.a_min @unroll_map(N_up) do i up[i].ρa = gm.ρ * a_min up[i].ρaw = gm.ρu[3] * a_min up[i].ρaθ_liq = gm.ρ * a_min * θ_liq up[i].ρaq_tot = FT(0) end # initialize environment covariance with zero for now if z <= FT(250) en.ρatke = gm.ρ * FT(0.4) * FT(1 - z / 250.0) * FT(1 - z / 250.0) * FT(1 - z / 250.0) en.ρaθ_liq_cv = gm.ρ * FT(0.4) * FT(1 - z / 250.0) * FT(1 - z / 250.0) * FT(1 - z / 250.0) else en.ρatke = FT(0) en.ρaθ_liq_cv = FT(0) end en.ρaq_tot_cv = FT(0) en.ρaθ_liq_q_tot_cv = FT(0) return nothing end; function main(::Type{FT}, cl_args) where {FT} surface_flux = cl_args["surface_flux"] # Choice of compressibility and CFL # compressibility = Compressible() compressibility = Anelastic1D() str_comp = compressibility == Compressible() ? "COMPRESS" : "ANELASTIC" # DG polynomial order N = 4 nelem_vert = 20 # Prescribe domain parameters zmax = FT(400) # Simulation time t0 = FT(0) timeend = FT(1800 * 1) CFLmax = compressibility == Compressible() ? 
FT(1) : FT(100) config_type = SingleStackConfigType ode_solver_type = ClimateMachine.ExplicitSolverType( solver_method = LSRK144NiegemannDiehlBusch, ) # Choice of SGS model N_updrafts = 1 N_quad = 3 turbconv = EDMF( FT, N_updrafts, N_quad, param_set, surface = NeutralDrySurfaceModel{FT}(param_set), coupling = Coupled(), ) turbulence = ConstantKinematicViscosity(FT(0.0)) model = stable_bl_model( FT, config_type, zmax, surface_flux; turbulence = turbulence, turbconv = turbconv, compressibility = compressibility, ) # Assemble configuration driver_config = ClimateMachine.SingleStackConfiguration( string("SBL_COUPLED_", str_comp, "_1D"), N, nelem_vert, zmax, param_set, model; hmax = FT(40), ) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_solver_type = ode_solver_type, init_on_cpu = true, Courant_number = CFLmax, ) # --- Zero-out horizontal variations: vsp = vars_state(model, Prognostic(), FT) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :turbconv), ) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :energy, :ρe), ) vsa = vars_state(model, Auxiliary(), FT) horizontally_average!( driver_config.grid, solver_config.dg.state_auxiliary, varsindex(vsa, :turbconv), ) # --- dgn_config = config_diagnostics(driver_config) # boyd vandeven filter num_state_prognostic = number_states(driver_config.bl, Prognostic()) cb_boyd = GenericCallbacks.EveryXSimulationSteps(1) do Filters.apply!( solver_config.Q, 1:num_state_prognostic, solver_config.dg.grid, BoydVandevenFilter( solver_config.dg.grid, 1, #default=0 4, #default=32 ), ) nothing end diag_arr = [single_stack_diagnostics(solver_config)] time_data = FT[0] # Define the number of outputs from `t0` to `timeend` n_outputs = 5 # This equates to exports every ceil(Int, timeend/n_outputs) time-step: every_x_simulation_time = ceil(Int, timeend / n_outputs) cb_data_vs_time = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do diag_vs_z = 
single_stack_diagnostics(solver_config) nstep = getsteps(solver_config.solver) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) nothing end # Mass tendencies = 0 for Anelastic1D model, # so mass should be completely conserved: check_cons = (ClimateMachine.ConservationCheck("ρ", "3000steps", FT(0.00000001)),) cb_print_step = GenericCallbacks.EveryXSimulationSteps(100) do @show getsteps(solver_config.solver) nothing end if !isnothing(cl_args["cparam_file"]) ClimateMachine.Settings.output_dir = cl_args["cparam_file"] * ".output" end result = ClimateMachine.invoke!( solver_config; diagnostics_config = dgn_config, check_cons = check_cons, user_callbacks = (cb_boyd, cb_data_vs_time, cb_print_step), check_euclidean_distance = true, ) diag_vs_z = single_stack_diagnostics(solver_config) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) return solver_config, diag_arr, time_data end # ArgParse in global scope to modify Clima Parameters sbl_args = ArgParseSettings(autofix_names = true) add_arg_group!(sbl_args, "StableBoundaryLayer") @add_arg_table! 
sbl_args begin "--cparam-file" help = "specify CLIMAParameters file" arg_type = Union{String, Nothing} default = nothing "--surface-flux" help = "specify surface flux for energy and moisture" metavar = "prescribed|bulk|custom_sbl" arg_type = String default = "custom_sbl" end cl_args = ClimateMachine.init(parse_clargs = true, custom_clargs = sbl_args) if !isnothing(cl_args["cparam_file"]) filename = cl_args["cparam_file"] set_clima_parameters(filename) end solver_config, diag_arr, time_data = main(Float64, cl_args) # Uncomment lines to save output using JLD2 output_dir = @__DIR__; mkpath(output_dir); function dons(diag_vs_z) return Dict(map(keys(first(diag_vs_z))) do k string(k) => [getproperty(ca, k) for ca in diag_vs_z] end) end get_dons_arr(diag_arr) = [dons(diag_vs_z) for diag_vs_z in diag_arr] dons_arr = get_dons_arr(diag_arr) println(dons_arr[1].keys) z = get_z(solver_config.dg.grid; rm_dupes = true); save( string(output_dir, "/sbl_coupled.jld2"), "dons_arr", dons_arr, "time_data", time_data, "z", z, ) include(joinpath(@__DIR__, "report_mse_sbl_coupled_edmf_an1d.jl")) nothing ================================================ FILE: test/Atmos/EDMF/stable_bl_edmf.jl ================================================ using JLD2, FileIO using ClimateMachine using ClimateMachine.SingleStackUtils using ClimateMachine.Checkpoint using ClimateMachine.BalanceLaws: vars_state const clima_dir = dirname(dirname(pathof(ClimateMachine))); import CLIMAParameters include(joinpath(clima_dir, "experiments", "AtmosLES", "stable_bl_model.jl")) include(joinpath("helper_funcs", "diagnostics_configuration.jl")) include("edmf_model.jl") include("edmf_kernels.jl") CLIMAParameters.Planet.T_surf_ref(::EarthParameterSet) = 265 CLIMAParameters.Atmos.EDMF.a_surf(::EarthParameterSet) = 0.0 """ init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} Initialize EDMF state variables. This method is only called at `t=0`. 
"""
function init_state_prognostic!(
    turbconv::EDMF{FT},
    m::AtmosModel{FT},
    state::Vars,
    aux::Vars,
    localgeo,
    t::Real,
) where {FT}
    # Aliases: gm = grid-mean state, en = environment subdomain, up = updrafts.
    gm = state
    en = state.turbconv.environment
    up = state.turbconv.updraft
    N_up = n_updrafts(turbconv)
    # GCM setting - Initialize the grid mean profiles of prognostic variables (ρ,e_int,q_tot,u,v,w)
    z = altitude(m, aux)
    # SCM setting - need to have separate cases coded and called from a folder - see what LES does
    # A thermo state is used here to convert the input θ to an e_int profile.
    e_int = internal_energy(m, state, aux)
    param_set = parameter_set(m)
    ts = PhaseDry(param_set, e_int, state.ρ)
    # NOTE(review): T, p and q are computed but unused below; only θ_liq is
    # consumed by the updraft initialization.
    T = air_temperature(ts)
    p = air_pressure(ts)
    q = PhasePartition(ts)
    θ_liq = liquid_ice_pottemp(ts)
    a_min = turbconv.subdomains.a_min
    # Seed every updraft with the minimum area fraction a_min, carrying the
    # grid-mean vertical momentum and liquid-ice potential temperature;
    # updrafts start dry (ρaq_tot = 0).
    @unroll_map(N_up) do i
        up[i].ρa = gm.ρ * a_min
        up[i].ρaw = gm.ρu[3] * a_min
        up[i].ρaθ_liq = gm.ρ * a_min * θ_liq
        up[i].ρaq_tot = FT(0)
    end
    # initialize environment covariance with zero for now
    # TKE and θ_liq covariance follow 0.4 * (1 - z/250)^3 below z = 250 m and
    # vanish above.
    if z <= FT(250)
        en.ρatke =
            gm.ρ *
            FT(0.4) *
            FT(1 - z / 250.0) *
            FT(1 - z / 250.0) *
            FT(1 - z / 250.0)
        en.ρaθ_liq_cv =
            gm.ρ *
            FT(0.4) *
            FT(1 - z / 250.0) *
            FT(1 - z / 250.0) *
            FT(1 - z / 250.0)
    else
        en.ρatke = FT(0)
        en.ρaθ_liq_cv = FT(0)
    end
    en.ρaq_tot_cv = FT(0)
    en.ρaθ_liq_q_tot_cv = FT(0)
    return nothing
end;

# Driver for the stable-boundary-layer EDMF single-stack test. Returns the
# solver configuration plus the per-output diagnostics and output times.
function main(::Type{FT}, cl_args) where {FT}
    surface_flux = cl_args["surface_flux"]

    # DG polynomial order
    N = 4
    nelem_vert = 15

    # Prescribe domain parameters
    zmax = FT(400)

    t0 = FT(0)

    # Simulation time
    timeend = FT(60)
    CFLmax = FT(0.50)

    config_type = SingleStackConfigType

    ode_solver_type = ClimateMachine.ExplicitSolverType(
        solver_method = LSRK144NiegemannDiehlBusch,
    )

    N_updrafts = 1
    N_quad = 3 # Using N_quad = 1 leads to norm(Q) = NaN at init.
turbconv = EDMF( FT, N_updrafts, N_quad, param_set, surface = NeutralDrySurfaceModel{FT}(param_set), ) model = stable_bl_model( FT, config_type, zmax, surface_flux; turbconv = turbconv, ) # Assemble configuration driver_config = ClimateMachine.SingleStackConfiguration( "SBL_EDMF", N, nelem_vert, zmax, param_set, model; hmax = FT(40), ) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_solver_type = ode_solver_type, init_on_cpu = true, Courant_number = CFLmax, ) # --- Zero-out horizontal variations: vsp = vars_state(model, Prognostic(), FT) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :turbconv), ) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :energy, :ρe), ) vsa = vars_state(model, Auxiliary(), FT) horizontally_average!( driver_config.grid, solver_config.dg.state_auxiliary, varsindex(vsa, :turbconv), ) # --- dgn_config = config_diagnostics(driver_config, timeend; interval = "10ssecs") cbtmarfilter = GenericCallbacks.EveryXSimulationSteps(1) do Filters.apply!( solver_config.Q, (turbconv_filters(turbconv)...,), solver_config.dg.grid, TMARFilter(), ) nothing end diag_arr = [single_stack_diagnostics(solver_config)] time_data = FT[0] # Define the number of outputs from `t0` to `timeend` n_outputs = 5 # This equates to exports every ceil(Int, timeend/n_outputs) time-step: every_x_simulation_time = ceil(Int, timeend / n_outputs) cb_data_vs_time = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do diag_vs_z = single_stack_diagnostics(solver_config) nstep = getsteps(solver_config.solver) # Save to disc (for debugging): # @save "bomex_edmf_nstep=$nstep.jld2" diag_vs_z push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) nothing end check_cons = ( ClimateMachine.ConservationCheck("ρ", "3000steps", FT(0.001)), ClimateMachine.ConservationCheck("energy.ρe", "3000steps", FT(0.1)), ) cb_print_step = GenericCallbacks.EveryXSimulationSteps(100) do @show 
getsteps(solver_config.solver) nothing end result = ClimateMachine.invoke!( solver_config; diagnostics_config = dgn_config, check_cons = check_cons, user_callbacks = (cbtmarfilter, cb_data_vs_time, cb_print_step), check_euclidean_distance = true, ) diag_vs_z = single_stack_diagnostics(solver_config) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) return solver_config, diag_arr, time_data end # add a command line argument to specify the kind of surface flux # TODO: this will move to the future namelist functionality sbl_args = ArgParseSettings(autofix_names = true) add_arg_group!(sbl_args, "StableBoundaryLayer") @add_arg_table! sbl_args begin "--surface-flux" help = "specify surface flux for energy and moisture" metavar = "prescribed|bulk|custom_sbl" arg_type = String default = "custom_sbl" end cl_args = ClimateMachine.init( parse_clargs = true, custom_clargs = sbl_args, output_dir = get(ENV, "CLIMATEMACHINE_SETTINGS_OUTPUT_DIR", "output"), fix_rng_seed = true, ) solver_config, diag_arr, time_data = main(Float64, cl_args) ## Uncomment lines to save output using JLD2 # output_dir = @__DIR__; # mkpath(output_dir); # function dons(diag_vs_z) # return Dict(map(keys(first(diag_vs_z))) do k # string(k) => [getproperty(ca, k) for ca in diag_vs_z] # end) # end # get_dons_arr(diag_arr) = [dons(diag_vs_z) for diag_vs_z in diag_arr] # dons_arr = get_dons_arr(diag_arr) # println(dons_arr[1].keys) # z = get_z(solver_config.dg.grid; rm_dupes = true); # save( # string(output_dir, "/sbl_edmf.jld2"), # "dons_arr", # dons_arr, # "time_data", # time_data, # "z", # z, # ) include(joinpath(@__DIR__, "report_mse_sbl_edmf.jl")) nothing ================================================ FILE: test/Atmos/EDMF/stable_bl_edmf_fvm.jl ================================================ using JLD2, FileIO using ClimateMachine using ClimateMachine.SingleStackUtils using ClimateMachine.Checkpoint using ClimateMachine.BalanceLaws: vars_state import 
ClimateMachine.DGMethods.FVReconstructions: FVLinear const clima_dir = dirname(dirname(pathof(ClimateMachine))); import CLIMAParameters include(joinpath(clima_dir, "experiments", "AtmosLES", "stable_bl_model.jl")) include("edmf_model.jl") include("edmf_kernels.jl") CLIMAParameters.Planet.T_surf_ref(::EarthParameterSet) = 265 CLIMAParameters.Atmos.EDMF.a_surf(::EarthParameterSet) = 0.0 """ init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} Initialize EDMF state variables. This method is only called at `t=0`. """ function init_state_prognostic!( turbconv::EDMF{FT}, m::AtmosModel{FT}, state::Vars, aux::Vars, localgeo, t::Real, ) where {FT} # Aliases: gm = state en = state.turbconv.environment up = state.turbconv.updraft N_up = n_updrafts(turbconv) # GCM setting - Initialize the grid mean profiles of prognostic variables (ρ,e_int,q_tot,u,v,w) z = altitude(m, aux) # SCM setting - need to have separate cases coded and called from a folder - see what LES does # a thermo state is used here to convert the input θ to e_int profile e_int = internal_energy(m, state, aux) param_set = parameter_set(m) ts = PhaseDry(param_set, e_int, state.ρ) T = air_temperature(ts) p = air_pressure(ts) q = PhasePartition(ts) θ_liq = liquid_ice_pottemp(ts) a_min = turbconv.subdomains.a_min @unroll_map(N_up) do i up[i].ρa = gm.ρ * a_min up[i].ρaw = gm.ρu[3] * a_min up[i].ρaθ_liq = gm.ρ * a_min * θ_liq up[i].ρaq_tot = FT(0) end # initialize environment covariance with zero for now if z <= FT(250) en.ρatke = gm.ρ * FT(0.4) * FT(1 - z / 250.0) * FT(1 - z / 250.0) * FT(1 - z / 250.0) en.ρaθ_liq_cv = gm.ρ * FT(0.4) * FT(1 - z / 250.0) * FT(1 - z / 250.0) * FT(1 - z / 250.0) else en.ρatke = FT(0) en.ρaθ_liq_cv = FT(0) end en.ρaq_tot_cv = FT(0) en.ρaθ_liq_q_tot_cv = FT(0) return nothing end; function main(::Type{FT}, cl_args) where {FT} surface_flux = cl_args["surface_flux"] # DG polynomial order N = (1, 0) nelem_vert = 80 # Prescribe 
domain parameters zmax = FT(400) t0 = FT(0) # Simulation time timeend = FT(60) CFLmax = FT(0.50) config_type = SingleStackConfigType ode_solver_type = ClimateMachine.ExplicitSolverType( # solver_method = LSRK144NiegemannDiehlBusch, solver_method = LSRK54CarpenterKennedy, ) N_updrafts = 1 N_quad = 3 # Using N_quad = 1 leads to norm(Q) = NaN at init. turbconv = EDMF( FT, N_updrafts, N_quad, param_set, surface = NeutralDrySurfaceModel{FT}(param_set), ) model = stable_bl_model( FT, config_type, zmax, surface_flux; turbconv = turbconv, ref_state = HydrostaticState( DecayingTemperatureProfile{FT}(param_set); subtract_off = false, ), ) # Assemble configuration driver_config = ClimateMachine.SingleStackConfiguration( "SBL_EDMF", N, nelem_vert, zmax, param_set, model; hmax = FT(40), numerical_flux_first_order = RoeNumericalFlux(), fv_reconstruction = HBFVReconstruction(model, FVLinear()), ) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_solver_type = ode_solver_type, init_on_cpu = true, Courant_number = CFLmax, ) # --- Zero-out horizontal variations: vsp = vars_state(model, Prognostic(), FT) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :turbconv), ) horizontally_average!( driver_config.grid, solver_config.Q, varsindex(vsp, :energy, :ρe), ) vsa = vars_state(model, Auxiliary(), FT) horizontally_average!( driver_config.grid, solver_config.dg.state_auxiliary, varsindex(vsa, :turbconv), ) # --- dgn_config = config_diagnostics(driver_config) cbtmarfilter = GenericCallbacks.EveryXSimulationSteps(100) do nstep = getsteps(solver_config.solver) Filters.apply!( solver_config.Q, (turbconv_filters(turbconv)...,), solver_config.dg.grid, TMARFilter(), ) nothing end diag_arr = [single_stack_diagnostics(solver_config)] time_data = FT[0] # Define the number of outputs from `t0` to `timeend` n_outputs = 5 # This equates to exports every ceil(Int, timeend/n_outputs) time-step: every_x_simulation_time = ceil(Int, timeend / 
n_outputs) cb_data_vs_time = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do diag_vs_z = single_stack_diagnostics(solver_config) nstep = getsteps(solver_config.solver) # Save to disc (for debugging): # @save "bomex_edmf_nstep=$nstep.jld2" diag_vs_z push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) nothing end check_cons = ( ClimateMachine.ConservationCheck("ρ", "3000steps", FT(0.01)), ClimateMachine.ConservationCheck("energy.ρe", "3000steps", FT(0.1)), ) cb_print_step = GenericCallbacks.EveryXSimulationSteps(500) do @show getsteps(solver_config.solver) nothing end result = ClimateMachine.invoke!( solver_config; diagnostics_config = dgn_config, check_cons = check_cons, user_callbacks = (cbtmarfilter, cb_data_vs_time, cb_print_step), check_euclidean_distance = true, ) diag_vs_z = single_stack_diagnostics(solver_config) push!(diag_arr, diag_vs_z) push!(time_data, gettime(solver_config.solver)) return solver_config, diag_arr, time_data end # add a command line argument to specify the kind of surface flux # TODO: this will move to the future namelist functionality sbl_args = ArgParseSettings(autofix_names = true) add_arg_group!(sbl_args, "StableBoundaryLayer") @add_arg_table! 
sbl_args begin "--surface-flux" help = "specify surface flux for energy and moisture" metavar = "prescribed|bulk|custom_sbl" arg_type = String default = "custom_sbl" end cl_args = ClimateMachine.init( parse_clargs = true, custom_clargs = sbl_args, output_dir = get(ENV, "CLIMATEMACHINE_SETTINGS_OUTPUT_DIR", "output"), fix_rng_seed = true, ) solver_config, diag_arr, time_data = main(Float64, cl_args) include(joinpath(@__DIR__, "report_mse_sbl_edmf.jl")) nothing ================================================ FILE: test/Atmos/EDMF/stable_bl_single_stack_implicit.jl ================================================ using ClimateMachine using ClimateMachine.SystemSolvers using ClimateMachine.ODESolvers using ClimateMachine.MPIStateArrays using ClimateMachine.SingleStackUtils using ClimateMachine.Checkpoint using ClimateMachine.BalanceLaws: vars_state using JLD2, FileIO const clima_dir = dirname(dirname(pathof(ClimateMachine))); include(joinpath(clima_dir, "experiments", "AtmosLES", "stable_bl_model.jl")) include("edmf_model.jl") include("edmf_kernels.jl") function main(::Type{FT}, cl_args) where {FT} surface_flux = cl_args["surface_flux"] # DG polynomial order N = 1 nelem_vert = 50 # Prescribe domain parameters zmax = FT(400) t0 = FT(0) # Simulation time timeend = FT(3600 * 6) CFLmax = FT(40.0) config_type = SingleStackConfigType ode_solver_type = ClimateMachine.ExplicitSolverType( solver_method = LSRK144NiegemannDiehlBusch, ) N_updrafts = 1 N_quad = 3 # Using N_quad = 1 leads to norm(Q) = NaN at init. 
turbconv = NoTurbConv()
    C_smag = FT(0.23)

    # Stable-boundary-layer model with Smagorinsky-Lilly turbulence and no
    # turbulence-convection (EDMF) scheme.
    model = stable_bl_model(
        FT,
        config_type,
        zmax,
        surface_flux;
        turbulence = SmagorinskyLilly{FT}(C_smag),
        turbconv = turbconv,
    )

    # Assemble configuration
    driver_config = ClimateMachine.SingleStackConfiguration(
        "SBL_EDMF",
        N,
        nelem_vert,
        zmax,
        param_set,
        model;
        hmax = FT(40),
    )

    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        init_on_cpu = true,
        Courant_number = CFLmax,
    )

    #################### Change the ode_solver to implicit solver
    dg = solver_config.dg
    Q = solver_config.Q

    # Vertical-direction DG model: used below as the stiff (implicit) part of
    # the IMEX splitting.
    vdg = DGModel(
        driver_config;
        state_auxiliary = dg.state_auxiliary,
        direction = VerticalDirection(),
    )

    # linear solver relative tolerance rtol which should be slightly smaller than the nonlinear solver tol
    linearsolver = BatchedGeneralizedMinimalResidual(
        dg,
        Q;
        max_subspace_size = 30,
        atol = -1.0,
        rtol = 5e-5,
    )

    """
    N(q)(Q) = Qhat => F(Q) = N(q)(Q) - Qhat
    F(Q) == 0
    ||F(Q^i) || / ||F(Q^0) || < tol
    """
    # ϵ is a sensitivity parameter for this problem: it determines the finite
    # difference Jacobian dF = (F(Q + ϵdQ) - F(Q))/ϵ.
    # Larger tolerances were also tried, but tol = 1e-3 does not work.
    nonlinearsolver =
        JacobianFreeNewtonKrylovSolver(Q, linearsolver; tol = 1e-4, ϵ = 1.e-10)

    # This is a second-order time integrator; to change it to a first-order
    # integrator use ARK1ForwardBackwardEuler, which can reduce the cost by
    # half at the cost of accuracy and stability.
    # preconditioner_update_freq = 50 means updating the preconditioner every
    # 50 Newton solves; updating it more frequently will accelerate the
    # convergence of the linear solves, but each update is very expensive.
    ode_solver = ARK2ImplicitExplicitMidpoint(
        dg,
        vdg,
        NonLinearBackwardEulerSolver(
            nonlinearsolver;
            isadjustable = true,
            preconditioner_update_freq = 50,
        ),
        Q;
        dt = solver_config.dt,
        t0 = 0,
        split_explicit_implicit = false,
        variant = NaiveVariant(),
    )
    # Replace the explicit solver built by SolverConfiguration with the IMEX
    # solver assembled above.
    solver_config.solver = ode_solver
    #######################################

    # --- Zero-out horizontal variations:
    vsp = vars_state(model, Prognostic(), FT)
    horizontally_average!(
        driver_config.grid,
        solver_config.Q,
        varsindex(vsp, :turbconv),
    )
    horizontally_average!(
        driver_config.grid,
        solver_config.Q,
        varsindex(vsp, :energy, :ρe),
    )
    vsa = vars_state(model, Auxiliary(), FT)
    horizontally_average!(
        driver_config.grid,
        solver_config.dg.state_auxiliary,
        varsindex(vsa, :turbconv),
    )
    # ---

    dgn_config = config_diagnostics(driver_config)

    # Every-step TMAR filter over the turbconv variables.
    # NOTE(review): turbconv is NoTurbConv() in this driver, so
    # turbconv_filters(turbconv) presumably yields no variables — confirm.
    cbtmarfilter = GenericCallbacks.EveryXSimulationSteps(1) do
        Filters.apply!(
            solver_config.Q,
            (turbconv_filters(turbconv)...,),
            solver_config.dg.grid,
            TMARFilter(),
        )
        nothing
    end

    diag_arr = [single_stack_diagnostics(solver_config)]
    time_data = FT[0]

    # Define the number of outputs from `t0` to `timeend`
    n_outputs = 5

    # This equates to exports every ceil(Int, timeend/n_outputs) time-step:
    every_x_simulation_time = ceil(Int, timeend / n_outputs)

    cb_data_vs_time =
        GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do
            diag_vs_z = single_stack_diagnostics(solver_config)
            nstep = getsteps(solver_config.solver)
            # Save to disc (for debugging):
            # @save "bomex_edmf_nstep=$nstep.jld2" diag_vs_z
            push!(diag_arr, diag_vs_z)
            push!(time_data, gettime(solver_config.solver))
            nothing
        end

    check_cons =
        (ClimateMachine.ConservationCheck("ρ", "3000steps", FT(0.001)),)

    cb_print_step = GenericCallbacks.EveryXSimulationSteps(100) do
        @show getsteps(solver_config.solver)
        nothing
    end

    result = ClimateMachine.invoke!(
        solver_config;
        diagnostics_config = dgn_config,
        check_cons = check_cons,
        user_callbacks = (cbtmarfilter, cb_data_vs_time, cb_print_step),
        check_euclidean_distance = true,
    )

    # Final diagnostics snapshot at `timeend`.
    diag_vs_z = single_stack_diagnostics(solver_config)
    push!(diag_arr, diag_vs_z)
    push!(time_data, gettime(solver_config.solver))

    return solver_config, diag_arr, time_data
end

# add a command line argument to specify the kind of surface flux
# TODO: this will move to the future namelist functionality
sbl_args = ArgParseSettings(autofix_names = true)
add_arg_group!(sbl_args, "StableBoundaryLayer")
@add_arg_table! sbl_args begin
    "--surface-flux"
    help = "specify surface flux for energy and moisture"
    metavar = "prescribed|bulk|custom_sbl"
    arg_type = String
    default = "custom_sbl"
end

cl_args = ClimateMachine.init(
    parse_clargs = true,
    custom_clargs = sbl_args,
    output_dir = get(ENV, "CLIMATEMACHINE_SETTINGS_OUTPUT_DIR", "output"),
    fix_rng_seed = true,
)

solver_config, diag_arr, time_data = main(Float64, cl_args)

include(joinpath(@__DIR__, "report_mse_sbl_ss_implicit.jl"))

# Evaluate to `nothing` so `include`-ing this script yields no return value.
nothing
================================================
FILE: test/Atmos/EDMF/variable_map.jl
================================================
#! format: off
# var_map: map a flattened ClimateMachine variable name (e.g. "prog_ρu_1")
# to a tuple of (reference-data variable name, weighting factors).
# Factors such as (:ρ, :a) mark the prognostic as density- and area-weighted;
# NOTE(review): confirm the exact de-weighting convention against the code
# that consumes `var_map`. Names without a specific method map to `nothing`.
var_map(s::String) = var_map(Val(Symbol(s)))
var_map(::Val{T}) where {T} = nothing
var_map(::Val{:prog_ρ}) = ("rho", ())
var_map(::Val{:prog_ρu_1}) = ("u_mean", (:ρ,))
var_map(::Val{:prog_ρu_2}) = ("v_mean", (:ρ,))
var_map(::Val{:prog_moisture_ρq_tot}) = ("qt_mean", (:ρ,))
var_map(::Val{:prog_turbconv_updraft_1_ρa}) = ("updraft_fraction", (:ρ,))
var_map(::Val{:prog_turbconv_updraft_1_ρaw}) = ("updraft_w", (:ρ, :a))
var_map(::Val{:prog_turbconv_updraft_1_ρaq_tot}) = ("updraft_qt", (:ρ, :a))
var_map(::Val{:prog_turbconv_updraft_1_ρaθ_liq}) = ("updraft_thetali", (:ρ, :a))
var_map(::Val{:prog_turbconv_environment_ρatke}) = ("tke_mean", (:ρ, :a))
var_map(::Val{:prog_turbconv_environment_ρaθ_liq_cv}) = ("env_thetali2", (:ρ, :a))
var_map(::Val{:prog_turbconv_environment_ρaq_tot_cv}) = ("env_qt2", (:ρ, :a))
#!
format: on ================================================ FILE: test/Atmos/Model/Artifacts.toml ================================================ [ref_state] git-tree-sha1 = "f6b0e460e5660732eb9e755b1e06d395ea3fb518" ================================================ FILE: test/Atmos/Model/discrete_hydrostatic_balance.jl ================================================ using ClimateMachine ClimateMachine.init(parse_clargs = true) using ClimateMachine.MPIStateArrays using ClimateMachine.Atmos using ClimateMachine.ConfigTypes using ClimateMachine.ODESolvers using ClimateMachine.SystemSolvers: ManyColumnLU using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.Mesh.Grids using Thermodynamics.TemperatureProfiles using ClimateMachine.TurbulenceClosures using ClimateMachine.VariableTemplates using ClimateMachine.Mesh.Geometry: LocalGeometry using LinearAlgebra using StaticArrays using Test using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() function init_to_ref_state!(problem, bl, state, aux, localgeo, t) FT = eltype(state) state.ρ = aux.ref_state.ρ state.ρu = SVector{3, FT}(0, 0, 0) state.energy.ρe = aux.ref_state.ρe end function config_balanced( FT, poly_order, temp_profile, numflux, (config_type, config_fun, config_args), ) ref_state = HydrostaticState(temp_profile; subtract_off = false) physics = AtmosPhysics{FT}( param_set; ref_state = ref_state, turbulence = ConstantDynamicViscosity(FT(0)), hyperdiffusion = NoHyperDiffusion(), moisture = DryModel(), ) model = AtmosModel{FT}( config_type, physics; source = (Gravity(),), init_state_prognostic = init_to_ref_state!, ) config = config_fun( "balanced state", poly_order, config_args..., param_set, nothing; model = model, numerical_flux_first_order = numflux, ) return config end function main() FT = Float64 poly_order = 4 timestart = FT(0) timeend = FT(100) domain_height = FT(50e3) LES_params = let LES_resolution = ntuple(_ -> domain_height / 
3poly_order, 3) LES_domain = ntuple(_ -> domain_height, 3) (LES_resolution, LES_domain...) end GCM_params = let GCM_resolution = (3, 3) (GCM_resolution, domain_height) end GCM = (AtmosGCMConfigType, ClimateMachine.AtmosGCMConfiguration, GCM_params) LES = (AtmosLESConfigType, ClimateMachine.AtmosLESConfiguration, LES_params) imex_solver_type = ClimateMachine.IMEXSolverType( splitting_type = HEVISplitting(), implicit_model = AtmosAcousticGravityLinearModel, implicit_solver = ManyColumnLU, solver_method = ARK2GiraldoKellyConstantinescu, ) explicit_solver_type = ClimateMachine.ExplicitSolverType( solver_method = LSRK54CarpenterKennedy, ) @testset for config in (LES, GCM) @testset for ode_solver_type in (explicit_solver_type, imex_solver_type) @testset for numflux in ( CentralNumericalFluxFirstOrder(), RoeNumericalFlux(), HLLCNumericalFlux(), ) @testset for temp_profile in ( IsothermalProfile(param_set, FT), DecayingTemperatureProfile{FT}(param_set), ) driver_config = config_balanced( FT, poly_order, temp_profile, numflux, config, ) solver_config = ClimateMachine.SolverConfiguration( timestart, timeend, driver_config, Courant_number = FT(0.1), init_on_cpu = true, ode_solver_type = ode_solver_type, CFL_direction = EveryDirection(), diffdir = HorizontalDirection(), ) Qinit = similar(solver_config.Q) Qinit .= solver_config.Q ClimateMachine.invoke!(solver_config) error = euclidean_distance(solver_config.Q, Qinit) / norm(Qinit) @test error <= 100 * eps(FT) end end end end end main() ================================================ FILE: test/Atmos/Model/get_atmos_ref_states.jl ================================================ using ClimateMachine using ClimateMachine.ConfigTypes using ClimateMachine.Atmos using ClimateMachine.BalanceLaws using ClimateMachine.Checkpoint using ClimateMachine.Mesh.Grids using Thermodynamics using Thermodynamics.TemperatureProfiles using ClimateMachine.SingleStackUtils using ClimateMachine.VariableTemplates using CLIMAParameters struct 
EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using Test ClimateMachine.init() function get_atmos_ref_states(nelem_vert, N_poly, RH) FT = Float64 physics = AtmosPhysics{FT}( param_set; ref_state = HydrostaticState( DecayingTemperatureProfile{FT}(param_set), RH, ), ) model = AtmosModel{FT}( SingleStackConfigType, physics; init_state_prognostic = (_...) -> nothing, ) driver_config = ClimateMachine.SingleStackConfiguration( "ref_state", N_poly, nelem_vert, FT(25e3), param_set, model, ) solver_config = ClimateMachine.SolverConfiguration( FT(0), FT(10), driver_config; skip_update_aux = true, ode_dt = FT(1), ) return solver_config end ================================================ FILE: test/Atmos/Model/ref_state.jl ================================================ include("get_atmos_ref_states.jl") using JLD2 using Pkg.Artifacts using ArtifactWrappers using Thermodynamics const TD = Thermodynamics @testset "Hydrostatic reference states - regression test" begin ref_state_dataset = ArtifactWrapper( @__DIR__, isempty(get(ENV, "CI", "")), "ref_state", ArtifactFile[ArtifactFile( url = "https://caltech.box.com/shared/static/gyq292ns79wm9xpmy1sse3qtnpcxw54q.jld2", filename = "ref_state.jld2", ),], ) ref_state_dataset_path = get_data_folder(ref_state_dataset) data_file = joinpath(ref_state_dataset_path, "ref_state.jld2") RH = 0.5 (nelem_vert, N_poly) = (20, 4) solver_config = get_atmos_ref_states(nelem_vert, N_poly, RH) dons_arr = dict_of_nodal_states(solver_config, (Auxiliary(),)) T = dons_arr["ref_state.T"] p = dons_arr["ref_state.p"] ρ = dons_arr["ref_state.ρ"] @load "$data_file" T_ref p_ref ρ_ref @test all(isapprox.(T, T_ref; rtol = 1e-6)) @test all(p .≈ p_ref) @test all(ρ .≈ ρ_ref) end @testset "Hydrostatic reference states - correctness" begin RH = 0.5 # Fails on (80, 1) for (nelem_vert, N_poly) in [(40, 2), (20, 4)] solver_config = get_atmos_ref_states(nelem_vert, N_poly, RH) dons_arr = dict_of_nodal_states(solver_config) 
phase_type = PhaseEquil T = dons_arr["ref_state.T"] p = dons_arr["ref_state.p"] ρ = dons_arr["ref_state.ρ"] q_tot = dons_arr["ref_state.ρq_tot"] ./ ρ q_pt = PhasePartition.(q_tot) # TODO: test that ρ and p are in discrete hydrostatic balance # Test state for thermodynamic consistency (with ideal gas law) T_igl = TD.air_temperature_from_ideal_gas_law.(Ref(param_set), p, ρ, q_pt) @test all(T .≈ T_igl) # Test that relative humidity in reference state is approximately # input relative humidity RH_ref = relative_humidity.(Ref(param_set), T, p, Ref(phase_type), q_pt) @show max(abs.(RH .- RH_ref)...) @test all(isapprox.(RH, RH_ref, atol = 0.05)) end end ================================================ FILE: test/Atmos/Model/runtests.jl ================================================ using Test @testset "Atmos Model" begin include("ref_state.jl") end ================================================ FILE: test/Atmos/Parameterizations/Microphysics/KM_ice.jl ================================================ using Dierckx include("KinematicModel.jl") # speed up the relaxation timescales for cloud water and cloud ice CLIMAParameters.Atmos.Microphysics.τ_cond_evap(::AbstractParameterSet) = 0.5 CLIMAParameters.Atmos.Microphysics.τ_sub_dep(::AbstractParameterSet) = 0.5 function vars_state(m::KinematicModel, ::Prognostic, FT) @vars begin ρ::FT ρu::SVector{3, FT} ρe::FT ρq_tot::FT ρq_liq::FT ρq_ice::FT ρq_rai::FT ρq_sno::FT end end function vars_state(m::KinematicModel, ::Auxiliary, FT) @vars begin # defined in init_state_auxiliary p::FT z_coord::FT x_coord::FT # defined in update_aux u::FT w::FT q_tot::FT q_vap::FT q_liq::FT q_ice::FT q_rai::FT q_sno::FT e_tot::FT e_kin::FT e_pot::FT e_int::FT T::FT S_liq::FT S_ice::FT RH::FT rain_w::FT snow_w::FT # more diagnostics src_cloud_liq::FT src_cloud_ice::FT src_rain_acnv::FT src_snow_acnv::FT src_liq_rain_accr::FT src_liq_snow_accr::FT src_ice_snow_accr::FT src_ice_rain_accr::FT src_snow_rain_accr::FT src_rain_accr_sink::FT 
src_rain_evap::FT src_snow_subl::FT src_snow_melt::FT flag_cloud_liq::FT flag_cloud_ice::FT flag_rain::FT flag_snow::FT # helpers for bc ρe_init::FT ρq_tot_init::FT end end function init_kinematic_eddy!(eddy_model, state, aux, localgeo, t, spline_fun) FT = eltype(state) _grav::FT = grav(param_set) dc = eddy_model.data_config (x, y, z) = localgeo.coord (xc, yc, zc) = localgeo.center_coord @inbounds begin init_T, init_qt, init_p, init_ρ, init_dρ = spline_fun # density q_pt_0 = PhasePartition(init_qt(z)) R_m, cp_m, cv_m, γ = gas_constants(param_set, q_pt_0) T::FT = init_T(z) ρ::FT = init_ρ(z) state.ρ = ρ aux.p = init_p(z) # moisture state.ρq_tot = ρ * init_qt(z) state.ρq_liq = ρ * q_pt_0.liq state.ρq_ice = ρ * q_pt_0.ice state.ρq_rai = ρ * FT(0) state.ρq_sno = ρ * FT(0) # [Grabowski1998](@cite) # velocity (derivative of streamfunction) # This is actually different than what comes out from taking a # derivative of Ψ from the paper. I have sin(π/2/X(x-xc)). # This setup makes more sense to me though. 
_Z::FT = FT(15000)
        _X::FT = FT(10000)
        _xc::FT = FT(30000)
        _A::FT = FT(4.8 * 1e4)
        _S::FT = FT(2.5 * 1e-2) * FT(0.01) #TODO
        _ρ_00::FT = FT(1)
        ρu::FT = FT(0)
        ρw::FT = FT(0)
        # Common factor in the streamfunction derivative:
        # (A/ρ_00) * d(ρ sin(πz/Z))/dz expanded by the product rule.
        fact =
            _A / _ρ_00 * (
                init_ρ(z) * FT(π) / _Z * cos(FT(π) / _Z * z) +
                init_dρ(z) * sin(FT(π) / _Z * z)
            )
        # Piecewise momentum: linear shear _S*z everywhere; the eddy
        # contribution ±fact outside |x - xc| < X and a sin/cos-tapered eddy
        # (nonzero ρw) inside, all only for element centers with zc < _Z.
        if zc < _Z
            if x >= (_xc + _X)
                ρu = _S * z - fact
                ρw = FT(0)
            elseif x <= (_xc - _X)
                ρu = _S * z + fact
                ρw = FT(0)
            else
                ρu = _S * z - fact * sin(FT(π / 2.0) / _X * (x - _xc))
                ρw =
                    _A * init_ρ(z) / _ρ_00 * FT(π / 2.0) / _X *
                    sin(FT(π) / _Z * z) *
                    cos(FT(π / 2.0) / _X * (x - _xc))
            end
        else
            ρu = _S * z
            ρw = FT(0)
        end
        state.ρu = SVector(ρu, FT(0), ρw)
        u::FT = ρu / ρ
        w::FT = ρw / ρ

        # energy: total = kinetic + potential + internal
        e_kin::FT = 1 // 2 * (u^2 + w^2)
        e_pot::FT = _grav * z
        e_int::FT = internal_energy(param_set, T, q_pt_0)
        e_tot::FT = e_kin + e_pot + e_int
        state.ρe = ρ * e_tot
    end
    return nothing
end

# Recompute the diagnostic (auxiliary) fields from the prognostic state at
# each node. At t == 0 it also snapshots ρe and ρq_tot into the aux state
# (ρe_init, ρq_tot_init) for later use in the boundary conditions.
function nodal_update_auxiliary_state!(
    m::KinematicModel,
    state::Vars,
    aux::Vars,
    t::Real,
)
    FT = eltype(state)
    _grav::FT = grav(param_set)
    _T_freeze::FT = T_freeze(param_set)
    @inbounds begin
        if t == FT(0)
            aux.ρe_init = state.ρe
            aux.ρq_tot_init = state.ρq_tot
        end
        # velocity
        aux.u = state.ρu[1] / state.ρ
        aux.w = state.ρu[3] / state.ρ
        # water: specific quantities recovered by dividing densities by ρ
        aux.q_tot = state.ρq_tot / state.ρ
        aux.q_liq = state.ρq_liq / state.ρ
        aux.q_ice = state.ρq_ice / state.ρ
        aux.q_rai = state.ρq_rai / state.ρ
        aux.q_sno = state.ρq_sno / state.ρ
        aux.q_vap = aux.q_tot - aux.q_liq - aux.q_ice
        # energy
        aux.e_tot = state.ρe / state.ρ
        aux.e_kin = 1 // 2 * (aux.u^2 + aux.w^2)
        aux.e_pot = _grav * aux.z_coord
        aux.e_int = aux.e_tot - aux.e_kin - aux.e_pot
        # supersaturation (clipped at zero) and RH in percent
        q = PhasePartition(aux.q_tot, aux.q_liq, aux.q_ice)
        aux.T = air_temperature(param_set, aux.e_int, q)
        ts_neq = PhaseNonEquil_ρTq(param_set, state.ρ, aux.T, q)
        aux.S_liq = max(0, supersaturation(ts_neq, Liquid()))
        aux.S_ice = max(0, supersaturation(ts_neq, Ice()))
        aux.RH = relative_humidity(ts_neq) * FT(100)
        # precipitation terminal velocities
        aux.rain_w =
            terminal_velocity(param_set, CM1M.RainType(), state.ρ, aux.q_rai)
        aux.snow_w =
terminal_velocity(param_set, CM1M.SnowType(), state.ρ, aux.q_sno) # more diagnostics ts_eq = PhaseEquil_ρTq(param_set, state.ρ, aux.T, aux.q_tot) q_eq = PhasePartition(ts_eq) aux.src_cloud_liq = conv_q_vap_to_q_liq_ice(param_set, CM1M.LiquidType(), q_eq, q) aux.src_cloud_ice = conv_q_vap_to_q_liq_ice(param_set, CM1M.IceType(), q_eq, q) aux.src_rain_acnv = conv_q_liq_to_q_rai(param_set, aux.q_liq) aux.src_snow_acnv = conv_q_ice_to_q_sno(param_set, q, state.ρ, aux.T) aux.src_liq_rain_accr = accretion( param_set, CM1M.LiquidType(), CM1M.RainType(), aux.q_liq, aux.q_rai, state.ρ, ) aux.src_liq_snow_accr = accretion( param_set, CM1M.LiquidType(), CM1M.SnowType(), aux.q_liq, aux.q_sno, state.ρ, ) aux.src_ice_snow_accr = accretion( param_set, CM1M.IceType(), CM1M.SnowType(), aux.q_ice, aux.q_sno, state.ρ, ) aux.src_ice_rain_accr = accretion( param_set, CM1M.IceType(), CM1M.RainType(), aux.q_ice, aux.q_rai, state.ρ, ) aux.src_rain_accr_sink = accretion_rain_sink(param_set, aux.q_ice, aux.q_rai, state.ρ) if aux.T < _T_freeze aux.src_snow_rain_accr = accretion_snow_rain( param_set, CM1M.SnowType(), CM1M.RainType(), aux.q_sno, aux.q_rai, state.ρ, ) else aux.src_snow_rain_accr = accretion_snow_rain( param_set, CM1M.RainType(), CM1M.SnowType(), aux.q_rai, aux.q_sno, state.ρ, ) end aux.src_rain_evap = evaporation_sublimation( param_set, CM1M.RainType(), q, aux.q_rai, state.ρ, aux.T, ) aux.src_snow_subl = evaporation_sublimation( param_set, CM1M.SnowType(), q, aux.q_sno, state.ρ, aux.T, ) aux.src_snow_melt = snow_melt(param_set, aux.q_sno, state.ρ, aux.T) aux.flag_cloud_liq = FT(0) aux.flag_cloud_ice = FT(0) aux.flag_rain = FT(0) aux.flag_snow = FT(0) if (aux.q_liq >= FT(0)) aux.flag_cloud_liq = FT(1) end if (aux.q_ice >= FT(0)) aux.flag_cloud_ice = FT(1) end if (aux.q_rai >= FT(0)) aux.flag_rain = FT(1) end if (aux.q_sno >= FT(0)) aux.flag_snow = FT(1) end end end function boundary_state!( ::RusanovNumericalFlux, bctype, m::KinematicModel, state⁺, aux⁺, n, state⁻, aux⁻, t, 
args...,
)
    # Boundary numbering (set in main()):
    #   1 - left   (x = 0)
    #   2 - right  (x = xmax; original note said "x = -1" — presumably the
    #       last index, i.e. the xmax face; confirm against grid setup)
    #   3,4 - y faces (periodic, never reached)
    #   5 - bottom (z = 0)
    #   6 - top    (z = zmax)
    FT = eltype(state⁻)
    # default inflow-like state: keep density, restore initial energy and
    # total water, zero out all condensate and precipitation
    @inbounds state⁺.ρ = state⁻.ρ
    @inbounds state⁺.ρe = aux⁻.ρe_init
    @inbounds state⁺.ρq_tot = aux⁻.ρq_tot_init
    @inbounds state⁺.ρq_liq = FT(0) #state⁻.ρq_liq
    @inbounds state⁺.ρq_ice = FT(0) #state⁻.ρq_ice
    @inbounds state⁺.ρq_rai = FT(0)
    @inbounds state⁺.ρq_sno = FT(0)
    if bctype == 1
        # left: keep horizontal momentum, kill the rest
        @inbounds state⁺.ρu = SVector(state⁻.ρu[1], FT(0), FT(0))
    end
    if bctype == 2
        # right: outflow — copy interior energy and moisture
        @inbounds state⁺.ρu = SVector(state⁻.ρu[1], FT(0), FT(0))
        @inbounds state⁺.ρe = state⁻.ρe
        @inbounds state⁺.ρq_tot = state⁻.ρq_tot
        @inbounds state⁺.ρq_liq = state⁻.ρq_liq
        @inbounds state⁺.ρq_ice = state⁻.ρq_ice
    end
    if bctype == 5
        # bottom: reflect the normal momentum component (free-slip wall)
        @inbounds state⁺.ρu -= 2 * dot(state⁻.ρu, n) .* SVector(n)
    end
    if bctype == 6
        # top: zero only the y-component
        @inbounds state⁺.ρu = SVector(state⁻.ρu[1], FT(0), state⁻.ρu[3])
    end
end

"""
Upper bound on the signal speed used by the Rusanov flux: horizontal
advective speed plus the largest of the vertical air, rain-relative and
snow-relative speeds.
"""
@inline function wavespeed(
    m::KinematicModel,
    nM,
    state::Vars,
    aux::Vars,
    t::Real,
    _...,
)
    FT = eltype(state)
    @inbounds begin
        vel = state.ρu / state.ρ
        q_rai::FT = state.ρq_rai / state.ρ
        q_sno::FT = state.ρq_sno / state.ρ
        rain_w = terminal_velocity(param_set, CM1M.RainType(), state.ρ, q_rai)
        snow_w = terminal_velocity(param_set, CM1M.SnowType(), state.ρ, q_sno)
        speed =
            nM[1] * vel[1] +
            nM[3] *
            max(vel[3], rain_w, snow_w, vel[3] - rain_w, vel[3] - snow_w)
    end
    return abs(speed)
end

@inline function flux_first_order!(
    m::KinematicModel,
    flux::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
    _...,
)
    FT = eltype(state)
    @inbounds begin
        q_rai::FT = state.ρq_rai / state.ρ
        q_sno::FT = state.ρq_sno / state.ρ
        rain_w = terminal_velocity(param_set, CM1M.RainType(), state.ρ, q_rai)
        snow_w = terminal_velocity(param_set, CM1M.SnowType(), state.ρ, q_sno)
        # advect moisture ...
flux.ρ = SVector(state.ρu[1], FT(0), state.ρu[3])
        flux.ρq_tot = SVector(
            state.ρu[1] * state.ρq_tot / state.ρ,
            FT(0),
            state.ρu[3] * state.ρq_tot / state.ρ,
        )
        flux.ρq_liq = SVector(
            state.ρu[1] * state.ρq_liq / state.ρ,
            FT(0),
            state.ρu[3] * state.ρq_liq / state.ρ,
        )
        flux.ρq_ice = SVector(
            state.ρu[1] * state.ρq_ice / state.ρ,
            FT(0),
            state.ρu[3] * state.ρq_ice / state.ρ,
        )
        # rain and snow additionally sediment with their terminal velocities
        flux.ρq_rai = SVector(
            state.ρu[1] * state.ρq_rai / state.ρ,
            FT(0),
            (state.ρu[3] / state.ρ - rain_w) * state.ρq_rai,
        )
        flux.ρq_sno = SVector(
            state.ρu[1] * state.ρq_sno / state.ρ,
            FT(0),
            (state.ρu[3] / state.ρ - snow_w) * state.ρq_sno,
        )
        # ... energy ...
        flux.ρe = SVector(
            state.ρu[1] / state.ρ * (state.ρe + aux.p),
            FT(0),
            state.ρu[3] / state.ρ * (state.ρe + aux.p),
        )
        # ... and don't advect momentum (kinematic setup)
    end
end

"""
    source!(m::KinematicModel, source, state, diffusive, aux, t, direction)

Accumulate the 1-moment microphysics tendencies for all water species and
for total energy: condensation/deposition, autoconversion, the accretion
pairs, rain evaporation, snow sublimation and snow melt. Each mass transfer
carries a matching internal-energy adjustment relative to `T_0`.
"""
function source!(
    m::KinematicModel,
    source::Vars,
    state::Vars,
    diffusive::Vars,
    aux::Vars,
    t::Real,
    direction,
)
    FT = eltype(state)
    _grav::FT = grav(param_set)
    _e_int_v0::FT = e_int_v0(param_set)
    _e_int_i0::FT = e_int_i0(param_set)
    _cv_d::FT = cv_d(param_set)
    _cv_v::FT = cv_v(param_set)
    _cv_l::FT = cv_l(param_set)
    _cv_i::FT = cv_i(param_set)
    _T_0::FT = T_0(param_set)
    _T_freeze = T_freeze(param_set)
    @inbounds begin
        # specific (per unit mass) quantities
        e_tot = state.ρe / state.ρ
        q_tot = state.ρq_tot / state.ρ
        q_liq = state.ρq_liq / state.ρ
        q_ice = state.ρq_ice / state.ρ
        q_rai = state.ρq_rai / state.ρ
        q_sno = state.ρq_sno / state.ρ
        u = state.ρu[1] / state.ρ
        w = state.ρu[3] / state.ρ
        ρ = state.ρ
        # diagnose temperature from internal energy
        e_int = e_tot - 1 // 2 * (u^2 + w^2) - _grav * aux.z_coord
        q = PhasePartition(q_tot, q_liq, q_ice)
        T = air_temperature(param_set, e_int, q)
        _Lf = latent_heat_fusion(param_set, T)
        # equilibrium state at current T
        ts_eq = PhaseEquil_ρTq(param_set, state.ρ, T, q_tot)
        q_eq = PhasePartition(ts_eq)
        # zero out the source terms
        source.ρq_tot = FT(0)
        source.ρq_liq = FT(0)
        source.ρq_ice = FT(0)
        source.ρq_rai = FT(0)
        source.ρq_sno = FT(0)
        source.ρe = FT(0)
        # vapour -> cloud liquid water
        source.ρq_liq +=
            ρ * conv_q_vap_to_q_liq_ice(param_set, CM1M.LiquidType(), q_eq, q)
        # vapour -> cloud ice
        source.ρq_ice +=
            ρ * conv_q_vap_to_q_liq_ice(param_set, CM1M.IceType(), q_eq, q)
        ## cloud liquid water -> rain (autoconversion)
        acnv = ρ * conv_q_liq_to_q_rai(param_set, q_liq)
        source.ρq_liq -= acnv
        source.ρq_tot -= acnv
        source.ρq_rai += acnv
        source.ρe -= acnv * (_cv_l - _cv_d) * (T - _T_0)
        ## cloud ice -> snow (autoconversion)
        acnv = ρ * conv_q_ice_to_q_sno(param_set, q, state.ρ, T)
        source.ρq_ice -= acnv
        source.ρq_tot -= acnv
        source.ρq_sno += acnv
        source.ρe -= acnv * ((_cv_i - _cv_d) * (T - _T_0) - _e_int_i0)
        # cloud liquid water + rain -> rain
        accr = ρ * accretion(
            param_set,
            CM1M.LiquidType(),
            CM1M.RainType(),
            q_liq,
            q_rai,
            state.ρ,
        )
        source.ρq_liq -= accr
        source.ρq_tot -= accr
        source.ρq_rai += accr
        source.ρe -= accr * (_cv_l - _cv_d) * (T - _T_0)
        # cloud ice + snow -> snow
        accr = ρ * accretion(
            param_set,
            CM1M.IceType(),
            CM1M.SnowType(),
            q_ice,
            q_sno,
            state.ρ,
        )
        source.ρq_ice -= accr
        source.ρq_tot -= accr
        source.ρq_sno += accr
        source.ρe -= accr * ((_cv_i - _cv_d) * (T - _T_0) - _e_int_i0)
        # cloud liquid water + snow -> snow (below freezing)
        #                           -> rain (above freezing, snow melts)
        accr = ρ * accretion(
            param_set,
            CM1M.LiquidType(),
            CM1M.SnowType(),
            q_liq,
            q_sno,
            state.ρ,
        )
        if T < _T_freeze
            source.ρq_liq -= accr
            source.ρq_tot -= accr
            source.ρq_sno += accr
            source.ρe -= accr * ((_cv_i - _cv_d) * (T - _T_0) - _e_int_i0)
        else
            source.ρq_liq -= accr
            source.ρq_tot -= accr
            source.ρq_sno -= accr * (_cv_l / _Lf * (T - _T_freeze))
            source.ρq_rai += accr * (FT(1) + _cv_l / _Lf * (T - _T_freeze))
            source.ρe +=
                -accr *
                ((_cv_l - _cv_d) * (T - _T_0) + _cv_l * (T - _T_freeze))
        end
        # cloud ice + rain -> snow (the collided rain freezes too)
        accr = ρ * accretion(
            param_set,
            CM1M.IceType(),
            CM1M.RainType(),
            q_ice,
            q_rai,
            state.ρ,
        )
        accr_rain_sink = ρ * accretion_rain_sink(param_set, q_ice, q_rai, state.ρ)
        source.ρq_ice -= accr
        source.ρq_tot -= accr
        source.ρq_rai -= accr_rain_sink
        source.ρq_sno += accr + accr_rain_sink
        source.ρe +=
            accr_rain_sink * _Lf -
            accr * ((_cv_i - _cv_d) * (T - _T_0) - _e_int_i0)
        # rain + snow -> snow (below freezing) or rain (above freezing)
        if T < _T_freeze
            accr = ρ * accretion_snow_rain(
                param_set,
                CM1M.SnowType(),
                CM1M.RainType(),
                q_sno,
                q_rai,
                state.ρ,
            )
            source.ρq_sno += accr
            source.ρq_rai -= accr
            source.ρe += accr * _Lf
        else
            accr = ρ * accretion_snow_rain(
                param_set,
                CM1M.RainType(),
                CM1M.SnowType(),
                q_rai,
                q_sno,
                state.ρ,
            )
            source.ρq_rai += accr
            source.ρq_sno -= accr
            source.ρe -= accr * _Lf
        end
        # rain -> vapour (evaporation; the rate is signed)
        evap = ρ * evaporation_sublimation(
            param_set,
            CM1M.RainType(),
            q,
            q_rai,
            state.ρ,
            T,
        )
        source.ρq_rai += evap
        source.ρq_tot -= evap
        source.ρe -= evap * (_cv_l - _cv_d) * (T - _T_0)
        # snow -> vapour (sublimation/deposition; the rate is signed)
        subl = ρ * evaporation_sublimation(
            param_set,
            CM1M.SnowType(),
            q,
            q_sno,
            state.ρ,
            T,
        )
        source.ρq_sno += subl
        source.ρq_tot -= subl
        source.ρe -= subl * ((_cv_i - _cv_d) * (T - _T_0) - _e_int_i0)
        # snow -> rain (melting)
        melt = ρ * snow_melt(param_set, q_sno, state.ρ, T)
        source.ρq_sno -= melt
        source.ρq_rai += melt
        source.ρe -= melt * _Lf
    end
end

function main()
    # Working precision
    FT = Float64
    # DG polynomial order
    N = 4
    # Domain resolution and size
    Δx = FT(500)
    Δy = FT(1)
    Δz = FT(250)
    resolution = (Δx, Δy, Δz)
    # Domain extents
    xmax = 90000
    ymax = 10
    zmax = 16000
    # initial configuration
    wmax = FT(0.6)     # max velocity of the eddy [m/s]
    θ_0 = FT(289)      # init. theta value (const) [K]
    p_0 = FT(101500)   # surface pressure [Pa]
    p_1000 = FT(100000) # reference pressure in theta definition [Pa]
    qt_0 = FT(7.5 * 1e-3) # init. total water specific humidity (const) [kg/kg]
    z_0 = FT(0)        # surface height
    # time stepping
    t_ini = FT(0)
    t_end = FT(5 * 60) #FT(4 * 60 * 60) #TODO
    dt = FT(0.25)
    #CFL = FT(1.75)
    filter_freq = 1
    output_freq = 1200
    interval = "1200steps"
    # periodicity and boundary numbers
    periodicity_x = false
    periodicity_y = true
    periodicity_z = false
    idx_bc_left = 1
    idx_bc_right = 2
    idx_bc_front = 3
    idx_bc_back = 4
    idx_bc_bottom = 5
    idx_bc_top = 6
    #!
format: off z_range = [ 0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300, 3400, 3500, 3600, 3700, 3800, 3900, 4000, 4100, 4200, 4300, 4400, 4500, 4600, 4700, 4800, 4900, 5000, 5100, 5200, 5300, 5400, 5500, 5600, 5700, 5800, 5900, 6000, 6100, 6200, 6300, 6400, 6500, 6600, 6700, 6800, 6900, 7000, 7100, 7200, 7300, 7400, 7500, 7600, 7700, 7800, 7900, 8000, 8100, 8200, 8300, 8400, 8500, 8600, 8700, 8800, 8900, 9000, 9100, 9200, 9300, 9400, 9500, 9600, 9700, 9800, 9900, 10000, 10100, 10200, 10300, 10400, 10500, 10600, 10700, 10800, 10900, 11000, 11100, 11200, 11300, 11400, 11500, 11600, 11700, 11800, 11900, 12000, 12100, 12200, 12300, 12400, 12500, 12600, 12700, 12800, 12900, 13000, 13100, 13200, 13300, 13400, 13500, 13600, 13700, 13800, 13900, 14000, 14100, 14200, 14300, 14400, 14500, 14600, 14700, 14800, 14900, 15000, 15100, 15200, 15300, 15400, 15500, 15600, 15700, 15800, 15900, 16000, 16100, 16200, 16300, 16400, 16500, 16600, 16700, 16800, 16900, 17000] T_range = [ 299.184, 297.628892697526, 296.498576625316, 295.708541362848, 295.174276489603, 294.811271585061, 294.5350162287, 294.261, 293.920421043582, 293.507311764631, 293.031413133474, 292.502466120435, 291.930211695841, 291.324390830018, 290.694744493293, 290.051013655991, 289.402939288437, 288.760262346828, 288.130849318341, 287.515505258001, 286.913372692428, 286.323594148241, 285.74531215206, 285.177669230506, 284.619807910198, 284.070870717755, 283.530000179798, 282.996338822946, 282.469029173819, 281.947213759037, 281.43003510522, 280.916635738988, 280.406158186959, 279.897744975755, 279.390538631995, 278.883681682298, 278.376316653285, 277.867586071576, 277.356632463789, 276.842598356546, 276.324685598782, 275.802667484647, 275.276636664165, 274.746689276312, 274.212921460063, 273.675429354393, 273.134309098278, 272.589656830692, 272.041568690611, 271.49014081701, 
270.935469348864, 270.377650425148, 269.816780184838, 269.252954766908, 268.686270310335, 268.116822954093, 267.544708837157, 266.970024098502, 266.392864877104, 265.813327311938, 265.231507541979, 264.64747745881, 264.060960130126, 263.47141707779, 262.878303486873, 262.281074542448, 261.679185429586, 261.072091333358, 260.459247438837, 259.840108931094, 259.214130995201, 258.580768816228, 257.939477579249, 257.289712469334, 256.630928671556, 255.962581370986, 255.284125752695, 254.595017001755, 253.894710327766, 253.182887991267, 252.46001426264, 251.726721646298, 250.983642646651, 250.231409768111, 249.470655515088, 248.702012391995, 247.926112903242, 247.14358955324, 246.355074846402, 245.561201287137, 244.762601379858, 243.959907628976, 243.153728314245, 242.344349049499, 241.531825963971, 240.716210279609, 239.897553218363, 239.075906002182, 238.251319853016, 237.423845992814, 236.593535643524, 235.760440027097, 234.924610365481, 234.086097880626, 233.244953794481, 232.401229328996, 231.554975706119, 230.7062441478, 229.855085875989, 229.001552112634, 228.145694079684, 227.287574051742, 226.42757986392, 225.566470305038, 224.705024343724, 223.84402094861, 222.984239088323, 222.126457731494, 221.271455846752, 220.420012402726, 219.572906368047, 218.730916711342, 217.894822401242, 217.065402406377, 216.243435695375, 215.429701236865, 214.624977999479, 213.830040741192, 213.045586818019, 212.272245655601, 211.51064437337, 210.761410090759, 210.0251699272, 209.302551002126, 208.594180434969, 207.90068534516, 207.222682432974, 206.560708873131, 205.915265185628, 205.286851694754, 204.675968724799, 204.083116600052, 203.508795644802, 202.95350618334, 202.417748539953, 201.902023577843, 201.407120974388, 200.934594512862, 200.486122895611, 200.063384824981, 199.668059003318, 199.301824132969, 198.96635891628, 198.663342055596, 198.394452253265, 198.161279286222, 197.963547643009, 197.799211358657, 197.666154525954, 197.562261237686, 197.485415586642, 
197.433501665607, 197.40440356737, 197.396005384718, 197.406191210439, 197.432899562191, 197.47476703369, 197.530913717737, 197.600469393593, 197.682563840515, 197.776326837764] q_range = [ 0.0162321692080669, 0.0167673003973785, 0.0169842024326598, 0.0169337519538153, 0.0166663497239751, 0.0162321692080669, 0.015681352854829, 0.0150641570577189, 0.0144310460364482, 0.0138327343680399, 0.013320177602368, 0.012928720727755, 0.0126305522364067, 0.0123819207241334, 0.0121389587958033, 0.0118577075098814, 0.0115066332638913, 0.0111042588941075, 0.010681699769686, 0.0102701709364724, 0.0099009900990099, 0.00959624936114117, 0.00934081059469477, 0.00911016542683696, 0.00887975233624587, 0.0086249628234361, 0.00832855696312967, 0.0080029519599734, 0.00766802230757524, 0.00734369421478042, 0.007049945387747, 0.00680137732291751, 0.00659092388806814, 0.00640607314371358, 0.00623428890344645, 0.00606301560481065, 0.00588287130354119, 0.005697230175774, 0.00551266483334836, 0.00533575758863248, 0.00517309988062077, 0.00502869447640062, 0.004896158939909, 0.00476650506495816, 0.00463073495294506, 0.00447984071677452, 0.00430803604057719, 0.00412246330674869, 0.0039335098478028, 0.00375157835183152, 0.00358708648864089, 0.00344726315880705, 0.00332653452987796, 0.00321611598159287, 0.00310721394018452, 0.00299102691924227, 0.00286116318208214, 0.00272089896736183, 0.00257593295768495, 0.00243197016394643, 0.00229472213908012, 0.00216860732664965, 0.00205285001385606, 0.00194537329026098, 0.00184409796137261, 0.00174694285001248, 0.0016522111336286, 0.00155974942036123, 0.00146979043510184, 0.0013825670343689, 0.00129831219414761, 0.00121701485317001, 0.001137687161242, 0.00105909638608903, 0.000980009009141718, 0.00089919072834449, 0.000815997019873583, 0.000732145660692171, 0.00064994635423271, 0.000571710312185518, 0.000499750124937531, 0.000435858502312957, 0.000379744409038409, 0.000330595173284745, 0.000287597487595375, 0.000249937515621095, 0.000216895300299521, 
0.000188127832006747, 0.000163386230957203, 0.000142421466578296, 0.000124984376952881, 0.000110689772327441, 9.86086827423503e-05, 8.7676088112503e-05, 7.68268601082915e-05, 6.49957752746072e-05, 5.15108887771554e-05, 3.72737503739521e-05, 2.35794100864691e-05, 1.17230575137195e-05, 2.999991000027e-06, 1.69619792857815e-06, 3.07897588648539e-06, 2.26364535967942e-06, 3.65542843474416e-07, 1.49999775000338e-06, 2.45283190408077e-06, 2.55351934700394e-06, 2.09779045350945e-06, 1.38137398580681e-06, 6.99999510000343e-07, 2.86443287526343e-07, 1.21662921693633e-07, 1.23660803334821e-07, 2.10439089554136e-07, 2.99999910000027e-07, 3.27788493770303e-07, 2.99022297121994e-07, 2.36361815382337e-07, 1.62467544245733e-07, 9.9999990000001e-08, 6.64022211520834e-08, 5.82475139663064e-08, 6.68916919016523e-08, 8.36905765913686e-08, 9.9999990000001e-08, 1.09002484657468e-07, 1.11187531664659e-07, 1.0887133139317e-07, 1.04370084083517e-07, 9.9999990000001e-08, 9.75877787592422e-08, 9.70022971221656e-08, 9.76229211090621e-08, 9.88290267308115e-08, 9.9999990000001e-08, 1.00646340200832e-07, 1.00803219684934e-07, 1.0063692406888e-07, 1.00313748968563e-07, 9.9999990000001e-08, 9.98268004299175e-08, 9.97847641264901e-08, 9.9829322608123e-08, 9.99159173931719e-08, 9.9999990000001e-08, 1.00046398078965e-07, 1.00057663808278e-07, 1.0004572549811e-07, 1.00022521458629e-07, 9.9999990000001e-08, 9.99875472541912e-08, 9.99845206403455e-08, 9.99877153994048e-08, 9.99939367723098e-08, 9.9999990000001e-08, 1.00003352904274e-07, 1.00004193630342e-07, 1.00003352904274e-07, 1.00001671452137e-07, 9.9999990000001e-08, 9.99989811287191e-08, 9.99986448382919e-08, 9.99988129835055e-08, 9.99993174191465e-08, 9.9999990000001e-08, 1.00000662580856e-07, 1.00001167016497e-07, 1.0000133516171e-07, 1.00000998871283e-07, 9.9999990000001e-08] p_range = [ 101500, 100315.025749516, 99139.9660271538, 97974.7710180798, 96819.3909074598, 95673.7758804597, 94537.876176083, 93411.6422486837, 92295.0246064536, 
91187.9737719499, 90090.4404029628, 89002.3752464385, 87923.7291106931, 86854.4528968052, 85794.49764766, 84743.814421206, 83702.3543252787, 82670.0686672607, 81646.9088044215, 80632.826109327, 79627.7720985403, 78631.6983820215, 77644.5566288927, 76666.2986022276, 75696.8762101083, 74736.2413760207, 73784.3460742183, 72841.1424820254, 71906.5828275339, 70980.6193544154, 70063.204453007, 69154.2906087673, 68253.8303673938, 67361.7763702768, 66478.0814065118, 65602.6982808849, 64735.579849904, 63876.6791769641, 63025.949377182, 62183.3435815507, 61348.8150705175, 60522.317221463, 59703.8034731598, 58893.2273619179, 58090.542574603, 57295.7028140736, 56508.6618359204, 55729.3736066619, 54957.7921455483, 54193.8714880201, 53437.5658219265, 52688.8294339685, 51947.6166734621, 51213.8819892159, 50487.5799836161, 49768.6652753625, 49057.0925369589, 48352.8166561238, 47655.7925743794, 46965.9752497711, 46283.3197958903, 45607.7814272171, 44939.3154221458, 44277.8771606411, 43623.4221794563, 42975.9060319998, 42335.2843266238, 41701.5128914544, 41074.5476095614, 40454.3443808926, 39840.8592642808, 39234.0484216161, 38633.868080086, 38040.274570662, 37453.2243845259, 36872.6740298782, 36298.5800710783, 35730.8992971221, 35169.5885531642, 34614.6047016162, 34065.9047673383, 33523.4458805626, 32987.1852382959, 32457.0801436939, 31933.0880637786, 31415.1664829788, 30903.2729431824, 30397.3652161132, 29897.4011309542, 29403.3385345499, 28915.135440009, 28432.7499682896, 27956.1403087067, 27485.2647592591, 27020.0817857294, 26560.5498717234, 26106.6275597007, 25658.2736275352, 25215.4469119545, 24778.106267783, 24346.2107202062, 23919.7194049202, 23498.5915276787, 23082.7864056438, 22672.2635279751, 22266.9824021027, 21866.9025958119, 21471.9839183077, 21082.1862391501, 20697.469446465, 20317.7936031548, 19943.1188855014, 19573.4055416778, 19208.6139342067, 18848.7046021555, 18493.6381033452, 18143.3750575744, 17797.8763325512, 17457.1028579612, 17121.0155825635, 
16789.575634671, 16462.7442590809, 16140.4827744667, 15822.752617039, 15509.5154044811, 15200.7327737537, 14896.3664255555, 14596.378315538, 14300.7304630906, 14009.3849072278, 13722.3038717104, 13439.4497001572, 13160.7848122237, 12886.2717485712, 12615.8732366994, 12349.552023955, 12087.2709233428, 11828.9930105004, 11574.6814267238, 11324.2993335354, 11077.8100828701, 10835.1771502091, 10596.3640894538, 10361.3345792892, 10130.0524910676, 9902.48171660711, 9678.58621552163, 9458.33021860609, 9241.67802445076, 9028.59395243382, 8819.04251762682, 8612.98836336888, 8410.3962187252, 8211.2309379189, 8015.45756519932, 7823.04116500174, 7633.94687886457, 7448.14015673894, 7265.58652567913, 7086.25151273945, 6910.10064497419, 6910.10064497419, 6910.10064497419, 6910.10064497419, 6910.10064497419, 6910.10064497419, 6910.10064497419, 6910.10064497419, 6910.10064497419, 6910.10064497419, 6910.10064497419 ] ρ_range = [ 1.17051362179725, 1.16251824590627, 1.15313016112624, 1.1426566568231, 1.13140764159872, 1.1196895485888, 1.10780103484676, 1.0960305075832, 1.08459745274435, 1.07348344546654, 1.06261389733783, 1.05192695939713, 1.04140273218196, 1.03103180334705, 1.02080494033636, 1.01071303046314, 1.00073949487493, 0.990838128123433, 0.980963129632192, 0.971094063252625, 0.961216822759604, 0.951323251200104, 0.941426814080701, 0.931545806522527, 0.921697879536919, 0.911900034022987, 0.902164578162167, 0.892487313537494, 0.882860249793038, 0.873275726601859, 0.863726411598162, 0.854208105591224, 0.844727922437057, 0.8352954667279, 0.825919969478444, 0.816610286976537, 0.807373343850795, 0.798209593686815, 0.789117941453129, 0.780097337389561, 0.771146612530646, 0.762264270399455, 0.753452744759353, 0.74471551136303, 0.736055874178972, 0.72747696742561, 0.718980350842218, 0.7105619137831, 0.70221632469523, 0.693938467344086, 0.685723438615901, 0.677567861578885, 0.669473709625074, 0.661444143644113, 0.653482167320322, 0.645590627902435, 0.637771283999691, 0.630022091606339, 
0.622340165988717, 0.614722736979947, 0.607167148420651, 0.599671383580347, 0.592236161810936, 0.584863205035121, 0.577554164218497, 0.570310604494631, 0.563133875176496, 0.556024730482117, 0.548983747911074, 0.542011464088805, 0.535108377645103, 0.528275027848979, 0.521512222336075, 0.514820790002398, 0.508201503057759, 0.501655076786909, 0.495181994058148, 0.488781994213898, 0.482454636660537, 0.476199055919507, 0.470012952419531, 0.463893943289264, 0.457840307961496, 0.451850519497349, 0.445923093407251, 0.440056584487831, 0.434249561420135, 0.428500536139493, 0.422808039743611, 0.41717064648228, 0.411586974922854, 0.406055719826046, 0.40057578774956, 0.395146674561446, 0.389768253950893, 0.384440397458089, 0.37916287643901, 0.373935102771167, 0.36875641578014, 0.363626174398787, 0.358543758947717, 0.353508656205969, 0.348520707739897, 0.343579834253573, 0.338685947906162, 0.333838950271578, 0.329038686145689, 0.324284810799633, 0.319576938026155, 0.314914687777366, 0.310297688380068, 0.305725572307512, 0.301197591585183, 0.296712545171569, 0.292269251091959, 0.287866570849034, 0.283503406532332, 0.279178694598262, 0.27489141222907, 0.270640579366203, 0.266425260909222, 0.262244566294465, 0.258097650982816, 0.25398371429053, 0.249901999425458, 0.245851790781137, 0.241832414286703, 0.237843244370408, 0.233883781756254, 0.229953637104228, 0.226052458592054, 0.222179928272777, 0.218335761470863, 0.214519707224664, 0.210731549222293, 0.206971102905131, 0.203238226271481, 0.199532888953038, 0.195855122091922, 0.192204980437394, 0.188582544697456, 0.184987920053912, 0.181421234993557, 0.177882641628151, 0.174372316360016, 0.170890455921146, 0.167437037955477, 0.164011457709294, 0.160613093688105, 0.157241413931045, 0.15389597846219, 0.150576437441233, 0.147282529608045, 0.144014082002788, 0.140771009961674, 0.137553374518383, 0.134362580606833, 0.131201172349935, 0.128071588633207, 0.124976110750845, 0.121916861907264, 0.121948919193911, 0.121966894890357, 
0.121972083953105, 0.121965790399749, 0.121949291096701, 0.121923436104917, 0.121888780280552, 0.121845875238272, 0.121795274570523, 0.121737533130168] dρ_range = [ -3.59937517425616e-05, -8.74236524613322e-05, -9.98245979099523e-05, -0.00010912795442958, -0.000115340910679925, -0.000118522777615024, -0.000118766703148947, -0.000116181583122055, -0.00011260864631165, -0.000109795778726265, -0.000107714951035596, -0.000106039943861968, -0.000104460270820561, -0.000102973683474333, -0.000101578762228199, -0.000100274450938608, -9.92861102161202e-05, -9.88281212364185e-05, -9.86963271464388e-05, -9.87085122902761e-05, -9.88588230322522e-05, -9.8981065311496e-05, -9.89171829920543e-05, -9.86735488551799e-05, -9.82566574041453e-05, -9.76729921159812e-05, -9.70500064028184e-05, -9.65086017246105e-05, -9.60454400625446e-05, -9.56572439955241e-05, -9.5340728706457e-05, -9.5008763743794e-05, -9.45788919208772e-05, -9.40548370115828e-05, -9.3440348194004e-05, -9.27391611164088e-05, -9.20016033219379e-05, -9.12752248054719e-05, -9.05595697070144e-05, -8.98541831357713e-05, -8.91654815701201e-05, -8.84752762943482e-05, -8.77494447742739e-05, -8.69897168393693e-05, -8.61978039593136e-05, -8.53753836002353e-05, -8.45662005091732e-05, -8.38114256450538e-05, -8.31088828495735e-05, -8.24564348214017e-05, -8.1851961718636e-05, -8.12540449384666e-05, -8.06237270285041e-05, -7.99625864890717e-05, -7.92721952020196e-05, -7.85541037865398e-05, -7.78377768942751e-05, -7.71508783726086e-05, -7.64922509147597e-05, -7.58607550079355e-05, -7.5255257575603e-05, -7.46578263440909e-05, -7.40437160173945e-05, -7.34126676822411e-05, -7.27655403821961e-05, -7.21031866632626e-05, -7.14303672571544e-05, -7.07515642299482e-05, -7.00671933172211e-05, -6.93776513635009e-05, -6.86833098465315e-05, -6.79822059852417e-05, -6.72725239627913e-05, -6.65548356116998e-05, -6.58297114361597e-05, -6.50977145039132e-05, -6.43646771280071e-05, -6.36360540138981e-05, -6.29118551436968e-05, -6.22041292322341e-05, 
-6.15221570675246e-05, -6.08606412866085e-05, -6.02146109367996e-05, -5.95836319123706e-05, -5.89672983766289e-05, -5.8365225270403e-05, -5.7777757303502e-05, -5.72051962789751e-05, -5.66471081172719e-05, -5.61030583593601e-05, -5.55726043916327e-05, -5.50543285425413e-05, -5.45449966705289e-05, -5.40374636372204e-05, -5.35311627048121e-05, -5.30261985318617e-05, -5.25253559659224e-05, -5.20312184290027e-05, -5.15435901963297e-05, -5.10622691485893e-05, -5.05870395077161e-05, -5.01151309694529e-05, -4.96439716171912e-05, -4.91736441117515e-05, -4.87042469224382e-05, -4.82358875773372e-05, -4.77700486961607e-05, -4.73081018750182e-05, -4.68499874419861e-05, -4.63956358750055e-05, -4.59449597549036e-05, -4.54985344751462e-05, -4.50631284786667e-05, -4.46397691588802e-05, -4.42280098023119e-05, -4.38274214139499e-05, -4.34376409961601e-05, -4.30583023124663e-05, -4.26889789379916e-05, -4.23292356052957e-05, -4.19786196104028e-05, -4.16366783570754e-05, -4.13029635571471e-05, -4.09770310849877e-05, -4.06584588726149e-05, -4.03468387207095e-05, -4.00417403212974e-05, -3.9742477928175e-05, -3.94474194224351e-05, -3.91560587225531e-05, -3.88680420136295e-05, -3.85830382145498e-05, -3.83007173685112e-05, -3.80207275262849e-05, -3.77427401195186e-05, -3.74664417323129e-05, -3.71911016163793e-05, -3.69155924693975e-05, -3.66396540346307e-05, -3.6363043157771e-05, -3.60854967755032e-05, -3.58067813502568e-05, -3.55266676411338e-05, -3.52449049917705e-05, -3.49612716252254e-05, -3.46756639284376e-05, -3.43938806523058e-05, -3.41187600097204e-05, -3.38494075224641e-05, -3.35849193859056e-05, -3.33243734832797e-05, -3.3066881627664e-05, -3.28115645135523e-05, -3.25575278845173e-05, -3.23039138342865e-05, -3.20463462400501e-05, -3.17652064416084e-05, -3.14588936623928e-05, -3.11289784299643e-05, -3.07770405929711e-05, -1.50046969840055e-05, -2.48015587883801e-07, -1.13657079374249e-07, -7.70202792434148e-09, -1.15984516793898e-07, -2.12713494118585e-07, -3.03471417808635e-07, 
-3.88725590955189e-07, -4.68452477358432e-07, -5.42635999340642e-07, -6.1126695981267e-07] #! format: on init_T = Spline1D(z_range, T_range) init_qt = Spline1D(z_range, q_range) init_p = Spline1D(z_range, p_range) init_ρ = Spline1D(z_range, ρ_range) init_dρ = Spline1D(z_range, dρ_range) driver_config, ode_solver_type = config_kinematic_eddy( FT, N, resolution, xmax, ymax, zmax, wmax, θ_0, p_0, p_1000, qt_0, z_0, periodicity_x, periodicity_y, periodicity_z, idx_bc_left, idx_bc_right, idx_bc_front, idx_bc_back, idx_bc_bottom, idx_bc_top, ) solver_config = ClimateMachine.SolverConfiguration( t_ini, t_end, driver_config, (init_T, init_qt, init_p, init_ρ, init_dρ), ode_solver_type = ode_solver_type, ode_dt = dt, init_on_cpu = true, #Courant_number = CFL, ) model = driver_config.bl mpicomm = MPI.COMM_WORLD # get state variables indices for filtering ρq_liq_ind = varsindex(vars_state(model, Prognostic(), FT), :ρq_liq) ρq_ice_ind = varsindex(vars_state(model, Prognostic(), FT), :ρq_ice) ρq_rai_ind = varsindex(vars_state(model, Prognostic(), FT), :ρq_rai) ρq_sno_ind = varsindex(vars_state(model, Prognostic(), FT), :ρq_sno) # get aux variables indices for testing q_tot_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_tot) q_vap_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_vap) q_liq_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_liq) q_ice_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_ice) q_rai_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_rai) q_sno_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_sno) S_liq_ind = varsindex(vars_state(model, Auxiliary(), FT), :S_liq) S_ice_ind = varsindex(vars_state(model, Auxiliary(), FT), :S_ice) rain_w_ind = varsindex(vars_state(model, Auxiliary(), FT), :rain_w) snow_w_ind = varsindex(vars_state(model, Auxiliary(), FT), :snow_w) # filter out negative values cb_tmar_filter = GenericCallbacks.EveryXSimulationSteps(filter_freq) do (init = false) Filters.apply!( solver_config.Q, (:ρq_tot, 
:ρq_liq, :ρq_ice, :ρq_rai, :ρq_sno), solver_config.dg.grid, TMARFilter(), ) nothing end cb_boyd_filter = GenericCallbacks.EveryXSimulationSteps(filter_freq) do (init = false) Filters.apply!( solver_config.Q, (:ρq_tot, :ρq_liq, :ρq_ice, :ρq_rai, :ρq_sno, :ρe, :ρ), solver_config.dg.grid, BoydVandevenFilter(solver_config.dg.grid, 1, 8), ) end # output for paraview # initialize base output prefix directory from rank 0 vtkdir = abspath(joinpath(ClimateMachine.Settings.output_dir, "vtk")) if MPI.Comm_rank(mpicomm) == 0 mkpath(vtkdir) end MPI.Barrier(mpicomm) vtkstep = [0] cb_vtk = GenericCallbacks.EveryXSimulationSteps(output_freq) do (init = false) out_dirname = @sprintf( "microphysics_test_4_mpirank%04d_step%04d", MPI.Comm_rank(mpicomm), vtkstep[1] ) out_path_prefix = joinpath(vtkdir, out_dirname) @info "doing VTK output" out_path_prefix writevtk( out_path_prefix, solver_config.Q, solver_config.dg, flattenednames(vars_state(model, Prognostic(), FT)), solver_config.dg.state_auxiliary, flattenednames(vars_state(model, Auxiliary(), FT)), ) vtkstep[1] += 1 nothing end # output for netcdf boundaries = [ FT(0) FT(0) FT(0) xmax ymax zmax ] interpol = ClimateMachine.InterpolationConfiguration( driver_config, boundaries, resolution, ) dgngrps = [ setup_dump_state_diagnostics( AtmosLESConfigType(), interval, driver_config.name, interpol = interpol, ), setup_dump_aux_diagnostics( AtmosLESConfigType(), interval, driver_config.name, interpol = interpol, ), ] dgn_config = ClimateMachine.DiagnosticsConfiguration(dgngrps) # call solve! 
# for time-integrator
result = ClimateMachine.invoke!(
    solver_config;
    diagnostics_config = dgn_config,
    user_callbacks = (cb_boyd_filter, cb_tmar_filter),
    check_euclidean_distance = true,
)

# Sanity checks: none of the moisture diagnostics went NaN during the run.
max_q_tot = maximum(abs.(solver_config.dg.state_auxiliary[:, q_tot_ind, :]))
@test !isnan(max_q_tot)
max_q_vap = maximum(abs.(solver_config.dg.state_auxiliary[:, q_vap_ind, :]))
@test !isnan(max_q_vap)
max_q_liq = maximum(abs.(solver_config.dg.state_auxiliary[:, q_liq_ind, :]))
@test !isnan(max_q_liq)
max_q_ice = maximum(abs.(solver_config.dg.state_auxiliary[:, q_ice_ind, :]))
@test !isnan(max_q_ice)
max_q_rai = maximum(abs.(solver_config.dg.state_auxiliary[:, q_rai_ind, :]))
@test !isnan(max_q_rai)
max_q_sno = maximum(abs.(solver_config.dg.state_auxiliary[:, q_sno_ind, :]))
@test !isnan(max_q_sno)
end

main()


================================================
FILE: test/Atmos/Parameterizations/Microphysics/KM_saturation_adjustment.jl
================================================

include("KinematicModel.jl")

# Prognostic (conserved) variables for the saturation-adjustment kinematic
# setup: density, momentum, total energy and total water.
function vars_state(m::KinematicModel, ::Prognostic, FT)
    @vars begin
        ρ::FT
        ρu::SVector{3, FT}
        ρe::FT
        ρq_tot::FT
    end
end

# Auxiliary (diagnostic) variables.
function vars_state(m::KinematicModel, ::Auxiliary, FT)
    @vars begin
        # defined in init_state_auxiliary
        p::FT
        x_coord::FT
        z_coord::FT
        # defined in update_aux
        u::FT
        w::FT
        q_tot::FT
        q_vap::FT
        q_liq::FT
        q_ice::FT
        e_tot::FT
        e_kin::FT
        e_pot::FT
        e_int::FT
        T::FT
        S_liq::FT
        RH::FT
    end
end

# Initialize the prognostic state for the prescribed-flow (kinematic) eddy:
# hydrostatic density from the pre-computed pressure `aux.p`, constant total
# water, velocity from the streamfunction derivative, and total energy.
function init_kinematic_eddy!(eddy_model, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord
    FT = eltype(state)
    _grav::FT = grav(param_set)
    dc = eddy_model.data_config
    @inbounds begin
        # density
        q_pt_0 = PhasePartition(dc.qt_0)
        R_m, cp_m, cv_m, γ = gas_constants(param_set, q_pt_0)
        T::FT = dc.θ_0 * (aux.p / dc.p_1000)^(R_m / cp_m)
        ρ::FT = aux.p / R_m / T
        state.ρ = ρ
        # moisture
        state.ρq_tot = ρ * dc.qt_0
        # velocity (derivative of streamfunction)
        ρu::FT =
            dc.wmax * dc.xmax / dc.zmax *
            cos(FT(π) * z / dc.zmax) *
            cos(2 * FT(π) * x / dc.xmax)
        ρw::FT =
            2 * dc.wmax * sin(FT(π) * z / dc.zmax) * sin(2 * π * x / dc.xmax)
        state.ρu = SVector(ρu, FT(0), ρw)
        u::FT = ρu / ρ
        w::FT = ρw / ρ
        # energy
        e_kin::FT = 1 // 2 * (u^2 + w^2)
        e_pot::FT = _grav * z
        e_int::FT = internal_energy(param_set, T, q_pt_0)
        e_tot::FT = e_kin + e_pot + e_int
        state.ρe = ρ * e_tot
    end
    return nothing
end

# Recompute diagnostic variables from the prognostic state; the equilibrium
# phase partition is obtained via saturation adjustment (PhaseEquil_ρeq).
function nodal_update_auxiliary_state!(
    m::KinematicModel,
    state::Vars,
    aux::Vars,
    t::Real,
)
    FT = eltype(state)
    _grav::FT = grav(param_set)
    @inbounds begin
        aux.u = state.ρu[1] / state.ρ
        aux.w = state.ρu[3] / state.ρ
        aux.q_tot = state.ρq_tot / state.ρ
        aux.e_tot = state.ρe / state.ρ
        aux.e_kin = 1 // 2 * (aux.u^2 + aux.w^2)
        aux.e_pot = _grav * aux.z_coord
        aux.e_int = aux.e_tot - aux.e_kin - aux.e_pot
        # saturation adjustment happens here
        ts = PhaseEquil_ρeq(param_set, state.ρ, aux.e_int, aux.q_tot)
        q = PhasePartition(ts)
        aux.T = ts.T
        aux.q_vap = vapor_specific_humidity(q)
        aux.q_liq = q.liq
        aux.q_ice = q.ice
        # TODO: add super_saturation method in moist thermo
        #aux.S = max(0, aux.q_vap / q_vap_saturation(ts) - FT(1)) * FT(100)
        aux.S_liq = max(0, supersaturation(ts, Liquid()))
        aux.RH = relative_humidity(ts)
    end
end

# No-op boundary state for the first-order numerical flux (domain is
# periodic in x and y; nothing is modified at the walls here).
function boundary_state!(
    ::RusanovNumericalFlux,
    bctype,
    m::KinematicModel,
    state⁺,
    aux⁺,
    n,
    state⁻,
    aux⁻,
    t,
    args...,
) end

# Maximum advective wavespeed in direction `nM` (flow velocity only).
@inline function wavespeed(
    m::KinematicModel,
    nM,
    state::Vars,
    aux::Vars,
    t::Real,
    _...,
)
    @inbounds u = state.ρu / state.ρ
    return abs(dot(nM, u))
end

# Advective fluxes for moisture and energy; momentum is not advected
# because the flow is prescribed (kinematic setup).
@inline function flux_first_order!(
    m::KinematicModel,
    flux::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
    direction,
)
    FT = eltype(state)
    @inbounds begin
        # advect moisture ...
        flux.ρq_tot = SVector(
            state.ρu[1] * state.ρq_tot / state.ρ,
            FT(0),
            state.ρu[3] * state.ρq_tot / state.ρ,
        )
        # ... energy ...
        flux.ρe = SVector(
            state.ρu[1] / state.ρ * (state.ρe + aux.p),
            FT(0),
            state.ρu[3] / state.ρ * (state.ρe + aux.p),
        )
        # ... and don't advect momentum (kinematic setup)
    end
end

# No source terms in the saturation-adjustment setup.
source!(::KinematicModel, _...)
= nothing # no source terms in this setup

# Driver: configure the kinematic eddy, run the solver with VTK/NetCDF
# output, then check the saturation-adjustment invariants.
function main()
    # Working precision
    FT = Float64
    # DG polynomial order
    N = 4
    # Domain resolution and size
    Δx = FT(20)
    Δy = FT(1)
    Δz = FT(20)
    resolution = (Δx, Δy, Δz)
    # Domain extents
    xmax = 1500
    ymax = 10
    zmax = 1500
    # initial configuration
    wmax = FT(0.6)  # max velocity of the eddy  [m/s]
    θ_0 = FT(289)  # init. theta value (const)  [K]
    p_0 = FT(101500)  # surface pressure  [Pa]
    p_1000 = FT(100000)  # reference pressure in theta definition  [Pa]
    qt_0 = FT(7.5 * 1e-3)  # init. total water specific humidity (const)  [kg/kg]
    z_0 = FT(0)  # surface height
    # time stepping
    t_ini = FT(0)
    t_end = FT(60 * 30)
    dt = 40
    output_freq = 9
    interval = "9steps"
    # periodicity and boundary numbers
    periodicity_x = true
    periodicity_y = true
    periodicity_z = false
    idx_bc_left = 0
    idx_bc_right = 0
    idx_bc_front = 0
    idx_bc_back = 0
    idx_bc_bottom = 1
    idx_bc_top = 2

    driver_config, ode_solver_type = config_kinematic_eddy(
        FT,
        N,
        resolution,
        xmax,
        ymax,
        zmax,
        wmax,
        θ_0,
        p_0,
        p_1000,
        qt_0,
        z_0,
        periodicity_x,
        periodicity_y,
        periodicity_z,
        idx_bc_left,
        idx_bc_right,
        idx_bc_front,
        idx_bc_back,
        idx_bc_bottom,
        idx_bc_top,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t_ini,
        t_end,
        driver_config;
        ode_solver_type = ode_solver_type,
        ode_dt = dt,
        init_on_cpu = true,
        #Courant_number = CFL,
    )
    mpicomm = MPI.COMM_WORLD

    # output for paraview
    # initialize base prefix directory from rank 0
    vtkdir = abspath(joinpath(ClimateMachine.Settings.output_dir, "vtk"))
    if MPI.Comm_rank(mpicomm) == 0
        mkpath(vtkdir)
    end
    MPI.Barrier(mpicomm)
    model = driver_config.bl
    vtkstep = [0]
    cbvtk = GenericCallbacks.EveryXSimulationSteps(output_freq) do
        out_dirname = @sprintf(
            "new_ex_1_mpirank%04d_step%04d",
            MPI.Comm_rank(mpicomm),
            vtkstep[1]
        )
        out_path_prefix = joinpath(vtkdir, out_dirname)
        @info "doing VTK output" out_path_prefix
        writevtk(
            out_path_prefix,
            solver_config.Q,
            solver_config.dg,
            flattenednames(vars_state(model, Prognostic(), FT)),
            solver_config.dg.state_auxiliary,
            flattenednames(vars_state(model, Auxiliary(), FT)),
        )
        vtkstep[1] += 1
        nothing
    end

    # output for netcdf
    boundaries = [
        FT(0) FT(0) FT(0)
        xmax ymax zmax
    ]
    interpol = ClimateMachine.InterpolationConfiguration(
        driver_config,
        boundaries,
        resolution,
    )
    dgngrps = [
        setup_dump_state_diagnostics(
            AtmosLESConfigType(),
            interval,
            driver_config.name,
            interpol = interpol,
        ),
        setup_dump_aux_diagnostics(
            AtmosLESConfigType(),
            interval,
            driver_config.name,
            interpol = interpol,
        ),
    ]
    dgn_config = ClimateMachine.DiagnosticsConfiguration(dgngrps)

    # get aux variables indices for testing
    q_tot_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_tot)
    q_vap_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_vap)
    q_liq_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_liq)
    q_ice_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_ice)
    S_liq_ind = varsindex(vars_state(model, Auxiliary(), FT), :S_liq)

    # call solve! function for time-integrator
    result = ClimateMachine.invoke!(
        solver_config;
        diagnostics_config = dgn_config,
        user_callbacks = (cbvtk,),
        check_euclidean_distance = true,
    )

    # no supersaturation
    max_S_liq = maximum(abs.(solver_config.dg.state_auxiliary[:, S_liq_ind, :]))
    @test isequal(max_S_liq, FT(0))

    # qt is conserved
    max_q_tot = maximum(abs.(solver_config.dg.state_auxiliary[:, q_tot_ind, :]))
    min_q_tot = minimum(abs.(solver_config.dg.state_auxiliary[:, q_tot_ind, :]))
    @test isapprox(max_q_tot, qt_0; rtol = 1e-3)
    @test isapprox(min_q_tot, qt_0; rtol = 1e-3)

    # q_vap + q_liq = q_tot
    max_water_diff = maximum(abs.(
        solver_config.dg.state_auxiliary[:, q_tot_ind, :] .-
        solver_config.dg.state_auxiliary[:, q_vap_ind, :] .-
        solver_config.dg.state_auxiliary[:, q_liq_ind, :],
    ))
    @test isequal(max_water_diff, FT(0))

    # no ice
    max_q_ice = maximum(abs.(solver_config.dg.state_auxiliary[:, q_ice_ind, :]))
    @test isequal(max_q_ice, FT(0))

    # q_liq ∈ reference range
    max_q_liq = maximum(solver_config.dg.state_auxiliary[:, q_liq_ind, :])
    min_q_liq = minimum(solver_config.dg.state_auxiliary[:, q_liq_ind, :])
    @test max_q_liq < FT(1e-3)
@test isequal(min_q_liq, FT(0))
end

main()


================================================
FILE: test/Atmos/Parameterizations/Microphysics/KM_warm_rain.jl
================================================

include("KinematicModel.jl")

# Prognostic variables for the warm-rain (1-moment microphysics) setup:
# density, momentum, total energy, and the four water species.
function vars_state(m::KinematicModel, ::Prognostic, FT)
    @vars begin
        ρ::FT
        ρu::SVector{3, FT}
        ρe::FT
        ρq_tot::FT
        ρq_liq::FT
        ρq_ice::FT
        ρq_rai::FT
    end
end

# Auxiliary (diagnostic) variables.
function vars_state(m::KinematicModel, ::Auxiliary, FT)
    @vars begin
        # defined in init_state_auxiliary
        p::FT
        x_coord::FT
        z_coord::FT
        # defined in update_aux
        u::FT
        w::FT
        q_tot::FT
        q_vap::FT
        q_liq::FT
        q_ice::FT
        q_rai::FT
        e_tot::FT
        e_kin::FT
        e_pot::FT
        e_int::FT
        T::FT
        S_liq::FT
        RH::FT
        rain_w::FT
        # more diagnostics
        src_cloud_liq::FT
        src_cloud_ice::FT
        src_acnv::FT
        src_accr::FT
        src_rain_evap::FT
        flag_rain::FT
        flag_cloud_liq::FT
        flag_cloud_ice::FT
    end
end

# Initialize the prognostic state: hydrostatic density from `aux.p`,
# constant total water (no initial cloud/rain), prescribed eddy velocity,
# and total energy.
function init_kinematic_eddy!(eddy_model, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord
    FT = eltype(state)
    _grav::FT = grav(param_set)
    dc = eddy_model.data_config
    @inbounds begin
        # density
        q_pt_0 = PhasePartition(dc.qt_0)
        R_m, cp_m, cv_m, γ = gas_constants(param_set, q_pt_0)
        T::FT = dc.θ_0 * (aux.p / dc.p_1000)^(R_m / cp_m)
        ρ::FT = aux.p / R_m / T
        state.ρ = ρ
        # moisture
        state.ρq_tot = ρ * dc.qt_0
        state.ρq_liq = ρ * q_pt_0.liq
        state.ρq_ice = ρ * q_pt_0.ice
        state.ρq_rai = ρ * FT(0)
        # velocity (derivative of streamfunction)
        ρu::FT =
            dc.wmax * dc.xmax / dc.zmax *
            cos(π * z / dc.zmax) *
            cos(2 * π * x / dc.xmax)
        ρw::FT = 2 * dc.wmax * sin(π * z / dc.zmax) * sin(2 * π * x / dc.xmax)
        state.ρu = SVector(ρu, FT(0), ρw)
        u::FT = ρu / ρ
        w::FT = ρw / ρ
        # energy
        e_kin::FT = 1 // 2 * (u^2 + w^2)
        e_pot::FT = _grav * z
        e_int::FT = internal_energy(param_set, T, q_pt_0)
        e_tot::FT = e_kin + e_pot + e_int
        state.ρe = ρ * e_tot
    end
    return nothing
end

# Recompute diagnostics from the prognostic state; here moisture is carried
# out of equilibrium (PhaseNonEquil), and 1-moment microphysics process
# rates are stored for output.
function nodal_update_auxiliary_state!(
    m::KinematicModel,
    state::Vars,
    aux::Vars,
    t::Real,
)
    FT = eltype(state)
    _grav::FT = grav(param_set)
    @inbounds begin
        # velocity
        aux.u = state.ρu[1] / state.ρ
        aux.w = state.ρu[3] / state.ρ
        # water
        aux.q_tot = state.ρq_tot / state.ρ
        aux.q_liq = state.ρq_liq / state.ρ
        aux.q_ice = state.ρq_ice / state.ρ
        aux.q_rai = state.ρq_rai / state.ρ
        q = PhasePartition(aux.q_tot, aux.q_liq, aux.q_ice)
        aux.q_vap = vapor_specific_humidity(q)
        # energy
        aux.e_tot = state.ρe / state.ρ
        aux.e_kin = 1 // 2 * (aux.u^2 + aux.w^2)
        aux.e_pot = _grav * aux.z_coord
        aux.e_int = aux.e_tot - aux.e_kin - aux.e_pot
        # supersaturation
        q = PhasePartition(aux.q_tot, aux.q_liq, aux.q_ice)
        aux.T = air_temperature(param_set, aux.e_int, q)
        ts_neq = PhaseNonEquil_ρTq(param_set, state.ρ, aux.T, q)
        aux.S_liq = max(0, supersaturation(ts_neq, Liquid()))
        aux.RH = relative_humidity(ts_neq) * FT(100)
        aux.rain_w =
            terminal_velocity(param_set, CM1M.RainType(), state.ρ, aux.q_rai)
        # more diagnostics
        ts_eq = PhaseEquil_ρTq(param_set, state.ρ, aux.T, aux.q_tot)
        q_eq = PhasePartition(ts_eq)
        aux.src_cloud_liq =
            conv_q_vap_to_q_liq_ice(param_set, CM1M.LiquidType(), q_eq, q)
        aux.src_cloud_ice =
            conv_q_vap_to_q_liq_ice(param_set, CM1M.IceType(), q_eq, q)
        aux.src_acnv = conv_q_liq_to_q_rai(param_set, aux.q_liq)
        aux.src_accr = accretion(
            param_set,
            CM1M.LiquidType(),
            CM1M.RainType(),
            aux.q_liq,
            aux.q_rai,
            state.ρ,
        )
        aux.src_rain_evap = evaporation_sublimation(
            param_set,
            CM1M.RainType(),
            q,
            aux.q_rai,
            state.ρ,
            aux.T,
        )
        # presence flags
        # NOTE(review): `>= FT(0)` is trivially satisfied for nonnegative
        # (TMAR-filtered) specific humidities, so these flags are set to 1
        # almost everywhere; `> FT(0)` may have been intended — verify
        # before relying on the flag diagnostics.
        aux.flag_cloud_liq = FT(0)
        aux.flag_cloud_ice = FT(0)
        aux.flag_rain = FT(0)
        if (aux.q_liq >= FT(0))
            aux.flag_cloud_liq = FT(1)
        end
        if (aux.q_ice >= FT(0))
            aux.flag_cloud_ice = FT(1)
        end
        if (aux.q_rai >= FT(0))
            aux.flag_rain = FT(1)
        end
    end
end

# Boundary state for the first-order flux: zero out rain water at the
# boundary (rain leaves the domain; it is not reflected).
function boundary_state!(
    ::RusanovNumericalFlux,
    bctype,
    m::KinematicModel,
    state⁺,
    aux⁺,
    n,
    state⁻,
    aux⁻,
    t,
    args...,
)
    FT = eltype(state⁻)
    #state⁺.ρu -= 2 * dot(state⁻.ρu, n) .* SVector(n)
    #state⁺.ρq_rai = -state⁻.ρq_rai
    @inbounds state⁺.ρq_rai = FT(0)
end

# Maximum wavespeed in direction `nM`, accounting for rain sedimentation
# (terminal velocity) in the vertical.
@inline function wavespeed(
    m::KinematicModel,
    nM,
    state::Vars,
    aux::Vars,
    t::Real,
    _...,
)
    FT = eltype(state)
    @inbounds begin
        u = state.ρu / state.ρ
        q_rai::FT = state.ρq_rai /
state.ρ rain_w = terminal_velocity(param_set, CM1M.RainType(), state.ρ, q_rai) nu = nM[1] * u[1] + nM[3] * max(u[3], rain_w, u[3] - rain_w) end return abs(nu) end @inline function flux_first_order!( m::KinematicModel, flux::Grad, state::Vars, aux::Vars, t::Real, _..., ) FT = eltype(state) @inbounds begin q_rai::FT = state.ρq_rai / state.ρ rain_w = terminal_velocity(param_set, CM1M.RainType(), state.ρ, q_rai) # advect moisture ... flux.ρq_tot = SVector( state.ρu[1] * state.ρq_tot / state.ρ, FT(0), state.ρu[3] * state.ρq_tot / state.ρ, ) flux.ρq_liq = SVector( state.ρu[1] * state.ρq_liq / state.ρ, FT(0), state.ρu[3] * state.ρq_liq / state.ρ, ) flux.ρq_ice = SVector( state.ρu[1] * state.ρq_ice / state.ρ, FT(0), state.ρu[3] * state.ρq_ice / state.ρ, ) flux.ρq_rai = SVector( state.ρu[1] * state.ρq_rai / state.ρ, FT(0), (state.ρu[3] / state.ρ - rain_w) * state.ρq_rai, ) # ... energy ... flux.ρe = SVector( state.ρu[1] / state.ρ * (state.ρe + aux.p), FT(0), state.ρu[3] / state.ρ * (state.ρe + aux.p), ) # ... 
and don't advect momentum (kinematic setup) end end function source!( m::KinematicModel, source::Vars, state::Vars, diffusive::Vars, aux::Vars, t::Real, direction, ) FT = eltype(state) _grav::FT = grav(param_set) _e_int_v0::FT = e_int_v0(param_set) _cv_v::FT = cv_v(param_set) _cv_d::FT = cv_d(param_set) _T_0::FT = T_0(param_set) @inbounds begin e_tot = state.ρe / state.ρ q_tot = state.ρq_tot / state.ρ q_liq = state.ρq_liq / state.ρ q_ice = state.ρq_ice / state.ρ q_rai = state.ρq_rai / state.ρ u = state.ρu[1] / state.ρ w = state.ρu[3] / state.ρ e_int = e_tot - 1 // 2 * (u^2 + w^2) - _grav * aux.z_coord q = PhasePartition(q_tot, q_liq, q_ice) T = air_temperature(param_set, e_int, q) # equilibrium state at current T ts_eq = PhaseEquil_ρTq(param_set, state.ρ, T, q_tot) q_eq = PhasePartition(ts_eq) # zero out the source terms source.ρq_tot = FT(0) source.ρq_liq = FT(0) source.ρq_ice = FT(0) source.ρq_rai = FT(0) source.ρe = FT(0) # cloud water and ice condensation/evaporation source.ρq_liq += state.ρ * conv_q_vap_to_q_liq_ice(param_set, CM1M.LiquidType(), q_eq, q) source.ρq_ice += state.ρ * conv_q_vap_to_q_liq_ice(param_set, CM1M.IceType(), q_eq, q) # tendencies from rain src_q_rai_acnv = conv_q_liq_to_q_rai(param_set, q_liq) src_q_rai_accr = accretion( param_set, CM1M.LiquidType(), CM1M.RainType(), q_liq, q_rai, state.ρ, ) src_q_rai_evap = evaporation_sublimation( param_set, CM1M.RainType(), q, q_rai, state.ρ, T, ) src_q_rai_tot = src_q_rai_acnv + src_q_rai_accr + src_q_rai_evap source.ρq_liq -= state.ρ * (src_q_rai_acnv + src_q_rai_accr) source.ρq_rai += state.ρ * src_q_rai_tot source.ρq_tot -= state.ρ * src_q_rai_tot source.ρe -= state.ρ * src_q_rai_tot * (_e_int_v0 - (_cv_v - _cv_d) * (T - _T_0)) end end function main() # Working precision FT = Float64 # DG polynomial order N = 4 # Domain resolution and size Δx = FT(20) Δy = FT(1) Δz = FT(20) resolution = (Δx, Δy, Δz) # Domain extents xmax = 1500 ymax = 10 zmax = 1500 # initial configuration wmax = FT(0.6) # max 
velocity of the eddy [m/s] θ_0 = FT(289) # init. theta value (const) [K] p_0 = FT(101500) # surface pressure [Pa] p_1000 = FT(100000) # reference pressure in theta definition [Pa] qt_0 = FT(7.5 * 1e-3) # init. total water specific humidity (const) [kg/kg] z_0 = FT(0) # surface height # time stepping t_ini = FT(0) t_end = FT(30 * 60) dt = FT(5) #CFL = FT(1.75) filter_freq = 1 output_freq = 72 interval = "9steps" # periodicity and boundary numbers periodicity_x = true periodicity_y = true periodicity_z = false idx_bc_left = 0 idx_bc_right = 0 idx_bc_front = 0 idx_bc_back = 0 idx_bc_bottom = 1 idx_bc_top = 2 driver_config, ode_solver_type = config_kinematic_eddy( FT, N, resolution, xmax, ymax, zmax, wmax, θ_0, p_0, p_1000, qt_0, z_0, periodicity_x, periodicity_y, periodicity_z, idx_bc_left, idx_bc_right, idx_bc_front, idx_bc_back, idx_bc_bottom, idx_bc_top, ) solver_config = ClimateMachine.SolverConfiguration( t_ini, t_end, driver_config; ode_solver_type = ode_solver_type, ode_dt = dt, init_on_cpu = true, #Courant_number = CFL, ) model = driver_config.bl mpicomm = MPI.COMM_WORLD # get state variables indices for filtering ρq_liq_ind = varsindex(vars_state(model, Prognostic(), FT), :ρq_liq) ρq_ice_ind = varsindex(vars_state(model, Prognostic(), FT), :ρq_ice) ρq_rai_ind = varsindex(vars_state(model, Prognostic(), FT), :ρq_rai) # get aux variables indices for testing q_tot_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_tot) q_vap_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_vap) q_liq_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_liq) q_ice_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_ice) q_rai_ind = varsindex(vars_state(model, Auxiliary(), FT), :q_rai) S_liq_ind = varsindex(vars_state(model, Auxiliary(), FT), :S_liq) rain_w_ind = varsindex(vars_state(model, Auxiliary(), FT), :rain_w) # filter out negative values cb_tmar_filter = GenericCallbacks.EveryXSimulationSteps(filter_freq) do (init = false) Filters.apply!( solver_config.Q, 
(:ρq_liq, :ρq_ice, :ρq_rai), solver_config.dg.grid, TMARFilter(), ) nothing end # output for paraview # initialize base output prefix directory from rank 0 vtkdir = abspath(joinpath(ClimateMachine.Settings.output_dir, "vtk")) if MPI.Comm_rank(mpicomm) == 0 mkpath(vtkdir) end MPI.Barrier(mpicomm) # vtk output vtkstep = [0] cb_vtk = GenericCallbacks.EveryXSimulationSteps(output_freq) do (init = false) out_dirname = @sprintf( "microphysics_test_3_mpirank%04d_step%04d", MPI.Comm_rank(mpicomm), vtkstep[1] ) out_path_prefix = joinpath(vtkdir, out_dirname) @info "doing VTK output" out_path_prefix writevtk( out_path_prefix, solver_config.Q, solver_config.dg, flattenednames(vars_state(model, Prognostic(), FT)), solver_config.dg.state_auxiliary, flattenednames(vars_state(model, Auxiliary(), FT)), ) vtkstep[1] += 1 nothing end # output for netcdf boundaries = [ FT(0) FT(0) FT(0) xmax ymax zmax ] interpol = ClimateMachine.InterpolationConfiguration( driver_config, boundaries, resolution, ) dgngrps = [ setup_dump_state_diagnostics( AtmosLESConfigType(), interval, driver_config.name, interpol = interpol, ), setup_dump_aux_diagnostics( AtmosLESConfigType(), interval, driver_config.name, interpol = interpol, ), ] dgn_config = ClimateMachine.DiagnosticsConfiguration(dgngrps) # call solve! 
function for time-integrator result = ClimateMachine.invoke!( solver_config; diagnostics_config = dgn_config, user_callbacks = (cb_tmar_filter, cb_vtk), check_euclidean_distance = true, ) # supersaturation in the model max_S_liq = maximum(abs.(solver_config.dg.state_auxiliary[:, S_liq_ind, :])) @test max_S_liq < FT(0.25) @test max_S_liq > FT(0) # qt < reference number max_q_tot = maximum(abs.(solver_config.dg.state_auxiliary[:, q_tot_ind, :])) @test max_q_tot < FT(0.0077) # no ice max_q_ice = maximum(abs.(solver_config.dg.state_auxiliary[:, q_ice_ind, :])) @test isequal(max_q_ice, FT(0)) # q_liq ∈ reference range max_q_liq = maximum(solver_config.dg.state_auxiliary[:, q_liq_ind, :]) min_q_liq = minimum(solver_config.dg.state_auxiliary[:, q_liq_ind, :]) @test max_q_liq < FT(1e-3) @test abs(min_q_liq) < FT(1e-5) # q_rai ∈ reference range max_q_rai = maximum(solver_config.dg.state_auxiliary[:, q_rai_ind, :]) min_q_rai = minimum(solver_config.dg.state_auxiliary[:, q_rai_ind, :]) @test max_q_rai < FT(3e-5) @test abs(min_q_rai) < FT(7e-8) # terminal velocity ∈ reference range max_rain_w = maximum(solver_config.dg.state_auxiliary[:, rain_w_ind, :]) min_rain_w = minimum(solver_config.dg.state_auxiliary[:, rain_w_ind, :]) @test max_rain_w < FT(4) @test isequal(min_rain_w, FT(0)) end main() ================================================ FILE: test/Atmos/Parameterizations/Microphysics/KinematicModel.jl ================================================ # The set-up was designed for the # 8th International Cloud Modelling Workshop # ([Muhlbauer2013](@cite)) # # See chapter 2 in [Arabas2015](@cite) for setup details: using Dates using DocStringExtensions using LinearAlgebra using Logging using MPI using Printf using StaticArrays using Test using ClimateMachine ClimateMachine.init(diagnostics = "default") using ClimateMachine.Atmos using ClimateMachine.Orientations using ClimateMachine.ConfigTypes using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.Diagnostics 
using ClimateMachine.Grids using ClimateMachine.GenericCallbacks using ClimateMachine.Mesh.Filters using ClimateMachine.Mesh.Topologies using Thermodynamics: gas_constants, PhaseEquil, PhaseEquil_ρeq, PhasePartition_equil, PhasePartition, internal_energy, q_vap_saturation, relative_humidity, PhaseEquil_ρTq, PhaseNonEquil_ρTq, air_temperature, latent_heat_fusion, Liquid, Ice, supersaturation, vapor_specific_humidity using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.VTK using CloudMicrophysics.Microphysics_0M using CloudMicrophysics.Microphysics_1M import CloudMicrophysics const CM1M = CloudMicrophysics.Microphysics_1M using CLIMAParameters using CLIMAParameters.Planet: R_d, cp_d, cv_d, cv_v, cv_l, cv_i, T_0, T_freeze, e_int_v0, e_int_i0, grav using CLIMAParameters.Atmos.Microphysics struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, Hyperdiffusive import ClimateMachine.BalanceLaws: vars_state, init_state_prognostic!, init_state_auxiliary!, nodal_init_state_auxiliary!, nodal_update_auxiliary_state!, flux_first_order!, flux_second_order!, wavespeed, parameter_set, boundary_conditions, boundary_state!, source! 
import ClimateMachine.DGMethods: DGModel using ClimateMachine.Mesh.Geometry: LocalGeometry struct KinematicModelConfig{FT} xmax::FT ymax::FT zmax::FT wmax::FT θ_0::FT p_0::FT p_1000::FT qt_0::FT z_0::FT periodicity_x::Bool periodicity_y::Bool periodicity_z::Bool idx_bc_left::Int idx_bc_right::Int idx_bc_front::Int idx_bc_back::Int idx_bc_bottom::Int idx_bc_top::Int end struct KinematicModel{FT, PS, O, M, P, S, BC, IS, DC} <: BalanceLaw param_set::PS orientation::O moisture::M precipitation::P source::S boundarycondition::BC init_state_prognostic::IS data_config::DC end parameter_set(m::KinematicModel) = m.param_set function KinematicModel{FT}( ::Type{AtmosLESConfigType}, param_set::AbstractParameterSet; orientation::O = FlatOrientation(), moisture::M = nothing, precipitation::P = nothing, source::S = nothing, boundarycondition::BC = nothing, init_state_prognostic::IS = nothing, data_config::DC = nothing, ) where {FT <: AbstractFloat, O, M, P, S, BC, IS, DC} @assert param_set ≠ nothing @assert init_state_prognostic ≠ nothing atmos = ( param_set, orientation, moisture, precipitation, source, boundarycondition, init_state_prognostic, data_config, ) return KinematicModel{FT, typeof.(atmos)...}(atmos...) end vars_state(m::KinematicModel, ::Gradient, FT) = @vars() vars_state(m::KinematicModel, ::GradientFlux, FT) = @vars() function nodal_init_state_auxiliary!( m::KinematicModel, aux::Vars, tmp::Vars, geom::LocalGeometry, ) FT = eltype(aux) x, y, z = geom.coord dc = m.data_config param_set = parameter_set(m) _R_d::FT = R_d(param_set) _cp_d::FT = cp_d(param_set) _grav::FT = grav(param_set) # TODO - should R_d and cp_d here be R_m and cp_m? R_m, cp_m, cv_m, γ = gas_constants(param_set, PhasePartition(dc.qt_0)) # Pressure profile assuming hydrostatic and constant θ and qt profiles. # It is done this way to be consistent with Arabas paper. # It's not necessarily the best way to initialize with our model variables. 
p = dc.p_1000 * ( (dc.p_0 / dc.p_1000)^(_R_d / _cp_d) - _R_d / _cp_d * _grav / dc.θ_0 / R_m * (z - dc.z_0) )^(_cp_d / _R_d) @inbounds begin aux.p = p aux.x_coord = x aux.z_coord = z end end function init_state_prognostic!( m::KinematicModel, state::Vars, aux::Vars, localgeo, t, args..., ) m.init_state_prognostic(m, state, aux, localgeo, t, args...) end boundary_conditions(::KinematicModel) = (1, 2, 3, 4, 5, 6) function boundary_state!( ::CentralNumericalFluxSecondOrder, bctype, m::KinematicModel, state⁺, aux⁺, n, state⁻, aux⁻, t, args..., ) end @inline function flux_second_order!( m::KinematicModel, flux::Grad, state::Vars, diffusive::Vars, hyperdiffusive::Vars, aux::Vars, t::Real, ) end @inline function flux_second_order!( m::KinematicModel, flux::Grad, state::Vars, τ, d_h_tot, ) end function config_kinematic_eddy( FT, N, resolution, xmax, ymax, zmax, wmax, θ_0, p_0, p_1000, qt_0, z_0, periodicity_x, periodicity_y, periodicity_z, idx_bc_left, idx_bc_right, idx_bc_front, idx_bc_back, idx_bc_bottom, idx_bc_top, ) # Choose explicit solver ode_solver = ClimateMachine.ExplicitSolverType( solver_method = LSRK144NiegemannDiehlBusch, ) kmc = KinematicModelConfig( FT(xmax), FT(ymax), FT(zmax), FT(wmax), FT(θ_0), FT(p_0), FT(p_1000), FT(qt_0), FT(z_0), Bool(periodicity_x), Bool(periodicity_y), Bool(periodicity_z), Int(idx_bc_left), Int(idx_bc_right), Int(idx_bc_front), Int(idx_bc_back), Int(idx_bc_bottom), Int(idx_bc_top), ) # Set up the model model = KinematicModel{FT}( AtmosLESConfigType, param_set; init_state_prognostic = init_kinematic_eddy!, data_config = kmc, ) config = ClimateMachine.AtmosLESConfiguration( "KinematicModel", N, resolution, FT(xmax), FT(ymax), FT(zmax), param_set, init_kinematic_eddy!, boundary = ( (Int(idx_bc_left), Int(idx_bc_right)), (Int(idx_bc_front), Int(idx_bc_back)), (Int(idx_bc_bottom), Int(idx_bc_top)), ), periodicity = ( Bool(periodicity_x), Bool(periodicity_y), Bool(periodicity_z), ), xmin = FT(0), ymin = FT(0), zmin = FT(0), model = model, 
) return config, ode_solver end ================================================ FILE: test/Atmos/prog_prim_conversion/runtests.jl ================================================ module TestPrimitivePrognosticConversion using CLIMAParameters using CLIMAParameters.Planet using Test using StaticArrays using UnPack using ClimateMachine ClimateMachine.init() const ArrayType = ClimateMachine.array_type() const clima_dir = dirname(dirname(pathof(ClimateMachine))) using ClimateMachine.BalanceLaws using ClimateMachine.ConfigTypes using ClimateMachine.VariableTemplates using Thermodynamics using Thermodynamics.TemperatureProfiles using Thermodynamics.TestedProfiles using ClimateMachine.Atmos using ClimateMachine.BalanceLaws: prognostic_to_primitive!, primitive_to_prognostic! const BL = BalanceLaws struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() atol_temperature = 5e-1 atol_energy = cv_d(param_set) * atol_temperature import ClimateMachine.BalanceLaws: vars_state # Assign aux.ref_state different than state for general testing function assign!(aux, bl, nt, st::Auxiliary) @unpack p, ρ, e_pot, = nt aux.ref_state.ρ = ρ / 2 aux.ref_state.p = p / 2 aux.orientation.Φ = e_pot end function assign!(state, bl, nt, st::Prognostic, aux) @unpack u, v, w, ρ, e_kin, e_pot, T, q_pt = nt state.ρ = ρ assign!(state, bl, compressibility_model(bl), nt, st, aux) state.ρu = SVector(state.ρ * u, state.ρ * v, state.ρ * w) param_set = parameter_set(bl) state.energy.ρe = state.ρ * total_energy(param_set, e_kin, e_pot, T, q_pt) assign!(state, bl, moisture_model(bl), nt, st) end assign!(state, bl, moisture::DryModel, nt, ::Prognostic) = nothing assign!(state, bl, moisture::EquilMoist, nt, ::Prognostic) = (state.moisture.ρq_tot = state.ρ * nt.q_pt.tot) function assign!(state, bl, moisture::NonEquilMoist, nt, ::Prognostic) state.moisture.ρq_tot = state.ρ * nt.q_pt.tot state.moisture.ρq_liq = state.ρ * nt.q_pt.liq state.moisture.ρq_ice = state.ρ * nt.q_pt.ice 
end # Assign prog.ρ = aux.ref_state.ρ in anelastic1D assign!(state, bl, ::Compressible, nt, ::Prognostic, aux) = nothing function assign!(state, bl, ::Anelastic1D, nt, ::Prognostic, aux) state.ρ = aux.ref_state.ρ end function assign!(state, bl, nt, st::Primitive, aux) @unpack u, v, w, ρ, p = nt state.ρ = ρ state.u = SVector(u, v, w) state.p = p assign!(state, bl, compressibility_model(bl), nt, st, aux) assign!(state, bl, moisture_model(bl), nt, st) end assign!(state, bl, moisture::DryModel, nt, ::Primitive) = nothing assign!(state, bl, moisture::EquilMoist, nt, ::Primitive) = (state.moisture.q_tot = nt.q_pt.tot) function assign!(state, bl, moisture::NonEquilMoist, nt, ::Primitive) state.moisture.q_tot = nt.q_pt.tot state.moisture.q_liq = nt.q_pt.liq state.moisture.q_ice = nt.q_pt.ice end # Assign prim.p = aux.ref_state.p in anelastic1D assign!(state, bl, ::Compressible, nt, ::Primitive, aux) = nothing function assign!(state, bl, ::Anelastic1D, nt, ::Primitive, aux) state.p = aux.ref_state.p end @testset "Prognostic-Primitive conversion (dry)" begin FT = Float64 compressibility = (Anelastic1D(), Compressible()) for comp in compressibility physics = AtmosPhysics{FT}( param_set; moisture = DryModel(), compressibility = comp, ) bl = AtmosModel{FT}( AtmosLESConfigType, physics; init_state_prognostic = x -> x, ) vs_prog = vars_state(bl, Prognostic(), FT) vs_prim = vars_state(bl, Primitive(), FT) vs_aux = vars_state(bl, Auxiliary(), FT) prog_arr = zeros(varsize(vs_prog)) prim_arr = zeros(varsize(vs_prim)) aux_arr = zeros(varsize(vs_aux)) prog = Vars{vs_prog}(prog_arr) prim = Vars{vs_prim}(prim_arr) aux = Vars{vs_aux}(aux_arr) for nt in TestedProfiles.PhaseDryProfiles(param_set, ArrayType) assign!(aux, bl, nt, Auxiliary()) # Test prognostic_to_primitive! 
# identity (continued): round-trip prognostic -> primitive -> prognostic
assign!(prog, bl, nt, Prognostic(), aux)
prog_0 = deepcopy(parent(prog))
prim_arr .= 0
prognostic_to_primitive!(bl, prim, prog, aux)
@test !all(parent(prim) .≈ parent(prog)) # ensure not calling fallback
primitive_to_prognostic!(bl, prog, prim, aux)
@test all(parent(prog) .≈ prog_0)

# Test primitive_to_prognostic! identity
assign!(prim, bl, nt, Primitive(), aux)
prim_0 = deepcopy(parent(prim))
prog_arr .= 0
primitive_to_prognostic!(bl, prog, prim, aux)
@test !all(parent(prim) .≈ parent(prog)) # ensure not calling fallback
prognostic_to_primitive!(bl, prim, prog, aux)
@test all(parent(prim) .≈ prim_0)
end
end
end

# Round-trip conversion tests with an equilibrium moisture model. The
# saturation-adjustment solve makes variable 5 only approximately
# invertible (hence the `atol_energy`-based comparisons below).
@testset "Prognostic-Primitive conversion (EquilMoist)" begin
    FT = Float64
    compressibility = (Compressible(),) # Anelastic1D() does not converge
    for comp in compressibility
        physics = AtmosPhysics{FT}(
            param_set;
            moisture = EquilMoist(; maxiter = 5), # maxiter=3 does not converge
            compressibility = comp,
        )
        bl = AtmosModel{FT}(
            AtmosLESConfigType,
            physics;
            init_state_prognostic = x -> x,
        )
        vs_prog = vars_state(bl, Prognostic(), FT)
        vs_prim = vars_state(bl, Primitive(), FT)
        vs_aux = vars_state(bl, Auxiliary(), FT)
        prog_arr = zeros(varsize(vs_prog))
        prim_arr = zeros(varsize(vs_prim))
        aux_arr = zeros(varsize(vs_aux))
        prog = Vars{vs_prog}(prog_arr)
        prim = Vars{vs_prim}(prim_arr)
        aux = Vars{vs_aux}(aux_arr)
        # Track the largest round-trip error of variable 5 over all profiles.
        err_max_fwd = 0
        err_max_bwd = 0
        for nt in TestedProfiles.PhaseEquilProfiles(param_set, ArrayType)
            assign!(aux, bl, nt, Auxiliary())
            # Test prognostic_to_primitive! identity
            assign!(prog, bl, nt, Prognostic(), aux)
            prog_0 = deepcopy(parent(prog))
            prim_arr .= 0
            prognostic_to_primitive!(bl, prim, prog, aux)
            @test !all(parent(prim) .≈ parent(prog)) # ensure not calling fallback
            primitive_to_prognostic!(bl, prog, prim, aux)
            @test all(parent(prog)[1:4] .≈ prog_0[1:4])
            @test isapprox(parent(prog)[5], prog_0[5]; atol = atol_energy)
            # @test all(parent(prog)[5] .≈ prog_0[5]) # fails
            @test all(parent(prog)[6] .≈ prog_0[6])
            err_max_fwd = max(abs(parent(prog)[5] .- prog_0[5]), err_max_fwd)

            # Test primitive_to_prognostic! identity
            assign!(prim, bl, nt, Primitive(), aux)
            prim_0 = deepcopy(parent(prim))
            prog_arr .= 0
            primitive_to_prognostic!(bl, prog, prim, aux)
            @test !all(parent(prim) .≈ parent(prog)) # ensure not calling fallback
            prognostic_to_primitive!(bl, prim, prog, aux)
            @test all(parent(prim)[1:4] .≈ prim_0[1:4])
            # @test all(parent(prim)[5] .≈ prim_0[5]) # fails
            @test isapprox(parent(prim)[5], prim_0[5]; atol = atol_energy)
            @test all(parent(prim)[6] .≈ prim_0[6])
            err_max_bwd = max(abs(parent(prim)[5] .- prim_0[5]), err_max_bwd)
        end
    end
    # We may want/need to improve this later, so leaving debug info:
    # @show err_max_fwd
    # @show err_max_bwd
end

# With non-equilibrium moisture the round trip is tested exactly (no
# isapprox/atol needed), for both compressibility modes.
@testset "Prognostic-Primitive conversion (NonEquilMoist)" begin
    FT = Float64
    compressibility = (Anelastic1D(), Compressible())
    for comp in compressibility
        physics = AtmosPhysics{FT}(
            param_set;
            moisture = NonEquilMoist(),
            compressibility = comp,
        )
        bl = AtmosModel{FT}(
            AtmosLESConfigType,
            physics;
            init_state_prognostic = x -> x,
        )
        vs_prog = vars_state(bl, Prognostic(), FT)
        vs_prim = vars_state(bl, Primitive(), FT)
        vs_aux = vars_state(bl, Auxiliary(), FT)
        prog_arr = zeros(varsize(vs_prog))
        prim_arr = zeros(varsize(vs_prim))
        aux_arr = zeros(varsize(vs_aux))
        prog = Vars{vs_prog}(prog_arr)
        prim = Vars{vs_prim}(prim_arr)
        aux = Vars{vs_aux}(aux_arr)
        for nt in TestedProfiles.PhaseEquilProfiles(param_set, ArrayType)
            assign!(aux, bl, nt, Auxiliary())
            # Test prognostic_to_primitive! identity
            assign!(prog, bl, nt, Prognostic(), aux)
            prog_0 = deepcopy(parent(prog))
            prim_arr .= 0
            prognostic_to_primitive!(bl, prim, prog, aux)
            @test !all(parent(prim) .≈ parent(prog)) # ensure not calling fallback
            primitive_to_prognostic!(bl, prog, prim, aux)
            @test all(parent(prog) .≈ prog_0)

            # Test primitive_to_prognostic! identity
            assign!(prim, bl, nt, Primitive(), aux)
            prim_0 = deepcopy(parent(prim))
            prog_arr .= 0
            primitive_to_prognostic!(bl, prog, prim, aux)
            @test !all(parent(prim) .≈ parent(prog)) # ensure not calling fallback
            prognostic_to_primitive!(bl, prim, prog, aux)
            @test all(parent(prim) .≈ prim_0)
        end
    end
end

# Same round trip, exercised through the raw-array entry points
# (BL.-qualified) rather than the Vars wrappers.
@testset "Prognostic-Primitive conversion (array interface)" begin
    FT = Float64
    physics = AtmosPhysics{FT}(param_set; moisture = DryModel())
    bl = AtmosModel{FT}(
        AtmosLESConfigType,
        physics;
        init_state_prognostic = x -> x,
    )
    vs_prog = vars_state(bl, Prognostic(), FT)
    vs_prim = vars_state(bl, Primitive(), FT)
    vs_aux = vars_state(bl, Auxiliary(), FT)
    prog_arr = zeros(varsize(vs_prog))
    prim_arr = zeros(varsize(vs_prim))
    aux_arr = zeros(varsize(vs_aux))
    prog = Vars{vs_prog}(prog_arr)
    prim = Vars{vs_prim}(prim_arr)
    aux = Vars{vs_aux}(aux_arr)
    for nt in TestedProfiles.PhaseDryProfiles(param_set, ArrayType)
        assign!(aux, bl, nt, Auxiliary())
        # Test prognostic_to_primitive! identity
        assign!(prog, bl, nt, Prognostic(), aux)
        prog_0 = deepcopy(parent(prog))
        prim_arr .= 0
        BL.prognostic_to_primitive!(bl, prim_arr, prog_arr, aux_arr)
        @test !all(prog_arr .≈ prim_arr) # ensure not calling fallback
        BL.primitive_to_prognostic!(bl, prog_arr, prim_arr, aux_arr)
        @test all(parent(prog) .≈ prog_0)
        # Test primitive_to_prognostic!
# identity (continued): array-based primitive -> prognostic -> primitive
assign!(prim, bl, nt, Primitive(), aux)
prim_0 = deepcopy(parent(prim))
prog_arr .= 0
BL.primitive_to_prognostic!(bl, prog_arr, prim_arr, aux_arr)
@test !all(prog_arr .≈ prim_arr) # ensure not calling fallback
BL.prognostic_to_primitive!(bl, prim_arr, prog_arr, aux_arr)
@test all(parent(prim) .≈ prim_0)
end
end
end

================================================
FILE: test/Atmos/runtests.jl
================================================
using Test, Pkg

# Driver for the Atmos test submodules. Each submodule can be selected
# individually from ARGS; with no args (or "all") every submodule runs.
@testset "Atmos" begin
    # Fixed: the condition is already a Bool; `? true : false` was redundant.
    all_tests = isempty(ARGS) || "all" in ARGS
    for submodule in ["Model", "prog_prim_conversion"]
        if all_tests ||
           "$submodule" in ARGS ||
           "Atmos/$submodule" in ARGS ||
           "Atmos" in ARGS
            include_test(submodule)
        end
    end
end

================================================
FILE: test/BalanceLaws/runtests.jl
================================================
using Test
using Random
using StaticArrays: SVector
Random.seed!(1234)

using ClimateMachine.VariableTemplates: @vars, varsize, Vars
using ClimateMachine.BalanceLaws
const BL = BalanceLaws

import ClimateMachine.BalanceLaws: vars_state, eq_tends, prognostic_vars

# Minimal balance law used to exercise the BalanceLaws interface:
# two prognostic variables (X, Y) and one tendency definition per
# tendency type (first-order flux, second-order flux, source).
struct TestBL <: BalanceLaw end
struct X <: AbstractPrognosticVariable end
struct Y <: AbstractPrognosticVariable end
struct F1 <: TendencyDef{Flux{FirstOrder}} end
struct F2 <: TendencyDef{Flux{SecondOrder}} end
struct S <: TendencyDef{Source} end
prognostic_vars(::TestBL) = (X(), Y())
eq_tends(::X, ::TestBL, ::Flux{FirstOrder}) = (F1(),)
eq_tends(::Y, ::TestBL, ::Flux{FirstOrder}) = (F1(),)
eq_tends(::X, ::TestBL, ::Flux{SecondOrder}) = (F2(),)
eq_tends(::Y, ::TestBL, ::Flux{SecondOrder}) = (F2(),)
eq_tends(::X, ::TestBL, ::Source) = (S(),)
eq_tends(::Y, ::TestBL, ::Source) = (S(),)

@testset "BalanceLaws" begin
    bl = TestBL()
    @test prognostic_vars(bl) == (X(), Y())
    # Smoke-test the tendency table printer in its supported modes.
    show_tendencies(bl)
    show_tendencies(bl; include_module = true)
    show_tendencies(bl; table_complete = true)
end

vars_state(bl::TestBL, st::Prognostic, FT) = @vars begin
    ρ::FT
    ρu::SVector{3, FT}
end
# Auxiliary state is empty for the test balance law.
vars_state(bl::TestBL, st::Auxiliary, FT) = @vars()

# The default conversions are the identity, so a round trip through the
# array-based entry points must reproduce the input exactly.
@testset "Prognostic-Primitive conversion (identity)" begin
    FT = Float64
    bl = TestBL()
    vs_prog = vars_state(bl, Prognostic(), FT)
    vs_prim = vars_state(bl, Primitive(), FT)
    vs_aux = vars_state(bl, Auxiliary(), FT)
    prim_arr = zeros(varsize(vs_prim))
    prog_arr = zeros(varsize(vs_prog))
    aux_arr = zeros(varsize(vs_aux))
    # Test prognostic_to_primitive! identity
    prog_arr .= rand(varsize(vs_prog))
    prog_0 = deepcopy(prog_arr)
    prim_arr .= 0
    BL.prognostic_to_primitive!(bl, prim_arr, prog_arr, aux_arr)
    BL.primitive_to_prognostic!(bl, prog_arr, prim_arr, aux_arr)
    @test all(prog_arr .≈ prog_0)
    # Test primitive_to_prognostic! identity
    prim_arr .= rand(varsize(vs_prim))
    prim_0 = deepcopy(prim_arr)
    prog_arr .= 0
    BL.primitive_to_prognostic!(bl, prog_arr, prim_arr, aux_arr)
    BL.prognostic_to_primitive!(bl, prim_arr, prog_arr, aux_arr)
    @test all(prim_arr .≈ prim_0)
end

================================================
FILE: test/Common/CartesianDomains/runtests.jl
================================================
using ClimateMachine
ClimateMachine.init()
using ClimateMachine.CartesianDomains

# Construct a RectangularDomain at both precisions and check that element
# counts, polynomial order, and side lengths are stored as expected.
@testset "Cartesian domains" begin
    for FT in (Float64, Float32)
        domain = RectangularDomain(
            FT,
            Ne = (16, 24, 1),
            Np = 4,
            x = (0, π),
            y = (0, 1.1),
            z = (-1, 0),
            periodicity = (false, false, false),
        )
        @test eltype(domain) == FT
        @test domain.Ne == (x = 16, y = 24, z = 1)
        @test domain.Np == 4
        @test domain.L.x == FT(π)
        @test domain.L.y == FT(1.1)
        @test domain.L.z == FT(1)
    end
end

================================================
FILE: test/Common/CartesianFields/runtests.jl
================================================
using ClimateMachine
ClimateMachine.init()
using ClimateMachine.CartesianFields: RectangularElement, assemble

@testset "Cartesian fields" begin
    Nx = 3
    Ny = 4
    Nz = 5
    data = rand(Nx, Ny, Nz)
    # Coordinate arrays: each varies along one dimension and is repeated
    # along the others.
    x = repeat(range(0.0, 1.0, length = Nx), 1, Ny, Nz)
    y = repeat(range(0.0, 1.1, length = Ny), Nx, 1, Nz)
    z = repeat(range(0.0, 1.2, length = Nz), Nx,
Ny, 1) # (continuation of the z coordinate construction above)
element = RectangularElement(data, x, y, z)

@test size(element) == (Nx, Ny, Nz)
@test maximum(element) == maximum(data)
@test minimum(element) == minimum(data)
@test maximum(abs, element) == maximum(abs, data)
@test minimum(abs, element) == minimum(abs, data)
@test element[1, 1, 1] == data[1, 1, 1]

# Neighbor elements shifted by one element width in each direction, with
# scaled data so the assembled pieces are distinguishable.
east_element = RectangularElement(2 .* data, x .+ x[end, 1, 1], y, z)
north_element = RectangularElement(3 .* data, x, y .+ y[1, end, 1], z)
top_element = RectangularElement(4 .* data, x, y, z .+ z[1, 1, end])

# Pairwise assembly along each of the three dimensions.
west_east = assemble(Val(1), element, east_element)
south_north = assemble(Val(2), element, north_element)
bottom_top = assemble(Val(3), element, top_element)

@test west_east.x[1, 1, 1] == x[1, 1, 1]
@test west_east.x[end, 1, 1] == 2 * x[end, 1, 1]
@test south_north.y[1, 1, 1] == y[1, 1, 1]
@test south_north.y[1, end, 1] == 2 * y[1, end, 1]
@test bottom_top.z[1, 1, 1] == z[1, 1, 1]
@test bottom_top.z[1, 1, end] == 2 * z[1, 1, end]

northeast_element =
    RectangularElement(5 .* data, x .+ x[end, 1, 1], y .+ y[1, end, 1], z)

# Remember that matrix literals are transposed, so "northeast"
# is the bottom right corner (for example).
four_elements = [
    element north_element
    east_element northeast_element
]

four_elements = reshape(four_elements, 2, 2, 1)

four_way = assemble(four_elements)

@test four_way.x[end, 1, 1] == west_east.x[end, 1, 1]
@test four_way.y[1, end, 1] == south_north.y[1, end, 1]
@test four_way.z[1, 1, end] == z[1, 1, end]
end

================================================
FILE: test/Common/Spectra/gcm_standalone_visual_test.jl
================================================
# Standalone test file that tests spectra visually
#using Plots # uncomment when using the plotting code below

using ClimateMachine.ConfigTypes

using ClimateMachine.Spectra:
    compute_gaussian!,
    compute_legendre!,
    SpectralSphericalMesh,
    trans_grid_to_spherical!,
    power_spectrum_1d,
    power_spectrum_2d,
    compute_wave_numbers
using FFTW

include("spherical_helper_test.jl")

FT = Float64
# -- TEST 1: power_spectrum_1d(AtmosGCMConfigType(), var_grid, z, lat, lon, weight)
nlats = 32

# Setup grid
sinθ, wts = compute_gaussian!(nlats)
yarray = asin.(sinθ) .* 180 / π
xarray = 180.0 ./ nlats * collect(FT, 1:1:(2nlats))[:] .- 180.0
z = 1

# Setup variable: two zonal waves (wavenumbers 5 and 10), constant in latitude
mass_weight = ones(Float64, length(z));
var_grid =
    1.0 * reshape(
        sin.(xarray / xarray[end] * 5.0 * 2π) .* (yarray .* 0.0 .+ 1.0)',
        length(xarray),
        length(yarray),
        1,
    ) +
    1.0 * reshape(
        sin.(xarray / xarray[end] * 10.0 * 2π) .* (yarray .* 0.0 .+ 1.0)',
        length(xarray),
        length(yarray),
        1,
    )
nm_spectrum, wave_numbers = power_spectrum_1d(
    AtmosGCMConfigType(),
    var_grid,
    z,
    yarray,
    xarray,
    mass_weight,
);

# Check visually (requires the commented-out `using Plots` above)
plot(wave_numbers[:, 16, 1], nm_spectrum[:, 16, 1], xlims = (0, 20))
contourf(var_grid[:, :, 1])
contourf(nm_spectrum[2:20, :, 1])

# -- TEST 2: power_spectrum_gcm_2d
# Setup grid
sinθ, wts = compute_gaussian!(nlats)
yarray = asin.(sinθ) .* 180 / π
xarray = 180.0 ./ nlats * collect(FT, 1:1:(2nlats))[:] .- 180.0
z = 1

# Setup variable: use an example analytical P_nm function
P_32 = sqrt(105 / 8) * (sinθ .- sinθ .^ 3)
var_grid =
    1.0 * reshape(
        sin.(xarray /
xarray[end] * 3.0 * π) .* P_32', # (continuation: zonal wavenumber 3 times P_32)
        length(xarray),
        length(yarray),
        1,
    )
mass_weight = ones(Float64, z);

spectrum, wave_numbers, spherical, mesh =
    power_spectrum_2d(AtmosGCMConfigType(), var_grid, mass_weight)

# Grid to spherical to grid reconstruction
reconstruction = trans_spherical_to_grid!(mesh, spherical)

# Check visually
contourf(var_grid[:, :, 1])
contourf(reconstruction[:, :, 1])
contourf(var_grid[:, :, 1] .- reconstruction[:, :, 1])

# Spectrum
contourf(
    collect(0:1:(mesh.num_fourier - 1))[:],
    collect(0:1:(mesh.num_spherical - 1))[:],
    (spectrum[:, :, 1])',
    xlabel = "m",
    ylabel = "n",
)

# Check magnitude
println(0.5 .* sum(spectrum))
dθ = π / length(wts)
cosθ = sqrt.(1 .- sinθ .^ 2)
area_factor = reshape(cosθ .* dθ .^ 2 / 4π, (1, length(cosθ)))
println(sum(0.5 .* var_grid[:, :, 1] .^ 2 .* area_factor))

# NB: can verify against published packages, e.g., https://github.com/jswhit/pyspharm

================================================
FILE: test/Common/Spectra/runtests.jl
================================================
module TestSpectra

using Test
using FFTW

using ClimateMachine.ConfigTypes
using ClimateMachine.Spectra
using ClimateMachine.Spectra:
    compute_gaussian!,
    compute_legendre!,
    SpectralSphericalMesh,
    trans_grid_to_spherical!,
    compute_wave_numbers

include("spherical_helper_test.jl")

@testset "power_spectrum_1d (GCM)" begin
    FT = Float64
    # -- TEST 1: power_spectrum_1d(AtmosGCMConfigType(), var_grid, z, lat, lon, weight)
    nlats = 32

    # Setup grid
    sinθ, wts = compute_gaussian!(nlats)
    yarray = asin.(sinθ) .* 180 / π
    xarray = 180.0 ./ nlats * collect(FT, 1:1:(2nlats))[:] .- 180.0
    z = 1

    # Setup variable
    mass_weight = ones(Float64, length(z))
    var_grid =
        1.0 * reshape(
            sin.(xarray / xarray[end] * 5.0 * 2π) .* (yarray .* 0.0 .+ 1.0)',
            length(xarray),
            length(yarray),
            1,
        ) +
        1.0 * reshape(
            sin.(xarray / xarray[end] * 10.0 * 2π) .* (yarray .* 0.0 .+ 1.0)',
            length(xarray),
            length(yarray),
            1,
        )
    nm_spectrum, wave_numbers = power_spectrum_1d(
        AtmosGCMConfigType(),
var_grid, # (continuation of the power_spectrum_1d call)
        z,
        yarray,
        xarray,
        mass_weight,
    )

    # Parseval-style check: at a fixed latitude the 1d spectrum should carry
    # (approximately) the same power as the gridded signal.
    nm_spectrum_ = nm_spectrum[:, 10, 1]
    var_grid_ = var_grid[:, 10, 1]
    sum_spec = sum(nm_spectrum_)
    sum_grid = sum(var_grid_ .^ 2) / length(var_grid_)
    sum_res = (sum_spec - sum_grid) / sum_grid

    @test sum_res < 0.1
end

@testset "power_spectrum_2d (GCM)" begin
    # -- TEST 2: power_spectrum_2d
    # Setup grid
    FT = Float64
    nlats = 32
    sinθ, wts = compute_gaussian!(nlats)
    cosθ = sqrt.(1 .- sinθ .^ 2)
    yarray = asin.(sinθ) .* 180 / π
    xarray = 180.0 ./ nlats * collect(FT, 1:1:(2nlats))[:] .- 180.0
    z = 1

    # Setup variable: use an example analytical P_nm function
    P_32 = sqrt(105 / 8) * (sinθ .- sinθ .^ 3)
    var_grid =
        1.0 * reshape(
            sin.(xarray / xarray[end] * 3.0 * π) .* P_32',
            length(xarray),
            length(yarray),
            1,
        )
    mass_weight = ones(Float64, z)
    spectrum, wave_numbers, spherical, mesh =
        power_spectrum_2d(AtmosGCMConfigType(), var_grid, mass_weight)

    # Grid to spherical to grid reconstruction
    reconstruction = trans_spherical_to_grid!(mesh, spherical)

    # Compare total power in spectral space, on the original grid, and on
    # the reconstructed grid; all three should agree to within 10%.
    sum_spec = sum((0.5 * spectrum))
    dθ = π / length(wts)
    area_factor = reshape(cosθ .* dθ .^ 2 / 4π, (1, length(cosθ)))

    sum_grid = sum(0.5 .* var_grid[:, :, 1] .^ 2 .* area_factor) # scaled to average over Earth's area (units: m2/s2)
    sum_reco = sum(0.5 .* reconstruction[:, :, 1] .^ 2 .* area_factor)

    sum_res_1 = (sum_spec - sum_grid) / sum_grid
    sum_res_2 = (sum_reco - sum_grid) / sum_grid

    @test abs(sum_res_1) < 0.1
    @test abs(sum_res_2) < 0.1
end

end

================================================
FILE: test/Common/Spectra/spherical_helper_test.jl
================================================
# additional helper functions for spherical harmonics spectra

"""
    TransSphericalToGrid!(mesh, snm )

Transforms a variable expressed in spherical harmonics
(var_spherical[num_fourier+1, num_spherical+1]) onto a Gaussian grid
(pfield[nλ, nθ])

[THIS IS USED FOR TESTING ONLY]

With F_{m,n} = (-1)^m F_{-m,n}*
P_{m,n} = (-1)^m P_{-m,n}

F(λ, η) = ∑_{m= -N}^{N} ∑_{n=|m|}^{N} F_{m,n} P_{m,n}(η) e^{imλ}
        = ∑_{m= 0}^{N}
∑_{n=m}^{N} F_{m,n} P_{m,n} e^{imλ} + ∑_{m= 1}^{N} ∑_{n=m}^{N} F_{-m,n} P_{-m,n} e^{-imλ}

Here η = sinθ, N = num_fourier, and denote

! extra coefficients in snm n > N are not used.

∑_{n=m}^{N} F_{m,n} P_{m,n} = g_{m}(η)      m = 1, ... N

∑_{n=m}^{N} F_{m,n} P_{m,n}/2.0 = g_{m}(η)  m = 0

We have

F(λ, η) = ∑_{m= 0}^{N} g_{m}(η) e^{imλ} + ∑_{m= 0}^{N} g_{m}(η)* e^{-imλ}
        = 2real{ ∑_{m= 0}^{N} g_{m}(η) e^{imλ} }

snm = F_{m,n}        # Complex{Float64} [num_fourier+1, num_spherical+1]
qnm = P_{m,n,η}      # Float64[num_fourier+1, num_spherical+1, nθ]
fourier_g = g_{m, η} # Complex{Float64} nλ×nθ with padded 0s
fourier_g[num_fourier+2, :] == 0.0
pfield = F(λ, η)     # Float64[nλ, nθ]

! use all spherical harmonic modes

# Arguments
- mesh: struct with mesh information
- snm: spherical variable

# References
- Ehrendorfer, M., Spectral Numerical Weather Prediction Models, Appendix B, Society for Industrial and Applied Mathematics, 2011
"""
function trans_spherical_to_grid!(mesh, snm)
    num_fourier, num_spherical = mesh.num_fourier, mesh.num_spherical

    nλ, nθ, nd = mesh.nλ, mesh.nθ, mesh.nd

    qnm = mesh.qnm

    fourier_g = mesh.var_fourier .* 0.0
    fourier_s = mesh.var_fourier .* 0.0

    # The even/odd splitting below needs an even number of latitudes.
    @assert(nθ % 2 == 0)
    nθ_half = div(nθ, 2)
    for m in 1:(num_fourier + 1)
        for n in m:num_spherical
            snm_t = transpose(snm[m, n, :, 1:nθ_half]) #snm[m,n, :] is complex number
            if (n - m) % 2 == 0
                fourier_s[m, 1:nθ_half, :] .+=
                    qnm[m, n, 1:nθ_half] .* sum(snm_t, dims = 1) #even function part
            else
                fourier_s[m, (nθ_half + 1):nθ, :] .+=
                    qnm[m, n, 1:nθ_half] .* sum(snm_t, dims = 1) #odd function part
            end
        end
    end

    # Recombine the even and odd halves into the two latitude hemispheres.
    fourier_g[:, 1:nθ_half, :] .=
        fourier_s[:, 1:nθ_half, :] .+ fourier_s[:, (nθ_half + 1):nθ, :]
    fourier_g[:, nθ:-1:(nθ_half + 1), :] .=
        fourier_s[:, 1:nθ_half, :] .- fourier_s[:, (nθ_half + 1):nθ, :] # this got ignored...

    fourier_g[1, :, :] ./= 2.0 # m = 0 mode is counted once, not twice
    pfield = zeros(Float64, nλ, nθ, nd)
    for j in 1:nθ
        pfield[:, j, :] .= 2.0 * nλ * real.(ifft(fourier_g[:, j, :], 1)) #fourier for the first dimension
    end
    return pfield
end

================================================
FILE: test/Common/runtests.jl
================================================
using Test, Pkg

# Driver for the Common test submodules; mirrors test/Atmos/runtests.jl.
@testset "Common" begin
    # Fixed: the condition is already a Bool; `? true : false` was redundant.
    all_tests = isempty(ARGS) || "all" in ARGS
    for submodule in ["CartesianDomains", "CartesianFields"]
        if all_tests ||
           "$submodule" in ARGS ||
           "Common/$submodule" in ARGS ||
           "Common" in ARGS
            include_test(submodule)
        end
    end
end

================================================
FILE: test/Diagnostics/Debug/test_statecheck.jl
================================================
using Test
#
# State debug statistics
#

# Set up a basic environment
using MPI
using StaticArrays
using Random
using ClimateMachine
using ClimateMachine.VariableTemplates
using ClimateMachine.MPIStateArrays
using ClimateMachine.StateCheck
using ClimateMachine.GenericCallbacks

@testset "$(@__FILE__)" begin

    ClimateMachine.init()
    FT = Float64

    # Define some dummy vector and tensor abstract variables with associated types
    # and dimensions
    F1 = @vars begin
        ν∇u::SMatrix{3, 2, FT, 6}
        κ∇θ::SVector{3, FT}
    end
    F2 = @vars begin
        u::SVector{2, FT}
        θ::SVector{1, FT}
    end

    # Create ```MPIStateArray``` variables with arrays to hold elements of the
    # vectors and tensors
    Q1 = MPIStateArray{Float32, F1}(
        MPI.COMM_WORLD,
        ClimateMachine.array_type(),
        4,
        9,
        8,
    )
    Q2 = MPIStateArray{Float64, F2}(
        MPI.COMM_WORLD,
        ClimateMachine.array_type(),
        4,
        3,
        8,
    )

    # ### Create a call-back
    cb = ClimateMachine.StateCheck.sccreate(
        [(Q1, "My gradients"), (Q2, "My fields")],
        1;
        prec = 15,
    )

    # ### Invoke the call-back
    # Compare on local "realdata", fill via broadcast to keep GPU happy.
# Fill both state arrays with reproducible pseudo-random data (fixed seed).
Q1.data = rand(MersenneTwister(0), Float32, size(Q1.data))
Q2.data = rand(MersenneTwister(0), Float64, size(Q2.data))
Q1.realdata .= Q1.data
Q2.realdata .= Q2.data
GenericCallbacks.init!(cb, nothing, nothing, nothing, nothing)
GenericCallbacks.call!(cb, nothing, nothing, nothing, nothing)

# ### Check with reference values
include("test_statecheck_refvals.jl")
# This should return true (MPI rank != 0 always returns true)
test_stat_01 = ClimateMachine.StateCheck.scdocheck(cb, (varr, parr))
# This should return false (MPI rank != 0 always returns true);
# perturb one reference value to force a mismatch.
varr[1][3] = varr[1][3] * 10.0
test_stat_02 = ClimateMachine.StateCheck.scdocheck(cb, (varr, parr))
if MPI.Comm_rank(MPI.COMM_WORLD) != 0
    test_stat_02 = false
end
test_stat = test_stat_01 && !test_stat_02
@test test_stat
end

================================================
FILE: test/Diagnostics/Debug/test_statecheck_refvals.jl
================================================
#
# Example of a set of reference values that StateCheck will use to test current values against.
# These are generated by the function scprintref() and parsed by the scdocheck()
#
#!
format: off varr = [ [ "My gradients", "ν∇u[1]", 1.34348869323730468750e-04, 9.84732866287231445313e-01, 5.23545503616333007813e-01, 3.08209930764271777814e-01 ], [ "My gradients", "ν∇u[2]", 1.16317868232727050781e-01, 9.92088317871093750000e-01, 4.83800649642944335938e-01, 2.83350456014221541157e-01 ], [ "My gradients", "ν∇u[3]", 1.05845928192138671875e-03, 9.51775908470153808594e-01, 4.65474426746368408203e-01, 2.73615551085745090099e-01 ], [ "My gradients", "ν∇u[4]", 5.97668886184692382813e-02, 9.68048095703125000000e-01, 5.42618036270141601563e-01, 2.81570862027933854765e-01 ], [ "My gradients", "ν∇u[5]", 8.31030607223510742188e-02, 9.35931921005249023438e-01, 5.05405902862548828125e-01, 2.46073509972619536290e-01 ], [ "My gradients", "ν∇u[6]", 3.09681892395019531250e-02, 9.98341441154479980469e-01, 4.54375565052032470703e-01, 3.09461067853178561915e-01 ], [ "My gradients", "κ∇θ[1]", 8.47448110580444335938e-02, 9.94180679321289062500e-01, 5.27157366275787353516e-01, 2.92455951648181833313e-01 ], [ "My gradients", "κ∇θ[2]", 1.20514631271362304688e-02, 9.93527650833129882813e-01, 4.71063584089279174805e-01, 2.96449027197666359346e-01 ], [ "My gradients", "κ∇θ[3]", 8.14980268478393554688e-02, 9.55443382263183593750e-01, 5.05038917064666748047e-01, 2.77201022741208891187e-01 ], [ "My fields", "u[1]", 3.53445491472876849315e-02, 9.73118774570230771204e-01, 4.53566376673364857197e-01, 3.28622448223506280485e-01 ], [ "My fields", "u[2]", 4.23016659320296639635e-02, 9.67799553619200114696e-01, 5.40307230728526155517e-01, 2.94603153327737565803e-01 ], [ "My fields", "θ[1]", 6.23675581701588210848e-02, 9.76550123041147521974e-01, 5.16046312418334207628e-01, 2.81983244164903890105e-01 ], ] parr = [ [ "My gradients", "ν∇u[1]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[2]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[3]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[4]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[5]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[6]", 16, 7, 16, 0 ], [ "My 
gradients", "κ∇θ[1]", 16, 16, 16, 0 ],
 [ "My gradients", "κ∇θ[2]", 16, 16, 16, 0 ],
 [ "My gradients", "κ∇θ[3]", 16, 16, 16, 0 ],
 [ "My fields", "u[1]", 16, 16, 16, 0 ],
 [ "My fields", "u[2]", 16, 16, 16, 0 ],
 [ "My fields", "θ[1]", 16, 16, 16, 0 ],
]
#! format: on

================================================
FILE: test/Diagnostics/diagnostic_fields_test.jl
================================================
using Test, MPI
using Random
using StaticArrays
using Test # NOTE(review): duplicate of `using Test` above — harmless
using ClimateMachine
ClimateMachine.init()
using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.ConfigTypes
using ClimateMachine.Diagnostics
using ClimateMachine.BalanceLaws
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.Mesh.Filters
using Thermodynamics.TemperatureProfiles
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates

using CLIMAParameters
using CLIMAParameters.Planet: R_d, cp_d, cv_d, MSLP, grav
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

import ClimateMachine.Mesh.Grids: _x1, _x2, _x3
import ClimateMachine.BalanceLaws: vars_state
import ClimateMachine.VariableTemplates.varsindex

# ------------------------ Description ------------------------- #
# 1) Dry Rising Bubble (circular potential temperature perturbation)
# 2) Boundaries - `All Walls` : Impenetrable(FreeSlip())
#                               Laterally periodic
# 3) Domain - 2500m[horizontal] x 2500m[horizontal] x 2500m[vertical]
# 4) Timeend - 1000s
# 5) Mesh Aspect Ratio (Effective resolution) 1:1
# 7) Overrides defaults for
#               `init_on_cpu`
#               `solver_type`
#               `sources`
#               `C_smag`
# 8) Default settings can be found in `src/Driver/Configurations.jl`
# ------------------------ Description ------------------------- #

# Initial condition: dry rising bubble — a cosine-shaped potential
# temperature perturbation on a hydrostatically balanced background.
function init_risingbubble!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord
    FT = eltype(state)
    param_set = parameter_set(bl)
    R_gas::FT = R_d(param_set)
    c_p::FT =
cp_d(param_set) # (continuation of the c_p assignment)
    c_v::FT = cv_d(param_set)
    γ::FT = c_p / c_v
    p0::FT = MSLP(param_set)
    _grav::FT = grav(param_set)
    # Bubble center, radius, and perturbation amplitude
    xc::FT = 1250
    yc::FT = 1250
    zc::FT = 1000
    r = sqrt((x - xc)^2 + (y - yc)^2 + (z - zc)^2)
    rc::FT = 500
    θ_ref::FT = 300
    Δθ::FT = 0

    if r <= rc
        Δθ = FT(5) * cospi(r / rc / 2)
    end

    #Perturbed state:
    θ = θ_ref + Δθ # potential temperature
    π_exner = FT(1) - _grav / (c_p * θ) * z # exner pressure
    ρ = p0 / (R_gas * θ) * (π_exner)^(c_v / R_gas) # density
    q_tot = FT(0)
    ts = PhaseEquil_ρθq(param_set, ρ, θ, q_tot)
    q_pt = PhasePartition(ts)

    ρu = SVector(FT(0), FT(0), FT(0))

    #State (prognostic) variable assignment
    e_kin = FT(0)
    e_pot = gravitational_potential(bl.orientation, aux)
    ρe_tot = ρ * total_energy(e_kin, e_pot, ts)
    state.ρ = ρ
    state.ρu = ρu
    state.energy.ρe = ρe_tot
    state.moisture.ρq_tot = ρ * q_pt.tot
end

# Build the AtmosLES driver configuration for the dry rising bubble.
function config_risingbubble(FT, N, resolution, xmax, ymax, zmax)

    # Set up the model
    T_profile = DryAdiabaticProfile{FT}(param_set)
    C_smag = FT(0.23)
    ref_state = HydrostaticState(T_profile)
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = ref_state,
        turbulence = SmagorinskyLilly{FT}(C_smag),
    )
    model = AtmosModel{FT}(
        AtmosLESConfigType,
        physics;
        init_state_prognostic = init_risingbubble!,
        source = (Gravity(),),
    )

    # Problem configuration
    config = ClimateMachine.AtmosLESConfiguration(
        "DryRisingBubble",
        N,
        resolution,
        xmax,
        ymax,
        zmax,
        param_set,
        init_risingbubble!,
        model = model,
    )
    return config
end

# Default diagnostics at a very long interval (effectively off here).
function config_diagnostics(driver_config)
    interval = "10000steps"
    dgngrp = setup_atmos_default_diagnostics(
        AtmosLESConfigType(),
        interval,
        driver_config.name,
    )
    return ClimateMachine.DiagnosticsConfiguration([dgngrp])
end

#-------------------------------------------------------------------------
# Checks Diagnostics.VectorGradients and Diagnostics.Vorticity against the
# exact derivatives of an analytic velocity field.
# NOTE(review): "diagostics" is a typo for "diagnostics", but the name is
# referenced by the testset below, so it is left unchanged here.
function run_brick_diagostics_fields_test()
    DA = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD
    root = 0
    pid = MPI.Comm_rank(mpicomm)
    npr = MPI.Comm_size(mpicomm)

    toler = Dict(Float64 => 1e-8, Float32 => 1e-4)

    # Working precision
    for FT in (Float32, Float64)
        # DG polynomial order
        N = (4, 5)
        # Domain resolution and size
        Δh = FT(50)
        Δv = FT(50)
        resolution = (Δh, Δh, Δv)
        # Domain extents
        xmax = FT(2500)
        ymax = FT(2500)
        zmax = FT(2500)
        # Simulation time
        t0 = FT(0)
        timeend = FT(1000)
        # Courant number
        CFL = FT(20)

        driver_config = config_risingbubble(FT, N, resolution, xmax, ymax, zmax)

        # Choose explicit multirate solver
        ode_solver_type = ClimateMachine.MultirateSolverType(
            fast_model = AtmosAcousticGravityLinearModel,
            slow_method = LSRK144NiegemannDiehlBusch,
            fast_method = LSRK144NiegemannDiehlBusch,
            timestep_ratio = 10,
        )
        solver_config = ClimateMachine.SolverConfiguration(
            t0,
            timeend,
            driver_config;
            ode_solver_type = ode_solver_type,
            init_on_cpu = true,
            Courant_number = CFL,
        )
        #------------------------------------------------------------------------
        model = driver_config.bl
        Q = solver_config.Q
        dg = solver_config.dg
        grid = dg.grid
        vgeo = grid.vgeo
        Nel = size(Q.realdata, 3)
        Npl = size(Q.realdata, 1)

        # Indices of ρ and the three momentum components in the state array.
        ind = [
            varsindex(vars_state(model, Prognostic(), FT), :ρ)
            varsindex(vars_state(model, Prognostic(), FT), :ρu)
        ]
        _ρ, _ρu, _ρv, _ρw = ind[1], ind[2], ind[3], ind[4]

        x1 = view(vgeo, :, _x1, 1:Nel)
        x2 = view(vgeo, :, _x2, 1:Nel)
        x3 = view(vgeo, :, _x3, 1:Nel)

        # Analytic test field and its exact partial derivatives.
        fcn0(x, y, z, xmax, ymax, zmax) =
            sin.(pi * x ./ xmax) .* cos.(pi * y ./ ymax) .* cos.(pi * z ./ zmax) # sample function
        fcnx(x, y, z, xmax, ymax, zmax) =
            cos.(pi * x ./ xmax) .* cos.(pi * y ./ ymax) .*
            cos.(pi * z ./ zmax) .* pi ./ xmax # ∂/∂x
        fcny(x, y, z, xmax, ymax, zmax) =
            -sin.(pi * x ./ xmax) .* sin.(pi * y ./ ymax) .*
            cos.(pi * z ./ zmax) .* pi ./ ymax # ∂/∂y
        fcnz(x, y, z, xmax, ymax, zmax) =
            -sin.(pi * x ./ xmax) .* cos.(pi * y ./ ymax) .*
            sin.(pi * z ./ zmax) .* pi ./ zmax # ∂/∂z

        # Overwrite the state with the analytic field (same profile in u, v, w).
        Q.data[:, _ρ, 1:Nel] .= 1.0 .+ fcn0(x1, x2, x3, xmax, ymax, zmax) * 5.0
        Q.data[:, _ρu, 1:Nel] .=
            Q.data[:, _ρ, 1:Nel] .* fcn0(x1, x2, x3, xmax, ymax, zmax)
        Q.data[:, _ρv, 1:Nel] .=
            Q.data[:, _ρ, 1:Nel] .* fcn0(x1, x2, x3, xmax, ymax, zmax)
        Q.data[:, _ρw, 1:Nel] .=
            Q.data[:, _ρ, 1:Nel] .* fcn0(x1, x2, x3, xmax, ymax, zmax)
        #-----------------------------------------------------------------------
        vgrad = Diagnostics.VectorGradients(dg, Q)
        vort = Diagnostics.Vorticity(dg, vgrad)
        #----------------------------------------------------------------------------
        # Exact vorticity components from the analytic derivatives.
        Ω₁_exact = fcny(x1, x2, x3, xmax, ymax, zmax) -
                   fcnz(x1, x2, x3, xmax, ymax, zmax)
        Ω₂_exact = fcnz(x1, x2, x3, xmax, ymax, zmax) -
                   fcnx(x1, x2, x3, xmax, ymax, zmax)
        Ω₃_exact = fcnx(x1, x2, x3, xmax, ymax, zmax) -
                   fcny(x1, x2, x3, xmax, ymax, zmax)

        # Max-norm errors of the nine gradient components and three
        # vorticity components.
        err = zeros(FT, 12)
        err[1] = maximum(abs.(fcnx(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₁u₁))
        err[2] = maximum(abs.(fcny(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₂u₁))
        err[3] = maximum(abs.(fcnz(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₃u₁))
        err[4] = maximum(abs.(fcnx(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₁u₂))
        err[5] = maximum(abs.(fcny(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₂u₂))
        err[6] = maximum(abs.(fcnz(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₃u₂))
        err[7] = maximum(abs.(fcnx(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₁u₃))
        err[8] = maximum(abs.(fcny(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₂u₃))
        err[9] = maximum(abs.(fcnz(x1, x2, x3, xmax, ymax, zmax) - vgrad.∂₃u₃))

        err[10] = maximum(abs.(vort.Ω₁ - Ω₁_exact))
        err[11] = maximum(abs.(vort.Ω₂ - Ω₂_exact))
        err[12] = maximum(abs.(vort.Ω₃ - Ω₃_exact))

        errg = MPI.Allreduce(err, max, mpicomm)

        @test maximum(errg) < toler[FT]
    end
end
#----------------------------------------------------------------------------
@testset "Diagnostics Fields tests" begin
    run_brick_diagostics_fields_test()
end
#------------------------------------------------

================================================
FILE: test/Diagnostics/dm_tests.jl
================================================
using Dates
using FileIO
using KernelAbstractions
using MPI
using NCDatasets
using Printf
using Random
using StaticArrays
using Test

using ClimateMachine
ClimateMachine.init(diagnostics = "1steps")
using ClimateMachine.Atmos
using ClimateMachine.ConfigTypes
using
ClimateMachine.Diagnostics # (completes the interrupted `using` above)
using ClimateMachine.DiagnosticsMachine
using ClimateMachine.GenericCallbacks
using ClimateMachine.MPIStateArrays
using Thermodynamics

using CLIMAParameters
using CLIMAParameters.Planet: grav, MSLP
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Need to import these to define new diagnostic variables and groups.
import ..DiagnosticsMachine:
    Settings,
    dv_name,
    dv_attrib,
    dv_args,
    dv_project,
    dv_scale,
    dv_PointwiseDiagnostic,
    dv_HorizontalAverage

# Define some new diagnostic variables.
# Horizontal average of the y-velocity (ρu[2] / ρ).
@horizontal_average(
    AtmosLESConfigType,
    yvel,
) do (atmos::AtmosModel, states::States, curr_time, cache)
    states.prognostic.ρu[2] / states.prognostic.ρ
end

# Pointwise z-velocity (ρu[3] / ρ).
@pointwise_diagnostic(
    AtmosLESConfigType,
    zvel,
) do (atmos::AtmosModel, states::States, curr_time, cache)
    states.prognostic.ρu[3] / states.prognostic.ρ
end

# Define a new diagnostics group with some pre-defined diagnostic
# variables as well as the new ones above.
@diagnostics_group(
    "DMTest",
    AtmosLESConfigType,
    Nothing,
    (_...) -> nothing,
    NoInterpolation,
    u,
    v,
    w,
    rho,
    yvel,
    zvel,
)

# Make sure DiagnosticsMachine did what it was supposed to do.
@testset "DiagnosticsMachine interface" begin
    # Undefined diagnostics must not have generated constructors.
    @test_throws UndefVarError ALESCT_HA_foo()
    yv = ALESCT_HA_yvel()
    @test yv isa HorizontalAverage
    yvname = dv_name(AtmosLESConfigType(), yv)
    @test yvname == "yvel"
    @test yvname ∈ keys(DiagnosticsMachine.AllDiagnosticVars[AtmosLESConfigType])

    @test_throws UndefVarError ALESCT_PD_bar()
    zv = ALESCT_PD_zvel()
    @test zv isa PointwiseDiagnostic
    zvname = dv_name(AtmosLESConfigType(), zv)
    @test zvname == "zvel"
    @test zvname ∈ keys(DiagnosticsMachine.AllDiagnosticVars[AtmosLESConfigType])
end

# Set up a simple experiment to run the diagnostics group.
include("sin_init.jl")

function main()
    FT = Float64

    # DG polynomial order
    N = 4
    # Domain resolution and size
    Δh = FT(25)
    Δv = FT(25)
    resolution = (Δh, Δh, Δv)
    xmax = FT(500)
    ymax = FT(500)
    zmax = FT(500)

    t0 = FT(0)
    dt = FT(0.01)
    timeend = dt # a single step suffices to trigger the "1steps" group

    driver_config = ClimateMachine.AtmosLESConfiguration(
        "DMTest",
        N,
        resolution,
        xmax,
        ymax,
        zmax,
        param_set,
        init_sin_test!,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
        init_on_cpu = true,
    )
    dm_dgngrp = DMTest("1steps", driver_config.name)
    dgn_config = ClimateMachine.DiagnosticsConfiguration([dm_dgngrp])

    ClimateMachine.invoke!(solver_config, diagnostics_config = dgn_config)

    # Check the output from the diagnostics group.
    @testset "DiagnosticsMachine correctness" begin
        mpicomm = solver_config.mpicomm
        mpirank = MPI.Comm_rank(mpicomm)

        # Rank 0 reads back the NetCDF output written by the group.
        if mpirank == 0
            nm = driver_config.name * "_DMTest.nc"
            ds = Dataset(joinpath(ClimateMachine.Settings.output_dir, nm), "r")
            ds_u = ds["u"][:]
            ds_yvel = ds["yvel"][:]
            ds_zvel = ds["zvel"][:]
            close(ds)
        end

        Q = array_device(solver_config.Q) isa CPU ? solver_config.Q :
            Array(solver_config.Q)
        havg_rho = compute_havg(solver_config, view(Q, :, 1:1, :))
        havg_u = compute_havg(solver_config, view(Q, :, 2:2, :))
        v = view(Q, :, 3:3, :) ./ view(Q, :, 1:1, :)
        havg_v = compute_havg(solver_config, v)
        if mpirank == 0
            # `u` was averaged in density-weighted form; undo the weighting.
            havg_u ./= havg_rho
            @test all(ds_u[:, 3] .≈ havg_u)
            @test all(ds_yvel[:, 3] .≈ havg_v)

            realelems = solver_config.dg.grid.topology.realelems
            w = view(Q, :, 4, realelems) ./ view(Q, :, 1, realelems)
            @test all(ds_zvel[:, :, 3] .≈ w)
        end
    end

    nothing
end

# MPI-reduced horizontal average of `field`, weighted by the horizontal
# mass matrix MH; returns the average on rank 0 and `nothing` elsewhere.
function compute_havg(solver_config, field)
    mpicomm = solver_config.mpicomm
    mpirank = MPI.Comm_rank(mpicomm)
    grid = solver_config.dg.grid
    grid_info = basic_grid_info(grid)
    topl_info = basic_topology_info(grid.topology)
    Nqh = grid_info.Nqh
    Nqk = grid_info.Nqk
    nvertelem = topl_info.nvertelem
    nhorzrealelem = topl_info.nhorzrealelem

    # Reshape to (horiz nodes, vert nodes, vert elems, horiz real elems),
    # copying to the host first when the data lives on an accelerator.
    function arrange_array(A, dim = :)
        A = array_device(A) isa CPU ? A : Array(A)
        reshape(
            view(A, :, dim, grid.topology.realelems),
            Nqh,
            Nqk,
            nvertelem,
            nhorzrealelem,
        )
    end
    field = arrange_array(field)
    MH = arrange_array(grid.vgeo, grid.MHid)
    # Sum over horizontal nodes and horizontal elements, then reduce to rank 0.
    full_field = MPI.Reduce!(sum(field .* MH, dims = (1, 4))[:], +, 0, mpicomm)
    full_MH = MPI.Reduce!(sum(MH, dims = (1, 4))[:], +, 0, mpicomm)
    if mpirank == 0
        return full_field ./ full_MH
    else
        return nothing
    end
end

main()

================================================
FILE: test/Diagnostics/runtests.jl
================================================
using MPI, Test

include(joinpath("..", "testhelpers.jl"))

# Each diagnostics test runs under MPI with two ranks.
@testset "Diagnostics" begin
    runmpi(joinpath(@__DIR__, "sin_test.jl"), ntasks = 2)
    runmpi(joinpath(@__DIR__, "dm_tests.jl"), ntasks = 2)
    runmpi(joinpath(@__DIR__, "Debug/test_statecheck.jl"), ntasks = 2)
end

================================================
FILE: test/Diagnostics/sin_init.jl
================================================
# Initial condition shared by the diagnostics tests: sinusoidal velocity
# components over a simple moist stratification.
function init_sin_test!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord
    FT = eltype(state)
    param_set = parameter_set(bl)

    z = FT(z)
    _grav::FT = grav(param_set)
    _MSLP::FT = MSLP(param_set)

    # These constants are those used by Stevens et al.
(2005) qref = FT(9.0e-3) q_pt_sfc = PhasePartition(qref) Rm_sfc = FT(gas_constant_air(param_set, q_pt_sfc)) T_sfc = FT(292.5) P_sfc = _MSLP # Specify moisture profiles q_liq = FT(0) q_ice = FT(0) zb = FT(600) # initial cloud bottom zi = FT(840) # initial cloud top dz_cloud = zi - zb q_liq_peak = FT(0.00045) # cloud mixing ratio at z_i if z > zb && z <= zi q_liq = (z - zb) * q_liq_peak / dz_cloud end if z <= zi θ_liq = FT(289.0) q_tot = qref else θ_liq = FT(297.5) + (z - zi)^(FT(1 / 3)) q_tot = FT(1.5e-3) end w = FT(10 + 0.5 * sin(2 * π * ((x / 1500) + (y / 1500)))) u = (5 + 2 * sin(2 * π * ((x / 1500) + (y / 1500)))) v = FT(5 + 2 * sin(2 * π * ((x / 1500) + (y / 1500)))) # Pressure H = Rm_sfc * T_sfc / _grav p = P_sfc * exp(-z / H) # Density, Temperature ts = PhaseEquil_pθq(param_set, p, θ_liq, q_tot) #ρ = air_density(ts) ρ = one(FT) e_kin = FT(1 / 2) * FT((u^2 + v^2 + w^2)) e_pot = _grav * z E = ρ * total_energy(e_kin, e_pot, ts) state.ρ = ρ state.ρu = SVector(ρ * u, ρ * v, ρ * w) state.energy.ρe = E state.moisture.ρq_tot = ρ * q_tot end ================================================ FILE: test/Diagnostics/sin_test.jl ================================================ using Dates using FileIO using MPI using NCDatasets using Printf using Random using StaticArrays using Test using ClimateMachine ClimateMachine.init() using ClimateMachine.Atmos using ClimateMachine.Orientations using ClimateMachine.ConfigTypes using ClimateMachine.Diagnostics using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.Mesh.Filters using Thermodynamics using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.Writers using ClimateMachine.GenericCallbacks using CLIMAParameters using CLIMAParameters.Planet: grav, MSLP struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() include("sin_init.jl") function config_sin_test(FT, N, resolution, xmax, ymax, zmax) config = 
ClimateMachine.AtmosLESConfiguration( "Diagnostics SIN test", N, resolution, xmax, ymax, zmax, param_set, init_sin_test!, ) return config end function config_diagnostics(driver_config) interval = "100steps" dgngrp = setup_atmos_default_diagnostics( AtmosLESConfigType(), interval, replace(driver_config.name, " " => "_"), writer = NetCDFWriter(), ) return ClimateMachine.DiagnosticsConfiguration([dgngrp]) end function main() # Disable driver diagnostics as we're testing it here ClimateMachine.Settings.diagnostics = "never" FT = Float64 # DG polynomial order N = 4 # Domain resolution and size Δh = FT(50) Δv = FT(20) resolution = (Δh, Δh, Δv) xmax = FT(1500) ymax = FT(1500) zmax = FT(1500) t0 = FT(0) dt = FT(0.01) timeend = dt driver_config = config_sin_test(FT, N, resolution, xmax, ymax, zmax) ode_solver_type = ClimateMachine.ExplicitSolverType( solver_method = LSRK54CarpenterKennedy, ) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_solver_type = ode_solver_type, ode_dt = dt, init_on_cpu = true, ) dgn_config = config_diagnostics(driver_config) mpicomm = solver_config.mpicomm dg = solver_config.dg Q = solver_config.Q solver = solver_config.solver outdir = mktempdir() currtime = ODESolvers.gettime(solver) starttime = replace(string(now()), ":" => ".") Diagnostics.init(mpicomm, param_set, dg, Q, starttime, outdir, false) GenericCallbacks.init!( dgn_config.groups[1], nothing, nothing, nothing, currtime, ) ClimateMachine.invoke!(solver_config) # Check results mpirank = MPI.Comm_rank(mpicomm) if mpirank == 0 dgngrp = dgn_config.groups[1] nm = @sprintf("%s_%s.nc", dgngrp.out_prefix, dgngrp.name) ds = NCDataset(joinpath(outdir, nm), "r") ds_u = ds["u"][:] ds_cov_w_u = ds["cov_w_u"][:] N = size(ds_u, 1) err = 0 err1 = 0 for i in 1:N u = ds_u[i] cov_w_u = ds_cov_w_u[i] err += (cov_w_u - 0.5)^2 err1 += (u - 5)^2 end close(ds) err = sqrt(err / N) @test err1 <= 1e-16 @test err <= 2e-15 end end main() 
================================================
FILE: test/Driver/cr_unit_tests.jl
================================================

using MPI
using Printf
using StaticArrays
using Test

import KernelAbstractions: CPU

using ClimateMachine
ClimateMachine.init()
using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.Checkpoint
using ClimateMachine.ConfigTypes
using Thermodynamics.TemperatureProfiles
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates
using ClimateMachine.Grids
using ClimateMachine.ODESolvers
import ClimateMachine.MPIStateArrays: array_device

using CLIMAParameters
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Parameters of the acoustic-wave GCM setup used as the model under test.
Base.@kwdef struct AcousticWaveSetup{FT}
    domain_height::FT = 10e3
    T_ref::FT = 300
    α::FT = 3
    γ::FT = 100
    nv::Int = 1
end

function (setup::AcousticWaveSetup)(problem, bl, state, aux, localgeo, t)
    # callable to set initial conditions
    FT = eltype(state)
    λ = longitude(bl, aux)
    φ = latitude(bl, aux)
    z = altitude(bl, aux)
    # pressure perturbation: horizontal bump (β, f) times vertical mode (g)
    β = min(FT(1), setup.α * acos(cos(φ) * cos(λ)))
    f = (1 + cos(FT(π) * β)) / 2
    g = sin(setup.nv * FT(π) * z / setup.domain_height)
    Δp = setup.γ * f * g
    p = aux.ref_state.p + Δp
    param_set = parameter_set(bl)
    ts = PhaseDry_pT(param_set, p, setup.T_ref)
    q_pt = PhasePartition(ts)
    e_pot = gravitational_potential(bl.orientation, aux)
    e_int = internal_energy(ts)
    state.ρ = air_density(ts)
    state.ρu = SVector{3, FT}(0, 0, 0)
    state.energy.ρe = state.ρ * (e_int + e_pot)
    return nothing
end

# Unit tests for checkpoint/restart: write a checkpoint, read it back, and
# check it round-trips the prognostic state, auxiliary state, and time.
function main()
    FT = Float64

    # DG polynomial order
    N = 4

    # Domain resolution
    nelem_horz = 4
    nelem_vert = 6
    resolution = (nelem_horz, nelem_vert)

    t0 = FT(0)
    timeend = FT(1800)
    # Timestep size (s)
    dt = FT(600)

    ode_solver_type = ClimateMachine.MISSolverType(;
        splitting_type = ClimateMachine.SlowFastSplitting(),
        nsubsteps = (20,),
    )

    setup = AcousticWaveSetup{FT}()
    T_profile = IsothermalProfile(param_set, setup.T_ref)
    ref_state = HydrostaticState(T_profile)
    turbulence = ConstantDynamicViscosity(FT(0))
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = ref_state,
        turbulence = turbulence,
        moisture = DryModel(),
    )
    model = AtmosModel{FT}(
        AtmosGCMConfigType,
        physics;
        init_state_prognostic = setup,
        source = (Gravity(),),
    )

    driver_config = ClimateMachine.AtmosGCMConfiguration(
        "Checkpoint unit tests",
        N,
        resolution,
        setup.domain_height,
        param_set,
        setup;
        model = model,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        ode_dt = dt,
    )

    isdir(ClimateMachine.Settings.checkpoint_dir) ||
        mkpath(ClimateMachine.Settings.checkpoint_dir)

    @testset "Checkpoint/restart unit tests" begin
        # start from a clean slate, then write checkpoint number 0
        rm_checkpoint(
            ClimateMachine.Settings.checkpoint_dir,
            solver_config.name,
            solver_config.mpicomm,
            0,
        )
        write_checkpoint(
            solver_config,
            ClimateMachine.Settings.checkpoint_dir,
            solver_config.name,
            solver_config.mpicomm,
            0,
        )
        nm = replace(solver_config.name, " " => "_")
        cname = @sprintf(
            "%s_checkpoint_mpirank%04d_num%04d.jld2",
            nm,
            MPI.Comm_rank(solver_config.mpicomm),
            0,
        )
        cfull = joinpath(ClimateMachine.Settings.checkpoint_dir, cname)
        @test isfile(cfull)

        # a read failure yields the `nothing` triple, failing the tests below
        s_Q, s_aux, s_t = try
            read_checkpoint(
                ClimateMachine.Settings.checkpoint_dir,
                nm,
                driver_config.array_type,
                solver_config.mpicomm,
                0,
            )
        catch
            (nothing, nothing, nothing)
        end
        @test s_Q !== nothing
        @test s_aux !== nothing
        @test s_t !== nothing
        # bring device arrays to the host before comparing
        if Array ∉ typeof(s_Q).parameters
            s_Q = Array(s_Q)
            s_aux = Array(s_aux)
        end

        dg = solver_config.dg
        Q = solver_config.Q
        if array_device(Q) isa CPU
            h_Q = Q.realdata
            h_aux = dg.state_auxiliary.realdata
        else
            h_Q = Array(Q.realdata)
            h_aux = Array(dg.state_auxiliary.realdata)
        end
        t = ODESolvers.gettime(solver_config.solver)
        # checkpoint must round-trip state, aux, and time exactly
        @test h_Q == s_Q
        @test h_aux == s_aux
        @test t == s_t

        rm_checkpoint(
            ClimateMachine.Settings.checkpoint_dir,
            solver_config.name,
            solver_config.mpicomm,
            0,
        )
        @test !isfile(cfull)
    end
end

main()

================================================
FILE:
test/Driver/gcm_driver_test.jl
================================================

using StaticArrays
using Test

using ClimateMachine
ClimateMachine.init()
using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.Checkpoint
using ClimateMachine.ConfigTypes
using Thermodynamics.TemperatureProfiles
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates
using ClimateMachine.Grids
using ClimateMachine.ODESolvers

using CLIMAParameters
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Parameters of the acoustic-wave GCM setup used as the model under test.
Base.@kwdef struct AcousticWaveSetup{FT}
    domain_height::FT = 10e3
    T_ref::FT = 300
    α::FT = 3
    γ::FT = 100
    nv::Int = 1
end

function (setup::AcousticWaveSetup)(problem, bl, state, aux, localgeo, t)
    # callable to set initial conditions
    FT = eltype(state)
    λ = longitude(bl, aux)
    φ = latitude(bl, aux)
    z = altitude(bl, aux)
    # pressure perturbation: horizontal bump (β, f) times vertical mode (g)
    β = min(FT(1), setup.α * acos(cos(φ) * cos(λ)))
    f = (1 + cos(FT(π) * β)) / 2
    g = sin(setup.nv * FT(π) * z / setup.domain_height)
    Δp = setup.γ * f * g
    p = aux.ref_state.p + Δp
    param_set = parameter_set(bl)
    ts = PhaseDry_pT(param_set, p, setup.T_ref)
    q_pt = PhasePartition(ts)
    e_pot = gravitational_potential(bl.orientation, aux)
    e_int = internal_energy(ts)
    state.ρ = air_density(ts)
    state.ρu = SVector{3, FT}(0, 0, 0)
    state.energy.ρe = state.ρ * (e_int + e_pot)
    return nothing
end

# Smoke test for the GCM driver path with a multirate HEVI solver; verifies
# that the user_info_callback is actually invoked during invoke!.
function main()
    FT = Float64

    # DG polynomial orders
    N = (4, 4)

    # Domain resolution
    nelem_horz = 4
    nelem_vert = 6
    resolution = (nelem_horz, nelem_vert)

    t0 = FT(0)
    timeend = FT(3600)
    # Timestep size (s)
    dt = FT(1800)

    setup = AcousticWaveSetup{FT}()
    T_profile = IsothermalProfile(param_set, setup.T_ref)
    ref_state = HydrostaticState(T_profile)
    turbulence = ConstantDynamicViscosity(FT(0))
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = ref_state,
        turbulence = turbulence,
        moisture = DryModel(),
    )
    model = AtmosModel{FT}(
        AtmosGCMConfigType,
        physics;
        init_state_prognostic = setup,
        source = (Gravity(),),
    )

    ode_solver_type = ClimateMachine.MultirateSolverType(
        splitting_type = ClimateMachine.HEVISplitting(),
        fast_model = AtmosAcousticGravityLinearModel,
        implicit_solver_adjustable = true,
        slow_method = LSRK54CarpenterKennedy,
        fast_method = ARK2ImplicitExplicitMidpoint,
        timestep_ratio = 300,
    )

    driver_config = ClimateMachine.AtmosGCMConfiguration(
        "GCM Driver test",
        N,
        resolution,
        setup.domain_height,
        param_set,
        setup;
        model = model,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        ode_dt = dt,
    )

    # the callback must fire at least once during the run
    cb_test = 0
    result = ClimateMachine.invoke!(
        solver_config;
        user_info_callback = () -> cb_test += 1,
    )
    @test cb_test > 0
end

main()

================================================
FILE: test/Driver/les_driver_test.jl
================================================

using StaticArrays
using Test

using ClimateMachine
using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.Mesh.Grids
using Thermodynamics
using ClimateMachine.VariableTemplates

using CLIMAParameters
using CLIMAParameters.Planet: grav, MSLP
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Initial condition with constant geostrophic winds (u = 7, v = -5.5, w = 0)
# so the advective Courant numbers below can be computed by hand.
function init_test!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord
    FT = eltype(state)
    param_set = parameter_set(bl)
    z = FT(z)
    _grav::FT = grav(param_set)
    _MSLP::FT = MSLP(param_set)

    # These constants are those used by Stevens et al. (2005)
    qref = FT(9.0e-3)
    q_pt_sfc = PhasePartition(qref)
    Rm_sfc = FT(gas_constant_air(param_set, q_pt_sfc))
    T_sfc = FT(290.4)
    P_sfc = _MSLP

    # Specify moisture profiles
    q_liq = FT(0)
    q_ice = FT(0)
    θ_liq = FT(289.0)
    q_tot = qref
    ugeo = FT(7)
    vgeo = FT(-5.5)
    u, v, w = ugeo, vgeo, FT(0)

    # Pressure
    H = Rm_sfc * T_sfc / _grav
    p = P_sfc * exp(-z / H)

    # Density, Temperature
    ts = PhaseEquil_pθq(param_set, p, θ_liq, q_tot)
    ρ = air_density(ts)

    e_kin = FT(1 / 2) * FT((u^2 + v^2 + w^2))
    e_pot = _grav * z
    E = ρ * total_energy(e_kin, e_pot, ts)
    state.ρ = ρ
    state.ρu = SVector(ρ * u, ρ * v, ρ * w)
    state.energy.ρe = E
    state.moisture.ρq_tot = ρ * q_tot
    return nothing
end

# Exercises the LES driver path: init argument validation, Courant-number
# wrappers (with hand-computed expected values), user_info_callback wiring,
# and dt/timeend handling (timeend_dt_adjust, fixed_number_of_steps).
function main()
    # a misspelled keyword must be rejected by init
    @test_throws ArgumentError ClimateMachine.init(dsisable_gpu = true)
    ClimateMachine.init()

    FT = Float64

    # DG polynomial orders
    N = (4, 4)

    # Domain resolution and size
    Δh = FT(40)
    Δv = FT(40)
    resolution = (Δh, Δh, Δv)
    xmax = FT(320)
    ymax = FT(320)
    zmax = FT(400)

    t0 = FT(0)
    timeend = FT(10)
    CFL = FT(0.4)

    driver_config = ClimateMachine.AtmosLESConfiguration(
        "Driver test",
        N,
        resolution,
        xmax,
        ymax,
        zmax,
        param_set,
        init_test!,
    )
    ode_solver_type = ClimateMachine.ExplicitSolverType()
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        Courant_number = CFL,
    )

    # Test the courant wrapper
    # by default the CFL should be less than what asked for
    CFL_nondiff = ClimateMachine.DGMethods.courant(
        ClimateMachine.Courant.nondiffusive_courant,
        solver_config,
    )
    @test CFL_nondiff < CFL
    CFL_adv = ClimateMachine.DGMethods.courant(
        ClimateMachine.Courant.advective_courant,
        solver_config,
    )
    CFL_adv_v = ClimateMachine.DGMethods.courant(
        ClimateMachine.Courant.advective_courant,
        solver_config;
        direction = VerticalDirection(),
    )
    CFL_adv_h = ClimateMachine.DGMethods.courant(
        ClimateMachine.Courant.advective_courant,
        solver_config;
        direction = HorizontalDirection(),
    )

    # compute known advective Courant number (based on initial conditions)
    ugeo_abs = FT(7)
    vgeo_abs = FT(5.5)
    Δt = solver_config.dt
    ca_h = ugeo_abs * (Δt / Δh) + vgeo_abs * (Δt / Δh)
    # vertical velocity is 0
    caᵥ = FT(0.0)
    @test isapprox(CFL_adv_v, caᵥ, atol = 10 * eps(FT))
    @test isapprox(CFL_adv_h, ca_h, atol = 0.0005)
    @test isapprox(CFL_adv, ca_h, atol = 0.0005)

    cb_test = 0
    result = ClimateMachine.invoke!(solver_config)
    # cb_test should be zero since user_info_callback not specified
    @test cb_test == 0
    result = ClimateMachine.invoke!(
        solver_config,
        user_info_callback = () -> cb_test += 1,
    )
    # cb_test should be greater than one if the user_info_callback got called
    @test cb_test > 0

    # Test that if dt is not adjusted based on final time the CFL is correct
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        Courant_number = CFL,
        timeend_dt_adjust = false,
    )
    CFL_nondiff = ClimateMachine.DGMethods.courant(
        ClimateMachine.Courant.nondiffusive_courant,
        solver_config,
    )
    @test CFL_nondiff ≈ CFL

    # Test that if dt is not adjusted based on final time the CFL is correct
    for fixed_number_of_steps in (-1, 0, 10)
        solver_config = ClimateMachine.SolverConfiguration(
            t0,
            timeend,
            driver_config,
            Courant_number = CFL,
            fixed_number_of_steps = fixed_number_of_steps,
        )
        # negative means "no fixed step count"; otherwise timeend is derived
        if fixed_number_of_steps < 0
            @test solver_config.timeend == timeend
        else
            @test solver_config.timeend ==
                  solver_config.dt * fixed_number_of_steps
            @test solver_config.numberofsteps == fixed_number_of_steps
        end
    end
end

main()

================================================
FILE: test/Driver/mms3.jl
================================================

using ClimateMachine
ClimateMachine.init()
using ClimateMachine.Atmos
using ClimateMachine.BalanceLaws
using ClimateMachine.Orientations: NoOrientation
using ClimateMachine.ConfigTypes
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Topologies
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates

import Thermodynamics: total_specific_enthalpy
import ClimateMachine.BalanceLaws: source, prognostic_vars

using CLIMAParameters
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()
import CLIMAParameters
# Assume zero reference temperature
CLIMAParameters.Planet.T_0(::EarthParameterSet) = 0

using LinearAlgebra
using MPI
using StaticArrays
using Test
using UnPack

const clima_dir = dirname(dirname(pathof(ClimateMachine)));
# generated manufactured solution: defines ρ_g, U_g, …, Sρ_g, …, μ_exact
include(joinpath(
    clima_dir,
    "test",
    "Numerics",
    "DGMethods",
    "compressible_Navier_Stokes",
    "mms_solution_generated.jl",
))

total_specific_enthalpy(ts::PhaseDry{FT}, e_tot::FT) where {FT <: Real} =
    zero(FT)

# Initialize the state from the generated manufactured solution at time t.
function mms3_init_state!(problem, bl, state::Vars, aux::Vars, localgeo, t)
    (x1, x2, x3) = localgeo.coord
    state.ρ = ρ_g(t, x1, x2, x3, Val(3))
    state.ρu = SVector(
        U_g(t, x1, x2, x3, Val(3)),
        V_g(t, x1, x2, x3, Val(3)),
        W_g(t, x1, x2, x3, Val(3)),
    )
    state.energy.ρe = E_g(t, x1, x2, x3, Val(3))
end

# Source terms that force the equations toward the manufactured solution.
struct MMSSource{N} <: TendencyDef{Source} end

prognostic_vars(::MMSSource{N}) where {N} = (Mass(), Momentum(), Energy())

function source(::Mass, s::MMSSource{N}, m, args) where {N}
    @unpack aux, t = args
    x1, x2, x3 = aux.coord
    return Sρ_g(t, x1, x2, x3, Val(N))
end

function source(::Momentum, s::MMSSource{N}, m, args) where {N}
    @unpack aux, t = args
    x1, x2, x3 = aux.coord
    return SVector(
        SU_g(t, x1, x2, x3, Val(N)),
        SV_g(t, x1, x2, x3, Val(N)),
        SW_g(t, x1, x2, x3, Val(N)),
    )
end

function source(::Energy, s::MMSSource{N}, m, args) where {N}
    @unpack aux, t = args
    x1, x2, x3 = aux.coord
    return SE_g(t, x1, x2, x3, Val(N))
end

# Method-of-manufactured-solutions test on a warped grid; also exercises the
# checkpoint/restart path and checks the final error against a stored value.
function main()
    FT = Float64

    # DG polynomial order
    N = 4

    t0 = FT(0)
    timeend = FT(1)

    # round dt so the end time is hit exactly
    ode_dt = 0.00125
    nsteps = ceil(Int64, timeend / ode_dt)
    ode_dt = timeend / nsteps

    ode_solver_type = ClimateMachine.ExplicitSolverType(
        solver_method = LSRK54CarpenterKennedy,
    )

    expected_result = FT(3.403104838700577e-02)

    problem = AtmosProblem(
        boundaryconditions = (InitStateBC(),),
        init_state_prognostic = mms3_init_state!,
    )
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = NoReferenceState(),
        turbulence = ConstantDynamicViscosity(FT(μ_exact), WithDivergence()),
        moisture = DryModel(),
    )
    model = AtmosModel{FT}(
        AtmosLESConfigType,
        physics;
        problem = problem,
        orientation = NoOrientation(),
        source = (MMSSource{3}(),),
    )

    brickrange = (
        range(FT(0); length = 5, stop = 1),
        range(FT(0); length = 5, stop = 1),
        range(FT(0); length = 5, stop = 1),
    )
    topl = BrickTopology(
        MPI.COMM_WORLD,
        brickrange,
        periodicity = (false, false, false),
        connectivity = :face,
    )
    # smooth mesh warp so the test runs on a non-trivial geometry
    warpfun = (x1, x2, x3) -> begin
        (
            x1 + (x1 - 1 / 2) * cos(2 * π * x2 * x3) / 4,
            x2 + exp(sin(2π * (x1 * x2 + x3))) / 20,
            x3 + x1 / 4 + x2^2 / 2 + sin(x1 * x2 * x3),
        )
    end
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ClimateMachine.array_type(),
        polynomialorder = N,
        meshwarp = warpfun,
    )
    driver_config = ClimateMachine.DriverConfiguration(
        AtmosLESConfigType(),
        "MMS3",
        (N, N),
        FT,
        ClimateMachine.array_type(),
        param_set,
        model,
        MPI.COMM_WORLD,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        nothing,
        nothing, # filter
        ClimateMachine.AtmosLESSpecificInfo(),
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        ode_dt = ode_dt,
    )
    Q₀ = solver_config.Q

    # turn on checkpointing
    ClimateMachine.Settings.checkpoint = "300steps"
    ClimateMachine.Settings.checkpoint_keep_one = false

    # run the simulation
    ClimateMachine.invoke!(solver_config)

    # turn off checkpointing and set up a restart
    ClimateMachine.Settings.checkpoint = "never"
    ClimateMachine.Settings.restart_from_num = 2

    # the solver configuration is where the restart is set up
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        ode_dt = ode_dt,
    )

    # run the restarted simulation
    ClimateMachine.invoke!(solver_config)

    # test correctness
    dg = DGModel(driver_config)
    Qe = init_ode_state(dg, timeend)
    result = euclidean_distance(Q₀, Qe)
    @test result ≈ expected_result
end

main()

================================================
FILE: test/Driver/runtests.jl
================================================

using MPI, Test
include("../testhelpers.jl")

# Each driver test runs as its own MPI job.
@testset "Driver" begin
    runmpi(joinpath(@__DIR__, "cr_unit_tests.jl"), ntasks = 1)
    runmpi(joinpath(@__DIR__, "les_driver_test.jl"), ntasks = 1)
    runmpi(joinpath(@__DIR__, "mms3.jl"), ntasks = 2)
end

================================================
FILE: test/InputOutput/VTK/runtests.jl
================================================

module TestVTK

using Test
using GaussQuadrature: legendre, both
using ClimateMachine.VTK: writemesh_highorder, writemesh_raw

# Write warped 1/2/3-d meshes (with two scalar fields) through both mesh
# writers and check the expected .vtu filename is returned.
@testset "VTK" begin
    for writemesh in (writemesh_highorder, writemesh_raw)
        # only the raw writer supports anisotropic polynomial orders
        Ns = writemesh == writemesh_raw ? ((4, 4, 4), (3, 5, 7)) : ((4, 4, 4),)
        for N in Ns
            for dim in 1:3
                nelem = 3
                T = Float64
                Nq = N .+ 1
                # Legendre-Gauss-Lobatto nodes per direction; unused
                # directions collapse to a single point
                r = legendre(T, Nq[1], both)[1]
                s = dim < 2 ? [0] : legendre(T, Nq[2], both)[1]
                t = dim < 3 ? [0] : legendre(T, Nq[3], both)[1]
                Nq1 = length(r)
                Nq2 = length(s)
                Nq3 = length(t)
                Np = Nq1 * Nq2 * Nq3
                x1 = Array{T, 4}(undef, Nq1, Nq2, Nq3, nelem)
                x2 = Array{T, 4}(undef, Nq1, Nq2, Nq3, nelem)
                x3 = Array{T, 4}(undef, Nq1, Nq2, Nq3, nelem)
                # lay elements out side by side along x1
                for e in 1:nelem, k in 1:Nq3, j in 1:Nq2, i in 1:Nq1
                    xoffset = nelem + 1 - 2e
                    x1[i, j, k, e], x2[i, j, k, e], x3[i, j, k, e] =
                        r[i] - xoffset, s[j], t[k]
                end
                # warp the coordinates so the mesh is not axis-aligned
                if dim == 1
                    x1 = x1 .^ 3
                elseif dim == 2
                    x1, x2 = x1 + sin.(π * x2) / 5, x2 + exp.(-x1 .^ 2)
                else
                    x1, x2, x3 = x1 + sin.(π * x2) / 5,
                        x2 + exp.(-hypot.(x1, x3) .^ 2),
                        x3 + sin.(π * x1) / 5
                end
                d = exp.(sin.(hypot.(x1, x2, x3)))
                s = copy(d)
                if dim == 1
                    @test "test$(dim)d.vtu" == writemesh(
                        "test$(dim)d",
                        x1;
                        fields = (("d", d), ("s", s)),
                    )[1]
                    @test "test$(dim)d.vtu" == writemesh(
                        "test$(dim)d",
                        x1;
                        x2 = x2,
                        fields = (("d", d), ("s", s)),
                    )[1]
                    @test "test$(dim)d.vtu" == writemesh(
                        "test$(dim)d",
                        x1;
                        x2 = x2,
                        x3 = x3,
                        fields = (("d", d), ("s", s)),
                    )[1]
                elseif dim == 2
                    @test "test$(dim)d.vtu" == writemesh(
                        "test$(dim)d",
                        x1,
                        x2;
                        fields = (("d", d), ("s", s)),
                    )[1]
                    @test "test$(dim)d.vtu" == writemesh(
                        "test$(dim)d",
                        x1,
                        x2;
                        x3 = x3,
                        fields = (("d", d), ("s", s)),
                    )[1]
                elseif dim == 3
                    @test "test$(dim)d.vtu" == writemesh(
                        "test$(dim)d",
                        x1,
                        x2,
                        x3;
                        fields = (("d", d), ("s", s)),
                    )[1]
                end
            end
        end
    end
end

using MPI
MPI.Initialized() || MPI.Init()

using ClimateMachine.Mesh.Topologies: BrickTopology
using ClimateMachine.Mesh.Grids: DiscontinuousSpectralElementGrid
using ClimateMachine.VTK: writevtk_helper

# Exercise writevtk_helper on warped DG grids, including degenerate
# polynomial orders (zeros), in raw and high-order sampling modes.
let
    mpicomm = MPI.COMM_SELF
    for FT in (Float64,) #Float32)
        for dim in 2:3
            for _N in ((2, 3, 4), (0, 2, 5), (3, 0, 0), (0, 0, 0))
                N = _N[1:dim]
                if dim == 2
                    Ne = (4, 5)
                    brickrange = (
                        range(FT(0); length = Ne[1] + 1, stop = 1),
                        range(FT(0); length = Ne[2] + 1, stop = 1),
                    )
                    topl = BrickTopology(
                        mpicomm,
                        brickrange,
                        periodicity = (false, false),
                        connectivity = :face,
                    )
                    warpfun = (x1, x2, _) -> begin
                        (x1 + sin(x1 * x2), x2 + sin(2 * x1 * x2), 0)
                    end
                elseif dim == 3
                    Ne = (3, 4, 5)
                    brickrange = (
                        range(FT(0); length = Ne[1] + 1, stop = 1),
                        range(FT(0); length = Ne[2] + 1, stop = 1),
                        range(FT(0); length = Ne[3] + 1, stop = 1),
                    )
                    topl = BrickTopology(
                        mpicomm,
                        brickrange,
                        periodicity = (false, false, false),
                        connectivity = :face,
                    )
                    warpfun = (x1, x2, x3) -> begin
                        (
                            x1 + (x1 - 1 / 2) * cos(2 * π * x2 * x3) / 4,
                            x2 + exp(sin(2π * (x1 * x2 + x3))) / 20,
                            x3 + x1 / 4 + x2^2 / 2 + sin(x1 * x2 * x3),
                        )
                    end
                end
                grid = DiscontinuousSpectralElementGrid(
                    topl,
                    FloatType = FT,
                    DeviceArray = Array,
                    polynomialorder = N,
                    meshwarp = warpfun,
                )
                # random 3-field state of the right size for this grid
                Q = rand(FT, prod(N .+ 1), 3, prod(Ne))
                prefix = "test$(dim)d_raw$(prod(ntuple(i->"_$(N[i])", dim)))"
                @test "$(prefix).vtu" == writevtk_helper(
                    prefix,
                    grid.vgeo,
                    Q,
                    grid,
                    ("a", "b", "c");
                    number_sample_points = 0,
                )[1]
                prefix = "test$(dim)d_high_order$(prod(ntuple(i->"_$(N[i])", dim)))"
                @test "$(prefix).vtu" == writevtk_helper(
                    prefix,
                    grid.vgeo,
                    Q,
                    grid,
                    ("a", "b", "c");
                    number_sample_points = 10,
                )[1]
            end
        end
    end
end

end #module TestVTK

================================================
FILE: test/InputOutput/Writers/runtests.jl
================================================

module TestWriters

using Dates
using NCDatasets
using OrderedCollections
using Test

using ClimateMachine.Writers

# Round-trip test for NetCDFWriter: init_data/append_data then read the file
# back and compare dimensions and variables.
@testset "Writers" begin
    odims = OrderedDict(
        "x" => (collect(1:5), Dict()),
        "y" => (collect(1:5), Dict()),
        "z" => (collect(1010:10:1050), Dict()),
    )
    ovartypes = OrderedDict(
        "v1" => (("x", "y", "z"), Float64, Dict()),
        "v2" => (("x", "y", "z"), Float64, Dict()),
    )
    vals1 = rand(5, 5, 5)
    vals2 = rand(5, 5, 5)
    nc = NetCDFWriter()
    nfn, _ = mktemp()
    nfull = full_name(nc, nfn)
    # with overwrite disabled, init_data on an existing file must fail
    touch(nfull)
    @test_throws ErrorException init_data(nc, nfn, true, odims, ovartypes)
    rm(nfull)
    init_data(nc, nfn, false, odims, ovartypes)
    append_data(nc, OrderedDict("v1" => vals1, "v2" => vals2), 2.0)
    NCDataset(nfull, "r") do nds
        xdim = nds["x"][:]
        ydim = nds["y"][:]
        zdim = nds["z"][:]
        @test xdim == odims["x"][1]
        @test ydim == odims["y"][1]
        @test zdim == odims["z"][1]
        # reading an undeclared variable must throw
        @test try
            adim = nds["a"][:]
            adim == ones(5)
            false
        catch e
            true
        end
        t = nds["time"][:]
        v1 = nds["v1"][:]
        v2 = nds["v2"][:]
        @test length(t) == 1
        # time 2.0 (seconds) relative to the writer's 1900-01-01 epoch
        @test t[1] == DateTime(1900, 1, 1, 0, 0, 2)
        @test v1[:, :, :, 1] == vals1
        @test v2[:, :, :, 1] == vals2
    end
end

end #module TestWriters

================================================
FILE: test/InputOutput/runtests.jl
================================================

using Test, Pkg

# Runs the requested InputOutput submodule test suites (all by default).
@testset "InputOutput" begin
    all_tests = isempty(ARGS) || "all" in ARGS ? true : false
    for submodule in ["VTK", "Writers"]
        if all_tests ||
           "$submodule" in ARGS ||
           "InputOutput/$submodule" in ARGS ||
           "InputOutput" in ARGS
            include_test(submodule)
        end
    end
end

================================================
FILE: test/Land/Model/Artifacts.toml
================================================

[richards]
git-tree-sha1 = "ff73fa6a0b6a807e71a6921f7ef7d0befe776edd"

[richards_sand]
git-tree-sha1 = "b0dc82dd02159c646e909bfb61170d3b9dc347f3"

[tiltedv]
git-tree-sha1 = "db27235cb7ce2b7674607876da15d1635906b512"

================================================
FILE: test/Land/Model/freeze_thaw_alone.jl
================================================

# Test that freeze thaw alone conserves water mass
using MPI
using OrderedCollections
using StaticArrays
using Statistics
using Test

using CLIMAParameters
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()
using CLIMAParameters.Planet: ρ_cloud_liq
using CLIMAParameters.Planet: ρ_cloud_ice

using ClimateMachine
using ClimateMachine.Land
using ClimateMachine.Land.SoilWaterParameterizations
using ClimateMachine.Land.SoilHeatParameterizations
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state

@testset "Freeze thaw alone" begin
    # placeholder model/parameter types for building a throwaway grid
    struct tmp_model <: BalanceLaw end
    struct tmp_param_set <: AbstractParameterSet end

    # Build a temporary single-stack configuration just to measure the
    # minimum node distance, which sizes the PhaseChange source's Δz.
    function get_grid_spacing(
        N_poly::Int64,
        nelem_vert::Int64,
        zmax::FT,
        zmin::FT,
    ) where {FT}
        test_config = ClimateMachine.SingleStackConfiguration(
            "TmpModel",
            N_poly,
            nelem_vert,
            zmax,
            tmp_param_set(),
            tmp_model();
            zmin = zmin,
        )
        Δ = min_node_distance(test_config.grid)
        return Δ
    end

    # Initialize soil water (liquid + ice) and the consistent internal energy.
    function init_soil_water!(land, state, aux, coordinates, time)
        ϑ_l = eltype(state)(land.soil.water.initialϑ_l(aux))
        θ_i = eltype(state)(land.soil.water.initialθ_i(aux))
        state.soil.water.ϑ_l = ϑ_l
        state.soil.water.θ_i = θ_i
        param_set = land.param_set
        θ_l =
            volumetric_liquid_fraction(ϑ_l, land.soil.param_functions.porosity)
        ρc_ds = land.soil.param_functions.ρc_ds
        ρc_s = volumetric_heat_capacity(θ_l, θ_i, ρc_ds, param_set)
        state.soil.heat.ρe_int = volumetric_internal_energy(
            θ_i,
            ρc_s,
            land.soil.heat.initialT(aux),
            param_set,
        )
    end

    FT = Float64
    ClimateMachine.init()

    N_poly = 1
    nelem_vert = 30
    zmax = FT(0)
    zmin = FT(-1)
    t0 = FT(0)
    timeend = FT(60 * 60 * 24)
    dt = FT(1800)

    Δ = get_grid_spacing(N_poly, nelem_vert, zmax, zmin)
    freeze_thaw_source = PhaseChange{FT}(Δz = Δ)

    ρp = FT(2700) # kg/m^3
    ρc_ds = FT(2e06) # J/m^3/K
    # zero conductivity/storativity: no water transport, phase change only
    Ksat = FT(0.0)
    S_s = FT(1e-4)
    wpf = WaterParamFunctions(FT; Ksat = Ksat, S_s = S_s)
    soil_param_functions = SoilParamFunctions(
        FT;
        porosity = 0.75,
        ν_ss_gravel = 0.0,
        ν_ss_om = 0.0,
        ν_ss_quartz = 0.5,
        ρc_ds = ρc_ds,
        ρp = ρp,
        κ_solid = 1.0,
        κ_sat_unfrozen = 1.0,
        κ_sat_frozen = 1.0,
        water = wpf,
    )

    # zero-flux water BCs so total water can only change via phase change
    bottom_flux = (aux, t) -> eltype(aux)(0.0)
    surface_flux = (aux, t) -> eltype(aux)(0.0)
    bc = LandDomainBC(
        bottom_bc = LandComponentBC(
            soil_water = Neumann(bottom_flux),
            soil_heat = Dirichlet((aux, t) -> eltype(aux)(280)),
        ),
        surface_bc = LandComponentBC(
            soil_water = Neumann(surface_flux),
            soil_heat = Dirichlet((aux, t) -> eltype(aux)(290)),
        ),
    )
    # start almost fully frozen so thawing occurs during the run
    ϑ_l0 = (aux) -> eltype(aux)(1e-10)
    θ_i0 = (aux) -> eltype(aux)(0.33)
    soil_water_model = SoilWaterModel(FT; initialϑ_l = ϑ_l0, initialθ_i = θ_i0)
    T_init = (aux) -> eltype(aux)(aux.z * 10 + 290)
    soil_heat_model = SoilHeatModel(FT; initialT = T_init)
    m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model)
    sources = (freeze_thaw_source,)
    m = LandModel(
        param_set,
        m_soil;
        boundary_conditions = bc,
        source = sources,
        init_state_prognostic = init_soil_water!,
    )

    driver_config = ClimateMachine.SingleStackConfiguration(
        "LandModel",
        N_poly,
        nelem_vert,
        zmax,
        param_set,
        m;
        zmin = zmin,
        numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
    )
    state_types = (Prognostic(), Auxiliary())
    initial =
        Dict[dict_of_nodal_states(solver_config, state_types; interp = true)]
    ClimateMachine.invoke!(solver_config)
    final =
        Dict[dict_of_nodal_states(solver_config, state_types; interp = true)]
    # total water mass = liquid mass + ice mass; must be conserved
    m_init =
        ρ_cloud_liq(param_set) * sum(initial[1]["soil.water.ϑ_l"]) .+
        ρ_cloud_ice(param_set) * sum(initial[1]["soil.water.θ_i"])
    m_final =
        ρ_cloud_liq(param_set) * sum(final[1]["soil.water.ϑ_l"]) .+
        ρ_cloud_ice(param_set) * sum(final[1]["soil.water.θ_i"])
    @test abs(m_final - m_init) < 1e-10
end

================================================
FILE: test/Land/Model/haverkamp_test.jl
================================================

# Test that Richard's equation agrees with solution from Bonan's book,
# simulation 8.2
using MPI
using OrderedCollections
using StaticArrays
using Statistics
using Dierckx
using Test
using Pkg.Artifacts
using DelimitedFiles

using CLIMAParameters
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using ClimateMachine
using ClimateMachine.Land
using ClimateMachine.Land.SoilWaterParameterizations
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.SystemSolvers
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state
using ArtifactWrappers

# Reference data from Bonan's simulations, fetched on demand (skipped in CI).
haverkamp_dataset = ArtifactWrapper(
    @__DIR__,
    isempty(get(ENV, "CI", "")),
    "richards",
    ArtifactFile[ArtifactFile(
        url = "https://caltech.box.com/shared/static/dfijf07io7h5dk1k87saaewgsg9apq8d.csv",
        filename = "bonan_haverkamp_data.csv",
    ),],
)
haverkamp_dataset_path = get_data_folder(haverkamp_dataset)

bonan_sand_dataset = ArtifactWrapper(
    @__DIR__,
    isempty(get(ENV, "CI", "")),
    "richards_sand",
    ArtifactFile[ArtifactFile(
        url = "https://caltech.box.com/shared/static/2vk7bvyjah8xd5b7wxcqy72yfd2myjss.csv",
        filename = "sand_bonan_sp801.csv",
    ),],
)
bonan_sand_dataset_path = get_data_folder(bonan_sand_dataset)

@testset "Richard's equation - Haverkamp test" begin
    ClimateMachine.init()
    FT = Float64

    function init_soil_water!(land, state, aux, localgeo, time)
        myfloat = eltype(aux)
        state.soil.water.ϑ_l = myfloat(land.soil.water.initialϑ_l(aux))
        state.soil.water.θ_i = myfloat(land.soil.water.initialθ_i(aux))
    end

    # temperature is prescribed; only the water equation is evolved
    soil_heat_model = PrescribedTemperatureModel()

    Ksat = FT(0.0443 / (3600 * 100))
    S_s = FT(1e-3)
    water_param_functions = WaterParamFunctions(FT; Ksat = Ksat, S_s = S_s)
    soil_param_functions =
        SoilParamFunctions(FT; porosity = 0.495, water = water_param_functions)

    # near-saturated surface, free drainage (flux = -K) at the bottom
    surface_state = (aux, t) -> eltype(aux)(0.494)
    bottom_flux = (aux, t) -> aux.soil.water.K * eltype(aux)(-1)
    ϑ_l0 = (aux) -> eltype(aux)(0.24)
    bc = LandDomainBC(
        bottom_bc = LandComponentBC(soil_water = Neumann(bottom_flux)),
        surface_bc = LandComponentBC(soil_water = Dirichlet(surface_state)),
    )
    soil_water_model = SoilWaterModel(
        FT;
        moisture_factor = MoistureDependent{FT}(),
        hydraulics = Haverkamp(FT;),
        initialϑ_l = ϑ_l0,
    )

    m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model)
    sources = ()
    m = LandModel(
        param_set,
        m_soil;
        boundary_conditions = bc,
        source = sources,
        init_state_prognostic = init_soil_water!,
    )

    N_poly = 5
    nelem_vert = 10

    # Specify the domain boundaries
    zmax = FT(0)
    zmin = FT(-1)

    driver_config = ClimateMachine.SingleStackConfiguration(
        "LandModel",
        N_poly,
        nelem_vert,
        zmax,
        param_set,
        m;
        zmin = zmin,
        numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
    )

    t0 = FT(0)
    timeend = FT(60 * 60 * 24)
    dt = FT(6)

    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
    )
    mygrid = solver_config.dg.grid
    Q = solver_config.Q
    aux = solver_config.dg.state_auxiliary

    ClimateMachine.invoke!(solver_config)

    # extract the moisture profile and node heights from the final state
    ϑ_l_ind = varsindex(vars_state(m, Prognostic(), FT), :soil, :water, :ϑ_l)
    ϑ_l = Array(Q[:, ϑ_l_ind, :][:])
    z_ind = varsindex(vars_state(m, Auxiliary(), FT), :z)
    z = Array(aux[:, z_ind, :][:])

    # Compare with Bonan simulation data at 1 day.
    data = joinpath(haverkamp_dataset_path, "bonan_haverkamp_data.csv")
    ds_bonan = readdlm(data, ',')
    bonan_moisture = reverse(ds_bonan[:, 1])
    bonan_z = reverse(ds_bonan[:, 2]) ./ 100.0

    # Create an interpolation from the Bonan data
    bonan_moisture_continuous = Spline1D(bonan_z, bonan_moisture)
    bonan_at_clima_z = bonan_moisture_continuous.(z)
    MSE = mean((bonan_at_clima_z .- ϑ_l) .^ 2.0)
    @test MSE < 1e-5
end

# NOTE(review): this testset is truncated at the end of the visible extract;
# the remainder continues in the original file.
@testset "Richard's equation - Haverkamp implicit test" begin
    ClimateMachine.init()
    FT = Float64

    function init_soil_water!(land, state, aux, localgeo, time)
        myfloat = eltype(aux)
        state.soil.water.ϑ_l = myfloat(land.soil.water.initialϑ_l(aux))
        state.soil.water.θ_i = myfloat(land.soil.water.initialθ_i(aux))
    end

    soil_heat_model = PrescribedTemperatureModel()

    Ksat = FT(0.0443 / (3600 * 100))
    S_s = FT(1e-3)
    water_param_functions = WaterParamFunctions(FT; Ksat = Ksat, S_s = S_s)
    soil_param_functions =
        SoilParamFunctions(FT; porosity = 0.495, water = water_param_functions)

    surface_value = FT(0.494)
    bottom_flux_multiplier = FT(1.0)
    initial_moisture = FT(0.24)
    surface_state = (aux, t) -> surface_value
    bottom_flux = (aux, t) -> aux.soil.water.K * bottom_flux_multiplier
    ϑ_l0 = (aux) -> initial_moisture
    bc = LandDomainBC(
        bottom_bc = LandComponentBC(soil_water = Neumann(bottom_flux)),
        surface_bc = LandComponentBC(soil_water = Dirichlet(surface_state)),
    )
    soil_water_model = SoilWaterModel(
        FT;
        moisture_factor = MoistureDependent{FT}(),
        hydraulics = Haverkamp(FT;),
        initialϑ_l = ϑ_l0,
    )

    m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model)
    sources = ()
    m = LandModel(
        param_set,
        m_soil;
        boundary_conditions = bc,
        source = sources,
        init_state_prognostic = init_soil_water!,
    )

    N_poly = 5
    nelem_vert = 10

    # Specify the domain boundaries
    zmax = FT(0)
    zmin = FT(-1)

    driver_config = ClimateMachine.SingleStackConfiguration(
        "LandModel",
        N_poly,
        nelem_vert,
        zmax,
        param_set,
        m;
        zmin = zmin,
        numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
    )

    t0 = FT(0)
    timeend =
FT(60 * 60 * 24) dt = FT(200) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ) #################### Change the ode_solver dg = solver_config.dg Q = solver_config.Q vdg = DGModel( driver_config; state_auxiliary = dg.state_auxiliary, direction = VerticalDirection(), ) linearsolver = BatchedGeneralizedMinimalResidual( dg, Q; max_subspace_size = 30, atol = -1.0, rtol = 1e-9, ) """ N(q)(Q) = Qhat => F(Q) = N(q)(Q) - Qhat F(Q) == 0 ||F(Q^i) || / ||F(Q^0) || < tol """ nonlinearsolver = JacobianFreeNewtonKrylovSolver(Q, linearsolver; tol = 1e-9) ode_solver = ARK548L2SA2KennedyCarpenter( dg, vdg, NonLinearBackwardEulerSolver( nonlinearsolver; isadjustable = true, preconditioner_update_freq = 100, ), Q; dt = dt, t0 = 0, split_explicit_implicit = false, variant = NaiveVariant(), ) solver_config.solver = ode_solver ####################################### mygrid = solver_config.dg.grid aux = solver_config.dg.state_auxiliary ClimateMachine.invoke!(solver_config) ϑ_l_ind = varsindex(vars_state(m, Prognostic(), FT), :soil, :water, :ϑ_l) ϑ_l = Array(Q[:, ϑ_l_ind, :][:]) z_ind = varsindex(vars_state(m, Auxiliary(), FT), :z) z = Array(aux[:, z_ind, :][:]) # Compare with Bonan simulation data at 1 day. 
data = joinpath(haverkamp_dataset_path, "bonan_haverkamp_data.csv") ds_bonan = readdlm(data, ',') bonan_moisture = reverse(ds_bonan[:, 1]) bonan_z = reverse(ds_bonan[:, 2]) ./ 100.0 # Create an interpolation from the Bonan data bonan_moisture_continuous = Spline1D(bonan_z, bonan_moisture) bonan_at_clima_z = bonan_moisture_continuous.(z) MSE = mean((bonan_at_clima_z .- ϑ_l) .^ 2.0) @test MSE < 1e-5 end @testset "Richard's equation - Sand van Genuchten test" begin ClimateMachine.init() FT = Float64 function init_soil_water!(land, state, aux, localgeo, time) myfloat = eltype(aux) state.soil.water.ϑ_l = myfloat(land.soil.water.initialϑ_l(aux)) state.soil.water.θ_i = myfloat(land.soil.water.initialθ_i(aux)) end soil_heat_model = PrescribedTemperatureModel() wpf = WaterParamFunctions( FT; Ksat = 34 / (3600 * 100), θ_r = 0.075, S_s = 1e-3, ) soil_param_functions = SoilParamFunctions(FT; porosity = 0.287, water = wpf) surface_state = (aux, t) -> eltype(aux)(0.267) bottom_flux = (aux, t) -> aux.soil.water.K * eltype(aux)(-1) ϑ_l0 = (aux) -> eltype(aux)(0.1) bc = LandDomainBC( bottom_bc = LandComponentBC(soil_water = Neumann(bottom_flux)), surface_bc = LandComponentBC(soil_water = Dirichlet(surface_state)), ) soil_water_model = SoilWaterModel( FT; moisture_factor = MoistureDependent{FT}(), hydraulics = vanGenuchten(FT; n = 3.96, α = 2.7), initialϑ_l = ϑ_l0, ) m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model) sources = () m = LandModel( param_set, m_soil; boundary_conditions = bc, source = sources, init_state_prognostic = init_soil_water!, ) N_poly = 1 nelem_vert = 150 # Specify the domain boundaries zmax = FT(0) zmin = FT(-1.5) driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ) t0 = FT(0) timeend = FT(60 * 60 * 0.8) dt = FT(0.5) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt 
= dt, ) mygrid = solver_config.dg.grid Q = solver_config.Q aux = solver_config.dg.state_auxiliary ClimateMachine.invoke!(solver_config) ϑ_l_ind = varsindex(vars_state(m, Prognostic(), FT), :soil, :water, :ϑ_l) ϑ_l = Array(Q[:, ϑ_l_ind, :][:]) z_ind = varsindex(vars_state(m, Auxiliary(), FT), :z) z = Array(aux[:, z_ind, :][:]) # Compare with Bonan simulation data at 0.8 h data = joinpath(bonan_sand_dataset_path, "sand_bonan_sp801.csv") ds_bonan = readdlm(data, ',') bonan_moisture = reverse(ds_bonan[:, 1]) bonan_z = reverse(ds_bonan[:, 2]) ./ 100.0 # Create an interpolation from the Bonan data bonan_moisture_continuous = Spline1D(bonan_z, bonan_moisture) bonan_at_clima_z = bonan_moisture_continuous.(z) MSE = mean((bonan_at_clima_z .- ϑ_l) .^ 2.0) @test MSE < 1e-5 end ================================================ FILE: test/Land/Model/heat_analytic_unit_test.jl ================================================ # Test heat equation agrees with analytic solution to problem 55 on page 28 in https://ocw.mit.edu/courses/mathematics/18-303-linear-partial-differential-equations-fall-2006/lecture-notes/heateqni.pdf using MPI using OrderedCollections using StaticArrays using Statistics using Dierckx using Test using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SoilWaterParameterizations using ClimateMachine.Land.SoilHeatParameterizations using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils using ClimateMachine.BalanceLaws function init_soil!(land, state, aux, localgeo, time) FT = eltype(state) ϑ_l, θ_i = 
get_water_content(land.soil.water, aux, state, time) θ_l = volumetric_liquid_fraction(ϑ_l, land.soil.param_functions.porosity) param_set = parameter_set(land) ρc_s = volumetric_heat_capacity( θ_l, θ_i, land.soil.param_functions.ρc_ds, param_set, ) state.soil.heat.ρe_int = FT(volumetric_internal_energy( θ_i, ρc_s, land.soil.heat.initialT(aux), param_set, )) end const FT = Float64 @testset "Heat analytic unit test" begin ClimateMachine.init() soil_param_functions = SoilParamFunctions( FT; porosity = 0.495, ν_ss_gravel = 0.1, ν_ss_om = 0.1, ν_ss_quartz = 0.1, ρc_ds = 0.43314518988433487, κ_solid = 8.0, ρp = 2700.0, κ_sat_unfrozen = 0.57, κ_sat_frozen = 2.29, ) heat_surface_state = (aux, t) -> eltype(aux)(0.0) tau = FT(1) # period (sec) A = FT(5) # amplitude (K) ω = FT(2 * pi / tau) heat_bottom_state = (aux, t) -> A * cos(ω * t) T_init = (aux) -> eltype(aux)(0.0) soil_water_model = PrescribedWaterModel( (aux, t) -> eltype(aux)(0.0), (aux, t) -> eltype(aux)(0.0), ) bc = LandDomainBC( bottom_bc = LandComponentBC(soil_heat = Dirichlet(heat_bottom_state)), surface_bc = LandComponentBC(soil_heat = Dirichlet(heat_surface_state)), ) soil_heat_model = SoilHeatModel(FT; initialT = T_init) m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model) sources = () m = LandModel( param_set, m_soil; boundary_conditions = bc, source = sources, init_state_prognostic = init_soil!, ) N_poly = 5 nelem_vert = 10 # Specify the domain boundaries zmax = FT(1) zmin = FT(0) driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ) t0 = FT(0) timeend = FT(2) dt = FT(1e-4) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ) mygrid = solver_config.dg.grid aux = solver_config.dg.state_auxiliary ClimateMachine.invoke!(solver_config) t = ODESolvers.gettime(solver_config.solver) z_ind = varsindex(vars_state(m, 
Auxiliary(), FT), :z) z = Array(aux[:, z_ind, :][:]) T_ind = varsindex(vars_state(m, Auxiliary(), FT), :soil, :heat, :T) T = Array(aux[:, T_ind, :][:]) num = exp.(sqrt(ω / 2) * (1 + im) * (1 .- z)) .- exp.(-sqrt(ω / 2) * (1 + im) * (1 .- z)) denom = exp(sqrt(ω / 2) * (1 + im)) - exp.(-sqrt(ω / 2) * (1 + im)) analytic_soln = real(num .* A * exp(im * ω * timeend) / denom) MSE = mean((analytic_soln .- T) .^ 2.0) @test eltype(aux) == FT @test MSE < 1e-5 end ================================================ FILE: test/Land/Model/prescribed_twice.jl ================================================ # Test that the land model still runs, even with the lowest/simplest # version of soil (prescribed heat and prescribed water - no state # variables) using MPI using OrderedCollections using StaticArrays using Test using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SoilWaterParameterizations using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state @testset "Prescribed Models" begin ClimateMachine.init() FT = Float64 function init_soil_water!(land, state, aux, localgeo, time) end soil_water_model = PrescribedWaterModel() soil_heat_model = PrescribedTemperatureModel() soil_param_functions = nothing m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model) sources = () m = LandModel( param_set, m_soil; source = sources, init_state_prognostic = init_soil_water!, ) N_poly = 5 nelem_vert = 10 # Specify 
the domain boundaries zmax = FT(0) zmin = FT(-1) driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ) t0 = FT(0) timeend = FT(60) dt = FT(1) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ) mygrid = solver_config.dg.grid Q = solver_config.Q aux = solver_config.dg.state_auxiliary ClimateMachine.invoke!(solver_config;) t = ODESolvers.gettime(solver_config.solver) state_vars = SingleStackUtils.get_vars_from_nodal_stack( mygrid, Q, vars_state(m, Prognostic(), FT), ) aux_vars = SingleStackUtils.get_vars_from_nodal_stack( mygrid, aux, vars_state(m, Auxiliary(), FT), ) #Make sure it runs, and that there are no state variables, and only "x,y,z" as aux. @test t == timeend @test size(Base.collect(keys(aux_vars)))[1] == 3 @test size(Base.collect(keys(state_vars)))[1] == 0 end ================================================ FILE: test/Land/Model/runtests.jl ================================================ using Test, Pkg @testset "Land" begin include("test_heat_parameterizations.jl") include("test_water_parameterizations.jl") include("prescribed_twice.jl") include("freeze_thaw_alone.jl") include("test_overland_flow_analytic.jl") include("test_physical_bc.jl") include("test_radiative_energy_flux_functions.jl") end ================================================ FILE: test/Land/Model/soil_heterogeneity.jl ================================================ using MPI using OrderedCollections using StaticArrays using Statistics using Dierckx using Test using Pkg.Artifacts using DelimitedFiles using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SoilWaterParameterizations using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods 
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state

# Zero-flux boundaries top and bottom with a depth-dependent specific
# storativity S_s(z): the column should relax toward hydrostatic
# equilibrium.  The long integration (200 days) is compared against the
# analytic equilibrium profile in `soln` below.
@testset "hydrostatic test 1" begin
    ClimateMachine.init()
    FT = Float64

    # Set the prognostic water variables from the model's initial-condition
    # functions.
    function init_soil_water!(land, state, aux, localgeo, time)
        myfloat = eltype(aux)
        state.soil.water.ϑ_l = myfloat(land.soil.water.initialϑ_l(aux))
        state.soil.water.θ_i = myfloat(land.soil.water.initialθ_i(aux))
    end

    soil_heat_model = PrescribedTemperatureModel()

    # Spatially varying parameters, given as functions of the auxiliary
    # state (here: of z, via aux.z).
    Ksat = (aux) -> eltype(aux)(0.0443 / (3600 * 100))
    S_s = (aux) -> eltype(aux)((1e-3) * exp(-0.2 * aux.z))
    vgn = FT(2)
    wpf = WaterParamFunctions(FT; Ksat = Ksat, S_s = S_s)
    soil_param_functions = SoilParamFunctions(FT; porosity = 0.495, water = wpf)

    # No flux through either boundary.
    bottom_flux = (aux, t) -> eltype(aux)(0.0)
    surface_flux = bottom_flux
    ϑ_l0 = (aux) -> eltype(aux)(0.494)
    bc = LandDomainBC(
        bottom_bc = LandComponentBC(soil_water = Neumann(bottom_flux)),
        surface_bc = LandComponentBC(soil_water = Neumann(surface_flux)),
    )
    soil_water_model = SoilWaterModel(
        FT;
        moisture_factor = MoistureDependent{FT}(),
        hydraulics = vanGenuchten(FT; n = vgn),
        initialϑ_l = ϑ_l0,
    )

    m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model)
    sources = ()
    m = LandModel(
        param_set,
        m_soil;
        boundary_conditions = bc,
        source = sources,
        init_state_prognostic = init_soil_water!,
    )

    N_poly = 2
    nelem_vert = 20

    # Specify the domain boundaries
    zmax = FT(0)
    zmin = FT(-10)

    driver_config = ClimateMachine.SingleStackConfiguration(
        "LandModel",
        N_poly,
        nelem_vert,
        zmax,
        param_set,
        m;
        zmin = zmin,
        numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
    )

    t0 = FT(0)
    timeend = FT(60 * 60 * 24 * 200) # 200 days, in seconds
    dt = FT(500)
    n_outputs = 3
    every_x_simulation_time = ceil(Int, timeend / n_outputs)

    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
    )
    aux = solver_config.dg.state_auxiliary

    # Snapshot the nodal state at t = 0 and then at each callback firing;
    # dons_arr therefore ends up with n_outputs + 1 entries.
    state_types = (Prognostic(), Auxiliary())
    dons_arr =
        Dict[dict_of_nodal_states(solver_config, state_types; interp = true)]
    time_data = FT[0]
    callback =
        GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do
            dons = dict_of_nodal_states(solver_config, state_types; interp = true)
            push!(dons_arr, dons)
            push!(time_data, gettime(solver_config.solver))
            nothing
        end
    ClimateMachine.invoke!(solver_config; user_callbacks = (callback,))

    z = dons_arr[1]["z"]
    interface_z = -1.0395

    # van Genuchten hydrostatic moisture profile above the saturated zone.
    function hydrostatic_profile(z, zm, porosity, n, α)
        myf = eltype(z)
        m = FT(1 - 1 / n)
        S = FT((FT(1) + (α * (z - zm))^n)^(-m))
        return FT(S * porosity)
    end

    # Piecewise analytic equilibrium: saturated (storativity-controlled)
    # below the interface, unsaturated van Genuchten profile above.
    function soln(z, interface, porosity, n, α, δ, S_s)
        if z < interface
            return porosity + S_s * (interface - z) * exp(-δ * z)
        else
            return hydrostatic_profile(z, interface, porosity, n, α)
        end
    end

    # dons_arr[4] is the final snapshot (initial state + 3 callback outputs).
    MSE = mean(
        (
            soln.(z, interface_z, 0.495, vgn, 2.6, 0.2, 1e-3) .-
            dons_arr[4]["soil.water.ϑ_l"]
        ) .^ 2.0,
    )
    @test MSE < 1e-4
end

# Start from the analytic hydrostatic profile of a two-layer column (two
# van Genuchten parameter sets, switching at z = -1) and verify the solver
# maintains it to machine precision over 12 hours.
@testset "hydrostatic test 2" begin
    ClimateMachine.init()
    FT = Float64

    # Set the prognostic water variables from the model's initial-condition
    # functions.
    function init_soil_water!(land, state, aux, localgeo, time)
        myfloat = eltype(aux)
        state.soil.water.ϑ_l = myfloat(land.soil.water.initialϑ_l(aux))
        state.soil.water.θ_i = myfloat(land.soil.water.initialθ_i(aux))
    end

    soil_heat_model = PrescribedTemperatureModel()
    Ksat = (4.42 / 3600 / 100)
    S_s = 1e-3
    wpf = WaterParamFunctions(FT; Ksat = Ksat, S_s = S_s)
    soil_param_functions = SoilParamFunctions(FT, porosity = 0.6, water = wpf)

    # No flux through either boundary.
    bottom_flux = (aux, t) -> eltype(aux)(0.0)
    surface_flux = bottom_flux
    bc = LandDomainBC(
        bottom_bc = LandComponentBC(soil_water = Neumann(bottom_flux)),
        surface_bc = LandComponentBC(soil_water = Neumann(surface_flux)),
    )

    # NOTE(review): `sigmoid` appears unused in this testset.
    sigmoid(x, offset, width) =
        typeof(x)(exp((x - offset) / width) / (1 + exp((x - offset) / width)))

    # Analytic hydrostatic profile for the layered column; the inner helper
    # evaluates the van Genuchten retention curve for one parameter set.
    function soln(z::f, interface::f, porosity::f) where {f}
        function hydrostatic_profile(
            z::f,
            interface::f,
            porosity::f,
            n::f,
            α::f,
            m::f,
        )
            ψ_interface = f(-1)
            ψ = -(z - interface) + ψ_interface
            S = (f(1) + (-α * ψ)^n)^(-m)
            return S * porosity
        end
        if z < interface
            return hydrostatic_profile(
                z,
                interface,
                porosity,
                f(1.31),
                f(1.9),
                f(1) - f(1) / f(1.31),
            )
        else
            return hydrostatic_profile(
                z,
                interface,
                porosity,
                f(1.89),
                f(7.5),
                f(1) - f(1) / f(1.89),
            )
        end
    end

    ϑ_l0 = (aux) -> soln(aux.z, -1.0, 0.6)
    # Layered van Genuchten parameters, switching at z = -1.
    vgα(aux) = aux.z < -1.0 ? 1.9 : 7.5
    vgn(aux) = aux.z < -1.0 ? 1.31 : 1.89
    soil_water_model = SoilWaterModel(
        FT;
        moisture_factor = MoistureDependent{FT}(),
        hydraulics = vanGenuchten(FT; n = vgn, α = vgα),
        initialϑ_l = ϑ_l0,
    )

    m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model)
    sources = ()
    m = LandModel(
        param_set,
        m_soil;
        boundary_conditions = bc,
        source = sources,
        init_state_prognostic = init_soil_water!,
    )

    N_poly = 1
    nelem_vert = 80

    # Specify the domain boundaries
    zmax = FT(0)
    zmin = FT(-2)

    driver_config = ClimateMachine.SingleStackConfiguration(
        "LandModel",
        N_poly,
        nelem_vert,
        zmax,
        param_set,
        m;
        zmin = zmin,
        numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
    )

    t0 = FT(0)
    timeend = FT(60 * 60 * 12)
    dt = FT(5)

    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
    )

    ClimateMachine.invoke!(solver_config)

    state_types = (Prognostic(), Auxiliary())
    dons = dict_of_nodal_states(solver_config, state_types; interp = true)
    z = dons["z"]

    # The initial condition is the exact equilibrium, so the final state
    # should match it to machine precision.
    RMSE = mean(
        (soln.(z, Ref(-1.0), Ref(0.6)) .- dons["soil.water.ϑ_l"]) .^ 2.0,
    )^0.5
    @test RMSE < 2.0 * eps(FT)
end


================================================
FILE: test/Land/Model/test_bc.jl
================================================
# Test that the way we specify boundary conditions works as expected
using MPI
using OrderedCollections
using StaticArrays
using Statistics
using Test
using CLIMAParameters
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using ClimateMachine
using ClimateMachine.Land
using ClimateMachine.Land.SoilWaterParameterizations
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state

# Drive the bottom boundary with a time-varying Neumann flux and check that
# the gradient flux (K∇h) recorded at the bottom node reproduces the
# prescribed flux over time.
@testset "Boundary condition functions" begin
    ClimateMachine.init()
    FT = Float64

    # Set the prognostic water variables from the model's initial-condition
    # functions.
    function init_soil_water!(land, state, aux, localgeo, time)
        myfloat = eltype(state)
        state.soil.water.ϑ_l = myfloat(land.soil.water.initialϑ_l(aux))
        state.soil.water.θ_i = myfloat(land.soil.water.initialθ_i(aux))
    end

    wpf = WaterParamFunctions(FT; Ksat = 1e-7, S_s = 1e-3)
    soil_param_functions = SoilParamFunctions(FT; porosity = 0.75, water = wpf)

    # Sinusoidal bottom flux with period 300 s, scaled by the local
    # conductivity.
    bottom_flux_amplitude = FT(-3.0)
    f = FT(pi * 2.0 / 300.0)
    bottom_flux =
        (aux, t) -> bottom_flux_amplitude * sin(f * t) * aux.soil.water.K
    surface_state = (aux, t) -> eltype(aux)(0.2)
    ϑ_l0 = (aux) -> eltype(aux)(0.2)
    bc = LandDomainBC(
        bottom_bc = LandComponentBC(soil_water = Neumann(bottom_flux)),
        surface_bc = LandComponentBC(soil_water = Dirichlet(surface_state)),
    )

    soil_water_model = SoilWaterModel(FT; initialϑ_l = ϑ_l0)
    soil_heat_model = PrescribedTemperatureModel()
    m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model)
    sources = ()
    m = LandModel(
        param_set,
        m_soil;
        boundary_conditions = bc,
        source = sources,
        init_state_prognostic = init_soil_water!,
    )

    N_poly = 5
    nelem_vert = 50

    # Specify the domain boundaries
    zmax = FT(0)
    zmin = FT(-1)

    driver_config = ClimateMachine.SingleStackConfiguration(
        "LandModel",
        N_poly,
        nelem_vert,
        zmax,
        param_set,
        m;
        zmin = zmin,
        numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
    )

    t0 = FT(0)
    timeend = FT(300)
    dt = FT(0.05)

    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
    )
    mygrid = solver_config.dg.grid
    Q = solver_config.Q
    aux = solver_config.dg.state_auxiliary
    grads = solver_config.dg.state_gradient_flux

    # Index [3] selects the vertical component of the soil-water gradient
    # flux.
    K∇h_vert_ind = varsindex(vars_state(m, GradientFlux(), FT), :soil, :water)[3]
    K_ind = varsindex(vars_state(m, Auxiliary(), FT), :soil, :water, :K)

    n_outputs = 30
    every_x_simulation_time = ceil(Int, timeend / n_outputs)

    # Pre-allocate one dict per output step; the callback fills slots
    # 1..n_outputs-1 and the final state fills slot n_outputs below.
    dons_arr = Dict([k => Dict() for k in 1:n_outputs]...)
    iostep = [1]
    callback = GenericCallbacks.EveryXSimulationTime(
        every_x_simulation_time,
    ) do (init = false)
        t = ODESolvers.gettime(solver_config.solver)
        K = aux[:, K_ind, :]
        K∇h_vert = grads[:, K∇h_vert_ind, :]
        all_vars = Dict{String, Array}(
            "t" => [t],
            "K" => K,
            "K∇h_vert" => K∇h_vert,
        )
        dons_arr[iostep[1]] = all_vars
        iostep[1] += 1
        nothing
    end

    ClimateMachine.invoke!(solver_config; user_callbacks = (callback,))

    # Record the final state in the last output slot.
    t = ODESolvers.gettime(solver_config.solver)
    K = aux[:, K_ind, :]
    K∇h_vert = grads[:, K∇h_vert_ind, :]
    all_vars = Dict{String, Array}("t" => [t], "K" => K, "K∇h_vert" => K∇h_vert)
    dons_arr[n_outputs] = all_vars

    # Normalize the recorded bottom gradient flux by K so it can be compared
    # with the prescribed flux shape.
    computed_bottom_∇h =
        [dons_arr[k]["K∇h_vert"][1] for k in 1:n_outputs] ./
        [dons_arr[k]["K"][1] for k in 1:n_outputs]

    t = [dons_arr[k]["t"][1] for k in 1:n_outputs]
    # we need a -1 out in front here because the flux BC is on -K∇h
    prescribed_bottom_∇h = t -> FT(-1) * FT(-3.0 * sin(pi * 2.0 * t / 300.0))

    MSE = mean((prescribed_bottom_∇h.(t) .- computed_bottom_∇h) .^ 2.0)
    @test MSE < 1e-7
end


================================================
FILE: test/Land/Model/test_bc_3d.jl
================================================
# Test that the way we specify boundary conditions works as expected
using MPI
using OrderedCollections
using StaticArrays
using Statistics
using Test
using CLIMAParameters
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using ClimateMachine
using ClimateMachine.Land
using ClimateMachine.Land.SoilWaterParameterizations
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state

# 3-D version of the boundary-condition test: time-varying Neumann flux at
# the bottom, Dirichlet at the surface, zero-flux lateral boundaries.
# Checks the bottom flux matches the prescription and that the lateral
# gradient fluxes stay at machine-zero.
@testset "Boundary condition functions" begin
    ClimateMachine.init()
    FT = Float64

    # Set the prognostic water variables from the model's initial-condition
    # functions.
    function init_soil_water!(land, state, aux, localgeo, time)
        myfloat = eltype(state)
        state.soil.water.ϑ_l = myfloat(land.soil.water.initialϑ_l(aux))
        state.soil.water.θ_i = myfloat(land.soil.water.initialθ_i(aux))
    end

    wpf = WaterParamFunctions(FT; Ksat = 1e-7, S_s = 1e-3)
    soil_param_functions = SoilParamFunctions(FT; porosity = 0.75, water = wpf)

    bottom_flux_amplitude = FT(-3.0)
    f = FT(pi * 2.0 / 300.0)
    # nota bene: the flux is -K∇h
    bottom_flux =
        (aux, t) -> bottom_flux_amplitude * sin(f * t) * aux.soil.water.K
    surface_state = (aux, t) -> eltype(aux)(0.2)
    lateral_state = (aux, t) -> eltype(aux)(0.0)
    ϑ_l0 = (aux) -> eltype(aux)(0.2)
    bc = LandDomainBC(
        bottom_bc = LandComponentBC(soil_water = Neumann(bottom_flux)),
        surface_bc = LandComponentBC(soil_water = Dirichlet(surface_state)),
        minx_bc = LandComponentBC(soil_water = Neumann(lateral_state)),
        maxx_bc = LandComponentBC(soil_water = Neumann(lateral_state)),
        miny_bc = LandComponentBC(soil_water = Neumann(lateral_state)),
        maxy_bc = LandComponentBC(soil_water = Neumann(lateral_state)),
    )

    soil_water_model = SoilWaterModel(FT; initialϑ_l = ϑ_l0)
    soil_heat_model = PrescribedTemperatureModel()
    m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model)
    sources = ()
    m = LandModel(
        param_set,
        m_soil;
        boundary_conditions = bc,
        source = sources,
        init_state_prognostic = init_soil_water!,
    )

    N_poly = 5
    xres = FT(0.2)
    yres = FT(0.2)
    zres = FT(0.01)
    # Specify the domain boundaries.
    zmax = FT(0)
    zmin = FT(-1)
    xmax = FT(1)
    ymax = FT(1)

    driver_config = ClimateMachine.MultiColumnLandModel(
        "LandModel",
        (N_poly, N_poly),
        (xres, yres, zres),
        xmax,
        ymax,
        zmax,
        param_set,
        m;
        zmin = zmin,
    )

    t0 = FT(0)
    timeend = FT(300)
    dt = FT(0.5)

    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
    )

    n_outputs = 30
    every_x_simulation_time = ceil(Int, timeend / n_outputs)

    state_types = (Auxiliary(), GradientFlux())
    dons_arr =
        Dict[dict_of_nodal_states(solver_config, state_types; interp = false)]
    time_data = FT[0] # store time data

    # We specify a function which evaluates `every_x_simulation_time` and returns
    # the state vector, appending the variables we are interested in into
    # `all_data`.
    callback =
        GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do
            dons = dict_of_nodal_states(solver_config, state_types; interp = false)
            push!(dons_arr, dons)
            push!(time_data, gettime(solver_config.solver))
            nothing
        end

    # # Run the integration
    ClimateMachine.invoke!(solver_config; user_callbacks = (callback,))

    # Skip entry 1 (the t = 0 snapshot) when comparing against the
    # prescribed time-dependent flux.
    computed_bottom_∇h =
        [dons_arr[k]["soil.water.K∇h[3]"][1] for k in 2:n_outputs] ./
        [dons_arr[k]["soil.water.K"][1] for k in 2:n_outputs]

    t = time_data[2:n_outputs]
    # we need a -1 out in front here because the flux BC is on -K∇h
    prescribed_bottom_∇h = t -> FT(-1) * FT(-3.0 * sin(pi * 2.0 * t / 300.0))

    MSE = mean((prescribed_bottom_∇h.(t) .- computed_bottom_∇h) .^ 2.0)

    # Lateral (x, y) gradient fluxes should remain at machine-zero given the
    # zero-flux lateral boundary conditions.
    computed_y1_∇h = maximum(abs.(
        [dons_arr[k]["soil.water.K∇h[2]"][1] for k in 2:n_outputs] ./
        [dons_arr[k]["soil.water.K"][1] for k in 2:n_outputs],
    ))

    computed_x1_∇h = maximum(abs.(
        [dons_arr[k]["soil.water.K∇h[1]"][1] for k in 2:n_outputs] ./
        [dons_arr[k]["soil.water.K"][1] for k in 2:n_outputs],
    ))

    @test MSE < 1e-4
    @test computed_x1_∇h < 1e-10
    @test computed_y1_∇h < 1e-10
end


================================================
FILE: test/Land/Model/test_heat_parameterizations.jl
================================================ using MPI using OrderedCollections using StaticArrays using Test using CLIMAParameters using CLIMAParameters.Planet: ρ_cloud_liq, ρ_cloud_ice, cp_l, cp_i, T_0, LH_f0 using CLIMAParameters.Atmos.Microphysics: K_therm struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine using ClimateMachine.Land.SoilHeatParameterizations using ClimateMachine.Land @testset "Land heat parameterizations" begin FT = Float64 # Density of liquid water (kg/m``^3``) _ρ_l = FT(ρ_cloud_liq(param_set)) # Density of ice water (kg/m``^3``) _ρ_i = FT(ρ_cloud_ice(param_set)) # Volum. isobaric heat capacity liquid water (J/m3/K) _ρcp_l = FT(cp_l(param_set) * _ρ_l) # Volumetric isobaric heat capacity ice (J/m3/K) _ρcp_i = FT(cp_i(param_set) * _ρ_i) # Reference temperature (K) _T_ref = FT(T_0(param_set)) # Latent heat of fusion at ``T_0`` (J/kg) _LH_f0 = FT(LH_f0(param_set)) # Thermal conductivity of dry air κ_air = FT(K_therm(param_set)) @test temperature_from_ρe_int(5.4e7, 0.05, 2.1415e6, param_set) == FT(_T_ref + (5.4e7 + 0.05 * _ρ_i * _LH_f0) / 2.1415e6) @test volumetric_heat_capacity(0.25, 0.05, 1e6, param_set) == FT(1e6 + 0.25 * _ρcp_l + 0.05 * _ρcp_i) @test volumetric_internal_energy(0.05, 2.1415e6, 300.0, param_set) == FT(2.1415e6 * (300.0 - _T_ref) - 0.05 * _ρ_i * _LH_f0) @test saturated_thermal_conductivity(0.25, 0.05, 0.57, 2.29) == FT(0.57^(0.25 / (0.05 + 0.25)) * 2.29^(0.05 / (0.05 + 0.25))) @test saturated_thermal_conductivity(0.0, 0.0, 0.57, 2.29) == FT(0.0) @test relative_saturation(0.25, 0.05, 0.4) == FT((0.25 + 0.05) / 0.4) # Test branching in kersten_number soil_param_functions = SoilParamFunctions( FT; porosity = 0.2, ν_ss_gravel = 0.1, ν_ss_om = 0.1, ν_ss_quartz = 0.1, κ_solid = 0.1, ρp = 1.0, ) # ice fraction = 0 @test kersten_number(0.0, 0.75, soil_param_functions) == FT( 0.75^((FT(1) + 0.1 - 0.24 * 0.1 - 0.1) / FT(2)) * ( (FT(1) + exp(-18.1 * 0.75))^(-FT(3)) - 
((FT(1) - 0.75) / FT(2))^FT(3) )^(FT(1) - 0.1), ) # ice fraction ~= 0 @test kersten_number(0.05, 0.75, soil_param_functions) == FT(0.75^(FT(1) + 0.1)) @test thermal_conductivity(1.5, 0.7287, 0.7187) == FT(0.7287 * 0.7187 + (FT(1) - 0.7287) * 1.5) @test volumetric_internal_energy_liq(300.0, param_set) == FT(_ρcp_l * (300.0 - _T_ref)) @test k_solid(FT(0.5), FT(0.25), FT(2.0), FT(3.0), FT(2.0)) == FT(2)^FT(0.5) * FT(2)^FT(0.25) * FT(3.0)^FT(0.25) @test ksat_frozen(FT(0.5), FT(0.1), FT(0.4)) == FT(0.5)^FT(0.9) * FT(0.4)^FT(0.1) @test ksat_unfrozen(FT(0.5), FT(0.1), FT(0.4)) == FT(0.5)^FT(0.9) * FT(0.4)^FT(0.1) @test k_dry(param_set, soil_param_functions) == ((FT(0.053) * FT(0.1) - κ_air) * FT(0.8) + κ_air * FT(1.0)) / (FT(1.0) - (FT(1.0) - FT(0.053)) * FT(0.8)) end ================================================ FILE: test/Land/Model/test_overland_flow_analytic.jl ================================================ using MPI using OrderedCollections using StaticArrays using Test using Statistics using DelimitedFiles using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SurfaceFlow using ClimateMachine.Land.SoilWaterParameterizations using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state using ArtifactWrappers # Test that the land model with no surface flow works correctly @testset "NoSurfaceFlow Model" begin function init_land_model!(land, state, aux, localgeo, time) end ClimateMachine.init() FT = Float64 
soil_water_model = PrescribedWaterModel() soil_heat_model = PrescribedTemperatureModel() soil_param_functions = nothing m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model) m_surface = NoSurfaceFlowModel() sources = () m = LandModel( param_set, m_soil; surface = m_surface, source = sources, init_state_prognostic = init_land_model!, ) N_poly = 5 nelem_vert = 10 # Specify the domain boundaries zmax = FT(0) zmin = FT(-1) driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ) t0 = FT(0) timeend = FT(10) dt = FT(1) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ) mygrid = solver_config.dg.grid Q = solver_config.Q aux = solver_config.dg.state_auxiliary ClimateMachine.invoke!(solver_config;) t = ODESolvers.gettime(solver_config.solver) state_vars = SingleStackUtils.get_vars_from_nodal_stack( mygrid, Q, vars_state(m, Prognostic(), FT), ) aux_vars = SingleStackUtils.get_vars_from_nodal_stack( mygrid, aux, vars_state(m, Auxiliary(), FT), ) #Make sure it runs, and that there are no state variables, and only "x,y,z" as aux. 
function warp_constant_slope(
    xin,
    yin,
    zin;
    topo_max = 0.2,
    zmin = -0.1,
    xmax = 400,
)
    # Warp each vertical column so the surface follows a constant slope:
    # the surface height falls linearly from `topo_max` at x = 0 to 0 at
    # x = xmax, while the bottom of the domain remains at `zmin`.
    FT = eltype(xin)
    surface_height = FT((FT(1.0) - xin / xmax) * topo_max)
    # Vertical stretch factor mapping [zmin, 0] onto [zmin, surface_height].
    stretch = FT(1.0) - surface_height / zmin
    zout = zmin + (zin - zmin) * stretch
    return (xin, yin, zout)
end
# Residual of the kinematic-wave outflow relation during recession
# (t > t_r); the root in `y` gives the flow depth at the outlet.
function g(m, y, i, t_r, L, alpha, t)
    return L / alpha - y^m / i - m * y^(m - 1) * (t - t_r)
end

# Derivative of `g` with respect to `y`, used by the Newton iteration
# in `analytic` below.
function dg(m, y, i, t_r, L, alpha, t)
    return -m * y^(m - 1) / i - m * (m - 1) * y^(m - 2) * (t - t_r)
end

# Analytic outlet flowrate for the constant-slope overland flow problem
# (Model 1 / Eqn 6, DOI: 10.1061/(ASCE)0733-9429(2007)133:2(217)).
# `t_c` is the time of concentration, `t_r` the rain duration, `i` the
# rain rate, `L` the slope length, `alpha` and `m` the kinematic-wave
# coefficient and exponent.
function analytic(t, alpha, t_c, t_r, i, L, m)
    if t < t_c
        # Rising limb: depth still growing with the rainfall input.
        return alpha * (i * t)^m
    end
    if t_c < t <= t_r
        # Steady plateau between time of concentration and end of rain.
        return alpha * (i * t_c)^m
    end
    if t > t_r
        # Recession limb: solve g(y) = 0 for the outlet depth with
        # Newton's method, starting from the rain accumulated by t_r.
        yL = i * (t - t_r)
        residual = g(m, yL, i, t_r, L, alpha, t)
        while abs(residual) > 1e-4
            yL -= residual / dg(m, yL, i, t_r, L, alpha, t)
            residual = g(m, yL, i, t_r, L, alpha, t)
        end
        return alpha * yL^m
    end
end
q = Array(q) # copy to host if GPU array
# Relative RMSE of the modeled flowrate against the analytic solution,
# normalized by the peak flowrate. NOTE: the previous form put the
# assignment inside the macro (`@test sqrt_rmse_over_max_q = ... < 3e-3`),
# which is fragile — `Test` treats a top-level `=` in the test expression
# specially (rewritten or rejected depending on the Julia version) and the
# variable is not reliably bound. Bind first, then test the bound value.
sqrt_rmse_over_max_q =
    sqrt(mean(
        (analytic.(time_data, alpha, t_c, t_r, i, L, m) .- q) .^ 2.0,
    )) / maximum(q)
@test sqrt_rmse_over_max_q < 3e-3
# Prescribed rainfall rate (m / s): a constant 3e-6 during the first
# 90 minutes of simulated time, and zero afterwards. `x` and `y` are
# accepted to satisfy the source interface but are unused; returned
# values are cast to the float type of `t`.
function precip(x, y, t)
    myFT = eltype(t)
    if t < myFT(90 * 60)
        return myFT(3e-6)
    else
        return myFT(0.0)
    end
end
# Per-minute volumetric flowrate at an outlet node. `h` is the surface
# water height at the node and `xv` its x-coordinate; the velocity is
# evaluated at y = 1000.0, the outlet (ymax) boundary of the domain.
# NOTE(review): relies on `m_surface` and `calculate_velocity` from the
# enclosing testset scope.
function compute_Q(h, xv)
    # Clip negative heights that can arise numerically before computing
    # the velocity.
    height = max.(h, 0.0)
    v = calculate_velocity(m_surface, xv, 1000.0, height)# put in y = 1000.0
    speed = sqrt(v[1]^2.0 + v[2]^2.0 + v[3]^2.0)
    # Multiply by 60 so the rate is per minute, not per second.
    Q_outlet = speed .* height .* 60.0
    return Q_outlet
end
BalanceLaw, LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state import ClimateMachine.DGMethods.FVReconstructions: FVLinear @testset "NoRunoff" begin ClimateMachine.init() FT = Float64 function init_soil_water!(land, state, aux, localgeo, time) myfloat = eltype(state) state.soil.water.ϑ_l = myfloat(land.soil.water.initialϑ_l(aux)) state.soil.water.θ_i = myfloat(land.soil.water.initialθ_i(aux)) end wpf = WaterParamFunctions(FT; Ksat = 1e-7, S_s = 1e-3) soil_param_functions = SoilParamFunctions(FT; porosity = 0.75, water = wpf) surface_precip_amplitude = FT(3e-8) f = FT(pi * 2.0 / 300.0) precip = (t) -> surface_precip_amplitude * sin(f * t) ϑ_l0 = (aux) -> eltype(aux)(0.2) bc = LandDomainBC( bottom_bc = LandComponentBC( soil_water = Neumann((aux, t) -> eltype(aux)(0.0)), ), surface_bc = LandComponentBC( soil_water = SurfaceDrivenWaterBoundaryConditions( FT; precip_model = DrivenConstantPrecip{FT}(precip), runoff_model = NoRunoff(), ), ), ) soil_water_model = SoilWaterModel(FT; initialϑ_l = ϑ_l0) soil_heat_model = PrescribedTemperatureModel() m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model) sources = () m = LandModel( param_set, m_soil; boundary_conditions = bc, source = sources, init_state_prognostic = init_soil_water!, ) N_poly = 5 nelem_vert = 50 # Specify the domain boundaries zmax = FT(0) zmin = FT(-1) driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ) t0 = FT(0) timeend = FT(150) dt = FT(0.05) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ) n_outputs = 60 mygrid = solver_config.dg.grid every_x_simulation_time = 
ceil(Int, timeend / n_outputs) state_types = (Prognostic(), Auxiliary(), GradientFlux()) dons_arr = Dict[dict_of_nodal_states(solver_config, state_types; interp = true)] time_data = FT[0] # store time data # We specify a function which evaluates `every_x_simulation_time` and returns # the state vector, appending the variables we are interested in into # `dons_arr`. callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do dons = dict_of_nodal_states(solver_config, state_types; interp = true) push!(dons_arr, dons) push!(time_data, gettime(solver_config.solver)) nothing end # # Run the integration ClimateMachine.invoke!(solver_config; user_callbacks = (callback,)) dons = dict_of_nodal_states(solver_config, state_types; interp = true) push!(dons_arr, dons) push!(time_data, gettime(solver_config.solver)) # Get z-coordinate z = get_z(solver_config.dg.grid; rm_dupes = true) N = length(dons_arr) # Note - we take the indices 2:N here to avoid the t = 0 spot. Gradients # are not calculated before the invoke! command, so we cannot compare at t=0. 
# Hydrostatic-equilibrium soil moisture profile from the van Genuchten
# retention curve: effective saturation S(z) = (1 + (α (z - zm))^n)^(-m)
# with m = 1 - 1/n, scaled by the porosity. `zm` is the reference
# (water-table) height.
function hydrostatic_profile(z, zm, porosity, n, α)
    # Work in the float type of `z`. Previously the local `myf` was
    # computed but unused while the global `FT` was captured instead,
    # tying the helper to Float64.
    myf = eltype(z)
    m = myf(1 - 1 / n)
    S = myf((myf(1) + (α * (z - zm))^n)^(-m))
    return myf(S * porosity)
end
ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), #fv_reconstruction = FVLinear(), ) t0 = FT(0) timeend = FT(60 * 60) dt = FT(0.1) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ) state_types = (Prognostic(), Auxiliary(), GradientFlux()) ClimateMachine.invoke!(solver_config;)# user_callbacks = (callback,)) srf_dons = dict_of_nodal_states(solver_config, state_types; interp = true) ###### Repeat with explicit dirichlet BC soil_water_model2 = SoilWaterModel( FT; moisture_factor = MoistureDependent{FT}(), hydraulics = hydraulics, initialϑ_l = ϑ_l0, ) soil_heat_model2 = PrescribedTemperatureModel() m_soil2 = SoilModel(soil_param_functions, soil_water_model2, soil_heat_model2) bc2 = LandDomainBC( bottom_bc = LandComponentBC( soil_water = Neumann((aux, t) -> eltype(aux)(0.0)), ), surface_bc = LandComponentBC( soil_water = Dirichlet((aux, t) -> eltype(aux)(0.495)), ), ) m2 = LandModel( param_set, m_soil2; boundary_conditions = bc2, source = sources, init_state_prognostic = init_soil_water!, ) driver_config2 = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m2; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), # fv_reconstruction = FVLinear(), ) solver_config2 = ClimateMachine.SolverConfiguration( t0, timeend, driver_config2, ode_dt = dt, ) ClimateMachine.invoke!(solver_config2;) dir_dons = dict_of_nodal_states(solver_config2, state_types; interp = true) # Here we take the solution near the surface, where changes between the two # would occur. 
# Hydrostatic-equilibrium soil moisture profile from the van Genuchten
# retention curve: effective saturation S(z) = (1 + (α (z - zm))^n)^(-m)
# with m = 1 - 1/n, scaled by the porosity. `zm` is the reference
# (water-table) height.
function hydrostatic_profile(z, zm, porosity, n, α)
    # Work in the float type of `z`. Previously the local `myf` was
    # computed but unused while the global `FT` was captured instead,
    # tying the helper to Float64.
    myf = eltype(z)
    m = myf(1 - 1 / n)
    S = myf((myf(1) + (α * (z - zm))^n)^(-m))
    return myf(S * porosity)
end
fv_reconstruction = FVLinear(), ) t0 = FT(0) timeend = FT(100000) dt = FT(4) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ) state_types = (Prognostic(), Auxiliary(), GradientFlux()) ClimateMachine.invoke!(solver_config;) srf_dons = dict_of_nodal_states(solver_config, state_types; interp = true) # Note - we only look at differences between the solutions at the surface, because # near the bottom they will agree. z = srf_dons["z"][150:end] error1 = sqrt(mean( ( srf_dons["soil.water.ϑ_l"][150:end] .- hydrostatic_profile.(z, -1, 0.495, hydraulics.n, hydraulics.α) ) .^ 2.0, )) error2 = sqrt(mean(srf_dons["soil.water.K∇h[3]"][150:end] .^ 2.0)) @test error1 < 1e-5 @test error2 < eps(FT) end ================================================ FILE: test/Land/Model/test_radiative_energy_flux_functions.jl ================================================ # Test functions used in runoff modeling. using MPI using OrderedCollections using StaticArrays using Statistics using Test using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.RadiativeEnergyFlux using ClimateMachine.Land.SoilWaterParameterizations using ClimateMachine.Land.SoilHeatParameterizations using ClimateMachine.Land.Runoff using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state @testset "Radiative energy flux testing" begin F = Float32 user_nswf = t -> F(2 * t) user_swf = t -> F(2 * t) user_α = t -> F(0.2 * t) 
# Initialize the prognostic soil heat state: computes the volumetric
# internal energy consistent with the initial temperature profile
# `land.soil.heat.initialT` and the water content supplied by the water
# model, and writes it into `state.soil.heat.ρe_int`. Mutates `state` only.
function init_soil!(land, state, aux, localgeo, time)
    myfloat = eltype(state)
    # Augmented liquid water and ice content from the (prescribed) water
    # model at this point and time.
    ϑ_l, θ_i = get_water_content(land.soil.water, aux, state, time)
    θ_l = volumetric_liquid_fraction(ϑ_l, land.soil.param_functions.porosity)
    # Volumetric heat capacity of the soil / water / ice mixture.
    ρc_s = volumetric_heat_capacity(
        θ_l,
        θ_i,
        land.soil.param_functions.ρc_ds,
        land.param_set,
    )
    state.soil.heat.ρe_int = myfloat(volumetric_internal_energy(
        θ_i,
        ρc_s,
        land.soil.heat.initialT(aux),
        land.param_set,
    ))
end
κ_solid = 1.0, ρp = 1.0, κ_sat_unfrozen = 0.57, κ_sat_frozen = 2.29, ) # Prescribed net short wave flux, initial temperature A = FT(5) prescribed_nswf = t -> FT(-A * t) T_init = FT(300.0) T_init_func = aux -> T_init # Flux entering = flux leaving soil column bc = LandDomainBC( bottom_bc = LandComponentBC( soil_heat = Neumann((aux, t) -> FT(-A * t)), ), surface_bc = LandComponentBC( soil_heat = SurfaceDrivenHeatBoundaryConditions( FT; nswf_model = PrescribedNetSwFlux(FT; nswf = prescribed_nswf), ), ), ) soil_water_model = PrescribedWaterModel() soil_heat_model = SoilHeatModel(FT; initialT = T_init_func) m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model) sources = () m = LandModel( param_set, m_soil; boundary_conditions = bc, source = sources, init_state_prognostic = init_soil!, ) N_poly = 5 nelem_vert = 10 # Specify the domain boundaries zmax = FT(1) zmin = FT(0) driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ) t0 = FT(0) timeend = FT(1) dt = FT(1e-4) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ) mygrid = solver_config.dg.grid aux = solver_config.dg.state_auxiliary ClimateMachine.invoke!(solver_config) t = ODESolvers.gettime(solver_config.solver) z_ind = varsindex(vars_state(m, Auxiliary(), FT), :z) z = Array(aux[:, z_ind, :][:]) T_ind = varsindex(vars_state(m, Auxiliary(), FT), :soil, :heat, :T) T = Array(aux[:, T_ind, :][:]) k = k_dry(param_set, soil_param_functions) diffusivity = k / soil_param_functions.ρc_ds A = A / k # Because the flux is applied on κ∇T. κ∇T = A*t in clima, whereas ∇T = A*t in the analytic soln. 
approx_sum_term = sum( ( -2 * A / (diffusivity * pi^4) * cos.(n * pi * z) * (pi * n * sin(pi * n) + cos(pi * n) - 1) * (1 - exp(-timeend * diffusivity * (pi * n)^2)) / n^4 ) for n in 1:100 ) approx_analytic_soln = A * timeend .* z .+ T_init .- A * timeend / 2 .+ approx_sum_term MSE = mean((approx_analytic_soln .- T) .^ 2.0) @test eltype(aux) == FT @test MSE < 1e-5 end ================================================ FILE: test/Land/Model/test_water_parameterizations.jl ================================================ # Test the branching of the SoilWaterParameterizations functions, # test the instantiation of them works as expected, and that they # return what we expect. using MPI using OrderedCollections using StaticArrays using Test using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SoilWaterParameterizations @testset "Land water parameterizations" begin FT = Float64 test_array = [0.5, 1.0] vg_model = vanGenuchten(FT;) mm = MoistureDependent{FT}() bc_model = BrooksCorey(FT;) hk_model = Haverkamp(FT;) #Use an array to confirm that extra arguments are unused. 
@test viscosity_factor.(Ref(ConstantViscosity{FT}()), test_array) ≈ [1.0, 1.0] @test impedance_factor.(Ref(NoImpedance{FT}()), test_array) ≈ [1.0, 1.0] @test moisture_factor.( Ref(MoistureIndependent{FT}()), Ref(vg_model), test_array, ) ≈ [1.0, 1.0] viscosity_model = TemperatureDependentViscosity{FT}(; T_ref = FT(1.0)) @test viscosity_factor(viscosity_model, FT(1.0)) == 1 impedance_model = IceImpedance{FT}(; Ω = 2.0) @test impedance_factor(impedance_model, 0.5) == FT(0.1) imp_f = impedance_factor(impedance_model, 0.5) vis_f = viscosity_factor(viscosity_model, 1.0) m_f = moisture_factor(MoistureDependent{FT}(), vg_model, 1.0) Ksat = 1.0 @test hydraulic_conductivity(Ksat, imp_f, vis_f, m_f) == FT(0.1) @test_throws Exception effective_saturation(0.5, -1.0, 0.0) @test effective_saturation(0.5, 0.25, 0.05) - 4.0 / 9.0 < eps(FT) n = vg_model.n m = 1.0 - 1.0 / n α = vg_model.α S_s = 0.001 θ_r = 0.0 ν = 1.0 @test moisture_factor.(Ref(mm), Ref(vg_model), test_array) == sqrt.(test_array) .* (FT(1) .- (FT(1) .- test_array .^ (FT(1) / m)) .^ m) .^ FT(2) @test moisture_factor.(Ref(mm), Ref(bc_model), test_array) == test_array .^ (FT(2) * bc_model.m + FT(3)) ψ = -( (test_array .^ (-FT(1) / hk_model.m) .- FT(1)) .* hk_model.α^(-hk_model.n) ) .^ (FT(1) / hk_model.n) @test moisture_factor.(Ref(mm), Ref(hk_model), test_array) == hk_model.A ./ (hk_model.A .+ abs.(ψ) .^ hk_model.k) @test pressure_head.( Ref(vg_model), Ref(ν), Ref(S_s), Ref(θ_r), test_array, Ref(0.0), ) ≈ .-((-1 .+ test_array .^ (-1 / m)) .* α^(-n)) .^ (1 / n) #test branching in pressure head @test pressure_head(vg_model, ν, S_s, θ_r, 1.5, 0.0) == 500 @test pressure_head.( Ref(hk_model), Ref(ν), Ref(S_s), Ref(θ_r), test_array, Ref(0.0), ) ≈ .-((-1 .+ test_array .^ (-1 / m)) .* α^(-n)) .^ (1 / n) m = FT(0.5) ψb = FT(0.1656) @test pressure_head(bc_model, ν, S_s, θ_r, 0.5, 0.0) ≈ -ψb * 0.5^(-m) @test volumetric_liquid_fraction.([0.5, 1.5], Ref(0.75)) ≈ [0.5, 0.75] @test inverse_matric_potential( bc_model, 
matric_potential(bc_model, FT(0.5)), ) - FT(0.5) < eps(FT) @test inverse_matric_potential( vg_model, matric_potential(vg_model, FT(0.5)), ) == FT(0.5) @test inverse_matric_potential( hk_model, matric_potential(hk_model, FT(0.5)), ) == FT(0.5) @test_throws Exception inverse_matric_potential(bc_model, 1) @test_throws Exception inverse_matric_potential(hk_model, 1) @test_throws Exception inverse_matric_potential(vg_model, 1) # Test that spatial dependence works properly vg_spatial = vanGenuchten(FT; n = (x) -> 2 * x, α = (x) -> x^2) @test supertype(typeof(vg_spatial.n)) == Function vg_point = vg_spatial(FT(2.0)) @test typeof(vg_point.n) == FT @test vg_point.n == 4.0 @test vg_point.α == 4.0 @test vg_point.m == 0.75 end ================================================ FILE: test/Land/runtests.jl ================================================ using Test, Pkg @testset "Land" begin all_tests = isempty(ARGS) || "all" in ARGS ? true : false for submodule in ["Model"] if all_tests || "$submodule" in ARGS || "Land/$submodule" in ARGS || "Land" in ARGS include_test(submodule) end end end ================================================ FILE: test/Numerics/DGMethods/Euler/acousticwave_1d_imex.jl ================================================ using ClimateMachine using ClimateMachine.ConfigTypes using ClimateMachine.Mesh.Topologies: StackedCubedSphereTopology, equiangular_cubed_sphere_warp, grid1d using ClimateMachine.Mesh.Grids: DiscontinuousSpectralElementGrid, VerticalDirection using ClimateMachine.Mesh.Filters using ClimateMachine.DGMethods: DGModel, init_ode_state, remainder_DGModel using ClimateMachine.DGMethods.NumericalFluxes: RusanovNumericalFlux, CentralNumericalFluxGradient, CentralNumericalFluxSecondOrder using ClimateMachine.ODESolvers using ClimateMachine.SystemSolvers using ClimateMachine.VTK: writevtk, writepvtu using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using Thermodynamics: air_density, soundspeed_air, 
# Driver: runs the acoustic-wave test for both float types and both
# explicit/implicit splitting modes, checking the final solution norm
# against recorded reference values.
function main()
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    polynomialorder = 5
    numelem_horz = 10
    numelem_vert = 5
    timeend = 60 * 60
    # timeend = 33 * 60 * 60 # Full simulation
    outputtime = 60 * 60

    # Reference values for norm(Q) at the end of the (shortened) run.
    expected_result = Dict(
        Float32 => 9.5066030866432000e+13,
        Float64 => 9.5073452847149594e+13,
    )

    for FT in (Float64, Float32), split_explicit_implicit in (false, true)
        result = test_run(
            mpicomm,
            polynomialorder,
            numelem_horz,
            numelem_vert,
            timeend,
            outputtime,
            ArrayType,
            FT,
            split_explicit_implicit,
        )
        @test result ≈ expected_result[FT]
    end
end
IsothermalProfile(param_set, setup.T_ref) δ_χ = @SVector [FT(ii) for ii in 1:ntracers] physics = AtmosPhysics{FT}( param_set; ref_state = HydrostaticState(T_profile), turbulence = ConstantDynamicViscosity(FT(0)), moisture = DryModel(), tracers = NTracers{length(δ_χ), FT}(δ_χ), ) model = AtmosModel{FT}( AtmosGCMConfigType, physics; init_state_prognostic = setup, source = (Gravity(),), ) linearmodel = AtmosAcousticGravityLinearModel(model) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) lineardg = DGModel( linearmodel, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); direction = VerticalDirection(), state_auxiliary = dg.state_auxiliary, ) # determine the time step element_size = (setup.domain_height / numelem_vert) acoustic_speed = soundspeed_air(param_set, FT(setup.T_ref)) dt_factor = 445 dt = dt_factor * element_size / acoustic_speed / polynomialorder^2 # Adjust the time step so we exactly hit 1 hour for VTK output dt = 60 * 60 / ceil(60 * 60 / dt) nsteps = ceil(Int, timeend / dt) Q = init_ode_state(dg, FT(0)) linearsolver = ManyColumnLU() # linearsolver = BatchedGeneralizedMinimalResidual( # lineardg, # Q; # atol = 1.0e-6, #sqrt(eps(FT)) * 0.01, # rtol = 1.0e-8, #sqrt(eps(FT)) * 0.01, # # Maximum number of Krylov iterations in a column #) if split_explicit_implicit rem_dg = remainder_DGModel( dg, (lineardg,); numerical_flux_first_order = ( dg.numerical_flux_first_order, (lineardg.numerical_flux_first_order,), ), ) end odesolver = ARK2GiraldoKellyConstantinescu( split_explicit_implicit ? 
# Parameters of the acoustic-wave initial condition. Instances are callable
# and are passed to the model as `init_state_prognostic`.
Base.@kwdef struct AcousticWaveSetup{FT}
    # Height of the model domain above the planet surface (m).
    domain_height::FT = 10e3
    # Isothermal reference temperature (K).
    T_ref::FT = 300
    # Controls the horizontal extent of the perturbation
    # (enters as β = min(1, α * acos(cos φ cos λ)) in the initializer).
    α::FT = 3
    # Amplitude of the pressure perturbation Δp (added to ref_state.p).
    γ::FT = 100
    # Vertical mode number of the perturbation.
    nv::Int = 1
end
(setup::AcousticWaveSetup)(problem, bl, state, aux, localgeo, t) # callable to set initial conditions FT = eltype(state) param_set = parameter_set(bl) λ = longitude(bl, aux) φ = latitude(bl, aux) z = altitude(bl, aux) β = min(FT(1), setup.α * acos(cos(φ) * cos(λ))) f = (1 + cos(FT(π) * β)) / 2 g = sin(setup.nv * FT(π) * z / setup.domain_height) Δp = setup.γ * f * g p = aux.ref_state.p + Δp ts = PhaseDry_pT(param_set, p, setup.T_ref) q_pt = PhasePartition(ts) e_pot = gravitational_potential(bl.orientation, aux) e_int = internal_energy(ts) state.ρ = air_density(ts) state.ρu = SVector{3, FT}(0, 0, 0) state.energy.ρe = state.ρ * (e_int + e_pot) state.tracers.ρχ = @SVector [FT(ii) for ii in 1:ntracers] nothing end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, model, testname = "acousticwave", ) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q))) writevtk(filename, Q, dg, statenames, dg.state_auxiliary, auxnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu(pvtuprefix, prefixes, (statenames..., auxnames...), eltype(Q)) @info "Done writing VTK: $pvtuprefix" end end main() ================================================ FILE: test/Numerics/DGMethods/Euler/acousticwave_mrigark.jl ================================================ using ClimateMachine using ClimateMachine.ConfigTypes using ClimateMachine.Mesh.Topologies: StackedCubedSphereTopology, equiangular_cubed_sphere_warp, grid1d using ClimateMachine.Mesh.Grids: DiscontinuousSpectralElementGrid, 
    VerticalDirection,
    HorizontalDirection,
    EveryDirection,
    min_node_distance
using ClimateMachine.Mesh.Filters
using ClimateMachine.DGMethods: DGModel, init_ode_state, remainder_DGModel
using ClimateMachine.DGMethods.NumericalFluxes:
    RusanovNumericalFlux,
    CentralNumericalFluxGradient,
    CentralNumericalFluxSecondOrder
using ClimateMachine.ODESolvers
using ClimateMachine.SystemSolvers
using ClimateMachine.VTK: writevtk, writepvtu
using ClimateMachine.GenericCallbacks:
    EveryXWallTimeSeconds, EveryXSimulationSteps
using Thermodynamics:
    air_density, soundspeed_air, internal_energy, PhaseDry_pT, PhasePartition
using Thermodynamics.TemperatureProfiles: IsothermalProfile
using ClimateMachine.Atmos:
    AtmosPhysics,
    AtmosModel,
    DryModel,
    NoPrecipitation,
    NoRadiation,
    NTracers,
    ConstantDynamicViscosity,
    vars_state,
    Gravity,
    HydrostaticState,
    AtmosAcousticGravityLinearModel,
    AtmosAcousticLinearModel,
    parameter_set
using ClimateMachine.Orientations:
    SphericalOrientation, gravitational_potential, altitude, latitude, longitude
using ClimateMachine.VariableTemplates: flattenednames

using CLIMAParameters
using CLIMAParameters.Planet: planet_radius
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test

const output_vtk = false
const ntracers = 1

# Driver: runs the acoustic-wave test on the cubed sphere with both the
# explicit/explicit and implicit/explicit multirate (MRI-GARK) solvers and
# checks the final solution norm against stored reference values.
function main()
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    polynomialorder = 5
    numelem_horz = 10
    numelem_vert = 5
    timeend = 60 * 60
    # timeend = 33 * 60 * 60 # Full simulation
    outputtime = 60 * 60

    # Regression values keyed by (float type, explicit_solve flag).
    expected_result = Dict()
    expected_result[Float64, true] = 9.5073337869322578e+13
    expected_result[Float64, false] = 9.5073455070673781e+13

    @testset "acoustic wave" begin
        for FT in (Float64,)# Float32)
            for explicit in (true, false)
                result = test_run(
                    mpicomm,
                    polynomialorder,
                    numelem_horz,
                    numelem_vert,
                    timeend,
                    outputtime,
                    ArrayType,
                    FT,
                    explicit,
                )
                @test result ≈ expected_result[FT, explicit]
            end
        end
    end
end

# Build grid, models and solvers for one configuration; returns norm(Q) at
# the final time (used as the regression metric by `main`).
function test_run(
    mpicomm,
    polynomialorder,
    numelem_horz,
    numelem_vert,
    timeend,
    outputtime,
    ArrayType,
    FT,
    explicit_solve,
)
    setup = AcousticWaveSetup{FT}()
    _planet_radius::FT = planet_radius(param_set)
    vert_range = grid1d(
        _planet_radius,
        FT(_planet_radius + setup.domain_height),
        nelem = numelem_vert,
    )
    topology = StackedCubedSphereTopology(mpicomm, numelem_horz, vert_range)
    grid = DiscontinuousSpectralElementGrid(
        topology,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = polynomialorder,
        meshwarp = equiangular_cubed_sphere_warp,
    )
    # Minimum node spacings used to size the horizontal/vertical time steps.
    hmnd = min_node_distance(grid, HorizontalDirection())
    vmnd = min_node_distance(grid, VerticalDirection())

    T_profile = IsothermalProfile(param_set, setup.T_ref)
    δ_χ = @SVector [FT(ii) for ii in 1:ntracers]

    fullphysics = AtmosPhysics{FT}(
        param_set;
        ref_state = HydrostaticState(T_profile),
        turbulence = ConstantDynamicViscosity(FT(0)),
        moisture = DryModel(),
        tracers = NTracers{length(δ_χ), FT}(δ_χ),
    )
    # NOTE(review): this file uses AtmosLESConfigType with an explicit
    # SphericalOrientation, unlike the sibling tests which use
    # AtmosGCMConfigType — confirm this combination is intentional.
    fullmodel = AtmosModel{FT}(
        AtmosLESConfigType,
        fullphysics;
        orientation = SphericalOrientation(),
        init_state_prognostic = setup,
        source = (Gravity(),),
    )

    dg = DGModel(
        fullmodel,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )
    Q = init_ode_state(dg, FT(0))

    # The linear model which contains the fast modes
    # acousticmodel = AtmosAcousticLinearModel(fullmodel)
    acousticmodel = AtmosAcousticGravityLinearModel(fullmodel)
    # Vertical-only DG operator for the fast acoustic model.
    vacoustic_dg = DGModel(
        acousticmodel,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient();
        direction = VerticalDirection(),
        state_auxiliary = dg.state_auxiliary,
    )

    # Advection model is the difference between the fullmodel and acousticmodel.
# This will be handled with explicit substepping (time step in between the # vertical and horizontally acoustic models) rem_dg = remainder_DGModel(dg, (vacoustic_dg,)) # determine the time step for the model components acoustic_speed = soundspeed_air(param_set, FT(setup.T_ref)) advection_speed = 1 # What's a reasonable number here? vacoustic_dt = vmnd / acoustic_speed remainder_dt = min(min(hmnd, vmnd) / advection_speed, hmnd / acoustic_speed) odesolver = if explicit_solve remainder_dt = 5vacoustic_dt nsteps_output = ceil(outputtime / remainder_dt) remainder_dt = outputtime / nsteps_output nsteps = ceil(Int, timeend / remainder_dt) @assert nsteps * remainder_dt ≈ timeend vacoustic_solver = LSRK54CarpenterKennedy(vacoustic_dg, Q; dt = vacoustic_dt) rem_solver = MRIGARKERK45aSandu( rem_dg, vacoustic_solver, Q; dt = remainder_dt, ) rem_solver else vacoustic_dt = 200vacoustic_dt element_size = (setup.domain_height / numelem_vert) nsteps_output = ceil(outputtime / vacoustic_dt) vacoustic_dt = outputtime / nsteps_output nsteps = ceil(Int, timeend / vacoustic_dt) @assert nsteps * vacoustic_dt ≈ timeend rem_solver = LSRK54CarpenterKennedy(rem_dg, Q; dt = remainder_dt) vacoustic_solver = MRIGARKESDIRK24LSA( vacoustic_dg, LinearBackwardEulerSolver(ManyColumnLU(); isadjustable = false), rem_solver, Q; dt = vacoustic_dt, ) vacoustic_solver end filterorder = 18 filter = ExponentialFilter(grid, 0, filterorder) cbfilter = EveryXSimulationSteps(1) do Filters.apply!(Q, :, grid, filter, direction = VerticalDirection()) nothing end eng0 = norm(Q) @info @sprintf( """Starting ArrayType = %s FT = %s polynomialorder = %d numelem_horz = %d numelem_vert = %d acoustic dt = %.16e remainder_dt = %.16e norm(Q₀) = %.16e """, "$ArrayType", "$FT", polynomialorder, numelem_horz, numelem_vert, vacoustic_dt, remainder_dt, eng0 ) # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) runtime = 
Dates.format( convert(DateTime, now() - starttime[]), dateformat"HH:MM:SS", ) @info @sprintf """Update simtime = %.16e runtime = %s norm(Q) = %.16e """ gettime(odesolver) runtime energy end end callbacks = (cbinfo, cbfilter) if output_vtk # create vtk dir vtkdir = "vtk_acousticwave" * "_poly$(polynomialorder)_horz$(numelem_horz)_vert$(numelem_vert)" * "_$(ArrayType)_$(FT)" * "_$(explicit_solve ? "Explicit_Explicit" : "Implicit_Explicit")" mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, dg, Q, fullmodel) # setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(odesolver)) do_output(mpicomm, vtkdir, vtkstep, dg, Q, fullmodel) end callbacks = (callbacks..., cbvtk) end solve!( Q, odesolver; numberofsteps = nsteps, adjustfinalstep = false, callbacks = callbacks, ) # final statistics engf = norm(Q) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e """ engf engf / eng0 engf - eng0 engf end Base.@kwdef struct AcousticWaveSetup{FT} domain_height::FT = 10e3 T_ref::FT = 300 α::FT = 3 γ::FT = 100 nv::Int = 1 end function (setup::AcousticWaveSetup)(problem, bl, state, aux, localgeo, t) # callable to set initial conditions FT = eltype(state) param_set = parameter_set(bl) λ = longitude(bl, aux) φ = latitude(bl, aux) z = altitude(bl, aux) β = min(FT(1), setup.α * acos(cos(φ) * cos(λ))) f = (1 + cos(FT(π) * β)) / 2 g = sin(setup.nv * FT(π) * z / setup.domain_height) Δp = setup.γ * f * g p = aux.ref_state.p + Δp ts = PhaseDry_pT(param_set, p, setup.T_ref) q_pt = PhasePartition(ts) e_pot = gravitational_potential(bl.orientation, aux) e_int = internal_energy(ts) state.ρ = air_density(ts) state.ρu = SVector{3, FT}(0, 0, 0) state.energy.ρe = state.ρ * (e_int + e_pot) state.tracers.ρχ = @SVector [FT(ii) for ii in 1:ntracers] nothing end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, model, testname = "acousticwave", ) ## name 
of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q))) writevtk(filename, Q, dg, statenames, dg.state_auxiliary, auxnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu(pvtuprefix, prefixes, (statenames..., auxnames...), eltype(Q)) @info "Done writing VTK: $pvtuprefix" end end main() ================================================ FILE: test/Numerics/DGMethods/Euler/acousticwave_variable_degree.jl ================================================ using ClimateMachine using ClimateMachine.ConfigTypes using ClimateMachine.Mesh.Topologies: StackedCubedSphereTopology, equiangular_cubed_sphere_warp, grid1d using ClimateMachine.Mesh.Grids: DiscontinuousSpectralElementGrid, VerticalDirection using ClimateMachine.Mesh.Filters using ClimateMachine.DGMethods: DGModel, init_ode_state, remainder_DGModel using ClimateMachine.DGMethods.NumericalFluxes: RusanovNumericalFlux, CentralNumericalFluxGradient, CentralNumericalFluxSecondOrder using ClimateMachine.ODESolvers using ClimateMachine.SystemSolvers using ClimateMachine.VTK: writevtk, writepvtu using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using Thermodynamics: air_density, soundspeed_air, internal_energy, PhaseDry_pT, PhasePartition using Thermodynamics.TemperatureProfiles: IsothermalProfile using ClimateMachine.Atmos: AtmosPhysics, AtmosModel, DryModel, NoPrecipitation, NoRadiation, NTracers, vars_state, Gravity, HydrostaticState, AtmosAcousticGravityLinearModel, parameter_set using 
ClimateMachine.TurbulenceClosures
using ClimateMachine.Orientations:
    SphericalOrientation, gravitational_potential, altitude, latitude, longitude
using ClimateMachine.VariableTemplates: flattenednames

using CLIMAParameters
using CLIMAParameters.Planet: planet_radius
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test

const ntracers = 1

# Driver: acoustic-wave test with different polynomial degrees in the
# horizontal and vertical directions, run with and without explicit/implicit
# operator splitting; checks the final norm against stored reference values.
function main()
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    # 5th order in the horizontal, cubic in the vertical
    polynomialorder = (5, 3)
    numelem_horz = 10
    numelem_vert = 6
    timeend = 60 * 60
    # timeend = 33 * 60 * 60 # Full simulation
    outputtime = 60 * 60

    # Regression values keyed by float type (same value for both splittings).
    expected_result = Dict()
    expected_result[Float32] = 9.5075065f13
    expected_result[Float64] = 9.507349773781483e13

    for FT in (Float64, Float32)
        for split_explicit_implicit in (false, true)
            result = test_run(
                mpicomm,
                polynomialorder,
                numelem_horz,
                numelem_vert,
                timeend,
                outputtime,
                ArrayType,
                FT,
                split_explicit_implicit,
            )
            @test result ≈ expected_result[FT]
        end
    end
end

# Build grid, models and the ARK2 IMEX solver for one configuration; returns
# norm(Q) at the final time.
function test_run(
    mpicomm,
    polynomialorder,
    numelem_horz,
    numelem_vert,
    timeend,
    outputtime,
    ArrayType,
    FT,
    split_explicit_implicit,
)
    setup = AcousticWaveSetup{FT}()
    _planet_radius::FT = planet_radius(param_set)
    vert_range = grid1d(
        _planet_radius,
        FT(_planet_radius + setup.domain_height),
        nelem = numelem_vert,
    )
    topology = StackedCubedSphereTopology(mpicomm, numelem_horz, vert_range)
    grid = DiscontinuousSpectralElementGrid(
        topology,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = polynomialorder,
        meshwarp = equiangular_cubed_sphere_warp,
    )
    T_profile = IsothermalProfile(param_set, setup.T_ref)
    δ_χ = @SVector [FT(ii) for ii in 1:ntracers]
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = HydrostaticState(T_profile),
        turbulence = ConstantDynamicViscosity(FT(0)),
        moisture = DryModel(),
        tracers = NTracers{length(δ_χ), FT}(δ_χ),
    )
    model = AtmosModel{FT}(
        AtmosGCMConfigType,
        physics;
        init_state_prognostic = setup,
        source = (Gravity(),),
    )
    # Linearized model with the fast acoustic/gravity-wave modes, solved
    # implicitly in the vertical.
    linearmodel = AtmosAcousticGravityLinearModel(model)

    dg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )
    lineardg = DGModel(
        linearmodel,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient();
        direction = VerticalDirection(),
        state_auxiliary = dg.state_auxiliary,
    )

    # determine the time step
    element_size = (setup.domain_height / numelem_vert)
    acoustic_speed = soundspeed_air(param_set, FT(setup.T_ref))
    dt_factor = 445
    # Time step restriction scales with the largest polynomial degree.
    Nmax = maximum(polynomialorder)
    dt = dt_factor * element_size / acoustic_speed / Nmax^2
    # Adjust the time step so we exactly hit 1 hour for VTK output
    dt = 60 * 60 / ceil(60 * 60 / dt)
    nsteps = ceil(Int, timeend / dt)

    Q = init_ode_state(dg, FT(0))

    linearsolver = ManyColumnLU()
    if split_explicit_implicit
        rem_dg = remainder_DGModel(
            dg,
            (lineardg,);
            numerical_flux_first_order = (
                dg.numerical_flux_first_order,
                (lineardg.numerical_flux_first_order,),
            ),
        )
    end
    odesolver = ARK2GiraldoKellyConstantinescu(
        split_explicit_implicit ? rem_dg : dg,
        lineardg,
        LinearBackwardEulerSolver(
            linearsolver;
            isadjustable = true,
            preconditioner_update_freq = -1,
        ),
        Q;
        dt = dt,
        t0 = 0,
        split_explicit_implicit = split_explicit_implicit,
    )
    @test getsteps(odesolver) == 0

    eng0 = norm(Q)
    @info @sprintf """Starting
    ArrayType = %s
    FT = %s
    poly order horz = %d
    poly order vert = %d
    numelem_horz = %d
    numelem_vert = %d
    dt = %.16e
    norm(Q₀) = %.16e
    """ "$ArrayType" "$FT" polynomialorder... numelem_horz numelem_vert dt eng0

    # Set up the information callback
    starttime = Ref(now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            runtime = Dates.format(
                convert(DateTime, now() - starttime[]),
                dateformat"HH:MM:SS",
            )
            @info @sprintf """Update
            simtime = %.16e
            runtime = %s
            norm(Q) = %.16e
            """ gettime(odesolver) runtime energy
        end
    end
    # Look ma, no filters!
    callbacks = (cbinfo,)

    solve!(
        Q,
        odesolver;
        numberofsteps = nsteps,
        adjustfinalstep = false,
        callbacks = callbacks,
    )
    @test getsteps(odesolver) == nsteps

    # final statistics
    engf = norm(Q)
    @info @sprintf """Finished
    norm(Q) = %.16e
    norm(Q) / norm(Q₀) = %.16e
    norm(Q) - norm(Q₀) = %.16e
    """ engf engf / eng0 engf - eng0
    return engf
end

# Parameters for the acoustic-wave initial condition (see callable below).
Base.@kwdef struct AcousticWaveSetup{FT}
    domain_height::FT = 10e3
    T_ref::FT = 300
    α::FT = 3
    γ::FT = 100
    nv::Int = 1
end

function (setup::AcousticWaveSetup)(problem, bl, state, aux, localgeo, t)
    # callable to set initial conditions
    FT = eltype(state)
    param_set = parameter_set(bl)

    λ = longitude(bl, aux)
    φ = latitude(bl, aux)
    z = altitude(bl, aux)

    # Horizontal cosine bump centered on (λ, φ) = (0, 0); vertical sine with
    # nv half-wavelengths over the domain height.
    β = min(FT(1), setup.α * acos(cos(φ) * cos(λ)))
    f = (1 + cos(FT(π) * β)) / 2
    g = sin(setup.nv * FT(π) * z / setup.domain_height)
    Δp = setup.γ * f * g
    p = aux.ref_state.p + Δp

    ts = PhaseDry_pT(param_set, p, setup.T_ref)
    # NOTE(review): q_pt is computed but never used below.
    q_pt = PhasePartition(ts)
    e_pot = gravitational_potential(bl.orientation, aux)
    e_int = internal_energy(ts)

    state.ρ = air_density(ts)
    state.ρu = SVector{3, FT}(0, 0, 0)
    state.energy.ρe = state.ρ * (e_int + e_pot)
    state.tracers.ρχ = @SVector [FT(ii) for ii in 1:ntracers]
    nothing
end

main()

================================================
FILE: test/Numerics/DGMethods/Euler/fvm_balance.jl
================================================
using ClimateMachine
using ClimateMachine.Atmos
using ClimateMachine.BalanceLaws
using ClimateMachine.ConfigTypes
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.FVReconstructions: FVLinear
using
ClimateMachine.DGMethods.NumericalFluxes
using Thermodynamics.TemperatureProfiles
using ClimateMachine.GenericCallbacks
using ClimateMachine.Mesh.Geometry
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Topologies
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using ClimateMachine.Orientations
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates

using CLIMAParameters
using CLIMAParameters.Planet: planet_radius
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using Test, MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates

# Driver: hydrostatic-balance test for the hybrid DG/FV discretization — an
# initial state equal to the hydrostatic reference state should remain (nearly)
# unchanged; the relative drift after `timeend` must stay below 6e-12.
function main()
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    # DG degree 4 in the horizontal, finite volume (degree 0) in the vertical.
    polynomialorder = (4, 0)
    FT = Float64
    NumericalFlux = RoeNumericalFlux
    @info @sprintf """Configuration
    ArrayType = %s
    FT = %s
    NumericalFlux = %s
    """ ArrayType FT NumericalFlux

    numelem_horz = 10
    numelem_vert = 32

    @testset for domain_type in (:box, :sphere)
        err = test_run(
            mpicomm,
            ArrayType,
            polynomialorder,
            numelem_horz,
            numelem_vert,
            NumericalFlux,
            FT,
            domain_type,
        )
        @test err < FT(6e-12)
    end
end

# Run the balance test on either a periodic box or the cubed sphere; returns
# the relative error norm(Q - Qe) / norm(Qe) against the initial state.
function test_run(
    mpicomm,
    ArrayType,
    polynomialorder,
    numelem_horz,
    numelem_vert,
    NumericalFlux,
    FT,
    domain_type,
)
    domain_height = 10e3
    if domain_type === :box
        domain_width = 20e3
        horz_range =
            range(FT(0), length = numelem_horz + 1, stop = FT(domain_width))
        vert_range = range(0, length = numelem_vert + 1, stop = domain_height)
        brickrange = (horz_range, horz_range, vert_range)
        periodicity = (true, true, false)
        topology =
            StackedBrickTopology(mpicomm, brickrange; periodicity = periodicity)
        meshwarp = (x...) -> identity(x)
    elseif domain_type === :sphere
        _planet_radius::FT = planet_radius(param_set)
        vert_range = grid1d(
            _planet_radius,
            FT(_planet_radius + domain_height),
            nelem = numelem_vert,
        )
        topology = StackedCubedSphereTopology(mpicomm, numelem_horz, vert_range)
        meshwarp = equiangular_cubed_sphere_warp
    end
    grid = DiscontinuousSpectralElementGrid(
        topology,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = polynomialorder,
        meshwarp = meshwarp,
    )

    T0 = FT(300)
    temp_profile = IsothermalProfile(param_set, T0)
    # Full (non-perturbation) reference state: the prognostic state is
    # initialized to it directly in `initialcondition!`.
    ref_state = HydrostaticState(temp_profile; subtract_off = false)

    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = ref_state,
        turbulence = ConstantDynamicViscosity(FT(0)),
        moisture = DryModel(),
    )
    problem = AtmosProblem(;
        physics = physics,
        init_state_prognostic = initialcondition!,
    )
    if domain_type === :box
        configtype = AtmosLESConfigType
        source = (Gravity(),)
    elseif domain_type === :sphere
        configtype = AtmosGCMConfigType
        source = (Gravity(), Coriolis())
    end
    model =
        AtmosModel{FT}(configtype, physics; problem = problem, source = source)

    dg = DGFVModel(
        model,
        grid,
        HBFVReconstruction(model, FVLinear()),
        NumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    timeend = FT(100)

    # determine the time step
    cfl = 0.2
    # NOTE(review): `step(vert_range)` assumes `vert_range` supports `step`
    # in the :sphere branch too (i.e. grid1d returns a range) — confirm.
    dz = step(vert_range)
    dt = cfl * dz / soundspeed_air(param_set, T0)
    nsteps = ceil(Int, timeend / dt)
    dt = timeend / nsteps

    Q = init_ode_state(dg, FT(0))
    lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0)

    eng0 = norm(Q)
    @info @sprintf """Starting
    domain_type = %s
    numelem_horz = %d
    numelem_vert = %d
    dt = %.16e
    norm(Q₀) = %.16e
    """ domain_type numelem_horz numelem_vert dt eng0

    # Set up the information callback
    starttime = Ref(now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            # Momentum extrema are logged to expose any developing imbalance.
            @views begin
                ρu = extrema(Array(Q.data[:, 2, :]))
                ρv = extrema(Array(Q.data[:, 3, :]))
                ρw = extrema(Array(Q.data[:, 4, :]))
            end
            runtime = Dates.format(
                convert(DateTime, now() - starttime[]),
                dateformat"HH:MM:SS",
            )
            @info @sprintf """Update
            simtime = %.16e
            runtime = %s
            ρu = %.16e, %.16e
            ρv = %.16e, %.16e
            ρw = %.16e, %.16e
            norm(Q) = %.16e
            """ gettime(lsrk) runtime ρu... ρv... ρw... energy
        end
    end
    callbacks = (cbinfo,)

    solve!(Q, lsrk; timeend = timeend, callbacks = callbacks)

    # final statistics
    Qe = init_ode_state(dg, timeend)
    engf = norm(Q)
    engfe = norm(Qe)
    errf = euclidean_distance(Q, Qe)
    errr = errf / engfe
    @info @sprintf """Finished
    norm(Q) = %.16e
    norm(Q) / norm(Q₀) = %.16e
    norm(Q) - norm(Q₀) = %.16e
    norm(Q - Qe) = %.16e
    norm(Q - Qe) / norm(Qe) = %.16e
    """ engf engf / eng0 engf - eng0 errf errr
    errr
end

# Initial condition: exactly the hydrostatic reference state at rest.
function initialcondition!(problem, bl, state, aux, coords, t, args...)
    state.ρ = aux.ref_state.ρ
    state.ρu = SVector(0, 0, 0)
    state.energy.ρe = aux.ref_state.ρe
end

main()

================================================
FILE: test/Numerics/DGMethods/Euler/fvm_isentropicvortex.jl
================================================
using ClimateMachine
using ClimateMachine.Atmos
using ClimateMachine.BalanceLaws
using ClimateMachine.ConfigTypes
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear
using ClimateMachine.GenericCallbacks
using ClimateMachine.Mesh.Geometry
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Topologies
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using ClimateMachine.Orientations
using ClimateMachine.SystemSolvers
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates
using ClimateMachine.VTK

using CLIMAParameters
using CLIMAParameters.Planet: kappa_d
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test

include("isentropicvortex_setup.jl")

if !@isdefined integration_testing
    const integration_testing = parse(
        Bool,
        lowercase(get(ENV,
"JULIA_CLIMA_INTEGRATION_TESTING", "false")),
    )
end

const output_vtk = false

# Driver: isentropic-vortex convergence test for the hybrid DG/FV
# discretization; compares errors across refinement levels against stored
# reference values and logs the observed convergence rates.
function main()
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    polynomialorder = 4
    numlevels = integration_testing ? 4 : 1

    expected_error = Dict()
    # Just to make it shorter and aligning
    Roe = RoeNumericalFlux

    # Float64, Dim 2, degree 4 in the horizontal, FV order 1, refinement level
    expected_error[Float64, FVConstant(), Roe, 1] = 3.5317756615538940e+01
    expected_error[Float64, FVConstant(), Roe, 2] = 2.5104217086071472e+01
    expected_error[Float64, FVConstant(), Roe, 3] = 1.6169569358521223e+01
    expected_error[Float64, FVConstant(), Roe, 4] = 9.4731749125284708e+00

    expected_error[Float64, FVLinear(), Roe, 1] = 2.6132907774912638e+01
    expected_error[Float64, FVLinear(), Roe, 2] = 8.8198392537283006e+00
    expected_error[Float64, FVLinear(), Roe, 3] = 2.4517109427575416e+00
    expected_error[Float64, FVLinear(), Roe, 4] = 7.4154384427579900e-01

    dims = 2
    @testset "$(@__FILE__)" begin
        for FT in (Float64,)
            for NumericalFlux in (Roe,)
                setup = IsentropicVortexSetup{FT}()
                for fvmethod in (FVConstant(), FVLinear())
                    @info @sprintf """Configuration
                    FT = %s
                    ArrayType = %s
                    FV Reconstruction = %s
                    NumericalFlux = %s
                    dims = %d
                    """ FT ArrayType fvmethod NumericalFlux dims

                    errors = Vector{FT}(undef, numlevels)
                    for level in 1:numlevels
                        # Match element numbers
                        numelems = (
                            2^(level - 1) * 5,
                            2^(level - 1) * 5 * polynomialorder,
                        )
                        errors[level] = test_run(
                            mpicomm,
                            ArrayType,
                            fvmethod,
                            polynomialorder,
                            numelems,
                            NumericalFlux,
                            setup,
                            FT,
                            dims,
                            level,
                        )
                        @test errors[level] ≈
                              expected_error[FT, fvmethod, NumericalFlux, level]
                    end
                    # Observed convergence rate between successive levels.
                    @info begin
                        msg = ""
                        for l in 1:(numlevels - 1)
                            rate = log2(errors[l]) - log2(errors[l + 1])
                            msg *= @sprintf(
                                "\n rate for level %d = %e\n",
                                l,
                                rate
                            )
                        end
                        msg
                    end
                end
            end
        end
    end
end

# Run one refinement level; returns the error norm(Q - Qe) at the final time,
# where Qe is the exact translated-vortex solution.
function test_run(
    mpicomm,
    ArrayType,
    fvmethod,
    polynomialorder,
    numelems,
    NumericalFlux,
    setup,
    FT,
    dims,
    level,
)
    brickrange = ntuple(dims) do dim
        range(
            -setup.domain_halflength;
            length = numelems[dim] + 1,
            stop = setup.domain_halflength,
        )
    end
    connectivity = dims == 3 ? :full : :face
    topology = StackedBrickTopology(
        mpicomm,
        brickrange;
        periodicity = ntuple(_ -> true, dims),
        connectivity = connectivity,
    )
    # DG in the first direction, finite volume (degree 0) in the stacked one.
    grid = DiscontinuousSpectralElementGrid(
        topology,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = (polynomialorder, 0),
    )
    problem =
        AtmosProblem(boundaryconditions = (), init_state_prognostic = setup)
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = NoReferenceState(),
        turbulence = ConstantDynamicViscosity(FT(0)),
        moisture = DryModel(),
    )
    model = AtmosModel{FT}(
        AtmosLESConfigType,
        physics;
        problem = problem,
        orientation = NoOrientation(),
        source = (),
    )
    dgfvm = DGFVModel(
        model,
        grid,
        fvmethod,
        NumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    # One tenth of a full traversal of the (periodic) domain.
    timeend = FT(2 * setup.domain_halflength / 10 / setup.translation_speed)

    # Determine the time step
    elementsize = minimum(step.(brickrange))
    dt = elementsize / soundspeed_air(param_set, setup.T∞) / polynomialorder^2
    nsteps = ceil(Int, timeend / dt)
    dt = timeend / nsteps

    Q = init_ode_state(dgfvm, FT(0))
    lsrk = LSRK54CarpenterKennedy(dgfvm, Q; dt = dt, t0 = 0)

    eng0 = norm(Q)
    dims == 2 && (numelems = (numelems..., 0))
    @info @sprintf """Starting refinement level %d
    numelems = (%d, %d, %d)
    dt = %.16e
    norm(Q₀) = %.16e
    """ level numelems... dt eng0

    # Set up the information callback
    starttime = Ref(now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            runtime = Dates.format(
                convert(DateTime, now() - starttime[]),
                dateformat"HH:MM:SS",
            )
            @info @sprintf """Update
            simtime = %.16e
            runtime = %s
            norm(Q) = %.16e
            """ gettime(lsrk) runtime energy
        end
    end
    callbacks = (cbinfo,)

    if output_vtk
        # Create vtk dir
        vtkdir =
            "vtk_isentropicvortex" *
            "_poly$(polynomialorder)_dims$(dims)_$(ArrayType)_$(FT)_level$(level)"
        mkpath(vtkdir)

        vtkstep = 0
        # Output initial step
        do_output(mpicomm, vtkdir, vtkstep, dgfvm, Q, Q, model)

        # Setup the output callback
        outputtime = timeend
        cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do
            vtkstep += 1
            Qe = init_ode_state(dgfvm, gettime(lsrk), setup)
            do_output(mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model)
        end
        callbacks = (callbacks..., cbvtk)
    end

    solve!(Q, lsrk; timeend = timeend, callbacks = callbacks)

    # Final statistics
    Qe = init_ode_state(dgfvm, timeend, setup)
    engf = norm(Q)
    engfe = norm(Qe)
    errf = euclidean_distance(Q, Qe)
    @info @sprintf """Finished refinement level %d
    norm(Q) = %.16e
    norm(Q) / norm(Q₀) = %.16e
    norm(Q) - norm(Q₀) = %.16e
    norm(Q - Qe) = %.16e
    norm(Q - Qe) / norm(Qe) = %.16e
    """ level engf engf / eng0 engf - eng0 errf errf / engfe
    errf
end

# Write per-rank VTK output of the computed and exact states plus a root-rank
# pvtu index file.
function do_output(
    mpicomm,
    vtkdir,
    vtkstep,
    dgfvm,
    Q,
    Qe,
    model,
    testname = "isentropicvortex",
)
    ## Name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )

    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"

    writevtk(filename, Q, dgfvm, statenames, Qe, exactnames)

    ## Generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)

        ## Name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end
        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )
        @info "Done writing VTK: $pvtuprefix"
    end
end

main()

================================================
FILE: test/Numerics/DGMethods/Euler/isentropicvortex.jl
================================================
using ClimateMachine
using ClimateMachine.Atmos
using ClimateMachine.BalanceLaws
using ClimateMachine.ConfigTypes
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.GenericCallbacks
using ClimateMachine.Mesh.Geometry
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Topologies
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using ClimateMachine.Orientations
using ClimateMachine.SystemSolvers
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates
using ClimateMachine.VTK

using CLIMAParameters
using CLIMAParameters.Planet: kappa_d
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test

include("isentropicvortex_setup.jl")

if !@isdefined integration_testing
    const integration_testing = parse(
        Bool,
        lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
    )
end

const output_vtk = false

function main()
    ClimateMachine.init(parse_clargs = true)
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    polynomialorder = 4
    numlevels = integration_testing ?
4 : 1 expected_error = Dict() # just to make it shorter and aligning Rusanov = RusanovNumericalFlux() Central = CentralNumericalFluxFirstOrder() Roe = RoeNumericalFlux() HLLC = HLLCNumericalFlux() RoeMoist = RoeNumericalFluxMoist() RoeMoistLM = RoeNumericalFluxMoist(; LM = true) RoeMoistHH = RoeNumericalFluxMoist(; HH = true) RoeMoistLV = RoeNumericalFluxMoist(; LV = true) RoeMoistLVPP = RoeNumericalFluxMoist(; LVPP = true) expected_error[Float64, 2, Rusanov, 1] = 1.1990999506538110e+01 expected_error[Float64, 2, Rusanov, 2] = 2.0813000228865612e+00 expected_error[Float64, 2, Rusanov, 3] = 6.3752572004789149e-02 expected_error[Float64, 2, Rusanov, 4] = 2.0984975076420455e-03 expected_error[Float64, 2, Central, 1] = 2.0840574601661153e+01 expected_error[Float64, 2, Central, 2] = 2.9255455365299827e+00 expected_error[Float64, 2, Central, 3] = 3.6935849488949657e-01 expected_error[Float64, 2, Central, 4] = 8.3528804679907434e-03 expected_error[Float64, 2, Roe, 1] = 1.2891386634733328e+01 expected_error[Float64, 2, Roe, 2] = 1.3895805145495934e+00 expected_error[Float64, 2, Roe, 3] = 6.6174934435569849e-02 expected_error[Float64, 2, Roe, 4] = 2.1917769287815940e-03 expected_error[Float64, 2, RoeMoist, 1] = 1.2415957884123003e+01 expected_error[Float64, 2, RoeMoist, 2] = 1.4188653323424882e+00 expected_error[Float64, 2, RoeMoist, 3] = 6.7913325894248130e-02 expected_error[Float64, 2, RoeMoist, 4] = 2.0377963111049128e-03 expected_error[Float64, 2, RoeMoistLM, 1] = 1.2316906651180444e+01 expected_error[Float64, 2, RoeMoistLM, 2] = 1.4359406523244560e+00 expected_error[Float64, 2, RoeMoistLM, 3] = 6.8650238542101505e-02 expected_error[Float64, 2, RoeMoistLM, 4] = 2.0000156591586842e-03 expected_error[Float64, 2, RoeMoistHH, 1] = 1.2425625606467793e+01 expected_error[Float64, 2, RoeMoistHH, 2] = 1.4029458093339020e+00 expected_error[Float64, 2, RoeMoistHH, 3] = 6.8648208937091268e-02 expected_error[Float64, 2, RoeMoistHH, 4] = 2.0711985861781648e-03 expected_error[Float64, 
2, RoeMoistLV, 1] = 1.2415957884123003e+01 expected_error[Float64, 2, RoeMoistLV, 2] = 1.4188653323424882e+00 expected_error[Float64, 2, RoeMoistLV, 3] = 6.7913325894248130e-02 expected_error[Float64, 2, RoeMoistLV, 4] = 2.0377963111049128e-03 expected_error[Float64, 2, RoeMoistLVPP, 1] = 1.2441813136310969e+01 expected_error[Float64, 2, RoeMoistLVPP, 2] = 2.0219325767566727e+00 expected_error[Float64, 2, RoeMoistLVPP, 3] = 6.7716921628626484e-02 expected_error[Float64, 2, RoeMoistLVPP, 4] = 2.1051129944994005e-03 expected_error[Float64, 2, HLLC, 1] = 1.2889756097329746e+01 expected_error[Float64, 2, HLLC, 2] = 1.3895808565455936e+00 expected_error[Float64, 2, HLLC, 3] = 6.6175116756217900e-02 expected_error[Float64, 2, HLLC, 4] = 2.1917772135679118e-03 expected_error[Float64, 3, Rusanov, 1] = 3.7918869862613858e+00 expected_error[Float64, 3, Rusanov, 2] = 6.5816485664822677e-01 expected_error[Float64, 3, Rusanov, 3] = 2.0160333422867591e-02 expected_error[Float64, 3, Rusanov, 4] = 6.6360317881818034e-04 expected_error[Float64, 3, Central, 1] = 6.5903683487905749e+00 expected_error[Float64, 3, Central, 2] = 9.2513872939749997e-01 expected_error[Float64, 3, Central, 3] = 1.1680141169828175e-01 expected_error[Float64, 3, Central, 4] = 2.6414127301659534e-03 expected_error[Float64, 3, Roe, 1] = 4.0766143963611068e+00 expected_error[Float64, 3, Roe, 2] = 4.3942394181655547e-01 expected_error[Float64, 3, Roe, 3] = 2.0926351682882375e-02 expected_error[Float64, 3, Roe, 4] = 6.9310072176312712e-04 expected_error[Float64, 3, RoeMoist, 1] = 3.9262706246552574e+00 expected_error[Float64, 3, RoeMoist, 2] = 4.4868461432545598e-01 expected_error[Float64, 3, RoeMoist, 3] = 2.1476079330305119e-02 expected_error[Float64, 3, RoeMoist, 4] = 6.4440777504566171e-04 expected_error[Float64, 3, RoeMoistLM, 1] = 3.8949478745407458e+00 expected_error[Float64, 3, RoeMoistLM, 2] = 4.5408430461734528e-01 expected_error[Float64, 3, RoeMoistLM, 3] = 2.1709111570716800e-02 
expected_error[Float64, 3, RoeMoistLM, 4] = 6.3246048386171073e-04 expected_error[Float64, 3, RoeMoistHH, 1] = 3.9293278268948622e+00 expected_error[Float64, 3, RoeMoistHH, 2] = 4.4365041912830866e-01 expected_error[Float64, 3, RoeMoistHH, 3] = 2.1708469753267460e-02 expected_error[Float64, 3, RoeMoistHH, 4] = 6.5497050185153694e-04 expected_error[Float64, 3, RoeMoistLV, 1] = 3.9262706246552574e+00 expected_error[Float64, 3, RoeMoistLV, 2] = 4.4868461432545598e-01 expected_error[Float64, 3, RoeMoistLV, 3] = 2.1476079330305119e-02 expected_error[Float64, 3, RoeMoistLV, 4] = 6.4440777504566171e-04 expected_error[Float64, 3, RoeMoistLVPP, 1] = 3.9344467732944071e+00 expected_error[Float64, 3, RoeMoistLVPP, 2] = 6.3939122178436703e-01 expected_error[Float64, 3, RoeMoistLVPP, 3] = 2.1413970848172485e-02 expected_error[Float64, 3, RoeMoistLVPP, 4] = 6.6569517942933988e-04 expected_error[Float64, 3, HLLC, 1] = 4.0760987751605402e+00 expected_error[Float64, 3, HLLC, 2] = 4.3942404996518236e-01 expected_error[Float64, 3, HLLC, 3] = 2.0926409337758904e-02 expected_error[Float64, 3, HLLC, 4] = 6.9310081182571569e-04 expected_error[Float32, 2, Rusanov, 1] = 1.1990781784057617e+01 expected_error[Float32, 2, Rusanov, 2] = 2.0813269615173340e+00 expected_error[Float32, 2, Rusanov, 3] = 6.7035309970378876e-02 expected_error[Float32, 2, Rusanov, 4] = 5.3008597344160080e-02 expected_error[Float32, 2, Central, 1] = 2.0840391159057617e+01 expected_error[Float32, 2, Central, 2] = 2.9256355762481689e+00 expected_error[Float32, 2, Central, 3] = 3.7092915177345276e-01 expected_error[Float32, 2, Central, 4] = 1.1543693393468857e-01 expected_error[Float32, 2, Roe, 1] = 1.2891359329223633e+01 expected_error[Float32, 2, Roe, 2] = 1.3895936012268066e+00 expected_error[Float32, 2, Roe, 3] = 6.8037144839763641e-02 expected_error[Float32, 2, Roe, 4] = 3.8893952965736389e-02 expected_error[Float32, 2, RoeMoist, 1] = 1.2415886878967285e+01 expected_error[Float32, 2, RoeMoist, 2] = 
1.4188879728317261e+00 expected_error[Float32, 2, RoeMoist, 3] = 6.9743692874908447e-02 expected_error[Float32, 2, RoeMoist, 4] = 3.7607192993164063e-02 expected_error[Float32, 2, RoeMoistLM, 1] = 1.2316809654235840e+01 expected_error[Float32, 2, RoeMoistLM, 2] = 4.5408430461734528e+00 expected_error[Float32, 2, RoeMoistLM, 3] = 7.0370830595493317e-02 expected_error[Float32, 2, RoeMoistLM, 4] = 3.7792034447193146e-02 expected_error[Float32, 2, RoeMoistHH, 1] = 1.2425449371337891e+01 expected_error[Float32, 2, RoeMoistHH, 2] = 1.4030106067657471e+00 expected_error[Float32, 2, RoeMoistHH, 3] = 7.0363849401473999e-02 expected_error[Float32, 2, RoeMoistHH, 4] = 3.7904966622591019e-02 expected_error[Float32, 2, RoeMoistLV, 1] = 1.2415886878967285e+01 expected_error[Float32, 2, RoeMoistLV, 2] = 1.4188879728317261e+00 expected_error[Float32, 2, RoeMoistLV, 3] = 6.9743692874908447e-02 expected_error[Float32, 2, RoeMoistLV, 4] = 3.7607192993164063e-02 expected_error[Float32, 2, RoeMoistLVPP, 1] = 1.2441481590270996e+01 expected_error[Float32, 2, RoeMoistLVPP, 2] = 2.0217459201812744e+00 expected_error[Float32, 2, RoeMoistLVPP, 3] = 7.0483185350894928e-02 expected_error[Float32, 2, RoeMoistLVPP, 4] = 5.1601748913526535e-02 expected_error[Float32, 2, HLLC, 1] = 1.2889801025390625e+01 expected_error[Float32, 2, HLLC, 2] = 1.3895059823989868e+00 expected_error[Float32, 2, HLLC, 3] = 6.8006515502929688e-02 expected_error[Float32, 2, HLLC, 4] = 3.8637656718492508e-02 expected_error[Float32, 3, Rusanov, 1] = 3.7918186187744141e+00 expected_error[Float32, 3, Rusanov, 2] = 6.5816193819046021e-01 expected_error[Float32, 3, Rusanov, 3] = 2.0893247798085213e-02 expected_error[Float32, 3, Rusanov, 4] = 1.1554701253771782e-02 expected_error[Float32, 3, Central, 1] = 6.5903329849243164e+00 expected_error[Float32, 3, Central, 2] = 9.2512512207031250e-01 expected_error[Float32, 3, Central, 3] = 1.1707859486341476e-01 expected_error[Float32, 3, Central, 4] = 2.1001411601901054e-02 
expected_error[Float32, 3, Roe, 1] = 4.0765657424926758e+00 expected_error[Float32, 3, Roe, 2] = 4.3941807746887207e-01 expected_error[Float32, 3, Roe, 3] = 2.1365188062191010e-02 expected_error[Float32, 3, Roe, 4] = 9.3323951587080956e-03 expected_error[Float32, 3, RoeMoist, 1] = 3.9262301921844482e+00 expected_error[Float32, 3, RoeMoist, 2] = 4.4864514470100403e-01 expected_error[Float32, 3, RoeMoist, 3] = 2.1889146417379379e-02 expected_error[Float32, 3, RoeMoist, 4] = 8.8266804814338684e-03 expected_error[Float32, 3, RoeMoistLM, 1] = 3.8948786258697510e+00 expected_error[Float32, 3, RoeMoistLM, 2] = 4.5405751466751099e-01 expected_error[Float32, 3, RoeMoistLM, 3] = 2.2112159058451653e-02 expected_error[Float32, 3, RoeMoistLM, 4] = 8.7371272966265678e-03 expected_error[Float32, 3, RoeMoistHH, 1] = 3.9292929172515869e+00 expected_error[Float32, 3, RoeMoistHH, 2] = 4.4363334774971008e-01 expected_error[Float32, 3, RoeMoistHH, 3] = 2.2118536755442619e-02 expected_error[Float32, 3, RoeMoistHH, 4] = 8.9262928813695908e-03 expected_error[Float32, 3, RoeMoistLV, 1] = 3.9262151718139648e+00 expected_error[Float32, 3, RoeMoistLV, 2] = 4.4865489006042480e-01 expected_error[Float32, 3, RoeMoistLV, 3] = 2.1889505907893181e-02 expected_error[Float32, 3, RoeMoistLV, 4] = 8.8385939598083496e-03 expected_error[Float32, 3, RoeMoistLVPP, 1] = 3.9343423843383789e+00 expected_error[Float32, 3, RoeMoistLVPP, 2] = 6.3935810327529907e-01 expected_error[Float32, 3, RoeMoistLVPP, 3] = 2.1930629387497902e-02 expected_error[Float32, 3, RoeMoistLVPP, 4] = 1.0632344521582127e-02 expected_error[Float32, 3, HLLC, 1] = 4.0760631561279297e+00 expected_error[Float32, 3, HLLC, 2] = 4.3940672278404236e-01 expected_error[Float32, 3, HLLC, 3] = 2.1352596580982208e-02 expected_error[Float32, 3, HLLC, 4] = 9.2315869405865669e-03 @testset "$(@__FILE__)" begin for FT in (Float64, Float32), dims in (2, 3) for NumericalFlux in ( Rusanov, Central, Roe, HLLC, RoeMoist, RoeMoistLM, RoeMoistHH, RoeMoistLV, 
RoeMoistLVPP, ) @info @sprintf """Configuration ArrayType = %s FT = %s NumericalFlux = %s dims = %d """ ArrayType "$FT" "$NumericalFlux" dims setup = IsentropicVortexSetup{FT}() errors = Vector{FT}(undef, numlevels) for level in 1:numlevels numelems = ntuple(dim -> dim == 3 ? 1 : 2^(level - 1) * 5, dims) errors[level] = test_run( mpicomm, ArrayType, polynomialorder, numelems, NumericalFlux, setup, FT, dims, level, ) rtol = sqrt(eps(FT)) # increase rtol for comparing with GPU results using Float32 if FT === Float32 && ArrayType !== Array rtol *= 10 # why does this factor have to be so big :( end @test isapprox( errors[level], expected_error[FT, dims, NumericalFlux, level]; rtol = rtol, ) end rates = @. log2( first(errors[1:(numlevels - 1)]) / first(errors[2:numlevels]), ) numlevels > 1 && @info "Convergence rates\n" * join( [ "rate for levels $l → $(l + 1) = $(rates[l])" for l in 1:(numlevels - 1) ], "\n", ) end end end end function test_run( mpicomm, ArrayType, polynomialorder, numelems, NumericalFlux, setup, FT, dims, level, ) brickrange = ntuple(dims) do dim range( -setup.domain_halflength; length = numelems[dim] + 1, stop = setup.domain_halflength, ) end topology = BrickTopology( mpicomm, brickrange; periodicity = ntuple(_ -> true, dims), ) grid = DiscontinuousSpectralElementGrid( topology, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, ) problem = AtmosProblem(boundaryconditions = (), init_state_prognostic = setup) if NumericalFlux isa RoeNumericalFluxMoist moisture = EquilMoist() else moisture = DryModel() end physics = AtmosPhysics{FT}( param_set; ref_state = NoReferenceState(), turbulence = ConstantDynamicViscosity(FT(0)), moisture = moisture, ) model = AtmosModel{FT}( AtmosLESConfigType, physics; problem = problem, orientation = NoOrientation(), source = (), ) dg = DGModel( model, grid, NumericalFlux, CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) timeend = FT(2 * setup.domain_halflength / 10 / 
setup.translation_speed) # determine the time step elementsize = minimum(step.(brickrange)) dt = elementsize / soundspeed_air(param_set, setup.T∞) / polynomialorder^2 nsteps = ceil(Int, timeend / dt) dt = timeend / nsteps Q = init_ode_state(dg, FT(0)) lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) eng0 = norm(Q) dims == 2 && (numelems = (numelems..., 0)) @info @sprintf """Starting refinement level %d numelems = (%d, %d, %d) dt = %.16e norm(Q₀) = %.16e """ level numelems... dt eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) runtime = Dates.format( convert(DateTime, now() - starttime[]), dateformat"HH:MM:SS", ) @info @sprintf """Update simtime = %.16e runtime = %s norm(Q) = %.16e """ gettime(lsrk) runtime energy end end callbacks = (cbinfo,) if output_vtk # create vtk dir vtkdir = "vtk_isentropicvortex" * "_poly$(polynomialorder)_dims$(dims)_$(ArrayType)_$(FT)_level$(level)" mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model) # setup the output callback outputtime = timeend cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(lsrk), setup) do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model) end callbacks = (callbacks..., cbvtk) end solve!(Q, lsrk; timeend = timeend, callbacks = callbacks) # final statistics Qe = init_ode_state(dg, timeend, setup) engf = norm(Q) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished refinement level %d norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ level engf engf / eng0 engf - eng0 errf errf / engfe errf end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname = "isentropicvortex", ) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, 
testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end main() ================================================ FILE: test/Numerics/DGMethods/Euler/isentropicvortex_imex.jl ================================================ using ClimateMachine using ClimateMachine.Atmos using ClimateMachine.BalanceLaws using ClimateMachine.ConfigTypes using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.GenericCallbacks using ClimateMachine.Mesh.Geometry using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Topologies using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.Orientations using ClimateMachine.SystemSolvers using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VariableTemplates using ClimateMachine.VTK using CLIMAParameters using CLIMAParameters.Planet: kappa_d struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test include("isentropicvortex_setup.jl") if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output_vtk = false function main() ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 numlevels = 
integration_testing ? 4 : 1 expected_error = Dict() expected_error[Float64, false, 1] = 2.3225467541870387e+01 expected_error[Float64, false, 2] = 5.2663709730295070e+00 expected_error[Float64, false, 3] = 1.2183770894070467e-01 expected_error[Float64, false, 4] = 2.8660813871243937e-03 expected_error[Float64, true, 1] = 2.3225467618783981e+01 expected_error[Float64, true, 2] = 5.2663709730207771e+00 expected_error[Float64, true, 3] = 1.2183770891083319e-01 expected_error[Float64, true, 4] = 2.8660813810759854e-03 @testset "$(@__FILE__)" begin for FT in (Float64,), dims in 2 for split_explicit_implicit in (false, true) let split = split_explicit_implicit ? "(Nonlinear, Linear)" : "(Full, Linear)" @info @sprintf """Configuration ArrayType = %s FT = %s dims = %d splitting = %s """ ArrayType "$FT" dims split end setup = IsentropicVortexSetup{FT}() errors = Vector{FT}(undef, numlevels) for level in 1:numlevels numelems = ntuple(dim -> dim == 3 ? 1 : 2^(level - 1) * 5, dims) errors[level] = test_run( mpicomm, ArrayType, polynomialorder, numelems, setup, split_explicit_implicit, FT, dims, level, ) @test errors[level] ≈ expected_error[FT, split_explicit_implicit, level] end rates = @. 
log2( first(errors[1:(numlevels - 1)]) / first(errors[2:numlevels]), ) numlevels > 1 && @info "Convergence rates\n" * join( [ "rate for levels $l → $(l + 1) = $(rates[l])" for l in 1:(numlevels - 1) ], "\n", ) end end end end function test_run( mpicomm, ArrayType, polynomialorder, numelems, setup, split_explicit_implicit, FT, dims, level, ) brickrange = ntuple(dims) do dim range( -setup.domain_halflength; length = numelems[dim] + 1, stop = setup.domain_halflength, ) end topology = BrickTopology( mpicomm, brickrange; periodicity = ntuple(_ -> true, dims), ) grid = DiscontinuousSpectralElementGrid( topology, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, ) problem = AtmosProblem(boundaryconditions = (), init_state_prognostic = setup) physics = AtmosPhysics{FT}( param_set; ref_state = IsentropicVortexReferenceState{FT}(setup), turbulence = ConstantDynamicViscosity(FT(0)), moisture = DryModel(), ) model = AtmosModel{FT}( AtmosLESConfigType, physics; problem = problem, orientation = NoOrientation(), source = (), ) linear_model = AtmosAcousticLinearModel(model) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) dg_linear = DGModel( linear_model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); state_auxiliary = dg.state_auxiliary, ) if split_explicit_implicit dg_nonlinear = remainder_DGModel(dg, (dg_linear,)) end timeend = FT(2 * setup.domain_halflength / setup.translation_speed) # determine the time step elementsize = minimum(step.(brickrange)) dt = elementsize / soundspeed_air(param_set, setup.T∞) / polynomialorder^2 nsteps = ceil(Int, timeend / dt) dt = timeend / nsteps Q = init_ode_state(dg, FT(0)) linearsolver = GeneralizedMinimalResidual(Q; M = 10, rtol = 1e-10) ode_solver = ARK2GiraldoKellyConstantinescu( split_explicit_implicit ? 
dg_nonlinear : dg, dg_linear, LinearBackwardEulerSolver(linearsolver; isadjustable = true), Q; dt = dt, t0 = 0, split_explicit_implicit = split_explicit_implicit, paperversion = true, ) eng0 = norm(Q) dims == 2 && (numelems = (numelems..., 0)) @info @sprintf """Starting refinement level %d numelems = (%d, %d, %d) dt = %.16e norm(Q₀) = %.16e """ level numelems... dt eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) runtime = Dates.format( convert(DateTime, now() - starttime[]), dateformat"HH:MM:SS", ) @info @sprintf """Update simtime = %.16e runtime = %s norm(Q) = %.16e """ gettime(ode_solver) runtime energy end end callbacks = (cbinfo,) if output_vtk # create vtk dir vtkdir = "vtk_isentropicvortex_imex" * "_poly$(polynomialorder)_dims$(dims)_$(ArrayType)_$(FT)_level$(level)" * "_$(split_explicit_implicit)" mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model) # setup the output callback outputtime = timeend cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(ode_solver)) do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model) end callbacks = (callbacks..., cbvtk) end solve!(Q, ode_solver; timeend = timeend, callbacks = callbacks) # final statistics Qe = init_ode_state(dg, timeend) engf = norm(Q) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished refinement level %d norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ level engf engf / eng0 engf - eng0 errf errf / engfe errf end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname = "isentropicvortex_imex", ) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = 
flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end main() ================================================ FILE: test/Numerics/DGMethods/Euler/isentropicvortex_lmars.jl ================================================ using ClimateMachine using ClimateMachine.Atmos using ClimateMachine.BalanceLaws using ClimateMachine.ConfigTypes using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.GenericCallbacks using ClimateMachine.Mesh.Geometry using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Topologies using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.Orientations using ClimateMachine.SystemSolvers using Thermodynamics using ClimateMachine.TurbulenceClosures using Thermodynamics.TemperatureProfiles using ClimateMachine.VariableTemplates using ClimateMachine.VTK using CLIMAParameters using CLIMAParameters.Planet: kappa_d struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test include("isentropicvortex_setup.jl") if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output_vtk = false function main() ClimateMachine.init(parse_clargs = true) ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 numlevels = 
integration_testing ? 4 : 4 expected_error = Dict() # just to make it shorter and aligning LMARS = LMARSNumericalFlux() @testset "$(@__FILE__)" begin for FT in (Float64,), dims in (2, 3), polynomialorder in (4,) for NumericalFlux in (LMARS,) @info @sprintf """Configuration ArrayType = %s FT = %s NumericalFlux = %s dims = %d N_poly = %d """ ArrayType "$FT" "$NumericalFlux" dims polynomialorder setup = IsentropicVortexSetup{FT}() errors = Vector{FT}(undef, numlevels) for level in 1:numlevels numelems = ntuple(dim -> dim == 3 ? 1 : 2^(level - 1) * 5, dims) errors[level] = test_run( mpicomm, ArrayType, polynomialorder, numelems, NumericalFlux, setup, FT, dims, level, ) @test isapprox(errors[level], FT(1.0); rtol = 1e-5) end end end end end function test_run( mpicomm, ArrayType, polynomialorder, numelems, NumericalFlux, setup, FT, dims, level, ) brickrange = ntuple(dims) do dim range( -setup.domain_halflength; length = numelems[dim] + 1, stop = setup.domain_halflength, ) end topology = BrickTopology( mpicomm, brickrange; periodicity = ntuple(_ -> true, dims), ) grid = DiscontinuousSpectralElementGrid( topology, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, ) problem = AtmosProblem(boundaryconditions = (), init_state_prognostic = setup) if NumericalFlux isa RoeNumericalFluxMoist moisture = EquilMoist() else moisture = DryModel() end if NumericalFlux isa LMARSNumericalFlux ref_state = NoReferenceState() end physics = AtmosPhysics{FT}( param_set; ref_state = ref_state, turbulence = ConstantDynamicViscosity(FT(0)), moisture = moisture, ) model = AtmosModel{FT}( AtmosLESConfigType, physics; problem = problem, orientation = NoOrientation(), source = (), ) dg = DGModel( model, grid, NumericalFlux, CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) timeend = FT(2 * setup.domain_halflength / 10 / setup.translation_speed) # determine the time step elementsize = minimum(step.(brickrange)) dt = elementsize / soundspeed_air(param_set, 
setup.T∞) / polynomialorder^2 nsteps = ceil(Int, timeend / dt) dt = timeend / nsteps Q = init_ode_state(dg, FT(0)) lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) eng0 = norm(Q) dims == 2 && (numelems = (numelems..., 0)) @info @sprintf """Starting refinement level %d polyorder = %d numelems = (%d, %d, %d) dt = %.16e norm(Q₀) = %.16e FT = %s """ level polynomialorder numelems... dt eng0 FT # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) runtime = Dates.format( convert(DateTime, now() - starttime[]), dateformat"HH:MM:SS", ) @info @sprintf """Update simtime = %.16e runtime = %s norm(Q) = %.16e """ gettime(lsrk) runtime energy end end callbacks = (cbinfo,) if output_vtk # create vtk dir vtkdir = "vtk_isentropicvortex" * "$(typeof(NumericalFlux))" * "_poly$(polynomialorder)_dims$(dims)_$(ArrayType)_$(FT)_level$(level)" mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model) # setup the output callback outputtime = timeend / 10 cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(lsrk), setup) do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model) end callbacks = (callbacks..., cbvtk) end solve!(Q, lsrk; timeend = timeend, callbacks = callbacks) # final statistics Qe = init_ode_state(dg, timeend, setup) engf = norm(Q) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished refinement level %d norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ level engf engf / eng0 engf - eng0 errf errf / engfe engf / eng0 end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname = "isentropicvortex_lmars", ) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) 
statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end main() ================================================ FILE: test/Numerics/DGMethods/Euler/isentropicvortex_mrigark.jl ================================================ using ClimateMachine using ClimateMachine.Atmos using ClimateMachine.BalanceLaws using ClimateMachine.ConfigTypes using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.GenericCallbacks using ClimateMachine.Mesh.Geometry using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Topologies using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.Orientations using ClimateMachine.SystemSolvers using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VariableTemplates using ClimateMachine.VTK using CLIMAParameters using CLIMAParameters.Planet: kappa_d struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test include("isentropicvortex_setup.jl") if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output_vtk = false function main() ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 numlevels = integration_testing ? 
4 : 1 expected_error = Dict() expected_error[Float64, MRIGARKERK33aSandu, 1] = 2.3357934866477940e+01 expected_error[Float64, MRIGARKERK33aSandu, 2] = 5.3328129440361121e+00 expected_error[Float64, MRIGARKERK33aSandu, 3] = 1.2991232057877919e-01 expected_error[Float64, MRIGARKERK33aSandu, 4] = 6.1056067013518876e-03 expected_error[Float64, MRIGARKERK45aSandu, 1] = 2.3207510164213900e+01 expected_error[Float64, MRIGARKERK45aSandu, 2] = 5.2787446598866872e+00 expected_error[Float64, MRIGARKERK45aSandu, 3] = 1.2151170640665301e-01 expected_error[Float64, MRIGARKERK45aSandu, 4] = 2.1001271191583956e-03 @testset "$(@__FILE__)" begin for FT in (Float64,), dims in 2 for mrigark_method in (MRIGARKERK33aSandu, MRIGARKERK45aSandu) @info @sprintf """Configuration ArrayType = %s mrigark_method = %s FT = %s dims = %d """ ArrayType "$mrigark_method" "$FT" dims setup = IsentropicVortexSetup{FT}() errors = Vector{FT}(undef, numlevels) for level in 1:numlevels numelems = ntuple(dim -> dim == 3 ? 1 : 2^(level - 1) * 5, dims) errors[level] = test_run( mpicomm, ArrayType, polynomialorder, numelems, setup, FT, mrigark_method, dims, level, ) @test errors[level] ≈ expected_error[FT, mrigark_method, level] end rates = @. 
log2( first(errors[1:(numlevels - 1)]) / first(errors[2:numlevels]), ) numlevels > 1 && @info "Convergence rates\n" * join( [ "rate for levels $l → $(l + 1) = $(rates[l])" for l in 1:(numlevels - 1) ], "\n", ) end end end end function test_run( mpicomm, ArrayType, polynomialorder, numelems, setup, FT, mrigark_method, dims, level, ) brickrange = ntuple(dims) do dim range( -setup.domain_halflength; length = numelems[dim] + 1, stop = setup.domain_halflength, ) end topology = BrickTopology( mpicomm, brickrange; periodicity = ntuple(_ -> true, dims), ) grid = DiscontinuousSpectralElementGrid( topology, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, ) problem = AtmosProblem(boundaryconditions = (), init_state_prognostic = setup) physics = AtmosPhysics{FT}( param_set; ref_state = IsentropicVortexReferenceState{FT}(setup), turbulence = ConstantDynamicViscosity(FT(0)), moisture = DryModel(), ) model = AtmosModel{FT}( AtmosLESConfigType, physics; problem = problem, orientation = NoOrientation(), source = (), ) # The linear model has the fast time scales fast_model = AtmosAcousticLinearModel(model) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) fast_dg = DGModel( fast_model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); state_auxiliary = dg.state_auxiliary, ) slow_dg = remainder_DGModel(dg, (fast_dg,)) timeend = FT(2 * setup.domain_halflength / setup.translation_speed) # determine the slow time step elementsize = minimum(step.(brickrange)) slow_dt = 2 * elementsize / soundspeed_air(param_set, setup.T∞) / polynomialorder^2 nsteps = ceil(Int, timeend / slow_dt) slow_dt = timeend / nsteps # arbitrary and not needed for stability, just for testing fast_dt = slow_dt / 3 Q = init_ode_state(dg, FT(0), setup) fastsolver = LSRK144NiegemannDiehlBusch(fast_dg, Q; dt = fast_dt) ode_solver = mrigark_method(slow_dg, fastsolver, Q, dt = 
slow_dt) eng0 = norm(Q) dims == 2 && (numelems = (numelems..., 0)) @info @sprintf """Starting refinement level %d numelems = (%d, %d, %d) slow_dt = %.16e fast_dt = %.16e norm(Q₀) = %.16e """ level numelems... slow_dt fast_dt eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) runtime = Dates.format( convert(DateTime, now() - starttime[]), dateformat"HH:MM:SS", ) @info @sprintf """Update simtime = %.16e runtime = %s norm(Q) = %.16e """ gettime(ode_solver) runtime energy end end callbacks = (cbinfo,) if output_vtk # create vtk dir vtkdir = "vtk_isentropicvortex_mrigark" * "_poly$(polynomialorder)_dims$(dims)_$(ArrayType)_$(FT)" * "_$(mrigark_method)_level$(level)" #= fix: FastMethod is not defined in this function; the time-stepper argument here is named mrigark_method =# mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model) # setup the output callback outputtime = timeend cbvtk = EveryXSimulationSteps(floor(outputtime / slow_dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(ode_solver)) do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model) end callbacks = (callbacks..., cbvtk) end solve!(Q, ode_solver; timeend = timeend, callbacks = callbacks) # final statistics Qe = init_ode_state(dg, timeend) engf = norm(Q) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished refinement level %d norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ level engf engf / eng0 engf - eng0 errf errf / engfe errf end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname = "isentropicvortex_mrigark", ) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate 
the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end main() ================================================ FILE: test/Numerics/DGMethods/Euler/isentropicvortex_mrigark_implicit.jl ================================================ using ClimateMachine using ClimateMachine.Atmos using ClimateMachine.BalanceLaws using ClimateMachine.ConfigTypes using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.GenericCallbacks using ClimateMachine.Mesh.Geometry using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Topologies using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.Orientations using ClimateMachine.SystemSolvers using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VariableTemplates using ClimateMachine.VTK using CLIMAParameters using CLIMAParameters.Planet: kappa_d struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test include("isentropicvortex_setup.jl") if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output_vtk = false function main() ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 numlevels = integration_testing ? 
4 : 1 expected_error = Dict() expected_error[Float64, MRIGARKIRK21aSandu, 1] = 2.3236071337679274e+01 expected_error[Float64, MRIGARKIRK21aSandu, 2] = 5.2652585224989430e+00 expected_error[Float64, MRIGARKIRK21aSandu, 3] = 1.2100430848052603e-01 expected_error[Float64, MRIGARKIRK21aSandu, 4] = 2.1974838909870273e-03 expected_error[Float64, MRIGARKESDIRK34aSandu, 1] = 2.3235626679098608e+01 expected_error[Float64, MRIGARKESDIRK34aSandu, 2] = 5.2672845223341218e+00 expected_error[Float64, MRIGARKESDIRK34aSandu, 3] = 1.2097276468825705e-01 expected_error[Float64, MRIGARKESDIRK34aSandu, 4] = 2.0920468129065205e-03 @testset "$(@__FILE__)" begin for FT in (Float64,), dims in 2 for mrigark_method in (MRIGARKIRK21aSandu, MRIGARKESDIRK34aSandu) @info @sprintf """Configuration ArrayType = %s mrigark_method = %s FT = %s dims = %d """ ArrayType "$mrigark_method" "$FT" dims setup = IsentropicVortexSetup{FT}() errors = Vector{FT}(undef, numlevels) for level in 1:numlevels numelems = ntuple(dim -> dim == 3 ? 1 : 2^(level - 1) * 5, dims) errors[level] = test_run( mpicomm, ArrayType, polynomialorder, numelems, setup, mrigark_method, FT, dims, level, ) @test errors[level] ≈ expected_error[FT, mrigark_method, level] end rates = @. 
log2( first(errors[1:(numlevels - 1)]) / first(errors[2:numlevels]), ) numlevels > 1 && @info "Convergence rates\n" * join( [ "rate for levels $l → $(l + 1) = $(rates[l])" for l in 1:(numlevels - 1) ], "\n", ) end end end end function test_run( mpicomm, ArrayType, polynomialorder, numelems, setup, mrigark_method, FT, dims, level, ) brickrange = ntuple(dims) do dim range( -setup.domain_halflength; length = numelems[dim] + 1, stop = setup.domain_halflength, ) end topology = BrickTopology( mpicomm, brickrange; periodicity = ntuple(_ -> true, dims), ) grid = DiscontinuousSpectralElementGrid( topology, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, ) problem = AtmosProblem(boundaryconditions = (), init_state_prognostic = setup) physics = AtmosPhysics{FT}( param_set; ref_state = IsentropicVortexReferenceState{FT}(setup), turbulence = ConstantDynamicViscosity(FT(0)), moisture = DryModel(), ) model = AtmosModel{FT}( AtmosLESConfigType, physics; problem = problem, orientation = NoOrientation(), source = (), ) # This is a bad idea; this test is just testing how # implicit GARK composes with explicit methods # The linear model has the fast time scales but will be # treated implicitly (outer solver) slow_model = AtmosAcousticLinearModel(model) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) slow_dg = DGModel( slow_model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); state_auxiliary = dg.state_auxiliary, ) fast_dg = remainder_DGModel(dg, (slow_dg,)) timeend = FT(2 * setup.domain_halflength / setup.translation_speed) # determine the time step elementsize = minimum(step.(brickrange)) dt = elementsize / soundspeed_air(param_set, setup.T∞) / polynomialorder^2 / 5 nsteps = ceil(Int, timeend / dt) dt = timeend / nsteps Q = init_ode_state(dg, FT(0)) fastsolver = LSRK54CarpenterKennedy(fast_dg, Q; dt = dt) linearsolver = 
GeneralizedMinimalResidual(Q; M = 50, rtol = 1e-10) ode_solver = mrigark_method( slow_dg, LinearBackwardEulerSolver(linearsolver; isadjustable = true), fastsolver, Q; dt = dt, t0 = 0, ) eng0 = norm(Q) dims == 2 && (numelems = (numelems..., 0)) @info @sprintf """Starting refinement level %d numelems = (%d, %d, %d) dt = %.16e norm(Q₀) = %.16e """ level numelems... dt eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) runtime = Dates.format( convert(DateTime, now() - starttime[]), dateformat"HH:MM:SS", ) @info @sprintf """Update simtime = %.16e runtime = %s norm(Q) = %.16e """ gettime(ode_solver) runtime energy end end callbacks = (cbinfo,) if output_vtk # create vtk dir vtkdir = "vtk_isentropicvortex_mrigark" * "_poly$(polynomialorder)_dims$(dims)_$(ArrayType)_$(FT)" * "_$(mrigark_method)_level$(level)" #= fix: FastMethod is not defined in this function; the time-stepper argument here is named mrigark_method =# mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model) # setup the output callback outputtime = timeend cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(ode_solver)) do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model) end callbacks = (callbacks..., cbvtk) end solve!(Q, ode_solver; timeend = timeend, callbacks = callbacks) # final statistics Qe = init_ode_state(dg, timeend) engf = norm(Q) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished refinement level %d norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ level engf engf / eng0 engf - eng0 errf errf / engfe errf end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname = "isentropicvortex_mrigark", ) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = 
flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end main() ================================================ FILE: test/Numerics/DGMethods/Euler/isentropicvortex_multirate.jl ================================================ using ClimateMachine using ClimateMachine.Atmos using ClimateMachine.BalanceLaws using ClimateMachine.ConfigTypes using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.GenericCallbacks using ClimateMachine.Mesh.Geometry using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Topologies using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.Orientations using ClimateMachine.SystemSolvers using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VariableTemplates using ClimateMachine.VTK using CLIMAParameters using CLIMAParameters.Planet: kappa_d struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test include("isentropicvortex_setup.jl") if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output_vtk = false function main() ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 numlevels = integration_testing ? 
4 : 1 expected_error = Dict() expected_error[Float64, SSPRK33ShuOsher, 1] = 2.3222373077778794e+01 expected_error[Float64, SSPRK33ShuOsher, 2] = 5.2782503174265516e+00 expected_error[Float64, SSPRK33ShuOsher, 3] = 1.2281763287878383e-01 expected_error[Float64, SSPRK33ShuOsher, 4] = 2.3761870907666096e-03 expected_error[Float64, ARK2GiraldoKellyConstantinescu, 1] = 2.3245216640111998e+01 expected_error[Float64, ARK2GiraldoKellyConstantinescu, 2] = 5.2626584944153949e+00 expected_error[Float64, ARK2GiraldoKellyConstantinescu, 3] = 1.2324230746483673e-01 expected_error[Float64, ARK2GiraldoKellyConstantinescu, 4] = 3.8777995619211627e-03 @testset "$(@__FILE__)" begin for FT in (Float64,), dims in 2 for FastMethod in (SSPRK33ShuOsher, ARK2GiraldoKellyConstantinescu) @info @sprintf """Configuration ArrayType = %s FastMethod = %s FT = %s dims = %d """ ArrayType "$FastMethod" "$FT" dims setup = IsentropicVortexSetup{FT}() errors = Vector{FT}(undef, numlevels) for level in 1:numlevels numelems = ntuple(dim -> dim == 3 ? 1 : 2^(level - 1) * 5, dims) errors[level] = test_run( mpicomm, ArrayType, polynomialorder, numelems, setup, FT, FastMethod, dims, level, ) @test errors[level] ≈ expected_error[FT, FastMethod, level] end rates = @. 
log2( first(errors[1:(numlevels - 1)]) / first(errors[2:numlevels]), ) numlevels > 1 && @info "Convergence rates\n" * join( [ "rate for levels $l → $(l + 1) = $(rates[l])" for l in 1:(numlevels - 1) ], "\n", ) end end end end function test_run( mpicomm, ArrayType, polynomialorder, numelems, setup, FT, FastMethod, dims, level, ) brickrange = ntuple(dims) do dim range( -setup.domain_halflength; length = numelems[dim] + 1, stop = setup.domain_halflength, ) end topology = BrickTopology( mpicomm, brickrange; periodicity = ntuple(_ -> true, dims), ) grid = DiscontinuousSpectralElementGrid( topology, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, ) problem = AtmosProblem(boundaryconditions = (), init_state_prognostic = setup) physics = AtmosPhysics{FT}( param_set; ref_state = IsentropicVortexReferenceState{FT}(setup), turbulence = ConstantDynamicViscosity(FT(0)), moisture = DryModel(), ) model = AtmosModel{FT}( AtmosLESConfigType, physics; problem = problem, orientation = NoOrientation(), source = (), ) # The linear model has the fast time scales fast_model = AtmosAcousticLinearModel(model) # The nonlinear model has the slow time scales dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) fast_dg = DGModel( fast_model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); state_auxiliary = dg.state_auxiliary, ) slow_dg = remainder_DGModel(dg, (fast_dg,)) timeend = FT(2 * setup.domain_halflength / setup.translation_speed) # determine the slow time step elementsize = minimum(step.(brickrange)) slow_dt = 8 * elementsize / soundspeed_air(param_set, setup.T∞) / polynomialorder^2 nsteps = ceil(Int, timeend / slow_dt) slow_dt = timeend / nsteps # arbitrary and not needed for stability, just for testing fast_dt = slow_dt / 3 Q = init_ode_state(dg, FT(0), setup) slow_ode_solver = LSRK144NiegemannDiehlBusch(slow_dg, Q; dt = slow_dt) # check if 
FastMethod is ARK, is there a better way ? if FastMethod == ARK2GiraldoKellyConstantinescu linearsolver = GeneralizedMinimalResidual(Q; M = 10, rtol = 1e-10) # splitting the fast part into full and linear but the fast part # is already linear so full_dg == linear_dg == fast_dg fast_ode_solver = FastMethod( fast_dg, fast_dg, LinearBackwardEulerSolver(linearsolver; isadjustable = true), Q; dt = fast_dt, paperversion = true, ) else fast_ode_solver = FastMethod(fast_dg, Q; dt = fast_dt) end ode_solver = MultirateRungeKutta((slow_ode_solver, fast_ode_solver)) eng0 = norm(Q) dims == 2 && (numelems = (numelems..., 0)) @info @sprintf """Starting refinement level %d numelems = (%d, %d, %d) slow_dt = %.16e fast_dt = %.16e norm(Q₀) = %.16e """ level numelems... slow_dt fast_dt eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) runtime = Dates.format( convert(DateTime, now() - starttime[]), dateformat"HH:MM:SS", ) @info @sprintf """Update simtime = %.16e runtime = %s norm(Q) = %.16e """ gettime(ode_solver) runtime energy end end callbacks = (cbinfo,) if output_vtk # create vtk dir vtkdir = "vtk_isentropicvortex_multirate" * "_poly$(polynomialorder)_dims$(dims)_$(ArrayType)_$(FT)" * "_$(FastMethod)_level$(level)" mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model) # setup the output callback outputtime = timeend cbvtk = EveryXSimulationSteps(floor(outputtime / slow_dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(ode_solver), setup) do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model) end callbacks = (callbacks..., cbvtk) end solve!(Q, ode_solver; timeend = timeend, callbacks = callbacks) # final statistics Qe = init_ode_state(dg, timeend, setup) engf = norm(Q) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished refinement level %d norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - 
norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ level engf engf / eng0 engf - eng0 errf errf / engfe errf end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname = "isentropicvortex_multirate", ) #= fix: testname default had a typo ("mutirate"); output files now match the test's multirate naming =# ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end main() ================================================ FILE: test/Numerics/DGMethods/Euler/isentropicvortex_setup.jl ================================================ import ClimateMachine.Atmos: atmos_init_aux!, vars_state Base.@kwdef struct IsentropicVortexSetup{FT} p∞::FT = 10^5 T∞::FT = 300 ρ∞::FT = air_density(param_set, FT(T∞), FT(p∞)) translation_speed::FT = 150 translation_angle::FT = pi / 4 vortex_speed::FT = 50 vortex_radius::FT = 1 // 200 domain_halflength::FT = 1 // 20 end function (setup::IsentropicVortexSetup)( problem, bl, state, aux, localgeo, t, args..., ) FT = eltype(state) x = MVector(localgeo.coord) param_set = parameter_set(bl) ρ∞ = setup.ρ∞ p∞ = setup.p∞ T∞ = setup.T∞ translation_speed = setup.translation_speed α = setup.translation_angle vortex_speed = setup.vortex_speed R = setup.vortex_radius L = setup.domain_halflength u∞ = SVector(translation_speed * cos(α), translation_speed * sin(α), 0) x .-= u∞ * t # make the function periodic x .-= floor.((x .+ L) / 
2L) * 2L @inbounds begin r = sqrt(x[1]^2 + x[2]^2) δu_x = -vortex_speed * x[2] / R * exp(-(r / R)^2 / 2) δu_y = vortex_speed * x[1] / R * exp(-(r / R)^2 / 2) end u = u∞ .+ SVector(δu_x, δu_y, 0) _kappa_d::FT = kappa_d(param_set) T = T∞ * (1 - _kappa_d * vortex_speed^2 / 2 * ρ∞ / p∞ * exp(-(r / R)^2)) # adiabatic/isentropic relation p = p∞ * (T / T∞)^(FT(1) / _kappa_d) ρ = air_density(param_set, T, p) state.ρ = ρ state.ρu = ρ * u e_kin = u' * u / 2 state.energy.ρe = ρ * total_energy(param_set, e_kin, FT(0), T) if !(moisture_model(bl) isa DryModel) state.moisture.ρq_tot = FT(0) end end struct IsentropicVortexReferenceState{FT} <: ReferenceState setup::IsentropicVortexSetup{FT} end vars_state(::IsentropicVortexReferenceState, ::Auxiliary, FT) = @vars(ρ::FT, ρe::FT, p::FT, T::FT) function atmos_init_aux!( atmos::AtmosModel, m::IsentropicVortexReferenceState, state_auxiliary::MPIStateArray, grid, direction, ) init_state_auxiliary!( atmos, (args...) -> init_vortex_ref_state!(m, args...), state_auxiliary, grid, direction, ) end function init_vortex_ref_state!( m::IsentropicVortexReferenceState, atmos::AtmosModel, aux::Vars, tmp::Vars, geom::LocalGeometry, ) setup = m.setup ρ∞ = setup.ρ∞ p∞ = setup.p∞ T∞ = setup.T∞ param_set = parameter_set(atmos) aux.ref_state.ρ = ρ∞ aux.ref_state.p = p∞ aux.ref_state.T = T∞ aux.ref_state.ρe = ρ∞ * internal_energy(param_set, T∞) end ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/advection_diffusion_model.jl ================================================ using StaticArrays using ClimateMachine.VariableTemplates using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, GradientLaplacian, Hyperdiffusive import ClimateMachine.BalanceLaws: vars_state, number_states, flux_first_order!, flux_second_order!, source!, compute_gradient_argument!, compute_gradient_flux!, nodal_init_state_auxiliary!, update_auxiliary_state!, init_state_prognostic!, 
boundary_conditions, boundary_state!, wavespeed, transform_post_gradient_laplacian! using ClimateMachine.Mesh.Geometry: LocalGeometry using ClimateMachine.DGMethods: SpaceDiscretization using ClimateMachine.DGMethods.NumericalFluxes: NumericalFluxFirstOrder, NumericalFluxSecondOrder, NumericalFluxGradient, CentralNumericalFluxDivergence, CentralNumericalFluxHigherOrder import ClimateMachine.DGMethods.NumericalFluxes: numerical_flux_first_order!, boundary_flux_second_order! using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() struct Advection{N} <: BalanceLaw end struct NoAdvection <: BalanceLaw end struct Diffusion{N} <: BalanceLaw end struct NoDiffusion <: BalanceLaw end struct HyperDiffusion{N} <: BalanceLaw end struct NoHyperDiffusion <: BalanceLaw end abstract type AdvectionDiffusionProblem end # Boundary condition types # boundary condition for operator of order O # O = 0 -> state BC (Dirichlet) # O = 1 -> gradient BC (Neumann) # O = 2 -> laplacian BC # O = 3 -> gradient laplacian BC abstract type AbstractBC{O} end struct HomogeneousBC{O} <: AbstractBC{O} end struct InhomogeneousBC{O} <: AbstractBC{O} end any_isa(bcs::AbstractBC, bc) = bcs isa bc any_isa(bcs::Tuple, bc) = mapreduce(x -> x isa bc, |, bcs) """ AdvectionDiffusion{N} <: BalanceLaw A balance law describing a system of `N` advection-diffusion-hyperdiffusion equations: ``` ∂ρ -- = - ∇ • (u ρ - σ + η) ∂t σ = D ∇ ρ η = H ∇ Δρ ``` Where - `ρ` is the solution vector - `u` is the advection velocity - `σ` is the DG diffusion auxiliary variable - `D` is the diffusion tensor - `η` is the DG hyperdiffusion auxiliary variable - `H` is the hyperdiffusion tensor """ struct AdvectionDiffusion{N, dim, P, fluxBC, A, D, HD, BC} <: BalanceLaw problem::P advection::A diffusion::D hyperdiffusion::HD boundary_conditions::BC function AdvectionDiffusion{dim}( problem::P, boundary_conditions::BC = (); num_equations = 1, flux_bc = false, advection::Bool = 
true, diffusion::Bool = true, hyperdiffusion::Bool = false, ) where {dim, P <: AdvectionDiffusionProblem, BC} N = num_equations adv = advection ? Advection{N}() : NoAdvection() A = typeof(adv) diff = diffusion ? Diffusion{N}() : NoDiffusion() D = typeof(diff) hyperdiff = hyperdiffusion ? HyperDiffusion{N}() : NoHyperDiffusion() HD = typeof(hyperdiff) new{N, dim, P, flux_bc, A, D, HD, BC}( problem, adv, diff, hyperdiff, boundary_conditions, ) end end # Auxiliary variables, always store # `coord` coordinate points (needed for BCs) function vars_state(m::AdvectionDiffusion, st::Auxiliary, FT) @vars begin coord::SVector{3, FT} advection::vars_state(m.advection, st, FT) diffusion::vars_state(m.diffusion, st, FT) hyperdiffusion::vars_state(m.hyperdiffusion, st, FT) end end # `u` advection velocity vars_state(::Advection{1}, ::Auxiliary, FT) = @vars(u::SVector{3, FT}) vars_state(::Advection{N}, ::Auxiliary, FT) where {N} = @vars(u::SMatrix{3, N, FT, 3N}) # `D` diffusion tensor vars_state(::Diffusion{1}, ::Auxiliary, FT) = @vars(D::SMatrix{3, 3, FT, 9}) vars_state(::Diffusion{N}, ::Auxiliary, FT) where {N} = @vars(D::SArray{Tuple{3, 3, N}, FT, 3, 9N}) # `H` hyperdiffusion tensor vars_state(::HyperDiffusion{1}, ::Auxiliary, FT) = @vars(H::SMatrix{3, 3, FT, 9}) vars_state(::HyperDiffusion{N}, ::Auxiliary, FT) where {N} = @vars(H::SArray{Tuple{3, 3, N}, FT, 3, 9N}) # Density `ρ` is the only state vars_state(::AdvectionDiffusion{1}, ::Prognostic, FT) = @vars(ρ::FT) vars_state(::AdvectionDiffusion{N}, ::Prognostic, FT) where {N} = @vars(ρ::SVector{N, FT}) function vars_state(m::AdvectionDiffusion{N}, ::Gradient, FT) where {N} # For pure advection we don't need gradients if m.diffusion isa NoDiffusion && m.hyperdiffusion isa NoHyperDiffusion return @vars() else # Take the gradient of density return N == 1 ? 
@vars(ρ::FT) : @vars(ρ::SVector{N, FT}) end end # Take the gradient of laplacian of density ρ vars_state(::HyperDiffusion{1}, ::GradientLaplacian, FT) = @vars(ρ::FT) vars_state(::HyperDiffusion{N}, ::GradientLaplacian, FT) where {N} = @vars(ρ::SVector{N, FT}) vars_state(m::AdvectionDiffusion, st::GradientLaplacian, FT) = vars_state(m.hyperdiffusion, st, FT) # The DG diffusion auxiliary variable: σ = D ∇ρ vars_state(::Diffusion{1}, ::GradientFlux, FT) = @vars(σ::SVector{3, FT}) vars_state(::Diffusion{N}, ::GradientFlux, FT) where {N} = @vars(σ::SMatrix{3, N, FT, 3N}) vars_state(m::AdvectionDiffusion, st::GradientFlux, FT) = vars_state(m.diffusion, st, FT) # The DG hyperdiffusion auxiliary variable: η = H ∇ Δρ vars_state(::HyperDiffusion{1}, ::Hyperdiffusive, FT) = @vars(η::SVector{3, FT}) vars_state(::HyperDiffusion{N}, ::Hyperdiffusive, FT) where {N} = @vars(η::SMatrix{3, N, FT, 3N}) vars_state(m::AdvectionDiffusion, st::Hyperdiffusive, FT) = vars_state(m.hyperdiffusion, st, FT) """ flux_first_order!(::Advection, flux::Grad, state::Vars, aux::Vars) Computes non-diffusive flux `F_adv = u ρ` where - `u` is the advection velocity - `ρ` is the advected quantity """ function flux_first_order!( ::Advection{N}, flux::Grad, state::Vars, aux::Vars, ) where {N} ρ = state.ρ u = aux.advection.u flux.ρ += u .* ρ' end flux_first_order!(::NoAdvection, flux::Grad, state::Vars, aux::Vars) = nothing flux_first_order!( m::AdvectionDiffusion, flux::Grad, state::Vars, aux::Vars, t::Real, direction, ) = flux_first_order!(m.advection, flux, state, aux) """ flux_second_order!(::Diffusion, flux::Grad, auxDG::Vars) Computes diffusive flux `F_diff = -σ` where: - `σ` is DG diffusion auxiliary variable (`σ = D ∇ ρ` with `D` being the diffusion tensor) """ function flux_second_order!(::Diffusion, flux::Grad, auxDG::Vars) σ = auxDG.σ flux.ρ += -σ end flux_second_order!(::NoDiffusion, flux::Grad, auxDG::Vars) = nothing """ flux_second_order!(::HyperDiffusion, flux::Grad, auxHDG::Vars) Computes 
hyperdiffusive flux `F_hyperdiff = η` where: - `η` is DG hyperdiffusion auxiliary variable (`η = H ∇ Δρ` with `H` being the hyperdiffusion tensor) """ function flux_second_order!(::HyperDiffusion, flux::Grad, auxHDG::Vars) η = auxHDG.η flux.ρ += η end flux_second_order!(::NoHyperDiffusion, flux::Grad, auxHDG::Vars) = nothing function flux_second_order!( m::AdvectionDiffusion, flux::Grad, state::Vars, auxDG::Vars, auxHDG::Vars, aux::Vars, t::Real, ) flux_second_order!(m.diffusion, flux, auxDG) flux_second_order!(m.hyperdiffusion, flux, auxHDG) end """ compute_gradient_argument!(m::AdvectionDiffusion, transform::Vars, state::Vars, aux::Vars, t::Real) Set the variable to take the gradient of (`ρ` in this case) """ function compute_gradient_argument!( m::AdvectionDiffusion, transform::Vars, state::Vars, aux::Vars, t::Real, ) transform.ρ = state.ρ end """ compute_gradient_flux!(::Diffusion, auxDG::Vars, gradvars::Grad, aux::Vars) Computes the DG diffusion auxiliary variable `σ = D ∇ ρ` where `D` is the diffusion tensor. """ function compute_gradient_flux!( ::Diffusion{N}, auxDG::Vars, gradvars::Grad, aux::Vars, ) where {N} ∇ρ = gradvars.ρ D = aux.diffusion.D if N == 1 auxDG.σ = D * ∇ρ else auxDG.σ = hcat(ntuple(n -> D[:, :, n] * ∇ρ[:, n], Val(N))...) end end compute_gradient_flux!(::NoDiffusion, auxDG::Vars, gradvars::Grad, aux::Vars) = nothing compute_gradient_flux!( m::AdvectionDiffusion, auxDG::Vars, gradvars::Grad, state::Vars, aux::Vars, t::Real, ) = compute_gradient_flux!(m.diffusion, auxDG, gradvars, aux) """ transform_post_gradient_laplacian!(::AdvectionDiffusion, auxHDG::Vars, gradvars::Grad, state::Vars, aux::Vars, t::Real) Computes the DG hyperdiffusion auxiliary variable `η = H ∇ Δρ` where `H` is the hyperdiffusion tensor. 
""" function transform_post_gradient_laplacian!( m::AdvectionDiffusion{N}, auxHDG::Vars, gradvars::Grad, state::Vars, aux::Vars, t::Real, ) where {N} ∇Δρ = gradvars.ρ H = aux.hyperdiffusion.H if N == 1 auxHDG.η = H * ∇Δρ else auxHDG.η = hcat(ntuple(n -> H[:, :, n] * ∇Δρ[:, n], Val(N))...) end end """ source!(m::AdvectionDiffusion, _...) There is no source in the advection-diffusion model """ source!(m::AdvectionDiffusion, _...) = nothing """ wavespeed(m::AdvectionDiffusion, nM, state::Vars, aux::Vars, t::Real) Wavespeed with respect to vector `nM` """ wavespeed( m::AdvectionDiffusion, nM, state::Vars, aux::Vars, t::Real, direction, ) = wavespeed(m.advection, nM, aux) function wavespeed(::Advection{N}, nM, aux::Vars) where {N} u = aux.advection.u if N == 1 abs(nM' * u) else SVector(ntuple(n -> abs(nM' * u[:, n]), Val(N))) end end wavespeed(::NoAdvection, nM, aux::Vars) = 0 function nodal_init_state_auxiliary!( m::AdvectionDiffusion, aux::Vars, tmp::Vars, geom::LocalGeometry, ) aux.coord = geom.coord init_velocity_diffusion!(m.problem, aux, geom) end has_variable_coefficients(::AdvectionDiffusionProblem) = false function update_auxiliary_state!( spacedisc::SpaceDiscretization, m::AdvectionDiffusion, Q::MPIStateArray, t::Real, elems::UnitRange, ) if has_variable_coefficients(m.problem) update_auxiliary_state!(spacedisc, m, Q, t, elems) do m, state, aux, t update_velocity_diffusion!(m.problem, m, state, aux, t) end return true end return false end function init_state_prognostic!( m::AdvectionDiffusion, state::Vars, aux::Vars, localgeo, t::Real, ) initial_condition!(m.problem, state, aux, localgeo, t) end """ inhomogeneous_data!(::Val{O}, problem, data, aux, x, t) Prescribes `problem` boundary condition data for an operator of order `O` """ function inhomogeneous_data! 
end

# Accessor: the tuple of boundary-condition instances stored on the model.
boundary_conditions(m::AdvectionDiffusion) = m.boundary_conditions

# First-order (advective) numerical-flux boundary state.
# Dispatches on the BC tags carried in `bcs` via `any_isa`.
function boundary_state!(
    nf,
    bcs,
    m::AdvectionDiffusion{N},
    stateP::Vars,
    auxP::Vars,
    nM,
    stateM::Vars,
    auxM::Vars,
    t,
    _...,
) where {N}
    if any_isa(bcs, InhomogeneousBC{0})
        # Dirichlet
        inhomogeneous_data!(
            Val(0),
            m.problem,
            stateP,
            auxP,
            (coord = auxP.coord,),
            t,
        )
    elseif any_isa(bcs, AbstractBC{1})
        # Neumann
        stateP.ρ = stateM.ρ
    elseif any_isa(bcs, HomogeneousBC{0})
        # zero Dirichlet
        stateP.ρ = N == 1 ? 0 : zeros(typeof(stateP.ρ))
    end
end

# Second-order (diffusive/hyperdiffusive) numerical-flux boundary state.
function boundary_state!(
    nf::CentralNumericalFluxSecondOrder,
    bcs,
    m::AdvectionDiffusion,
    state⁺::Vars,
    diff⁺::Vars,
    hyperdiff⁺::Vars,
    aux⁺::Vars,
    n⁻::SVector,
    state⁻::Vars,
    diff⁻::Vars,
    hyperdiff⁻::Vars,
    aux⁻::Vars,
    t,
    _...,
)
    if m.diffusion isa NoDiffusion && m.hyperdiffusion isa NoHyperDiffusion
        return nothing
    end

    if m.diffusion isa Diffusion
        if any_isa(bcs, AbstractBC{0}) # Dirchlet
            # Just use the minus side values since Dirchlet
            diff⁺.σ = diff⁻.σ
        elseif any_isa(bcs, InhomogeneousBC{1}) # Neumann with data
            FT = eltype(diff⁺)
            ngrad = number_states(m, Gradient())
            ∇state = Grad{vars_state(m, Gradient(), FT)}(similar(
                parent(diff⁺),
                Size(3, ngrad),
            ))
            # Get analytic gradient
            inhomogeneous_data!(Val(1), m.problem, ∇state, aux⁻, aux⁻.coord, t)
            # compute the diffusive flux using the boundary state
            compute_gradient_flux!(m.diffusion, diff⁺, ∇state, aux⁻)
        elseif any_isa(bcs, HomogeneousBC{1}) # zero Neumann
            FT = eltype(diff⁺)
            ngrad = number_states(m, Gradient())
            ∇state = Grad{vars_state(m, Gradient(), FT)}(similar(
                parent(diff⁺),
                Size(3, ngrad),
            ))
            # Get analytic gradient
            ∇state.ρ = zeros(typeof(∇state.ρ))
            # convert to auxDG variables
            compute_gradient_flux!(m.diffusion, diff⁺, ∇state, aux⁻)
        end
    end

    if m.hyperdiffusion isa HyperDiffusion
        if any_isa(bcs, InhomogeneousBC{3})
            FT = eltype(hyperdiff⁺)
            ngradlap = number_states(m, GradientLaplacian())
            ∇Δstate = Grad{vars_state(m, GradientLaplacian(), FT)}(similar(
                parent(hyperdiff⁺),
                Size(3, ngradlap),
            ))
            # Get analytic gradient of laplacian
            inhomogeneous_data!(Val(3), m.problem, ∇Δstate, aux⁻, aux⁻.coord, t)
            transform_post_gradient_laplacian!(
                m,
                hyperdiff⁺,
                ∇Δstate,
                state⁻,
                aux⁻,
                t,
            )
        elseif any_isa(bcs, HomogeneousBC{3})
            FT = eltype(hyperdiff⁺)
            ngradlap = number_states(m, GradientLaplacian())
            ∇Δstate = Grad{vars_state(m, GradientLaplacian(), FT)}(zeros(SMatrix{
                3,
                ngradlap,
                FT,
            }))
            transform_post_gradient_laplacian!(
                m,
                hyperdiff⁺,
                ∇Δstate,
                state⁻,
                aux⁻,
                t,
            )
        end
    end
    nothing
end

# Second-order boundary flux, used when the model was built with
# `flux_bc = true` (the `true` in the fourth type parameter).
function boundary_flux_second_order!(
    nf::CentralNumericalFluxSecondOrder,
    bcs,
    m::AdvectionDiffusion{N, dim, P, true},
    F,
    state⁺,
    diff⁺,
    hyperdiff⁺,
    aux⁺,
    n⁻,
    state⁻,
    diff⁻,
    hyperdiff⁻,
    aux⁻,
    t,
    _...,
) where {N, dim, P}
    if m.diffusion isa NoDiffusion && m.hyperdiffusion isa NoHyperDiffusion
        return nothing
    end
    # Default initialize flux to minus side
    if any_isa(bcs, AbstractBC{0}) # Dirchlet
        # Just use the minus side values since Dirchlet
        flux_second_order!(m, F, state⁻, diff⁻, hyperdiff⁻, aux⁻, t)
    elseif any_isa(bcs, InhomogeneousBC{1}) # Neumann data
        FT = eltype(diff⁺)
        ngrad = number_states(m, Gradient())
        ∇state = Grad{vars_state(m, Gradient(), FT)}(similar(
            parent(diff⁺),
            Size(3, ngrad),
        ))
        # Get analytic gradient
        inhomogeneous_data!(Val(1), m.problem, ∇state, aux⁻, aux⁻.coord, t)
        # get the diffusion coefficient
        D = aux⁻.diffusion.D
        # exact the exact data
        ∇ρ = ∇state.ρ
        # set the flux
        if N == 1
            F.ρ = -D * ∇ρ
        else
            F.ρ = hcat(ntuple(n -> -D[:, :, n] * ∇ρ[:, n], Val(N))...)
        end
    elseif any_isa(bcs, HomogeneousBC{1}) # Zero Neumann
        F.ρ = zeros(typeof(F.ρ))
    end
    nothing
end

# Boundary state for the divergence penalty of the hyperdiffusion operator.
function boundary_state!(
    nf::CentralNumericalFluxDivergence,
    bcs,
    m::AdvectionDiffusion,
    grad⁺::Grad,
    aux⁺::Vars,
    n⁻::SVector,
    grad⁻::Grad,
    aux⁻::Vars,
    t,
)
    if m.hyperdiffusion isa NoHyperDiffusion
        return nothing
    end
    if any_isa(bcs, InhomogeneousBC{1})
        # Get analytic gradient
        inhomogeneous_data!(Val(1), m.problem, grad⁺, aux⁻, aux⁻.coord, t)
    elseif any_isa(bcs, HomogeneousBC{1})
        grad⁺.ρ = zeros(typeof(grad⁺.ρ))
    end
    nothing
end

# Boundary state for the higher-order (Laplacian) term of hyperdiffusion.
function boundary_state!(
    ::CentralNumericalFluxHigherOrder,
    bcs,
    m::AdvectionDiffusion{N},
    state⁺::Vars,
    aux⁺::Vars,
    lap⁺::Vars,
    n⁻::SVector,
    state⁻::Vars,
    aux⁻::Vars,
    lap⁻::Vars,
    t,
) where {N}
    if m.hyperdiffusion isa NoHyperDiffusion
        return nothing
    end
    if any_isa(bcs, InhomogeneousBC{2})
        # Get analytic laplacian
        inhomogeneous_data!(Val(2), m.problem, lap⁺, aux⁻, aux⁻.coord, t)
    elseif any_isa(bcs, HomogeneousBC{2})
        lap⁺.ρ = N == 1 ? 0 : zeros(typeof(lap⁺.ρ))
    end
    nothing
end

================================================
FILE: test/Numerics/DGMethods/advection_diffusion/advection_diffusion_model_1dimex_bgmres.jl
================================================
using MPI
using ClimateMachine
using Logging
using Test
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.SystemSolvers: BatchedGeneralizedMinimalResidual
using ClimateMachine.ODESolvers
using LinearAlgebra
using Printf
using Dates
using ClimateMachine.GenericCallbacks:
    EveryXWallTimeSeconds, EveryXSimulationSteps
using ClimateMachine.VTK: writevtk, writepvtu

if !@isdefined integration_testing
    if length(ARGS) > 0
        const integration_testing = parse(Bool, ARGS[1])
    else
        const integration_testing = parse(
            Bool,
            lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
        )
    end
end

const output = parse(Bool, lowercase(get(ENV,
"JULIA_CLIMA_OUTPUT", "false")))

include("advection_diffusion_model.jl")

# Pseudo-1D Gaussian advection-diffusion problem along direction `n`,
# with advection speed α, diffusivity β, initial center μ, and width δ.
struct Pseudo1D{n, α, β, μ, δ} <: AdvectionDiffusionProblem end

function init_velocity_diffusion!(
    ::Pseudo1D{n, α, β},
    aux::Vars,
    geom::LocalGeometry,
) where {n, α, β}
    # Direction of flow is n with magnitude α
    aux.advection.u = α * n

    # Diffusion of strength β in the n direction
    aux.diffusion.D = β * n * n'
end

function initial_condition!(
    ::Pseudo1D{n, α, β, μ, δ},
    state,
    aux,
    localgeo,
    t,
) where {n, α, β, μ, δ}
    ξn = dot(n, localgeo.coord)
    # ξT = SVector(localgeo.coord) - ξn * n
    state.ρ = exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
end

# Dirichlet data is just the exact solution evaluated on the boundary.
inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...)

# Neumann data: analytic gradient of the exact solution.
function inhomogeneous_data!(
    ::Val{1},
    ::Pseudo1D{n, α, β, μ, δ},
    ∇state,
    aux,
    x,
    t,
) where {n, α, β, μ, δ}
    ξn = dot(n, x)
    ∇state.ρ =
        -(
            2n * (ξn - μ - α * t) / (4 * β * (δ + t)) *
            exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
        )
end

function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname)
    ## Name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )

    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"

    writevtk(filename, Q, dg, statenames, Qe, exactnames)

    ## Generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## Name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)

        ## Name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end

        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )

        @info "Done writing VTK: $pvtuprefix"
    end
end

# Run one resolution level with a 1D-IMEX (vertically implicit) solver using
# batched GMRES; returns the final-time error norm.
function test_run(
    mpicomm,
    ArrayType,
    dim,
    topl,
    N,
    timeend,
    FT,
    dt,
    n,
    α,
    β,
    μ,
    δ,
    vtkdir,
    outputtime,
    linearsolvertype,
    fluxBC,
)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    bcs = (
        InhomogeneousBC{0}(),
        InhomogeneousBC{1}(),
        HomogeneousBC{0}(),
        HomogeneousBC{1}(),
    )
    model = AdvectionDiffusion{dim}(
        Pseudo1D{n, α, β, μ, δ}(),
        bcs,
        flux_bc = fluxBC,
    )

    dg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        direction = EveryDirection(),
    )

    # Vertical-only DG operator shares the auxiliary state with `dg`;
    # it is the implicitly-treated part of the IMEX splitting.
    vdg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        state_auxiliary = dg.state_auxiliary,
        direction = VerticalDirection(),
    )

    Q = init_ode_state(dg, FT(0))

    linearsolver = BatchedGeneralizedMinimalResidual(
        dg,
        Q;
        atol = sqrt(eps(FT)) * 0.01,
        rtol = sqrt(eps(FT)) * 0.01,
    )

    ode_solver = ARK548L2SA2KennedyCarpenter(
        dg,
        vdg,
        LinearBackwardEulerSolver(linearsolver; isadjustable = true),
        Q;
        dt = dt,
        t0 = 0,
        split_explicit_implicit = false,
    )

    eng0 = norm(Q)
    @info @sprintf """Starting norm(Q₀) = %.16e""" eng0

    # Set up the information callback
    starttime = Ref(now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            @info @sprintf(
                """Update simtime = %.16e runtime = %s norm(Q) = %.16e""",
                gettime(ode_solver),
                Dates.format(
                    convert(Dates.DateTime, Dates.now() - starttime[]),
                    Dates.dateformat"HH:MM:SS",
                ),
                energy
            )
        end
    end
    callbacks = (cbinfo,)
    if ~isnothing(vtkdir)
        # Create vtk dir
        mkpath(vtkdir)

        vtkstep = 0
        # Output initial step
        do_output(
            mpicomm,
            vtkdir,
            vtkstep,
            dg,
            Q,
            Q,
            model,
            "advection_diffusion",
        )

        # Setup the output callback
        cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do
            vtkstep += 1
            Qe = init_ode_state(dg, gettime(ode_solver))
            do_output(
                mpicomm,
                vtkdir,
                vtkstep,
                dg,
                Q,
                Qe,
                model,
                "advection_diffusion",
            )
        end
        callbacks = (callbacks..., cbvtk)
    end

    numberofsteps = convert(Int64, cld(timeend, dt))
    dt = timeend / numberofsteps

    @info "time step" dt numberofsteps dt * numberofsteps timeend

    solve!(
        Q,
        ode_solver;
        numberofsteps = numberofsteps,
        callbacks = callbacks,
        adjustfinalstep = false,
    )

    # Print some end of the simulation information
    engf = norm(Q)
    Qe = init_ode_state(dg, FT(timeend))
    engfe = norm(Qe)
    errf = euclidean_distance(Q, Qe)
    @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe
    errf
end

let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()

    mpicomm = MPI.COMM_WORLD

    polynomialorder = 4
    base_num_elem = 4

    # Reference error norms keyed by (dim, level, float type).
    expected_result = Dict()
    expected_result[2, 1, Float64] = 7.2801198255507391e-02
    expected_result[2, 2, Float64] = 6.8160295851506783e-03
    expected_result[2, 3, Float64] = 1.4439137164205592e-04
    expected_result[2, 4, Float64] = 2.4260727323386998e-06
    expected_result[3, 1, Float64] = 1.0462203776357534e-01
    expected_result[3, 2, Float64] = 1.0280535683502070e-02
    expected_result[3, 3, Float64] = 2.0631857053908848e-04
    expected_result[3, 4, Float64] = 3.3460492914169325e-06
    expected_result[2, 1, Float32] = 7.2801239788532257e-02
    expected_result[2, 2, Float32] = 6.8159680813550949e-03
    expected_result[2, 3, Float32] = 1.4439738879445940e-04
    # This is near roundoff so we will not check it
    # expected_result[2, 4, Float32] = 2.6432753656990826e-06
    expected_result[3, 1, Float32] = 1.0462204366922379e-01
    expected_result[3, 2, Float32] = 1.0280583053827286e-02
    expected_result[3, 3, Float32] = 2.0646647317335010e-04
    expected_result[3, 4, Float32] = 2.0226731066941284e-05

    numlevels = integration_testing ? 4 : 1

    @testset "$(@__FILE__)" begin
        for FT in (Float64, Float32)
            result = zeros(FT, numlevels)
            for dim in 2:3
                for fluxBC in (true, false)
                    d = dim == 2 ? FT[1, 10, 0] : FT[1, 1, 10]
                    n = SVector{3, FT}(d ./ norm(d))

                    α = FT(1)
                    β = FT(1 // 100)
                    μ = FT(-1 // 2)
                    δ = FT(1 // 10)
                    connectivity = dim == 2 ? :face : :full
                    linearsolvertype = "Batched GMRES"
                    for l in 1:numlevels
                        Ne = 2^(l - 1) * base_num_elem
                        brickrange = (
                            ntuple(
                                j -> range(FT(-1); length = Ne + 1, stop = 1),
                                dim - 1,
                            )...,
                            range(FT(-5); length = 5Ne + 1, stop = 5),
                        )
                        periodicity = ntuple(j -> false, dim)
                        topl = StackedBrickTopology(
                            mpicomm,
                            brickrange;
                            periodicity = periodicity,
                            boundary = (
                                ntuple(j -> (1, 2), dim - 1)...,
                                (3, 4),
                            ),
                            connectivity = connectivity,
                        )
                        dt = (α / 4) / (Ne * polynomialorder^2)

                        outputtime = 0.01
                        timeend = 0.5

                        @info (ArrayType, FT, dim, linearsolvertype, l, fluxBC)
                        vtkdir = output ?
                            "vtk_advection" *
                            "_poly$(polynomialorder)" *
                            "_dim$(dim)_$(ArrayType)_$(FT)" *
                            "_$(linearsolvertype)_level$(l)" :
                            nothing
                        result[l] = test_run(
                            mpicomm,
                            ArrayType,
                            dim,
                            topl,
                            polynomialorder,
                            timeend,
                            FT,
                            dt,
                            n,
                            α,
                            β,
                            μ,
                            δ,
                            vtkdir,
                            outputtime,
                            linearsolvertype,
                            fluxBC,
                        )
                        # Test the errors significantly larger than floating point epsilon
                        if !(dim == 2 && l == 4 && FT == Float32)
                            @test result[l] ≈ FT(expected_result[dim, l, FT])
                        end
                    end
                    @info begin
                        msg = ""
                        for l in 1:(numlevels - 1)
                            rate = log2(result[l]) - log2(result[l + 1])
                            msg *= @sprintf(
                                "\n rate for level %d = %e\n",
                                l,
                                rate
                            )
                        end
                        msg
                    end
                end
            end
        end
    end
end

================================================
FILE: test/Numerics/DGMethods/advection_diffusion/advection_diffusion_model_1dimex_bjfnks.jl
================================================
using MPI
using ClimateMachine
using Logging
using Test
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.SystemSolvers
using ClimateMachine.ODESolvers
using LinearAlgebra
using Printf
using Dates
using ClimateMachine.GenericCallbacks:
    EveryXWallTimeSeconds, EveryXSimulationSteps
using ClimateMachine.VTK: writevtk, writepvtu

if !@isdefined integration_testing
    if length(ARGS) > 0
        const integration_testing = parse(Bool, ARGS[1])
    else
        const
integration_testing = parse(
            Bool,
            lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
        )
    end
end

const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false")))

include("advection_diffusion_model.jl")

# Same pseudo-1D Gaussian problem as the bgmres test; this file exercises the
# Jacobian-free Newton-Krylov nonlinear backward Euler path instead.
struct Pseudo1D{n, α, β, μ, δ} <: AdvectionDiffusionProblem end

function init_velocity_diffusion!(
    ::Pseudo1D{n, α, β},
    aux::Vars,
    geom::LocalGeometry,
) where {n, α, β}
    # Direction of flow is n with magnitude α
    aux.advection.u = α * n

    # diffusion of strength β in the n direction
    aux.diffusion.D = β * n * n'
end

function initial_condition!(
    ::Pseudo1D{n, α, β, μ, δ},
    state,
    aux,
    localgeo,
    t,
) where {n, α, β, μ, δ}
    ξn = dot(n, localgeo.coord)
    # ξT = SVector(localgeo.coord) - ξn * n
    state.ρ = exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
end

inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...)

function inhomogeneous_data!(
    ::Val{1},
    ::Pseudo1D{n, α, β, μ, δ},
    ∇state,
    aux,
    x,
    t,
) where {n, α, β, μ, δ}
    ξn = dot(n, x)
    ∇state.ρ =
        -(
            2n * (ξn - μ - α * t) / (4 * β * (δ + t)) *
            exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
        )
end

function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname)
    ## name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )

    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"

    writevtk(filename, Q, dg, statenames, Qe, exactnames)

    ## Generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)

        ## name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end

        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )

        @info "Done writing VTK: $pvtuprefix"
    end
end

function test_run(
    mpicomm,
    ArrayType,
    dim,
    topl,
    N,
    timeend,
    FT,
    dt,
    n,
    α,
    β,
    μ,
    δ,
    vtkdir,
    outputtime,
    linearsolvertype,
    fluxBC,
)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    bcs = (
        InhomogeneousBC{0}(),
        InhomogeneousBC{1}(),
        HomogeneousBC{0}(),
        HomogeneousBC{1}(),
    )
    model = AdvectionDiffusion{dim}(
        Pseudo1D{n, α, β, μ, δ}(),
        bcs,
        flux_bc = fluxBC,
    )

    dg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        direction = EveryDirection(),
    )

    vdg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        state_auxiliary = dg.state_auxiliary,
        direction = VerticalDirection(),
    )

    # NOTE(review): `linvdg` is constructed identically to `vdg` and does not
    # appear to be used below — possibly vestigial; confirm before removing.
    linvdg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        state_auxiliary = dg.state_auxiliary,
        direction = VerticalDirection(),
    )

    Q = init_ode_state(dg, FT(0))

    # linearsolver = GeneralizedMinimalResidual(Q; M = 30, rtol = 1e-5)
    linearsolver =
        BatchedGeneralizedMinimalResidual(dg, Q; atol = -1.0, rtol = 1e-5)
    nonlinearsolver =
        JacobianFreeNewtonKrylovSolver(Q, linearsolver; tol = 1e-4)

    ode_solver = ARK548L2SA2KennedyCarpenter(
        dg,
        vdg,
        NonLinearBackwardEulerSolver(
            nonlinearsolver;
            isadjustable = true,
            preconditioner_update_freq = 1000,
        ),
        Q;
        dt = dt,
        t0 = 0,
        split_explicit_implicit = false,
    )

    eng0 = norm(Q)
    @info @sprintf """Starting norm(Q₀) = %.16e""" eng0

    # Set up the information callback
    starttime = Ref(now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            @info @sprintf(
                """Update simtime = %.16e runtime = %s norm(Q) = %.16e""",
                gettime(ode_solver),
                Dates.format(
                    convert(Dates.DateTime, Dates.now() - starttime[]),
                    Dates.dateformat"HH:MM:SS",
                ),
                energy
            )
        end
    end
    callbacks = (cbinfo,)
    if ~isnothing(vtkdir)
        # create vtk dir
        mkpath(vtkdir)

        vtkstep = 0
        # output initial step
        do_output(
            mpicomm,
            vtkdir,
            vtkstep,
            dg,
            Q,
            Q,
            model,
            "advection_diffusion",
        )

        # setup the output callback
        cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do
            vtkstep += 1
            Qe = init_ode_state(dg, gettime(ode_solver))
            do_output(
                mpicomm,
                vtkdir,
                vtkstep,
                dg,
                Q,
                Qe,
                model,
                "advection_diffusion",
            )
        end
        callbacks = (callbacks..., cbvtk)
    end

    numberofsteps = convert(Int64, cld(timeend, dt))
    dt = timeend / numberofsteps

    @info "time step" dt numberofsteps dt * numberofsteps timeend

    solve!(
        Q,
        ode_solver;
        numberofsteps = numberofsteps,
        callbacks = callbacks,
        adjustfinalstep = false,
    )

    # Print some end of the simulation information
    engf = norm(Q)
    Qe = init_ode_state(dg, FT(timeend))
    engfe = norm(Qe)
    errf = euclidean_distance(Q, Qe)
    @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe
    errf
end

let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()

    mpicomm = MPI.COMM_WORLD

    polynomialorder = 4
    base_num_elem = 4

    # Reference error norms keyed by (dim, level, float type);
    # values match the bgmres variant of this test.
    expected_result = Dict()
    expected_result[2, 1, Float64] = 7.2801198255507391e-02
    expected_result[2, 2, Float64] = 6.8160295851506783e-03
    expected_result[2, 3, Float64] = 1.4439137164205592e-04
    expected_result[2, 4, Float64] = 2.4260727323386998e-06
    expected_result[3, 1, Float64] = 1.0462203776357534e-01
    expected_result[3, 2, Float64] = 1.0280535683502070e-02
    expected_result[3, 3, Float64] = 2.0631857053908848e-04
    expected_result[3, 4, Float64] = 3.3460492914169325e-06
    expected_result[2, 1, Float32] = 7.2801239788532257e-02
    expected_result[2, 2, Float32] = 6.8159680813550949e-03
    expected_result[2, 3, Float32] = 1.4439738879445940e-04
    # This is near roundoff so we will not check it
    # expected_result[2, 4, Float32] = 2.6432753656990826e-06
    expected_result[3, 1, Float32] = 1.0462204366922379e-01
    expected_result[3, 2, Float32] = 1.0280583053827286e-02
    expected_result[3, 3, Float32] = 2.0646647317335010e-04
    expected_result[3, 4, Float32] = 2.0226731066941284e-05

    numlevels = integration_testing ? 4 : 1

    @testset "$(@__FILE__)" begin
        for FT in (Float64, Float32)
            result = zeros(FT, numlevels)
            for dim in 2:3
                for fluxBC in (true, false)
                    d = dim == 2 ? FT[1, 10, 0] : FT[1, 1, 10]
                    n = SVector{3, FT}(d ./ norm(d))

                    α = FT(1)
                    β = FT(1 // 100)
                    μ = FT(-1 // 2)
                    δ = FT(1 // 10)
                    linearsolvertype = "Batched GMRES"
                    for l in 1:numlevels
                        Ne = 2^(l - 1) * base_num_elem
                        brickrange = (
                            ntuple(
                                j -> range(FT(-1); length = Ne + 1, stop = 1),
                                dim - 1,
                            )...,
                            range(FT(-5); length = 5Ne + 1, stop = 5),
                        )
                        periodicity = ntuple(j -> false, dim)
                        topl = StackedBrickTopology(
                            mpicomm,
                            brickrange;
                            periodicity = periodicity,
                            boundary = (
                                ntuple(j -> (1, 2), dim - 1)...,
                                (3, 4),
                            ),
                        )
                        dt = (α / 4) / (Ne * polynomialorder^2)

                        outputtime = 0.01
                        timeend = 0.5

                        @info (ArrayType, FT, dim, linearsolvertype, l, fluxBC)
                        vtkdir = output ?
                            "vtk_advection" *
                            "_poly$(polynomialorder)" *
                            "_dim$(dim)_$(ArrayType)_$(FT)" *
                            "_$(linearsolvertype)_level$(l)" :
                            nothing
                        result[l] = test_run(
                            mpicomm,
                            ArrayType,
                            dim,
                            topl,
                            polynomialorder,
                            timeend,
                            FT,
                            dt,
                            n,
                            α,
                            β,
                            μ,
                            δ,
                            vtkdir,
                            outputtime,
                            linearsolvertype,
                            fluxBC,
                        )
                        # test the errors significantly larger than floating point epsilon
                        if !(dim == 2 && l == 4 && FT == Float32)
                            @test result[l] ≈ FT(expected_result[dim, l, FT])
                        end
                    end
                    @info begin
                        msg = ""
                        for l in 1:(numlevels - 1)
                            rate = log2(result[l]) - log2(result[l + 1])
                            msg *= @sprintf(
                                "\n rate for level %d = %e\n",
                                l,
                                rate
                            )
                        end
                        msg
                    end
                end
            end
        end
    end
end

================================================
FILE: test/Numerics/DGMethods/advection_diffusion/advection_sphere.jl
================================================
using MPI
using ClimateMachine
using Logging
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.BalanceLaws: update_auxiliary_state!
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using ClimateMachine.Atmos: SphericalOrientation, latitude, longitude
using ClimateMachine.Orientations
using LinearAlgebra
using Printf
using Dates
using ClimateMachine.GenericCallbacks:
    EveryXWallTimeSeconds, EveryXSimulationSteps
using ClimateMachine.VTK: writevtk, writepvtu

import ClimateMachine.BalanceLaws: boundary_state!

if !@isdefined integration_testing
    const integration_testing = parse(
        Bool,
        lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
    )
end

const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false")))

include("advection_diffusion_model.jl")

# This is a setup similar to the one presented in [Williamson1992](@cite)
struct SolidBodyRotation <: AdvectionDiffusionProblem end

function init_velocity_diffusion!(
    ::SolidBodyRotation,
    aux::Vars,
    geom::LocalGeometry,
)
    FT = eltype(aux)
    λ = longitude(SphericalOrientation(), aux)
    φ = latitude(SphericalOrientation(), aux)
    r = norm(geom.coord)

    uλ = 2 * FT(π) * cos(φ) * r
    uφ = 0
    # Convert the zonal/meridional components (uλ, uφ) to Cartesian.
    aux.advection.u = SVector(
        -uλ * sin(λ) - uφ * cos(λ) * sin(φ),
        +uλ * cos(λ) - uφ * sin(λ) * sin(φ),
        +uφ * cos(φ),
    )
end
function initial_condition!(::SolidBodyRotation, state, aux, localgeo, t)
    λ = longitude(SphericalOrientation(), aux)
    φ = latitude(SphericalOrientation(), aux)
    state.ρ = exp(-((3λ)^2 + (3φ)^2))
end
finaltime(::SolidBodyRotation) = 1
u_scale(::SolidBodyRotation) = 2π

# This is a setup similar to the one presented in [Lauritzen2012](@cite)
struct ReversingDeformationalFlow <: AdvectionDiffusionProblem end
init_velocity_diffusion!(
    ::ReversingDeformationalFlow,
    aux::Vars,
    geom::LocalGeometry,
) = nothing
function initial_condition!(::ReversingDeformationalFlow, state, aux, coord, t)
    x, y, z = aux.coord
    r = norm(aux.coord)
    h_max = 0.95
    b = 5
    state.ρ = 0
    # Sum of two Gaussian bumps centered at (λ, φ) on the sphere.
    for (λ, φ) in ((5π / 6, 0), (7π / 6, 0))
        xi = r * cos(φ) * cos(λ)
        yi = r * cos(φ) * sin(λ)
        zi = r * sin(φ)
        state.ρ += h_max * exp(-b * ((x - xi)^2 + (y - yi)^2 + (z - zi)^2))
    end
end
has_variable_coefficients(::ReversingDeformationalFlow) = true
function update_velocity_diffusion!(
    ::ReversingDeformationalFlow,
    ::AdvectionDiffusion,
    state::Vars,
    aux::Vars,
    t::Real,
)
    FT = eltype(aux)
    λ = longitude(SphericalOrientation(), aux)
    φ = latitude(SphericalOrientation(), aux)
    r = norm(aux.coord)
    T = FT(5)
    λp = λ - FT(2π) * t / T
    uλ =
        10 * r / T * sin(λp)^2 * sin(2φ) * cos(FT(π) * t / T) +
        FT(2π) * r / T * cos(φ)
    uφ = 10 * r / T * sin(2λp) * cos(φ) * cos(FT(π) * t / T)
    aux.advection.u = SVector(
        -uλ * sin(λ) - uφ * cos(λ) * sin(φ),
        +uλ * cos(λ) - uφ * sin(λ) * sin(φ),
        +uφ * cos(φ),
    )
end
u_scale(::ReversingDeformationalFlow) = 2.9
finaltime(::ReversingDeformationalFlow) = 5

# Local advective Courant number used by the CFL-monitoring callback.
function advective_courant(
    m::AdvectionDiffusion,
    state::Vars,
    aux::Vars,
    diffusive::Vars,
    Δx,
    Δt,
    direction,
)
    return Δt * norm(aux.advection.u) / Δx
end

struct NoFlowBC end

# No-flow boundary: reflect the advecting velocity on the plus side.
function boundary_state!(
    ::RusanovNumericalFlux,
    ::NoFlowBC,
    ::AdvectionDiffusion,
    stateP::Vars,
    auxP::Vars,
    nM,
    stateM::Vars,
    auxM::Vars,
    t,
    _...,
)
    auxP.advection.u = -auxM.advection.u
end

function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname)
    ## name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )

    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"

    writevtk(filename, Q, dg, statenames, Qe, exactnames)

    ## generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)

        ## name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end

        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )

        @info "Done writing VTK: $pvtuprefix"
    end
end

# Run one level of the cubed-sphere advection test with an explicit method;
# returns (error norm, relative mass change).
function test_run(
    mpicomm,
    ArrayType,
    topl,
    problem,
    explicit_method,
    cfl,
    N,
    timeend,
    FT,
    vtkdir,
    outputtime,
)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = equiangular_cubed_sphere_warp,
    )
    dx = min_node_distance(grid, HorizontalDirection())
    dt = FT(cfl * dx / u_scale(problem()))
    dt = outputtime / ceil(Int64, outputtime / dt)

    bcs = (NoFlowBC(),)
    model = AdvectionDiffusion{3}(problem(), bcs, diffusion = false)
    dg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q = init_ode_state(dg, FT(0))

    odesolver = explicit_method(dg, Q; dt = dt, t0 = 0)

    eng0 = norm(Q)
    @info @sprintf """Starting problem = %s method = %s time step = %.16e norm(Q₀) = %.16e""" problem explicit_method dt eng0

    # Set up the information callback
    starttime = Ref(now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            @info @sprintf(
                """Update simtime = %.16e runtime = %s norm(Q) = %.16e""",
                gettime(odesolver),
                Dates.format(
                    convert(Dates.DateTime, Dates.now() - starttime[]),
                    Dates.dateformat"HH:MM:SS",
                ),
                energy
            )
        end
    end
    # NOTE(review): `cbcfl` is built but not added to `callbacks` below;
    # presumably intentional (diagnostic only) — confirm.
    cbcfl = EveryXSimulationSteps(10) do
        dt = ODESolvers.getdt(odesolver)
        cfl = DGMethods.courant(
            advective_courant,
            dg,
            model,
            Q,
            dt,
            HorizontalDirection(),
        )
        @info @sprintf(
            """Courant number simtime = %.16e courant = %.16e""",
            gettime(odesolver),
            cfl
        )
    end
    callbacks = (cbinfo,)
    if ~isnothing(vtkdir)
        # create vtk dir
        mkpath(vtkdir)

        vtkstep = 0
        # output initial step
        do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model, "advection_sphere")

        # setup the output callback
        cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do
            vtkstep += 1
            Qe = init_ode_state(dg, gettime(odesolver))
            do_output(
                mpicomm,
                vtkdir,
                vtkstep,
                dg,
                Q,
                Qe,
                model,
                "advection_sphere",
            )
        end
        callbacks = (callbacks..., cbvtk)
    end

    solve!(Q, odesolver; timeend = timeend, callbacks = callbacks)

    # Print some end of the simulation information
    engf = norm(Q)
    Qe = init_ode_state(dg, FT(timeend))
    engfe = norm(Qe)
    errf = euclidean_distance(Q, Qe)
    Δmass = abs(weightedsum(Q) - weightedsum(Qe)) / weightedsum(Qe)
    @info @sprintf """Finished Δmass = %.16e norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ Δmass engf engf / eng0 engf - eng0 errf errf / engfe
    return errf, Δmass
end

using Test
let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()

    mpicomm = MPI.COMM_WORLD

    polynomialorder = 4
    base_num_elem = 2

    # Maximum stable CFL per explicit time integrator.
    max_cfl = Dict(
        LSRK144NiegemannDiehlBusch => 5.0,
        SSPRK33ShuOsher => 1.0,
        SSPRK34SpiteriRuuth => 1.5,
    )

    # Reference error norms keyed by (problem, method, level).
    expected_result = Dict()
    expected_result[SolidBodyRotation, LSRK144NiegemannDiehlBusch, 1] =
        1.3199024557832748e-01
    expected_result[SolidBodyRotation, LSRK144NiegemannDiehlBusch, 2] =
        1.9868931633120656e-02
    expected_result[SolidBodyRotation, LSRK144NiegemannDiehlBusch, 3] =
        1.4052110916915061e-03
    expected_result[SolidBodyRotation, LSRK144NiegemannDiehlBusch, 4] =
        9.0193766298676310e-05
    expected_result[SolidBodyRotation, SSPRK33ShuOsher, 1] =
        1.1055145388897809e-01
    expected_result[SolidBodyRotation, SSPRK33ShuOsher, 2] =
        1.5510740467668628e-02
    expected_result[SolidBodyRotation, SSPRK33ShuOsher, 3] =
        1.8629481690361454e-03
    expected_result[SolidBodyRotation, SSPRK33ShuOsher, 4] =
        2.3567040048588889e-04
    expected_result[SolidBodyRotation, SSPRK34SpiteriRuuth, 1] =
        1.2641959922001456e-01
    expected_result[SolidBodyRotation, SSPRK34SpiteriRuuth, 2] =
        2.2780375948714751e-02
    expected_result[SolidBodyRotation, SSPRK34SpiteriRuuth, 3] =
        3.1274951764826459e-03
    expected_result[SolidBodyRotation, SSPRK34SpiteriRuuth, 4] =
        3.9734060514021565e-04
    expected_result[ReversingDeformationalFlow, LSRK144NiegemannDiehlBusch, 1] =
        5.5387951598735408e-01
    expected_result[ReversingDeformationalFlow, LSRK144NiegemannDiehlBusch, 2] =
        3.7610388138383732e-01
    expected_result[ReversingDeformationalFlow, LSRK144NiegemannDiehlBusch, 3] =
        1.7823508719111605e-01
    expected_result[ReversingDeformationalFlow, LSRK144NiegemannDiehlBusch, 4] =
        3.8639493470255713e-02
    expected_result[ReversingDeformationalFlow, SSPRK33ShuOsher, 1] =
        5.5353962032596349e-01
    expected_result[ReversingDeformationalFlow, SSPRK33ShuOsher, 2] =
        3.7645487928038762e-01
    expected_result[ReversingDeformationalFlow, SSPRK33ShuOsher, 3] =
        1.7823263736245307e-01
    expected_result[ReversingDeformationalFlow, SSPRK33ShuOsher, 4] =
        3.8605903366230925e-02
    expected_result[ReversingDeformationalFlow, SSPRK34SpiteriRuuth, 1] =
        5.5404045660832824e-01
    expected_result[ReversingDeformationalFlow, SSPRK34SpiteriRuuth, 2] =
        3.7788858038003154e-01
    expected_result[ReversingDeformationalFlow, SSPRK34SpiteriRuuth, 3] =
        1.8007113931230376e-01
    expected_result[ReversingDeformationalFlow, SSPRK34SpiteriRuuth, 4] =
        3.9941331660544775e-02

    numlevels =
        integration_testing || ClimateMachine.Settings.integration_testing ? 4 :
        1

    @testset "$(@__FILE__)" begin
        for FT in (Float64,)
            for problem in (SolidBodyRotation, ReversingDeformationalFlow)
                for explicit_method in (
                    LSRK144NiegemannDiehlBusch,
                    SSPRK33ShuOsher,
                    SSPRK34SpiteriRuuth,
                )
                    cfl = max_cfl[explicit_method]
                    result = zeros(FT, numlevels)
                    for l in 1:numlevels
                        numelems_horizontal = 2^(l - 1) * base_num_elem
                        numelems_vertical = 1
                        topl = StackedCubedSphereTopology(
                            mpicomm,
                            numelems_horizontal,
                            range(
                                FT(1),
                                stop = 2,
                                length = numelems_vertical + 1,
                            ),
                        )
                        timeend = finaltime(problem())
                        outputtime = timeend

                        @info (ArrayType, FT)
                        vtkdir = output ?
    begin
        # m = 1 l = 2 spherical harmonic
        ρ₀ = cos(φ) * sin(θ) * cos(θ)
        l = 2
        c = l * (l + 1) / r^2
        μ = aux.diffusion.D[1]
        # Exact decay rates: e^{-cμt} for diffusion, e^{-c²μt} for hyperdiffusion.
        state.ρ = (ρ₀ * exp(-c * μ * t), ρ₀ * exp(-c^2 * μ * t))
    end
end

function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname)
    ## name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )

    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"

    writevtk(filename, Q, dg, statenames, Qe, exactnames)

    ## Generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)

        ## name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end

        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )

        @info "Done writing VTK: $pvtuprefix"
    end
end

# Run one level of the sphere diffusion/hyperdiffusion decay test.
function run(mpicomm, ArrayType, topl, N, timeend, FT, vtkdir, outputtime)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = equiangular_cubed_sphere_warp,
    )
    dx = min_node_distance(grid)
    dt = 300 * dx^4
    @info "time step" dt
    dt = outputtime / ceil(Int64, outputtime / dt)

    model = AdvectionDiffusion{3}(
        DiffusionSphere(),
        advection = false,
        diffusion = true,
        hyperdiffusion = true,
        num_equations = 2,
    )

    dg = DGModel(
        model,
        grid,
        CentralNumericalFluxFirstOrder(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        diffusion_direction = HorizontalDirection(),
    )

    Q = init_ode_state(dg, FT(0))

    lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0)

    # Per-equation norms: equation 1 is diffusion, equation 2 hyperdiffusion.
    eng0 = norm(Q, dims = (1, 3))
    @info @sprintf """Starting norm(Q₀) = %.16e""" eng0[1]

    # Set up the information callback
    starttime = Ref(now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            @info @sprintf(
                """Update simtime = %.16e runtime = %s norm(Q) = %.16e""",
                gettime(lsrk),
                Dates.format(
                    convert(Dates.DateTime, Dates.now() - starttime[]),
                    Dates.dateformat"HH:MM:SS",
                ),
                energy
            )
        end
    end
    callbacks = (cbinfo,)
    if ~isnothing(vtkdir)
        # create vtk dir
        mkpath(vtkdir)

        vtkstep = 0
        # output initial step
        do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model, "diffusion_sphere")

        # setup the output callback
        cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do
            vtkstep += 1
            Qe = init_ode_state(dg, gettime(lsrk))
            do_output(
                mpicomm,
                vtkdir,
                vtkstep,
                dg,
                Q,
                Qe,
                model,
                "diffusion_sphere",
            )
        end
        callbacks = (callbacks..., cbvtk)
    end

    solve!(Q, lsrk; timeend = timeend, callbacks = callbacks)

    # Print some end of the simulation information
    engf = norm(Q, dims = (1, 3))
    Qe = init_ode_state(dg, FT(timeend))
    engfe = norm(Qe, dims = (1, 3))
    errf = norm(Q .- Qe, dims = (1, 3))
    metrics = @. (engf, engf / eng0, engf - eng0, errf, errf / engfe)
    @info @sprintf """Finished Diffusion: norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e HyperDiffusion: norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ first.(metrics)... last.(metrics)...
    errf
end

using Test
let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    polynomialorder = 3
    base_num_elem = 4

    # Reference error norms keyed by (operator, level).
    expected_result = Dict()
    expected_result[Diffusion, 1] = 2.6002775334282785e-06
    expected_result[Diffusion, 2] = 4.1602462623838931e-07
    expected_result[Diffusion, 3] = 5.8997889725858304e-08
    expected_result[HyperDiffusion, 1] = 9.3032674316702424e-05
    expected_result[HyperDiffusion, 2] = 1.0149377619104328e-05
    expected_result[HyperDiffusion, 3] = 1.2985297333025857e-06

    numlevels =
        integration_testing || ClimateMachine.Settings.integration_testing ? 3 :
        1
    @testset "$(@__FILE__)" begin
        for FT in (Float64,)
            result = Dict()
            for l in 1:numlevels
                Ne = 2^(l - 1) * base_num_elem
                vert_range = grid1d(1, 2, nelem = 1)
                topl = StackedCubedSphereTopology(
                    mpicomm,
                    Ne,
                    vert_range,
                    boundary = (0, 0),
                )
                timeend = FT(1)
                outputtime = FT(2)

                @info (ArrayType, FT)
                vtkdir = output ?
                    "vtk_diffusion_sphere" *
                    "_poly$(polynomialorder)" *
                    "_$(ArrayType)_$(FT)" *
                    "_level$(l)" :
                    nothing
                result[l] = run(
                    mpicomm,
                    ArrayType,
                    topl,
                    polynomialorder,
                    timeend,
                    FT,
                    vtkdir,
                    outputtime,
                )
                @test result[l][1] ≈ expected_result[Diffusion, l]
                @test result[l][2] ≈ expected_result[HyperDiffusion, l]
            end
            @info begin
                msg = ""
                for l in 1:(numlevels - 1)
                    rate = @. log2(result[l]) - log2(result[l + 1])
                    msg *= @sprintf(
                        "\n rates for level %d Diffusion = %e",
                        l,
                        rate[1]
                    )
                    msg *= @sprintf(", HyperDiffusion = %e\n", rate[2])
                end
                msg
            end
        end
    end
end

nothing

================================================
FILE: test/Numerics/DGMethods/advection_diffusion/direction_splitting_advection_diffusion.jl
================================================
using Test
using MPI
using ClimateMachine
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using LinearAlgebra
using Printf
using Random

include("advection_diffusion_model.jl")

struct Box{dim} end
struct Sphere end

vertical_unit_vector(::Box{2}, ::SVector{3}) = SVector(0, 1, 0)
vertical_unit_vector(::Box{3}, ::SVector{3}) = SVector(0, 0, 1)
vertical_unit_vector(::Sphere, coord::SVector{3}) = coord / norm(coord)

projection(::EveryDirection, ::SVector{3}) = I
projection(::VerticalDirection, k::SVector{3}) = k * k'
projection(::HorizontalDirection, k::SVector{3}) = I - k * k'

struct TestProblem{adv, diff, dir, topo} <: AdvectionDiffusionProblem end

initial_ρ(::Box, x) = prod(sin.(π * x))
function initial_ρ(::Sphere, x)
    r = norm(x)
    φ = atan(x[2], x[1])
    θ = atan(sqrt(x[1]^2 + x[2]^2), x[3])
return sin(π * (r - 1)) * sin(φ) * sin(θ)
end

# Velocity field on the box domain: componentwise sin(π x).
velocity(::Box, x) = sin.(π * x)

# Velocity field on the spherical shell, built from the radius r and the
# spherical angles φ (azimuth) and θ (polar) recovered from Cartesian x.
function velocity(::Sphere, x)
    r = norm(x)
    φ = atan(x[2], x[1])
    θ = atan(sqrt(x[1]^2 + x[2]^2), x[3])
    return sin(π * (r - 1)) .*
           SVector(cos(φ) * cos(θ), sin(φ) * cos(θ), cos(φ) * sin(θ))
end

# Fill the auxiliary state with the (possibly direction-projected) advection
# velocity and diffusion tensor for this TestProblem instance.
# `adv`/`diff` are `nothing` when the corresponding term is disabled; `dir`
# selects the projection (Every/Vertical/Horizontal) and `topo` the domain.
function init_velocity_diffusion!(
    ::TestProblem{adv, diff, dir, topo},
    aux::Vars,
    geom::LocalGeometry,
) where {adv, diff, dir, topo}
    # Projection operator built from the local vertical unit vector.
    k = vertical_unit_vector(topo, geom.coord)
    P = projection(dir, k)
    # Zero out the field entirely when the term is disabled.
    aux.advection.u =
        !isnothing(adv) ? P * velocity(topo, geom.coord) : zeros(SVector{3})
    aux.diffusion.D =
        !isnothing(diff) ? SMatrix{3, 3}(P) / 200 : zeros(SMatrix{3, 3})
end

# Set the initial tracer field from the topology-specific profile initial_ρ.
function initial_condition!(
    ::TestProblem{adv, diff, dir, topo},
    state,
    aux,
    localgeo,
    t,
) where {adv, diff, dir, topo}
    state.ρ = initial_ρ(topo, localgeo.coord)
end

# Build a stacked-brick topology on the unit box in `dim` dimensions with
# non-periodic boundaries; 3-D uses full connectivity, 2-D face connectivity.
function create_topology(::Box{dim}, mpicomm, Ne, FT) where {dim}
    brickrange = ntuple(j -> range(FT(0); length = Ne + 1, stop = 1), dim)
    periodicity = ntuple(j -> false, dim)
    bc = ntuple(j -> (1, 1), dim)
    connectivity = dim == 3 ? :full : :face
    StackedBrickTopology(
        mpicomm,
        brickrange;
        periodicity = periodicity,
        boundary = bc,
        connectivity = connectivity,
    )
end

# Build a stacked cubed-sphere topology for a shell with radii in [1, 2].
function create_topology(::Sphere, mpicomm, Ne, FT)
    vert_range = grid1d(FT(1), FT(2), nelem = Ne)
    StackedCubedSphereTopology(mpicomm, Ne, vert_range, boundary = (1, 1))
end

# Convenience constructor: a DGModel with the standard numerical fluxes used
# throughout this test, restricted to the given direction.
create_dg(model, grid, direction) = DGModel(
    model,
    grid,
    RusanovNumericalFlux(),
    CentralNumericalFluxSecondOrder(),
    CentralNumericalFluxGradient(),
    direction = direction,
)

# Assemble grid/models for one (advection, diffusion, topology) combination
# and check that direction-split tendencies are consistent (body continues
# on the following lines).
function test_run(
    adv,
    diff,
    topo,
    mpicomm,
    ArrayType,
    FT,
    polynomialorder,
    Ne,
    level,
)
    topology = create_topology(topo(), mpicomm, Ne, FT)
    grid = DiscontinuousSpectralElementGrid(
        topology,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = polynomialorder,
        # Warp the mesh only for the sphere; boxes use the identity map.
        meshwarp = topo == Sphere ? equiangular_cubed_sphere_warp : (x...)
-> identity(x), ) problems = ( p = TestProblem{adv, diff, EveryDirection(), topo()}(), vp = TestProblem{adv, diff, VerticalDirection(), topo()}(), hp = TestProblem{adv, diff, HorizontalDirection(), topo()}(), ) bcs = (HomogeneousBC{0}(),) models = map(p -> AdvectionDiffusion{3}(p, bcs), problems) dgmodels = map(models) do m ( dg = create_dg(m, grid, EveryDirection()), vdg = create_dg(m, grid, VerticalDirection()), hdg = create_dg(m, grid, HorizontalDirection()), ) end Q = init_ode_state(dgmodels.p.dg, FT(0), init_on_cpu = true) # do one Euler step to trigger numerical fluxes in subsequent evaluations let dt = 1e-3 dQ = similar(Q) dgmodels.p.dg(dQ, Q, nothing, FT(0)) Q .+= dt .* dQ end # evaluate all combinations dQ = map( x -> map(dg -> (dQ = similar(Q); dg(dQ, Q, nothing, FT(0)); dQ), x), dgmodels, ) # set up tolerances atolm = 6e-13 atolv = 3e-4 / 5^(level - 1) atolh = 0.0003 / 5^(level - 1) @testset "total" begin atol = topo <: Box || diff == nothing ? atolm : atolh @test isapprox(norm(dQ.p.dg .- dQ.p.vdg .- dQ.p.hdg), 0, atol = atol) @test isapprox(norm(dQ.vp.dg .- dQ.vp.vdg .- dQ.vp.hdg), 0, atol = atol) @test isapprox(norm(dQ.hp.dg .- dQ.hp.vdg .- dQ.hp.hdg), 0, atol = atol) end @testset "vertical" begin atol = topo <: Box ? atolm : atolv @test isapprox(norm(dQ.vp.dg .- dQ.vp.vdg), 0, atol = atol) @test isapprox(norm(dQ.vp.hdg), 0, atol = atol) @test isapprox(norm(dQ.vp.dg .- dQ.p.vdg), 0, atol = atol) end @testset "horizontal" begin atol = topo <: Box ? atolm : atolh @test isapprox(norm(dQ.hp.dg .- dQ.hp.hdg), 0, atol = atol) @test isapprox(norm(dQ.hp.vdg), 0, atol = atol) @test isapprox(norm(dQ.hp.dg - dQ.p.hdg), 0, atol = atol) end end let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD FT = Float64 numlevels = 2 base_num_elem = 4 # This test doesn't do any heavy computational work, # but compiles a lot of model/discretization combinations. 
# Compilation times on the GPU are longer, so we only run one # variable-degree case on the GPU if ArrayType == Array polynomialorders = ((4, 4), (4, 2)) else polynomialorders = ((4, 2),) end @info @sprintf """Test parameters: ArrayType = %s FloatType = %s Polynomial orders = %s """ ArrayType FT polynomialorders @testset "$(@__FILE__)" begin @testset for polyorders in polynomialorders @testset for topo in (Box{2}, Box{3}, Sphere) @testset for (adv, diff) in ( (Advection, nothing), (nothing, Diffusion), (Advection, Diffusion), ) @testset for level in 1:numlevels Ne = 2^(level - 1) * base_num_elem test_run( adv, diff, topo, mpicomm, ArrayType, FT, polyorders, Ne, level, ) end end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/fvm_advection.jl ================================================ import Printf: @sprintf import LinearAlgebra: dot, norm import Dates import MPI import ClimateMachine import ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear import ClimateMachine.DGMethods.NumericalFluxes: RusanovNumericalFlux, CentralNumericalFluxSecondOrder, CentralNumericalFluxGradient import ClimateMachine.DGMethods: DGFVModel, init_ode_state import ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps import ClimateMachine.MPIStateArrays: MPIStateArray, euclidean_distance import ClimateMachine.Mesh.Grids: DiscontinuousSpectralElementGrid, EveryDirection import ClimateMachine.Mesh.Topologies: StackedBrickTopology import ClimateMachine.ODESolvers: LSRK54CarpenterKennedy, solve!, gettime import ClimateMachine.VTK: writevtk, writepvtu if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false"))) include("advection_diffusion_model.jl") struct Pseudo1D{n, α} <: AdvectionDiffusionProblem end function 
init_velocity_diffusion!(
    ::Pseudo1D{n, α},
    aux::Vars,
    geom::LocalGeometry,
) where {n, α}
    # Direction of flow is n with magnitude α
    aux.advection.u = α * n
end

# Pseudo-1-D initial condition: a sine wave in the coordinate ξn = n ⋅ x,
# advected with speed α (so this is also the exact solution at time t).
function initial_condition!(
    ::Pseudo1D{n, α},
    state,
    aux,
    localgeo,
    t,
) where {n, α}
    ξn = dot(n, localgeo.coord)
    state.ρ = sin((ξn - α * t) * pi)
end

# Dirichlet boundary data (order 0) reuses the exact solution above.
inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...)

# Write per-rank VTK output for state Q and exact state Qe, plus a pvtu
# index file on rank 0.
function do_output(mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, testname)
    ## Name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )
    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"
    writevtk(filename, Q, dgfvm, statenames, Qe, exactnames)

    ## Generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## Name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)
        ## Name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end
        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )
        @info "Done writing VTK: $pvtuprefix"
    end
end

# Run one FV-advection convergence-test configuration and return the final
# error norm ‖Q - Qe‖ (body continues on the following lines).
function test_run(
    mpicomm,
    ArrayType,
    fvmethod,
    dim,
    topl,
    N,
    timeend,
    FT,
    dt,
    n,
    α,
    vtkdir,
    outputtime,
)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )
    # Single inhomogeneous Dirichlet boundary condition; diffusion disabled.
    bcs = (InhomogeneousBC{0}(),)
    model = AdvectionDiffusion{dim}(Pseudo1D{n, α}(), bcs, diffusion = false)
    dgfvm = DGFVModel(
        model,
        grid,
        fvmethod,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient();
        direction = EveryDirection(),
    )

    Q = init_ode_state(dgfvm, FT(0))

    lsrk = LSRK54CarpenterKennedy(dgfvm, Q; dt = dt, t0 = 0)

    eng0 = norm(Q)
    @info @sprintf """Starting norm(Q₀) = %.16e""" eng0

    # Set up the information callback
    starttime = Ref(Dates.now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
if s starttime[] = Dates.now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(lsrk), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # Create vtk dir mkpath(vtkdir) vtkstep = 0 # Output initial step do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Q, model, "advection_diffusion", ) # Setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dgfvm, gettime(lsrk)) do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, "advection_diffusion", ) end callbacks = (callbacks..., cbvtk) end solve!(Q, lsrk; timeend = timeend, callbacks = callbacks) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dgfvm, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end using Test let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD base_num_elem = 4 expected_result = Dict() expected_result[2, 1, Float64, FVConstant()] = 1.0404261715459338e-01 expected_result[2, 2, Float64, FVConstant()] = 5.5995868545685376e-02 expected_result[2, 3, Float64, FVConstant()] = 2.9383695610072275e-02 expected_result[2, 4, Float64, FVConstant()] = 1.5171779426843507e-02 expected_result[2, 1, Float64, FVLinear()] = 8.3196657944903635e-02 expected_result[2, 2, Float64, FVLinear()] = 3.9277132273521774e-02 expected_result[2, 3, Float64, FVLinear()] = 1.7155773433218020e-02 expected_result[2, 4, Float64, FVLinear()] = 7.6525022056819231e-03 expected_result[3, 1, Float64, FVConstant()] = 9.6785620234054362e-02 expected_result[3, 2, Float64, FVConstant()] = 5.3406412788842651e-02 expected_result[3, 3, 
Float64, FVConstant()] = 2.8471535157235807e-02 expected_result[3, 4, Float64, FVConstant()] = 1.4846239937398318e-02 expected_result[3, 1, Float64, FVLinear()] = 8.5860120005258181e-02 expected_result[3, 2, Float64, FVLinear()] = 4.2844889694123235e-02 expected_result[3, 3, Float64, FVLinear()] = 1.9302295207100174e-02 expected_result[3, 4, Float64, FVLinear()] = 8.6084633401356733e-03 expected_result[2, 1, Float32, FVConstant()] = 1.0404255986213684e-01 expected_result[2, 2, Float32, FVConstant()] = 5.5995877832174301e-02 expected_result[2, 3, Float32, FVConstant()] = 2.9383875429630280e-02 expected_result[2, 4, Float32, FVConstant()] = 1.5171864069998264e-02 expected_result[2, 1, Float32, FVLinear()] = 8.3196602761745453e-02 expected_result[2, 2, Float32, FVLinear()] = 3.9277125149965286e-02 expected_result[2, 3, Float32, FVLinear()] = 1.7155680805444717e-02 expected_result[2, 4, Float32, FVLinear()] = 7.6521718874573708e-03 expected_result[3, 1, Float32, FVConstant()] = 9.6785508096218109e-02 expected_result[3, 2, Float32, FVConstant()] = 5.3406376391649246e-02 expected_result[3, 3, Float32, FVConstant()] = 2.8471505269408226e-02 expected_result[3, 4, Float32, FVConstant()] = 1.4849635772407055e-02 expected_result[3, 1, Float32, FVLinear()] = 8.5860058665275574e-02 expected_result[3, 2, Float32, FVLinear()] = 4.2844854295253754e-02 expected_result[3, 3, Float32, FVLinear()] = 1.9302234053611755e-02 expected_result[3, 4, Float32, FVLinear()] = 8.6139924824237823e-03 @testset "$(@__FILE__)" begin for FT in (Float64, Float32) numlevels = integration_testing || ClimateMachine.Settings.integration_testing ? 4 : 1 result = zeros(FT, numlevels) for dim in 2:3 N = (ntuple(j -> 4, dim - 1)..., 0) n = dim == 2 ? SVector{3, FT}(1 / sqrt(2), 1 / sqrt(2), 0) : SVector{3, FT}(1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3)) α = FT(1) connectivity = dim == 2 ? 
:face : :full for fvmethod in (FVConstant(), FVLinear()) @info @sprintf """Configuration FT = %s ArrayType = %s FV Reconstruction = %s dims = %d """ FT ArrayType fvmethod dim for l in 1:numlevels Ne = 2^(l - 1) * base_num_elem brickrange = ( ntuple( j -> range(FT(-1); length = Ne + 1, stop = 1), dim - 1, )..., range(FT(-1); length = N[1] * Ne + 1, stop = 1), ) periodicity = ntuple(j -> false, dim) bc = ntuple(j -> (1, 1), dim) topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = bc, connectivity = connectivity, ) dt = (α / 4) / (Ne * max(1, maximum(N))^2) timeend = FT(1 // 4) outputtime = timeend dt = outputtime / ceil(Int64, outputtime / dt) vtkdir = output ? "vtk_advection" * "_poly$(N)" * "_dim$(dim)_$(ArrayType)_$(FT)" * "_level$(l)" : nothing result[l] = test_run( mpicomm, ArrayType, fvmethod, dim, topl, N, timeend, FT, dt, n, α, vtkdir, outputtime, ) @test result[l] ≈ FT(expected_result[dim, l, FT, fvmethod]) end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/fvm_advection_diffusion.jl ================================================ using MPI using ClimateMachine using Logging using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using LinearAlgebra using Printf using Test import ClimateMachine.VTK: writevtk, writepvtu import ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using Dates if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end 
const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false")))

include("advection_diffusion_model.jl")

# Pseudo-1-D advection–diffusion problem in one or more directions ns, with
# advection speed α, diffusivity β, Gaussian center μ and initial width δ.
struct Pseudo1D{ns, α, β, μ, δ} <: AdvectionDiffusionProblem end

# Fill auxiliary state: one velocity column and one diffusion-tensor block
# per direction ns[i], concatenated horizontally.
function init_velocity_diffusion!(
    ::Pseudo1D{ns, α, β},
    aux::Vars,
    geom::LocalGeometry,
) where {ns, α, β}
    # Direction of flow is ns[i] with magnitude α
    aux.advection.u = hcat(ntuple(i -> α * ns[i], Val(length(ns)))...)

    # Diffusion of strength β in the ns[i] direction
    aux.diffusion.D = hcat(ntuple(i -> β * ns[i] * ns[i]', Val(length(ns)))...)
end

# Gaussian profile centered at μ + α t whose width grows with β (δ + t):
# the translating/spreading exact solution used by this test.
function gaussian(x, t, α, β, μ, δ)
    exp(-(x - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
end

# Directional derivative of `gaussian` along n (chain rule on the exponent).
function ∇gaussian(n, x, t, α, β, μ, δ)
    -2n * (x - μ - α * t) / (4 * β * (δ + t)) *
    exp(-(x - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
end

# Initial/exact state: one Gaussian per direction, evaluated at ξn = ns[i]⋅x.
# With a single direction ρ is stored as a scalar rather than a 1-tuple.
function initial_condition!(
    ::Pseudo1D{ns, α, β, μ, δ},
    state,
    aux,
    localgeo,
    t,
) where {ns, α, β, μ, δ}
    ρ = ntuple(Val(length(ns))) do i
        ξn = dot(ns[i], localgeo.coord)
        gaussian(ξn, t, α, β, μ, δ)
    end
    state.ρ = length(ns) == 1 ? ρ[1] : ρ
end

# Dirichlet (order 0) boundary data: the exact solution itself.
inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...)

# Neumann (order 1) boundary data: exact gradient, one column per direction
# (closing `end` of this method follows on the next original line).
function inhomogeneous_data!(
    ::Val{1},
    ::Pseudo1D{ns, α, β, μ, δ},
    ∇state,
    aux,
    x,
    t,
) where {ns, α, β, μ, δ}
    ∇state.ρ = hcat(ntuple(Val(length(ns))) do i
        ξn = dot(ns[i], x)
        ∇gaussian(ns[i], ξn, t, α, β, μ, δ)
    end...)
end function do_output(mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, testname) ## Name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dgfvm, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## Name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## Name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end function test_run( mpicomm, dim, fvmethod, polynomialorders, level, ArrayType, FT, vtkdir, direction, ) n_hd = dim == 2 ? SVector{3, FT}(1, 0, 0) : SVector{3, FT}(1 / sqrt(2), 1 / sqrt(2), 0) n_vd = dim == 2 ? SVector{3, FT}(0, 1, 0) : SVector{3, FT}(0, 0, 1) n_dg = dim == 2 ? SVector{3, FT}(1 / sqrt(2), 1 / sqrt(2), 0) : SVector{3, FT}(1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3)) connectivity = dim == 2 ? :face : :full if direction isa EveryDirection ns = (n_hd, n_vd, n_dg) elseif direction isa HorizontalDirection ns = (n_hd,) elseif direction isa VerticalDirection ns = (n_vd,) end α = FT(1) β = FT(1 // 100) μ = FT(-1 // 2) δ = FT(1 // 10) # Grid/topology information base_num_elem = 4 Ne = 2^(level - 1) * base_num_elem N = polynomialorders L = ntuple(j -> FT(j == dim ? 
1 : N[1]) / 4, dim) brickrange = ntuple(j -> range(-L[j]; length = Ne + 1, stop = L[j]), dim) periodicity = ntuple(j -> false, dim) bc = ntuple(j -> (1, 2), dim) topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = bc, connectivity = connectivity, ) dt = (α / 4) * L[1] / (Ne * polynomialorders[1]^2) timeend = 1 outputtime = timeend / 10 @info "time step" dt @info @sprintf """Test parameters: FVM Reconstruction = %s ArrayType = %s FloatType = %s Dimension = %s Direction = %s Horizontal polynomial order = %s Vertical polynomial order = %s """ fvmethod ArrayType FT dim direction polynomialorders[1] polynomialorders[end] grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorders, ) bcs = (InhomogeneousBC{0}(), InhomogeneousBC{1}()) # Model being tested model = AdvectionDiffusion{dim}( Pseudo1D{ns, α, β, μ, δ}(), bcs, num_equations = length(ns), ) # Main DG discretization dgfvm = DGFVModel( model, grid, fvmethod, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); direction = direction, ) # Initialize all relevant state arrays and create solvers Q = init_ode_state(dgfvm, FT(0)) eng0 = norm(Q, dims = (1, 3)) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0[1] solver = LSRK54CarpenterKennedy(dgfvm, Q; dt = dt, t0 = 0) # Set up the information callback starttime = Ref(Dates.now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = Dates.now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(solver), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # Create vtk dir mkpath(vtkdir) vtkstep = 0 # Output initial step do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Q, model, "advection_diffusion", ) # Setup the output callback cbvtk = 
EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dgfvm, gettime(solver)) do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, "advection_diffusion", ) end callbacks = (callbacks..., cbvtk) end solve!(Q, solver; timeend = timeend, callbacks = callbacks) # Reference solution engf = norm(Q, dims = (1, 3)) Q_ref = init_ode_state(dgfvm, FT(timeend)) engfe = norm(Q_ref, dims = (1, 3)) errf = norm(Q_ref .- Q, dims = (1, 3)) metrics = @. (engf, engf / eng0, engf - eng0, errf, errf / engfe) @info begin j = 1 msg = "Finished\n" if direction isa HorizontalDirection || direction isa EveryDirection msg *= @sprintf """ Horizontal field: norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ ntuple(f -> metrics[f][j], 5)... j += 1 end if direction isa VerticalDirection || direction isa EveryDirection msg *= @sprintf """ Vertical field: norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ ntuple(f -> metrics[f][j], 5)... j += 1 end if direction isa EveryDirection msg *= @sprintf """ Diagonal field: norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ ntuple(f -> metrics[f][j], 5)... 
end msg end return Tuple(errf) end """ main() Run this test problem """ function main() ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD # Dictionary keys: dim, level, and FT expected_result = Dict() expected_result[2, 1, Float32, FVConstant()] = (2.3391991853713989e-02, 5.0738707184791565e-02, 4.1857458651065826e-02) expected_result[2, 2, Float32, FVConstant()] = (2.0331495907157660e-03, 3.3669386059045792e-02, 2.3904174566268921e-02) expected_result[2, 3, Float32, FVConstant()] = (2.6557327146292664e-05, 2.0002065226435661e-02, 1.2790882028639317e-02) expected_result[2, 1, Float32, FVLinear()] = (2.3391995579004288e-02, 4.9536284059286118e-02, 3.5911198705434799e-02) expected_result[2, 2, Float32, FVLinear()] = (2.0331430714577436e-03, 2.2621221840381622e-02, 1.5531544573605061e-02) expected_result[2, 3, Float32, FVLinear()] = (2.6568108296487480e-05, 7.5304862111806870e-03, 6.4526572823524475e-03) expected_result[3, 1, Float32, FVConstant()] = (8.7377587333321571e-03, 7.1755334734916687e-02, 4.3733172118663788e-02) expected_result[3, 2, Float32, FVConstant()] = (6.2517996411770582e-04, 4.7615684568881989e-02, 2.3986011743545532e-02) expected_result[3, 3, Float32, FVConstant()] = (3.5004395613214001e-05, 2.8287241235375404e-02, 1.2639496475458145e-02) expected_result[3, 1, Float32, FVLinear()] = (8.7377615272998810e-03, 7.0054858922958374e-02, 3.7571556866168976e-02) expected_result[3, 2, Float32, FVLinear()] = (6.2518107006326318e-04, 3.1991228461265564e-02, 1.6405479982495308e-02) expected_result[3, 3, Float32, FVLinear()] = (3.5004966775886714e-05, 1.0649790056049824e-02, 7.1499347686767578e-03) expected_result[2, 1, Float64, FVConstant()] = (2.3391871809628567e-02, 5.0738761087541523e-02, 4.1857480220018319e-02) expected_result[2, 2, Float64, FVConstant()] = (2.0332783913617892e-03, 3.3669399006499491e-02, 2.3904204301839819e-02) expected_result[2, 3, Float64, FVConstant()] = (2.6572168347086839e-05, 
2.0002110124502395e-02, 1.2790993871268752e-02) expected_result[2, 4, Float64, FVConstant()] = (1.9890000550154039e-07, 1.0929144069594809e-02, 6.5110938897763263e-03) expected_result[2, 1, Float64, FVLinear()] = (2.3391871809628550e-02, 4.9536356061135857e-02, 3.5911213637569099e-02) expected_result[2, 2, Float64, FVLinear()] = (2.0332783913618278e-03, 2.2621252826152356e-02, 1.5531565558700888e-02) expected_result[2, 3, Float64, FVLinear()] = (2.6572168347111800e-05, 7.5305291439555161e-03, 6.4526740563561544e-03) expected_result[2, 4, Float64, FVLinear()] = (1.9890000549995218e-07, 2.5302226025389427e-03, 2.7792905025059711e-03) expected_result[3, 1, Float64, FVConstant()] = (8.7378337431297422e-03, 7.1755444068009461e-02, 4.3733196512722658e-02) expected_result[3, 2, Float64, FVConstant()] = (6.2510740807095622e-04, 4.7615720711942776e-02, 2.3986017606198881e-02) expected_result[3, 3, Float64, FVConstant()] = (3.4995405318038341e-05, 2.8287255414151377e-02, 1.2639742577376042e-02) expected_result[3, 4, Float64, FVConstant()] = (1.4362091045094841e-06, 1.5456143768350493e-02, 6.3677406803847331e-03) expected_result[3, 1, Float64, FVLinear()] = (8.7378337431297439e-03, 7.0054986572200995e-02, 3.7571599264936015e-02) expected_result[3, 2, Float64, FVLinear()] = (6.2510740807095286e-04, 3.1991282544615307e-02, 1.6405489038457288e-02) expected_result[3, 3, Float64, FVLinear()] = (3.4995405318035868e-05, 1.0649776447227678e-02, 7.1502921502446283e-03) expected_result[3, 4, Float64, FVLinear()] = (1.4362091045110612e-06, 3.5782751203335653e-03, 3.1186151510318319e-03) @testset "Variable degree DG: advection diffusion model" begin for FT in (Float32, Float64) for direction in (EveryDirection(), HorizontalDirection(), VerticalDirection()) numlevels = integration_testing || ClimateMachine.Settings.integration_testing ? (FT == Float64 ? 
4 : 3) : 1 for dim in 2:3 for fvmethod in (FVConstant(), FVLinear(), FVLinear{3}()) polynomialorders = (4, 0) result = Dict() for level in 1:numlevels vtkdir = output ? "vtk_advection" * "_poly$(polynomialorders)" * "_dim$(dim)_$(ArrayType)_$(FT)" * "_fvmethod$(fvmethod)" * "_level$(level)" : nothing result[level] = test_run( mpicomm, dim, fvmethod, polynomialorders, level, ArrayType, FT, vtkdir, direction, ) fv_key = fvmethod isa FVLinear ? FVLinear() : fvmethod if direction isa EveryDirection @test all( result[level] .≈ FT.(expected_result[ dim, level, FT, fv_key, ]), ) elseif direction isa HorizontalDirection @test result[level][1] .≈ FT.(expected_result[ dim, level, FT, fv_key, ])[1] elseif direction isa VerticalDirection @test result[level][1] .≈ FT.(expected_result[ dim, level, FT, fv_key, ])[2] end end @info begin msg = "" for l in 1:(numlevels - 1) rate = @. log2(result[l]) - log2(result[l + 1]) msg *= @sprintf("\n rates for level %d", l) j = 1 if direction isa HorizontalDirection || direction isa EveryDirection msg *= @sprintf(", Horizontal = %e", rate[j]) j += 1 end if direction isa VerticalDirection || direction isa EveryDirection msg *= @sprintf(", Vertical = %e", rate[j]) j += 1 end if direction isa EveryDirection msg *= @sprintf(", Diagonal = %e", rate[j]) end msg *= "\n" end msg end end end end end end end main() ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/fvm_advection_diffusion_model_1dimex_bjfnks.jl ================================================ using MPI using ClimateMachine using Logging using Test using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear using ClimateMachine.MPIStateArrays using ClimateMachine.SystemSolvers using ClimateMachine.ODESolvers using LinearAlgebra using Printf using Dates using 
ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps
using ClimateMachine.VTK: writevtk, writepvtu

# Integration-testing flag: first CLI argument wins, otherwise the
# JULIA_CLIMA_INTEGRATION_TESTING environment variable (default false).
if !@isdefined integration_testing
    if length(ARGS) > 0
        const integration_testing = parse(Bool, ARGS[1])
    else
        const integration_testing = parse(
            Bool,
            lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
        )
    end
end

# Whether to emit VTK output, controlled by JULIA_CLIMA_OUTPUT.
const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false")))

include("advection_diffusion_model.jl")

# Pseudo-1-D problem along a single direction n with advection speed α,
# diffusivity β, Gaussian center μ and initial width δ.
struct Pseudo1D{n, α, β, μ, δ} <: AdvectionDiffusionProblem end

# Fill auxiliary state with the advection velocity and rank-1 diffusion
# tensor aligned with n.
function init_velocity_diffusion!(
    ::Pseudo1D{n, α, β},
    aux::Vars,
    geom::LocalGeometry,
) where {n, α, β}
    # Direction of flow is n with magnitude α
    aux.advection.u = α * n

    # Diffusion of strength β in the n direction
    aux.diffusion.D = β * n * n'
end

# Exact solution: a Gaussian in ξn = n ⋅ x translating with speed α and
# spreading with β (δ + t).
function initial_condition!(
    ::Pseudo1D{n, α, β, μ, δ},
    state,
    aux,
    localgeo,
    t,
) where {n, α, β, μ, δ}
    ξn = dot(n, localgeo.coord)
    # ξT = SVector(localgeo.coord) - ξn * n
    state.ρ = exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
end

# Dirichlet (order 0) boundary data: the exact solution itself.
inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...)
# Analytic boundary data for the first derivative (Val{1}) of the pseudo-1D
# advection-diffusion solution: the gradient of the Gaussian profile set in
# `initial_condition!`, directed along the unit vector `n`.
function inhomogeneous_data!(
    ::Val{1},
    ::Pseudo1D{n, α, β, μ, δ},
    ∇state,
    aux,
    x,
    t,
) where {n, α, β, μ, δ}
    # Coordinate along the flow direction n
    ξn = dot(n, x)
    # Chain rule applied to exp(-(ξn - μ - α t)² / (4 β (δ + t))) / √(1 + t/δ);
    # differentiating w.r.t. position pulls out a factor of n
    ∇state.ρ = -(
        2n * (ξn - μ - α * t) / (4 * β * (δ + t)) *
        exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
    )
end

# Write per-rank VTK files for the current state `Q` and the exact state `Qe`,
# then (on MPI rank 0 only) write the pvtu index file tying the rank files
# together.
function do_output(mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, testname)
    ## Name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )
    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"
    writevtk(filename, Q, dgfvm, statenames, Qe, exactnames)
    ## Generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## Name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)
        ## Name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end
        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )
        @info "Done writing VTK: $pvtuprefix"
    end
end

# Run a single IMEX advection-diffusion solve on a mixed DG (horizontal) /
# FV (vertical, polynomial order 0) discretization and return the final
# L2 error against the analytic solution.
# `N` is the horizontal polynomial order; `n` the flow/diffusion direction;
# `α` the advection speed; `β` the diffusivity; `μ`, `δ` the initial
# Gaussian center and width parameter.
function test_run(
    mpicomm,
    ArrayType,
    dim,
    topl,
    N,
    fvmethod,
    timeend,
    FT,
    dt,
    n,
    α,
    β,
    μ,
    δ,
    vtkdir,
    outputtime,
    fluxBC,
)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = (N, 0),
    )
    # Boundary conditions indexed by the boundary tags assigned by the caller
    bcs = (
        InhomogeneousBC{0}(),
        InhomogeneousBC{1}(),
        HomogeneousBC{0}(),
        HomogeneousBC{1}(),
    )
    model = AdvectionDiffusion{dim}(
        Pseudo1D{n, α, β, μ, δ}(),
        bcs,
        flux_bc = fluxBC,
    )
    # Discretization over every direction (explicit part of the IMEX split)
    dgfvm = DGFVModel(
        model,
        grid,
        fvmethod,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        direction = EveryDirection(),
    )
    # Vertical-only discretization (implicit part); shares the auxiliary
    # state with `dgfvm` so both see identical coefficients
    vdgfvm = DGFVModel(
        model,
        grid,
        fvmethod,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
        state_auxiliary = dgfvm.state_auxiliary,
        direction = VerticalDirection(),
    )
    Q = init_ode_state(dgfvm, FT(0))
    linearsolver = BatchedGeneralizedMinimalResidual(
        dgfvm,
        Q;
        max_subspace_size =
5, atol = sqrt(eps(FT)) * 0.01, rtol = sqrt(eps(FT)) * 0.01, ) nonlinearsolver = JacobianFreeNewtonKrylovSolver(Q, linearsolver; tol = 1e-4) ode_solver = ARK2ImplicitExplicitMidpoint( dgfvm, vdgfvm, NonLinearBackwardEulerSolver( nonlinearsolver; isadjustable = true, preconditioner_update_freq = 1000, ), Q; dt = dt, t0 = 0, split_explicit_implicit = false, ) eng0 = norm(Q) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(ode_solver), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # Create vtk dir mkpath(vtkdir) vtkstep = 0 # Output initial step do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Q, model, "advection_diffusion", ) # Setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dgfvm, gettime(ode_solver)) do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, "advection_diffusion", ) end callbacks = (callbacks..., cbvtk) end numberofsteps = convert(Int64, cld(timeend, dt)) dt = timeend / numberofsteps @info "time step" dt numberofsteps dt * numberofsteps timeend solve!( Q, ode_solver; numberofsteps = numberofsteps, callbacks = callbacks, adjustfinalstep = false, ) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dgfvm, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 base_num_elem = 4 
expected_result = Dict() # dim, refinement level, FT, vertical scheme expected_result[2, 1, Float64, FVConstant()] = 1.4890228213182394e-01 expected_result[2, 2, Float64, FVConstant()] = 1.1396608915201276e-01 expected_result[2, 3, Float64, FVConstant()] = 7.9360293305962212e-02 expected_result[2, 1, Float64, FVLinear()] = 1.0755278583097065e-01 expected_result[2, 2, Float64, FVLinear()] = 4.5924219102504410e-02 expected_result[2, 3, Float64, FVLinear()] = 1.0775256661783415e-02 expected_result[3, 1, Float64, FVConstant()] = 2.1167998484526718e-01 expected_result[3, 2, Float64, FVConstant()] = 1.5936623436529485e-01 expected_result[3, 3, Float64, FVConstant()] = 1.1069147173311458e-01 expected_result[3, 1, Float64, FVLinear()] = 1.5361757743888277e-01 expected_result[3, 2, Float64, FVLinear()] = 6.3622331921472902e-02 expected_result[3, 3, Float64, FVLinear()] = 1.4836612761110498e-02 numlevels = integration_testing ? 3 : 1 @testset "$(@__FILE__)" begin for FT in (Float64,) result = zeros(FT, numlevels) for dim in 2:3 connectivity = dim == 3 ? :full : :face for fvmethod in (FVConstant(), FVLinear()) for fluxBC in (true, false) d = dim == 2 ? FT[1, 10, 0] : FT[1, 1, 10] n = SVector{3, FT}(d ./ norm(d)) α = FT(1) β = FT(1 // 100) μ = FT(-1 // 2) δ = FT(1 // 10) solvertype = "HEVI_Nolinearsolver" for l in 1:numlevels Ne = 2^(l - 1) * base_num_elem brickrange = ( ntuple( j -> range( FT(-1); length = Ne + 1, stop = 1, ), dim - 1, )..., range( FT(-5); length = 5Ne * polynomialorder + 1, stop = 5, ), ) periodicity = ntuple(j -> false, dim) topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = ( ntuple(j -> (1, 2), dim - 1)..., (3, 4), ), connectivity = connectivity, ) dt = 2 * (α / 4) / (Ne * polynomialorder^2) outputtime = 0.01 timeend = 0.5 @info ( ArrayType, FT, dim, solvertype, l, polynomialorder, fvmethod, fluxBC, ) vtkdir = output ? 
"vtk_fvm_advection" * "_poly$(polynomialorder)" * "_dim$(dim)_$(ArrayType)_$(FT)" * "_fvmethod$(fvmethod)" * "_$(solvertype)_level$(l)" : nothing result[l] = test_run( mpicomm, ArrayType, dim, topl, polynomialorder, fvmethod, timeend, FT, dt, n, α, β, μ, δ, vtkdir, outputtime, fluxBC, ) # Test the errors significantly larger than floating point epsilon if !(dim == 2 && l == 4 && FT == Float32) @test result[l] ≈ FT(expected_result[dim, l, FT, fvmethod]) end end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end end ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/fvm_advection_diffusion_periodic.jl ================================================ using MPI using ClimateMachine using Logging using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes import ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using LinearAlgebra using Printf using Test using Dates using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using ClimateMachine.VTK: writevtk, writepvtu if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false"))) include("advection_diffusion_model.jl") struct Pseudo1D{u, v, ν} <: AdvectionDiffusionProblem end # This test has two 2D equations : the first one is an advection equation with # the Gaussian initial condition the second one is an advection diffusion # equation with the Gaussian initial condition function init_velocity_diffusion!( ::Pseudo1D{u, v, ν}, aux::Vars, geom::LocalGeometry, ) where {u, v, ν} # Advection 
# velocity of the flow is [u, v] (continuation of init_velocity_diffusion!)
    uvec = SVector(u, v, 0)
    # Both equations are advected with the same velocity
    aux.advection.u = hcat(uvec, uvec)
    # Diffusion of the flow is νI (isentropic diffusivity); equation 1 is
    # pure advection (zero diffusivity), equation 2 diffuses with ν
    I3 = @SMatrix [1 0 0; 0 1 0; 0 0 1]
    aux.diffusion.D = hcat(0 * I3, ν * I3)
end

# Gaussian bump initial condition, identical for both equations.
function initial_condition!(
    ::Pseudo1D{u, v, ν},
    state,
    aux,
    localgeo,
    t,
) where {u, v, ν}
    FT = typeof(u)
    # The computational domain is [-1.5 1.5]×[-1.5 1.5]
    Lx, Ly = 3, 3
    x, y, _ = localgeo.coord
    # Normalized Gaussian centered at the origin with width Lx / 10
    μx, μz, σ = 0, 0, Lx / 10
    ρ1 = exp.(-(((x - μx) / σ)^2 + ((y - μz) / σ)^2) / 2) / (σ * sqrt(2 * pi))
    ρ2 = exp.(-(((x - μx) / σ)^2 + ((y - μz) / σ)^2) / 2) / (σ * sqrt(2 * pi))
    state.ρ = (ρ1, ρ2)
end

# FIX: this was spelled `inhomogenous_data!` (missing an "e"), which created a
# brand-new function instead of adding a method to the `inhomogeneous_data!`
# hook that the sibling advection-diffusion tests in this directory extend
# (see fvm_advection_diffusion_model_1dimex_bjfnks.jl and
# hyperdiffusion_bc.jl). This test is fully periodic so the hook is never
# exercised here, but as written the misspelled method was dead code.
inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...)

# Write per-rank VTK files for the current state `Q` and the exact state `Qe`,
# then (on MPI rank 0 only) write the pvtu index file.
function do_output(mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, testname)
    ## Name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )
    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"
    writevtk(filename, Q, dgfvm, statenames, Qe, exactnames)
    ## Generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## Name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)
        ## Name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end
        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )
        @info "Done writing VTK: $pvtuprefix"
    end
end

# Build the periodic 2D brick grid at refinement `level` and run one
# advection-diffusion solve; the DG direction and the FV direction are
# given matching point counts so the two discretizations line up.
function test_run(
    mpicomm,
    vtkdir,
    fvmethod,
    polynomialorders,
    level,
    ArrayType,
    FT,
)
    dim = 2
    Lx, Ly = FT(3), FT(3)
    u, v = FT(1), FT(1)
    ν = FT(1 // 100)
    # Grid/topology information
    base_num_elem = 4
    Ne = 2^(level - 1) * base_num_elem
    # Match number of points between the DG and FV directions
    N_dg_point, N_fvm_point = Ne + 1, Ne * polynomialorders[1] + 1
    brickrange = (
        range(-Lx / 2; length = N_dg_point, stop = Lx / 2),
        range(-Ly / 2; length = N_fvm_point, stop = Ly / 2),
    )
    periodicity =
ntuple(j -> true, dim) bc = ntuple(j -> (1, 2), dim) connectivity = dim == 3 ? :full : :face topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = bc, connectivity = connectivity, ) # One period timeend = Lx / u # dt ≤ CFL (Δx / Np²)/u u = √2 CFL = 1/√2 dt = Lx / (Ne * polynomialorders[1]^2) Nt = (Ne * polynomialorders[1]^2) outputtime = Ne * dt grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorders, ) # Model being tested model = AdvectionDiffusion{dim}(Pseudo1D{u, v, ν}(), num_equations = 2) # Main DG discretization dgfvm = DGFVModel( model, grid, fvmethod, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); direction = EveryDirection(), ) # Initialize all relevant state arrays and create solvers Q = init_ode_state(dgfvm, FT(0)) solver = LSRK54CarpenterKennedy(dgfvm, Q; dt = dt, t0 = 0) # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(solver), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # Create vtk dir mkpath(vtkdir) vtkstep = 0 # Output initial step do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Q, model, "advection_diffusion_periodic", ) # Setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dgfvm, gettime(solver)) do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, "advection_diffusion_periodic", ) end callbacks = (callbacks..., cbvtk) end numberofsteps = convert(Int64, cld(timeend, dt)) dt = timeend / numberofsteps @info "time step" dt numberofsteps dt * numberofsteps timeend solve!(Q, solver; timeend = timeend, callbacks = callbacks) engf = 
norm(Q, dims = (1, 3)) # Reference solution Q_ref = init_ode_state(dgfvm, FT(0)) engfe = norm(Q_ref, dims = (1, 3)) errf = norm(Q_ref .- Q, dims = (1, 3)) metrics = [engf; engfe; errf] @info @sprintf """Finished Advection equation: norm(Q) = %.16e norm(Qe) = %.16e norm(Q - Qe) = %.16e Advection diffusion equation: norm(Q) = %.16e norm(Qe) = %.16e norm(Q - Qe) = %.16e """ metrics[:]... return errf end # Run this test problem function main() ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD # Dictionary keys: dim, level, polynomial order, FT, and direction expected_result = Dict() # Dim 2, degree 4 in the horizontal, FV order 1, refinement level, Float64, equation number expected_result[2, 4, FVConstant(), 1, Float64, 1] = 4.5196354911392578e-01 expected_result[2, 4, FVConstant(), 1, Float64, 2] = 4.8622171647472179e-01 expected_result[2, 4, FVConstant(), 2, Float64, 1] = 3.5051983693457450e-01 expected_result[2, 4, FVConstant(), 2, Float64, 2] = 4.1168975732563706e-01 expected_result[2, 4, FVConstant(), 3, Float64, 1] = 2.5130141068332995e-01 expected_result[2, 4, FVConstant(), 3, Float64, 2] = 3.4635415661628755e-01 expected_result[2, 4, FVConstant(), 4, Float64, 1] = 1.6320856055402777e-01 expected_result[2, 4, FVConstant(), 4, Float64, 2] = 2.9647989774687805e-01 expected_result[2, 4, FVConstant(), 5, Float64, 1] = 9.6641259241910610e-02 expected_result[2, 4, FVConstant(), 5, Float64, 2] = 2.6372336617960646e-01 expected_result[2, 4, FVConstant(), 1, Float32, 1] = 4.5196333527565002e-01 expected_result[2, 4, FVConstant(), 1, Float32, 2] = 4.8622152209281921e-01 expected_result[2, 4, FVConstant(), 2, Float32, 1] = 3.5051953792572021e-01 expected_result[2, 4, FVConstant(), 2, Float32, 2] = 4.1168937087059021e-01 expected_result[2, 4, FVConstant(), 3, Float32, 1] = 2.5130018591880798e-01 expected_result[2, 4, FVConstant(), 3, Float32, 2] = 3.4635293483734131e-01 expected_result[2, 4, FVConstant(), 4, Float32, 1] = 
1.6320574283599854e-01 expected_result[2, 4, FVConstant(), 4, Float32, 2] = 2.9647585749626160e-01 expected_result[2, 4, FVConstant(), 5, Float32, 1] = 9.6632070839405060e-02 expected_result[2, 4, FVConstant(), 5, Float32, 2] = 2.6370745897293091e-01 # Dim 2, degree 4 in the horizontal, FV order 1, refinement level, Float64, equation number expected_result[2, 4, FVLinear(), 1, Float64, 1] = 2.2783152269907422e-01 expected_result[2, 4, FVLinear(), 1, Float64, 2] = 3.1823753821941969e-01 expected_result[2, 4, FVLinear(), 2, Float64, 1] = 9.2628269590376469e-02 expected_result[2, 4, FVLinear(), 2, Float64, 2] = 2.4517823755458742e-01 expected_result[2, 4, FVLinear(), 3, Float64, 1] = 3.1401247771425542e-02 expected_result[2, 4, FVLinear(), 3, Float64, 2] = 2.2608977452273774e-01 expected_result[2, 4, FVLinear(), 4, Float64, 1] = 9.8710350648653390e-03 expected_result[2, 4, FVLinear(), 4, Float64, 2] = 2.2377315397364847e-01 expected_result[2, 4, FVLinear(), 5, Float64, 1] = 2.9619222883137744e-03 expected_result[2, 4, FVLinear(), 5, Float64, 2] = 2.2359698753564772e-01 expected_result[2, 4, FVLinear(), 1, Float32, 1] = 2.2783152269907422e-01 expected_result[2, 4, FVLinear(), 1, Float32, 2] = 3.1823753821941969e-01 expected_result[2, 4, FVLinear(), 2, Float32, 1] = 9.2628269590376469e-02 expected_result[2, 4, FVLinear(), 2, Float32, 2] = 2.4517823755458742e-01 expected_result[2, 4, FVLinear(), 3, Float32, 1] = 3.1401247771425542e-02 expected_result[2, 4, FVLinear(), 3, Float32, 2] = 2.2608977452273774e-01 expected_result[2, 4, FVLinear(), 4, Float32, 1] = 9.8710350648653390e-03 expected_result[2, 4, FVLinear(), 4, Float32, 2] = 2.2377315397364847e-01 expected_result[2, 4, FVLinear(), 5, Float32, 1] = 2.9614968225359917e-03 expected_result[2, 4, FVLinear(), 5, Float32, 2] = 2.2358775138854980e-01 polynomialorders = (4, 0) numlevels = integration_testing ? 
5 : 1 # Dictionary keys: dim, level, polynomial order, FT, and direction @testset "$(@__FILE__)" begin for FT in (Float64, Float32) result = Dict() for fvmethod in (FVConstant(), FVLinear(), FVLinear{3}()) @info @sprintf """Test parameters: ArrayType = %s FloatType = %s FV Reconstruction = %s Dimension = %s Horizontal polynomial order = %s Vertical polynomial order = %s """ ArrayType FT fvmethod 2 polynomialorders[1] polynomialorders[end] for level in 1:numlevels result[level] = test_run( mpicomm, output ? "vtk_advection_diffusion_2d" * "_poly$(polynomialorders)" * "_$(ArrayType)_$(FT)" * "_level$(level)" : nothing, fvmethod, polynomialorders, level, ArrayType, FT, ) fv_key = fvmethod isa FVLinear ? FVLinear() : fvmethod @test result[level][1] ≈ FT(expected_result[ 2, polynomialorders[1], fv_key, level, FT, 1, ]) @test result[level][2] ≈ FT(expected_result[ 2, polynomialorders[1], fv_key, level, FT, 2, ]) end @info begin msg = "advection equation" for l in 1:(numlevels - 1) rate = log2(result[l][1]) - log2(result[l + 1][1]) msg *= @sprintf("\n rate for level %d = %e", l, rate) end msg end # Not an analytic solution, convergence doesn't make sense # @info begin # msg = "advection-diffusion equation" # for l in 1:(numlevels - 1) # rate = log2(result[l][2]) - log2(result[l + 1][2]) # msg *= @sprintf("\n rate for level %d = %e", l, rate) # end # msg # end end end end end main() ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/fvm_advection_sphere.jl ================================================ using MPI using ClimateMachine using Logging using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.BalanceLaws: update_auxiliary_state! 
using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.Atmos: SphericalOrientation, latitude, longitude using ClimateMachine.Orientations using LinearAlgebra using Printf using Dates using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using ClimateMachine.VTK: writevtk, writepvtu import ClimateMachine.BalanceLaws: boundary_state! using CLIMAParameters.Planet: planet_radius if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false"))) include("advection_diffusion_model.jl") # This is a setup similar to the one presented in [Williamson1992](@cite) struct SolidBodyRotation <: AdvectionDiffusionProblem end function init_velocity_diffusion!( ::SolidBodyRotation, aux::Vars, geom::LocalGeometry, ) FT = eltype(aux) λ = longitude(SphericalOrientation(), aux) φ = latitude(SphericalOrientation(), aux) r = norm(geom.coord) uλ = 2 * FT(π) * cos(φ) * r uφ = 0 aux.advection.u = SVector( -uλ * sin(λ) - uφ * cos(λ) * sin(φ), +uλ * cos(λ) - uφ * sin(λ) * sin(φ), +uφ * cos(φ), ) end function initial_condition!(::SolidBodyRotation, state, aux, localgeo, t) λ = longitude(SphericalOrientation(), aux) φ = latitude(SphericalOrientation(), aux) state.ρ = exp(-((3λ)^2 + (3φ)^2)) end finaltime(::SolidBodyRotation) = 1 u_scale(::SolidBodyRotation) = 2π """ This is the Divergent flow with smooth initial condition test case, the Case 4 in @article{nair2010class, title={A class of deformational flow test cases for linear transport problems on the sphere}, author={Nair, Ramachandran D and Lauritzen, Peter H}, journal={Journal of Computational Physics}, volume={229}, number={23}, pages={8868--8887}, year={2010}, publisher={Elsevier} } """ struct 
ReversingDeformationalFlow <: AdvectionDiffusionProblem end init_velocity_diffusion!( ::ReversingDeformationalFlow, aux::Vars, geom::LocalGeometry, ) = nothing function initial_condition!(::ReversingDeformationalFlow, state, aux, coord, t) x, y, z = aux.coord r = norm(aux.coord) h_max = 0.95 b = 5 state.ρ = 0 for (λ, φ) in ((5π / 6, 0), (7π / 6, 0)) xi = r * cos(φ) * cos(λ) yi = r * cos(φ) * sin(λ) zi = r * sin(φ) state.ρ += h_max * exp(-b * ((x - xi)^2 + (y - yi)^2 + (z - zi)^2) / r^2) end end has_variable_coefficients(::ReversingDeformationalFlow) = true function update_velocity_diffusion!( ::ReversingDeformationalFlow, ::AdvectionDiffusion, state::Vars, aux::Vars, t::Real, ) FT = eltype(aux) λ = longitude(SphericalOrientation(), aux) φ = latitude(SphericalOrientation(), aux) r = norm(aux.coord) T = FT(5) λp = λ - FT(2π) * t / T uλ = 10 * r / T * sin(λp)^2 * sin(2φ) * cos(FT(π) * t / T) + FT(2π) * r / T * cos(φ) uφ = 10 * r / T * sin(2λp) * cos(φ) * cos(FT(π) * t / T) aux.advection.u = SVector( -uλ * sin(λ) - uφ * cos(λ) * sin(φ), +uλ * cos(λ) - uφ * sin(λ) * sin(φ), +uφ * cos(φ), ) end u_scale(::ReversingDeformationalFlow) = 2.9 finaltime(::ReversingDeformationalFlow) = 5 function advective_courant( m::AdvectionDiffusion, state::Vars, aux::Vars, diffusive::Vars, Δx, Δt, direction, ) return Δt * norm(aux.advection.u) / Δx end struct NoFlowBC end function boundary_state!( ::RusanovNumericalFlux, ::NoFlowBC, ::AdvectionDiffusion, stateP::Vars, auxP::Vars, nM, stateM::Vars, auxM::Vars, t, _..., ) auxP.advection.u = -auxM.advection.u end function do_output(mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, testname) ## Name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dgfvm, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk 
files if MPI.Comm_rank(mpicomm) == 0 ## Name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## Name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end function test_run( mpicomm, ArrayType, vert_range, topl, problem, explicit_method, fvmethod, cfl, N, timeend, FT, vtkdir, outputtime, ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = (N, 0), meshwarp = equiangular_cubed_sphere_warp, ) dx = min_node_distance(grid, HorizontalDirection()) dt = FT(cfl * dx / (vert_range[2] * u_scale(problem())) / N) dt = outputtime / ceil(Int64, outputtime / dt) bcs = (NoFlowBC(),) model = AdvectionDiffusion{3}(problem(), bcs, diffusion = false) dgfvm = DGFVModel( model, grid, fvmethod, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q = init_ode_state(dgfvm, FT(0)) odesolver = explicit_method(dgfvm, Q; dt = dt, t0 = 0) eng0 = norm(Q) @info @sprintf """Starting problem = %s ArrayType = %s FV Reconstruction = %s method = %s time step = %.16e norm(Q₀) = %.16e""" problem ArrayType fvmethod explicit_method dt eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(odesolver), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end cbcfl = EveryXSimulationSteps(10) do dt = ODESolvers.getdt(odesolver) cfl = DGMethods.courant( advective_courant, dgfvm, model, Q, dt, HorizontalDirection(), ) @info @sprintf( """Courant number simtime = %.16e courant = %.16e""", gettime(odesolver), cfl ) end 
callbacks = (cbinfo,) if ~isnothing(vtkdir) # Create vtk dir mkpath(vtkdir) vtkstep = 0 # Output initial step do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Q, model, "fvm_advection_sphere", ) # Setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dgfvm, gettime(odesolver)) do_output( mpicomm, vtkdir, vtkstep, dgfvm, Q, Qe, model, "fvm_advection_sphere", ) end callbacks = (callbacks..., cbvtk) end solve!(Q, odesolver; timeend = timeend, callbacks = callbacks) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dgfvm, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) Δmass = abs(weightedsum(Q) - weightedsum(Qe)) / weightedsum(Qe) @info @sprintf """Finished Δmass = %.16e norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ Δmass engf engf / eng0 engf - eng0 errf errf / engfe return errf, Δmass end using Test let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 base_num_elem = 3 max_cfl = Dict(LSRK144NiegemannDiehlBusch => 5.0) expected_result = Dict() expected_result[SolidBodyRotation, 1, FVConstant] = 1.6249678501611961e+07 expected_result[SolidBodyRotation, 2, FVConstant] = 7.2020207047738554e+05 expected_result[SolidBodyRotation, 3, FVConstant] = 5.2452627634607365e+04 expected_result[SolidBodyRotation, 4, FVConstant] = 3.1132403618328990e+03 expected_result[SolidBodyRotation, 1, FVLinear] = 1.6649963041466445e+07 expected_result[SolidBodyRotation, 2, FVLinear] = 7.2518593691652094e+05 expected_result[SolidBodyRotation, 3, FVLinear] = 5.2473203187596591e+04 expected_result[SolidBodyRotation, 4, FVLinear] = 3.1133127473480104e+03 expected_result[ReversingDeformationalFlow, 1, FVConstant] = 2.0097347028034222e+08 expected_result[ReversingDeformationalFlow, 2, FVConstant] = 7.0092390520693690e+07 
expected_result[ReversingDeformationalFlow, 3, FVConstant] = 7.7527847763998993e+06 expected_result[ReversingDeformationalFlow, 4, FVConstant] = 1.4209138735343830e+05 expected_result[ReversingDeformationalFlow, 1, FVLinear] = 2.0156864805489743e+08 expected_result[ReversingDeformationalFlow, 2, FVLinear] = 7.0092714938572392e+07 expected_result[ReversingDeformationalFlow, 3, FVLinear] = 7.7527849036761308e+06 expected_result[ReversingDeformationalFlow, 4, FVLinear] = 1.4209138776027536e+05 numlevels = integration_testing || ClimateMachine.Settings.integration_testing ? 4 : 1 explicit_method = LSRK144NiegemannDiehlBusch @testset "$(@__FILE__)" begin for FT in (Float64,) for problem in (SolidBodyRotation, ReversingDeformationalFlow) for fvmethod in (FVConstant, FVLinear) cfl = max_cfl[explicit_method] result = zeros(FT, numlevels) for l in 1:numlevels numelems_horizontal = 2^(l - 1) * base_num_elem numelems_vertical = 3 _planet_radius = FT(planet_radius(param_set)) domain_height = FT(10e3) vert_range = (_planet_radius, _planet_radius + domain_height) topl = StackedCubedSphereTopology( mpicomm, numelems_horizontal, range( vert_range[1], stop = vert_range[2], length = numelems_vertical + 1, ), ) timeend = finaltime(problem()) outputtime = timeend @info (ArrayType, FT) vtkdir = output ? 
"vtk_fvm_advection_sphere" * "_$problem" * "_$explicit_method" * "_poly$(polynomialorder)" * "_$(ArrayType)_$(FT)" * "_level$(l)" : nothing result[l], Δmass = test_run( mpicomm, ArrayType, vert_range, topl, problem, explicit_method, fvmethod(), cfl, polynomialorder, timeend, FT, vtkdir, outputtime, ) @test result[l] ≈ FT(expected_result[problem, l, fvmethod]) @test Δmass <= FT(1e-12) end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/fvm_swirl.jl ================================================ # This tutorial uses the TMAR Filter from [Light2016](@cite) # # to reproduce the tutorial in section 4b. It is a shear swirling # flow deformation of a transported quantity from LeVeque 1996. The exact # solution at the final time is the same as the initial condition. 
using MPI
using Test
using ClimateMachine
ClimateMachine.init()
using Logging
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Filters
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using LinearAlgebra
using Printf
using Dates
using ClimateMachine.GenericCallbacks:
    EveryXWallTimeSeconds, EveryXSimulationSteps
using ClimateMachine.VTK: writevtk, writepvtu
using ClimateMachine

const clima_dir = dirname(dirname(pathof(ClimateMachine)))

# Enable the expensive convergence study only when requested via ARGS/ENV
if !@isdefined integration_testing
    const integration_testing = parse(
        Bool,
        lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
    )
end

# Toggle VTK output via the environment
const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false")))

include("advection_diffusion_model.jl")

# Time-periodic swirling (shear-deformation) flow; after one `period` the
# exact solution coincides with the initial condition.
Base.@kwdef struct SwirlingFlow{FT} <: AdvectionDiffusionProblem
    period::FT = 5
end

# Velocity is time dependent (see update_velocity_diffusion! below), so
# nothing is initialized here.
init_velocity_diffusion!(::SwirlingFlow, aux::Vars, geom::LocalGeometry) =
    nothing

# Gaussian bump centered at (1/3, 1/3).
function initial_condition!(::SwirlingFlow, state, aux, localgeo, t)
    FT = eltype(state)
    x, y, _ = aux.coord
    x0, y0 = FT(1 // 3), FT(1 // 3)
    # 6 × distance from the bump center
    τ = 6hypot(x - x0, y - y0)
    state.ρ = exp(-τ^2)
end

# Velocity coefficients vary in time and must be recomputed each step
has_variable_coefficients(::SwirlingFlow) = true

# Swirling velocity field; the cospi(t / period) factor reverses the flow
# halfway through the period so the field returns to its initial state.
function update_velocity_diffusion!(
    problem::SwirlingFlow,
    ::AdvectionDiffusion,
    state::Vars,
    aux::Vars,
    t::Real,
)
    x, y, _ = aux.coord
    sx, cx = sinpi(x), cospi(x)
    sy, cy = sinpi(y), cospi(y)
    ct = cospi(t / problem.period)
    u = 2 * sx^2 * sy * cy * ct
    v = -2 * sy^2 * sx * cx * ct
    aux.advection.u = SVector(u, v, 0)
end

# Write per-rank VTK files for the current state `Q`, then (on MPI rank 0
# only) write the pvtu index file. `number_sample_points` controls
# interpolation of the output onto equally spaced points.
function do_output(
    mpicomm,
    vtkdir,
    vtkstep,
    dg,
    Q,
    model,
    testname;
    number_sample_points = 0,
)
    ## Name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )
    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    writevtk(
        filename,
        Q,
        dg,
statenames; number_sample_points = number_sample_points, ) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## Name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## Name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu(pvtuprefix, prefixes, (statenames...,), eltype(Q)) @info "Done writing VTK: $pvtuprefix" end end function test_run( mpicomm, ArrayType, fvmethod, topl, problem, dt, N, timeend, FT, vtkdir, outputtime, ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) bcs = (HomogeneousBC{0}(),) model = AdvectionDiffusion{2}(problem, bcs, diffusion = false) dg = DGFVModel( model, grid, fvmethod, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q = init_ode_state(dg, FT(0)) ## We integrate so that the final solution is equal to the initial solution Qe = copy(Q) odesolver = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) # Set up the information callback starttime = Ref(Dates.now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = Dates.now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(odesolver), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # Create vtk dir if MPI.Comm_rank(mpicomm) == 0 mkpath(vtkdir) end MPI.Barrier(mpicomm) vtkstep = 0 # Output initial step do_output( mpicomm, vtkdir, vtkstep, dg, Q, model, "swirling_flow"; number_sample_points = N[1] + 1, ) # Setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(odesolver)) do_output( mpicomm, vtkdir, vtkstep, dg, Q, model, "swirling_flow"; number_sample_points = N[1] + 
1, ) end callbacks = (callbacks..., cbvtk) end solve!(Q, odesolver; timeend = timeend, callbacks = callbacks) error = euclidean_distance(Q, Qe) # Print some end of the simulation information eng0 = norm(Qe) engf = norm(Q) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end let ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD expected_result = Dict() expected_result[1, FVConstant()] = 1.2437314516997458e-01 expected_result[2, FVConstant()] = 9.8829388561567838e-02 expected_result[3, FVConstant()] = 7.2096312912198937e-02 expected_result[4, FVConstant()] = 4.7720226730379636e-02 expected_result[1, FVLinear()] = 4.3807967239640234e-02 expected_result[2, FVLinear()] = 1.4913083518239653e-02 expected_result[3, FVLinear()] = 4.3666055848495802e-03 expected_result[4, FVLinear()] = 1.2749240022458719e-03 @testset "$(@__FILE__)" begin numlevels = integration_testing || ClimateMachine.Settings.integration_testing ? 
4 : 1 FT = Float64 for fvmethod in (FVConstant(), FVLinear()) result = zeros(FT, numlevels) for l in 1:numlevels Ne = 20 * 2^(l - 1) polynomialorder = (4, 0) problem = SwirlingFlow() brickrange = ( range(FT(0); length = Ne + 1, stop = 1), range( FT(0); length = polynomialorder[1] * Ne + 1, stop = 1, ), ) topology = StackedBrickTopology( mpicomm, brickrange, boundary = ((1, 1), (1, 1)), connectivity = :face, ) maxvelocity = 2 elementsize = 1 / Ne dx = elementsize / polynomialorder[1]^2 CFL = 1 dt = CFL * dx / maxvelocity vtkdir = abspath(joinpath( ClimateMachine.Settings.output_dir, "fvm_swirl_lvl_$l", )) timeend = problem.period outputtime = timeend / 10 dt = outputtime / ceil(Int64, outputtime / dt) @info @sprintf """Starting FT = %s ArrayType = %s FV Reconstuction = %s dim = %d Ne = %d polynomial order = %d final time = %.16e time step = %.16e """ FT ArrayType fvmethod 2 Ne polynomialorder[1] timeend dt result[l] = test_run( mpicomm, ArrayType, fvmethod, topology, problem, dt, polynomialorder, timeend, FT, output ? 
vtkdir : nothing, outputtime, ) @test result[l] ≈ FT(expected_result[l, fvmethod]) end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf("\n rate for level %d = %e\n", l, rate) end msg end end end end ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/hyperdiffusion_bc.jl ================================================ using MPI using ClimateMachine using Logging using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using LinearAlgebra using Printf using Dates using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using ClimateMachine.ODESolvers using ClimateMachine.VTK: writevtk, writepvtu using ClimateMachine.Mesh.Grids: min_node_distance const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false"))) if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end include("advection_diffusion_model.jl") struct ConstantHyperDiffusion{FT} <: AdvectionDiffusionProblem μ::FT k::SVector{3, FT} end function init_velocity_diffusion!( problem::ConstantHyperDiffusion, aux::Vars, geom::LocalGeometry, ) FT = eltype(aux) aux.hyperdiffusion.H = problem.μ * SMatrix{3, 3, FT}(I) end function initial_condition!( problem::ConstantHyperDiffusion, state, aux, localgeo, t, ) x, y, z = localgeo.coord k = problem.k k2 = sum(k .^ 2) @inbounds begin state.ρ = cos(k[1] * x) * cos(k[2] * y) * cos(k[3] * z) * exp(-k2^2 * problem.μ * t) end end # Boundary conditions data inhomogeneous_data!(::Val{0}, p::ConstantHyperDiffusion, x...) = initial_condition!(p, x...) 
# Exact boundary data for the first-derivative (Val{1}) boundary condition:
# the analytic gradient of ρ(x, t) = Πᵢ cos(kᵢ xᵢ) ⋅ exp(-|k|⁴ μ t).
function inhomogeneous_data!(
    ::Val{1},
    problem::ConstantHyperDiffusion,
    ∇state,
    aux,
    x,
    t,
)
    k = problem.k
    k2 = sum(k .^ 2)
    # time-decay factor shared by every component
    decay = exp(-k2^2 * problem.μ * t)
    ∇state.ρ =
        -SVector(
            k[1] * sin(k[1] * x[1]) * cos(k[2] * x[2]) * cos(k[3] * x[3]),
            k[2] * cos(k[1] * x[1]) * sin(k[2] * x[2]) * cos(k[3] * x[3]),
            k[3] * cos(k[1] * x[1]) * cos(k[2] * x[2]) * sin(k[3] * x[3]),
        ) * decay
end

# Exact boundary data for the Laplacian (Val{2}): Δρ = -|k|² ρ.
function inhomogeneous_data!(
    ::Val{2},
    problem::ConstantHyperDiffusion,
    Δstate,
    aux,
    x,
    t,
)
    k = problem.k
    k2 = sum(k .^ 2)
    Δstate.ρ =
        -k2 *
        cos(k[1] * x[1]) *
        cos(k[2] * x[2]) *
        cos(k[3] * x[3]) *
        exp(-k2^2 * problem.μ * t)
end

# Exact boundary data for the gradient of the Laplacian (Val{3}):
# ∇Δρ = |k|² ⋅ (gradient of ρ with flipped sign pattern) ⋅ decay.
function inhomogeneous_data!(
    ::Val{3},
    problem::ConstantHyperDiffusion,
    ∇Δstate,
    aux,
    x,
    t,
)
    k = problem.k
    k2 = sum(k .^ 2)
    decay = exp(-k2^2 * problem.μ * t)
    ∇Δstate.ρ =
        k2 * SVector(
            k[1] * sin(k[1] * x[1]) * cos(k[2] * x[2]) * cos(k[3] * x[3]),
            k[2] * cos(k[1] * x[1]) * sin(k[2] * x[2]) * cos(k[3] * x[3]),
            k[3] * cos(k[1] * x[1]) * cos(k[2] * x[2]) * sin(k[3] * x[3]),
        ) * decay
end

# Write this rank's VTK file for the current step and, on rank 0, the pvtu
# index file that stitches all ranks' output together.
function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname)
    ## name of the file that this MPI rank will write
    rank = MPI.Comm_rank(mpicomm)
    filename =
        @sprintf("%s/%s_mpirank%04d_step%04d", vtkdir, testname, rank, vtkstep)

    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    exactnames = statenames .* "_exact"

    writevtk(filename, Q, dg, statenames, Qe, exactnames)

    ## Generate the pvtu file for these vtk files
    if rank == 0
        ## name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)

        ## name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end

        writepvtu(
            pvtuprefix,
            prefixes,
            (statenames..., exactnames...),
            eltype(Q),
        )

        @info "Done writing VTK: $pvtuprefix"
    end
end

# Run one hyperdiffusion-with-boundary-conditions experiment on the given
# topology and return the final L² error against the exact solution.
function test_run(
    mpicomm,
    ArrayType,
    dim,
    topl,
    N,
    timeend,
    FT,
    vtkdir,
    outputtime,
)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    μ = 1 // 1000
    dx = min_node_distance(grid)
    # explicit stability restriction for a fourth-order operator: dt ∝ dx⁴ / μ
    dt = dx^4 / 100 / μ
    # shrink dt so an integer number of steps lands exactly on outputtime
    dt = outputtime / ceil(Int64, outputtime / dt)

    # mixed inhomogeneous/homogeneous boundary conditions on the six faces
    bcx1 = (InhomogeneousBC{0}(), InhomogeneousBC{2}())
    bcx2 = (InhomogeneousBC{0}(), InhomogeneousBC{1}())
    bcy1 = (InhomogeneousBC{3}(), InhomogeneousBC{1}())
    bcy2 = (InhomogeneousBC{3}(), InhomogeneousBC{2}())
    bcz1 = (HomogeneousBC{3}(), HomogeneousBC{1}())
    bcz2 = (HomogeneousBC{3}(), HomogeneousBC{1}())
    # wavenumber has no z-component, consistent with the homogeneous z BCs
    k = SVector(1, 1, 0)
    bcs = (bcx1, bcx2, bcy1, bcy2, bcz1, bcz2)

    model = AdvectionDiffusion{dim}(
        ConstantHyperDiffusion{FT}(μ, k),
        bcs;
        advection = false,
        diffusion = false,
        hyperdiffusion = true,
    )
    dg = DGModel(
        model,
        grid,
        CentralNumericalFluxFirstOrder(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q = init_ode_state(dg, FT(0))
    lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0)

    eng0 = norm(Q)
    @info @sprintf """Starting
    dim = %d
    dt = %.16e
    norm(Q₀) = %.16e""" dim dt eng0

    # Set up the information callback
    starttime = Ref(now())
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            @info @sprintf(
                """Update
                simtime = %.16e
                runtime = %s
                norm(Q) = %.16e""",
                gettime(lsrk),
                Dates.format(
                    convert(Dates.DateTime, Dates.now() - starttime[]),
                    Dates.dateformat"HH:MM:SS",
                ),
                energy
            )
        end
    end
    callbacks = (cbinfo,)

    if ~isnothing(vtkdir)
        # create vtk dir
        mkpath(vtkdir)

        vtkstep = 0
        # output initial step
        do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model, "hyperdiffusion")

        # setup the output callback
        cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do
            vtkstep += 1
            Qe = init_ode_state(dg, gettime(lsrk))
            do_output(
                mpicomm,
                vtkdir,
                vtkstep,
                dg,
                Q,
                Qe,
                model,
                "hyperdiffusion",
            )
        end
        callbacks = (callbacks..., cbvtk)
    end

    solve!(Q, lsrk; timeend = timeend, callbacks = callbacks)

    # Print some end of the simulation information
    engf = norm(Q)
    Qe = init_ode_state(dg, FT(timeend))
    engfe = norm(Qe)
    errf = euclidean_distance(Q, Qe)
    @info @sprintf """Finished
    norm(Q) = %.16e
    norm(Q) / norm(Q₀) = %.16e
    norm(Q) - norm(Q₀) = %.16e
    norm(Q - Qe) = %.16e
    norm(Q - Qe) / norm(Qe) = %.16e
    """ engf engf / eng0 engf - eng0 errf errf / engfe
    errf
end

using Test
let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    polynomialorder = 4
    base_num_elem = 4

    # regression values keyed by (dim, refinement level, float type)
    expected_result = Dict()
    expected_result[2, 1, Float64] = 1.1666787574326038e-03
    expected_result[2, 2, Float64] = 8.5948289965604964e-05
    expected_result[2, 3, Float64] = 2.5117423516568199e-06
    expected_result[3, 1, Float64] = 3.0867418520680958e-03
    expected_result[3, 2, Float64] = 2.2739780086168765e-04
    expected_result[3, 3, Float64] = 6.6454456228180395e-06

    numlevels =
        integration_testing || ClimateMachine.Settings.integration_testing ?
        3 : 1
    for FT in (Float64,)
        result = zeros(FT, numlevels)
        for dim in (2, 3)
            for l in 1:numlevels
                Ne = 2^(l - 1) * base_num_elem
                xrange = range(FT(2); length = Ne + 1, stop = FT(9))
                brickrange = ntuple(j -> xrange, dim)
                periodicity = ntuple(j -> false, dim)
                connectivity = dim == 2 ? :face : :full
                boundary = ((1, 2), (3, 4), (5, 6))[1:dim]
                topl = StackedBrickTopology(
                    mpicomm,
                    brickrange;
                    periodicity,
                    boundary,
                    connectivity,
                )
                timeend = 1
                outputtime = timeend

                @info (ArrayType, FT)
                vtkdir =
                    output ?
                    "vtk_hyperdiffusion_bc" *
                    "_poly$(polynomialorder)" *
                    "_dim$(dim)_$(ArrayType)_$(FT)" *
                    "_level$(l)" : nothing
                result[l] = test_run(
                    mpicomm,
                    ArrayType,
                    dim,
                    topl,
                    polynomialorder,
                    timeend,
                    FT,
                    vtkdir,
                    outputtime,
                )
                @test result[l] ≈ FT(expected_result[dim, l, FT])
            end
            # report observed convergence rates between successive levels
            @info begin
                msg = ""
                for l in 1:(numlevels - 1)
                    rate = log2(result[l]) - log2(result[l + 1])
                    msg *= @sprintf("\n rate for level %d = %e\n", l, rate)
                end
                msg
            end
        end
    end
end

nothing

================================================
FILE: test/Numerics/DGMethods/advection_diffusion/hyperdiffusion_model.jl
================================================
using StaticArrays
using ClimateMachine.VariableTemplates
using ClimateMachine.BalanceLaws:
    BalanceLaw,
    Prognostic,
    Auxiliary,
    Gradient,
    GradientFlux,
    GradientLaplacian,
    Hyperdiffusive
import ClimateMachine.BalanceLaws:
    vars_state,
    flux_first_order!,
    flux_second_order!,
    source!,
    compute_gradient_argument!,
    compute_gradient_flux!,
    nodal_init_state_auxiliary!,
    init_state_prognostic!,
    boundary_conditions,
    boundary_state!,
    wavespeed,
    transform_post_gradient_laplacian!
using ClimateMachine.Mesh.Geometry: LocalGeometry using ClimateMachine.DGMethods.NumericalFluxes: NumericalFluxFirstOrder, NumericalFluxSecondOrder abstract type HyperDiffusionProblem end struct HyperDiffusion{dim, P} <: BalanceLaw problem::P function HyperDiffusion{dim}( problem::P, ) where {dim, P <: HyperDiffusionProblem} new{dim, P}(problem) end end vars_state(::HyperDiffusion, ::Auxiliary, FT) = @vars(D::SMatrix{3, 3, FT, 9}) # # Density is only state vars_state(::HyperDiffusion, ::Prognostic, FT) = @vars(ρ::FT) # Take the gradient of density vars_state(::HyperDiffusion, ::Gradient, FT) = @vars(ρ::FT) # Take the gradient of laplacian of density vars_state(::HyperDiffusion, ::GradientLaplacian, FT) = @vars(ρ::FT) vars_state(::HyperDiffusion, ::GradientFlux, FT) = @vars() # The hyperdiffusion DG auxiliary variable: D ∇ Δρ vars_state(::HyperDiffusion, ::Hyperdiffusive, FT) = @vars(σ::SVector{3, FT}) function flux_first_order!(m::HyperDiffusion, _...) end """ flux_second_order!(m::HyperDiffusion, flux::Grad, state::Vars, auxDG::Vars, auxHDG::Vars, aux::Vars, t::Real) Computes diffusive flux `F` in: ``` ∂ρ -- = - ∇ • (σ) = - ∇ • F ∂t ``` Where - `σ` is hyperdiffusion DG auxiliary variable (`σ = D ∇ Δρ` with D being the hyperdiffusion tensor) """ function flux_second_order!( m::HyperDiffusion, flux::Grad, state::Vars, auxDG::Vars, auxHDG::Vars, aux::Vars, t::Real, ) σ = auxHDG.σ flux.ρ += σ end """ compute_gradient_argument!(m::HyperDiffusion, transform::Vars, state::Vars, aux::Vars, t::Real) Set the variable to take the gradient of (`ρ` in this case) """ function compute_gradient_argument!( m::HyperDiffusion, transform::Vars, state::Vars, aux::Vars, t::Real, ) transform.ρ = state.ρ end compute_gradient_flux!(m::HyperDiffusion, _...) = nothing function transform_post_gradient_laplacian!( m::HyperDiffusion, auxHDG::Vars, gradvars::Grad, state::Vars, aux::Vars, t::Real, ) ∇Δρ = gradvars.ρ D = aux.D auxHDG.σ = D * ∇Δρ end """ source!(m::HyperDiffusion, _...) 
There is no source in the hyperdiffusion model """ source!(m::HyperDiffusion, _...) = nothing function init_state_prognostic!( m::HyperDiffusion, state::Vars, aux::Vars, localgeo, t::Real, ) initial_condition!(m.problem, state, aux, localgeo, t) end boundary_conditions(::HyperDiffusion) = () boundary_state!(nf, ::HyperDiffusion, _...) = nothing ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/periodic_3D_hyperdiffusion.jl ================================================ using MPI using ClimateMachine using Logging using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using LinearAlgebra using Printf using Dates using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using ClimateMachine.ODESolvers using ClimateMachine.VTK: writevtk, writepvtu using ClimateMachine.Mesh.Grids: min_node_distance const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false"))) if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end include("advection_diffusion_model.jl") struct ConstantHyperDiffusion{dim, dir, FT} <: AdvectionDiffusionProblem D::SMatrix{3, 3, FT, 9} end function init_velocity_diffusion!( problem::ConstantHyperDiffusion, aux::Vars, geom::LocalGeometry, ) where {n, α, β} aux.hyperdiffusion.H = problem.D end function initial_condition!( problem::ConstantHyperDiffusion{dim, dir}, state, aux, localgeo, t, ) where {dim, dir} @inbounds begin k = SVector(1, 2, 3) kD = k * k' .* problem.D if dir === EveryDirection() c = sum(abs2, k[SOneTo(dim)]) * sum(kD[SOneTo(dim), SOneTo(dim)]) elseif dir === HorizontalDirection() c = sum(abs2, k[SOneTo(dim - 1)]) * sum(kD[SOneTo(dim - 1), SOneTo(dim - 1)]) elseif dir === VerticalDirection() c = k[dim]^2 * kD[dim, dim] end x = 
localgeo.coord state.ρ = sin(dot(k[SOneTo(dim)], x[SOneTo(dim)])) * exp(-c * t) end end function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end function test_run( mpicomm, ArrayType, dim, topl, N, timeend, FT, direction, D, vtkdir, outputtime, ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) dx = min_node_distance(grid) dt = dx^4 / 25 / sum(D) @info "time step" dt dt = outputtime / ceil(Int64, outputtime / dt) model = AdvectionDiffusion{dim}( ConstantHyperDiffusion{dim, direction(), FT}(D); advection = false, diffusion = false, hyperdiffusion = true, ) dg = DGModel( model, grid, CentralNumericalFluxFirstOrder(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), direction = direction(), ) Q = init_ode_state(dg, FT(0)) lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) eng0 = norm(Q) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(lsrk), Dates.format( convert(Dates.DateTime, 
Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # create vtk dir mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, dg, Q, Q, model, "hyperdiffusion") # setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(lsrk)) do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, "hyperdiffusion", ) end callbacks = (callbacks..., cbvtk) end solve!(Q, lsrk; timeend = timeend, callbacks = callbacks) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dg, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end using Test let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD numlevels = integration_testing || ClimateMachine.Settings.integration_testing ? 
3 : 1 polynomialorder = 4 base_num_elem = 4 expected_result = Dict() expected_result[2, 1, Float64, EveryDirection] = 7.6772960298563120e-03 expected_result[2, 2, Float64, EveryDirection] = 2.3268371815073617e-03 expected_result[2, 3, Float64, EveryDirection] = 4.2641957936779901e-05 expected_result[2, 1, Float64, HorizontalDirection] = 8.4650675812606650e-04 expected_result[2, 2, Float64, HorizontalDirection] = 4.4626814795055979e-05 expected_result[2, 3, Float64, HorizontalDirection] = 1.2193396277823764e-06 expected_result[2, 1, Float64, VerticalDirection] = 6.1690465335730834e-03 expected_result[2, 2, Float64, VerticalDirection] = 2.3407593209031621e-03 expected_result[2, 3, Float64, VerticalDirection] = 4.3775160749787010e-05 expected_result[3, 1, Float64, EveryDirection] = 1.7363355506160003e-01 expected_result[3, 2, Float64, EveryDirection] = 7.3049474767548042e-02 expected_result[3, 3, Float64, EveryDirection] = 5.8530711333407105e-04 expected_result[3, 1, Float64, HorizontalDirection] = 1.9244127301149615e-02 expected_result[3, 2, Float64, HorizontalDirection] = 5.8325158696244947e-03 expected_result[3, 3, Float64, HorizontalDirection] = 1.0688753745025491e-04 expected_result[3, 1, Float64, VerticalDirection] = 1.4412891107361228e-01 expected_result[3, 2, Float64, VerticalDirection] = 6.3744013545812925e-02 expected_result[3, 3, Float64, VerticalDirection] = 9.0891011404938341e-04 numlevels = integration_testing ? 3 : 1 for FT in (Float64,) D = 1 // 100 * SMatrix{3, 3, FT}( 9 // 50, 3 // 50, 5 // 50, 3 // 50, 7 // 50, 4 // 50, 5 // 50, 4 // 50, 10 // 50, ) result = zeros(FT, numlevels) for dim in (2, 3) connectivity = dim == 2 ? 
:face : :full for direction in (EveryDirection, HorizontalDirection, VerticalDirection) for l in 1:numlevels Ne = 2^(l - 1) * base_num_elem xrange = range(FT(0); length = Ne + 1, stop = FT(2pi)) brickrange = ntuple(j -> xrange, dim) periodicity = ntuple(j -> true, dim) topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, connectivity = connectivity, ) timeend = 1 outputtime = 1 @info (ArrayType, FT, dim, direction) vtkdir = output ? "vtk_hyperdiffusion" * "_poly$(polynomialorder)" * "_dim$(dim)_$(ArrayType)_$(FT)_$(direction)" * "_level$(l)" : nothing result[l] = test_run( mpicomm, ArrayType, dim, topl, polynomialorder, timeend, FT, direction, D, vtkdir, outputtime, ) @test result[l] ≈ FT(expected_result[dim, l, FT, direction]) end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf("\n rate for level %d = %e\n", l, rate) end msg end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion.jl ================================================ using MPI using ClimateMachine using Logging using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using LinearAlgebra using Printf using Dates using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using ClimateMachine.VTK: writevtk, writepvtu if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false"))) include("advection_diffusion_model.jl") struct Pseudo1D{n, α, β, μ, δ} <: AdvectionDiffusionProblem end function init_velocity_diffusion!( ::Pseudo1D{n, α, β}, aux::Vars, geom::LocalGeometry, ) where {n, α, 
β} # Direction of flow is n with magnitude α aux.advection.u = α * n # diffusion of strength β in the n direction aux.diffusion.D = β * n * n' end function initial_condition!( ::Pseudo1D{n, α, β, μ, δ}, state, aux, localgeo, t, ) where {n, α, β, μ, δ} ξn = dot(n, localgeo.coord) # ξT = SVector(localgeo.coord) - ξn * n state.ρ = exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ) end inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...) function inhomogeneous_data!( ::Val{1}, ::Pseudo1D{n, α, β, μ, δ}, ∇state, aux, x, t, ) where {n, α, β, μ, δ} ξn = dot(n, x) ∇state.ρ = -( 2n * (ξn - μ - α * t) / (4 * β * (δ + t)) * exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ) ) end function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end function test_run( mpicomm, ArrayType, dim, topl, N, timeend, FT, direction, dt, n, α, β, μ, δ, vtkdir, outputtime, fluxBC, ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) bcs = (InhomogeneousBC{0}(), InhomogeneousBC{1}()) model = AdvectionDiffusion{dim}( Pseudo1D{n, α, β, μ, δ}(), bcs, flux_bc = fluxBC, ) dg = DGModel( model, grid, RusanovNumericalFlux(), 
CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), direction = direction(), ) Q = init_ode_state(dg, FT(0)) lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) eng0 = norm(Q) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(lsrk), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # create vtk dir mkpath(vtkdir) vtkstep = 0 # output initial step do_output( mpicomm, vtkdir, vtkstep, dg, Q, Q, model, "advection_diffusion", ) # setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(lsrk)) do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, "advection_diffusion", ) end callbacks = (callbacks..., cbvtk) end solve!(Q, lsrk; timeend = timeend, callbacks = callbacks) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dg, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end using Test let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 base_num_elem = 4 expected_result = Dict() expected_result[2, 1, Float64, EveryDirection] = 1.2357162985295326e-02 expected_result[2, 2, Float64, EveryDirection] = 8.8403537443388974e-04 expected_result[2, 3, Float64, EveryDirection] = 4.9490976821250842e-05 expected_result[2, 4, Float64, EveryDirection] = 2.0311063939957157e-06 expected_result[2, 1, Float64, HorizontalDirection] = 
4.6783743619257120e-02 expected_result[2, 2, Float64, HorizontalDirection] = 4.0665567827235134e-03 expected_result[2, 3, Float64, HorizontalDirection] = 5.3144336694498106e-05 expected_result[2, 4, Float64, HorizontalDirection] = 3.9780001102022647e-07 expected_result[2, 1, Float64, VerticalDirection] = 4.6783743619257120e-02 expected_result[2, 2, Float64, VerticalDirection] = 4.0665567827234102e-03 expected_result[2, 3, Float64, VerticalDirection] = 5.3144336694469002e-05 expected_result[2, 4, Float64, VerticalDirection] = 3.9780001102679913e-07 expected_result[3, 1, Float64, EveryDirection] = 9.6252415559793265e-03 expected_result[3, 2, Float64, EveryDirection] = 6.0160564826756122e-04 expected_result[3, 3, Float64, EveryDirection] = 4.0359079531195022e-05 expected_result[3, 4, Float64, EveryDirection] = 2.9987655364452885e-06 expected_result[3, 1, Float64, HorizontalDirection] = 1.7475667486259477e-02 expected_result[3, 2, Float64, HorizontalDirection] = 1.2502148161420126e-03 expected_result[3, 3, Float64, HorizontalDirection] = 6.9990810635609785e-05 expected_result[3, 4, Float64, HorizontalDirection] = 2.8724182090259972e-06 expected_result[3, 1, Float64, VerticalDirection] = 6.6162204724938736e-02 expected_result[3, 2, Float64, VerticalDirection] = 5.7509797542876833e-03 expected_result[3, 3, Float64, VerticalDirection] = 7.5157441716412944e-05 expected_result[3, 4, Float64, VerticalDirection] = 5.6257417070312578e-07 expected_result[2, 1, Float32, EveryDirection] = 1.2357043102383614e-02 expected_result[2, 2, Float32, EveryDirection] = 8.8409741874784231e-04 expected_result[2, 3, Float32, EveryDirection] = 4.9193818995263427e-05 expected_result[2, 1, Float32, HorizontalDirection] = 4.6783901751041412e-02 expected_result[2, 2, Float32, HorizontalDirection] = 4.0663350373506546e-03 expected_result[2, 3, Float32, HorizontalDirection] = 5.3044739615870640e-05 expected_result[2, 1, Float32, VerticalDirection] = 4.6783857047557831e-02 expected_result[2, 2, 
Float32, VerticalDirection] = 4.0663424879312515e-03 expected_result[2, 3, Float32, VerticalDirection] = 5.3172039770288393e-05 expected_result[3, 1, Float32, EveryDirection] = 9.6252141520380974e-03 expected_result[3, 2, Float32, EveryDirection] = 6.0162233421579003e-04 expected_result[3, 3, Float32, EveryDirection] = 4.0522103518014774e-05 expected_result[3, 1, Float32, HorizontalDirection] = 1.7475549131631851e-02 expected_result[3, 2, Float32, HorizontalDirection] = 1.2503013713285327e-03 expected_result[3, 3, Float32, HorizontalDirection] = 7.2867427661549300e-05 expected_result[3, 1, Float32, VerticalDirection] = 6.6162362694740295e-02 expected_result[3, 2, Float32, VerticalDirection] = 5.7506239973008633e-03 expected_result[3, 3, Float32, VerticalDirection] = 1.0193029447691515e-04 @testset "$(@__FILE__)" begin for FT in (Float64, Float32) numlevels = integration_testing || ClimateMachine.Settings.integration_testing ? (FT == Float64 ? 4 : 3) : 1 result = zeros(FT, numlevels) for dim in 2:3 connectivity = dim == 2 ? :face : :full for direction in (EveryDirection, HorizontalDirection, VerticalDirection) for fluxBC in (true, false) if direction <: EveryDirection n = dim == 2 ? SVector{3, FT}(1 / sqrt(2), 1 / sqrt(2), 0) : SVector{3, FT}( 1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3), ) elseif direction <: HorizontalDirection n = dim == 2 ? SVector{3, FT}(1, 0, 0) : SVector{3, FT}(1 / sqrt(2), 1 / sqrt(2), 0) elseif direction <: VerticalDirection n = dim == 2 ? 
SVector{3, FT}(0, 1, 0) : SVector{3, FT}(0, 0, 1) end α = FT(1) β = FT(1 // 100) μ = FT(-1 // 2) δ = FT(1 // 10) for l in 1:numlevels Ne = 2^(l - 1) * base_num_elem brickrange = ntuple( j -> range(FT(-1); length = Ne + 1, stop = 1), dim, ) periodicity = ntuple(j -> false, dim) bc = ntuple(j -> (1, 2), dim) topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = bc, connectivity = connectivity, ) dt = (α / 4) / (Ne * polynomialorder^2) @info "time step" dt timeend = 1 outputtime = 1 dt = outputtime / ceil(Int64, outputtime / dt) @info (ArrayType, FT, dim, direction, fluxBC) vtkdir = output ? "vtk_advection" * "_poly$(polynomialorder)" * "_dim$(dim)_$(ArrayType)_$(FT)_$(direction)" * "_level$(l)" : nothing result[l] = test_run( mpicomm, ArrayType, dim, topl, polynomialorder, timeend, FT, direction, dt, n, α, β, μ, δ, vtkdir, outputtime, fluxBC, ) @test result[l] ≈ FT(expected_result[dim, l, FT, direction]) end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion_1dimex.jl ================================================ using MPI using ClimateMachine using Logging using Test using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.SystemSolvers using ClimateMachine.ODESolvers using LinearAlgebra using Printf using Dates using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using ClimateMachine.VTK: writevtk, writepvtu if !@isdefined integration_testing if length(ARGS) > 0 const integration_testing = parse(Bool, ARGS[1]) else const integration_testing = parse( Bool, lowercase(get(ENV, 
"JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end end const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false"))) include("advection_diffusion_model.jl") struct Pseudo1D{n, α, β, μ, δ} <: AdvectionDiffusionProblem end function init_velocity_diffusion!( ::Pseudo1D{n, α, β}, aux::Vars, geom::LocalGeometry, ) where {n, α, β} # Direction of flow is n with magnitude α aux.advection.u = α * n # diffusion of strength β in the n direction aux.diffusion.D = β * n * n' end function initial_condition!( ::Pseudo1D{n, α, β, μ, δ}, state, aux, localgeo, t, ) where {n, α, β, μ, δ} ξn = dot(n, localgeo.coord) # ξT = SVector(localgeo.coord) - ξn * n state.ρ = exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ) end inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...) function inhomogeneous_data!( ::Val{1}, ::Pseudo1D{n, α, β, μ, δ}, ∇state, aux, x, t, ) where {n, α, β, μ, δ} ξn = dot(n, x) ∇state.ρ = -( 2n * (ξn - μ - α * t) / (4 * β * (δ + t)) * exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ) ) end function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end function test_run( mpicomm, ArrayType, dim, topl, N, timeend, FT, dt, n, α, β, μ, δ, vtkdir, 
outputtime, linearsolvertype, fluxBC, ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) bcs = ( InhomogeneousBC{0}(), InhomogeneousBC{1}(), HomogeneousBC{0}(), HomogeneousBC{1}(), ) model = AdvectionDiffusion{dim}( Pseudo1D{n, α, β, μ, δ}(), bcs, flux_bc = fluxBC, ) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), direction = EveryDirection(), ) vdg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), state_auxiliary = dg.state_auxiliary, direction = VerticalDirection(), ) Q = init_ode_state(dg, FT(0)) ode_solver = ARK548L2SA2KennedyCarpenter( dg, vdg, LinearBackwardEulerSolver(linearsolvertype(); isadjustable = false), Q; dt = dt, t0 = 0, split_explicit_implicit = false, ) eng0 = norm(Q) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(ode_solver), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # create vtk dir mkpath(vtkdir) vtkstep = 0 # output initial step do_output( mpicomm, vtkdir, vtkstep, dg, Q, Q, model, "advection_diffusion", ) # setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(ode_solver)) do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, "advection_diffusion", ) end callbacks = (callbacks..., cbvtk) end numberofsteps = convert(Int64, cld(timeend, dt)) dt = timeend / numberofsteps @info "time step" dt numberofsteps dt * numberofsteps timeend solve!( Q, ode_solver; numberofsteps = numberofsteps, callbacks = callbacks, 
adjustfinalstep = false, ) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dg, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 base_num_elem = 4 expected_result = Dict() expected_result[2, 1, Float64] = 7.2801198255507391e-02 expected_result[2, 2, Float64] = 6.8160295851506783e-03 expected_result[2, 3, Float64] = 1.4439137164205592e-04 expected_result[2, 4, Float64] = 2.4260727323386998e-06 expected_result[3, 1, Float64] = 1.0462203776357534e-01 expected_result[3, 2, Float64] = 1.0280535683502070e-02 expected_result[3, 3, Float64] = 2.0631857053908848e-04 expected_result[3, 4, Float64] = 3.3460492914169325e-06 expected_result[2, 1, Float32] = 7.2801239788532257e-02 expected_result[2, 2, Float32] = 6.8159680813550949e-03 expected_result[2, 3, Float32] = 1.4439738879445940e-04 # This is near roundoff so we will not check it # expected_result[2, 4, Float32] = 2.6432753656990826e-06 expected_result[3, 1, Float32] = 1.0462204366922379e-01 expected_result[3, 2, Float32] = 1.0280583053827286e-02 expected_result[3, 3, Float32] = 2.0646647317335010e-04 expected_result[3, 4, Float32] = 2.0226731066941284e-05 numlevels = integration_testing ? 4 : 1 @testset "$(@__FILE__)" begin for FT in (Float64, Float32) result = zeros(FT, numlevels) for dim in 2:3 connectivity = dim == 2 ? :face : :full for fluxBC in (true, false) for linearsolvertype in (SingleColumnLU, ManyColumnLU) d = dim == 2 ? 
FT[1, 10, 0] : FT[1, 1, 10] n = SVector{3, FT}(d ./ norm(d)) α = FT(1) β = FT(1 // 100) μ = FT(-1 // 2) δ = FT(1 // 10) for l in 1:numlevels Ne = 2^(l - 1) * base_num_elem brickrange = ( ntuple( j -> range( FT(-1); length = Ne + 1, stop = 1, ), dim - 1, )..., range(FT(-5); length = 5Ne + 1, stop = 5), ) periodicity = ntuple(j -> false, dim) topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = ( ntuple(j -> (1, 2), dim - 1)..., (3, 4), ), connectivity = connectivity, ) dt = (α / 4) / (Ne * polynomialorder^2) outputtime = 0.01 timeend = 0.5 @info ( ArrayType, FT, dim, linearsolvertype, l, fluxBC, ) vtkdir = output ? "vtk_advection" * "_poly$(polynomialorder)" * "_dim$(dim)_$(ArrayType)_$(FT)" * "_$(linearsolvertype)_level$(l)" : nothing result[l] = test_run( mpicomm, ArrayType, dim, topl, polynomialorder, timeend, FT, dt, n, α, β, μ, δ, vtkdir, outputtime, linearsolvertype, fluxBC, ) # test the errors significantly larger than floating point epsilon if !(dim == 2 && l == 4 && FT == Float32) @test result[l] ≈ FT(expected_result[dim, l, FT]) end end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/pseudo1D_advection_diffusion_mrigark_implicit.jl ================================================ using MPI using ClimateMachine using Logging using Test using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.SystemSolvers using ClimateMachine.ODESolvers using LinearAlgebra using Printf using Dates using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using ClimateMachine.VTK: writevtk, writepvtu if !@isdefined 
integration_testing if length(ARGS) > 0 const integration_testing = parse(Bool, ARGS[1]) else const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end end const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false"))) include("advection_diffusion_model.jl") struct Pseudo1D{n, α, β, μ, δ} <: AdvectionDiffusionProblem end function init_velocity_diffusion!( ::Pseudo1D{n, α, β}, aux::Vars, geom::LocalGeometry, ) where {n, α, β} # Direction of flow is n with magnitude α aux.advection.u = α * n # diffusion of strength β in the n direction aux.diffusion.D = β * n * n' end function initial_condition!( ::Pseudo1D{n, α, β, μ, δ}, state, aux, localgeo, t, ) where {n, α, β, μ, δ} ξn = dot(n, localgeo.coord) # ξT = SVector(localgeo.coord) - ξn * n state.ρ = exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ) end inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...) function inhomogeneous_data!( ::Val{1}, ::Pseudo1D{n, α, β, μ, δ}, ∇state, aux, x, t, ) where {n, α, β, μ, δ} ξn = dot(n, x) ∇state.ρ = -( 2n * (ξn - μ - α * t) / (4 * β * (δ + t)) * exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ) ) end function do_output(mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, testname) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) exactnames = statenames .* "_exact" writevtk(filename, Q, dg, statenames, Qe, exactnames) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu( pvtuprefix, prefixes, (statenames..., exactnames...), 
eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end function test_run( mpicomm, ArrayType, dim, topl, N, timeend, FT, dt, n, α, β, μ, δ, vtkdir, outputtime, linearsolvertype, fluxBC, ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) bcs = ( InhomogeneousBC{0}(), InhomogeneousBC{1}(), HomogeneousBC{0}(), HomogeneousBC{1}(), ) model = AdvectionDiffusion{dim}( Pseudo1D{n, α, β, μ, δ}(), bcs, flux_bc = fluxBC, ) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) vdg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), state_auxiliary = dg.state_auxiliary, direction = VerticalDirection(), ) Q = init_ode_state(dg, FT(0)) # With diffussion the Vertical + Horizontal ≠ Full because of 2nd # derivative mixing. So we define a custom RHS dQ2 = similar(Q) function rhs!(dQ, Q, p, time; increment) dg(dQ, Q, p, time; increment = increment) vdg(dQ2, Q, p, time; increment = false) dQ .= dQ .- dQ2 end fastsolver = LSRK144NiegemannDiehlBusch(rhs!, Q; dt = dt) # We're dominated by spatial error, so we can get away with a low order time # integrator ode_solver = MRIGARKESDIRK46aSandu( vdg, LinearBackwardEulerSolver(linearsolvertype(); isadjustable = false), fastsolver, Q; dt = dt, t0 = 0, ) eng0 = norm(Q) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", gettime(ode_solver), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end callbacks = (cbinfo,) if ~isnothing(vtkdir) # create vtk dir mkpath(vtkdir) vtkstep = 0 # output initial step do_output( mpicomm, vtkdir, vtkstep, dg, Q, Q, model, 
"advection_diffusion", ) # setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 Qe = init_ode_state(dg, gettime(ode_solver)) do_output( mpicomm, vtkdir, vtkstep, dg, Q, Qe, model, "advection_diffusion", ) end callbacks = (callbacks..., cbvtk) end numberofsteps = convert(Int64, cld(timeend, dt)) dt = timeend / numberofsteps @info "time step" dt numberofsteps dt * numberofsteps timeend solve!( Q, ode_solver; numberofsteps = numberofsteps, callbacks = callbacks, adjustfinalstep = false, ) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dg, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 base_num_elem = 4 expected_result = Dict() expected_result[2, 1, Float64] = 7.3188989633310442e-02 expected_result[2, 2, Float64] = 6.8155958327716232e-03 expected_result[2, 3, Float64] = 1.4832570828897563e-04 expected_result[2, 4, Float64] = 3.3905353801669396e-06 expected_result[3, 1, Float64] = 1.0501469629884301e-01 expected_result[3, 2, Float64] = 1.0341917570778314e-02 expected_result[3, 3, Float64] = 2.1032014288411172e-04 expected_result[3, 4, Float64] = 4.5797013335024617e-06 expected_result[2, 1, Float32] = 7.3186099529266357e-02 expected_result[2, 2, Float32] = 6.8112355656921864e-03 expected_result[2, 3, Float32] = 1.4748815738130361e-04 # This is near roundoff so we will not check it # expected_result[2, 4, Float32] = 2.9435863325488754e-05 expected_result[3, 1, Float32] = 1.0500905662775040e-01 expected_result[3, 2, Float32] = 1.0342594236135483e-02 expected_result[3, 3, Float32] = 4.2716524330899119e-04 # This is near roundoff so we will not check it # 
expected_result[3, 4, Float32] = 1.9564463291317225e-03 numlevels = integration_testing ? 4 : 1 @testset "$(@__FILE__)" begin for FT in (Float64, Float32) result = zeros(FT, numlevels) for dim in 2:3 connectivity = dim == 3 ? :full : :face for fluxBC in (true, false) for linearsolvertype in (SingleColumnLU,)# ManyColumnLU) d = dim == 2 ? FT[1, 10, 0] : FT[1, 1, 10] n = SVector{3, FT}(d ./ norm(d)) α = FT(1) β = FT(1 // 100) μ = FT(-1 // 2) δ = FT(1 // 10) for l in 1:numlevels Ne = 2^(l - 1) * base_num_elem brickrange = ( ntuple( j -> range( FT(-1); length = Ne + 1, stop = 1, ), dim - 1, )..., range(FT(-5); length = 5Ne + 1, stop = 5), ) periodicity = ntuple(j -> false, dim) topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = ( ntuple(j -> (1, 2), dim - 1)..., (3, 4), ), connectivity = connectivity, ) dt = 32 * (α / 4) / (Ne * polynomialorder^2) outputtime = 0.01 timeend = 0.5 @info ( ArrayType, FT, dim, linearsolvertype, l, fluxBC, ) vtkdir = output ? 
"vtk_advection" * "_poly$(polynomialorder)" * "_dim$(dim)_$(ArrayType)_$(FT)" * "_$(linearsolvertype)_level$(l)" : nothing result[l] = test_run( mpicomm, ArrayType, dim, topl, polynomialorder, timeend, FT, dt, n, α, β, μ, δ, vtkdir, outputtime, linearsolvertype, fluxBC, ) # test the errors significantly larger than floating point epsilon if !(l == 4 && FT == Float32) @test result[l] ≈ FT(expected_result[dim, l, FT]) end end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/pseudo1D_heat_eqn.jl ================================================ using MPI using ClimateMachine using Logging using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using LinearAlgebra using Printf using Dates using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps import ClimateMachine.DGMethods.NumericalFluxes: normal_boundary_flux_second_order! 
# Allow an including driver to predefine the flag; otherwise read it from the
# environment (defaults to false).
if !@isdefined integration_testing
    const integration_testing = parse(
        Bool,
        lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
    )
end

# Whether VTK/file output is requested (read once from the environment).
const output = parse(Bool, lowercase(get(ENV, "JULIA_CLIMA_OUTPUT", "false")))

include("advection_diffusion_model.jl")

# Heat-equation instance of the generic advection-diffusion test problem.
# Type parameters: n — diffusion direction vector; κ — mode wavenumber(s);
# A — mode amplitude(s).
struct HeatEqn{n, κ, A} <: AdvectionDiffusionProblem end

function init_velocity_diffusion!(
    ::HeatEqn{n},
    aux::Vars,
    geom::LocalGeometry,
) where {n}
    # diffusion of strength 1 in the n direction
    aux.diffusion.D = n * n'
end

# solution is such that
# u(1, t) = 1
# ∇u(0,t) = n
function initial_condition!(
    ::HeatEqn{n, κ, A},
    state,
    aux,
    localgeo,
    t,
) where {n, κ, A}
    # Coordinate projected onto n; the solution is a linear profile plus
    # decaying cosine modes: ξn + Σ A cos(κ ξn) exp(-κ² t).
    ξn = dot(n, localgeo.coord)
    state.ρ = ξn + sum(A .* cos.(κ * ξn) .* exp.(-κ .^ 2 * t))
end

# Value-type (Val{0}) boundary data is the exact solution on the boundary.
inhomogeneous_data!(::Val{0}, P::HeatEqn, x...) = initial_condition!(P, x...)

# Boundary normal flux for the second-order (diffusive) terms of this problem.
function normal_boundary_flux_second_order!(
    ::CentralNumericalFluxSecondOrder,
    bcs,
    ::AdvectionDiffusion{1, dim, HeatEqn{nd, κ, A}},
    fluxᵀn::Vars{S},
    n⁻,
    state⁻,
    diff⁻,
    hyperdiff⁻,
    aux⁻,
    state⁺,
    diff⁺,
    hyperdiff⁺,
    aux⁺,
    t,
    _...,
) where {S, dim, nd, κ, A}
    if any_isa(bcs, InhomogeneousBC{0})
        # Value BC present: use the interior diffusive flux σ⁻ directly.
        fluxᵀn.ρ = -diff⁻.σ' * n⁻
    elseif any_isa(bcs, InhomogeneousBC{1})
        # Gradient BC present: impose the flux built from the exact ∇ρ.
        # Get exact gradient of ρ
        x = aux⁻.coord
        ξn = dot(nd, x)
        ∇ρ = SVector(ntuple(
            i ->
                nd[i] *
                (1 - sum(A .* κ .* sin.(κ * ξn) .* exp.(-κ .^ 2 * t))),
            Val(3),
        ))
        # Compute flux value
        D = aux⁻.diffusion.D
        fluxᵀn.ρ = -(D * ∇ρ)' * n⁻
    end
end

# Run one heat-equation convergence test: build grid/model/solver, integrate
# to `timeend`, and report the error against the exact solution.
# (Definition continues on the following source lines.)
function test_run(
    mpicomm,
    ArrayType,
    dim,
    topl,
    N,
    timeend,
    FT,
    direction,
    dt,
    n,
    κ = 10 * FT(π) / 2,
    A = 1,
)
    # Ceiling-divide so the final step lands exactly on timeend, then shrink
    # dt to fit the step count.
    numberofsteps = convert(Int64, cld(timeend, dt))
    dt = timeend / numberofsteps
    @info "time step" dt numberofsteps dt * numberofsteps timeend

    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    # Both a gradient (1) and a value (0) inhomogeneous BC; advection disabled
    # so only the diffusive (heat-equation) terms are active.
    bcs = (InhomogeneousBC{1}(), InhomogeneousBC{0}())
    model = AdvectionDiffusion{dim}(HeatEqn{n, κ, A}(), bcs; advection = false)
    dg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
CentralNumericalFluxGradient(),
        direction = direction(),
    )

    Q = init_ode_state(dg, FT(0))
    # Low-storage Runge-Kutta explicit time integrator.
    lsrk = LSRK144NiegemannDiehlBusch(dg, Q; dt = dt, t0 = 0)

    eng0 = norm(Q)
    @info @sprintf """Starting norm(Q₀) = %.16e""" eng0

    # Set up the information callback
    starttime = Ref(now())
    # When invoked with s = true the wall-clock reference is reset; otherwise
    # the current simulation time, runtime, and solution norm are logged.
    cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q)
            @info @sprintf(
                """Update simtime = %.16e runtime = %s norm(Q) = %.16e""",
                gettime(lsrk),
                Dates.format(
                    convert(Dates.DateTime, Dates.now() - starttime[]),
                    Dates.dateformat"HH:MM:SS",
                ),
                energy
            )
        end
    end
    callbacks = (cbinfo,)

    solve!(
        Q,
        lsrk;
        numberofsteps = numberofsteps,
        callbacks = callbacks,
        adjustfinalstep = false,
    )

    # Print some end of the simulation information
    engf = norm(Q)
    # Exact solution evaluated at the final time, for the error measure.
    Qe = init_ode_state(dg, FT(timeend))
    engfe = norm(Qe)
    errf = euclidean_distance(Q, Qe)
    @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe
    # Return the absolute error; the driver compares it to reference values.
    errf
end

using Test

let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    polynomialorder = 4
    base_num_elem = 4

    # Reference errors keyed by (dim, refinement level, FT, direction);
    # populated below and checked against `test_run` results.
    expected_result = Dict()
    expected_result[2, 1, Float64, EveryDirection] = 0.005157483268127576
    expected_result[2, 2, Float64, EveryDirection] = 6.5687731035717e-5
    expected_result[2, 3, Float64, EveryDirection] = 1.6644861275185443e-6
    expected_result[2, 1, Float64, HorizontalDirection] = 0.020515449798977983
    expected_result[2, 2, Float64, HorizontalDirection] = 0.0005686256942296802
    expected_result[2, 3, Float64, HorizontalDirection] = 1.0132022682547854e-5
    expected_result[2, 1, Float64, VerticalDirection] = 0.02051544979897792
    expected_result[2, 2, Float64, VerticalDirection] = 0.0005686256942296017
    expected_result[2, 3, Float64, VerticalDirection] = 1.0132022682754848e-5
    expected_result[3, 1, Float64, EveryDirection] = 0.001260581018671256
    expected_result[3, 2, Float64, EveryDirection] =
2.214908522198975e-5 expected_result[3, 3, Float64, EveryDirection] = 5.931735594156876e-7 expected_result[3, 1, Float64, HorizontalDirection] = 0.005157483268127569 expected_result[3, 2, Float64, HorizontalDirection] = 6.568773103570526e-5 expected_result[3, 3, Float64, HorizontalDirection] = 1.6644861273865866e-6 expected_result[3, 1, Float64, VerticalDirection] = 0.020515449798978087 expected_result[3, 2, Float64, VerticalDirection] = 0.0005686256942297547 expected_result[3, 3, Float64, VerticalDirection] = 1.0132022682817856e-5 expected_result[2, 1, Float32, EveryDirection] = 0.005157135 expected_result[2, 2, Float32, EveryDirection] = 6.5721644e-5 expected_result[2, 3, Float32, EveryDirection] = 3.280845e-6 expected_result[2, 1, Float32, HorizontalDirection] = 0.020514594 expected_result[2, 2, Float32, HorizontalDirection] = 0.0005684704 expected_result[2, 3, Float32, HorizontalDirection] = 1.02350195e-5 expected_result[2, 1, Float32, VerticalDirection] = 0.020514673 expected_result[2, 2, Float32, VerticalDirection] = 0.0005684843 expected_result[2, 3, Float32, VerticalDirection] = 1.0227403e-5 expected_result[3, 1, Float32, EveryDirection] = 0.0012602004 expected_result[3, 2, Float32, EveryDirection] = 2.2415780e-5 expected_result[3, 3, Float32, EveryDirection] = 1.1309192e-5 expected_result[3, 1, Float32, HorizontalDirection] = 0.005157044 expected_result[3, 2, Float32, HorizontalDirection] = 6.66792e-5 expected_result[3, 3, Float32, HorizontalDirection] = 9.930429e-5 expected_result[3, 1, Float32, VerticalDirection] = 0.020514654 expected_result[3, 2, Float32, VerticalDirection] = 0.0005684157 expected_result[3, 3, Float32, VerticalDirection] = 3.224683e-5 @testset "$(@__FILE__)" begin for FT in (Float64, Float32) numlevels = integration_testing || ClimateMachine.Settings.integration_testing ? 3 : 1 result = zeros(FT, numlevels) for dim in 2:3 connectivity = dim == 2 ? 
:face : :full for direction in (EveryDirection, HorizontalDirection, VerticalDirection) if direction <: EveryDirection n = dim == 2 ? SVector{3, FT}(1 / sqrt(2), 1 / sqrt(2), 0) : SVector{3, FT}( 1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3), ) elseif direction <: HorizontalDirection n = dim == 2 ? SVector{3, FT}(1, 0, 0) : SVector{3, FT}(1 / sqrt(2), 1 / sqrt(2), 0) elseif direction <: VerticalDirection n = dim == 2 ? SVector{3, FT}(0, 1, 0) : SVector{3, FT}(0, 0, 1) end for l in 1:numlevels Ne = 2^(l - 1) * base_num_elem brickrange = ntuple( j -> range(FT(0); length = Ne + 1, stop = 1), dim, ) periodicity = ntuple(j -> false, dim) bc = ntuple(j -> (1, 2), dim) topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = bc, connectivity = connectivity, ) dt = 1 / (Ne * polynomialorder^2)^2 timeend = 0.01 @info (ArrayType, FT, dim, direction) result[l] = test_run( mpicomm, ArrayType, dim, topl, polynomialorder, timeend, FT, direction, dt, n, ) @test ( result[l] ≈ FT(expected_result[dim, l, FT, direction]) || result[l] < FT(expected_result[dim, l, FT, direction]) ) end @info begin msg = "" for l in 1:(numlevels - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/advection_diffusion/variable_degree_advection_diffusion.jl ================================================ using MPI using ClimateMachine using Logging using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using LinearAlgebra using Printf using Test if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end const output = parse(Bool, lowercase(get(ENV, 
"JULIA_CLIMA_OUTPUT", "false")))

include("advection_diffusion_model.jl")

# Two-field pseudo-1D Gaussian advection-diffusion problem: field 1 is
# transported along n1 and field 2 along n2, both with advection speed α,
# diffusivity β, initial center μ, and initial spread set by δ.
struct Pseudo1D{n1, n2, α, β, μ, δ} <: AdvectionDiffusionProblem end

function init_velocity_diffusion!(
    ::Pseudo1D{n1, n2, α, β},
    aux::Vars,
    geom::LocalGeometry,
) where {n1, n2, α, β}
    # Direction of flow is n1 (resp n2) with magnitude α
    aux.advection.u = hcat(α * n1, α * n2)
    # diffusion of strength β in the n1 and n2 directions
    aux.diffusion.D = hcat(β * n1 * n1', β * n2 * n2')
end

function initial_condition!(
    ::Pseudo1D{n1, n2, α, β, μ, δ},
    state,
    aux,
    localgeo,
    t,
) where {n1, n2, α, β, μ, δ}
    # Coordinates projected onto each transport direction.
    ξn1 = dot(n1, localgeo.coord)
    ξn2 = dot(n2, localgeo.coord)
    # Exact solution: a translating, spreading Gaussian per field.
    ρ1 = exp(-(ξn1 - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
    ρ2 = exp(-(ξn2 - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
    state.ρ = (ρ1, ρ2)
end

# Value-type (Val{0}) boundary data is the exact solution on the boundary.
inhomogeneous_data!(::Val{0}, P::Pseudo1D, x...) = initial_condition!(P, x...)

# Gradient-type (Val{1}) boundary data: analytic gradient of each Gaussian.
function inhomogeneous_data!(
    ::Val{1},
    ::Pseudo1D{n1, n2, α, β, μ, δ},
    ∇state,
    aux,
    x,
    t,
) where {n1, n2, α, β, μ, δ}
    ξn1 = dot(n1, x)
    ξn2 = dot(n2, x)
    ∇ρ1 = -(
        2n1 * (ξn1 - μ - α * t) / (4 * β * (δ + t)) *
        exp(-(ξn1 - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
    )
    ∇ρ2 = -(
        2n2 * (ξn2 - μ - α * t) / (4 * β * (δ + t)) *
        exp(-(ξn2 - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ)
    )
    ∇state.ρ = hcat(∇ρ1, ∇ρ2)
end

# Build mesh/model for one (dim, polynomial orders, level) combination and
# integrate to the final time, returning the per-field error norms.
# (Definition continues on the following source lines.)
function test_run(mpicomm, dim, polynomialorders, level, ArrayType, FT)
    # Horizontal and vertical unit transport directions for this dimension.
    n_hd =
        dim == 2 ? SVector{3, FT}(1, 0, 0) :
        SVector{3, FT}(1 / sqrt(2), 1 / sqrt(2), 0)
    n_vd = dim == 2 ? SVector{3, FT}(0, 1, 0) : SVector{3, FT}(0, 0, 1)

    # Problem parameters: advection speed, diffusivity, center, spread.
    α = FT(1)
    β = FT(1 // 100)
    μ = FT(-1 // 2)
    δ = FT(1 // 10)

    # Grid/topology information
    base_num_elem = 4
    Ne = 2^(level - 1) * base_num_elem
    brickrange = ntuple(j -> range(FT(-1); length = Ne + 1, stop = 1), dim)
    periodicity = ntuple(j -> false, dim)
    bc = ntuple(j -> (1, 2), dim)
    connectivity = dim == 3 ?
:full : :face topl = StackedBrickTopology( mpicomm, brickrange; periodicity = periodicity, boundary = bc, connectivity = connectivity, ) dt = (α / 4) / (Ne * maximum(polynomialorders)^2) timeend = 1 @info "time step" dt @info @sprintf """Test parameters: ArrayType = %s FloatType = %s Dimension = %s Horizontal polynomial order = %s Vertical polynomial order = %s """ ArrayType FT dim polynomialorders[1] polynomialorders[end] grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorders, ) bcs = (InhomogeneousBC{0}(), InhomogeneousBC{1}()) # Model being tested model = AdvectionDiffusion{dim}( Pseudo1D{n_hd, n_vd, α, β, μ, δ}(), bcs, num_equations = 2, ) # Main DG discretization dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), direction = EveryDirection(), ) # Initialize all relevant state arrays and create solvers Q = init_ode_state(dg, FT(0)) eng0 = norm(Q, dims = (1, 3)) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0[1] solver = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) solve!(Q, solver; timeend = timeend) # Reference solution engf = norm(Q, dims = (1, 3)) Q_ref = init_ode_state(dg, FT(timeend)) engfe = norm(Q_ref, dims = (1, 3)) errf = norm(Q_ref .- Q, dims = (1, 3)) metrics = @. (engf, engf / eng0, engf - eng0, errf, errf / engfe) @info @sprintf """Finished Horizontal field: norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e Vertical field: norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ first.(metrics)... last.(metrics)... 
return errf end """ main() Run this test problem """ function main() ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD # Dictionary keys: dim, level, polynomial order, FT, and direction expected_result = Dict() # Dim 2, degree 4 in the horizontal, Float64 expected_result[2, 1, 4, Float64, HorizontalDirection] = 0.0467837436192571 expected_result[2, 2, 4, Float64, HorizontalDirection] = 0.004066556782723549 expected_result[2, 3, 4, Float64, HorizontalDirection] = 5.3144336694234015e-5 expected_result[2, 4, 4, Float64, HorizontalDirection] = 3.978000110046181e-7 # Dim 2, degree 2 in the vertical, Float64 expected_result[2, 1, 2, Float64, VerticalDirection] = 0.15362016594121006 expected_result[2, 2, 2, Float64, VerticalDirection] = 0.04935353328794371 expected_result[2, 3, 2, Float64, VerticalDirection] = 0.015530511948609192 expected_result[2, 4, 2, Float64, VerticalDirection] = 0.0006275095484456197 # Dim 2, degree 2 in the horizontal, Float64 expected_result[2, 1, 2, Float64, HorizontalDirection] = 0.15362016594121003 expected_result[2, 2, 2, Float64, HorizontalDirection] = 0.04935353328794369 expected_result[2, 3, 2, Float64, HorizontalDirection] = 0.015530511948609204 expected_result[2, 4, 2, Float64, HorizontalDirection] = 0.0006275095484455967 # Dim 2, degree 4 in the vertical, Float64 expected_result[2, 1, 4, Float64, VerticalDirection] = 0.04678374361925714 expected_result[2, 2, 4, Float64, VerticalDirection] = 0.0040665567827235 expected_result[2, 3, 4, Float64, VerticalDirection] = 5.3144336694109365e-5 expected_result[2, 4, 4, Float64, VerticalDirection] = 3.978000109805811e-7 # Dim 3, degree 4 in the horizontal, Float64 expected_result[3, 1, 4, Float64, HorizontalDirection] = 0.017475667486259432 expected_result[3, 2, 4, Float64, HorizontalDirection] = 0.0012502148161420109 expected_result[3, 3, 4, Float64, HorizontalDirection] = 6.999081063570052e-5 expected_result[3, 4, 4, Float64, HorizontalDirection] = 
2.8724182090419642e-6 # Dim 3, degree 2 in the vertical, Float64 expected_result[3, 1, 2, Float64, VerticalDirection] = 0.2172517221280645 expected_result[3, 2, 2, Float64, VerticalDirection] = 0.06979643612684193 expected_result[3, 3, 2, Float64, VerticalDirection] = 0.02196346062832051 expected_result[3, 4, 2, Float64, VerticalDirection] = 0.0008874325139302493 # Dim 3, degree 2 in the horizontal, Float64 expected_result[3, 1, 2, Float64, HorizontalDirection] = 0.10343354980172516 expected_result[3, 2, 2, Float64, HorizontalDirection] = 0.03415137756593495 expected_result[3, 3, 2, Float64, HorizontalDirection] = 0.0035959803480493553 expected_result[3, 4, 2, Float64, HorizontalDirection] = 0.0002714157844893719 # Dim 3, degree 4 in the vertical, Float64 expected_result[3, 1, 4, Float64, VerticalDirection] = 0.06616220472493903 expected_result[3, 2, 4, Float64, VerticalDirection] = 0.005750979754288175 expected_result[3, 3, 4, Float64, VerticalDirection] = 7.515744171591452e-5 expected_result[3, 4, 4, Float64, VerticalDirection] = 5.625741705890895e-7 # Dim 2, degree 4 in the horizontal, Float32 expected_result[2, 1, 4, Float32, HorizontalDirection] = 0.046783954f0 expected_result[2, 2, 4, Float32, HorizontalDirection] = 0.004066328f0 expected_result[2, 3, 4, Float32, HorizontalDirection] = 5.327546f-5 # Dim 2, degree 2 in the vertical, Float32 expected_result[2, 1, 2, Float32, VerticalDirection] = 0.15362015f0 expected_result[2, 2, 2, Float32, VerticalDirection] = 0.04935346f0 expected_result[2, 3, 2, Float32, VerticalDirection] = 0.015530386f0 # Dim 2, degree 2 in the horizontal, Float32 expected_result[2, 1, 2, Float32, HorizontalDirection] = 0.1536202f0 expected_result[2, 2, 2, Float32, HorizontalDirection] = 0.04935346f0 expected_result[2, 3, 2, Float32, HorizontalDirection] = 0.015530357f0 # Dim 2, degree 4 in the vertical, Float32 expected_result[2, 1, 4, Float32, VerticalDirection] = 0.04678398f0 expected_result[2, 2, 4, Float32, VerticalDirection] = 
0.0040662177f0 expected_result[2, 3, 4, Float32, VerticalDirection] = 5.3401447f-5 # Dim 3, degree 4 in the horizontal, Float32 expected_result[3, 1, 4, Float32, HorizontalDirection] = 0.01747554f0 expected_result[3, 2, 4, Float32, HorizontalDirection] = 0.0012502924f0 expected_result[3, 3, 4, Float32, HorizontalDirection] = 7.00218f-5 # Dim 3, degree 2 in the vertical, Float32 expected_result[3, 1, 2, Float32, VerticalDirection] = 0.21725166f0 expected_result[3, 2, 2, Float32, VerticalDirection] = 0.06979626f0 expected_result[3, 3, 2, Float32, VerticalDirection] = 0.021963252f0 # Dim 3, degree 2 in the horizontal, Float32 expected_result[3, 1, 2, Float32, HorizontalDirection] = 0.10343349f0 expected_result[3, 2, 2, Float32, HorizontalDirection] = 0.034151305f0 expected_result[3, 3, 2, Float32, HorizontalDirection] = 0.0035958516f0 # Dim 3, degree 4 in the vertical, Float32 expected_result[3, 1, 4, Float32, VerticalDirection] = 0.06616244f0 expected_result[3, 2, 4, Float32, VerticalDirection] = 0.005750495f0 expected_result[3, 3, 4, Float32, VerticalDirection] = 7.538217f-5 @testset "Variable degree DG: advection diffusion model" begin for FT in (Float32, Float64) numlevels = integration_testing || ClimateMachine.Settings.integration_testing ? (FT == Float64 ? 4 : 3) : 1 for dim in 2:3 for polynomialorders in ((4, 2), (2, 4)) result = Dict() for level in 1:numlevels result[level] = test_run( mpicomm, dim, polynomialorders, level, ArrayType, FT, ) horiz_poly = polynomialorders[1] vert_poly = polynomialorders[2] @test result[level][1] ≈ FT(expected_result[ dim, level, horiz_poly, FT, HorizontalDirection, ]) @test result[level][2] ≈ FT(expected_result[ dim, level, vert_poly, FT, VerticalDirection, ]) end @info begin msg = "" for l in 1:(numlevels - 1) rate = @. 
log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rates for level %d Horizontal = %e", l, rate[1] ) msg *= @sprintf(", Vertical = %e\n", rate[2]) end msg end end end end end end main() ================================================ FILE: test/Numerics/DGMethods/compressible_Navier_Stokes/density_current_model.jl ================================================ using Test using Dates using LinearAlgebra using MPI using Printf using Random using StaticArrays using ClimateMachine using ClimateMachine.Atmos using ClimateMachine.ConfigTypes using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Geometry using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.GenericCallbacks using ClimateMachine.Atmos using ClimateMachine.Orientations using ClimateMachine.VariableTemplates using Thermodynamics.TemperatureProfiles using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VTK using CLIMAParameters using CLIMAParameters.Planet: R_d, cp_d, cv_d, grav, MSLP struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end # -------------- Problem constants ------------------- # const dim = 3 const (xmin, xmax) = (0, 12800) const (ymin, ymax) = (0, 400) const (zmin, zmax) = (0, 6400) const Ne = (100, 2, 50) const polynomialorder = 4 const dt = 0.01 const timeend = 10dt # ------------- Initial condition function ----------- # """ # Reference See [Straka1993](@cite) """ function Initialise_Density_Current!( problem, bl, state::Vars, aux::Vars, localgeo, t, ) (x1, x2, x3) = localgeo.coord param_set = parameter_set(bl) FT = eltype(state) _R_d::FT = R_d(param_set) _grav::FT = grav(param_set) _cp_d::FT = cp_d(param_set) 
_cv_d::FT = cv_d(param_set) _MSLP::FT = MSLP(param_set) # initialise with dry domain q_tot::FT = 0 q_liq::FT = 0 q_ice::FT = 0 # perturbation parameters for rising bubble rx = 4000 rz = 2000 xc = 0 zc = 3000 r = sqrt((x1 - xc)^2 / rx^2 + (x3 - zc)^2 / rz^2) θ_ref::FT = 300 θ_c::FT = -15 Δθ::FT = 0 if r <= 1 Δθ = θ_c * (1 + cospi(r)) / 2 end qvar = PhasePartition(q_tot) θ = θ_ref + Δθ # potential temperature π_exner = FT(1) - _grav / (_cp_d * θ) * x3 # exner pressure ρ = _MSLP / (_R_d * θ) * (π_exner)^(_cv_d / _R_d) # density ts = PhaseEquil_ρθq(param_set, ρ, θ, q_tot) q_pt = PhasePartition(ts) U, V, W = FT(0), FT(0), FT(0) # momentum components # energy definitions e_kin = (U^2 + V^2 + W^2) / (2 * ρ) / ρ e_pot = gravitational_potential(bl, aux) e_int = internal_energy(ts) E = ρ * (e_int + e_kin + e_pot) #* total_energy(e_kin, e_pot, T, q_tot, q_liq, q_ice) state.ρ = ρ state.ρu = SVector(U, V, W) state.energy.ρe = E state.moisture.ρq_tot = ρ * q_pt.tot end # --------------- Driver definition ------------------ # function test_run( mpicomm, ArrayType, topl, dim, Ne, polynomialorder, timeend, FT, dt, ) # -------------- Define grid ----------------------------------- # grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, ) # -------------- Define model ---------------------------------- # T_profile = DryAdiabaticProfile{FT}(param_set) physics = AtmosPhysics{FT}( param_set; ref_state = HydrostaticState(T_profile), turbulence = AnisoMinDiss{FT}(1), ) model = AtmosModel{FT}( AtmosLESConfigType, physics; init_state_prognostic = Initialise_Density_Current!, source = (Gravity(),), ) # -------------- Define DGModel --------------------------- # dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q = init_ode_state(dg, FT(0)) lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) eng0 = norm(Q) @info @sprintf """Starting norm(Q₀) = %.16e 
ArrayType = %s FloatType = %s""" eng0 ArrayType FT # Set up the information callback (output field dump is via vtk callback: see cbinfo) starttime = Ref(now()) cbinfo = GenericCallbacks.EveryXWallTimeSeconds(10, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", ODESolvers.gettime(lsrk), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end vtkstep = [0] cbvtk = GenericCallbacks.EveryXSimulationSteps(3000) do (init = false) mkpath("./vtk-dc/") outprefix = @sprintf( "./vtk-dc/DC_%dD_mpirank%04d_step%04d", dim, MPI.Comm_rank(mpicomm), vtkstep[1] ) @debug "doing VTK output" outprefix writevtk( outprefix, Q, dg, flattenednames(vars_state(model, Prognostic(), FT)), dg.state_auxiliary, flattenednames(vars_state(model, Auxiliary(), FT)), ) vtkstep[1] += 1 nothing end solve!(Q, lsrk; timeend = timeend, callbacks = (cbinfo, cbvtk)) # End of the simulation information engf = norm(Q) Qe = init_ode_state(dg, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe engf / eng0 end # --------------- Test block / Loggers ------------------ # using Test let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD for FT in (Float32, Float64) brickrange = ( range(FT(xmin); length = Ne[1] + 1, stop = xmax), range(FT(ymin); length = Ne[2] + 1, stop = ymax), range(FT(zmin); length = Ne[3] + 1, stop = zmax), ) topl = StackedBrickTopology( mpicomm, brickrange, periodicity = (false, true, false), ) engf_eng0 = test_run( mpicomm, ArrayType, topl, dim, Ne, polynomialorder, timeend, FT, dt, ) @test engf_eng0 ≈ FT(9.9999970927037096e-01) end end #nothing ================================================ 
FILE: test/Numerics/DGMethods/compressible_Navier_Stokes/mms_bc_atmos.jl ================================================ using Test using Dates using LinearAlgebra using MPI using Printf using StaticArrays using UnPack using ClimateMachine using ClimateMachine.ConfigTypes using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.GenericCallbacks using ClimateMachine.Atmos using ClimateMachine.BalanceLaws using ClimateMachine.Orientations using ClimateMachine.VariableTemplates using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VTK import ClimateMachine.BalanceLaws: source, prognostic_vars using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() import CLIMAParameters # Assume zero reference temperature CLIMAParameters.Planet.T_0(::EarthParameterSet) = 0 if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end include("mms_solution_generated.jl") import Thermodynamics: total_specific_enthalpy using ClimateMachine.Atmos total_specific_enthalpy(ts::PhaseDry{FT}, e_tot::FT) where {FT <: Real} = zero(FT) function mms2_init_state!(problem, bl, state::Vars, aux::Vars, localgeo, t) (x1, x2, x3) = localgeo.coord state.ρ = ρ_g(t, x1, x2, x3, Val(2)) state.ρu = SVector( U_g(t, x1, x2, x3, Val(2)), V_g(t, x1, x2, x3, Val(2)), W_g(t, x1, x2, x3, Val(2)), ) state.energy.ρe = E_g(t, x1, x2, x3, Val(2)) end struct MMSSource{N} <: TendencyDef{Source} end prognostic_vars(::MMSSource{N}) where {N} = (Mass(), Momentum(), Energy()) function source(::Mass, s::MMSSource{N}, m, args) where {N} @unpack aux, t = args x1, x2, x3 = aux.coord return Sρ_g(t, x1, x2, x3, Val(N)) end function source(::Momentum, s::MMSSource{N}, m, args) where 
{N} @unpack aux, t = args x1, x2, x3 = aux.coord return SVector( SU_g(t, x1, x2, x3, Val(N)), SV_g(t, x1, x2, x3, Val(N)), SW_g(t, x1, x2, x3, Val(N)), ) end function source(::Energy, s::MMSSource{N}, m, args) where {N} @unpack aux, t = args x1, x2, x3 = aux.coord return SE_g(t, x1, x2, x3, Val(N)) end function mms3_init_state!(problem, bl, state::Vars, aux::Vars, localgeo, t) (x1, x2, x3) = localgeo.coord state.ρ = ρ_g(t, x1, x2, x3, Val(3)) state.ρu = SVector( U_g(t, x1, x2, x3, Val(3)), V_g(t, x1, x2, x3, Val(3)), W_g(t, x1, x2, x3, Val(3)), ) state.energy.ρe = E_g(t, x1, x2, x3, Val(3)) end # initial condition function test_run(mpicomm, ArrayType, dim, topl, warpfun, N, timeend, FT, dt) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, meshwarp = warpfun, ) physics = AtmosPhysics{FT}( param_set; ref_state = NoReferenceState(), turbulence = ConstantDynamicViscosity(FT(μ_exact), WithDivergence()), moisture = DryModel(), ) if dim == 2 problem = AtmosProblem( boundaryconditions = (InitStateBC(),), init_state_prognostic = mms2_init_state!, ) model = AtmosModel{FT}( AtmosLESConfigType, physics; problem = problem, orientation = NoOrientation(), source = (MMSSource{2}(),), ) else problem = AtmosProblem( boundaryconditions = (InitStateBC(),), init_state_prognostic = mms3_init_state!, ) model = AtmosModel{FT}( AtmosLESConfigType, physics; problem = problem, orientation = NoOrientation(), source = (MMSSource{3}(),), ) end show_tendencies(model) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q = init_ode_state(dg, FT(0)) Qcpu = init_ode_state(dg, FT(0); init_on_cpu = true) @test euclidean_distance(Q, Qcpu) < sqrt(eps(FT)) lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) eng0 = norm(Q) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0 # Set up the information callback starttime = Ref(now()) cbinfo = 
GenericCallbacks.EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", ODESolvers.gettime(lsrk), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end solve!(Q, lsrk; timeend = timeend, callbacks = (cbinfo,)) # solve!(Q, lsrk; timeend=timeend, callbacks=(cbinfo, cbvtk)) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dg, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 base_num_elem = 4 expected_result = [ 1.6931876910307017e-01 5.4603193051929394e-03 2.3307776694542282e-04 3.3983777728925593e-02 1.7808380837573065e-03 9.176181458773599e-5 ] lvls = integration_testing ? 
size(expected_result, 2) : 1 @testset "mms_bc_atmos" begin for FT in (Float64,) #Float32) result = zeros(FT, lvls) for dim in 2:3 for l in 1:lvls if dim == 2 Ne = ( 2^(l - 1) * base_num_elem, 2^(l - 1) * base_num_elem, ) brickrange = ( range(FT(0); length = Ne[1] + 1, stop = 1), range(FT(0); length = Ne[2] + 1, stop = 1), ) topl = BrickTopology( mpicomm, brickrange, periodicity = (false, false), ) dt = 1e-2 / Ne[1] warpfun = (x1, x2, _) -> begin (x1 + sin(x1 * x2), x2 + sin(2 * x1 * x2), 0) end elseif dim == 3 Ne = ( 2^(l - 1) * base_num_elem, 2^(l - 1) * base_num_elem, ) brickrange = ( range(FT(0); length = Ne[1] + 1, stop = 1), range(FT(0); length = Ne[2] + 1, stop = 1), range(FT(0); length = Ne[2] + 1, stop = 1), ) topl = BrickTopology( mpicomm, brickrange, periodicity = (false, false, false), ) dt = 5e-3 / Ne[1] warpfun = (x1, x2, x3) -> begin ( x1 + (x1 - 1 / 2) * cos(2 * π * x2 * x3) / 4, x2 + exp(sin(2π * (x1 * x2 + x3))) / 20, x3 + x1 / 4 + x2^2 / 2 + sin(x1 * x2 * x3), ) end end timeend = 1 nsteps = ceil(Int64, timeend / dt) dt = timeend / nsteps @info (ArrayType, FT, dim, nsteps, dt) result[l] = test_run( mpicomm, ArrayType, dim, topl, warpfun, polynomialorder, timeend, FT, dt, ) @test result[l] ≈ FT(expected_result[dim - 1, l]) end if integration_testing @info begin msg = "" for l in 1:(lvls - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end ================================================ FILE: test/Numerics/DGMethods/compressible_Navier_Stokes/mms_bc_dgmodel.jl ================================================ using MPI using ClimateMachine using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.GenericCallbacks using LinearAlgebra using StaticArrays using Logging, Printf, Dates using 
ClimateMachine.VTK if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end include("mms_solution_generated.jl") include("mms_model.jl") # initial condition function test_run(mpicomm, ArrayType, dim, topl, warpfun, N, timeend, FT, dt) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, meshwarp = warpfun, ) dg = DGModel( MMSModel{dim}(), grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q = init_ode_state(dg, FT(0)) lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0) eng0 = norm(Q) @info @sprintf """Starting norm(Q₀) = %.16e""" eng0 # Set up the information callback starttime = Ref(now()) cbinfo = GenericCallbacks.EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %.16e runtime = %s norm(Q) = %.16e""", ODESolvers.gettime(lsrk), Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end solve!(Q, lsrk; timeend = timeend, callbacks = (cbinfo,)) # solve!(Q, lsrk; timeend=timeend, callbacks=(cbinfo, cbvtk)) # Print some end of the simulation information engf = norm(Q) Qe = init_ode_state(dg, FT(timeend)) engfe = norm(Qe) errf = euclidean_distance(Q, Qe) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e norm(Q - Qe) = %.16e norm(Q - Qe) / norm(Qe) = %.16e """ engf engf / eng0 engf - eng0 errf errf / engfe errf end using Test let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 base_num_elem = 4 expected_result = [ 1.6694721292986181e-01 5.4178750150416337e-03 2.3066867400713085e-04 3.3672443923201158e-02 1.7603832251132654e-03 9.1108401774885506e-05 ] lvls = integration_testing ? 
size(expected_result, 2) : 1 @testset "mms_bc_dgmodel" begin for FT in (Float64,) #Float32) result = zeros(FT, lvls) for dim in 2:3 for l in 1:lvls if dim == 2 Ne = ( 2^(l - 1) * base_num_elem, 2^(l - 1) * base_num_elem, ) brickrange = ( range(FT(0); length = Ne[1] + 1, stop = 1), range(FT(0); length = Ne[2] + 1, stop = 1), ) topl = BrickTopology( mpicomm, brickrange, periodicity = (false, false), ) dt = 1e-2 / Ne[1] warpfun = (x1, x2, _) -> begin (x1 + sin(x1 * x2), x2 + sin(2 * x1 * x2), 0) end elseif dim == 3 Ne = ( 2^(l - 1) * base_num_elem, 2^(l - 1) * base_num_elem, ) brickrange = ( range(FT(0); length = Ne[1] + 1, stop = 1), range(FT(0); length = Ne[2] + 1, stop = 1), range(FT(0); length = Ne[2] + 1, stop = 1), ) topl = BrickTopology( mpicomm, brickrange, periodicity = (false, false, false), ) dt = 5e-3 / Ne[1] warpfun = (x1, x2, x3) -> begin ( x1 + (x1 - 1 / 2) * cos(2 * π * x2 * x3) / 4, x2 + exp(sin(2π * (x1 * x2 + x3))) / 20, x3 + x1 / 4 + x2^2 / 2 + sin(x1 * x2 * x3), ) end end timeend = 1 nsteps = ceil(Int64, timeend / dt) dt = timeend / nsteps @info (ArrayType, FT, dim) result[l] = test_run( mpicomm, ArrayType, dim, topl, warpfun, polynomialorder, timeend, FT, dt, ) @test result[l] ≈ FT(expected_result[dim - 1, l]) end if integration_testing @info begin msg = "" for l in 1:(lvls - 1) rate = log2(result[l]) - log2(result[l + 1]) msg *= @sprintf( "\n rate for level %d = %e\n", l, rate ) end msg end end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/compressible_Navier_Stokes/mms_model.jl ================================================ using ClimateMachine.VariableTemplates using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux import ClimateMachine.BalanceLaws: vars_state, flux_first_order!, flux_second_order!, source!, wavespeed, boundary_conditions, boundary_state!, compute_gradient_argument!, compute_gradient_flux!, nodal_init_state_auxiliary!, 
init_state_prognostic!
import ClimateMachine.DGMethods: init_ode_state
using ClimateMachine.Mesh.Geometry: LocalGeometry

# Minimal compressible Navier-Stokes balance law used for the method of
# manufactured solutions (MMS) convergence tests.  The `dim` parameter
# (2 or 3) selects which manufactured solution (Val(2)/Val(3) methods of
# the *_g functions) drives the sources, boundary and initial conditions.
struct MMSModel{dim} <: BalanceLaw end

# State layouts: auxiliary = nodal coordinates, prognostic = conserved
# variables (density, momentum components, total energy), gradient =
# velocity, gradient-flux = deviatoric stress components.
vars_state(::MMSModel, ::Auxiliary, T) = @vars(x1::T, x2::T, x3::T)
vars_state(::MMSModel, ::Prognostic, T) = @vars(ρ::T, ρu::T, ρv::T, ρw::T, ρe::T)
vars_state(::MMSModel, ::Gradient, T) = @vars(u::T, v::T, w::T)
vars_state(::MMSModel, ::GradientFlux, T) =
    @vars(τ11::T, τ22::T, τ33::T, τ12::T, τ13::T, τ23::T)

# Inviscid (advective + pressure) Euler fluxes.
function flux_first_order!(
    ::MMSModel,
    flux::Grad,
    state::Vars,
    auxstate::Vars,
    t::Real,
    direction,
)
    # preflux
    T = eltype(flux)
    γ = T(γ_exact) # constant emitted by mms_solution.jl
    ρinv = 1 / state.ρ
    u, v, w = ρinv * state.ρu, ρinv * state.ρv, ρinv * state.ρw
    # Ideal-gas pressure from total minus kinetic energy.
    P = (γ - 1) * (state.ρe - ρinv * (state.ρu^2 + state.ρv^2 + state.ρw^2) / 2)
    # invisc terms
    flux.ρ = SVector(state.ρu, state.ρv, state.ρw)
    flux.ρu = SVector(u * state.ρu + P, v * state.ρu, w * state.ρu)
    flux.ρv = SVector(u * state.ρv, v * state.ρv + P, w * state.ρv)
    flux.ρw = SVector(u * state.ρw, v * state.ρw, w * state.ρw + P)
    flux.ρe = SVector(u * (state.ρe + P), v * (state.ρe + P), w * (state.ρe + P))
end

# Viscous fluxes: subtract the stresses computed in
# compute_gradient_flux! (and their work, in the energy equation).
function flux_second_order!(
    ::MMSModel,
    flux::Grad,
    state::Vars,
    diffusive::Vars,
    hyperdiffusive::Vars,
    auxstate::Vars,
    t::Real,
)
    ρinv = 1 / state.ρ
    u, v, w = ρinv * state.ρu, ρinv * state.ρv, ρinv * state.ρw
    # viscous terms
    flux.ρu -= SVector(diffusive.τ11, diffusive.τ12, diffusive.τ13)
    flux.ρv -= SVector(diffusive.τ12, diffusive.τ22, diffusive.τ23)
    flux.ρw -= SVector(diffusive.τ13, diffusive.τ23, diffusive.τ33)
    flux.ρe -= SVector(
        u * diffusive.τ11 + v * diffusive.τ12 + w * diffusive.τ13,
        u * diffusive.τ12 + v * diffusive.τ22 + w * diffusive.τ23,
        u * diffusive.τ13 + v * diffusive.τ23 + w * diffusive.τ33,
    )
end

# Quantities whose gradients are needed: the velocity components.
function compute_gradient_argument!(
    ::MMSModel,
    transformstate::Vars,
    state::Vars,
    auxstate::Vars,
    t::Real,
)
    ρinv = 1 / state.ρ
    transformstate.u = ρinv * state.ρu
    transformstate.v = ρinv * state.ρv
    transformstate.w = ρinv * state.ρw
end

# Deviatoric (traceless) Newtonian stress tensor from the velocity
# gradients, with constant dynamic viscosity μ_exact.
function compute_gradient_flux!(
    ::MMSModel,
    diffusive::Vars,
    ∇transform::Grad,
    state::Vars,
    auxstate::Vars,
    t::Real,
)
    T = eltype(diffusive)
    μ = T(μ_exact) # constant emitted by mms_solution.jl
    dudx, dudy, dudz = ∇transform.u
    dvdx, dvdy, dvdz = ∇transform.v
    dwdx, dwdy, dwdz = ∇transform.w
    # strains
    ϵ11 = dudx
    ϵ22 = dvdy
    ϵ33 = dwdz
    ϵ12 = (dudy + dvdx) / 2
    ϵ13 = (dudz + dwdx) / 2
    ϵ23 = (dvdz + dwdy) / 2
    # deviatoric stresses
    diffusive.τ11 = 2μ * (ϵ11 - (ϵ11 + ϵ22 + ϵ33) / 3)
    diffusive.τ22 = 2μ * (ϵ22 - (ϵ11 + ϵ22 + ϵ33) / 3)
    diffusive.τ33 = 2μ * (ϵ33 - (ϵ11 + ϵ22 + ϵ33) / 3)
    diffusive.τ12 = 2μ * ϵ12
    diffusive.τ13 = 2μ * ϵ13
    diffusive.τ23 = 2μ * ϵ23
end

# Manufactured-solution forcing terms (S*_g, generated symbolically by
# mms_solution.jl) evaluated at the nodal coordinates stored in aux.
function source!(
    ::MMSModel{dim},
    source::Vars,
    state::Vars,
    diffusive::Vars,
    aux::Vars,
    t::Real,
    direction,
) where {dim}
    source.ρ = Sρ_g(t, aux.x1, aux.x2, aux.x3, Val(dim))
    source.ρu = SU_g(t, aux.x1, aux.x2, aux.x3, Val(dim))
    source.ρv = SV_g(t, aux.x1, aux.x2, aux.x3, Val(dim))
    source.ρw = SW_g(t, aux.x1, aux.x2, aux.x3, Val(dim))
    source.ρe = SE_g(t, aux.x1, aux.x2, aux.x3, Val(dim))
end

# Maximum characteristic speed |u⋅n| + sound speed, used by the
# Rusanov numerical flux.
function wavespeed(::MMSModel, nM, state::Vars, aux::Vars, t::Real, direction)
    T = eltype(state)
    γ = T(γ_exact)
    ρinv = 1 / state.ρ
    u, v, w = ρinv * state.ρu, ρinv * state.ρv, ρinv * state.ρw
    P = (γ - 1) * (state.ρe - ρinv * (state.ρu^2 + state.ρv^2 + state.ρw^2) / 2)
    return abs(nM[1] * u + nM[2] * v + nM[3] * w) + sqrt(ρinv * γ * P)
end

# A single boundary-condition "tag"; the actual boundary state is the
# exact manufactured solution imposed by the boundary_state! methods.
boundary_conditions(::MMSModel) = (nothing,)

# Dirichlet boundary state for the first-order (Rusanov) flux: evaluate
# the exact solution at the minus-side nodal coordinates.
function boundary_state!(
    ::RusanovNumericalFlux,
    bctype,
    bl::MMSModel,
    stateP::Vars,
    auxP::Vars,
    nM,
    stateM::Vars,
    auxM::Vars,
    t,
    _...,
)
    init_state_prognostic!(
        bl,
        stateP,
        auxP,
        (coord = (auxM.x1, auxM.x2, auxM.x3),),
        t,
    )
end

# FIXME: This is probably not right....
boundary_state!(::CentralNumericalFluxGradient, bc, bl::MMSModel, _...)
= nothing

# Dirichlet boundary state for the second-order (viscous) flux: again
# impose the exact manufactured solution on the plus side.
function boundary_state!(
    ::CentralNumericalFluxSecondOrder,
    bctype,
    bl::MMSModel,
    stateP::Vars,
    diffP::Vars,
    hyperdiffP::Vars,
    auxP::Vars,
    nM,
    stateM::Vars,
    diffM::Vars,
    hyperdiffM::Vars,
    auxM::Vars,
    t,
    _...,
)
    init_state_prognostic!(
        bl,
        stateP,
        auxP,
        (coord = (auxM.x1, auxM.x2, auxM.x3),),
        t,
    )
end

# Cache the nodal coordinates in the auxiliary state so that sources and
# boundary conditions can evaluate the exact solution pointwise.
function nodal_init_state_auxiliary!(
    ::MMSModel,
    aux::Vars,
    tmp::Vars,
    g::LocalGeometry,
)
    x1, x2, x3 = g.coord
    aux.x1 = x1
    aux.x2 = x2
    aux.x3 = x3
end

# Initial condition: evaluate the exact manufactured fields
# (*_g functions from mms_solution_generated.jl) at time t and the
# nodal coordinates.
function init_state_prognostic!(
    bl::MMSModel{dim},
    state::Vars,
    aux::Vars,
    localgeo,
    t,
) where {dim}
    (x1, x2, x3) = localgeo.coord
    state.ρ = ρ_g(t, x1, x2, x3, Val(dim))
    state.ρu = U_g(t, x1, x2, x3, Val(dim))
    state.ρv = V_g(t, x1, x2, x3, Val(dim))
    state.ρw = W_g(t, x1, x2, x3, Val(dim))
    state.ρe = E_g(t, x1, x2, x3, Val(dim))
end
================================================
FILE: test/Numerics/DGMethods/compressible_Navier_Stokes/mms_solution.jl
================================================
# This file generates the solution used in method of manufactured solutions
using LinearAlgebra, SymPy, Printf
using CLIMAParameters
using CLIMAParameters.Planet
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Symbolic space/time variables for the SymPy derivation.
@syms x y z t real = true

μ = 1 // 100 # constant dynamic viscosity of the manufactured flow
γ = cp_d(param_set) / cv_d(param_set) # ratio of specific heats

# Emit the exact solution and its forcing terms as Julia source that the
# MMS tests include as mms_solution_generated.jl.
output = open("mms_solution_generated.jl", "w")
@printf output "const γ_exact = %s\n" γ
@printf output "const μ_exact = %s\n" μ
for dim in 2:3
    # Smooth trigonometric manufactured fields (W ≡ 0 in the 2-D case).
    if dim == 3
        ρ = cos(π * t) * sin(π * x) * cos(π * y) * cos(π * z) + 3
        U = cos(π * t) * ρ * sin(π * x) * cos(π * y) * cos(π * z)
        V = cos(π * t) * ρ * sin(π * x) * cos(π * y) * cos(π * z)
        W = cos(π * t) * ρ * sin(π * x) * cos(π * y) * sin(π * z)
        E = cos(π * t) * sin(π * x) * cos(π * y) * cos(π * z) + 100
    else
        ρ = cos(π * t) * sin(π * x) * cos(π * y) + 3
        U = cos(π * t) * ρ * sin(π * x) * cos(π * y)
        V = cos(π * t) * ρ * sin(π * x) * cos(π * y)
        W = cos(π * t) * 0
        E = cos(π * t) * sin(π * x) * cos(π * y) + 100
    end
    # Ideal-gas pressure and primitive velocities.
    P = (γ - 1) * (E - (U^2 + V^2 + W^2) / 2ρ)
    u, v, w = U / ρ, V / ρ, W / ρ
    # Velocity gradients.
    dudx, dudy, dudz = diff(u, x), diff(u, y), diff(u, z)
    dvdx, dvdy, dvdz = diff(v, x), diff(v, y), diff(v, z)
    dwdx, dwdy, dwdz = diff(w, x), diff(w, y), diff(w, z)
    # Strain rates and deviatoric Newtonian stresses (matching the
    # discrete model in mms_model.jl / the Atmos viscosity closure).
    ϵ11 = dudx
    ϵ22 = dvdy
    ϵ33 = dwdz
    ϵ12 = (dudy + dvdx) / 2
    ϵ13 = (dudz + dwdx) / 2
    ϵ23 = (dvdz + dwdy) / 2
    τ11 = 2μ * (ϵ11 - (ϵ11 + ϵ22 + ϵ33) / 3)
    τ22 = 2μ * (ϵ22 - (ϵ11 + ϵ22 + ϵ33) / 3)
    τ33 = 2μ * (ϵ33 - (ϵ11 + ϵ22 + ϵ33) / 3)
    τ12 = τ21 = 2μ * ϵ12
    τ13 = τ31 = 2μ * ϵ13
    τ23 = τ32 = 2μ * ϵ23
    # Divergence of the full (inviscid + viscous) flux per direction,
    # ordered as [mass, x-mom, y-mom, z-mom, energy].
    Fx_x = diff.(
        [
            U
            u * U + P - τ11
            u * V - τ12
            u * W - τ13
            u * (E + P) - u * τ11 - v * τ12 - w * τ13
        ],
        x,
    )
    Fy_y = diff.(
        [
            V
            v * U - τ21
            v * V + P - τ22
            v * W - τ23
            v * (E + P) - u * τ21 - v * τ22 - w * τ23
        ],
        y,
    )
    Fz_z = diff.(
        [
            W
            w * U - τ31
            w * V - τ32
            w * W + P - τ33
            w * (E + P) - u * τ31 - v * τ32 - w * τ33
        ],
        z,
    )
    # Residual of the PDE applied to the manufactured fields = the
    # source terms that force the discrete solution toward them.
    dρdt = simplify(Fx_x[1] + Fy_y[1] + Fz_z[1] + diff(ρ, t))
    dUdt = simplify(Fx_x[2] + Fy_y[2] + Fz_z[2] + diff(U, t))
    dVdt = simplify(Fx_x[3] + Fy_y[3] + Fz_z[3] + diff(V, t))
    dWdt = simplify(Fx_x[4] + Fy_y[4] + Fz_z[4] + diff(W, t))
    dEdt = simplify(Fx_x[5] + Fy_y[5] + Fz_z[5] + diff(E, t))
    # Exact fields *_g and sources S*_g, dispatched on Val(dim).
    @printf output "@noinline ρ_g(t, x, y, z, ::Val{%d}) = %s\n" dim ρ
    @printf output "@noinline U_g(t, x, y, z, ::Val{%d}) = %s\n" dim U
    @printf output "@noinline V_g(t, x, y, z, ::Val{%d}) = %s\n" dim V
    @printf output "@noinline W_g(t, x, y, z, ::Val{%d}) = %s\n" dim W
    @printf output "@noinline E_g(t, x, y, z, ::Val{%d}) = %s\n" dim E
    @printf output "@noinline Sρ_g(t, x, y, z, ::Val{%d}) = %s\n" dim dρdt
    @printf output "@noinline SU_g(t, x, y, z, ::Val{%d}) = %s\n" dim dUdt
    @printf output "@noinline SV_g(t, x, y, z, ::Val{%d}) = %s\n" dim dVdt
    @printf output "@noinline SW_g(t, x, y, z, ::Val{%d}) = %s\n" dim dWdt
    @printf output "@noinline SE_g(t, x, y, z, ::Val{%d}) = %s\n" dim dEdt
end
close(output)
================================================
FILE: test/Numerics/DGMethods/compressible_Navier_Stokes/mms_solution_generated.jl
================================================
const γ_exact = 1.4 const μ_exact = 1 // 100 @noinline ρ_g(t, x, y, z, ::Val{2}) = sin(pi * x) * cos(pi * t) * cos(pi * y) + 3 @noinline U_g(t, x, y, z, ::Val{2}) = (sin(pi * x) * cos(pi * t) * cos(pi * y) + 3) * sin(pi * x) * cos(pi * t) * cos(pi * y) @noinline V_g(t, x, y, z, ::Val{2}) = (sin(pi * x) * cos(pi * t) * cos(pi * y) + 3) * sin(pi * x) * cos(pi * t) * cos(pi * y) @noinline W_g(t, x, y, z, ::Val{2}) = 0 @noinline E_g(t, x, y, z, ::Val{2}) = sin(pi * x) * cos(pi * t) * cos(pi * y) + 100 @noinline Sρ_g(t, x, y, z, ::Val{2}) = pi * ( 2 * sin(2 * pi * x) - 2 * sin(2 * pi * y) - sin(pi * (2 * t - 2 * x)) + sin(pi * (2 * t + 2 * x)) + sin(pi * (2 * t - 2 * y)) - sin(pi * (2 * t + 2 * y)) + 2 * sin(pi * (2 * x + 2 * y)) + sin(pi * (-2 * t + 2 * x + 2 * y)) + sin(pi * (2 * t + 2 * x + 2 * y)) + 10 * cos(pi * (-t + x + y)) - 2 * cos(pi * (t - x + y)) + 2 * cos(pi * (t + x - y)) + 14 * cos(pi * (t + x + y)) ) / 8 @noinline SU_g(t, x, y, z, ::Val{2}) = pi * ( -2.0 * sin(pi * t) * sin(pi * x)^2 * cos(pi * t) * cos(pi * y)^2 - 3.0 * sin(pi * t) * sin(pi * x) * cos(pi * y) - 3.0 * sin(pi * x)^3 * sin(pi * y) * cos(pi * t)^3 * cos(pi * y)^2 - 6.0 * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) + 1.8 * sin(pi * x)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 + 3.6 * sin(pi * x) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 + 0.0233333333333333 * pi * sin(pi * x) * cos(pi * t) * cos(pi * y) + 0.00333333333333333 * pi * sin(pi * y) * cos(pi * t) * cos(pi * x) + 0.4 * cos(pi * t) * cos(pi * x) * cos(pi * y) ) @noinline SV_g(t, x, y, z, ::Val{2}) = pi * ( -2.0 * sin(pi * t) * sin(pi * x)^2 * cos(pi * t) * cos(pi * y)^2 - 3.0 * sin(pi * t) * sin(pi * x) * cos(pi * y) - 1.8 * sin(pi * x)^3 * sin(pi * y) * cos(pi * t)^3 * cos(pi * y)^2 - 3.6 * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) + 3.0 * sin(pi * x)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 - 0.4 * sin(pi * x) * sin(pi * y) * cos(pi * t) + 6.0 * sin(pi * x) * cos(pi * t)^2 * cos(pi * 
x) * cos(pi * y)^2 + 0.0233333333333333 * pi * sin(pi * x) * cos(pi * t) * cos(pi * y) + 0.00333333333333333 * pi * sin(pi * y) * cos(pi * t) * cos(pi * x) ) @noinline SW_g(t, x, y, z, ::Val{2}) = 0 @noinline SE_g(t, x, y, z, ::Val{2}) = pi * ( -1.0 * sin(pi * t) * sin(pi * x) * cos(pi * y) + 1.6 * sin(pi * x)^4 * sin(pi * y) * cos(pi * t)^4 * cos(pi * y)^3 + 3.6 * sin(pi * x)^3 * sin(pi * y) * cos(pi * t)^3 * cos(pi * y)^2 - 1.6 * sin(pi * x)^3 * cos(pi * t)^4 * cos(pi * x) * cos(pi * y)^4 - 0.0233333333333333 * pi * sin(pi * x)^2 * sin(pi * y)^2 * cos(pi * t)^2 - 2.8 * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) - 3.6 * sin(pi * x)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 + 0.0466666666666667 * pi * sin(pi * x)^2 * cos(pi * t)^2 * cos(pi * y)^2 + 0.0133333333333333 * pi * sin(pi * x) * sin(pi * y) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y) - 140.0 * sin(pi * x) * sin(pi * y) * cos(pi * t) + 2.8 * sin(pi * x) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 - 0.0233333333333333 * pi * cos(pi * t)^2 * cos(pi * x)^2 * cos(pi * y)^2 + 140.0 * cos(pi * t) * cos(pi * x) * cos(pi * y) ) @noinline ρ_g(t, x, y, z, ::Val{3}) = sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) + 3 @noinline U_g(t, x, y, z, ::Val{3}) = (sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) + 3) * sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) @noinline V_g(t, x, y, z, ::Val{3}) = (sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) + 3) * sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) @noinline W_g(t, x, y, z, ::Val{3}) = (sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) + 3) * sin(pi * x) * sin(pi * z) * cos(pi * t) * cos(pi * y) @noinline E_g(t, x, y, z, ::Val{3}) = sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) + 100 @noinline Sρ_g(t, x, y, z, ::Val{3}) = pi * ( -sin(pi * t) * sin(pi * x) * cos(pi * y) * cos(pi * z) - 2 * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) * cos(pi * z)^2 - sin(pi * x)^2 * sin(pi * z)^2 * cos(pi * 
t)^2 * cos(pi * y)^2 + sin(pi * x)^2 * cos(pi * t)^2 * cos(pi * y)^2 * cos(pi * z)^2 - 3 * sin(pi * x) * sin(pi * y) * cos(pi * t) * cos(pi * z) + 2 * sin(pi * x) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 * cos(pi * z)^2 + 3 * sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) + 3 * cos(pi * t) * cos(pi * x) * cos(pi * y) * cos(pi * z) ) @noinline SU_g(t, x, y, z, ::Val{3}) = pi * ( -2.0 * sin(pi * t) * sin(pi * x)^2 * cos(pi * t) * cos(pi * y)^2 * cos(pi * z)^2 - 3.0 * sin(pi * t) * sin(pi * x) * cos(pi * y) * cos(pi * z) - 3.0 * sin(pi * x)^3 * sin(pi * y) * cos(pi * t)^3 * cos(pi * y)^2 * cos(pi * z)^3 - 2.0 * sin(pi * x)^3 * sin(pi * z)^2 * cos(pi * t)^3 * cos(pi * y)^3 * cos(pi * z) + 1.0 * sin(pi * x)^3 * cos(pi * t)^3 * cos(pi * y)^3 * cos(pi * z)^3 - 6.0 * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) * cos(pi * z)^2 - 0.6 * sin(pi * x)^2 * sin(pi * z)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 * cos(pi * z) - 3.0 * sin(pi * x)^2 * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * y)^2 + 1.8 * sin(pi * x)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 * cos(pi * z)^3 + 3.0 * sin(pi * x)^2 * cos(pi * t)^2 * cos(pi * y)^2 * cos(pi * z)^2 - 1.2 * sin(pi * x) * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 + 3.6 * sin(pi * x) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 * cos(pi * z)^2 + 0.0333333333333333 * pi * sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) + 0.00333333333333333 * pi * sin(pi * y) * cos(pi * t) * cos(pi * x) * cos(pi * z) - 0.00333333333333333 * pi * cos(pi * t) * cos(pi * x) * cos(pi * y) * cos(pi * z) + 0.4 * cos(pi * t) * cos(pi * x) * cos(pi * y) * cos(pi * z) ) @noinline SV_g(t, x, y, z, ::Val{3}) = pi * ( -2.0 * sin(pi * t) * sin(pi * x)^2 * cos(pi * t) * cos(pi * y)^2 * cos(pi * z)^2 - 3.0 * sin(pi * t) * sin(pi * x) * cos(pi * y) * cos(pi * z) + 0.6 * sin(pi * x)^3 * sin(pi * y) * sin(pi * z)^2 * cos(pi * t)^3 * cos(pi * y)^2 * cos(pi * z) - 1.8 * sin(pi * x)^3 * sin(pi * y) * cos(pi * t)^3 * 
cos(pi * y)^2 * cos(pi * z)^3 - 2.0 * sin(pi * x)^3 * sin(pi * z)^2 * cos(pi * t)^3 * cos(pi * y)^3 * cos(pi * z) + 1.0 * sin(pi * x)^3 * cos(pi * t)^3 * cos(pi * y)^3 * cos(pi * z)^3 + 1.2 * sin(pi * x)^2 * sin(pi * y) * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * y) - 3.6 * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) * cos(pi * z)^2 - 3.0 * sin(pi * x)^2 * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * y)^2 + 3.0 * sin(pi * x)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 * cos(pi * z)^3 + 3.0 * sin(pi * x)^2 * cos(pi * t)^2 * cos(pi * y)^2 * cos(pi * z)^2 - 0.4 * sin(pi * x) * sin(pi * y) * cos(pi * t) * cos(pi * z) + 0.00333333333333333 * pi * sin(pi * x) * sin(pi * y) * cos(pi * t) * cos(pi * z) + 6.0 * sin(pi * x) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 * cos(pi * z)^2 + 0.0333333333333333 * pi * sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) + 0.00333333333333333 * pi * sin(pi * y) * cos(pi * t) * cos(pi * x) * cos(pi * z) ) @noinline SW_g(t, x, y, z, ::Val{3}) = pi * ( -2.0 * sin(pi * t) * sin(pi * x)^2 * cos(pi * t) * cos(pi * y)^2 * cos(pi * z) - 3.0 * sin(pi * t) * sin(pi * x) * cos(pi * y) - 3.0 * sin(pi * x)^3 * sin(pi * y) * cos(pi * t)^3 * cos(pi * y)^2 * cos(pi * z)^2 - 0.8 * sin(pi * x)^3 * sin(pi * z)^2 * cos(pi * t)^3 * cos(pi * y)^3 + 2.8 * sin(pi * x)^3 * cos(pi * t)^3 * cos(pi * y)^3 * cos(pi * z)^2 - 6.0 * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) * cos(pi * z) + 3.0 * sin(pi * x)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 * cos(pi * z)^2 + 7.2 * sin(pi * x)^2 * cos(pi * t)^2 * cos(pi * y)^2 * cos(pi * z) - 0.00333333333333333 * pi * sin(pi * x) * sin(pi * y) * cos(pi * t) + 6.0 * sin(pi * x) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 * cos(pi * z) - 0.4 * sin(pi * x) * cos(pi * t) * cos(pi * y) + 0.0333333333333333 * pi * sin(pi * x) * cos(pi * t) * cos(pi * y) + 0.00333333333333333 * pi * cos(pi * t) * cos(pi * x) * cos(pi * y) ) * sin(pi * z) @noinline SE_g(t, x, y, z, ::Val{3}) = pi * ( 
0.001171875 * (1 - cos(2 * pi * x))^2 * (1 - cos(4 * pi * z)) * (cos(2 * pi * t) + 1)^2 * (cos(2 * pi * y) + 1)^2 - 1.0 * sin(pi * t) * sin(pi * x) * cos(pi * y) * cos(pi * z) + 0.8 * sin(pi * x)^4 * sin(pi * y) * sin(pi * z)^2 * cos(pi * t)^4 * cos(pi * y)^3 * cos(pi * z)^2 + 1.6 * sin(pi * x)^4 * sin(pi * y) * cos(pi * t)^4 * cos(pi * y)^3 * cos(pi * z)^4 + 0.2 * sin(pi * x)^4 * sin(pi * z)^4 * cos(pi * t)^4 * cos(pi * y)^4 - 0.4 * sin(pi * x)^4 * cos(pi * t)^4 * cos(pi * y)^4 * cos(pi * z)^4 + 1.8 * sin(pi * x)^3 * sin(pi * y) * sin(pi * z)^2 * cos(pi * t)^3 * cos(pi * y)^2 * cos(pi * z) + 3.6 * sin(pi * x)^3 * sin(pi * y) * cos(pi * t)^3 * cos(pi * y)^2 * cos(pi * z)^3 - 0.8 * sin(pi * x)^3 * sin(pi * z)^2 * cos(pi * t)^4 * cos(pi * x) * cos(pi * y)^4 * cos(pi * z)^2 + 0.6 * sin(pi * x)^3 * sin(pi * z)^2 * cos(pi * t)^3 * cos(pi * y)^3 * cos(pi * z) - 1.6 * sin(pi * x)^3 * cos(pi * t)^4 * cos(pi * x) * cos(pi * y)^4 * cos(pi * z)^4 - 1.2 * sin(pi * x)^3 * cos(pi * t)^3 * cos(pi * y)^3 * cos(pi * z)^3 - 0.01 * pi * sin(pi * x)^2 * sin(pi * y)^2 * sin(pi * z)^2 * cos(pi * t)^2 - 0.0233333333333333 * pi * sin(pi * x)^2 * sin(pi * y)^2 * cos(pi * t)^2 * cos(pi * z)^2 - 0.0233333333333333 * pi * sin(pi * x)^2 * sin(pi * y) * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * y) - 2.8 * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) * cos(pi * z)^2 - 0.01 * pi * sin(pi * x)^2 * sin(pi * y) * cos(pi * t)^2 * cos(pi * y) * cos(pi * z)^2 - 1.8 * sin(pi * x)^2 * sin(pi * z)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 * cos(pi * z) - 1.4 * sin(pi * x)^2 * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * y)^2 + 0.0133333333333333 * pi * sin(pi * x)^2 * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * y)^2 - 3.6 * sin(pi * x)^2 * cos(pi * t)^3 * cos(pi * x) * cos(pi * y)^3 * cos(pi * z)^3 + 0.0533333333333333 * pi * sin(pi * x)^2 * cos(pi * t)^2 * cos(pi * y)^2 * cos(pi * z)^2 + 1.4 * sin(pi * x)^2 * cos(pi * t)^2 * cos(pi * y)^2 * cos(pi * z)^2 + 0.0133333333333333 * pi * sin(pi * x) 
* sin(pi * y) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y) * cos(pi * z)^2 - 140.0 * sin(pi * x) * sin(pi * y) * cos(pi * t) * cos(pi * z) + 0.0233333333333333 * pi * sin(pi * x) * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 + 0.01 * pi * sin(pi * x) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 * cos(pi * z)^2 + 2.8 * sin(pi * x) * cos(pi * t)^2 * cos(pi * x) * cos(pi * y)^2 * cos(pi * z)^2 + 140.0 * sin(pi * x) * cos(pi * t) * cos(pi * y) * cos(pi * z) - 0.01 * pi * sin(pi * z)^2 * cos(pi * t)^2 * cos(pi * x)^2 * cos(pi * y)^2 - 0.0233333333333333 * pi * cos(pi * t)^2 * cos(pi * x)^2 * cos(pi * y)^2 * cos(pi * z)^2 + 140.0 * cos(pi * t) * cos(pi * x) * cos(pi * y) * cos(pi * z) ) ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/plotting/bigfileofstuff.jl ================================================ using ClimateMachine using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Elements import ClimateMachine.Mesh.Elements: baryweights using ClimateMachine.Mesh.Grids: polynomialorders using GaussQuadrature using Base.Threads # Depending on CliMa version # old, should return a tuple of polynomial orders # polynomialorders(::DiscontinuousSpectralElementGrid{T, dim, N}) where {T, dim, N} = Tuple([N for i in 1:dim]) # new, should return a tuple of polynomial orders # polynomialorders(::DiscontinuousSpectralElementGrid{T, dim, N}) where {T, dim, N} = N # utils.jl """ function cellaverage(Q; M = nothing) # Description Compute the cell-average of Q given the mass matrix M. 
Assumes that Q and M are the same size # Arguments `Q`: MPIStateArrays (array) # Keyword Arguments `M`: Mass Matrix (array) # Return The cell-average of Q """ function cellaverage(Q; M = nothing) if M == nothing return nothing end return (sum(M .* Q, dims = 1) ./ sum(M, dims = 1))[:] end """ function coordinates(grid::DiscontinuousSpectralElementGrid) # Description Gets the (x,y,z) coordinates corresponding to the grid # Arguments - `grid`: DiscontinuousSpectralElementGrid # Return - `x, y, z`: views of x, y, z coordinates """ function coordinates(grid::DiscontinuousSpectralElementGrid) x = view(grid.vgeo, :, grid.x1id, :) # x-direction y = view(grid.vgeo, :, grid.x2id, :) # y-direction z = view(grid.vgeo, :, grid.x3id, :) # z-direction return x, y, z end """ function cellcenters(Q; M = nothing) # Description Get the cell-centers of every element in the grid # Arguments - `grid`: DiscontinuousSpectralElementGrid # Return - Tuple of cell-centers """ function cellcenters(grid::DiscontinuousSpectralElementGrid) x, y, z = coordinates(grid) M = view(grid.vgeo, :, grid.Mid, :) # mass matrix xC = cellaverage(x, M = M) yC = cellaverage(y, M = M) zC = cellaverage(z, M = M) return xC[:], yC[:], zC[:] end """ function massmatrix(grid; M = nothing) # Description Get the mass matrix of the grid # Arguments - `grid`: DiscontinuousSpectralElementGrid # Return - Tuple of cell-centers """ function massmatrix(grid) return view(grid.vgeo, :, grid.Mid, :) end # find_element.jl # 3D version function findelement(xC, yC, zC, location, p, lin) ex, ey, ez = size(lin) # i currentmin = ones(1) minind = ones(Int64, 1) currentmin[1] = abs.(xC[p[lin[1, 1, 1]]] .- location[1]) for i in 2:ex current = abs.(xC[p[lin[i, 1, 1]]] .- location[1]) if current < currentmin[1] currentmin[1] = current minind[1] = i end end i = minind[1] # j currentmin[1] = abs.(yC[p[lin[1, 1, 1]]] .- location[2]) minind[1] = 1 for i in 2:ey current = abs.(yC[p[lin[1, i, 1]]] .- location[2]) if current < currentmin[1] 
currentmin[1] = current
            minind[1] = i
        end
    end
    j = minind[1]
    # k
    currentmin[1] = abs.(zC[p[lin[1, 1, 1]]] .- location[3])
    minind[1] = 1
    for i in 2:ez
        current = abs.(zC[p[lin[1, 1, i]]] .- location[3])
        if current < currentmin[1]
            currentmin[1] = current
            minind[1] = i
        end
    end
    k = minind[1]
    return p[lin[i, j, k]]
end

# 2D version
"""
    findelement(xC, yC, location, p, lin)

Return the (permuted) element index whose cell center is closest to
`location`, searching each Cartesian direction independently along the
first row/column of elements. Only valid for Cartesian element layouts.
"""
function findelement(xC, yC, location, p, lin)
    ex, ey = size(lin)
    # `argmin` returns the first index attaining the minimum, matching the
    # original strict-`<` scan semantics.
    i = argmin([abs(xC[p[lin[ii, 1]]] - location[1]) for ii in 1:ex])
    j = argmin([abs(yC[p[lin[1, jj]]] - location[2]) for jj in 1:ey])
    return p[lin[i, j]]
end

# gridhelper.jl
"""
    InterpolationHelper

Per-element interpolation data: Gauss–Lobatto `points`, `quadrature`
weights, barycentric `interpolation` weights, and the local Cartesian
index map over the nodes of one element.
"""
struct InterpolationHelper{S, T}
    points::S
    quadrature::S
    interpolation::S
    cartesianindex::T
end

function InterpolationHelper(g::DiscontinuousSpectralElementGrid)
    porders = polynomialorders(g)
    if length(porders) == 3
        npx, npy, npz = porders
        # `both` selects Gauss–Lobatto nodes (both endpoints included).
        rx, wx = GaussQuadrature.legendre(npx + 1, both)
        ωx = baryweights(rx)
        ry, wy = GaussQuadrature.legendre(npy + 1, both)
        ωy = baryweights(ry)
        rz, wz = GaussQuadrature.legendre(npz + 1, both)
        ωz = baryweights(rz)
        linlocal = reshape(
            collect(1:((npx + 1) * (npy + 1) * (npz + 1))),
            (npx + 1, npy + 1, npz + 1),
        )
        return InterpolationHelper(
            (rx, ry, rz),
            (wx, wy, wz),
            (ωx, ωy, ωz),
            linlocal,
        )
    elseif length(porders) == 2
        npx, npy = porders
        rx, wx = GaussQuadrature.legendre(npx + 1, both)
        ωx = baryweights(rx)
        ry, wy = GaussQuadrature.legendre(npy + 1, both)
        ωy = baryweights(ry)
        linlocal =
            reshape(collect(1:((npx + 1) * (npy + 1))), (npx + 1, npy + 1))
        return InterpolationHelper((rx, ry), (wx, wy), (ωx, ωy), linlocal)
    else
        # Only 2D and 3D grids are supported.
        println("Not supported")
        return nothing
    end
end

"""
    ElementHelper

Element-layout data for a Cartesian grid: cell centers, nodal
coordinates, the per-direction element counts, polynomial orders, the
sorting permutation, and the Cartesian index map over elements.
"""
struct ElementHelper{S, T, U, Q, V, W}
    cellcenters::S
coordinates::T
    cartesiansizes::U
    polynomialorders::Q
    permutation::V
    cartesianindex::W
end

# Count of entries of `xC` coinciding with its first entry to tolerance
# `tol`; used to infer how many elements share a coordinate plane.
addup(xC, tol) = sum(abs.(xC[1] .- xC) .≤ tol)

"""
    ElementHelper(g::DiscontinuousSpectralElementGrid)

Build an `ElementHelper` for `g`. Only valid for Cartesian domains.
Raises "improper counting" when the inferred per-direction element
counts are inconsistent with the total number of elements.
"""
function ElementHelper(g::DiscontinuousSpectralElementGrid)
    porders = polynomialorders(g)
    x, y, z = coordinates(g)
    xC, yC, zC = cellcenters(g)
    ne = size(x)[2]
    # Elements per direction, inferred by counting coincident cell centers
    # with a scale-aware tolerance.
    ex = round(Int64, ne / addup(xC, 10^4 * eps(maximum(abs.(x)))))
    ey = round(Int64, ne / addup(yC, 10^4 * eps(maximum(abs.(y)))))
    ez = round(Int64, ne / addup(zC, 10^4 * eps(maximum(abs.(z)))))
    ne == ex * ey * ez || error("improper counting")
    p = getperm(xC, yC, zC, ex, ey, ez)
    # should use dispatch ...
    if length(porders) == 3
        lin = reshape(collect(1:length(xC)), (ex, ey, ez))
        return ElementHelper(
            (xC, yC, zC),
            (x, y, z),
            (ex, ey, ez),
            porders,
            p,
            lin,
        )
    elseif length(porders) == 2
        lin = reshape(collect(1:length(xC)), (ex, ey))
        ne == ex * ey || error("improper counting")
        return ElementHelper((xC, yC), (x, y), (ex, ey), porders, p, lin)
    else
        println("no constructor for polynomial order = ", porders)
        return nothing
    end
end

"""
    GridHelper

Bundles an `InterpolationHelper`, an `ElementHelper`, and the grid itself.
"""
struct GridHelper{S, T, V}
    interpolation::S
    element::T
    grid::V
end

function GridHelper(g::DiscontinuousSpectralElementGrid)
    return GridHelper(InterpolationHelper(g), ElementHelper(g), g)
end

"""
    getvalue(f, location, gridhelper::GridHelper)

Interpolate the nodal field `f` to the physical point `location` using
the element and interpolation data in `gridhelper`.
"""
function getvalue(f, location, gridhelper::GridHelper)
    ih = gridhelper.interpolation
    eh = gridhelper.element
    porders = gridhelper.element.polynomialorders
    if length(porders) == 3
        npx, npy, npz = gridhelper.element.polynomialorders
        fl = reshape(f, (npx + 1, npy + 1, npz + 1, prod(eh.cartesiansizes)))
        ip = getvalue(
            fl,
            eh.cellcenters...,
            location,
            eh.permutation,
            eh.cartesianindex,
            ih.cartesianindex,
            eh.coordinates...,
            ih.points...,
            ih.interpolation...,
        )
        return ip
    elseif length(porders) == 2
        npx, npy = gridhelper.element.polynomialorders
        fl = reshape(f, (npx + 1, npy + 1, prod(eh.cartesiansizes)))
        ip = getvalue(
            fl,
            eh.cellcenters...,
location,
            eh.permutation,
            eh.cartesianindex,
            ih.cartesianindex,
            eh.coordinates...,
            ih.points...,
            ih.interpolation...,
        )
        return ip
    end
    return nothing
end

# lagrange_interpolation.jl

"""
    checkgl(x, rx)

Return the index `i` such that `x` coincides (to within `eps(rx[i])`)
with the node `rx[i]`, or `0` when `x` matches no node. Used to avoid
division by zero in the barycentric formula.
"""
function checkgl(x, rx)
    for i in eachindex(rx)
        if abs(x - rx[i]) ≤ eps(rx[i])
            return i
        end
    end
    return 0
end

"""
    lagrange_eval(f, newx, newy, newz, rx, ry, rz, ωx, ωy, ωz)

3D barycentric Lagrange interpolation of the nodal values `f` at the
point `(newx, newy, newz)`, with nodes `rx, ry, rz` and barycentric
weights `ωx, ωy, ωz`. Points that coincide with a node are handled via
`checkgl` so no division by zero occurs.

NOTE(review): assigning to a loop variable (e.g. `k = eachindex(rz)[end]`)
does not shorten a Julia `for` loop; when a coordinate sits on a node the
same contribution is accumulated once per iteration, but the common
factor cancels in the numerator/denominator ratio, so the result is
still correct (just redundant work).
"""
function lagrange_eval(f, newx, newy, newz, rx, ry, rz, ωx, ωy, ωz)
    icheck = checkgl(newx, rx)
    jcheck = checkgl(newy, ry)
    kcheck = checkgl(newz, rz)
    numerator = zeros(1)
    denominator = zeros(1)
    for k in eachindex(rz)
        if kcheck == 0
            # Off-node: standard barycentric pole ω/(x - x_k).
            Δz = (newz .- rz[k])
            polez = ωz[k] ./ Δz
            kk = k
        else
            # On-node: pick the matching node's value with weight 1.
            polez = 1.0
            k = eachindex(rz)[end]
            kk = kcheck
        end
        for j in eachindex(ry)
            if jcheck == 0
                Δy = (newy .- ry[j])
                poley = ωy[j] ./ Δy
                jj = j
            else
                poley = 1.0
                j = eachindex(ry)[end]
                jj = jcheck
            end
            for i in eachindex(rx)
                if icheck == 0
                    Δx = (newx .- rx[i])
                    polex = ωx[i] ./ Δx
                    ii = i
                else
                    polex = 1.0
                    i = eachindex(rx)[end]
                    ii = icheck
                end
                numerator[1] += f[ii, jj, kk] * polex * poley * polez
                denominator[1] += polex * poley * polez
            end
        end
    end
    return numerator[1] / denominator[1]
end

"""
    lagrange_eval(f, newx, newy, rx, ry, ωx, ωy)

2D barycentric Lagrange interpolation of nodal values `f` at
`(newx, newy)`; see the 3D method for the on-node handling.
"""
function lagrange_eval(f, newx, newy, rx, ry, ωx, ωy)
    icheck = checkgl(newx, rx)
    jcheck = checkgl(newy, ry)
    numerator = zeros(1)
    denominator = zeros(1)
    for j in eachindex(ry)
        if jcheck == 0
            Δy = (newy .- ry[j])
            poley = ωy[j] ./ Δy
            jj = j
        else
            poley = 1.0
            j = eachindex(ry)[end]
            jj = jcheck
        end
        for i in eachindex(rx)
            if icheck == 0
                Δx = (newx .- rx[i])
                polex = ωx[i] ./ Δx
                ii = i
            else
                polex = 1.0
                i = eachindex(rx)[end]
                ii = icheck
            end
            numerator[1] += f[ii, jj] * polex * poley
            denominator[1] += polex * poley
        end
    end
    return numerator[1] / denominator[1]
end

"""
    lagrange_eval_nocheck(f, newx, newy, newz, rx, ry, rz, ωx, ωy, ωz)

3D barycentric interpolation without the on-node check; divides by zero
if the evaluation point coincides exactly with a node.
"""
function lagrange_eval_nocheck(f, newx, newy, newz, rx, ry, rz, ωx, ωy, ωz)
    numerator = zeros(1)
    denominator = zeros(1)
    for k in eachindex(rz)
        Δz = (newz .- rz[k])
        polez = ωz[k] ./ Δz
        for j in eachindex(ry)
            Δy = (newy .- ry[j])
            poley = ωy[j] ./ Δy
            for i in eachindex(rx)
                Δx = (newx .- rx[i])
                polex = ωx[i] ./ Δx
                numerator[1] += f[i, j, k] * polex * poley * polez
                denominator[1] +=
polex * poley * polez
            end
        end
    end
    return numerator[1] / denominator[1]
end

"""
    lagrange_eval_nocheck(f, newx, newy, rx, ry, ωx, ωy)

2D barycentric Lagrange interpolation without the on-node check;
divides by zero if `(newx, newy)` coincides exactly with a node.
"""
function lagrange_eval_nocheck(f, newx, newy, rx, ry, ωx, ωy)
    numerator = zeros(1)
    denominator = zeros(1)
    for j in eachindex(ry)
        Δy = (newy .- ry[j])
        poley = ωy[j] ./ Δy
        for i in eachindex(rx)
            Δx = (newx .- rx[i])
            polex = ωx[i] ./ Δx
            numerator[1] += f[i, j] * polex * poley
            denominator[1] += polex * poley
        end
    end
    return numerator[1] / denominator[1]
end

"""
    lagrange_eval_nocheck(f, newx, rx, ωx)

1D barycentric Lagrange interpolation without the on-node check.
"""
function lagrange_eval_nocheck(f, newx, rx, ωx)
    numerator = zeros(1)
    denominator = zeros(1)
    for i in eachindex(rx)
        Δx = (newx .- rx[i])
        polex = ωx[i] ./ Δx
        # Bug fix: the original multiplied by `poley`, which is undefined in
        # this 1D method — every call raised an UndefVarError. The 1D
        # barycentric formula only involves the x pole.
        numerator[1] += f[i] * polex
        denominator[1] += polex
    end
    return numerator[1] / denominator[1]
end

# 3D, only valid for rectangles
"""
    getvalue(fl, xC, yC, zC, location, p, lin, linlocal, x, y, z, rx, ry, rz, ωx, ωy, ωz)

Locate the element containing `location`, rescale the point into the
reference cube [-1,1]³, and interpolate `fl` there. Only valid for
Cartesian (rectangular) elements.
"""
function getvalue(
    fl,
    xC,
    yC,
    zC,
    location,
    p,
    lin,
    linlocal,
    x,
    y,
    z,
    rx,
    ry,
    rz,
    ωx,
    ωy,
    ωz,
)
    e = findelement(xC, yC, zC, location, p, lin)
    # need bounds to rescale, only valid for cartesian
    xmax = x[linlocal[length(rx), 1, 1], e]
    xmin = x[linlocal[1, 1, 1], e]
    ymax = y[linlocal[1, length(ry), 1], e]
    ymin = y[linlocal[1, 1, 1], e]
    zmax = z[linlocal[1, 1, length(rz)], e]
    zmin = z[linlocal[1, 1, 1], e]
    # rescale new point to [-1,1]³
    newx = 2 * (location[1] - xmin) / (xmax - xmin) - 1
    newy = 2 * (location[2] - ymin) / (ymax - ymin) - 1
    newz = 2 * (location[3] - zmin) / (zmax - zmin) - 1
    return lagrange_eval(
        view(fl, :, :, :, e),
        newx,
        newy,
        newz,
        rx,
        ry,
        rz,
        ωx,
        ωy,
        ωz,
    )
end

# 2D
"""
    getvalue(fl, xC, yC, location, p, lin, linlocal, x, y, rx, ry, ωx, ωy)

2D analogue of the 3D `getvalue`: locate the element, rescale the point
into the reference square [-1,1]², and interpolate.
"""
function getvalue(fl, xC, yC, location, p, lin, linlocal, x, y, rx, ry, ωx, ωy)
    e = findelement(xC, yC, location, p, lin)
    # need bounds to rescale
    xmax = x[linlocal[length(rx), 1, 1], e]
    xmin = x[linlocal[1, 1, 1], e]
    ymax = y[linlocal[1, length(ry), 1], e]
    ymin = y[linlocal[1, 1, 1], e]
    # rescale new point to [-1,1]²
    newx = 2 * (location[1] - xmin) / (xmax - xmin) - 1
    newy = 2 * (location[2] - ymin) / (ymax - ymin) - 1
    return lagrange_eval(view(fl, :, :, e), newx, newy, rx, ry, ωx, ωy)
end

# permutations.jl
function getperm(xC, yC, zC, ex, ey, ez)
    pz = sortperm(zC)
tmpY = reshape(yC[pz], (ex * ey, ez))
    # Sort within each z-slab by y, offsetting each slab's local permutation
    # into the global index space.
    tmp_py = [sortperm(tmpY[:, i]) for i in 1:ez]
    py = zeros(Int64, length(pz))
    for i in eachindex(tmp_py)
        n = length(tmp_py[i])
        ii = (i - 1) * n + 1
        py[ii:(ii + n - 1)] .= tmp_py[i] .+ ii .- 1
    end
    # Then sort within each (y,z)-column by x, same offsetting scheme.
    tmpX = reshape(xC[pz][py], (ex, ey * ez))
    tmp_px = [sortperm(tmpX[:, i]) for i in 1:(ey * ez)]
    px = zeros(Int64, length(pz))
    for i in eachindex(tmp_px)
        n = length(tmp_px[i])
        ii = (i - 1) * n + 1
        px[ii:(ii + n - 1)] .= tmp_px[i] .+ ii .- 1
    end
    # Compose the three sorts: p orders elements lexicographically in (x,y,z).
    p = [pz[py[px[i]]] for i in eachindex(px)]
    return p
end
================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/plotting/plot_output.jl ================================================
using JLD2
using GLMakie

# NOTE(review): hard-coded absolute path to one developer's machine —
# parameterize before reuse.
filename = "/Users/ballen/Projects/Clima/CLIMA/output/vtk_bickley_2D/bickley_jet.jld2"
DOF = 32
f = jldopen(filename, "r+")
include("bigfileofstuff.jl")
include("vizinanigans.jl")
include("ScalarFields.jl")
dg_grid = f["grid"]
gridhelper = GridHelper(dg_grid)
x, y, z = coordinates(dg_grid)
xC, yC, zC = cellcenters(dg_grid)
ϕ = ScalarField(copy(x), gridhelper)
newx = range(-2π, 2π, length = DOF)
newy = range(-2π, 2π, length = DOF)
# NOTE(review): `norm` is not explicitly imported here — presumably brought
# into scope by an include or an upstream `using`; confirm (LinearAlgebra?).
norm(f["100"])
##
Q = f["0"]
dof, nstates, nelems = size(Q)
states = Array{Float64, 3}[]
statenames = ["ρ", "ρu", "ρv", "ρθ"]
# One (DOF × DOF × 101) array per state; 101 = number of saved timesteps 0:100.
for i in 1:nstates
    state = zeros(length(newx), length(newy), 101)
    push!(states, state)
end
# Interpolate every saved timestep of every state onto the uniform grid.
for i in 0:100
    println("interpolating step " * string(i))
    Qⁱ = f[string(i)]
    for j in 1:nstates
        ϕ .= Qⁱ[:, j, :]
        states[j][:, :, i + 1] .= ϕ(newx, newy, threads = true)
    end
end
# f["states"] = states
close(f)
##
scene = volumeslice(states, statenames = statenames)
================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/plotting/vizinanigans.jl ================================================
using GLMakie, Statistics, Printf

"""
visualize(states::AbstractArray; statenames = string.(1:length(states)), quantiles = (0.1, 0.99), aspect =
(1,1,1), resolution = (1920, 1080), statistics = false, title = "Field = ") # Description Visualize 3D states # Arguments - `states`: Array{Array{Float64,3},1}. An array of arrays containing different fields # Keyword Arguments - `statenames`: Array{String,1}. An array of stringnames - `aspect`: Tuple{Int64,Int64,Float64}. Determines aspect ratio of box for volumes - `resolution`: Resolution of preliminary makie window - `statistics`: boolean. toggle for displaying statistics # Return - `scene`: Scene. A preliminary scene object for manipulation """ function visualize( states::AbstractArray; statenames = string.(1:length(states)), units = ["" for i in eachindex(states)], aspect = (1, 1, 1), resolution = (1920, 1080), statistics = false, title = "Field = ", bins = 300, ) # Create scene scene, layout = layoutscene(resolution = resolution) lscene = layout[2:4, 2:4] = LScene(scene) width = round(Int, resolution[1] / 4) # make menu 1/4 of preliminary resolution # Create choices and nodes stateindex = collect(1:length(states)) statenode = Node(stateindex[1]) colorchoices = [:balance, :thermal, :dense, :deep, :curl, :thermometer] colornode = Node(colorchoices[1]) if statistics llscene = layout[4, 1] = Axis( scene, xlabel = @lift(statenames[$statenode] * units[$statenode]), xlabelcolor = :black, ylabel = "pdf", ylabelcolor = :black, xlabelsize = 40, ylabelsize = 40, xticklabelsize = 25, yticklabelsize = 25, xtickcolor = :black, ytickcolor = :black, xticklabelcolor = :black, yticklabelcolor = :black, ) layout[3, 1] = Label(scene, "Statistics", width = width, textsize = 50) end # x,y,z are for determining the aspect ratio of the box if (typeof(aspect) <: Tuple) & (length(aspect) == 3) x, y, z = aspect else x, y, z = size(states[1]) end # Clim sliders upperclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.99) upperclim_node = upperclim_slider.value lowerclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.01) lowerclim_node 
= lowerclim_slider.value # Lift Nodes state = @lift(states[$statenode]) statename = @lift(statenames[$statenode]) clims = @lift(( quantile($state[:], $lowerclim_node), quantile($state[:], $upperclim_node), )) cmap_rgb = @lift(to_colormap($colornode)) titlename = @lift(title * $statename) # use padding and appropriate centering # Statistics if statistics histogram_node = @lift(histogram($state, bins = bins)) xs = @lift($histogram_node[1]) ys = @lift($histogram_node[2]) pdf = GLMakie.AbstractPlotting.barplot!( llscene, xs, ys, color = :red, strokecolor = :red, strokewidth = 1, ) @lift(GLMakie.AbstractPlotting.xlims!(llscene, extrema($state))) @lift(GLMakie.AbstractPlotting.ylims!( llscene, extrema($histogram_node[2]), )) vlines!( llscene, @lift($clims[1]), color = :black, linewidth = width / 100, ) vlines!( llscene, @lift($clims[2]), color = :black, linewidth = width / 100, ) end # Volume Plot volume!( lscene, 0..x, 0..y, 0..z, state, camera = cam3d!, colormap = cmap_rgb, colorrange = clims, ) # Camera cam = cameracontrols(scene.children[1]) eyeposition = Float32[2, 2, 1.3] lookat = Float32[0.82, 0.82, 0.1] # Title supertitle = layout[1, 2:4] = Label(scene, titlename, textsize = 50, color = :black) # Menus statemenu = Menu(scene, options = zip(statenames, stateindex)) on(statemenu.selection) do s statenode[] = s end colormenu = Menu(scene, options = zip(colorchoices, colorchoices)) on(colormenu.selection) do s colornode[] = s end lowerclim_string = @lift( "lower clim quantile = " * @sprintf("%0.2f", $lowerclim_node) * ", value = " * @sprintf("%0.1e", $clims[1]) ) upperclim_string = @lift( "upper clim quantile = " * @sprintf("%0.2f", $upperclim_node) * ", value = " * @sprintf("%0.1e", $clims[2]) ) # depends on makie version, vbox for old, vgrid for new layout[2, 1] = vgrid!( Label(scene, "State", width = nothing), statemenu, Label(scene, "Color", width = nothing), colormenu, Label(scene, lowerclim_string, width = nothing), lowerclim_slider, Label(scene, 
upperclim_string, width = nothing), upperclim_slider, ) layout[1, 1] = Label(scene, "Menu", width = width, textsize = 50) # Modify Axis axis = scene.children[1][OldAxis] # axis[:names][:axisnames] = ("↓ Zonal [m] ", "Meriodonal [m]↓ ", "Depth [m]↓ ") axis[:names][:axisnames] = ("↓", "↓ ", "↓ ") axis[:names][:align] = ((:left, :center), (:right, :center), (:right, :center)) # need to adjust size of ticks first and then size of axis names axis[:names][:textsize] = (50.0, 50.0, 50.0) axis[:ticks][:textsize] = (00.0, 00.0, 00.0) # axis[:ticks][:ranges_labels].val # current axis labels xticks = collect(range(-0, aspect[1], length = 2)) yticks = collect(range(-0, aspect[2], length = 6)) zticks = collect(range(-0, aspect[3], length = 2)) ticks = (xticks, yticks, zticks) axis[:ticks][:ranges] = ticks xtickslabels = [@sprintf("%0.1f", (xtick)) for xtick in xticks] xtickslabels[end] = "1e6" ytickslabels = ["", "south", "", "", "north", ""] ztickslabels = [@sprintf("%0.1f", (xtick)) for xtick in xticks] labels = (xtickslabels, ytickslabels, ztickslabels) axis[:ticks][:labels] = labels display(scene) # Change the default camera position after the fact # note that these change dynamically as the plot is manipulated return scene end """ histogram(array; bins = 100) # Description return arrays for plotting histogram """ function histogram( array; bins = minimum([100, length(array)]), normalize = true, ) tmp = zeros(bins) down, up = extrema(array) down, up = down == up ? (down - 1, up + 1) : (down, up) # edge case bucket = collect(range(down, up, length = bins + 1)) normalization = normalize ? 
length(array) : 1 for i in eachindex(array) # normalize then multiply by bins val = (array[i] - down) / (up - down) * bins ind = ceil(Int, val) # handle edge cases ind = maximum([ind, 1]) ind = minimum([ind, bins]) tmp[ind] += 1 / normalization end return (bucket[2:end] + bucket[1:(end - 1)]) .* 0.5, tmp end # 2D visualization function visualize( states::Array{Array{S, 2}, 1}; statenames = string.(1:length(states)), units = ["" for i in eachindex(states)], aspect = (1, 1, 1), resolution = (2412, 1158), title = "Zonal and Temporal Average of ", xlims = (0, 1), ylims = (0, 1), bins = 300, ) where {S} # Create scene scene, layout = layoutscene(resolution = resolution) lscene = layout[2:4, 2:4] = Axis( scene, xlabel = "South to North [m]", xlabelcolor = :black, ylabel = "Depth [m]", ylabelcolor = :black, xlabelsize = 40, ylabelsize = 40, xticklabelsize = 25, yticklabelsize = 25, xtickcolor = :black, ytickcolor = :black, xticklabelcolor = :black, yticklabelcolor = :black, titlesize = 50, ) width = round(Int, resolution[1] / 4) # make menu 1/4 of preliminary resolution # Create choices and nodes stateindex = collect(1:length(states)) statenode = Node(stateindex[1]) colorchoices = [:balance, :thermal, :dense, :deep, :curl, :thermometer] colornode = Node(colorchoices[1]) interpolationlabels = ["contour", "heatmap"] interpolationchoices = [true, false] interpolationnode = Node(interpolationchoices[1]) # Statistics llscene = layout[4, 1] = Axis( scene, xlabel = @lift(statenames[$statenode] * " " * units[$statenode]), xlabelcolor = :black, ylabel = "pdf", ylabelcolor = :black, xlabelsize = 40, ylabelsize = 40, xticklabelsize = 25, yticklabelsize = 25, xtickcolor = :black, ytickcolor = :black, xticklabelcolor = :black, yticklabelcolor = :black, ) layout[3, 1] = Label(scene, "Statistics", width = width, textsize = 50) # Clim sliders upperclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.99) upperclim_node = upperclim_slider.value lowerclim_slider = 
Slider(scene, range = range(0, 1, length = 101), startvalue = 0.01) lowerclim_node = lowerclim_slider.value #ylims = @lift(range($lowerval, $upperval, length = $)) # Lift Nodes state = @lift(states[$statenode]) statename = @lift(statenames[$statenode]) unit = @lift(units[$statenode]) oclims = @lift(( quantile($state[:], $lowerclim_node), quantile($state[:], $upperclim_node), )) cmap_rgb = @lift( $oclims[1] < $oclims[2] ? to_colormap($colornode) : reverse(to_colormap($colornode)) ) clims = @lift( $oclims[1] != $oclims[2] ? (minimum($oclims), maximum($oclims)) : (minimum($oclims) - 1, maximum($oclims) + 1) ) xlims = Array(range(xlims[1], xlims[2], length = 4)) #collect(range(xlims[1], xlims[2], length = size(states[1])[1])) ylims = Array(range(ylims[1], ylims[2], length = 4)) #@lift(collect(range($lowerval], $upperval, length = size($state)[2]))) # newrange = @lift(range($lowerval, $upperval, length = 4)) # lscene.yticks = @lift(Array($newrange)) titlename = @lift(title * $statename) # use padding and appropriate centering layout[1, 2:4] = Label(scene, titlename, textsize = 50) # heatmap heatmap1 = heatmap!( lscene, xlims, ylims, state, interpolate = interpolationnode, colormap = cmap_rgb, colorrange = clims, ) # statistics histogram_node = @lift(histogram($state, bins = bins)) xs = @lift($histogram_node[1]) ys = @lift($histogram_node[2]) pdf = GLMakie.AbstractPlotting.barplot!( llscene, xs, ys, color = :red, strokecolor = :red, strokewidth = 1, ) @lift(GLMakie.AbstractPlotting.xlims!(llscene, extrema($state))) @lift(GLMakie.AbstractPlotting.ylims!(llscene, extrema($histogram_node[2]))) vlines!(llscene, @lift($clims[1]), color = :black, linewidth = width / 100) vlines!(llscene, @lift($clims[2]), color = :black, linewidth = width / 100) # Menus statemenu = Menu(scene, options = zip(statenames, stateindex)) on(statemenu.selection) do s statenode[] = s end colormenu = Menu(scene, options = zip(colorchoices, colorchoices)) on(colormenu.selection) do s colornode[] = s end 
interpolationmenu = Menu(scene, options = zip(interpolationlabels, interpolationchoices)) on(interpolationmenu.selection) do s interpolationnode[] = s heatmap1 = heatmap!( lscene, xlims, ylims, state, interpolate = s, colormap = cmap_rgb, colorrange = clims, ) end newlabel = @lift($statename * " " * $unit) cbar = Colorbar(scene, heatmap1, label = newlabel) cbar.width = Relative(1 / 3) cbar.height = Relative(5 / 6) cbar.halign = :center cbar.flipaxisposition = true # cbar.labelpadding = -350 cbar.labelsize = 50 lowerclim_string = @lift( "clim quantile = " * @sprintf("%0.2f", $lowerclim_node) * ", value = " * @sprintf("%0.1e", $clims[1]) ) upperclim_string = @lift( "clim quantile = " * @sprintf("%0.2f", $upperclim_node) * ", value = " * @sprintf("%0.1e", $clims[2]) ) # depends on makie version, vbox for old, vgrid for new layout[2, 1] = vgrid!( Label(scene, "State", width = nothing), statemenu, Label( scene, "plotting options", width = width, textsize = 30, padding = (0, 0, 10, 0), ), interpolationmenu, Label(scene, "Color", width = nothing), colormenu, Label(scene, lowerclim_string, width = nothing), lowerclim_slider, Label(scene, upperclim_string, width = nothing), upperclim_slider, ) layout[2:4, 5] = vgrid!( Label( scene, "Color Bar", width = width / 2, textsize = 50, padding = (25, 0, 0, 00), ), cbar, ) layout[1, 1] = Label(scene, "Menu", width = width, textsize = 50) display(scene) return scene end function volumeslice( states::AbstractArray; statenames = string.(1:length(states)), units = ["" for i in eachindex(states)], aspect = (1, 1, 32 / 192), resolution = (2678, 1030), statistics = false, title = "Volume plot of ", bins = 300, statlabelsize = (20, 20), ) scene, layout = layoutscene(resolution = resolution) volumescene = layout[2:4, 2:4] = LScene(scene) menuwidth = round(Int, 350) layout[1, 1] = Label(scene, "Menu", width = menuwidth, textsize = 50) slice_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.0) slice_node = 
slice_slider.value directionindex = [1, 2, 3] directionnames = ["x-slice", "y-slice", "z-slice"] directionnode = Node(directionindex[1]) stateindex = collect(1:length(states)) statenode = Node(stateindex[1]) layout[1, 2:4] = Label(scene, @lift(title * statenames[$statenode]), textsize = 50) colorchoices = [:balance, :thermal, :dense, :deep, :curl, :thermometer] colornode = Node(colorchoices[1]) state = @lift(states[$statenode]) statename = @lift(statenames[$statenode]) unit = @lift(units[$statenode]) nx = @lift(size($state)[1]) ny = @lift(size($state)[2]) nz = @lift(size($state)[3]) nr = @lift([$nx, $ny, $nz]) nslider = 100 xrange = range(0.00, aspect[1], length = nslider) yrange = range(0.00, aspect[2], length = nslider) zrange = range(0.00, aspect[3], length = nslider) constx = collect(reshape(xrange, (nslider, 1, 1))) consty = collect(reshape(yrange, (1, nslider, 1))) constz = collect(reshape(zrange, (1, 1, nslider))) matx = zeros(nslider, nslider, nslider) maty = zeros(nslider, nslider, nslider) matz = zeros(nslider, nslider, nslider) matx .= constx maty .= consty matz .= constz sliceconst = [matx, maty, matz] planeslice = @lift(sliceconst[$directionnode]) upperclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.99) upperclim_node = upperclim_slider.value lowerclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.01) lowerclim_node = lowerclim_slider.value clims = @lift(( quantile($state[:], $lowerclim_node), quantile($state[:], $upperclim_node), )) volume!( volumescene, 0..aspect[1], 0..aspect[2], 0..aspect[3], state, overdraw = false, colorrange = clims, colormap = @lift(to_colormap($colornode)), ) alpha_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.5) alphanode = alpha_slider.value slicecolormap = @lift(cgrad(:viridis, alpha = $alphanode)) v = volume!( volumescene, 0..aspect[1], 0..aspect[2], 0..aspect[3], planeslice, algorithm = :iso, isorange = 0.005, isovalue = @lift($slice_node 
* aspect[$directionnode]), transparency = true, overdraw = false, visible = true, colormap = slicecolormap, colorrange = [-1, 0], ) # Volume histogram layout[3, 1] = Label(scene, "Statistics", textsize = 50) hscene = layout[4, 1] = Axis( scene, xlabel = @lift(statenames[$statenode] * " " * units[$statenode]), xlabelcolor = :black, ylabel = "pdf", ylabelcolor = :black, xlabelsize = 40, ylabelsize = 40, xticklabelsize = statlabelsize[1], yticklabelsize = statlabelsize[2], xtickcolor = :black, ytickcolor = :black, xticklabelcolor = :black, yticklabelcolor = :black, ) histogram_node = @lift(histogram($state, bins = bins)) vxs = @lift($histogram_node[1]) vys = @lift($histogram_node[2]) pdf = GLMakie.AbstractPlotting.barplot!( hscene, vxs, vys, color = :red, strokecolor = :red, strokewidth = 1, ) @lift(GLMakie.AbstractPlotting.xlims!(hscene, extrema($vxs))) @lift(GLMakie.AbstractPlotting.ylims!(hscene, extrema($vys))) vlines!( hscene, @lift($clims[1]), color = :black, linewidth = menuwidth / 100, ) vlines!( hscene, @lift($clims[2]), color = :black, linewidth = menuwidth / 100, ) # Slice sliceupperclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.99) sliceupperclim_node = sliceupperclim_slider.value slicelowerclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.01) slicelowerclim_node = slicelowerclim_slider.value slicexaxislabel = @lift(["y", "x", "x"][$directionnode]) sliceyaxislabel = @lift(["z", "z", "y"][$directionnode]) slicexaxis = @lift([[1, $ny], [1, $nx], [1, $nx]][$directionnode]) sliceyaxis = @lift([[1, $nz], [1, $nz], [1, $ny]][$directionnode]) slicescene = layout[2:4, 5:6] = Axis(scene, xlabel = slicexaxislabel, ylabel = sliceyaxislabel) sliced_state1 = @lift( $state[ round(Int, 1 + $slice_node * (size($state)[1] - 1)), 1:size($state)[2], 1:size($state)[3], ]) sliced_state2 = @lift( $state[ 1:size($state)[1], round(Int, 1 + $slice_node * (size($state)[2] - 1)), 1:size($state)[3], ]) sliced_state3 = @lift( 
$state[ 1:size($state)[1], 1:size($state)[2], round(Int, 1 + $slice_node * (size($state)[3] - 1)), ]) sliced_states = @lift([$sliced_state1, $sliced_state2, $sliced_state3]) sliced_state = @lift($sliced_states[$directionnode]) oclims = @lift(( quantile($sliced_state[:], $slicelowerclim_node), quantile($sliced_state[:], $sliceupperclim_node), )) slicecolormapnode = @lift($oclims[1] < $oclims[2] ? $colornode : $colornode) sliceclims = @lift( $oclims[1] != $oclims[2] ? (minimum($oclims), maximum($oclims)) : (minimum($oclims) - 1, maximum($oclims) + 1) ) heatmap1 = heatmap!( slicescene, slicexaxis, sliceyaxis, sliced_state, interpolate = true, colormap = slicecolormapnode, colorrange = sliceclims, ) # Colorbar newlabel = @lift($statename * " " * $unit) cbar = Colorbar(scene, heatmap1, label = newlabel) cbar.width = Relative(1 / 3) # cbar.height = Relative(5/6) cbar.halign = :left # cbar.flipaxisposition = true # cbar.labelpadding = -250 cbar.labelsize = 50 @lift(GLMakie.AbstractPlotting.xlims!(slicescene, extrema($slicexaxis))) @lift(GLMakie.AbstractPlotting.ylims!(slicescene, extrema($sliceyaxis))) sliceindex = @lift([ round(Int, 1 + $slice_node * ($nx - 1)), round(Int, 1 + $slice_node * ($ny - 1)), round(Int, 1 + $slice_node * ($nz - 1)), ][$directionnode]) slicestring = @lift(directionnames[$directionnode] * " of " * statenames[$statenode]) layout[1, 5:6] = Label(scene, slicestring, textsize = 50) axis = scene.children[1][OldAxis] axis[:names][:axisnames] = ("↓", "↓ ", "↓ ") axis[:names][:align] = ((:left, :center), (:right, :center), (:right, :center)) axis[:names][:textsize] = (50.0, 50.0, 50.0) axis[:ticks][:textsize] = (00.0, 00.0, 00.0) # Menus statemenu = Menu(scene, options = zip(statenames, stateindex)) on(statemenu.selection) do s statenode[] = s end colormenu = Menu(scene, options = zip(colorchoices, colorchoices)) on(colormenu.selection) do s colornode[] = s end # Slice Statistics layout[1, 7] = Label(scene, "Slice Menu", width = menuwidth, textsize = 50) 
layout[3, 7] = Label(scene, "Slice Statistics", textsize = 50) hslicescene = layout[4, 7] = Axis( scene, xlabel = @lift(statenames[$statenode] * " " * units[$statenode]), xlabelcolor = :black, ylabel = "pdf", ylabelcolor = :black, xlabelsize = 40, ylabelsize = 40, xticklabelsize = statlabelsize[1], yticklabelsize = statlabelsize[2], xtickcolor = :black, ytickcolor = :black, xticklabelcolor = :black, yticklabelcolor = :black, ) slicehistogram_node = @lift(histogram($sliced_state, bins = bins)) xs = @lift($slicehistogram_node[1]) ys = @lift($slicehistogram_node[2]) pdf = GLMakie.AbstractPlotting.barplot!( hslicescene, xs, ys, color = :blue, strokecolor = :blue, strokewidth = 1, ) @lift(GLMakie.AbstractPlotting.xlims!(hslicescene, extrema($xs))) @lift(GLMakie.AbstractPlotting.ylims!(hslicescene, extrema($ys))) vlines!( hslicescene, @lift($sliceclims[1]), color = :black, linewidth = menuwidth / 100, ) vlines!( hslicescene, @lift($sliceclims[2]), color = :black, linewidth = menuwidth / 100, ) interpolationnames = ["contour", "heatmap"] interpolationchoices = [true, false] interpolationnode = Node(interpolationchoices[1]) interpolationmenu = Menu(scene, options = zip(interpolationnames, interpolationchoices)) on(interpolationmenu.selection) do s interpolationnode[] = s # hack heatmap!( slicescene, slicexaxis, sliceyaxis, sliced_state, interpolate = s, colormap = slicecolormapnode, colorrange = sliceclims, ) end directionmenu = Menu(scene, options = zip(directionnames, directionindex)) on(directionmenu.selection) do s directionnode[] = s end slicemenustring = @lift( directionnames[$directionnode] * " at index " * string(round(Int, 1 + $slice_node * ($nr[$directionnode] - 1))) ) lowerclim_string = @lift( "quantile = " * @sprintf("%0.2f", $lowerclim_node) * ", value = " * @sprintf("%0.1e", $clims[1]) ) upperclim_string = @lift( "quantile = " * @sprintf("%0.2f", $upperclim_node) * ", value = " * @sprintf("%0.1e", $clims[2]) ) alphastring = @lift("Slice alpha = " * 
@sprintf("%0.2f", $alphanode)) layout[2, 1] = vgrid!( Label(scene, "State", width = nothing), statemenu, Label(scene, "Color", width = nothing), colormenu, Label(scene, "Slice Direction", width = nothing), directionmenu, Label(scene, alphastring, width = nothing), alpha_slider, Label(scene, slicemenustring, width = nothing), slice_slider, Label(scene, lowerclim_string, width = nothing), lowerclim_slider, Label(scene, upperclim_string, width = nothing), upperclim_slider, ) slicelowerclim_string = @lift( "quantile = " * @sprintf("%0.2f", $slicelowerclim_node) * ", value = " * @sprintf("%0.1e", $sliceclims[1]) ) sliceupperclim_string = @lift( "quantile = " * @sprintf("%0.2f", $sliceupperclim_node) * ", value = " * @sprintf("%0.1e", $sliceclims[2]) ) layout[2, 7] = vgrid!( Label(scene, "Contour Plot Type", width = nothing), interpolationmenu, Label(scene, slicelowerclim_string, width = nothing), slicelowerclim_slider, Label(scene, sliceupperclim_string, width = nothing), sliceupperclim_slider, cbar, ) display(scene) return scene end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/shared_source/FluidBC.jl ================================================ abstract type BoundaryCondition end """ FluidBC(momentum = Impenetrable(NoSlip()) temperature = Insulating()) The standard boundary condition for CNSEModel. The default options imply a "no flux" boundary condition. """ Base.@kwdef struct FluidBC{M, T} <: BoundaryCondition momentum::M = Impenetrable(NoSlip()) temperature::T = Insulating() end abstract type StateBC end abstract type MomentumBC <: StateBC end abstract type MomentumDragBC <: StateBC end abstract type TemperatureBC <: StateBC end (bc::StateBC)(state, aux, t) = bc.flux(bc.params, state, aux, t) """ Impenetrable(drag::MomentumDragBC) :: MomentumBC Defines an impenetrable wall model for momentum. 
This implies:
- no flow in the direction normal to the boundary, and
- flow parallel to the boundary is subject to the `drag` condition.
"""
struct Impenetrable{D <: MomentumDragBC} <: MomentumBC
    drag::D
end

"""
    Penetrable(drag::MomentumDragBC) :: MomentumBC

Defines a penetrable wall model for momentum.
This implies:
- no constraint on flow in the direction normal to the boundary, and
- flow parallel to the boundary is subject to the `drag` condition.
"""
struct Penetrable{D <: MomentumDragBC} <: MomentumBC
    drag::D
end

"""
    NoSlip() :: MomentumDragBC

Zero momentum at the boundary.
"""
struct NoSlip <: MomentumDragBC end

"""
    FreeSlip() :: MomentumDragBC

No surface drag on momentum parallel to the boundary.
"""
struct FreeSlip <: MomentumDragBC end

"""
    MomentumFlux(stress) :: MomentumDragBC

Applies the specified kinematic stress on momentum normal to the boundary.
Prescribe the net inward kinematic stress across the boundary by `stress`,
a function with signature `stress(problem, state, aux, t)`, returning the
flux (in m²/s²).
"""
Base.@kwdef struct MomentumFlux{𝒯, 𝒫} <: MomentumDragBC
    flux::𝒯 = nothing
    params::𝒫 = nothing
end

"""
    Insulating() :: TemperatureBC

No temperature flux across the boundary
"""
struct Insulating <: TemperatureBC end

"""
    TemperatureFlux(flux) :: TemperatureBC

Prescribe the net inward temperature flux across the boundary by `flux`,
a function with signature `flux(problem, state, aux, t)`, returning the
flux (in m⋅K/s).
"""
Base.@kwdef struct TemperatureFlux{𝒯, 𝒫} <: TemperatureBC
    flux::𝒯 = nothing
    params::𝒫 = nothing
end

"""
    check_bc(bcs, label)

Assemble the `FluidBC` for boundary `label`: look up the momentum (`:ρu`)
and temperature (`:ρθ`) conditions in `bcs`, falling back to the defaults
when a field is not specified.
"""
function check_bc(bcs, label)
    momentum_bc = check_bc(bcs, Val(:ρu), label)
    temperature_bc = check_bc(bcs, Val(:ρθ), label)
    return FluidBC(momentum_bc, temperature_bc)
end

# Temperature: user-supplied condition when present, otherwise insulating.
function check_bc(bcs, ::Val{:ρθ}, label)
    if haskey(bcs, :ρθ) && haskey(bcs[:ρθ], label)
        return bcs[:ρθ][label]
    end
    return Insulating()
end

# Momentum: user-supplied condition when present, otherwise a free-slip wall.
function check_bc(bcs, ::Val{:ρu}, label)
    if haskey(bcs, :ρu) && haskey(bcs[:ρu], label)
        return bcs[:ρu][label]
    end
    return Impenetrable(FreeSlip())
end

# These wrappers discard the trailing arguments that the DG infrastructure
# passes along but the boundary conditions never use.
function _cnse_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    bc,
    model,
    state⁺,
    aux⁺,
    n,
    state⁻,
    aux⁻,
    t,
    _...,
)
    return cnse_boundary_state!(nf, bc, model, state⁺, aux⁺, n, state⁻, aux⁻, t)
end

function _cnse_boundary_state!(
    nf::NumericalFluxSecondOrder,
    bc,
    model,
    state⁺,
    gradflux⁺,
    hyperflux⁺,
    aux⁺,
    n,
    state⁻,
    gradflux⁻,
    hyperflux⁻,
    aux⁻,
    t,
    _...,
)
    return cnse_boundary_state!(
        nf,
        bc,
        model,
        state⁺,
        gradflux⁺,
        aux⁺,
        n,
        state⁻,
        gradflux⁻,
        aux⁻,
        t,
    )
end


================================================
FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/shared_source/ScalarFields.jl
================================================
using Base.Threads, LinearAlgebra
import Base: getindex, materialize!, broadcasted

abstract type AbstractField end

# Nodal `data` paired with the `grid` used to evaluate it at arbitrary points.
struct ScalarField{S, T} <: AbstractField
    data::S
    grid::T
end

# Pointwise evaluation delegates to `getvalue` with a coordinate tuple.
(ϕ::ScalarField)(x::Tuple) = getvalue(ϕ.data, x, ϕ.grid)
(ϕ::ScalarField)(x::Number, y::Number, z::Number) =
    getvalue(ϕ.data, (x, y, z), ϕ.grid)
(ϕ::ScalarField)(x::Number, y::Number) = getvalue(ϕ.data, (x, y), ϕ.grid)

# Indexing and broadcasting forward straight to the underlying data.
getindex(ϕ::ScalarField, i::Int) = ϕ.data[i]
materialize!(ϕ::ScalarField, f::Base.Broadcast.Broadcasted) =
    materialize!(ϕ.data, f)
broadcasted(identity, ϕ::ScalarField) = broadcasted(Base.identity, ϕ.data)

function (ϕ::ScalarField)(
xlist::StepRangeLen, ylist::StepRangeLen, zlist::StepRangeLen; threads = false, ) newfield = zeros(length(xlist), length(ylist), length(zlist)) if threads @threads for k in eachindex(zlist) for j in eachindex(ylist) for i in eachindex(xlist) newfield[i, j, k] = getvalue(ϕ.data, (xlist[i], ylist[j], zlist[k]), ϕ.grid) end end end else for k in eachindex(zlist) for j in eachindex(ylist) for i in eachindex(xlist) newfield[i, j, k] = getvalue(ϕ.data, (xlist[i], ylist[j], zlist[k]), ϕ.grid) end end end end return newfield end function (ϕ::ScalarField)( xlist::StepRangeLen, ylist::StepRangeLen; threads = false, ) newfield = zeros(length(xlist), length(ylist)) if threads @threads for j in eachindex(ylist) for i in eachindex(xlist) newfield[i, j] = getvalue(ϕ.data, (xlist[i], ylist[j]), ϕ.grid) end end else for j in eachindex(ylist) for i in eachindex(xlist) newfield[i, j] = getvalue(ϕ.data, (xlist[i], ylist[j]), ϕ.grid) end end end return newfield end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/shared_source/VectorFields.jl ================================================ import Base: getindex abstract type AbstractRepresentation end struct Cartesian <: AbstractRepresentation end struct Spherical <: AbstractRepresentation end struct Covariant <: AbstractRepresentation end struct Contravariant <: AbstractRepresentation end Base.@kwdef struct VectorField{S, T, C} <: AbstractField data::S grid::T representation::C = Cartesian() end function VectorField(ϕ::VectorField; representation = Cartesian()) return VectorField( data = ϕ.data, grid = ϕ.grid, representation = representation, ) end function (ϕ::VectorField)(representation::AbstractRepresentation) return VectorField(ϕ, representation = representation) end function components(data, ::Cartesian) one = @sprintf("%0.2e", data[1]) two = @sprintf("%0.2e", data[2]) three = @sprintf("%0.2e", data[3]) println(one, "x̂ +", two, "ŷ +", three, "ẑ ") return data 
end getindex(ϕ::VectorField, ijk, e; verbose = true) = components( getindex.(ϕ.data, ijk, e), ϕ.grid, ijk, e, ϕ.representation, verbose = verbose, ) ## Component Grabber function convenience_print(grid, ijk, e) print("location: ") x, y, z = get_position(grid, ijk, e) println( "x=", @sprintf("%0.2e", x), ",y=", @sprintf("%0.2e", y), ",z=", @sprintf("%0.2e", z), " ", ) print("field: ") end function components(v⃗, grid, ijk, e, ::Cartesian; verbose = true) if verbose one = @sprintf("%0.2e", v⃗[1]) two = @sprintf("%0.2e", v⃗[2]) three = @sprintf("%0.2e", v⃗[3]) convenience_print(grid, ijk, e) println(one, "x̂ +", two, "ŷ +", three, "ẑ ") end return v⃗ end function components(v⃗, grid, ijk, e, ::Covariant; verbose = true) v⃗¹, v⃗², v⃗³ = get_contravariant(grid, ijk, e) v₁ = dot(v⃗¹, v⃗) v₂ = dot(v⃗², v⃗) v₃ = dot(v⃗³, v⃗) if verbose one = @sprintf("%0.2e", v₁) two = @sprintf("%0.2e", v₂) three = @sprintf("%0.2e", v₃) convenience_print(grid, ijk, e) println(one, "v⃗¹ +", two, "v⃗² +", three, "v⃗³ ") end return (; v₁, v₂, v₃) end function components(v⃗, grid, ijk, e, ::Contravariant; verbose = true) v⃗₁, v⃗₂, v⃗₃ = get_covariant(grid, ijk, e) v¹ = dot(v⃗₁, v⃗) v² = dot(v⃗₂, v⃗) v³ = dot(v⃗₃, v⃗) if verbose one = @sprintf("%0.2e", v¹) two = @sprintf("%0.2e", v²) three = @sprintf("%0.2e", v³) convenience_print(grid, ijk, e) println(one, "v⃗₁ +", two, "v⃗₂ +", three, "v⃗₃ ") end return (; v¹, v², v³) end function components(v⃗, grid, ijk, e, ::Spherical; verbose = true) r̂, θ̂, φ̂ = get_spherical(grid, ijk, e) vʳ = dot(r̂, v⃗) vᶿ = dot(θ̂, v⃗) vᵠ = dot(φ̂, v⃗) if verbose one = @sprintf("%0.2e", vʳ) two = @sprintf("%0.2e", vᶿ) three = @sprintf("%0.2e", vᵠ) convenience_print(grid, ijk, e) println(one, "r̂ +", two, "θ̂ +", three, "φ̂ ") end return (; vʳ, vᶿ, vᵠ) end ## Helper functions function get_jacobian(grid, ijk, e) ξ1x1 = grid.vgeo[ijk, grid.ξ1x1id, e] ξ1x2 = grid.vgeo[ijk, grid.ξ1x2id, e] ξ1x3 = grid.vgeo[ijk, grid.ξ1x3id, e] ξ2x1 = grid.vgeo[ijk, grid.ξ2x1id, e] ξ2x2 = 
grid.vgeo[ijk, grid.ξ2x2id, e]
    ξ2x3 = grid.vgeo[ijk, grid.ξ2x3id, e]
    ξ3x1 = grid.vgeo[ijk, grid.ξ3x1id, e]
    ξ3x2 = grid.vgeo[ijk, grid.ξ3x2id, e]
    ξ3x3 = grid.vgeo[ijk, grid.ξ3x3id, e]
    # Metric terms ∂ξᵢ/∂xⱼ read from vgeo, assembled row by row.
    J = [
        ξ1x1 ξ1x2 ξ1x3
        ξ2x1 ξ2x2 ξ2x3
        ξ3x1 ξ3x2 ξ3x3
    ]
    # rows are the contravariant vectors
    # columns of the inverse are the covariant vectors
    return J
end

# Contravariant basis vectors: the rows of the metric-term matrix.
function get_contravariant(grid, ijk, e)
    J = get_jacobian(grid, ijk, e)
    a⃗¹ = J[1, :]
    a⃗² = J[2, :]
    a⃗³ = J[3, :]
    return (; a⃗¹, a⃗², a⃗³)
end

# Covariant basis vectors: the columns of the inverse metric-term matrix.
function get_covariant(grid, ijk, e)
    J = inv(get_jacobian(grid, ijk, e))
    a⃗₁ = J[:, 1]
    a⃗₂ = J[:, 2]
    a⃗₃ = J[:, 3]
    return (; a⃗₁, a⃗₂, a⃗₃)
end

# Unit spherical basis (r̂, θ̂, φ̂) at the node's Cartesian position.
# NOTE(review): θ̂ and φ̂ divide by norm([x, y, 0]) and so are undefined on
# the polar axis (x = y = 0) — confirm callers never evaluate there.
function get_spherical(grid, ijk, e)
    x, y, z = get_position(grid, ijk, e)
    r̂ = [x, y, z] ./ norm([x, y, z])
    θ̂ = [x * z, y * z, -(x^2 + y^2)] ./ (norm([x, y, z]) * norm([x, y, 0]))
    φ̂ = [-y, x, 0] ./ norm([x, y, 0])
    return (; r̂, θ̂, φ̂)
end

# Cartesian coordinates of node `ijk` in element `e`, as a 1×3 row vector.
function get_position(grid, ijk, e)
    x1 = grid.vgeo[ijk, grid.x1id, e]
    x2 = grid.vgeo[ijk, grid.x2id, e]
    x3 = grid.vgeo[ijk, grid.x3id, e]
    r = [x1 x2 x3]
    return r
end

# Jacobian determinant at every node: the mass-matrix entries divided by
# the tensor product of the 1D quadrature weights.
function construct_determinant(grid)
    M = grid.vgeo[:, grid.Mid, :]
    ωx = reshape(grid.ω[1], (length(grid.ω[1]), 1, 1, 1))
    ωy = reshape(grid.ω[2], (1, length(grid.ω[2]), 1, 1))
    ωz = reshape(grid.ω[3], (1, 1, length(grid.ω[3]), 1))
    ω = reshape(ωx .* ωy .* ωz, (size(M)[1], 1))
    J = M ./ ω
    return J
end

# NOTE(review): despite the name, this returns the raw mass-matrix entry M
# and never divides by the quadrature weights as `construct_determinant`
# does — confirm whether the weight division was intentionally omitted.
function get_jacobian_determinant(grid, ijk, e)
    M = grid.vgeo[ijk, grid.Mid, e]
end


================================================
FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/shared_source/abstractions.jl
================================================
#######
# useful concepts for dispatch
#######

"""
Advection terms

right now really only non-linear or ::Nothing
"""
abstract type AdvectionTerm end
struct NonLinearAdvectionTerm <: AdvectionTerm end

"""
Turbulence Closures

ways to handle drag and diffusion and such
"""
abstract type TurbulenceClosure end

struct LinearDrag{T} <: TurbulenceClosure
    λ::T
end

struct ConstantViscosity{T} <:
TurbulenceClosure μ::T ν::T κ::T function ConstantViscosity{T}(; μ = T(1e-6), # m²/s ν = T(1e-6), # m²/s κ = T(1e-6), # m²/s ) where {T <: AbstractFloat} return new{T}(μ, ν, κ) end end """ Forcings ways to add body terms and sources """ abstract type Forcing end abstract type CoriolisForce <: Forcing end struct fPlaneCoriolis{T} <: CoriolisForce fₒ::T β::T function fPlaneCoriolis{T}(; fₒ = T(1e-4), # Hz β = T(1e-11), # Hz/m ) where {T <: AbstractFloat} return new{T}(fₒ, β) end end struct SphereCoriolis{T} <: CoriolisForce Ω::T function SphereCoriolis{T}(; Ω = T(2π / 86400), # Hz ) where {T <: AbstractFloat} return new{T}(Ω) end end struct KinematicStress{T} <: Forcing τₒ::T function KinematicStress{T}(; τₒ = T(1e-4)) where {T <: AbstractFloat} return new{T}(τₒ) end end struct Buoyancy{T} <: Forcing α::T # 1/K g::T # m/s² function Buoyancy{T}(; α = T(2e-4), g = T(10)) where {T <: AbstractFloat} return new{T}(α, g) end end """ Grouping structs """ abstract type AbstractModel end Base.@kwdef struct SpatialModel{𝒜, ℬ, 𝒞, 𝒟, ℰ, ℱ} <: AbstractModel balance_law::𝒜 physics::ℬ numerics::𝒞 grid::𝒟 boundary_conditions::ℰ parameters::ℱ end polynomialorders(model::SpatialModel) = convention( model.grid.resolution.polynomial_order, Val(ndims(model.grid.domain)), ) abstract type ModelPhysics end Base.@kwdef struct FluidPhysics{𝒪, 𝒜, 𝒟, 𝒞, ℬ} <: ModelPhysics orientation::𝒪 = ClimateMachine.Orientations.FlatOrientation() advection::𝒜 = NonLinearAdvectionTerm() dissipation::𝒟 = nothing coriolis::𝒞 = nothing buoyancy::ℬ = nothing end abstract type AbstractInitialValueProblem end Base.@kwdef struct InitialValueProblem{𝒫, ℐ𝒱} <: AbstractInitialValueProblem params::𝒫 = nothing initial_conditions::ℐ𝒱 = nothing end abstract type AbstractSimulation end struct Simulation{𝒜, ℬ, 𝒞, 𝒟, ℰ, ℱ} <: AbstractSimulation model::𝒜 state::ℬ timestepper::𝒞 initial_conditions::𝒟 callbacks::ℰ time::ℱ end function Simulation(; model = nothing, state = nothing, timestepper = nothing, initial_conditions = 
nothing,
    callbacks = nothing,
    time = nothing,
)
    model = DGModel(model, initial_conditions = initial_conditions)
    FT = eltype(model.grid.vgeo)
    # Default to a freshly initialized ODE state when none is supplied.
    # (`===` rather than `==`: identity check against `nothing` is the
    # Julia idiom and does not rely on generic equality.)
    if state === nothing
        state = init_ode_state(model, FT(0); init_on_cpu = true)
    end
    # model = (discrete = dgmodel, spatial = model)
    return Simulation(
        model,
        state,
        timestepper,
        initial_conditions,
        callbacks,
        time,
    )
end

coordinates(simulation::Simulation) = coordinates(simulation.model.grid)
polynomialorders(simulation::Simulation) =
    polynomialorders(simulation.model.grid)

abstract type AbstractTimestepper end

Base.@kwdef struct TimeStepper{S, T} <: AbstractTimestepper
    method::S
    timestep::T
end

"""
    calculate_dt(grid; wavespeed = nothing, diffusivity = nothing, viscocity = nothing, cfl = 1.0)

Compute a stable timestep from the minimum node distance `Δx` of `grid`.
The advective restriction `Δx / wavespeed` and the diffusive restrictions
`Δx² / diffusivity` and `Δx² / viscocity` are collected for whichever
keywords are given; the smallest, scaled by `cfl`, is returned. Logs an
error and returns `nothing` when no characteristic speed or diffusivity
is provided.
"""
function calculate_dt(
    grid;
    wavespeed = nothing,
    diffusivity = nothing,
    viscocity = nothing,
    cfl = 1.0,
)
    Δx = min_node_distance(grid)
    Δts = []
    if wavespeed !== nothing
        push!(Δts, Δx / wavespeed)
    end
    if diffusivity !== nothing
        push!(Δts, Δx^2 / diffusivity)
    end
    if viscocity !== nothing
        push!(Δts, Δx^2 / viscocity)
    end
    if isempty(Δts)
        @error("Please provide characteristic speed or diffusivities")
        return nothing
    end
    return cfl * minimum(Δts)
end

# Convenience overload: unwrap the numerical grid of a DiscretizedDomain.
function calculate_dt(
    grid::DiscretizedDomain;
    wavespeed = nothing,
    diffusivity = nothing,
    viscocity = nothing,
    cfl = 1.0,
)
    return calculate_dt(
        grid.numerical;
        wavespeed = wavespeed,
        diffusivity = diffusivity,
        viscocity = viscocity,
        cfl = cfl,
    )
end


================================================
FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/shared_source/boilerplate.jl
================================================
using MPI
using JLD2
using Test
using Dates
using Printf
using Logging
using StaticArrays
using LinearAlgebra
using ClimateMachine
using ClimateMachine.VTK
using ClimateMachine.MPIStateArrays
using ClimateMachine.VariableTemplates
using ClimateMachine.Mesh.Geometry
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.BalanceLaws using ClimateMachine.ODESolvers using ClimateMachine.Orientations # ×(a::SVector, b::SVector) = StaticArrays.cross(a, b) ⋅(a::SVector, b::SVector) = StaticArrays.dot(a, b) ⊗(a::SVector, b::SVector) = a * b' abstract type AbstractFluid <: BalanceLaw end struct Fluid <: AbstractFluid end include("domains.jl") include("grids.jl") include("abstractions.jl") include("callbacks.jl") include("FluidBC.jl") include("ScalarFields.jl") include("VectorFields.jl") # include("../plotting/bigfileofstuff.jl") # include("../plotting/vizinanigans.jl") """ function coordinates(grid::DiscontinuousSpectralElementGrid) # Description Gets the (x,y,z) coordinates corresponding to the grid # Arguments - `grid`: DiscontinuousSpectralElementGrid # Return - `x, y, z`: views of x, y, z coordinates """ function evolve!(simulation, spatial_model; refDat = ()) Q = simulation.state dg = simulation.model Ns = polynomialorders(spatial_model) if haskey(spatial_model.grid.resolution, :overintegration_order) Nover = spatial_model.grid.resolution.overintegration_order else Nover = (0, 0, 0) end # only works if Nover > 0 overintegration_filter!(Q, dg, Ns, Nover) function custom_tendency(tendency, x...; kw...) dg(tendency, x...; kw...) 
overintegration_filter!(tendency, dg, Ns, Nover) end t0 = simulation.time.start Δt = simulation.timestepper.timestep timestepper = simulation.timestepper.method odesolver = timestepper(custom_tendency, Q, dt = Δt, t0 = t0) cbvector = create_callbacks(simulation, odesolver) if isempty(cbvector) solve!(Q, odesolver; timeend = simulation.time.finish) else solve!( Q, odesolver; timeend = simulation.time.finish, callbacks = cbvector, ) end ## Check results against reference ClimateMachine.StateCheck.scprintref(cbvector[end]) if length(refDat) > 0 @test ClimateMachine.StateCheck.scdocheck(cbvector[end], refDat) end return Q end function visualize( simulation::Simulation; statenames = [string(i) for i in 1:size(simulation.state)[2]], resolution = (32, 32, 32), ) a_, statesize, b_ = size(simulation.state) mpistate = simulation.state grid = simulation.model.grid grid_helper = GridHelper(grid) r = coordinates(grid) states = [] ϕ = ScalarField(copy(r[1]), grid_helper) r = uniform_grid(Ω, resolution = resolution) # statesymbol = vars(Q).names[i] # doesn't work for vectors for i in 1:statesize ϕ .= mpistate[:, i, :] ϕnew = ϕ(r...) push!(states, ϕnew) end visualize([states...], statenames = statenames) end function overintegration_filter!(state_array, dgmodel, Ns, Nover) if sum(Nover) > 0 cutoff_order = Ns .+ 1 cutoff = MassPreservingCutoffFilter(dgmodel.grid, cutoff_order) num_state_prognostic = number_states(dgmodel.balance_law, Prognostic()) ClimateMachine.Mesh.Filters.apply!( state_array, 1:num_state_prognostic, dgmodel.grid, cutoff, ) end return nothing end # initialized on CPU so not problem, but could do kernel launch? function set_ic!(ϕ, s::Number, _...) 
ϕ .= s return nothing end function set_ic!(ϕ, s::Function, p, x, y, z) _, nstates, _ = size(ϕ) @inbounds for i in eachindex(x) @inbounds for j in 1:nstates ϕʲ = view(ϕ, :, j, :) ϕʲ[i] = s(p, x[i], y[i], z[i])[j] end end return nothing end # filter below here using ClimateMachine.Mesh.Filters using KernelAbstractions using KernelAbstractions.Extras: @unroll import ClimateMachine.Mesh.Filters: apply_async! import ClimateMachine.Mesh.Filters: AbstractFilterTarget import ClimateMachine.Mesh.Filters: number_state_filtered, vars_state_filtered, compute_filter_argument!, compute_filter_result! function modified_filter_matrix(r, Nc, σ) N = length(r) - 1 T = eltype(r) @assert N >= 0 @assert 0 <= Nc a, b = GaussQuadrature.legendre_coefs(T, N) V = (N == 0 ? ones(T, 1, 1) : GaussQuadrature.orthonormal_poly(r, a, b)) Σ = ones(T, N + 1) if Nc ≤ N Σ[(Nc:N) .+ 1] .= σ.(((Nc:N) .- Nc) ./ (N - Nc)) end V * Diagonal(Σ) / V end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/shared_source/callbacks.jl ================================================ abstract type AbstractCallback end struct Default <: AbstractCallback end struct Info <: AbstractCallback end struct StateCheck{T} <: AbstractCallback number_of_checks::T end Base.@kwdef struct JLD2State{T, V, B} <: AbstractCallback iteration::T filepath::V overwrite::B = true end Base.@kwdef struct VTKState{T, V, C, B} <: AbstractCallback iteration::T = 1 filepath::V = "." counter::C = [0] overwrite::B = true end function create_callbacks(simulation::Simulation, odesolver) callbacks = simulation.callbacks if isempty(callbacks) return () else cbvector = [ create_callback(callback, simulation, odesolver) for callback in callbacks ] return tuple(cbvector...) 
end end function create_callback(::Default, simulation::Simulation, odesolver) cb_info = create_callback(Info(), simulation, odesolver) cb_state_check = create_callback(StateCheck(10), simulation, odesolver) return (cb_info, cb_state_check) end function create_callback(::Info, simulation::Simulation, odesolver) Q = simulation.state timeend = simulation.time.finish mpicomm = MPI.COMM_WORLD starttime = Ref(now()) cbinfo = ClimateMachine.GenericCallbacks.EveryXWallTimeSeconds( 60, mpicomm, ) do (s = false) if s starttime[] = now() else energy = norm(Q) @info @sprintf( """Update simtime = %8.4f / %8.4f runtime = %s norm(Q) = %.16e""", ClimateMachine.ODESolvers.gettime(odesolver), timeend, Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) if isnan(energy) error("NaNs") end end end return cbinfo end function create_callback(callback::StateCheck, simulation::Simulation, _...) sim_length = simulation.time.finish - simulation.time.start timestep = simulation.timestepper.timestep nChecks = callback.number_of_checks nt_freq = floor(Int, sim_length / timestep / nChecks) cbcs_dg = ClimateMachine.StateCheck.sccreate( [(simulation.state, "state")], nt_freq, ) return cbcs_dg end function create_callback(output::JLD2State, simulation::Simulation, odesolver) # Initialize output output.overwrite && isfile(output.filepath) && rm(output.filepath; force = output.overwrite) Q = simulation.state mpicomm = MPI.COMM_WORLD iteration = output.iteration steps = ClimateMachine.ODESolvers.getsteps(odesolver) time = ClimateMachine.ODESolvers.gettime(odesolver) file = jldopen(output.filepath, "a+") JLD2.Group(file, "state") JLD2.Group(file, "time") file["state"][string(steps)] = Array(Q) file["time"][string(steps)] = time close(file) jldcallback = ClimateMachine.GenericCallbacks.EveryXSimulationSteps( iteration, ) do (s = false) steps = ClimateMachine.ODESolvers.getsteps(odesolver) time = ClimateMachine.ODESolvers.gettime(odesolver) @info 
steps, time file = jldopen(output.filepath, "a+") file["state"][string(steps)] = Array(Q) file["time"][string(steps)] = time close(file) return nothing end end function create_callback(output::VTKState, simulation::Simulation, odesolver) # Initialize output output.overwrite && isfile(output.filepath) && rm(output.filepath; force = output.overwrite) mkpath(output.filepath) state = simulation.state model = simulation.model function do_output(counter, model, state) mpicomm = MPI.COMM_WORLD balance_law = model.balance_law aux_state = model.state_auxiliary outprefix = @sprintf( "%s/mpirank%04d_step%04d", output.filepath, MPI.Comm_rank(mpicomm), counter[1], ) @info "doing VTK output" outprefix state_names = flattenednames(vars_state(balance_law, Prognostic(), eltype(state))) aux_names = flattenednames(vars_state(balance_law, Auxiliary(), eltype(state))) writevtk(outprefix, state, model, state_names, aux_state, aux_names) counter[1] += 1 return nothing end do_output(output.counter, model, state) cbvtk = ClimateMachine.GenericCallbacks.EveryXSimulationSteps(output.iteration) do ( init = false ) do_output(output.counter, model, state) return nothing end return cbvtk end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/shared_source/domains.jl ================================================ import Base: getindex, *, ndims, length, ^ import LinearAlgebra: × abstract type AbstractDomain end abstract type AbstractBoundary end struct DomainBoundary <: AbstractBoundary closure::Any end struct PointDomain{S} <: AbstractDomain point::S end struct IntervalDomain{AT, BT, PT} <: AbstractDomain min::AT max::BT periodic::PT end function IntervalDomain(min, max; periodic = false) @assert min < max return IntervalDomain(min, max, periodic) end function Periodic(min, max) @assert min < max return IntervalDomain(min, max, periodic = true) end S¹ = Periodic function Interval(min, max) @assert min < max return 
IntervalDomain(min, max)
end

# NOTE: an identical duplicate definition of `Periodic(min, max)` used to
# live here; it was a copy-paste repeat of the one above and was removed.

function Base.show(io::IO, Ω::IntervalDomain)
    min = Ω.min
    max = Ω.max
    printstyled(io, "[", color = 226)
    astring = @sprintf("%0.2f", min)
    bstring = @sprintf("%0.2f", max)
    # Write to `io` (not stdout) so `show` composes with sprint/repr.
    printstyled(io, astring, ", ", bstring, color = 7)
    # printstyled("$min, $max", color = 7)
    Ω.periodic ? printstyled(io, ")", color = 226) :
    printstyled(io, "]", color = 226)
end

function Base.show(io::IO, o::PointDomain)
    # Route through `io` instead of stdout so the point renders wherever
    # the caller is printing.
    printstyled(io, "{", o.point, "}", color = 201)
end

# Product Domains
struct ProductDomain{DT} <: AbstractDomain
    domains::DT
end

function Base.show(io::IO, Ω::ProductDomain)
    for (i, domain) in enumerate(Ω.domains)
        # Print each factor to `io`, not stdout, so nested show calls nest.
        print(io, domain)
        if i != length(Ω.domains)
            printstyled(io, "×", color = 118)
        end
    end
end

ndims(p::PointDomain) = 0
ndims(Ω::IntervalDomain) = 1
ndims(Ω::ProductDomain) = +(ndims.(Ω.domains)...)

length(Ω::IntervalDomain) = Ω.max - Ω.min
length(Ω::ProductDomain) = length.(Ω.domains)

# Tensor products of domains build (or extend) a ProductDomain.
×(arg1::AbstractDomain, arg2::AbstractDomain) = ProductDomain((arg1, arg2))
×(args::ProductDomain, arg2::AbstractDomain) =
    ProductDomain((args.domains..., arg2))
×(arg1::AbstractDomain, args::ProductDomain) =
    ProductDomain((arg1, args.domains...))
×(arg1::ProductDomain, args::ProductDomain) =
    ProductDomain((arg1.domains..., args.domains...))
# Unary ×: wrap a single domain as a 1-factor product. The previous
# `ProductDomain(args...)` splatted a non-iterable domain and would error.
×(args::AbstractDomain) = ProductDomain((args,))
*(arg1::AbstractDomain, arg2::AbstractDomain) = arg1 × arg2

# Ω^T is the T-fold tensor product of the interval with itself.
function ^(Ω::IntervalDomain, T::Int)
    Ωᵀ = Ω
    for i in 1:(T - 1)
        Ωᵀ *= Ω
    end
    return Ωᵀ
end

function info(Ω::ProductDomain)
    println("This is a ", ndims(Ω), "-dimensional tensor product domain.")
    print("The domain is ")
    println(Ω, ".")
    for (i, domain) in enumerate(Ω.domains)
        domain_string = domain.periodic ?
"periodic" : "wall-bounded" length = @sprintf("%.2f ", domain.max - domain.min) println( "The dimension $i domain is ", domain_string, " with length ≈ ", length, ) end return nothing end function isperiodic(Ω::ProductDomain) max = [Ω.domains[i].periodic for i in eachindex(Ω.domains)] return prod(max) end function periodicityof(Ω::ProductDomain) periodicity = ones(Bool, ndims(Ω)) for i in 1:ndims(Ω) periodicity[i] = Ω[i].periodic end return Tuple(periodicity) end getindex(Ω::ProductDomain, i::Int) = Ω.domains[i] # Boundaries struct Boundaries{S} boundaries::S end getindex(∂Ω::Boundaries, i) = ∂Ω.boundaries[i] function Base.show(io::IO, ∂Ω::Boundaries) for (i, boundary) in enumerate(∂Ω.boundaries) printstyled("boundary ", i, ": ", color = 13) println(boundary) end end function ∂(Ω::IntervalDomain) if Ω.periodic return (nothing) else return Boundaries((PointDomain(Ω.min), PointDomain(Ω.max))) end return nothing end function ∂(Ω::ProductDomain) ∂Ω = [] for domain in Ω.domains push!(∂Ω, ∂(domain)) end splitb = [] for (i, boundary) in enumerate(∂Ω) tmp = Any[] push!(tmp, Ω.domains...) 
if boundary != nothing tmp1 = copy(tmp) tmp2 = copy(tmp) tmp1[i] = boundary[1] push!(splitb, ProductDomain(Tuple(tmp1))) tmp2[i] = boundary[2] push!(splitb, ProductDomain(Tuple(tmp2))) end end return Boundaries(Tuple(splitb)) end Base.@kwdef struct SphericalShellDomain{ℛ, ℋ, 𝒟} <: AbstractDomain radius::ℛ = 6378e3 height::ℋ = 30e3 depth::𝒟 = 3e3 end length(Ω::SphericalShellDomain) = (Ω.radius, Ω.radius, Ω.height - Ω.depth) ndims(Ω::SphericalShellDomain) = 3 function AtmosDomain(; radius = 6378e3, height = 30e3) depth = 0 return SphericalShellDomain(; radius, height, depth) end function OceanDomain(; radius = 6378e3, depth = 3e3) height = 0 return SphericalShellDomain(; radius, height, depth) end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/shared_source/grids.jl ================================================ import ClimateMachine.Mesh.Grids: DiscontinuousSpectralElementGrid function coordinates(grid::DiscontinuousSpectralElementGrid) x = view(grid.vgeo, :, grid.x1id, :) # x-direction y = view(grid.vgeo, :, grid.x2id, :) # y-direction z = view(grid.vgeo, :, grid.x3id, :) # z-direction return x, y, z end # some convenience functions function convention( a::NamedTuple{(:vertical, :horizontal), T}, ::Val{3}, ) where {T} return (a.horizontal, a.horizontal, a.vertical) end function convention(a::Number, ::Val{3}) return (a, a, a) end function convention( a::NamedTuple{(:vertical, :horizontal), T}, ::Val{2}, ) where {T} return (a.horizontal, a.vertical) end function convention(a::Number, ::Val{2}) return (a, a) end function convention(a::Tuple, b) return a end # brick range brickbuilder function uniform_brick_builder(domain, elements; FT = Float64) dimension = ndims(domain) tuple_ranges = [] for i in 1:dimension push!( tuple_ranges, range( FT(domain[i].min); length = elements[i] + 1, stop = FT(domain[i].max), ), ) end brickrange = Tuple(tuple_ranges) return brickrange end # Grid Constructor """ 
function DiscontinuousSpectralElementGrid(domain::ProductDomain; elements = nothing, polynomialorder = nothing) # Description Computes a DiscontinuousSpectralElementGrid as specified by a product domain # Arguments -`domain`: A product domain object # Keyword Arguments -`elements`: A tuple of integers ordered by (Nx, Ny, Nz) for number of elements -`polynomialorder`: A tupe of integers ordered by (npx, npy, npz) for polynomial order -`FT`: floattype, assumed Float64 unless otherwise specified -`topology`: default = StackedBrickTopology -`mpicomm`: default = MPI.COMM_WORLD -`array`: default = ClimateMachine.array_type() -`brickbuilder`: default = uniform_brick_builder, brickrange=uniform_brick_builder(domain, elements) # Return A DiscontinuousSpectralElementGrid object """ function DiscontinuousSpectralElementGrid( domain::ProductDomain; elements = nothing, polynomialorder = nothing, FT = Float64, mpicomm = MPI.COMM_WORLD, array = ClimateMachine.array_type(), topology = StackedBrickTopology, brick_builder = uniform_brick_builder, ) if elements == nothing error_message = "Please specify the number of elements as a tuple whose size is commensurate with the domain," error_message *= " e.g., a 3 dimensional domain would need a specification like elements = (10,10,10)." error_message *= " or elements = (vertical = 8, horizontal = 5)" @error(error_message) return nothing end if polynomialorder == nothing error_message = "Please specify the polynomial order as a tuple whose size is commensurate with the domain," error_message *= "e.g., a 3 dimensional domain would need a specification like polynomialorder = (3,3,3)." error_message *= " or polynomialorder = (vertical = 8, horizontal = 5)" @error(error_message) return nothing end dimension = ndims(domain) if (dimension < 2) || (dimension > 3) error_message = "SpectralElementGrid only works with dimensions 2 or 3. 
" error_message *= "The current dimension is " * string(ndims(domain)) println("The domain is ", domain) @error(error_message) return nothing end elements = convention(elements, Val(dimension)) if ndims(domain) != length(elements) @error("Incorrectly specified elements for the dimension of the domain") return nothing end polynomialorder = convention(polynomialorder, Val(dimension)) if ndims(domain) != length(polynomialorder) @error("Incorrectly specified polynomialorders for the dimension of the domain") return nothing end brickrange = brick_builder(domain, elements, FT = FT) if dimension == 2 boundary = ((1, 2), (3, 4)) else boundary = ((1, 2), (3, 4), (5, 6)) end periodicity = periodicityof(domain) connectivity = dimension == 2 ? :face : :full topl = topology( mpicomm, brickrange; periodicity = periodicity, boundary = boundary, connectivity = connectivity, ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = array, polynomialorder = polynomialorder, ) return grid end abstract type AbstractDiscretizedDomain end struct DiscretizedDomain{𝒜, ℬ, 𝒞} <: AbstractDiscretizedDomain domain::𝒜 resolution::ℬ numerical::𝒞 end function DiscretizedDomain( domain::ProductDomain; elements = nothing, polynomial_order = nothing, overintegration_order = nothing, FT = Float64, mpicomm = MPI.COMM_WORLD, array = ClimateMachine.array_type(), topology = StackedBrickTopology, brick_builder = uniform_brick_builder, ) grid = DiscontinuousSpectralElementGrid( domain, elements = elements, polynomialorder = polynomial_order .+ overintegration_order, FT = FT, mpicomm = mpicomm, array = array, topology = topology, brick_builder = brick_builder, ) return DiscretizedDomain( domain, (; elements, polynomial_order, overintegration_order), grid, ) end ## SphericalShellDomain function DiscontinuousSpectralElementGrid( Ω::SphericalShellDomain; elements = (vertical = 2, horizontal = 4), polynomialorder = (vertical = 4, horizontal = 4), mpicomm = MPI.COMM_WORLD, boundary = (5, 6), 
FT = Float64, array = Array, ) Rrange = grid1d( Ω.radius - Ω.depth, Ω.radius + Ω.height, nelem = elements.vertical, ) topl = StackedCubedSphereTopology( mpicomm, elements.horizontal, Rrange, boundary = boundary, ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = array, polynomialorder = ( polynomialorder.vertical, polynomialorder.horizontal, ), meshwarp = equiangular_cubed_sphere_warp, ) return grid end function DiscretizedDomain( domain::SphericalShellDomain; elements = nothing, polynomial_order = nothing, overintegration_order = nothing, FT = Float64, mpicomm = MPI.COMM_WORLD, array = ClimateMachine.array_type(), topology = StackedBrickTopology, brick_builder = uniform_brick_builder, ) new_polynomial_order = convention(polynomial_order, Val(2)) new_polynomial_order = new_polynomial_order .+ convention(overintegration_order, Val(2)) vertical, horizontal = new_polynomial_order grid = DiscontinuousSpectralElementGrid( domain, elements = elements, polynomialorder = (; vertical, horizontal), FT = FT, mpicomm = mpicomm, array = array, ) return DiscretizedDomain( domain, (; elements, polynomial_order, overintegration_order), grid, ) end # extensions coordinates(grid::DiscretizedDomain) = coordinates(grid.numerical) polynomialorders(grid::DiscretizedDomain) = polynomialorders(grid.numerical) ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/sphere/sphere_helper_functions.jl ================================================ rad(x, y, z) = sqrt(x^2 + y^2 + z^2) lat(x, y, z) = asin(z / rad(x, y, z)) # ϕ ∈ [-π/2, π/2] lon(x, y, z) = atan(y, x) # λ ∈ [-π, π) r̂ⁿᵒʳᵐ(x, y, z) = norm([x, y, z]) ≈ 0 ? 1 : norm([x, y, z])^(-1) ϕ̂ⁿᵒʳᵐ(x, y, z) = norm([x, y, 0]) ≈ 0 ? 1 : (norm([x, y, z]) * norm([x, y, 0]))^(-1) λ̂ⁿᵒʳᵐ(x, y, z) = norm([x, y, 0]) ≈ 0 ? 
1 : norm([x, y, 0])^(-1) r̂(x, y, z) = r̂ⁿᵒʳᵐ(x, y, z) * @SVector([x, y, z]) ϕ̂(x, y, z) = ϕ̂ⁿᵒʳᵐ(x, y, z) * @SVector [x * z, y * z, -(x^2 + y^2)] λ̂(x, y, z) = λ̂ⁿᵒʳᵐ(x, y, z) * @SVector [-y, x, 0] rfunc(p, x...) = ρuʳᵃᵈ(p, lon(x...), lat(x...), rad(x...)) * r̂(x...) ϕfunc(p, x...) = ρuˡᵃᵗ(p, lon(x...), lat(x...), rad(x...)) * ϕ̂(x...) λfunc(p, x...) = ρuˡᵒⁿ(p, lon(x...), lat(x...), rad(x...)) * λ̂(x...) ρu⃗(p, x...) = rfunc(p, x...) + ϕfunc(p, x...) + λfunc(p, x...) function vector_field_representation( u⃗, grid::DiscontinuousSpectralElementGrid, rep::AbstractRepresentation, ) n_ijk, _, n_e = size(u⃗) uᴿ = copy(u⃗) v⃗ = VectorField(data = (u⃗[:, 1, :], u⃗[:, 2, :], u⃗[:, 3, :]), grid = grid) for ijk in 1:n_ijk, e in 1:n_e, s in 1:3 uᴿ[ijk, s, e] = v⃗(rep)[ijk, e, verbose = false][s] end return uᴿ end vector_field_representation(simulation::Simulation, rep) = vector_field_representation(simulation.state.ρu, simulation.model.grid, rep) ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/sphere/test_heat_equation.jl ================================================ #!/usr/bin/env julia --project include("../shared_source/boilerplate.jl") include("../three_dimensional/ThreeDimensionalCompressibleNavierStokesEquations.jl") include("sphere_helper_functions.jl") ClimateMachine.init() #! 
format: off refVals = ( [ [ "state", "ρ", 9.99999999999998778755e-01, 1.00000000000000111022e+00, 1.00000000000000000000e+00, 4.03070839356576998483e-16 ], [ "state", "ρu[1]", -6.93674606129580362185e-18, 1.19513727285696382782e-17, 9.77991462267996185035e-19, 2.78015892816367900233e-18 ], [ "state", "ρu[2]", -1.21340560458612968582e-17, 1.22630936333135748291e-17, -1.30065979768613433360e-21, 2.78919703417897612909e-18 ], [ "state", "ρu[3]", -1.00545092917566730233e-17, 1.00043577527427175213e-17, 3.76131929192945135398e-19, 2.47195369518944690265e-18 ], [ "state", "ρθ", -3.00959038541689510859e-02, -3.00957518135526284897e-02, -3.00958465374152675520e-02, 3.12498428220800724718e-08 ], ], [ [ "state", "ρ", 12, 12, 12, 0 ], [ "state", "ρu[1]", 0, 0, 0, 0 ], [ "state", "ρu[2]", 0, 0, 0, 0 ], [ "state", "ρu[3]", 0, 0, 0, 0 ], [ "state", "ρθ", 12, 12, 12, 0 ], ], ) #! format: on ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin ######## # Setup physical and numerical domains ######## Ω = AtmosDomain(radius = 1, height = 0.2) grid = DiscretizedDomain( Ω; elements = (vertical = 1, horizontal = 5), polynomial_order = (vertical = 1 + 0, horizontal = 3 + 0), overintegration_order = (vertical = 1, horizontal = 1), ) ######## # Define physical parameters and parameterizations ######## parameters = ( ρₒ = 1, # reference density cₛ = 1e-2, # sound speed R = Ω.radius, # [m] ω = 0.0, # [s⁻¹] K = 7.848e-6, # [s⁻¹] n = 4, # dimensionless Ω = 0.0, # [s⁻¹] 2π/86400 ) physics = FluidPhysics(; orientation = SphericalOrientation(), advection = NonLinearAdvectionTerm(), dissipation = ConstantViscosity{Float64}(μ = 0, ν = 0.0, κ = 1e-3), coriolis = nothing, buoyancy = Buoyancy{Float64}(α = 2e-4, g = 0), ) ######## # Define timestepping parameters ######## Δt = 0.1 * min_node_distance(grid.numerical)^2 / physics.dissipation.κ start_time = 0 end_time = 20.0 * Δt method = SSPRK22Heuns timestepper = TimeStepper(method = method, timestep = Δt) callbacks = 
(Info(), StateCheck(10)) ######## # Define boundary conditions (west east are the ones that are enforced for a sphere) ######## ρu_bcs = (bottom = Impenetrable(FreeSlip()), top = Impenetrable(FreeSlip())) ρθ_bcs = ( bottom = TemperatureFlux( flux = (p, state, aux, t) -> p.Q, params = (Q = 1e-3,), # positive means removing heat from system ), top = TemperatureFlux( flux = (p, state, aux, t) -> p.Q, params = (Q = 1e-3,), # positive means removing heat from system ), ) BC = (ρθ = ρθ_bcs, ρu = ρu_bcs) ######## # Define initial conditions ######## # Earth Spherical Representation # longitude: λ ∈ [-π, π), λ = 0 is the Greenwich meridian # latitude: ϕ ∈ [-π/2, π/2], ϕ = 0 is the equator # radius: r ∈ [Rₑ - hᵐⁱⁿ, Rₑ + hᵐᵃˣ] # Rₑ = Radius of sphere; hᵐⁱⁿ, hᵐᵃˣ ≥ 0 ρ₀(p, λ, ϕ, r) = p.ρₒ ρuʳᵃᵈ(p, λ, ϕ, r) = 0.0 ρuˡᵃᵗ(p, λ, ϕ, r) = 0.0 ρuˡᵒⁿ(p, λ, ϕ, r) = 0.0 ρθ₀(p, λ, ϕ, r) = 0.0 # Cartesian Representation (boiler plate really) ρ₀ᶜᵃʳᵗ(p, x...) = ρ₀(p, lon(x...), lat(x...), rad(x...)) ρu⃗₀ᶜᵃʳᵗ(p, x...) = ( ρuʳᵃᵈ(p, lon(x...), lat(x...), rad(x...)) * r̂(x...) + ρuˡᵃᵗ(p, lon(x...), lat(x...), rad(x...)) * ϕ̂(x...) + ρuˡᵒⁿ(p, lon(x...), lat(x...), rad(x...)) * λ̂(x...) ) ρθ₀ᶜᵃʳᵗ(p, x...) 
= ρθ₀(p, lon(x...), lat(x...), rad(x...)) initial_conditions = (ρ = ρ₀ᶜᵃʳᵗ, ρu = ρu⃗₀ᶜᵃʳᵗ, ρθ = ρθ₀ᶜᵃʳᵗ) ######## # Create the things ######## model = SpatialModel( balance_law = Fluid3D(), physics = physics, numerics = (flux = RoeNumericalFlux(),), grid = grid, boundary_conditions = BC, parameters = parameters, ) simulation = Simulation( model = model, initial_conditions = initial_conditions, timestepper = timestepper, callbacks = callbacks, time = (; start = start_time, finish = end_time), ) ######## # Run the model ######## tic = Base.time() @testset "State Check" begin evolve!(simulation, model, refDat = refVals) end toc = Base.time() time = toc - tic println(time) ## Check the budget θ̄ = weightedsum(simulation.state, 5) @testset "Exact Geometry" begin # Exact (with exact geometry) R = grid.domain.radius H = grid.domain.height θ̄ᴱ = 0 θ̄ᴱ -= 4π * R^2 * end_time * BC.ρθ.bottom.params.Q θ̄ᴱ -= 4π * (R + H)^2 * end_time * BC.ρθ.top.params.Q δθ̄ᴱ = abs(θ̄ - θ̄ᴱ) / abs(θ̄ᴱ) println("The relative error w.r.t. exact geometry ", δθ̄ᴱ) @test isapprox(0, δθ̄ᴱ; atol = 1e-9) end # TODO: make Approx Geom test MPI safe #= @testset "Approx Geometry" begin # Exact (with approximated geometry) n_e = convention(grid.resolution.elements, Val(3)) n_ijk = convention(grid.resolution.polynomial_order, Val(3)) n_ijk = n_ijk .+ convention(grid.resolution.overintegration_order, Val(3)) n_ijk = n_ijk .+ (1, 1, 1) Mᴴ = reshape( grid.numerical.vgeo[:, grid.numerical.MHid, :], (n_ijk..., n_e[3], 6 * n_e[1] * n_e[2]), ) Aᴮᵒᵗ = sum(Mᴴ[:, :, 1, 1, :]) Aᵀᵒᵖ = sum(Mᴴ[:, :, end, end, :]) θ̄ᴬ = 0 θ̄ᴬ -= Aᴮᵒᵗ * end_time * BC.ρθ.bottom.params.Q θ̄ᴬ -= Aᵀᵒᵖ * end_time * BC.ρθ.top.params.Q δθ̄ᴬ = abs(θ̄ - θ̄ᴬ) / abs(θ̄ᴬ) println("The relative error w.r.t. 
approximated geometry ", δθ̄ᴬ) @test isapprox(0, δθ̄ᴬ; atol = 1e-15) end =# end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/sphere/test_hydrostatic_balance.jl ================================================ #!/usr/bin/env julia --project include("../shared_source/boilerplate.jl") include("../three_dimensional/ThreeDimensionalCompressibleNavierStokesEquations.jl") include("sphere_helper_functions.jl") ClimateMachine.init() #! format: off refVals = ( [ [ "state", "ρ", 1.99999999999600046319e-02, 1.00000000000004396483e+00, 5.10000000000001008083e-01, 2.91613390275248574035e-01 ], [ "state", "ρu[1]", -3.13311246519756673238e-11, 3.34851852426747048856e-11, 1.50582245544617117858e-13, 4.32064975692569532007e-12 ], [ "state", "ρu[2]", -3.98988708940419829624e-11, 4.12795751448340701883e-11, -1.66842288197274268178e-14, 4.34989190229689036913e-12 ], [ "state", "ρu[3]", -4.16490644835465690341e-11, 4.18355107496431387673e-11, 5.18313726923951858909e-14, 4.62898170296138141882e-12 ], [ "state", "ρθ", -4.90000000000050732751e+01, -9.79999999998041548821e-01, -2.49900000000000446221e+01, 1.42890561234872102148e+01 ], ], [ [ "state", "ρ", 12, 12, 12, 12 ], [ "state", "ρu[1]", 0, 0, 0, 0 ], [ "state", "ρu[2]", 0, 0, 0, 0 ], [ "state", "ρu[3]", 0, 0, 0, 0 ], [ "state", "ρθ", 12, 12, 12, 12 ], ], ) #! 
format: on ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin ######## # Setup physical and numerical domains ######## Ω = AtmosDomain(radius = 6e6, height = 1e5) grid = DiscretizedDomain( Ω; elements = (vertical = 4, horizontal = 4), polynomial_order = (vertical = 1, horizontal = 3), overintegration_order = (vertical = 1, horizontal = 1), ) ######## # Define physical parameters and parameterizations ######## parameters = ( ρₒ = 1, # reference density cₛ = 100.0, # sound speed ν = 1e-5, ∂θ = 0.98 / 1e5, α = 2e-4, g = 10.0, power = 1, ) ######## # Define timestepping parameters ######## Δtᴬ = min_node_distance(grid.numerical) / parameters.cₛ * 0.25 Δtᴰ = min_node_distance(grid.numerical)^2 / parameters.ν * 0.25 Δt = minimum([Δtᴬ, Δtᴰ]) start_time = 0 end_time = 86400 * 0.5 method = SSPRK22Heuns timestepper = TimeStepper(method = method, timestep = Δt) callbacks = (Info(), StateCheck(10))# , VTKState(iteration = 2880, filepath = ".")) physics = FluidPhysics(; orientation = SphericalOrientation(), advection = NonLinearAdvectionTerm(), dissipation = ConstantViscosity{Float64}( μ = 0, ν = parameters.ν, κ = 0.0, ), coriolis = SphereCoriolis{Float64}(Ω = 2π / 86400), buoyancy = Buoyancy{Float64}(α = parameters.α, g = parameters.g), ) ######## # Define boundary conditions (west east are the ones that are enforced for a sphere) ######## ρu_bcs = (bottom = Impenetrable(NoSlip()), top = Impenetrable(NoSlip())) ρθ_bcs = (bottom = Insulating(), top = Insulating()) BC = (ρθ = ρθ_bcs, ρu = ρu_bcs) ######## # Define initial conditions ######## # Earth Spherical Representation # longitude: λ ∈ [-π, π), λ = 0 is the Greenwich meridian # latitude: ϕ ∈ [-π/2, π/2], ϕ = 0 is the equator # radius: r ∈ [Rₑ - hᵐⁱⁿ, Rₑ + hᵐᵃˣ], Rₑ = Radius of sphere; hᵐⁱⁿ, hᵐᵃˣ ≥ 0 ρ₀(p, λ, ϕ, r) = (1 - p.∂θ * (r - 6e6)^p.power / p.power * 1e5^(1 - p.power)) * p.ρₒ ρuʳᵃᵈ(p, λ, ϕ, r) = 0.0 ρuˡᵃᵗ(p, λ, ϕ, r) = 0.0 ρuˡᵒⁿ(p, λ, ϕ, r) = 0.0 ρθ₀(p, λ, ϕ, r) = -ρ₀(p, λ, ϕ, r) * 
p.∂θ * (r - 6e6)^(p.power - 1) * 1e5^(1 - p.power) * (p.cₛ)^2 / (p.α * p.g) # Cartesian Representation (boiler plate really) ρ₀ᶜᵃʳᵗ(p, x...) = ρ₀(p, lon(x...), lat(x...), rad(x...)) ρu⃗₀ᶜᵃʳᵗ(p, x...) = ( ρuʳᵃᵈ(p, lon(x...), lat(x...), rad(x...)) * r̂(x...) + ρuˡᵃᵗ(p, lon(x...), lat(x...), rad(x...)) * ϕ̂(x...) + ρuˡᵒⁿ(p, lon(x...), lat(x...), rad(x...)) * λ̂(x...) ) ρθ₀ᶜᵃʳᵗ(p, x...) = ρθ₀(p, lon(x...), lat(x...), rad(x...)) initial_conditions = (ρ = ρ₀ᶜᵃʳᵗ, ρu = ρu⃗₀ᶜᵃʳᵗ, ρθ = ρθ₀ᶜᵃʳᵗ) ######## # Create the things ######## model = SpatialModel( balance_law = Fluid3D(), physics = physics, numerics = (flux = RoeNumericalFlux(),), grid = grid, boundary_conditions = BC, parameters = parameters, ) simulation = Simulation( model = model, initial_conditions = initial_conditions, timestepper = timestepper, callbacks = callbacks, time = (; start = start_time, finish = end_time), ) ######## # Run the model ######## tic = Base.time() evolve!(simulation, model, refDat = refVals) toc = Base.time() time = toc - tic println(time) end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/sphere/test_sphere.jl ================================================ #!/usr/bin/env julia --project include("../shared_source/boilerplate.jl") include("../three_dimensional/ThreeDimensionalCompressibleNavierStokesEquations.jl") ClimateMachine.init() #! 
format: off refVals = ( [ [ "state", "ρ", 9.99999999999999000799e-01, 1.00000000000000066613e+00, 1.00000000000000000000e+00, 3.49433608871729305050e-16 ], [ "state", "ρu[1]", -1.03159890504820785885e-17, 9.46342010159792552240e-18, -5.38891312633679647218e-19, 2.05248736591058928424e-18 ], [ "state", "ρu[2]", -1.12556410356665774824e-17, 1.11125313793689735132e-17, -2.17938113394074827612e-22, 2.14911076688864467562e-18 ], [ "state", "ρu[3]", -1.12757405914899880325e-17, 9.66517285069783167720e-18, -2.95076506235937354400e-19, 2.21386287201671452859e-18 ], [ "state", "ρθ", 9.99999999999999000799e-01, 1.00000000000000066613e+00, 1.00000000000000000000e+00, 3.49433608871729305050e-16 ], ], [ [ "state", "ρ", 12, 12, 12, 0 ], [ "state", "ρu[1]", 0, 0, 0, 0 ], [ "state", "ρu[2]", 0, 0, 0, 0 ], [ "state", "ρu[3]", 0, 0, 0, 0 ], [ "state", "ρθ", 12, 12, 12, 0 ], ], ) #! format: on ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin ######## # Setup physical and numerical domains ######## Ω = AtmosDomain(radius = 1, height = 0.02) grid = DiscretizedDomain( Ω; elements = (vertical = 1, horizontal = 4), polynomial_order = (vertical = 1, horizontal = 4), overintegration_order = 1, ) ######## # Define timestepping parameters ######## start_time = 0 end_time = 2.0 Δt = 0.05 method = SSPRK22Heuns timestepper = TimeStepper(method = method, timestep = Δt) callbacks = (Info(), StateCheck(10)) ######## # Define physical parameters and parameterizations ######## parameters = ( ρₒ = 1, # reference density cₛ = 1e-2, # sound speed ) physics = FluidPhysics(; advection = NonLinearAdvectionTerm(), dissipation = ConstantViscosity{Float64}(μ = 0, ν = 0.0, κ = 0.0), coriolis = nothing, buoyancy = Buoyancy{Float64}(α = 2e-4, g = 0), ) ######## # Define boundary conditions (west east are the ones that are enforced for a sphere) ######## ρu_bcs = (bottom = Impenetrable(NoSlip()), top = Impenetrable(NoSlip())) ρθ_bcs = (bottom = Insulating(), top = Insulating()) 
BC = (ρθ = ρθ_bcs, ρu = ρu_bcs) ######## # Define initial conditions ######## ρ₀(p, x, y, z) = p.ρₒ ρu₀(p, x...) = ρ₀(p, x...) * -0 ρv₀(p, x...) = ρ₀(p, x...) * -0 ρw₀(p, x...) = ρ₀(p, x...) * -0 ρθ₀(p, x...) = ρ₀(p, x...) * 1.0 ρu⃗₀(p, x...) = @SVector [ρu₀(p, x...), ρv₀(p, x...), ρw₀(p, x...)] initial_conditions = (ρ = ρ₀, ρu = ρu⃗₀, ρθ = ρθ₀) ######## # Create the things ######## model = SpatialModel( balance_law = Fluid3D(), physics = physics, numerics = (flux = RoeNumericalFlux(),), grid = grid, boundary_conditions = BC, parameters = parameters, ) simulation = Simulation( model = model, initial_conditions = initial_conditions, timestepper = timestepper, callbacks = callbacks, time = (; start = start_time, finish = end_time), ) ######## # Run the model ######## tic = Base.time() evolve!(simulation, model; refDat = refVals) toc = Base.time() time = toc - tic println(time) end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/ThreeDimensionalCompressibleNavierStokesEquations.jl ================================================ include("../shared_source/boilerplate.jl") import ClimateMachine.BalanceLaws: vars_state, init_state_prognostic!, init_state_auxiliary!, compute_gradient_argument!, compute_gradient_flux!, flux_first_order!, flux_second_order!, source!, wavespeed, boundary_conditions, boundary_state! import ClimateMachine.DGMethods: DGModel import ClimateMachine.NumericalFluxes: numerical_flux_first_order! import ClimateMachine.Orientations: vertical_unit_vector """ ThreeDimensionalCompressibleNavierStokesEquations <: BalanceLaw A `BalanceLaw` for shallow water modeling. 
write out the equations here # Usage ThreeDimensionalCompressibleNavierStokesEquations() """ abstract type AbstractFluid3D <: AbstractFluid end struct Fluid3D <: AbstractFluid3D end struct ThreeDimensionalCompressibleNavierStokesEquations{ I, D, O, A, T, C, F, BC, FT, } <: AbstractFluid3D initial_value_problem::I domain::D orientation::O advection::A turbulence::T coriolis::C forcing::F boundary_conditions::BC cₛ::FT ρₒ::FT function ThreeDimensionalCompressibleNavierStokesEquations{FT}( initial_value_problem::I, domain::D, orientation::O, advection::A, turbulence::T, coriolis::C, forcing::F, boundary_conditions::BC; cₛ = FT(sqrt(10)), # m/s ρₒ = FT(1), #kg/m³ ) where {FT <: AbstractFloat, I, D, O, A, T, C, F, BC} return new{I, D, O, A, T, C, F, BC, FT}( initial_value_problem, domain, orientation, advection, turbulence, coriolis, forcing, boundary_conditions, cₛ, ρₒ, ) end end CNSE3D = ThreeDimensionalCompressibleNavierStokesEquations function vars_state(m::CNSE3D, ::Prognostic, T) @vars begin ρ::T ρu::SVector{3, T} ρθ::T end end function init_state_prognostic!(m::CNSE3D, state::Vars, aux::Vars, localgeo, t) cnse_init_state!(m, state, aux, localgeo, t) end # default initial state if IVP == nothing function cnse_init_state!(model::CNSE3D, state, aux, localgeo, t) x = aux.x y = aux.y z = aux.z ρ = model.ρₒ state.ρ = ρ state.ρu = ρ * @SVector [-0, -0, -0] state.ρθ = ρ return nothing end # user defined initial state function cnse_init_state!( model::CNSE3D{<:InitialValueProblem}, state, aux, localgeo, t, ) x = aux.x y = aux.y z = aux.z params = model.initial_value_problem.params ic = model.initial_value_problem.initial_conditions state.ρ = ic.ρ(params, x, y, z) state.ρu = ic.ρu(params, x, y, z) state.ρθ = ic.ρθ(params, x, y, z) return nothing end function vars_state(m::CNSE3D, st::Auxiliary, T) @vars begin x::T y::T z::T orientation::vars_state(m.orientation, st, T) end end function init_state_auxiliary!( m::CNSE3D, state_auxiliary::MPIStateArray, grid, direction, ) # 
update the geopotential Φ in state_auxiliary.orientation.Φ
    init_state_auxiliary!(
        m,
        (m, aux, tmp, geom) ->
            orientation_nodal_init_aux!(m.orientation, m.domain, aux, geom),
        state_auxiliary,
        grid,
        direction,
    )
    # update ∇Φ in state_auxiliary.orientation.∇Φ
    orientation_gradient(m, m.orientation, state_auxiliary, grid, direction)
    # store coordinates and potentially other stuff
    init_state_auxiliary!(
        m,
        (m, aux, tmp, geom) -> cnse_init_aux!(m, aux, geom),
        state_auxiliary,
        grid,
        direction,
    )
    return nothing
end

# Fill orientation.∇Φ by differentiating orientation.Φ on the DG grid.
function orientation_gradient(
    model::CNSE3D,
    ::Orientation,
    state_auxiliary,
    grid,
    direction,
)
    auxiliary_field_gradient!(
        model,
        state_auxiliary,
        ("orientation.∇Φ",),
        state_auxiliary,
        ("orientation.Φ",),
        grid,
        direction,
    )
    return nothing
end

# No orientation: there is no geopotential to differentiate.
function orientation_gradient(::CNSE3D, ::NoOrientation, _...)
    return nothing
end

# Spherical orientation: Φ is the distance from the reference radius.
# NOTE(review): `domain` here is the (Lˣ, Lʸ, Lᶻ) tuple stored on the model,
# so `domain[1]` is the first extent used as the reference radius — confirm.
function orientation_nodal_init_aux!(
    ::SphericalOrientation,
    domain::Tuple,
    aux::Vars,
    geom::LocalGeometry,
)
    norm_R = norm(geom.coord)
    @inbounds aux.orientation.Φ = norm_R - domain[1]
    return nothing
end

# Disabled alternative orientation, intentionally kept as a string literal:
"""
function orientation_nodal_init_aux!(
    ::SuperSphericalOrientation,
    domain::Tuple,
    aux::Vars,
    geom::LocalGeometry,
)
    norm_R = norm(geom.coord)
    @inbounds aux.orientation.Φ = 1 / norm_R^2
end
"""

# Flat orientation: Φ is simply the vertical coordinate.
function orientation_nodal_init_aux!(
    ::FlatOrientation,
    domain::Tuple,
    aux::Vars,
    geom::LocalGeometry,
)
    @inbounds aux.orientation.Φ = geom.coord[3]
    return nothing
end

function orientation_nodal_init_aux!(
    ::NoOrientation,
    domain::Tuple,
    aux::Vars,
    geom::LocalGeometry,
)
    return nothing
end

# Cache the nodal coordinates in the auxiliary state.
function cnse_init_aux!(::CNSE3D, aux, geom)
    @inbounds begin
        aux.x = geom.coord[1]
        aux.y = geom.coord[2]
        aux.z = geom.coord[3]
    end
    return nothing
end

# Quantities whose DG gradients are needed.
function vars_state(m::CNSE3D, ::Gradient, T)
    @vars begin
        ∇ρ::T
        ∇u::SVector{3, T}
        ∇θ::T
        ∇p::T
    end
end

# The pressure p = (cₛ ρ)² / (2 ρₒ) is placed in the gradient argument so
# that ∇p is produced by the gradient machinery (and later applied as a
# source; see source! and the commented-out pressure flux below).
function compute_gradient_argument!(
    model::CNSE3D,
    grad::Vars,
    state::Vars,
    aux::Vars,
    t::Real,
)
    ρ = state.ρ
    cₛ = model.cₛ
    ρₒ = model.ρₒ
    grad.∇p = (cₛ * ρ)^2 / (2 * ρₒ)
    compute_gradient_argument!(model.turbulence, grad, state, aux, t)
end

# NOTE(review): ∇u / ∇θ are loaded with the conserved ρu / ρθ (despite the
# `u = ρu` aliasing), so the diffusion below acts on momentum, not velocity
# — confirm this is intended.
@inline function compute_gradient_argument!(
    ::ConstantViscosity,
    grad::Vars,
    state::Vars,
    aux::Vars,
    t::Real,
)
    ρ = state.ρ
    ρu = state.ρu
    ρθ = state.ρθ

    u = ρu
    θ = ρθ

    grad.∇ρ = ρ
    grad.∇u = u
    grad.∇θ = θ

    return nothing
end

# Gradient-flux (diffusive) variables.
function vars_state(m::CNSE3D, ::GradientFlux, T)
    @vars begin
        μ∇ρ::SVector{3, T}
        ν∇u::SMatrix{3, 3, T, 9}
        κ∇θ::SVector{3, T}
        ∇p::SVector{3, T}
    end
end

function compute_gradient_flux!(
    model::CNSE3D,
    gradflux::Vars,
    grad::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
)
    # Pass ∇p straight through; the closure fills the diffusive fluxes.
    gradflux.∇p = grad.∇p
    compute_gradient_flux!(
        model,
        model.turbulence,
        gradflux,
        grad,
        state,
        aux,
        t,
    )
end

# Constant-coefficient diffusive fluxes; the minus sign makes them
# down-gradient when added in flux_second_order!.
@inline function compute_gradient_flux!(
    ::CNSE3D,
    turb::ConstantViscosity,
    gradflux::Vars,
    grad::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
)
    μ = turb.μ * I
    ν = turb.ν * I
    κ = turb.κ * I

    gradflux.μ∇ρ = -μ * grad.∇ρ
    gradflux.ν∇u = -ν * grad.∇u
    gradflux.κ∇θ = -κ * grad.∇θ

    return nothing
end

# First-order (hyperbolic) flux: mass flux plus optional advection. The
# pressure term is handled through the gradient machinery instead of here.
@inline function flux_first_order!(
    model::CNSE3D,
    flux::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
    direction,
)
    ρ = state.ρ
    ρu = state.ρu
    ρθ = state.ρθ

    cₛ = model.cₛ
    ρₒ = model.ρₒ

    flux.ρ += ρu
    # flux.ρu += (cₛ * ρ)^2 / (2 * ρₒ) * I

    advective_flux!(model, model.advection, flux, state, aux, t)

    return nothing
end

# No advection term configured.
advective_flux!(::CNSE3D, ::Nothing, _...)
= nothing @inline function advective_flux!( ::CNSE3D, ::NonLinearAdvectionTerm, flux::Grad, state::Vars, aux::Vars, t::Real, ) ρ = state.ρ ρu = state.ρu ρθ = state.ρθ flux.ρu += ρu ⊗ ρu / ρ flux.ρθ += ρu * ρθ / ρ return nothing end function flux_second_order!( model::CNSE3D, flux::Grad, state::Vars, gradflux::Vars, ::Vars, aux::Vars, t::Real, ) flux_second_order!(model, model.turbulence, flux, state, gradflux, aux, t) end @inline function flux_second_order!( ::CNSE3D, ::ConstantViscosity, flux::Grad, state::Vars, gradflux::Vars, aux::Vars, t::Real, ) flux.ρ += gradflux.μ∇ρ flux.ρu += gradflux.ν∇u flux.ρθ += gradflux.κ∇θ return nothing end @inline function source!( model::CNSE3D, source::Vars, state::Vars, gradflux::Vars, aux::Vars, t::Real, direction, ) source.ρu -= gradflux.∇p coriolis_force!(model, model.coriolis, source, state, aux, t) forcing_term!(model, model.forcing, source, state, aux, t) return nothing end coriolis_force!(::CNSE3D, ::Nothing, _...) = nothing @inline function coriolis_force!( model::CNSE3D, coriolis::fPlaneCoriolis, source, state, aux, t, ) # f × u f = [-0, -0, coriolis_parameter(model, coriolis, aux.coords)] ρu = state.ρu source.ρu -= f × ρu return nothing end @inline function coriolis_force!( model::CNSE3D, coriolis::SphereCoriolis, source, state, aux, t, ) # f × u Ω = coriolis.Ω f = @SVector [-0, -0, 2Ω] ρu = state.ρu source.ρu -= f × ρu return nothing end forcing_term!(::CNSE3D, ::Nothing, _...) = nothing @inline function forcing_term!( model::CNSE3D, buoy::Buoyancy, source, state, aux, t, ) α = buoy.α g = buoy.g ρθ = state.ρθ # as temperature increase, density decreases B = -α * g * ρθ k̂ = vertical_unit_vector(model.orientation, aux) # gravity points downward source.ρu -= B * k̂ end @inline vertical_unit_vector(::Orientation, aux) = aux.orientation.∇Φ @inline vertical_unit_vector(::NoOrientation, aux) = @SVector [0, 0, 1] @inline wavespeed(m::CNSE3D, _...) 
= m.cₛ roe_average(ρ⁻, ρ⁺, var⁻, var⁺) = (sqrt(ρ⁻) * var⁻ + sqrt(ρ⁺) * var⁺) / (sqrt(ρ⁻) + sqrt(ρ⁺)) function numerical_flux_first_order!( ::RoeNumericalFlux, model::CNSE3D, fluxᵀn::Vars{S}, n⁻::SVector, state⁻::Vars{S}, aux⁻::Vars{A}, state⁺::Vars{S}, aux⁺::Vars{A}, t, direction, ) where {S, A} numerical_flux_first_order!( CentralNumericalFluxFirstOrder(), model, fluxᵀn, n⁻, state⁻, aux⁻, state⁺, aux⁺, t, direction, ) FT = eltype(fluxᵀn) # constants and normal vectors cₛ = model.cₛ ρₒ = model.ρₒ # - states ρ⁻ = state⁻.ρ ρu⁻ = state⁻.ρu ρθ⁻ = state⁻.ρθ # constructed states u⁻ = ρu⁻ / ρ⁻ θ⁻ = ρθ⁻ / ρ⁻ uₙ⁻ = u⁻' * n⁻ # in general thermodynamics p⁻ = (cₛ * ρ⁻)^2 / (2 * ρₒ) c⁻ = cₛ * sqrt(ρ⁻ / ρₒ) # + states ρ⁺ = state⁺.ρ ρu⁺ = state⁺.ρu ρθ⁺ = state⁺.ρθ # constructed states u⁺ = ρu⁺ / ρ⁺ θ⁺ = ρθ⁺ / ρ⁺ uₙ⁺ = u⁺' * n⁻ # in general thermodynamics p⁺ = (cₛ * ρ⁺)^2 / (2 * ρₒ) c⁺ = cₛ * sqrt(ρ⁺ / ρₒ) # construct roe averges ρ = sqrt(ρ⁻ * ρ⁺) u = roe_average(ρ⁻, ρ⁺, u⁻, u⁺) θ = roe_average(ρ⁻, ρ⁺, θ⁻, θ⁺) c = roe_average(ρ⁻, ρ⁺, c⁻, c⁺) # construct normal velocity uₙ = u' * n⁻ # differences Δρ = ρ⁺ - ρ⁻ Δp = p⁺ - p⁻ Δu = u⁺ - u⁻ Δρθ = ρθ⁺ - ρθ⁻ Δuₙ = Δu' * n⁻ # constructed values c⁻² = 1 / c^2 w1 = abs(uₙ - c) * (Δp - ρ * c * Δuₙ) * 0.5 * c⁻² w2 = abs(uₙ + c) * (Δp + ρ * c * Δuₙ) * 0.5 * c⁻² w3 = abs(uₙ) * (Δρ - Δp * c⁻²) w4 = abs(uₙ) * ρ w5 = abs(uₙ) * (Δρθ - θ * Δp * c⁻²) # fluxes!!! fluxᵀn.ρ -= (w1 + w2 + w3) * 0.5 fluxᵀn.ρu -= ( w1 * (u - c * n⁻) + w2 * (u + c * n⁻) + w3 * u + w4 * (Δu - Δuₙ * n⁻) ) * 0.5 fluxᵀn.ρθ -= ((w1 + w2) * θ + w5) * 0.5 return nothing end boundary_conditions(model::CNSE3D) = model.boundary_conditions """ boundary_state!(nf, ::CNSE3D, args...) applies boundary conditions for the hyperbolic fluxes dispatches to a function in CNSEBoundaryConditions """ @inline function boundary_state!(nf, bc, model::CNSE3D, args...) return _cnse_boundary_state!(nf, bc, model, args...) 
end """ cnse_boundary_state!(nf, bc::FluidBC, ::CNSE3D) splits boundary condition application into velocity """ @inline function cnse_boundary_state!(nf, bc::FluidBC, m::CNSE3D, args...) cnse_boundary_state!(nf, bc.momentum, m, m.turbulence, args...) cnse_boundary_state!(nf, bc.temperature, m, args...) return nothing end include("bc_momentum.jl") include("bc_temperature.jl") """ STUFF FOR ANDRE'S WRAPPERS """ function get_boundary_conditions( model::SpatialModel{BL}, ) where {BL <: AbstractFluid3D} bcs = model.boundary_conditions west_east = (check_bc(bcs, :west), check_bc(bcs, :east)) south_north = (check_bc(bcs, :south), check_bc(bcs, :north)) bottom_top = (check_bc(bcs, :bottom), check_bc(bcs, :top)) return (west_east..., south_north..., bottom_top...) end function DGModel( model::SpatialModel{BL}; initial_conditions = nothing, ) where {BL <: AbstractFluid3D} params = model.parameters physics = model.physics Lˣ, Lʸ, Lᶻ = length(model.grid.domain) bcs = get_boundary_conditions(model) FT = eltype(model.grid.numerical.vgeo) if !isnothing(initial_conditions) initial_conditions = InitialValueProblem(params, initial_conditions) end balance_law = CNSE3D{FT}( initial_conditions, (Lˣ, Lʸ, Lᶻ), physics.orientation, physics.advection, physics.dissipation, physics.coriolis, physics.buoyancy, bcs, ρₒ = params.ρₒ, cₛ = params.cₛ, ) numerical_flux_first_order = model.numerics.flux # should be a function rhs = DGModel( balance_law, model.grid.numerical, numerical_flux_first_order, CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) return rhs end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/bc_momentum.jl ================================================ """ cnse_boundary_state!(::NumericalFluxFirstOrder, ::Impenetrable{FreeSlip}, ::CNSE3D) apply free slip boundary condition for velocity sets reflective ghost point """ @inline function cnse_boundary_state!( 
    ::NumericalFluxFirstOrder,
    ::Impenetrable{FreeSlip},
    ::CNSE3D,
    ::TurbulenceClosure,
    state⁺,
    aux⁺,
    n⁻,
    state⁻,
    aux⁻,
    t,
    args...,
)
    state⁺.ρ = state⁻.ρ

    ρu⁻ = state⁻.ρu
    # reflect the normal momentum component: ρu⁺ = ρu⁻ - 2(n⋅ρu⁻)n
    state⁺.ρu = ρu⁻ - 2 * n⁻ ⋅ ρu⁻ .* SVector(n⁻)

    return nothing
end

"""
    cnse_boundary_state!(::NumericalFluxGradient, ::Impenetrable{FreeSlip}, ::CNSE3D)

apply free slip boundary condition for velocity
sets non-reflective ghost point
"""
function cnse_boundary_state!(
    ::NumericalFluxGradient,
    ::Impenetrable{FreeSlip},
    ::CNSE3D,
    ::ConstantViscosity,
    state⁺,
    aux⁺,
    n⁻,
    state⁻,
    aux⁻,
    t,
    args...,
)
    state⁺.ρ = state⁻.ρ

    ρu⁻ = state⁻.ρu
    # remove (rather than reflect) the normal component: ρu⁺ = ρu⁻ - (n⋅ρu⁻)n
    state⁺.ρu = ρu⁻ - n⁻ ⋅ ρu⁻ .* SVector(n⁻)

    return nothing
end

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::Impenetrable{FreeSlip}, ::CNSE3D)

apply free slip boundary condition for velocity
apply zero numerical flux in the normal direction
"""
function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Impenetrable{FreeSlip},
    ::CNSE3D,
    ::ConstantViscosity,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
    args...,
)
    state⁺.ρu = state⁻.ρu
    # zero viscous momentum flux through the boundary
    gradflux⁺.ν∇u = n⁻ * (@SVector [-0, -0, -0])'

    return nothing
end

"""
    cnse_boundary_state!(::NumericalFluxFirstOrder, ::Impenetrable{NoSlip}, ::CNSE3D)

apply no slip boundary condition for velocity
sets reflective ghost point
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxFirstOrder,
    ::Impenetrable{NoSlip},
    ::CNSE3D,
    ::TurbulenceClosure,
    state⁺,
    aux⁺,
    n⁻,
    state⁻,
    aux⁻,
    t,
    args...,
)
    state⁺.ρ = state⁻.ρ
    # flip the full momentum vector so it averages to zero on the face
    state⁺.ρu = -state⁻.ρu

    return nothing
end

"""
    cnse_boundary_state!(::NumericalFluxGradient, ::Impenetrable{NoSlip}, ::CNSE3D)

apply no slip boundary condition for velocity
set numerical flux to zero for U
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxGradient,
    ::Impenetrable{NoSlip},
    ::CNSE3D,
    ::ConstantViscosity,
    state⁺,
    aux⁺,
    n⁻,
    state⁻,
    aux⁻,
    t,
    args...,
)
    FT = eltype(state⁺)
    state⁺.ρu = @SVector zeros(FT, 3)

    return nothing
end

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::Impenetrable{NoSlip}, ::CNSE3D)

apply no slip boundary condition for velocity
sets ghost point to have no numerical flux on the boundary for U
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Impenetrable{NoSlip},
    ::CNSE3D,
    ::ConstantViscosity,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
    args...,
)
    state⁺.ρu = -state⁻.ρu
    # copy the interior viscous flux so the face average is the interior value
    gradflux⁺.ν∇u = gradflux⁻.ν∇u

    return nothing
end

"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Penetrable{FreeSlip}, ::CNSE3D)

no mass boundary condition for penetrable
"""
cnse_boundary_state!(
    ::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::Penetrable{FreeSlip},
    ::CNSE3D,
    ::ConstantViscosity,
    _...,
) = nothing

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::Penetrable{FreeSlip}, ::CNSE3D)

apply free slip boundary condition for velocity
apply zero numerical flux in the normal direction
"""
function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Penetrable{FreeSlip},
    ::CNSE3D,
    ::ConstantViscosity,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
    args...,
)
    state⁺.ρu = state⁻.ρu
    # zero viscous momentum flux through the boundary
    gradflux⁺.ν∇u = n⁻ * (@SVector [-0, -0, -0])'

    return nothing
end

"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Impenetrable{MomentumFlux}, ::CNSE3D)

apply kinematic stress boundary condition for velocity
applies free slip conditions for first-order and gradient fluxes
"""
function cnse_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::Impenetrable{<:MomentumFlux},
    model::CNSE3D,
    turb::TurbulenceClosure,
    args...,
)
    return cnse_boundary_state!(
        nf,
        Impenetrable(FreeSlip()),
        model,
        turb,
        args...,
    )
end

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::Impenetrable{MomentumFlux}, ::CNSE3D)

apply kinematic stress boundary condition for velocity
sets ghost point to have specified flux on the boundary for ν∇u
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
boundary for κ∇θ
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Insulating,
    ::CNSE3D,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
)
    state⁺.ρθ = state⁻.ρθ
    # zero diffusive temperature flux through the boundary
    gradflux⁺.κ∇θ = n⁻ * -0

    return nothing
end

"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::TemperatureFlux, ::CNSE3D)

apply temperature flux boundary condition for temperature
applies insulating conditions for first-order and gradient fluxes
"""
function cnse_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::TemperatureFlux,
    model::CNSE3D,
    args...,
)
    return cnse_boundary_state!(nf, Insulating(), model, args...)
end

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::TemperatureFlux, ::CNSE3D)

apply temperature flux boundary condition for temperature
sets ghost point to have specified flux on the boundary for κ∇θ
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    bc::TemperatureFlux,
    ::CNSE3D,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
)
    state⁺.ρθ = state⁻.ρθ
    # impose the user-supplied temperature flux along the outward normal
    gradflux⁺.κ∇θ = n⁻ * bc(state⁻, aux⁻, t)

    return nothing
end

================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/config_sphere.jl ================================================

include("../CNSE.jl")
include("ThreeDimensionalCompressibleNavierStokesEquations.jl")

# Build a cubed-sphere CNSE3D test configuration (topology, grid, model, DG rhs).
# NOTE(review): `FT` is not defined in this function or file — it must come from
# global scope at the call site; confirm callers define it before use.
# NOTE(review): the keyword `boundary_conditons` is misspelled, but it is part
# of the call interface, so renaming it here would break existing callers.
function Config(
    name,
    resolution,
    domain,
    params;
    numerical_flux_first_order = RoeNumericalFlux(),
    Nover = 0,
    boundary = (1, 1),
    boundary_conditons = (FluidBC(Impenetrable(FreeSlip()), Insulating()),),
)
    mpicomm = MPI.COMM_WORLD
    ArrayType = ClimateMachine.array_type()

    println(string(resolution.Nᶻ) * " elems in the vertical")
    vert_range =
        grid1d(domain.min_height, domain.max_height, nelem = resolution.Nᶻ)

    println(
        string(resolution.Nʰ) * "x" * string(resolution.Nʰ) * " elems per face",
    )
    topology = StackedCubedSphereTopology(
        mpicomm,
        resolution.Nʰ,
        vert_range;
        boundary = boundary,
    )

    println("poly order is " * string(resolution.N))
    println("OI order is " * string(Nover))
    # over-integration is absorbed into the polynomial order of the grid
    grid = DiscontinuousSpectralElementGrid(
        topology,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = resolution.N + Nover,
        meshwarp = equiangular_cubed_sphere_warp,
    )

    model = CNSE3D{FT}(
        nothing,
        (domain.min_height, domain.max_height),
        ClimateMachine.Orientations.SphericalOrientation(),
        NonLinearAdvectionTerm(),
        ConstantViscosity{FT}(μ = params.μ, ν = params.ν, κ = params.κ),
        nothing,
        nothing,
        boundary_conditons;
        cₛ = params.cₛ,
        ρₒ = params.ρₒ,
    )

    dg = DGModel(
        model,
        grid,
        numerical_flux_first_order,
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    return Config(name, dg, Nover, mpicomm, ArrayType)
end

# Cache nodal Cartesian coordinates in the auxiliary state.
function cnse_init_aux!(::CNSE3D, aux, geom)
    @inbounds begin
        aux.x = geom.coord[1]
        aux.y = geom.coord[2]
        aux.z = geom.coord[3]
    end

    return nothing
end

================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/refvals_bickley_jet.jl ================================================

# [
#  [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ],
#  [         :                :         :        :      :            :          ],
# ]
#!
format: off first_order = ( [ [ "state", "ρ", 9.88641470218911577739e-01, 1.00735927139044068035e+00, 1.00000000000000244249e+00, 1.32618819574356725140e-03 ], [ "state", "ρu[1]", -2.42850976605026747102e-01, 5.73364756714398238202e-01, 1.59153832831308655882e-01, 9.67668639083398146594e-02 ], [ "state", "ρu[2]", -4.12933704690272018745e-01, 4.50172304099278386413e-01, -4.55511721449193612529e-14, 1.48850741581273815495e-01 ], [ "state", "ρu[3]", -3.70076433121673098459e-01, 3.26009261713208653433e-01, -6.46953234297288930041e-14, 8.65065044705036617634e-02 ], [ "state", "ρθ", -1.71139241110591289186e+00, 1.79353274817685659492e+00, -7.54540463032282362914e-16, 3.87893456476833764501e-01 ], ], [ ["state", "ρ", 12, 12, 12, 12], ["state", "ρu[1]", 11, 11, 12, 12], ["state", "ρu[2]", 12, 11, 0, 12], ["state", "ρu[3]", 12, 11, 0, 12], ["state", "ρθ", 11, 10, 0, 12], ], ) fourth_order = ( [ [ "state", "ρ", 9.79756567265035793746e-01, 1.00986882291859325633e+00, 9.99972252557182250676e-01, 1.27738464370501453651e-03 ], [ "state", "ρu[1]", -3.85746589538976891731e-01, 7.12308328222086784010e-01, 1.58403896799433618892e-01, 1.03266735858658170732e-01 ], [ "state", "ρu[2]", -5.86525244955121705104e-01, 6.33258737050533038193e-01, 2.62244534853688972004e-04, 1.37491363204364197559e-01 ], [ "state", "ρu[3]", -4.58727280284793481613e-01, 4.82564240905019259387e-01, -2.81636420608788126414e-04, 8.90227260571099660025e-02 ], [ "state", "ρθ", -4.51696345879803118351e+00, 4.14217705122501378412e+00, -5.08780751729477078403e-04, 3.32659156438805059253e-01 ], ], [ ["state", "ρ", 8, 8, 10, 7], ["state", "ρu[1]", 5, 5, 8, 7], ["state", "ρu[2]", 6, 5, 0, 6], ["state", "ρu[3]", 6, 5, 0, 7], ["state", "ρθ", 5, 4, 0, 6], ], ) #! 
format: on refVals = (; first_order, fourth_order) ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/refvals_buoyancy.jl ================================================ # [ # [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ], # [ : : : : : : ], # ] #! format: off parr = [ ["state", "ρ", 12, 12, 12, 12], ["state", "ρu[1]", 0, 0, 0, 0], ["state", "ρu[2]", 0, 0, 0, 0], ["state", "ρu[3]", 12, 8, 12, 12], ["state", "ρθ", 12, 10, 12, 12], ] second_order_flat = ( [ [ "state", "ρ", 9.95252314022507689195e-01, 9.99992856011554298590e-01, 9.98330419819817738158e-01, 1.48639562654353791886e-03 ], [ "state", "ρu[1]", -6.36093867411581375298e-15, 8.28629359748938561747e-15, 5.16470747993988398929e-16, 1.17216775586980107502e-15 ], [ "state", "ρu[2]", -3.70797682276813316617e-15, 9.42818503691220985643e-15, 6.07164354178963622023e-16, 1.23143801722553508699e-15 ], [ "state", "ρu[3]", -1.65133743521589989450e-03, 5.29367075491514133403e-09, -8.40309050919211576736e-04, 4.66618546037470796790e-04 ], [ "state", "ρθ", -9.95249493245247940365e+00, 1.99973806108046075331e-05, -4.98740538889376860965e+00, 2.91965149708573168397e+00 ], ], parr, ) second_order = ( [ [ "state", "ρ", 9.95252314022507689195e-01, 9.99992856011554298590e-01, 9.98330419819817738158e-01, 1.48639562654353791886e-03 ], [ "state", "ρu[1]", -6.36142448250610767485e-15, 8.28656656189152269018e-15, 5.17120118507278235699e-16, 1.17302082244252352324e-15 ], [ "state", "ρu[2]", -3.70772154095122499196e-15, 9.42925797553789671672e-15, 6.06575884435408245721e-16, 1.23026363135339100359e-15 ], [ "state", "ρu[3]", -1.65133743521588883564e-03, 5.29367075398690075732e-09, -8.40309050919211468315e-04, 4.66618546037470417320e-04 ], [ "state", "ρθ", -9.95249493245247940365e+00, 1.99973806108054952236e-05, -4.98740538889376860965e+00, 2.91965149708573168397e+00 ], ], parr, ) fourth_order_flat = ( [ [ "state", "ρ", 
9.95377495534709000324e-01, 9.99992951378667060958e-01, 9.98321272635789513927e-01, 1.50722816639464090097e-03 ], [ "state", "ρu[1]", -1.22866254290737312252e-14, 1.95946879007561421956e-14, 1.73741725325975513295e-15, 4.23083086870931785383e-15 ], [ "state", "ρu[2]", -1.63058197687820133665e-14, 2.71134092821328864807e-14, 1.75302916450504673999e-15, 4.33921909208334743316e-15 ], [ "state", "ρu[3]", -1.66252523985498286418e-03, 5.55021884647818839491e-08, -8.13842490777055664608e-04, 4.76414940515918383830e-04 ], [ "state", "ρθ", -9.95373884733410818626e+00, -4.05672848247591079102e-07, -4.98722877855243940104e+00, 2.97859288054288384728e+00 ], ], parr, ) fourth_order = ( [ [ "state", "ρ", 9.95377495534709000324e-01, 9.99992951378667060958e-01, 9.98321272635789513927e-01, 1.50722816639464068413e-03 ], [ "state", "ρu[1]", -1.22778750186973996879e-14, 1.95893093780676565332e-14, 1.73744809621522011733e-15, 4.23044883848200063451e-15 ], [ "state", "ρu[2]", -1.62568523313266733927e-14, 2.71071963864711276060e-14, 1.75189805678757112174e-15, 4.33763908149213610479e-15 ], [ "state", "ρu[3]", -1.66252523985503642377e-03, 5.55021884639224168229e-08, -8.13842490777055447768e-04, 4.76414940515918817511e-04 ], [ "state", "ρθ", -9.95373884733410818626e+00, -4.05672848142820462126e-07, -4.98722877855243940104e+00, 2.97859288054288384728e+00 ], ], parr, ) #! 
format: on

refVals = (; second_order, fourth_order, second_order_flat, fourth_order_flat)

================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/run_bickley_jet.jl ================================================

#!/usr/bin/env julia --project

include("../shared_source/boilerplate.jl")
include("ThreeDimensionalCompressibleNavierStokesEquations.jl")

ClimateMachine.init()

########
# Setup physical and numerical domains
########
Ωˣ = IntervalDomain(-2π, 2π, periodic = true)
Ωʸ = IntervalDomain(-2π, 2π, periodic = true)
Ωᶻ = IntervalDomain(-2π, 2π, periodic = true)

grid = DiscretizedDomain(
    Ωˣ × Ωʸ × Ωᶻ;
    elements = 13,
    polynomial_order = 4,
    overintegration_order = 1,
)

########
# Define timestepping parameters
########
start_time = 0
end_time = 200.0
Δt = 0.004
method = SSPRK22Heuns
timestepper = TimeStepper(method = method, timestep = Δt)
callbacks = (Info(), StateCheck(10))

########
# Define physical parameters and parameterizations
########
parameters = (
    ϵ = 0.1, # perturbation size for initial condition
    l = 0.5, # Gaussian width
    k = 0.5, # Sinusoidal wavenumber
    ρₒ = 1, # reference density
    cₛ = sqrt(10), # sound speed
)

# inviscid, unforced setup: zero viscosity/diffusivity, no rotation/buoyancy
physics = FluidPhysics(;
    advection = NonLinearAdvectionTerm(),
    dissipation = ConstantViscosity{Float64}(μ = 0, ν = 0, κ = 0),
    coriolis = nothing,
    buoyancy = nothing,
)

########
# Define initial conditions
########

# The Bickley jet
U₀(p, x, y, z) = cosh(y)^(-2)
V₀(p, x, y, z) = 0
W₀(p, x, y, z) = 0

# Slightly off-center vortical perturbations
Ψ₁(p, x, y, z) =
    exp(-(y + p.l / 10)^2 / (2 * (p.l^2))) * cos(p.k * x) * cos(p.k * y)
Ψ₂(p, x, y, z) =
    exp(-(z + p.l / 10)^2 / (2 * (p.l^2))) * cos(p.k * y) * cos(p.k * z)

# Vortical velocity fields (u, v, w) = (-∂ʸ, +∂ˣ, 0) Ψ₁ + (0, -∂ᶻ, +∂ʸ)Ψ₂
u₀(p, x, y, z) =
    Ψ₁(p, x, y, z) * (p.k * tan(p.k * y) + y / (p.l^2) + 1 / (10 * p.l))
v₀(p, x, y, z) =
    Ψ₂(p, x, y, z) * (p.k * tan(p.k * z) + z / (p.l^2) + 1 / (10 * p.l)) -
    Ψ₁(p, x, y, z) *
p.k * tan(p.k * x) w₀(p, x, y, z) = -Ψ₂(p, x, y, z) * p.k * tan(p.k * y) θ₀(p, x, y, z) = sin(p.k * y) ρ₀(p, x, y, z) = p.ρₒ ρu₀(p, x...) = ρ₀(p, x...) * (p.ϵ * u₀(p, x...) + U₀(p, x...)) ρv₀(p, x...) = ρ₀(p, x...) * (p.ϵ * v₀(p, x...) + V₀(p, x...)) ρw₀(p, x...) = ρ₀(p, x...) * (p.ϵ * w₀(p, x...) + W₀(p, x...)) ρθ₀(p, x...) = ρ₀(p, x...) * θ₀(p, x...) ρu⃗₀(p, x...) = @SVector [ρu₀(p, x...), ρv₀(p, x...), ρw₀(p, x...)] initial_conditions = (ρ = ρ₀, ρu = ρu⃗₀, ρθ = ρθ₀) ######## # Create the things ######## model = SpatialModel( balance_law = Fluid3D(), physics = physics, numerics = (flux = RoeNumericalFlux(),), grid = grid, boundary_conditions = NamedTuple(), parameters = parameters, ) simulation = Simulation( model = model, initial_conditions = initial_conditions, timestepper = timestepper, callbacks = callbacks, time = (; start = start_time, finish = end_time), ) ######## # Run the model ######## tic = Base.time() evolve!(simulation, model) toc = Base.time() time = toc - tic println(time) ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/run_box.jl ================================================ #!/usr/bin/env julia --project include("../shared_source/boilerplate.jl") include("ThreeDimensionalCompressibleNavierStokesEquations.jl") ClimateMachine.init() ######## # Setup physical and numerical domains ######## Ωˣ = IntervalDomain(-2π, 2π, periodic = true) Ωʸ = IntervalDomain(-2π, 2π, periodic = true) Ωᶻ = IntervalDomain(-2π, 2π, periodic = false) grid = DiscretizedDomain( Ωˣ × Ωʸ × Ωᶻ; elements = 8, polynomial_order = 1, overintegration_order = 1, ) ######## # Define timestepping parameters ######## start_time = 0 end_time = 200.0 Δt = 0.05 method = SSPRK22Heuns timestepper = TimeStepper(method = method, timestep = Δt) callbacks = (Info(), StateCheck(10)) ######## # Define physical parameters and parameterizations ######## parameters = ( ρₒ = 1, # reference density cₛ = sqrt(10), # 
sound speed ) physics = FluidPhysics(; advection = NonLinearAdvectionTerm(), dissipation = ConstantViscosity{Float64}(μ = 0, ν = 1e-2, κ = 1e-2), coriolis = nothing, buoyancy = Buoyancy{Float64}(α = 2e-4, g = 10), ) ######## # Define boundary conditions ######## ρu_bcs = ( bottom = Impenetrable(NoSlip()), top = Impenetrable(MomentumFlux( flux = (p, state, aux, t) -> (@SVector [p.τ / state.ρ, -0, -0]), params = (τ = 0.01,), )), ) ρθ_bcs = ( bottom = Insulating(), top = TemperatureFlux(flux = (p, state, aux, t) -> (p.Q)), params = (Q = 0.1,), # positive means removing heat ) BC = (ρθ = ρθ_bcs, ρu = ρu_bcs) ######## # Define initial conditions ######## ρ₀(p, x, y, z) = p.ρₒ ρu₀(p, x...) = ρ₀(p, x...) * -0 ρv₀(p, x...) = ρ₀(p, x...) * -0 ρw₀(p, x...) = ρ₀(p, x...) * -0 ρθ₀(p, x...) = ρ₀(p, x...) * 5 ρu⃗₀(p, x...) = @SVector [ρu₀(p, x...), ρv₀(p, x...), ρw₀(p, x...)] initial_conditions = (ρ = ρ₀, ρu = ρu⃗₀, ρθ = ρθ₀) ######## # Create the things ######## model = SpatialModel( balance_law = Fluid3D(), physics = physics, numerics = (flux = RoeNumericalFlux(),), grid = grid, boundary_conditions = BC, parameters = parameters, ) simulation = Simulation( model = model, initial_conditions = initial_conditions, timestepper = timestepper, callbacks = callbacks, time = (; start = start_time, finish = end_time), ) ######## # Run the model ######## tic = Base.time() evolve!(simulation, model) toc = Base.time() time = toc - tic println(time) ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/run_taylor_green_vortex.jl ================================================ #!/usr/bin/env julia --project include("../shared_source/boilerplate.jl") include("ThreeDimensionalCompressibleNavierStokesEquations.jl") ClimateMachine.init() ######## # Setup physical and numerical domains ######## Ωˣ = IntervalDomain(-2π, 2π, periodic = true) Ωʸ = IntervalDomain(-2π, 2π, periodic = true) Ωᶻ = IntervalDomain(-2π, 2π, 
    periodic = true)

grid = DiscretizedDomain(
    Ωˣ × Ωʸ × Ωᶻ;
    elements = 16,
    polynomial_order = 1,
    overintegration_order = 1,
)

########
# Define timestepping parameters
########
start_time = 0
end_time = 200.0
Δt = 0.01
method = SSPRK22Heuns
timestepper = TimeStepper(method = method, timestep = Δt)
callbacks = (Info(), StateCheck(10))

########
# Define physical parameters and parameterizations
########
parameters = (
    Uₒ = 1, # reference velocity
    ρₒ = 1, # reference density
    cₛ = sqrt(10), # sound speed
)

physics = FluidPhysics(;
    advection = NonLinearAdvectionTerm(),
    dissipation = ConstantViscosity{Float64}(μ = 0, ν = 1e-3, κ = 1e-3),
    coriolis = nothing,
    buoyancy = nothing,
)

########
# Define initial conditions
########
# classical Taylor–Green velocity field plus a passive temperature mode
u₀(p, x, y, z) = p.Uₒ * sin(x) * cos(y) * cos(z)
v₀(p, x, y, z) = -p.Uₒ * cos(x) * sin(y) * cos(z)
w₀(p, x, y, z) = -0
θ₀(p, x, y, z) = sin(0.5 * z)
ρ₀(p, x, y, z) = p.ρₒ

ρu₀(p, x...) = ρ₀(p, x...) * u₀(p, x...)
ρv₀(p, x...) = ρ₀(p, x...) * v₀(p, x...)
ρw₀(p, x...) = ρ₀(p, x...) * w₀(p, x...)
ρθ₀(p, x...) = ρ₀(p, x...) * θ₀(p, x...)

ρu⃗₀(p, x...) = @SVector [ρu₀(p, x...), ρv₀(p, x...), ρw₀(p, x...)]

initial_conditions = (ρ = ρ₀, ρu = ρu⃗₀, ρθ = ρθ₀)

########
# Create the things
########
model = SpatialModel(
    balance_law = Fluid3D(),
    physics = physics,
    numerics = (flux = RoeNumericalFlux(),),
    grid = grid,
    boundary_conditions = NamedTuple(),
    parameters = parameters,
)

simulation = Simulation(
    model = model,
    initial_conditions = initial_conditions,
    timestepper = timestepper,
    callbacks = callbacks,
    time = (; start = start_time, finish = end_time),
)

########
# Run the model
########
tic = Base.time()

evolve!(simulation, model)

toc = Base.time()
time = toc - tic
println(time)

================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/test_bickley_jet.jl ================================================

#!/usr/bin/env julia --project

include("../shared_source/boilerplate.jl")
include("ThreeDimensionalCompressibleNavierStokesEquations.jl")

ClimateMachine.init()

#################
# RUN THE TESTS #
#################
@testset "$(@__FILE__)" begin

    include("refvals_bickley_jet.jl")

    ########
    # Setup physical and numerical domains
    ########
    Ωˣ = IntervalDomain(-2π, 2π, periodic = true)
    Ωʸ = IntervalDomain(-2π, 2π, periodic = true)
    Ωᶻ = IntervalDomain(-2π, 2π, periodic = true)

    first_order = DiscretizedDomain(
        Ωˣ × Ωʸ × Ωᶻ;
        elements = 32,
        polynomial_order = 1,
        overintegration_order = 1,
    )

    fourth_order = DiscretizedDomain(
        Ωˣ × Ωʸ × Ωᶻ;
        elements = 13,
        polynomial_order = 4,
        overintegration_order = 1,
    )

    grids = Dict("first_order" => first_order, "fourth_order" => fourth_order)

    ########
    # Define timestepping parameters
    ########
    start_time = 0
    end_time = 100.0
    Δt = 0.004
    method = SSPRK22Heuns
    timestepper = TimeStepper(method = method, timestep = Δt)
    callbacks = (Info(), StateCheck(10))

    ########
    # Define physical parameters and parameterizations
    ########
    parameters = (
        ϵ = 0.1, # perturbation size for initial condition
        l = 0.5, # Gaussian width
        k = 0.5, #
Sinusoidal wavenumber
        ρₒ = 1, # reference density
        cₛ = sqrt(10), # sound speed
    )

    physics = FluidPhysics(;
        advection = NonLinearAdvectionTerm(),
        dissipation = ConstantViscosity{Float64}(μ = 0, ν = 0, κ = 0),
        coriolis = nothing,
        buoyancy = nothing,
    )

    ########
    # Define initial conditions
    ########

    # The Bickley jet
    U₀(p, x, y, z) = cosh(y)^(-2)
    V₀(p, x, y, z) = 0
    W₀(p, x, y, z) = 0

    # Slightly off-center vortical perturbations
    Ψ₁(p, x, y, z) =
        exp(-(y + p.l / 10)^2 / (2 * (p.l^2))) * cos(p.k * x) * cos(p.k * y)
    Ψ₂(p, x, y, z) =
        exp(-(z + p.l / 10)^2 / (2 * (p.l^2))) * cos(p.k * y) * cos(p.k * z)

    # Vortical velocity fields (u, v, w) = (-∂ʸ, +∂ˣ, 0) Ψ₁ + (0, -∂ᶻ, +∂ʸ)Ψ₂
    u₀(p, x, y, z) =
        Ψ₁(p, x, y, z) * (p.k * tan(p.k * y) + y / (p.l^2) + 1 / (10 * p.l))
    v₀(p, x, y, z) =
        Ψ₂(p, x, y, z) * (p.k * tan(p.k * z) + z / (p.l^2) + 1 / (10 * p.l)) -
        Ψ₁(p, x, y, z) * p.k * tan(p.k * x)
    w₀(p, x, y, z) = -Ψ₂(p, x, y, z) * p.k * tan(p.k * y)

    θ₀(p, x, y, z) = sin(p.k * y)
    ρ₀(p, x, y, z) = p.ρₒ

    ρu₀(p, x...) = ρ₀(p, x...) * (p.ϵ * u₀(p, x...) + U₀(p, x...))
    ρv₀(p, x...) = ρ₀(p, x...) * (p.ϵ * v₀(p, x...) + V₀(p, x...))
    ρw₀(p, x...) = ρ₀(p, x...) * (p.ϵ * w₀(p, x...) + W₀(p, x...))
    ρθ₀(p, x...) = ρ₀(p, x...) * θ₀(p, x...)

    ρu⃗₀(p, x...) = @SVector [ρu₀(p, x...), ρv₀(p, x...), ρw₀(p, x...)]

    initial_conditions = (ρ = ρ₀, ρu = ρu⃗₀, ρθ = ρθ₀)

    # run each discretization and compare against stored reference statistics
    for (key, grid) in grids
        @testset "$(key)" begin
            model = SpatialModel(
                balance_law = Fluid3D(),
                physics = physics,
                numerics = (flux = RoeNumericalFlux(),),
                grid = grid,
                boundary_conditions = NamedTuple(),
                parameters = parameters,
            )

            simulation = Simulation(
                model = model,
                initial_conditions = initial_conditions,
                timestepper = timestepper,
                callbacks = callbacks,
                time = (; start = start_time, finish = end_time),
            )

            ########
            # Run the model
            ########
            evolve!(
                simulation,
                model;
                refDat = getproperty(refVals, Symbol(key)),
            )
        end
    end
end

================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/three_dimensional/test_buoyancy.jl ================================================

#!/usr/bin/env julia --project

include("../shared_source/boilerplate.jl")
include("ThreeDimensionalCompressibleNavierStokesEquations.jl")

ClimateMachine.init()

#################
# RUN THE TESTS #
#################
@testset "$(@__FILE__)" begin

    include("refvals_buoyancy.jl")

    ########
    # Setup physical and numerical domains
    ########
    Ωˣ = IntervalDomain(-2π, 2π, periodic = true)
    Ωʸ = IntervalDomain(-2π, 2π, periodic = true)
    Ωᶻ = IntervalDomain(0, 4π, periodic = false)

    second_order = DiscretizedDomain(
        Ωˣ × Ωʸ × Ωᶻ;
        elements = 5,
        polynomial_order = 2,
        overintegration_order = 1,
    )

    fourth_order = DiscretizedDomain(
        Ωˣ × Ωʸ × Ωᶻ;
        elements = 3,
        polynomial_order = 4,
        overintegration_order = 1,
    )

    grids = Dict("second_order" => second_order, "fourth_order" => fourth_order)

    ########
    # Define timestepping parameters
    ########
    start_time = 0
    end_time = 0.1
    Δt = 0.001
    method = SSPRK22Heuns
    timestepper = TimeStepper(method = method, timestep = Δt)

    callbacks = (
        Info(),
        # VTKState(; iteration = 1, filepath = "output/buoyancy"),
        StateCheck(10),
    )

    ########
    # Define physical parameters and parameterizations
    ########
    Lˣ, Lʸ, Lᶻ = length(Ωˣ × Ωʸ × Ωᶻ)
    parameters = (
        ρₒ = 1, # reference density
        cₛ = sqrt(10), # sound speed
        α = 1e-4, # thermal expansion coefficient
        g = 10, # gravity
        θₒ = 10, # initial temperature value
        Lˣ = Lˣ,
        Lʸ = Lʸ,
        H = Lᶻ,
    )

    physics = FluidPhysics(;
        advection = NonLinearAdvectionTerm(),
        dissipation = ConstantViscosity{Float64}(μ = 0, ν = 0, κ = 0),
        coriolis = nothing,
        buoyancy = Buoyancy{Float64}(α = parameters.α, g = parameters.g),
    )

    ########
    # Define initial conditions
    ########
    # fluid at rest; density compensates the temperature profile
    u₀(p, x, y, z) = -0
    v₀(p, x, y, z) = -0
    w₀(p, x, y, z) = -0
    θ₀(p, x, y, z) = -p.θₒ * (1 - z / 4π)
    ρ₀(p, x, y, z) =
        p.ρₒ * (1 - (p.α * p.g / p.cₛ^2) / 2 * (-p.θₒ * (1 - z / 4π))^2)

    ρu₀(p, x...) = ρ₀(p, x...) * u₀(p, x...)
    ρv₀(p, x...) = ρ₀(p, x...) * v₀(p, x...)
    ρw₀(p, x...) = ρ₀(p, x...) * w₀(p, x...)
    ρθ₀(p, x...) = ρ₀(p, x...) * θ₀(p, x...)

    ρu⃗₀(p, x...) = @SVector [ρu₀(p, x...), ρv₀(p, x...), ρw₀(p, x...)]

    initial_conditions = (ρ = ρ₀, ρu = ρu⃗₀, ρθ = ρθ₀)

    # run every grid with and without an orientation (key suffix "_flat")
    orientations = Dict(
        "" => ClimateMachine.Orientations.NoOrientation(),
        "_flat" => ClimateMachine.Orientations.FlatOrientation(),
    )

    for (key1, grid) in grids
        for (key2, orientation) in orientations
            key = key1 * key2
            println("running ", key)

            local_physics = FluidPhysics(;
                orientation = orientation,
                advection = physics.advection,
                dissipation = physics.dissipation,
                coriolis = physics.coriolis,
                buoyancy = physics.buoyancy,
            )

            @testset "$(key)" begin
                model = SpatialModel(
                    balance_law = Fluid3D(),
                    physics = local_physics,
                    numerics = (flux = RoeNumericalFlux(),),
                    grid = grid,
                    boundary_conditions = NamedTuple(),
                    parameters = parameters,
                )

                simulation = Simulation(
                    model = model,
                    initial_conditions = initial_conditions,
                    timestepper = timestepper,
                    callbacks = callbacks,
                    time = (; start = start_time, finish = end_time),
                )

                ########
                # Run the model
                ########
                evolve!(
                    simulation,
                    model,
                    refDat = getproperty(refVals, Symbol(key)),
                )
            end
        end
    end
end

================================================ FILE:
test/Numerics/DGMethods/compressible_navier_stokes_equations/two_dimensional/TwoDimensionalCompressibleNavierStokesEquations.jl ================================================

include("../shared_source/boilerplate.jl")

import ClimateMachine.BalanceLaws:
    vars_state,
    init_state_prognostic!,
    init_state_auxiliary!,
    compute_gradient_argument!,
    compute_gradient_flux!,
    flux_first_order!,
    flux_second_order!,
    source!,
    wavespeed,
    boundary_conditions,
    boundary_state!

import ClimateMachine.DGMethods: DGModel
import ClimateMachine.NumericalFluxes: numerical_flux_first_order!

"""
    TwoDimensionalCompressibleNavierStokesEquations <: BalanceLaw

A `BalanceLaw` for two-dimensional compressible Navier–Stokes modeling.

write out the equations here

# Usage

    TwoDimensionalCompressibleNavierStokesEquations()
"""
abstract type AbstractFluid2D <: AbstractFluid end
struct Fluid2D <: AbstractFluid2D end

struct TwoDimensionalCompressibleNavierStokesEquations{
    I,
    D,
    A,
    T,
    C,
    F,
    BC,
    FT,
} <: AbstractFluid2D
    initial_value_problem::I # `nothing` or an `InitialValueProblem`
    domain::D
    advection::A
    turbulence::T
    coriolis::C
    forcing::F
    boundary_conditions::BC
    g::FT # gravitational acceleration [m/s²]
    c::FT # [m/s] — presumably a wave/sound speed; confirm against usage
    function TwoDimensionalCompressibleNavierStokesEquations{FT}(
        initial_value_problem::I,
        domain::D,
        advection::A,
        turbulence::T,
        coriolis::C,
        forcing::F,
        boundary_conditions::BC;
        g = FT(10), # m/s²
        c = FT(0), #m/s
    ) where {FT <: AbstractFloat, I, D, A, T, C, F, BC}
        return new{I, D, A, T, C, F, BC, FT}(
            initial_value_problem,
            domain,
            advection,
            turbulence,
            coriolis,
            forcing,
            boundary_conditions,
            g,
            c,
        )
    end
end

CNSE2D = TwoDimensionalCompressibleNavierStokesEquations

# prognostic state: density, 2D momentum, density-weighted temperature
function vars_state(m::CNSE2D, ::Prognostic, T)
    @vars begin
        ρ::T
        ρu::SVector{2, T}
        ρθ::T
    end
end

function init_state_prognostic!(m::CNSE2D, state::Vars, aux::Vars, localgeo, t)
    cnse_init_state!(m, state, aux, localgeo, t)
end

# default initial state if IVP == nothing
function cnse_init_state!(model::CNSE2D, state, aux, localgeo, t)
    ρ = 1
    state.ρ = ρ
    state.ρu = ρ * @SVector [-0, -0]
    state.ρθ = ρ

    return nothing
end

# user defined initial state
function cnse_init_state!(
    model::CNSE2D{<:InitialValueProblem},
    state,
    aux,
    localgeo,
    t,
)
    x = aux.x
    y = aux.y
    z = aux.z

    params = model.initial_value_problem.params
    ic = model.initial_value_problem.initial_conditions

    state.ρ = ic.ρ(params, x, y, z)
    state.ρu = ic.ρu(params, x, y, z)
    state.ρθ = ic.ρθ(params, x, y, z)

    return nothing
end

# auxiliary state: cached nodal coordinates
function vars_state(m::CNSE2D, ::Auxiliary, T)
    @vars begin
        x::T
        y::T
        z::T
    end
end

function init_state_auxiliary!(
    model::CNSE2D,
    state_auxiliary::MPIStateArray,
    grid,
    direction,
)
    init_state_auxiliary!(
        model,
        (model, aux, tmp, geom) -> cnse_init_aux!(model, aux, geom),
        state_auxiliary,
        grid,
        direction,
    )
end

function cnse_init_aux!(::CNSE2D, aux, geom)
    @inbounds begin
        aux.x = geom.coord[1]
        aux.y = geom.coord[2]
        aux.z = geom.coord[3]
    end

    return nothing
end

# gradient variables: velocity and potential temperature
function vars_state(m::CNSE2D, ::Gradient, T)
    @vars begin
        ∇u::SVector{2, T}
        ∇θ::T
    end
end

function compute_gradient_argument!(
    model::CNSE2D,
    grad::Vars,
    state::Vars,
    aux::Vars,
    t::Real,
)
    compute_gradient_argument!(model.turbulence, grad, state, aux, t)
end

compute_gradient_argument!(::LinearDrag, _...) = nothing

@inline function compute_gradient_argument!(
    ::ConstantViscosity,
    grad::Vars,
    state::Vars,
    aux::Vars,
    t::Real,
)
    ρ = state.ρ
    ρu = state.ρu
    ρθ = state.ρθ

    # gradients are taken of the specific (density-divided) quantities
    u = ρu / ρ
    θ = ρθ / ρ

    grad.∇u = u
    grad.∇θ = θ

    return nothing
end

function vars_state(m::CNSE2D, ::GradientFlux, T)
    @vars begin
        ν∇u::SMatrix{3, 2, T, 6}
        κ∇θ::SVector{3, T}
    end
end

function compute_gradient_flux!(
    model::CNSE2D,
    gradflux::Vars,
    grad::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
)
    compute_gradient_flux!(
        model,
        model.turbulence,
        gradflux,
        state,
        aux,
        t,
    ) # NOTE(review): signature note below — see original token order
end

compute_gradient_flux!(::CNSE2D, ::LinearDrag, _...) = nothing

@inline function compute_gradient_flux!(
    ::CNSE2D,
    turb::ConstantViscosity,
    gradflux::Vars,
    grad::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
)
    # zero out the (absent) third spatial direction in the diffusivities
    ν = Diagonal(@SVector [turb.ν, turb.ν, -0])
    κ = Diagonal(@SVector [turb.κ, turb.κ, -0])

    gradflux.ν∇u = -ν * grad.∇u
    gradflux.κ∇θ = -κ * grad.∇θ

    return nothing
end

@inline function flux_first_order!(
    model::CNSE2D,
    flux::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
    direction,
)
    ρ = state.ρ
    ρu = @SVector [state.ρu[1], state.ρu[2], -0]
    ρθ = state.ρθ

    # NOTE(review): ρₜ, ρuₜ, θₜ are never used below
    ρₜ = flux.ρ
    ρuₜ = flux.ρu
    θₜ = flux.ρθ

    g = model.g

    # horizontal identity, padded to 3×2
    Iʰ = @SMatrix [
        1 -0
        -0 1
        -0 -0
    ]

    flux.ρ += ρu
    # pressure-like term g ρ² / 2 on the horizontal components
    flux.ρu += g * ρ^2 * Iʰ / 2

    advective_flux!(model, model.advection, flux, state, aux, t)

    return nothing
end

advective_flux!(::CNSE2D, ::Nothing, _...) = nothing

@inline function advective_flux!(
    ::CNSE2D,
    ::NonLinearAdvectionTerm,
    flux::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
)
    ρ = state.ρ
    ρu = state.ρu
    ρv = @SVector [state.ρu[1], state.ρu[2], -0]
    ρθ = state.ρθ

    # nonlinear advection: (ρu ⊗ ρu)/ρ and ρu ρθ / ρ
    flux.ρu += ρv ⊗ ρu / ρ
    flux.ρθ += ρv * ρθ / ρ

    return nothing
end

function flux_second_order!(
    model::CNSE2D,
    flux::Grad,
    state::Vars,
    gradflux::Vars,
    ::Vars,
    aux::Vars,
    t::Real,
)
    flux_second_order!(model, model.turbulence, flux, state, gradflux, aux, t)
end

flux_second_order!(::CNSE2D, ::LinearDrag, _...) = nothing

@inline function flux_second_order!(
    ::CNSE2D,
    ::ConstantViscosity,
    flux::Grad,
    state::Vars,
    gradflux::Vars,
    aux::Vars,
    t::Real,
)
    flux.ρu += gradflux.ν∇u
    flux.ρθ += gradflux.κ∇θ

    return nothing
end

@inline function source!(
    model::CNSE2D,
    source::Vars,
    state::Vars,
    gradflux::Vars,
    aux::Vars,
    t::Real,
    direction,
)
    # each term is a no-op when the corresponding physics component is Nothing
    coriolis_force!(model, model.coriolis, source, state, aux, t)
    forcing_term!(model, model.forcing, source, state, aux, t)
    linear_drag!(model, model.turbulence, source, state, aux, t)

    return nothing
end

coriolis_force!(::CNSE2D, ::Nothing, _...)
= nothing @inline function coriolis_force!( model::CNSE2D, coriolis::fPlaneCoriolis, source, state, aux, t, ) ρu = @SVector [state.ρu[1], state.ρu[2], -0] # f × u f = [-0, -0, coriolis_parameter(model, coriolis, aux.coords)] id = @SVector [1, 2] fxρu = (f × ρu)[id] source.ρu -= fxρu return nothing end forcing_term!(::CNSE2D, ::Nothing, _...) = nothing @inline function forcing_term!( model::CNSE2D, forcing::KinematicStress, source, state, aux, t, ) source.ρu += kinematic_stress(model, forcing, aux.coords) return nothing end linear_drag!(::CNSE2D, ::ConstantViscosity, _...) = nothing @inline function linear_drag!(::CNSE2D, turb::LinearDrag, source, state, aux, t) source.ρu -= turb.λ * state.ρu return nothing end @inline wavespeed(m::CNSE2D, _...) = m.c roe_average(ρ⁻, ρ⁺, var⁻, var⁺) = (sqrt(ρ⁻) * var⁻ + sqrt(ρ⁺) * var⁺) / (sqrt(ρ⁻) + sqrt(ρ⁺)) function numerical_flux_first_order!( ::RoeNumericalFlux, model::CNSE2D, fluxᵀn::Vars{S}, n⁻::SVector, state⁻::Vars{S}, aux⁻::Vars{A}, state⁺::Vars{S}, aux⁺::Vars{A}, t, direction, ) where {S, A} numerical_flux_first_order!( CentralNumericalFluxFirstOrder(), model, fluxᵀn, n⁻, state⁻, aux⁻, state⁺, aux⁺, t, direction, ) FT = eltype(fluxᵀn) # constants and normal vectors g = model.g @inbounds nˣ = n⁻[1] @inbounds nʸ = n⁻[2] # get minus side states ρ⁻ = state⁻.ρ @inbounds ρu⁻ = state⁻.ρu[1] @inbounds ρv⁻ = state⁻.ρu[2] ρθ⁻ = state⁻.ρθ u⁻ = ρu⁻ / ρ⁻ v⁻ = ρv⁻ / ρ⁻ θ⁻ = ρθ⁻ / ρ⁻ # get plus side states ρ⁺ = state⁺.ρ @inbounds ρu⁺ = state⁺.ρu[1] @inbounds ρv⁺ = state⁺.ρu[2] ρθ⁺ = state⁺.ρθ u⁺ = ρu⁺ / ρ⁺ v⁺ = ρv⁺ / ρ⁺ θ⁺ = ρθ⁺ / ρ⁺ # averages for roe fluxes ρ = (ρ⁺ + ρ⁻) / 2 ρu = (ρu⁺ + ρu⁻) / 2 ρv = (ρv⁺ + ρv⁻) / 2 ρθ = (ρθ⁺ + ρθ⁻) / 2 u = roe_average(ρ⁻, ρ⁺, u⁻, u⁺) v = roe_average(ρ⁻, ρ⁺, v⁻, v⁺) θ = roe_average(ρ⁻, ρ⁺, θ⁻, θ⁺) # normal and tangent velocities uₙ = nˣ * u + nʸ * v uₚ = nˣ * v - nʸ * u # differences for difference vector Δρ = ρ⁺ - ρ⁻ Δρu = ρu⁺ - ρu⁻ Δρv = ρv⁺ - ρv⁻ Δρθ = ρθ⁺ - ρθ⁻ Δφ = @SVector [Δρ, Δρu, Δρv, Δρθ] 
"""
# jacobian
∂F∂φ = [
    0 nˣ nʸ 0
    (nˣ * c^2 - u * uₙ) (uₙ + nˣ * u) (nʸ * u) 0
    (nʸ * c^2 - v * uₙ) (nˣ * v) (uₙ + nʸ * v) 0
    (-θ * uₙ) (nˣ * θ) (nʸ * θ) uₙ
]

# eigen decomposition
λ, R = eigen(∂F∂φ)
"""

    # Wave speeds about the Roe-averaged state: uₙ (entropy/tracer waves)
    # and uₙ ± c (gravity waves), with c = √(gρ) from the ρ²-pressure flux.
    c = sqrt(g * ρ)
    λ = @SVector [uₙ, uₙ + c, uₙ - c, uₙ]
    Λ = Diagonal(abs.(λ))

    # right-eigenvector matrix of the Jacobian above, columns ordered as λ
    R = @SMatrix [
        0 1 1 0
        -nʸ (u+nˣ * c) (u-nˣ * c) 0
        nˣ (v+nʸ * c) (v-nʸ * c) 0
        0 θ θ 1
    ]

    # closed-form inverse of R — avoids an R \ Δφ solve at every face
    R⁻¹ = @SMatrix [
        -uₚ -nʸ nˣ 0
        (c - uₙ)/(2c) nˣ/(2c) nʸ/(2c) 0
        (c + uₙ)/(2c) -nˣ/(2c) -nʸ/(2c) 0
        -θ 0 0 1
    ]

    # @test norm(R⁻¹ * R - I) ≈ 0

    # Roe upwind dissipation: subtract R |Λ| R⁻¹ Δφ / 2 from the central flux
    # accumulated above.
    # parent(fluxᵀn) .-= R * Λ * (R \ Δφ) / 2
    parent(fluxᵀn) .-= R * Λ * R⁻¹ * Δφ / 2

    return nothing
end

boundary_conditions(model::CNSE2D) = model.boundary_conditions

"""
    boundary_state!(nf, ::CNSE2D, args...)

applies boundary conditions for the hyperbolic fluxes
dispatches to a function in CNSEBoundaryConditions
"""
@inline function boundary_state!(nf, bc, model::CNSE2D, args...)
    # NOTE(review): `_cnse_boundary_state!` is not defined in this file —
    # presumably provided by the shared boilerplate; confirm.
    return _cnse_boundary_state!(nf, bc, model, args...)
end

"""
    cnse_boundary_state!(nf, bc::FluidBC, ::CNSE2D)

splits boundary condition application into the momentum part (which also
receives the turbulence closure) and the temperature part
"""
@inline function cnse_boundary_state!(nf, bc::FluidBC, m::CNSE2D, args...)
    # Bug fix: the original `return`ed after applying the momentum BC, which
    # made the temperature BC on the following line unreachable. Apply both.
    cnse_boundary_state!(nf, bc.momentum, m, m.turbulence, args...)
    cnse_boundary_state!(nf, bc.temperature, m, args...)
    return nothing
end

include("bc_momentum.jl")
include("bc_tracer.jl")

"""
Helpers for the abstract `SpatialModel` wrappers.
"""
function get_boundary_conditions(
    model::SpatialModel{BL},
) where {BL <: AbstractFluid2D}
    bcs = model.boundary_conditions
    # pair the boundary conditions on opposite faces of the rectangular domain
    west_east = (check_bc(bcs, :west), check_bc(bcs, :east))
    south_north = (check_bc(bcs, :south), check_bc(bcs, :north))
    return (west_east..., south_north...)
end function DGModel( model::SpatialModel{BL}; initial_conditions = nothing, ) where {BL <: AbstractFluid2D} params = model.parameters physics = model.physics Lˣ, Lʸ = length(model.grid.domain) bcs = get_boundary_conditions(model) FT = eltype(model.grid.numerical.vgeo) if !isnothing(initial_conditions) initial_conditions = InitialValueProblem(params, initial_conditions) end balance_law = CNSE2D{FT}( initial_conditions, (Lˣ, Lʸ), physics.advection, physics.dissipation, physics.coriolis, nothing, bcs, c = params.c, g = params.g, ) numerical_flux_first_order = model.numerics.flux # should be a function rhs = DGModel( balance_law, model.grid.numerical, numerical_flux_first_order, CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) return rhs end ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/two_dimensional/bc_momentum.jl ================================================ """ cnse_boundary_state!(::NumericalFluxFirstOrder, ::Impenetrable{FreeSlip}, ::CNSE2D) apply free slip boundary condition for momentum sets reflective ghost point """ @inline function cnse_boundary_state!( ::NumericalFluxFirstOrder, ::Impenetrable{FreeSlip}, ::CNSE2D, ::TurbulenceClosure, state⁺, aux⁺, n⁻, state⁻, aux⁻, t, args..., ) state⁺.ρ = state⁻.ρ ρu⁻ = @SVector [state⁻.ρu[1], state⁻.ρu[2], -0] ρu⁺ = ρu⁻ - 2 * n⁻ ⋅ ρu⁻ .* SVector(n⁻) state⁺.ρu = @SVector [ρu⁺[1], ρu⁺[2]] return nothing end """ cnse_boundary_state!(::Union{NumericalFluxGradient, NumericalFluxSecondOrder}, ::Impenetrable{FreeSlip}, ::CNSE2D) no second order flux computed for linear drag """ cnse_boundary_state!( ::Union{NumericalFluxGradient, NumericalFluxSecondOrder}, ::MomentumBC, ::CNSE2D, ::LinearDrag, _..., ) = nothing """ cnse_boundary_state!(::NumericalFluxGradient, ::Impenetrable{FreeSlip}, ::CNSE2D) apply free slip boundary condition for momentum sets non-reflective ghost point """ function cnse_boundary_state!( ::NumericalFluxGradient, 
::Impenetrable{FreeSlip}, ::CNSE2D, ::ConstantViscosity, state⁺, aux⁺, n⁻, state⁻, aux⁻, t, args..., ) state⁺.ρ = state⁻.ρ ρu⁻ = @SVector [state⁻.ρu[1], state⁻.ρu[2], -0] ρu⁺ = ρu⁻ - n⁻ ⋅ ρu⁻ .* SVector(n⁻) state⁺.ρu = @SVector [ρu⁺[1], ρu⁺[2]] return nothing end """ shallow_normal_boundary_flux_second_order!(::NumericalFluxSecondOrder, ::Impenetrable{FreeSlip}, ::CNSE2D) apply free slip boundary condition for momentum apply zero numerical flux in the normal direction """ function cnse_boundary_state!( ::NumericalFluxSecondOrder, ::Impenetrable{FreeSlip}, ::CNSE2D, ::ConstantViscosity, state⁺, gradflux⁺, aux⁺, n⁻, state⁻, gradflux⁻, aux⁻, t, args..., ) state⁺.ρu = state⁻.ρu gradflux⁺.ν∇u = n⁻ * (@SVector [-0, -0])' return nothing end """ cnse_boundary_state!(::NumericalFluxFirstOrder, ::Impenetrable{NoSlip}, ::CNSE2D) apply no slip boundary condition for momentum sets reflective ghost point """ @inline function cnse_boundary_state!( ::NumericalFluxFirstOrder, ::Impenetrable{NoSlip}, ::CNSE2D, ::TurbulenceClosure, state⁺, aux⁺, n⁻, state⁻, aux⁻, t, args..., ) state⁺.ρ = state⁻.ρ state⁺.ρu = -state⁻.ρu return nothing end """ cnse_boundary_state!(::NumericalFluxGradient, ::Impenetrable{NoSlip}, ::CNSE2D) apply no slip boundary condition for momentum set numerical flux to zero for U """ @inline function cnse_boundary_state!( ::NumericalFluxGradient, ::Impenetrable{NoSlip}, ::CNSE2D, ::ConstantViscosity, state⁺, aux⁺, n⁻, state⁻, aux⁻, t, args..., ) FT = eltype(state⁺) state⁺.ρu = @SVector zeros(FT, 2) return nothing end """ cnse_boundary_state!(::NumericalFluxSecondOrder, ::Impenetrable{NoSlip}, ::CNSE2D) apply no slip boundary condition for momentum sets ghost point to have no numerical flux on the boundary for U """ @inline function cnse_boundary_state!( ::NumericalFluxSecondOrder, ::Impenetrable{NoSlip}, ::CNSE2D, ::ConstantViscosity, state⁺, gradflux⁺, aux⁺, n⁻, state⁻, gradflux⁻, aux⁻, t, args..., ) state⁺.ρu = -state⁻.ρu gradflux⁺.ν∇u = gradflux⁻.ν∇u return 
nothing
end

"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Penetrable{FreeSlip}, ::CNSE2D)

no mass boundary condition for penetrable
"""
cnse_boundary_state!(
    ::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::Penetrable{FreeSlip},
    ::CNSE2D,
    ::ConstantViscosity,
    _...,
) = nothing

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::Penetrable{FreeSlip}, ::CNSE2D)

apply free slip boundary condition for momentum
apply zero numerical flux in the normal direction
"""
function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Penetrable{FreeSlip},
    ::CNSE2D,
    ::ConstantViscosity,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
    args...,
)
    state⁺.ρu = state⁻.ρu
    # zero viscous momentum flux through the boundary
    gradflux⁺.ν∇u = n⁻ * (@SVector [-0, -0])'
    return nothing
end

"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Impenetrable{MomentumFlux}, ::HBModel)

apply kinematic stress boundary condition for momentum
applies free slip conditions for first-order and gradient fluxes
"""
function cnse_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::Impenetrable{<:MomentumFlux},
    model::CNSE2D,
    turb::TurbulenceClosure,
    args...,
)
    return cnse_boundary_state!(
        nf,
        Impenetrable(FreeSlip()),
        model,
        turb,
        args...,
    )
end

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::Impenetrable{MomentumFlux}, ::HBModel)

apply kinematic stress boundary condition for momentum
sets ghost point to have specified flux on the boundary for ν∇u
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    bc::Impenetrable{<:MomentumFlux},
    model::CNSE2D,
    ::TurbulenceClosure,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
)
    # Bug fix: the boundary-condition argument was anonymous while the body
    # referenced `bc` (UndefVarError), and the TurbulenceClosure positional
    # argument — present in every sibling method and always supplied by the
    # dispatch chain — was missing, so the state arguments bound one slot off.
    # The signature now mirrors the Penetrable{<:MomentumFlux} method below.
    state⁺.ρu = state⁻.ρu
    gradflux⁺.ν∇u = n⁻ * bc.drag(state⁻, aux⁻, t)'
    return nothing
end

"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Penetrable{MomentumFlux}, ::HBModel)

apply kinematic stress boundary condition for momentum
applies free slip conditions for first-order and gradient fluxes
"""
function cnse_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::Penetrable{<:MomentumFlux},
    model::CNSE2D,
    turb::TurbulenceClosure,
    args...,
)
    return cnse_boundary_state!(
        nf,
        Penetrable(FreeSlip()),
        model,
        turb,
        args...,
    )
end

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::Penetrable{MomentumFlux}, ::HBModel)

apply kinematic stress boundary condition for momentum
sets ghost point to have specified flux on the boundary for ν∇u
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    bc::Penetrable{<:MomentumFlux},
    shallow::CNSE2D,
    ::TurbulenceClosure,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
)
    state⁺.ρu = state⁻.ρu
    # impose the prescribed kinematic stress as the viscous flux at the wall
    gradflux⁺.ν∇u = n⁻ * bc.drag(state⁻, aux⁻, t)'
    return nothing
end

================================================
FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/two_dimensional/bc_tracer.jl
================================================

"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::Insulating, ::HBModel)

apply insulating boundary condition for temperature
sets transmissive ghost point
"""
function cnse_boundary_state!(
    ::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::Insulating,
    ::CNSE2D,
    state⁺,
    aux⁺,
    n⁻,
    state⁻,
    aux⁻,
    t,
)
    state⁺.ρθ = state⁻.ρθ
    return nothing
end

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::Insulating, ::HBModel)

apply insulating boundary condition for velocity
sets ghost point to have no numerical flux on the boundary for κ∇θ
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    ::Insulating,
    ::CNSE2D,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
)
    state⁺.ρθ = state⁻.ρθ
    # zero diffusive tracer flux through the boundary
    gradflux⁺.κ∇θ = n⁻ * -0
    return nothing
end

"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::TemperatureFlux, ::HBModel)

apply temperature flux boundary condition for velocity
applies insulating conditions for first-order and gradient fluxes
"""
"""
    cnse_boundary_state!(::Union{NumericalFluxFirstOrder, NumericalFluxGradient}, ::TemperatureFlux, ::HBModel)

a temperature-flux boundary behaves like an insulating one for the
first-order and gradient fluxes; forward to the `Insulating` methods
"""
cnse_boundary_state!(
    nf::Union{NumericalFluxFirstOrder, NumericalFluxGradient},
    ::TemperatureFlux,
    model::CNSE2D,
    args...,
) = cnse_boundary_state!(nf, Insulating(), model, args...)

"""
    cnse_boundary_state!(::NumericalFluxSecondOrder, ::TemperatureFlux, ::HBModel)

apply insulating boundary condition for velocity
sets ghost point to have specified flux on the boundary for κ∇θ
"""
@inline function cnse_boundary_state!(
    ::NumericalFluxSecondOrder,
    bc::TemperatureFlux,
    model::CNSE2D,
    state⁺,
    gradflux⁺,
    aux⁺,
    n⁻,
    state⁻,
    gradflux⁻,
    aux⁻,
    t,
)
    # NOTE(review): `bc` is invoked as a callable here — presumably
    # TemperatureFlux wraps a flux function; confirm against its definition.
    prescribed_flux = bc(state⁻, aux⁻, t)
    # transmissive tracer state, prescribed diffusive flux along the normal
    state⁺.ρθ = state⁻.ρθ
    gradflux⁺.κ∇θ = n⁻ * prescribed_flux
    return nothing
end

================================================
FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/two_dimensional/refvals_bickley_jet.jl
================================================

# [
#  [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ],
#  [         :                :          :        :      :          :           ],
# ]
# significant-digit tolerances used by StateCheck when comparing refVals
parr = [
    ["state", :ρ, 12, 12, 12, 12],
    ["state", "ρu[1]", 12, 11, 12, 12],
    ["state", "ρu[2]", 12, 12, 11, 12],
    ["state", :ρθ, 11, 12, 10, 12],
]
#!
format: off rusanov_periodic = ( [ [ "state", "ρ", 9.76557021043823247908e-01, 1.00785052430958410596e+00, 1.00000023257151737788e+00, 5.40633258005515995870e-03 ], [ "state", "ρu[1]", -2.08235364819134516345e-01, 5.42021330588998817568e-01, 1.59191748432813973135e-01, 1.71679977050671228600e-01 ], [ "state", "ρu[2]", -4.44598929671790876750e-01, 3.96193527502323006306e-01, -9.19857600654994111977e-06, 2.23835345394622464710e-01 ], [ "state", "ρθ", -1.05926174767863745529e+00, 1.78285318416958671328e+00, 3.38466687536443516793e-03, 2.92797512839404083795e-01 ], ], parr, ) roeflux_periodic = ( [ [ "state", "ρ", 9.76408216346011381681e-01, 1.00685186827167338919e+00, 1.00000020920188337215e+00, 5.28437765352953326553e-03 ], [ "state", "ρu[1]", -2.51377173896010663867e-01, 5.33033893482195431091e-01, 1.59154423274343037598e-01, 1.96922130305308085152e-01 ], [ "state", "ρu[2]", -4.04321943123139071474e-01, 3.81969301036260144855e-01, -2.87040331153030903177e-05, 2.02392226536863034658e-01 ], [ "state", "ρθ", -1.21186199231530267184e+00, 9.21722158564563742722e-01, 2.34662081315961668082e-03, 2.61922031216240580598e-01 ], ], parr, ) rusanov = ( [ [ "state", "ρ", 9.76844648039728813416e-01, 1.00662311214286837036e+00, 1.00000043240984126669e+00, 6.05641827304116090597e-03 ], [ "state", "ρu[1]", -6.93213807814652027695e-01, 5.89769972499923134102e-01, 1.42589259313275984464e-01, 2.32576446220999155656e-01 ], [ "state", "ρu[2]", -5.73828377062444716650e-01, 5.07664260030296077275e-01, -4.23962044125134052130e-04, 1.96899492339057097245e-01 ], [ "state", "ρθ", -3.65527466807722500874e+00, 4.07350442876234186684e+00, 1.27099405499731775426e-02, 5.02926135539447205502e-01 ], ], parr, ) roeflux = ( [ [ "state", "ρ", 9.72455511248961124160e-01, 1.00730359035806360524e+00, 1.00000004243418749716e+00, 6.29619947553510493632e-03 ], [ "state", "ρu[1]", -3.40062758027173561715e-01, 5.98382642258275421199e-01, 1.61534160931526560301e-01, 1.83683987730847486652e-01 ], [ "state", 
"ρu[2]", -4.78833188511707363855e-01, 5.60276621431795018857e-01, -5.51384446753656887186e-05, 2.12893889070269098918e-01 ], [ "state", "ρθ", -1.44374005663885895956e+01, 2.86753217014698513765e+00, 1.48814717217727286724e-03, 5.22593283079209602882e-01 ], ], parr, ) rusanov_overintegration = ( [ [ "state", "ρ", 9.71215931957259970275e-01, 1.00598091306596182370e+00, 1.00000018331969764418e+00, 6.61958163862229592017e-03 ], [ "state", "ρu[1]", -3.70465052437365993665e-01, 7.21959883918644518275e-01, 1.59085030658594694941e-01, 2.39392692254175726285e-01 ], [ "state", "ρu[2]", -4.40021840550348763976e-01, 4.17225753221437845042e-01, 7.34809696136107232513e-05, 1.50458151629451780673e-01 ], [ "state", "ρθ", -3.05207824503129643290e+00, 1.86668064854026383159e+00, -1.05021657861818027563e-02, 4.60981422911087401761e-01 ], ], parr, ) roeflux_overintegration = ( [ [ "state", "ρ", 9.69901051313856066294e-01, 1.00695111308847851106e+00, 1.00000002953972089159e+00, 6.94722590819101589593e-03 ], [ "state", "ρu[1]", -4.21771768146742886962e-01, 6.34790159642324547384e-01, 1.59384804664454482470e-01, 2.22844788492525480716e-01 ], [ "state", "ρu[2]", -4.37678540756429201863e-01, 4.53959896024816400573e-01, -1.22554654623556756642e-04, 1.73930782418124457722e-01 ], [ "state", "ρθ", -1.13636401173616619076e+00, 1.51145239791767727056e+00, 1.96870293018723595616e-03, 3.82573373360323043535e-01 ], ], parr, ) #! 
format: on refVals = (; rusanov_periodic, roeflux_periodic, rusanov, roeflux, rusanov_overintegration, roeflux_overintegration, ) ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/two_dimensional/run_bickley_jet.jl ================================================ #!/usr/bin/env julia --project include("../shared_source/boilerplate.jl") include("TwoDimensionalCompressibleNavierStokesEquations.jl") ClimateMachine.init() ######## # Setup physical and numerical domains ######## Ωˣ = IntervalDomain(-2π, 2π, periodic = true) Ωʸ = IntervalDomain(-2π, 2π, periodic = false) grid = DiscretizedDomain( Ωˣ × Ωʸ; elements = 8, polynomial_order = 3, overintegration_order = 1, ) ######## # Define timestepping parameters ######## start_time = 0 end_time = 200.0 Δt = 0.02 method = SSPRK22Heuns timestepper = TimeStepper(method = method, timestep = Δt) callbacks = (Info(), StateCheck(10)) ######## # Define physical parameters and parameterizations ######## parameters = ( ϵ = 0.1, # perturbation size for initial condition l = 0.5, # Gaussian width k = 0.5, # Sinusoidal wavenumber ρₒ = 1.0, # reference density c = 2, g = 10, ) physics = FluidPhysics(; advection = NonLinearAdvectionTerm(), dissipation = ConstantViscosity{Float64}(μ = 0, ν = 0, κ = 0), coriolis = nothing, buoyancy = nothing, ) ######## # Define boundary conditions ######## ρu_bcs = (south = Impenetrable(FreeSlip()), north = Impenetrable(FreeSlip())) ρθ_bcs = (south = Insulating(), north = Insulating()) BC = (ρθ = ρθ_bcs, ρu = ρu_bcs) ######## # Define initial conditions ######## # The Bickley jet U₀(p, x, y, z) = cosh(y)^(-2) # Slightly off-center vortical perturbations Ψ₀(p, x, y, z) = exp(-(y + p.l / 10)^2 / (2 * (p.l^2))) * cos(p.k * x) * cos(p.k * y) # Vortical velocity fields (ũ, ṽ) = (-∂ʸ, +∂ˣ) ψ̃ u₀(p, x, y, z) = Ψ₀(p, x, y, z) * (p.k * tan(p.k * y) + y / (p.l^2)) v₀(p, x, y, z) = -Ψ₀(p, x, y, z) * p.k * tan(p.k * x) θ₀(p, x, y, z) = sin(p.k * y) 
ρ₀(p, x, y, z) = p.ρₒ ρu₀(p, x...) = ρ₀(p, x...) * (p.ϵ * u₀(p, x...) + U₀(p, x...)) ρv₀(p, x...) = ρ₀(p, x...) * p.ϵ * v₀(p, x...) ρθ₀(p, x...) = ρ₀(p, x...) * θ₀(p, x...) ρu⃗₀(p, x...) = @SVector [ρu₀(p, x...), ρv₀(p, x...)] initial_conditions = (ρ = ρ₀, ρu = ρu⃗₀, ρθ = ρθ₀) ######## # Create the things ######## model = SpatialModel( balance_law = Fluid2D(), physics = physics, numerics = (flux = RoeNumericalFlux(),), grid = grid, boundary_conditions = BC, parameters = parameters, ) simulation = Simulation( model = model, initial_conditions = initial_conditions, timestepper = timestepper, callbacks = callbacks, time = (; start = start_time, finish = end_time), ) ######## # Run the model ######## tic = Base.time() evolve!(simulation, model) toc = Base.time() time = toc - tic println(time) ================================================ FILE: test/Numerics/DGMethods/compressible_navier_stokes_equations/two_dimensional/test_bickley_jet.jl ================================================ #!/usr/bin/env julia --project include("../shared_source/boilerplate.jl") include("TwoDimensionalCompressibleNavierStokesEquations.jl") ClimateMachine.init() setups = [ (; name = "rusanov_periodic", flux = RusanovNumericalFlux(), periodicity = true, Nover = 0, ), (; name = "roeflux_periodic", flux = RoeNumericalFlux(), periodicity = true, Nover = 0, ), (; name = "rusanov", flux = RusanovNumericalFlux(), periodicity = false, Nover = 0, ), (; name = "roeflux", flux = RoeNumericalFlux(), periodicity = false, Nover = 0, ), (; name = "rusanov_overintegration", flux = RusanovNumericalFlux(), periodicity = false, Nover = 1, ), (; name = "roeflux_overintegration", flux = RoeNumericalFlux(), periodicity = false, Nover = 1, ), ] ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin include("refvals_bickley_jet.jl") ######## # Define timestepping parameters ######## start_time = 0 end_time = 200.0 Δt = 0.02 method = LSRK54CarpenterKennedy timestepper = 
TimeStepper(method = method, timestep = Δt) callbacks = (Info(), StateCheck(10)) ######## # Define physical parameters and parameterizations ######## parameters = ( ϵ = 0.1, # perturbation size for initial condition l = 0.5, # Gaussian width k = 0.5, # Sinusoidal wavenumber ρₒ = 1.0, # reference density c = 2, g = 10, ) physics = FluidPhysics(; advection = NonLinearAdvectionTerm(), dissipation = ConstantViscosity{Float64}(μ = 0, ν = 0, κ = 0), coriolis = nothing, buoyancy = nothing, ) ######## # Define boundary conditions ######## ρu_bc = Impenetrable(FreeSlip()) ρθ_bc = Insulating() ρu_bcs = (south = ρu_bc, north = ρu_bc) ρθ_bcs = (south = ρθ_bc, north = ρθ_bc) BC = (ρθ = ρθ_bcs, ρu = ρu_bcs) ######## # Define initial conditions ######## # The Bickley jet U₀(p, x, y, z) = cosh(y)^(-2) # Slightly off-center vortical perturbations Ψ₀(p, x, y, z) = exp(-(y + p.l / 10)^2 / (2 * (p.l^2))) * cos(p.k * x) * cos(p.k * y) # Vortical velocity fields (ũ, ṽ) = (-∂ʸ, +∂ˣ) ψ̃ u₀(p, x, y, z) = Ψ₀(p, x, y, z) * (p.k * tan(p.k * y) + y / (p.l^2)) v₀(p, x, y, z) = -Ψ₀(p, x, y, z) * p.k * tan(p.k * x) θ₀(p, x, y, z) = sin(p.k * y) ρ₀(p, x, y, z) = p.ρₒ ρu₀(p, x...) = ρ₀(p, x...) * (p.ϵ * u₀(p, x...) + U₀(p, x...)) ρv₀(p, x...) = ρ₀(p, x...) * p.ϵ * v₀(p, x...) ρθ₀(p, x...) = ρ₀(p, x...) * θ₀(p, x...) ρu⃗₀(p, x...) 
= @SVector [ρu₀(p, x...), ρv₀(p, x...)] initial_conditions = (ρ = ρ₀, ρu = ρu⃗₀, ρθ = ρθ₀) for setup in setups @testset "$(setup.name)" begin Ωˣ = Periodic(-2π, 2π) Ωʸ = IntervalDomain(-2π, 2π, periodic = setup.periodicity) grid = DiscretizedDomain( Ωˣ × Ωʸ, elements = 16, polynomial_order = 3, overintegration_order = setup.Nover, ) model = SpatialModel( balance_law = Fluid2D(), physics = physics, numerics = (flux = setup.flux,), grid = grid, boundary_conditions = BC, parameters = parameters, ) simulation = Simulation( model = model, initial_conditions = initial_conditions, timestepper = timestepper, callbacks = callbacks, time = (; start = start_time, finish = end_time), ) ######## # Run the model ######## evolve!( simulation, model; refDat = getproperty(refVals, Symbol(setup.name)), ) end end end ================================================ FILE: test/Numerics/DGMethods/conservation/sphere.jl ================================================ #= Here we solve the equation: ```math q + dot(∇, uq) = 0 p - dot(∇, up) = 0 ``` on a sphere to test the conservation of the numerics The boundary conditions are `p = q` when `dot(n, u) > 0` and `q = p` when `dot(n, u) < 0` (i.e., `p` flows into `q` and vice-sersa). =# using MPI using ClimateMachine using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.GenericCallbacks using ClimateMachine.BalanceLaws: Prognostic, Auxiliary, AbstractStateType, BalanceLaw using LinearAlgebra using StaticArrays using Logging, Printf, Dates using Random using ClimateMachine.VariableTemplates import ClimateMachine.BalanceLaws: vars_state import ClimateMachine.DGMethods: flux_first_order!, flux_second_order!, source!, boundary_conditions, boundary_state!, nodal_init_state_auxiliary!, init_state_prognostic! 
import ClimateMachine.DGMethods: init_ode_state
using ClimateMachine.Mesh.Geometry: LocalGeometry
import ClimateMachine.DGMethods.NumericalFluxes:
    NumericalFluxFirstOrder,
    numerical_flux_first_order!,
    numerical_boundary_flux_first_order!

# Minimal balance law carrying two advected scalars, q and p.
struct ConservationTestModel <: BalanceLaw end

vars_state(::ConservationTestModel, ::Auxiliary, T) = @vars(vel::SVector{3, T})
vars_state(::ConservationTestModel, ::Prognostic, T) = @vars(q::T, p::T)
vars_state(::ConservationTestModel, ::AbstractStateType, T) = @vars()

# Prescribe a smooth, spatially varying advecting velocity in the auxiliary
# state; conservation should hold for any such field.
function nodal_init_state_auxiliary!(
    ::ConservationTestModel,
    aux::Vars,
    tmp::Vars,
    g::LocalGeometry,
)
    x, y, z = g.coord
    r = x^2 + y^2 + z^2
    aux.vel = SVector(
        cos(10 * π * x) * sin(10 * π * y) + cos(20 * π * z),
        exp(sin(π * r)),
        sin(π * (x + y + z)),
    )
end

# Random initial data: mass conservation is tested independently of the
# particular initial condition.
function init_state_prognostic!(
    ::ConservationTestModel,
    state::Vars,
    aux::Vars,
    localgeo,
    t,
)
    state.q = rand()
    state.p = rand()
end

# q is advected by +vel, p by -vel (p flows "backwards").
function flux_first_order!(
    ::ConservationTestModel,
    flux::Grad,
    state::Vars,
    auxstate::Vars,
    t::Real,
    direction,
)
    vel = auxstate.vel
    flux.q = state.q .* vel
    flux.p = -state.p .* vel
end

flux_second_order!(::ConservationTestModel, _...) = nothing

source!(::ConservationTestModel, _...) = nothing

# Custom upwind numerical flux based on the face-averaged normal velocity.
struct ConservationTestModelNumFlux <: NumericalFluxFirstOrder end

boundary_conditions(::ConservationTestModel) = (nothing,)

boundary_state!(
    ::CentralNumericalFluxSecondOrder,
    bc,
    ::ConservationTestModel,
    _...,
) = nothing

function numerical_flux_first_order!(
    ::ConservationTestModelNumFlux,
    bl::BalanceLaw,
    fluxᵀn::Vars{S},
    n::SVector,
    state⁻::Vars{S},
    aux⁻::Vars{A},
    state⁺::Vars{S},
    aux⁺::Vars{A},
    t,
    direction,
) where {S, A}
    un⁻ = dot(n, aux⁻.vel)
    un⁺ = dot(n, aux⁺.vel)
    un = (un⁺ + un⁻) / 2 # face-averaged normal velocity
    if un > 0
        # outflow for q, inflow for p: upwind accordingly
        fluxᵀn.q = un * state⁻.q
        fluxᵀn.p = -un * state⁺.p
    else
        fluxᵀn.q = un * state⁺.q
        fluxᵀn.p = -un * state⁻.p
    end
end

# Boundary coupling described in the file header: p takes q's value at
# outflow (un > 0) and q takes p's value at inflow, so whatever leaves one
# field enters the other and total mass is conserved.
function numerical_boundary_flux_first_order!(
    ::ConservationTestModelNumFlux,
    bctype,
    bl::BalanceLaw,
    fluxᵀn::Vars{S},
    n::SVector,
    state⁻::Vars{S},
    aux⁻::Vars{A},
    state⁺::Vars{S},
    aux⁺::Vars{A},
    t,
    direction,
    state1⁻::Vars{S},
    aux1⁻::Vars{A},
) where {S, A}
    un = dot(n, aux⁻.vel)
    if un > 0
        fluxᵀn.q = un * state⁻.q
        fluxᵀn.p = -un * state⁻.q
    else
        fluxᵀn.q = un * state⁻.p
        fluxᵀn.p = -un * state⁻.p
    end
end

# Build a cubed-sphere DG discretization, integrate with LSRK54, and track the
# extreme mass loss/gain through a per-step callback. Returns the maximum mass
# change relative to the initial mass (the last expression of the function).
function test_run(mpicomm, ArrayType, N, Nhorz, Rrange, timeend, FT, dt)
    topl = StackedCubedSphereTopology(mpicomm, Nhorz, Rrange)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = Topologies.equiangular_cubed_sphere_warp,
    )
    dg = DGModel(
        ConservationTestModel(),
        grid,
        ConservationTestModelNumFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )
    # init on CPU so the rand() initial condition is well defined on GPU runs
    Q = init_ode_state(dg, FT(0); init_on_cpu = true)
    lsrk = LSRK54CarpenterKennedy(dg, Q; dt = dt, t0 = 0)
    eng0 = norm(Q)
    sum0 = weightedsum(Q) # initial total mass (q + p)
    @info @sprintf """Starting norm(Q₀) = %.16e sum(Q₀) = %.16e""" eng0 sum0
    max_mass_loss = FT(0)
    max_mass_gain = FT(0)
    cbmass = GenericCallbacks.EveryXSimulationSteps(1) do
        cbsum = weightedsum(Q)
        max_mass_loss = max(max_mass_loss, sum0 - cbsum)
        max_mass_gain = max(max_mass_gain, cbsum - sum0)
    end
    solve!(Q, lsrk; timeend = timeend, callbacks = (cbmass,))
    # Print some end of the simulation information
    engf = norm(Q)
    sumf = weightedsum(Q) # NOTE(review): computed but not used below — confirm if meant for the log message
    @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e max mass loss = %.16e max mass gain = %.16e initial mass = %.16e """ engf engf / eng0 engf - eng0 max_mass_loss max_mass_gain sum0
    max(max_mass_loss, max_mass_gain) / sum0
end

using Test
let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD
    polynomialorder = 4
    Nhorz = 4
    # FP-precision-dependent tolerance on the relative mass change
    tolerance = Dict(Float64 => 1e-15, Float32 => 1e-7)
    @testset "$(@__FILE__)" for FT in (Float64, Float32)
        dt = FT(1e-4)
        timeend = 100 * dt
        Rrange = range(FT(1), stop = FT(2), step = FT(1 // 4))
        @info (ArrayType, FT)
        delta_mass = test_run(
            mpicomm,
            ArrayType,
            polynomialorder,
            Nhorz,
            Rrange,
            timeend,
            FT,
            dt,
        )
        @test abs(delta_mass) < tolerance[FT]
    end
end

================================================
FILE: test/Numerics/DGMethods/courant.jl
================================================
using Test
using LinearAlgebra
using MPI
using StaticArrays
using ClimateMachine
using ClimateMachine.Atmos
using ClimateMachine.ConfigTypes
using ClimateMachine.Courant
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.Mesh.Geometry
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Topologies
using ClimateMachine.ODESolvers
using ClimateMachine.Orientations
using Thermodynamics.TemperatureProfiles
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates
using ClimateMachine.VTK
using CLIMAParameters
using CLIMAParameters.Planet: kappa_d

struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Reference pressure and temperature used both here and in the expected
# Courant-number formulas below.
const p∞ = 10^5
const T∞ = 300.0

# Uniform-temperature state with a position-dependent translation velocity,
# used so the Courant numbers below are analytically predictable.
function initialcondition!(problem, bl, state, aux, localgeo, t)
    FT = eltype(state)
    param_set = parameter_set(bl)
    coord = localgeo.coord
    translation_speed::FT = 150
    translation_angle::FT = pi / 4
    α = translation_angle # NOTE(review): α is never used below — confirm intent
    # NOTE(review): both horizontal components scale with coord[1]; the test's
    # expected speed norm([150, 150, 0]) is consistent with this — confirm.
    u∞ = SVector(
        FT(translation_speed * coord[1]),
        FT(translation_speed * coord[1]),
        FT(0),
    )
    _kappa_d::FT = kappa_d(param_set)
    u = u∞
    T = FT(T∞)
    # adiabatic/isentropic relation
    p = FT(p∞) * (T / FT(T∞))^(FT(1) / _kappa_d)
    ρ = air_density(param_set, T, p)
    state.ρ = ρ
    state.ρu = ρ * u
    e_kin = u' * u / 2
    state.energy.ρe = ρ * total_energy(param_set, e_kin, FT(0), T)
    nothing
end

let
    # boiler plate MPI stuff
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD
    # Mesh generation parameters
    N = 4
    Nq = N + 1
    Neh = 10
    Nev = 4
    @testset "$(@__FILE__) DGModel matrix" begin
        for FT in (Float64, Float32)
            for dim in (2, 3)
                connectivity = dim == 3 ? :full : :face
                if dim == 2
                    brickrange = (
                        range(FT(0); length = Neh + 1, stop = 1),
                        range(FT(1); length = Nev + 1, stop = 2),
                    )
                elseif dim == 3
                    brickrange = (
                        range(FT(0); length = Neh + 1, stop = 1),
                        range(FT(0); length = Neh + 1, stop = 1),
                        range(FT(1); length = Nev + 1, stop = 2),
                    )
                end
                μ = FT(2) # dynamic viscosity used for the diffusive Courant numbers
                topl = StackedBrickTopology(
                    mpicomm,
                    brickrange,
                    connectivity = connectivity,
                )
                grid = DiscontinuousSpectralElementGrid(
                    topl,
                    FloatType = FT,
                    DeviceArray = ArrayType,
                    polynomialorder = N,
                )
                problem = AtmosProblem(
                    boundaryconditions = (),
                    init_state_prognostic = initialcondition!,
                )
                physics = AtmosPhysics{FT}(
                    param_set;
                    ref_state = NoReferenceState(),
                    turbulence = ConstantDynamicViscosity(μ, WithDivergence()),
                    moisture = DryModel(),
                )
                model = AtmosModel{FT}(
                    AtmosLESConfigType,
                    physics;
                    problem = problem,
                    source = (Gravity(),),
                )
                dg = DGModel(
                    model,
                    grid,
                    RusanovNumericalFlux(),
                    CentralNumericalFluxSecondOrder(),
                    CentralNumericalFluxGradient(),
                )
                Δt = FT(1 // 200)
                Q = init_ode_state(dg, FT(0))
                Δx = min_node_distance(grid, EveryDirection())
                Δx_v = min_node_distance(grid, VerticalDirection())
                Δx_h = min_node_distance(grid, HorizontalDirection())
                # Expected values: advective + acoustic speed horizontally,
                # acoustic only vertically (initial vertical velocity is 0).
                translation_speed = FT(norm([150.0, 150.0, 0.0]))
                diff_speed_h = FT(μ / air_density(param_set, FT(T∞), FT(p∞)))
                diff_speed_v = FT(μ / air_density(param_set, FT(T∞), FT(p∞)))
                c_h =
                    Δt *
                    (translation_speed + soundspeed_air(param_set, FT(T∞))) /
                    Δx_h
                c_v = Δt * (soundspeed_air(param_set, FT(T∞))) / Δx_v
                d_h = Δt * diff_speed_h / Δx_h^2
                d_v = Δt * diff_speed_v / Δx_v^2
                simtime = FT(0)
                # tests for non diffusive courant number
                rtol = FT === Float64 ? 1e-4 : 1f-2
                @test courant(
                    nondiffusive_courant,
                    dg,
                    model,
                    Q,
                    Δt,
                    simtime,
                    HorizontalDirection(),
                ) ≈ c_h rtol = rtol
                @test courant(
                    nondiffusive_courant,
                    dg,
                    model,
                    Q,
                    Δt,
                    simtime,
                    VerticalDirection(),
                ) ≈ c_v rtol = rtol
                # tests for diffusive courant number
                @test courant(
                    diffusive_courant,
                    dg,
                    model,
                    Q,
                    Δt,
                    simtime,
                    HorizontalDirection(),
                ) ≈ d_h
                @test courant(
                    diffusive_courant,
                    dg,
                    model,
                    Q,
                    Δt,
                    simtime,
                    VerticalDirection(),
                ) ≈ d_v
            end
        end
    end
end
nothing

================================================
FILE: test/Numerics/DGMethods/custom_filter.jl
================================================
using Test
using ClimateMachine
using ClimateMachine.VariableTemplates: @vars, Vars
using ClimateMachine.BalanceLaws
using ClimateMachine.DGMethods: AbstractCustomFilter, apply!
import ClimateMachine
import ClimateMachine.BalanceLaws:
    vars_state, init_state_auxiliary!, init_state_prognostic!
using MPI
using LinearAlgebra

# Single-scalar balance law used only to exercise the custom-filter hook.
struct CustomFilterTestModel <: BalanceLaw end
struct CustomTestFilter <: AbstractCustomFilter end

vars_state(::CustomFilterTestModel, ::Auxiliary, FT) = @vars()
# NOTE(review): the `where {N}` parameter is unused — confirm it is intentional
vars_state(::CustomFilterTestModel, ::Prognostic, FT) where {N} = @vars(q::FT)

init_state_auxiliary!(::CustomFilterTestModel, _...) = nothing

# q is initialized to the distance from the origin in the x-y plane.
function init_state_prognostic!(
    ::CustomFilterTestModel,
    state::Vars,
    aux::Vars,
    localgeo,
)
    coord = localgeo.coord
    state.q = hypot(coord[1], coord[2])
end

@testset "Test custom filter" begin
    let
        ClimateMachine.init()
        N = 4
        Ne = (2, 2, 2)
        # The filter under test squares the state pointwise; the test below
        # checks that apply! produces exactly data .^ 2.
        function ClimateMachine.DGMethods.custom_filter!(
            ::CustomTestFilter,
            bl::CustomFilterTestModel,
            state::Vars,
            aux::Vars,
        )
            state.q = state.q^2
        end
        @testset for FT in (Float64, Float32)
            dim = 2
            brickrange =
                ntuple(j -> range(FT(-1); length = Ne[j] + 1, stop = 1), dim)
            topl = ClimateMachine.Mesh.Topologies.BrickTopology(
                MPI.COMM_WORLD,
                brickrange,
                periodicity = ntuple(j -> true, dim),
            )
            grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid(
                topl,
                FloatType = FT,
                DeviceArray = ClimateMachine.array_type(),
                polynomialorder = N,
            )
            model = CustomFilterTestModel()
            dg = ClimateMachine.DGMethods.DGModel(
                model,
                grid,
                nothing,
                nothing,
                nothing;
                state_gradient_flux = nothing,
            )
            Q = ClimateMachine.DGMethods.init_ode_state(dg)
            data = Array(Q.realdata)
            apply!(CustomTestFilter(), grid, model, Q, dg.state_auxiliary)
            @test all(Array(Q.realdata) .== data .^ 2)
        end
    end
end

================================================
FILE: test/Numerics/DGMethods/fv_reconstruction_test.jl
================================================
using Test
using StaticArrays
using ClimateMachine.Atmos:
    AtmosProblem,
    NoReferenceState,
    AtmosPhysics,
    AtmosModel,
    DryModel,
    ConstantDynamicViscosity,
    AtmosLESConfigType,
    HBFVReconstruction
import ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear, width
using ClimateMachine.Orientations
import StaticArrays: SUnitRange
import ClimateMachine.BalanceLaws:
    Primitive, Prognostic, vars_state, number_states
using ClimateMachine.VariableTemplates: Vars
using CLIMAParameters
using CLIMAParameters.Planet: grav

struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# lin_func, quad_func, third_func, fourth_func
#
# Each returns:
# ```
# num_state_primitive::Int64
# pointwise values::Array{FT, num_state_primitive by length(ξ)}
# integration values::Array{FT, num_state_primitive by length(ξ)}
# ```
function lin_func(ξ)
    return 1, [(2 * ξ .+ 1)';], [(ξ .^ 2 .+ ξ)';]
end
function quad_func(ξ)
    return 2,
    [(3 * ξ .^ 2 .+ 1)'; (2 * ξ .+ 1)'],
    [(ξ .^ 3 .+ ξ)'; (ξ .^ 2 .+ ξ)']
end
function third_func(ξ)
    return 1, [(4 * ξ .^ 3 .+ 1)';], [(ξ .^ 4 .+ ξ)';]
end
function fourth_func(ξ)
    return 2,
    [(5 * ξ .^ 4 .+ 1)'; (3 * ξ .^ 2 .+ 1)'],
    [(ξ .^ 5 .+ ξ)'; (ξ .^ 3 .+ ξ)']
end

@testset "Hydrostatic balanced linear reconstruction test" begin
    # No-op initial condition; only the reconstruction operator is exercised.
    function initialcondition!(problem, bl, state, aux, coords, t, args...) end
    fv_recon! = FVLinear()
    stencil_width = width(fv_recon!)
    stencil_center = stencil_width + 1
    stencil_diameter = 2stencil_width + 1
    @test stencil_width == 1
    func = lin_func
    for FT in (Float64,)
        physics = AtmosPhysics{FT}(
            param_set;
            ref_state = NoReferenceState(),
            turbulence = ConstantDynamicViscosity(FT(0)),
            moisture = DryModel(),
        )
        model = AtmosModel{FT}(
            AtmosLESConfigType,
            physics;
            problem = AtmosProblem(;
                physics = physics,
                init_state_prognostic = initialcondition!,
            ),
            orientation = FlatOrientation(),
        )
        vars_prim = Vars{vars_state(model, Primitive(), FT)}
        hb_recon! = HBFVReconstruction(model, fv_recon!)
        _grav = FT(grav(param_set))
        @test width(hb_recon!) == 1
        num_state_prognostic = number_states(model, Prognostic())
        num_state_primitive = number_states(model, Primitive())
        local_state_face_primitive = ntuple(Val(2)) do _
            MArray{Tuple{num_state_primitive}, FT}(undef)
        end
        # interior point reconstruction test
        grid = FT[0; 1; 3; 6] # non-uniform cell faces
        grid_c = (grid[2:end] + grid[1:(end - 1)]) / 2 # cell centers
        local_cell_weights =
            MArray{Tuple{stencil_diameter}, FT}(grid[2:end] - grid[1:(end - 1)])
        # linear profile for all variables except pressure
        local_state_primitive = SVector(ntuple(Val(stencil_diameter)) do _
            MArray{Tuple{num_state_primitive}, FT}(undef)
        end...)
        # values at the cell centers 1 2 3
        _, uc, _ = func(grid_c)
        # values at the cell faces 0.5 1.5* 2.5* 3.5
        _, uf, _ = func(grid)
        for i_d in 1:stencil_diameter
            for i_p in 1:num_state_prognostic
                local_state_primitive[i_d][i_p] = uc[i_d]
            end
        end
        # pressure profile is updated to satisfy the discrete hydrostatic balance
        p_surf = FT(100) # at the bottom wall
        p_ref = p_surf
        for i_d in 1:stencil_diameter
            # half-cell drop to the center, store, half-cell drop to next face
            p_ref -=
                vars_prim(local_state_primitive[i_d]).ρ *
                _grav *
                local_cell_weights[i_d] / 2
            vars_prim(local_state_primitive[i_d]).p = p_ref
            p_ref -=
                vars_prim(local_state_primitive[i_d]).ρ *
                _grav *
                local_cell_weights[i_d] / 2
        end
        local_state_primitive_hb = copy(local_state_primitive)
        # interior point reconstruction
        hb_recon!(
            local_state_face_primitive[1],
            local_state_face_primitive[2],
            local_state_primitive,
            local_cell_weights,
        )
        # bottom face
        @test vars_prim(local_state_face_primitive[1]).ρ ≈ uf[2]
        @test vars_prim(local_state_face_primitive[1]).p ≈
              p_surf -
              vars_prim(local_state_primitive[1]).ρ *
              _grav *
              local_cell_weights[1]
        # top face
        @test vars_prim(local_state_face_primitive[2]).ρ ≈ uf[3]
        @test vars_prim(local_state_face_primitive[2]).p ≈
              p_surf -
              vars_prim(local_state_primitive[1]).ρ *
              _grav *
              local_cell_weights[1] -
              vars_prim(local_state_primitive[2]).ρ *
              _grav *
              local_cell_weights[2]
        # make sure the reconstruction did not modify the input stencil values
        for i_d in 1:stencil_diameter
            @test all(
                local_state_primitive[i_d] ≈ local_state_primitive_hb[i_d],
            )
        end
        @info "Start boundary test"
        # boundary point reconstruction (single-cell stencil)
        rng = SUnitRange(stencil_center, stencil_center)
        hb_recon!(
            local_state_face_primitive[1],
            local_state_face_primitive[2],
            local_state_primitive[rng],
            local_cell_weights[rng],
        )
        # bottom face
        @test vars_prim(local_state_face_primitive[1]).ρ ≈ uc[stencil_center]
        @test vars_prim(local_state_face_primitive[1]).p ≈
              vars_prim(local_state_primitive[stencil_center]).p +
              uc[stencil_center] * _grav * local_cell_weights[stencil_center] /
              2
        # top face
        @test vars_prim(local_state_face_primitive[2]).ρ ≈ uc[stencil_center]
        @test vars_prim(local_state_face_primitive[2]).p ≈
              vars_prim(local_state_primitive[stencil_center]).p -
              uc[stencil_center] * _grav * local_cell_weights[stencil_center] /
              2
    end
end

================================================
FILE: test/Numerics/DGMethods/grad_test.jl
================================================
using MPI
using StaticArrays
using ClimateMachine
using ClimateMachine.VariableTemplates
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.MPIStateArrays
using ClimateMachine.DGMethods
using Printf
using ClimateMachine.BalanceLaws
import ClimateMachine.BalanceLaws: vars_state, nodal_init_state_auxiliary!
using ClimateMachine.Mesh.Geometry: LocalGeometry

if !@isdefined integration_testing
    const integration_testing = parse(
        Bool,
        lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
    )
end

# Balance law whose auxiliary state holds a scalar a and its analytic
# (direction-restricted) gradient ∇a, used to validate
# auxiliary_field_gradient!.
struct GradTestModel{dim, dir} <: BalanceLaw end

vars_state(m::GradTestModel, ::Auxiliary, T) = @vars begin
    a::T
    ∇a::SVector{3, T}
end
vars_state(::GradTestModel, ::Prognostic, T) = @vars()

# Initialize a with a polynomial and ∇a with its exact gradient, zeroing the
# components not belonging to the requested direction.
function nodal_init_state_auxiliary!(
    ::GradTestModel{dim, dir},
    aux::Vars,
    tmp::Vars,
    g::LocalGeometry,
) where {dim, dir}
    x, y, z = g.coord
    if dim == 2
        aux.a = x^2 + y^3 - x * y
        if dir isa EveryDirection
            aux.∇a = SVector(2 * x - y, 3 * y^2 - x, 0)
        elseif dir isa HorizontalDirection
            aux.∇a = SVector(2 * x - y, 0, 0)
        elseif dir isa VerticalDirection
            aux.∇a = SVector(0, 3 * y^2 - x, 0)
        end
    else
        aux.a = x^2 + y^3 + z^2 * y^2 - x * y * z
        if dir isa EveryDirection
            aux.∇a = SVector(
                2 * x - y * z,
                3 * y^2 + 2 * z^2 * y - x * z,
                2 * z * y^2 - x * y,
            )
        elseif dir isa HorizontalDirection
            aux.∇a = SVector(2 * x - y * z, 3 * y^2 + 2 * z^2 * y - x * z, 0)
        elseif dir isa VerticalDirection
            aux.∇a = SVector(0, 0, 2 * z * y^2 - x * y)
        end
    end
end

using Test
# Compute the DG gradient of a and compare with the stored exact gradient.
# Returns (approx::Bool, err): elementwise-≈ result and the L2 error.
function test_run(mpicomm, dim, direction, Ne, N, FT, ArrayType)
    connectivity = dim == 3 ? :full : :face
    brickrange = ntuple(j -> range(FT(0); length = Ne[j] + 1, stop = 3), dim)
    topl = StackedBrickTopology(
        mpicomm,
        brickrange,
        periodicity = ntuple(j -> false, dim),
        connectivity = connectivity,
    )
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )
    model = GradTestModel{dim, direction}()
    dg = DGModel(
        model,
        grid,
        nothing,
        nothing,
        nothing;
        state_gradient_flux = nothing,
    )
    exact_aux = copy(dg.state_auxiliary)
    auxiliary_field_gradient!(
        model,
        dg.state_auxiliary,
        (:∇a,),
        dg.state_auxiliary,
        (:a,),
        grid,
        direction,
    )
    # Wrapping in Array ensures both GPU and CPU code use same approx
    approx = Array(dg.state_auxiliary.∇a) ≈ Array(exact_aux.∇a)
    err = euclidean_distance(exact_aux, dg.state_auxiliary)
    return approx, err
end

let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD
    numelem = (5, 5, 5)
    # Reference errors for the FV (polynomial order 0) vertical cases,
    # keyed by (FT, dim, refinement level).
    expected_result = Dict()
    expected_result[Float64, 2, 1] = 6.2135207410935696e+00
    expected_result[Float64, 2, 2] = 2.3700094936518794e+00
    expected_result[Float64, 2, 3] = 8.7105082013050261e-01
    expected_result[Float64, 2, 4] = 3.1401219279927611e-01
    expected_result[Float64, 3, 1] = 7.9363467666175236e+00
    expected_result[Float64, 3, 2] = 2.8059223082616098e+00
    expected_result[Float64, 3, 3] = 9.9204334582727094e-01
    expected_result[Float64, 3, 4] = 3.5074028853276679e-01
    expected_result[Float32, 2, 1] = 6.2135176658630371e+00
    expected_result[Float32, 2, 2] = 2.3700129985809326e+00
    expected_result[Float32, 3, 1] = 7.9363408088684082e+00
    expected_result[Float32, 3, 2] = 2.8059237003326416e+00
    @testset for FT in (Float64, Float32)
        lvls =
            integration_testing ||
            ClimateMachine.Settings.integration_testing ?
            (FT === Float32 ? 2 : 4) : 1
        @testset for polynomialorder in ((4, 4), (4, 0))
            @testset for dim in 2:3
                @testset for direction in (
                    EveryDirection(),
                    HorizontalDirection(),
                    VerticalDirection(),
                )
                    err = zeros(FT, lvls)
                    for l in 1:lvls
                        approx, err[l] = test_run(
                            mpicomm,
                            dim,
                            direction,
                            ntuple(j -> 2^(l - 1) * numelem[j], dim),
                            polynomialorder,
                            FT,
                            ArrayType,
                        )
                        # Exact for polynomial data unless the vertical is FV
                        # (order 0); then compare against stored errors.
                        if polynomialorder[end] != 0 ||
                           direction isa HorizontalDirection
                            @test approx
                        else
                            @test err[l] ≈ expected_result[FT, dim, l]
                        end
                    end
                    if polynomialorder[end] != 0 ||
                       direction isa HorizontalDirection
                        @info begin
                            msg = "Polynomial order = $polynomialorder, direction = $direction\n"
                            for l in 1:(lvls - 1)
                                rate = log2(err[l]) - log2(err[l + 1])
                                msg *= @sprintf(
                                    "\n rate for level %d = %e\n",
                                    l,
                                    rate
                                )
                            end
                            msg
                        end
                    end
                end
            end
        end
    end
end
nothing

================================================
FILE: test/Numerics/DGMethods/grad_test_sphere.jl
================================================
using MPI
using StaticArrays
using LinearAlgebra
using ClimateMachine
using ClimateMachine.VariableTemplates
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.MPIStateArrays
using ClimateMachine.DGMethods
using Printf
using ClimateMachine.BalanceLaws
import ClimateMachine.BalanceLaws: vars_state, nodal_init_state_auxiliary!
using ClimateMachine.Mesh.Geometry: LocalGeometry

if !@isdefined integration_testing
    const integration_testing = parse(
        Bool,
        lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
    )
end

# Balance law carrying a = r^3 and its analytic gradient on a spherical shell;
# used to validate auxiliary_field_gradient! on cubed-sphere grids.
struct GradSphereTestModel{dir} <: BalanceLaw end

vars_state(m::GradSphereTestModel, ::Auxiliary, T) = @vars begin
    a::T
    ∇a::SVector{3, T}
end
vars_state(::GradSphereTestModel, ::Prognostic, T) = @vars()

# a = r^3 depends only on radius, so its gradient is purely radial
# (i.e., vertical on the sphere): zero for the horizontal-only case.
function nodal_init_state_auxiliary!(
    ::GradSphereTestModel{dir},
    aux::Vars,
    tmp::Vars,
    g::LocalGeometry,
) where {dir}
    x, y, z = g.coord
    r = hypot(x, y, z)
    aux.a = r^3
    if !(dir isa HorizontalDirection)
        aux.∇a = 3 * r^2 * g.coord / r
    else
        aux.∇a = SVector(0, 0, 0)
    end
end

using Test
# Build a cubed-sphere grid, compute the DG gradient of a, and return the L2
# error against the stored exact gradient.
function test_run(mpicomm, Ne_horz, Ne_vert, N, FT, ArrayType, direction)
    Rrange = range(FT(1 // 2); length = Ne_vert + 1, stop = FT(1))
    topl = StackedCubedSphereTopology(mpicomm, Ne_horz, Rrange)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = Topologies.equiangular_cubed_sphere_warp,
    )
    model = GradSphereTestModel{direction}()
    dg = DGModel(
        model,
        grid,
        nothing,
        nothing,
        nothing;
        state_gradient_flux = nothing,
    )
    exact_aux = copy(dg.state_auxiliary)
    auxiliary_field_gradient!(
        model,
        dg.state_auxiliary,
        (:∇a,),
        dg.state_auxiliary,
        (:a,),
        grid,
        direction,
    )
    err = euclidean_distance(exact_aux, dg.state_auxiliary)
    return err
end

let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD
    base_Nhorz = 4
    base_Nvert = 2
    # Reference errors keyed by (polynomialorder, refinement level)
    expected_result = Dict()
    expected_result[(4, 4), 1] = 4.3759489495202896e-04
    expected_result[(4, 4), 2] = 2.9065372851175251e-05
    expected_result[(4, 4), 3] = 1.8457379514995729e-06
    expected_result[(4, 4), 4] = 1.1582093840084037e-07
    expected_result[(4, 0), 1] = 1.1070045305138052e+00
    expected_result[(4, 0), 2] = 4.2750547196265593e-01
    expected_result[(4, 0), 3] = 1.6041478911787385e-01
    expected_result[(4, 0), 4] = 5.8570590697850776e-02
    lvls =
        integration_testing || ClimateMachine.Settings.integration_testing ?
        4 : 1
    @testset for FT in (Float64,)
        @testset for polynomialorder in ((4, 4), (4, 0))
            @testset for direction in (
                EveryDirection(),
                HorizontalDirection(),
                VerticalDirection(),
            )
                err = zeros(FT, lvls)
                @testset for l in 1:lvls
                    Ne_horz = 2^(l - 1) * base_Nhorz
                    Ne_vert = 2^(l - 1) * base_Nvert
                    err[l] = test_run(
                        mpicomm,
                        Ne_horz,
                        Ne_vert,
                        polynomialorder,
                        FT,
                        ArrayType,
                        direction,
                    )
                    # horizontal gradient of a radial field should vanish;
                    # otherwise compare against stored convergence data
                    if !(direction isa HorizontalDirection)
                        @test err[l] ≈ expected_result[polynomialorder, l]
                    else
                        @test abs(err[l]) < FT(1.3e-13)
                    end
                end
                if !(direction isa HorizontalDirection)
                    @info begin
                        msg = "Polynomial order = $polynomialorder, direction = $direction\n"
                        for l in 1:(lvls - 1)
                            rate = log2(err[l]) - log2(err[l + 1])
                            msg *= @sprintf(
                                "\n rate for level %d = %e\n",
                                l,
                                rate
                            )
                        end
                        msg
                    end
                end
            end
        end
    end
end
nothing

================================================
FILE: test/Numerics/DGMethods/horizontal_integral_test.jl
================================================
using MPI
using StaticArrays
using Logging
using Printf
using LinearAlgebra
using Test
import KernelAbstractions: CPU
using ClimateMachine
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.MPIStateArrays
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.ODESolvers
using ClimateMachine.GenericCallbacks
using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.VariableTemplates
using Thermodynamics

# Horizontal surface integral of x1 on a warped brick: the mean of x1 over
# each horizontal level should remain 1/2 despite the mesh warping.
function run_test1(mpicomm, dim, Ne, N, FT, ArrayType)
    warpfun =
        (ξ1, ξ2, ξ3) -> begin
            x1 = ξ1 + (ξ1 - 1 / 2) * cos(2 * π * ξ2 * ξ3) / 4
            x2 = ξ2 + (ξ2 - 1 / 2) * cos(2 * π * ξ2 * ξ3) / 4
            x3 = ξ3 + ξ1 / 4 + sin(2 * π * ξ1) / 16
            return (x1, x2, x3)
        end
    brickrange = (
        range(FT(0); length = Ne + 1, stop = 1),
        range(FT(0); length = Ne + 1, stop = 1),
        range(FT(0); length = 2, stop = 1),
    )
    topl = StackedBrickTopology(
        mpicomm,
        brickrange,
        periodicity = (false, false, false),
    )
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = warpfun,
    )
    nrealelem = length(topl.realelems)
    Nq = N + 1
    Nqk = dimensionality(grid) == 2 ? 1 : Nq
    vgeo = grid.vgeo
    # copy geometry to host when running on an accelerator
    localvgeo = array_device(vgeo) isa CPU ? vgeo : Array(vgeo)
    S = zeros(Nqk)  # per-level integral of x1 * MH
    S1 = zeros(Nqk) # per-level integral of MH (surface area)
    for e in 1:nrealelem
        for k in 1:Nqk
            for j in 1:Nq
                for i in 1:Nq
                    ijk = i + Nq * ((j - 1) + Nq * (k - 1))
                    S[k] +=
                        localvgeo[ijk, grid.x1id, e] *
                        localvgeo[ijk, grid.MHid, e]
                    S1[k] += localvgeo[ijk, grid.MHid, e]
                end
            end
        end
    end
    Stot = zeros(Nqk)
    S1tot = zeros(Nqk)
    Err = 0.0
    for k in 1:Nqk
        Stot[k] = MPI.Reduce(S[k], +, 0, mpicomm)
        S1tot[k] = MPI.Reduce(S1[k], +, 0, mpicomm)
        Err += (0.5 - Stot[k] / S1tot[k])^2
    end
    Err = sqrt(Err / Nqk)
    @test Err < 2e-15
end

# Same idea with a different warp: the expected level mean of x1 depends on
# the level height x3 through the sin(2π x3)/16 shift.
function run_test2(mpicomm, dim, Ne, N, FT, ArrayType)
    warpfun =
        (ξ1, ξ2, ξ3) -> begin
            x1 = sin(2 * π * ξ3) / 16 + ξ1
            x2 = ξ2 + (ξ2 - 1 / 2) * cos(2 * π * ξ2 * ξ3) / 4
            x3 = ξ3 + sin(2π * (ξ1)) / 20
            return (x1, x2, x3)
        end
    brickrange = (
        range(FT(0); length = Ne + 1, stop = 1),
        range(FT(0); length = Ne + 1, stop = 1),
        range(FT(0); length = 2, stop = 1),
    )
    topl = StackedBrickTopology(
        mpicomm,
        brickrange,
        periodicity = (false, false, false),
    )
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = warpfun,
    )
    nrealelem = length(topl.realelems)
    Nq = N + 1
    Nqk = dimensionality(grid) == 2 ? 1 : Nq
    vgeo = grid.vgeo
    localvgeo = array_device(vgeo) isa CPU ? vgeo : Array(vgeo)
    S = zeros(Nqk)
    S1 = zeros(Nqk)
    K = zeros(Nqk)
    for e in 1:nrealelem
        for k in 1:Nqk
            for j in 1:Nq
                for i in 1:Nq
                    ijk = i + Nq * ((j - 1) + Nq * (k - 1))
                    S[k] +=
                        localvgeo[ijk, grid.x1id, e] *
                        localvgeo[ijk, grid.MHid, e]
                    S1[k] += localvgeo[ijk, grid.MHid, e]
                    # NOTE(review): K[k] is overwritten on every (i, j, e)
                    # iteration, keeping only the last node's x3 value — this
                    # relies on x3 being constant on each level; confirm.
                    K[k] = localvgeo[ijk, grid.x3id, e]
                end
            end
        end
    end
    Stot = zeros(Nqk)
    S1tot = zeros(Nqk)
    Err = 0.0
    for k in 1:Nqk
        Stot[k] = MPI.Reduce(S[k], +, 0, mpicomm)
        S1tot[k] = MPI.Reduce(S1[k], +, 0, mpicomm)
        Err += (0.5 + sin(2 * π * K[k]) / 16 - Stot[k] / S1tot[k])^2
    end
    Err = sqrt(Err / Nqk)
    @test 2e-15 > Err
end

# Sphere-surface areas: compare the integrated horizontal measure on the inner
# and outer shell boundaries against the analytic sphere areas 4πR².
function run_test3(mpicomm, dim, Ne, N, FT, ArrayType)
    base_Nhorz = 4
    base_Nvert = 2
    Rinner = 1 // 2
    Router = 1
    # columns: outer-shell defect, inner-shell defect; rows: refinement level
    expected_result = [
        -4.5894269717905445e-8 -1.1473566985387151e-8
        -2.0621904184281448e-10 -5.155431637149377e-11
        -8.72191208145523e-13 -2.1715962361668062e-13
    ]
    for l in 1:3
        Nhorz = 2^(l - 1) * base_Nhorz
        Nvert = 2^(l - 1) * base_Nvert
        Rrange = grid1d(FT(Rinner), FT(Router); nelem = Nvert)
        topl = StackedCubedSphereTopology(mpicomm, Nhorz, Rrange)
        grid = DiscontinuousSpectralElementGrid(
            topl,
            FloatType = FT,
            DeviceArray = ArrayType,
            polynomialorder = N,
            meshwarp = Topologies.equiangular_cubed_sphere_warp,
        )
        nrealelem = length(topl.realelems)
        Nq = N + 1
        Nqk = dimensionality(grid) == 2 ? 1 : Nq
        vgeo = grid.vgeo
        localvgeo = array_device(vgeo) isa CPU ? vgeo : Array(vgeo)
        topology = grid.topology
        nvertelem = topology.stacksize
        nhorzelem = div(nrealelem, nvertelem)
        Surfout = 0
        Surfin = 0
        for ev in 1:nvertelem
            for eh in 1:nhorzelem
                e = ev + (eh - 1) * nvertelem
                for i in 1:Nq
                    for j in 1:Nq
                        for k in 1:Nqk
                            # NOTE(review): uses Nqk here where run_test1/2 use
                            # Nq; equivalent when dim == 3 (Nqk == Nq) — confirm
                            ijk = i + Nq * ((j - 1) + Nqk * (k - 1))
                            # accumulate only on the top of the top element and
                            # the bottom of the bottom element of each stack
                            if (k == Nqk && ev == nvertelem)
                                Surfout += localvgeo[ijk, grid.MHid, e]
                            end
                            if (k == 1 && ev == 1)
                                Surfin += localvgeo[ijk, grid.MHid, e]
                            end
                        end
                    end
                end
            end
        end
        Surfouttot = MPI.Reduce(Surfout, +, 0, MPI.COMM_WORLD)
        Surfintot = MPI.Reduce(Surfin, +, 0, MPI.COMM_WORLD)
        @test (4 * π * Router^2 - Surfouttot) ≈ expected_result[l, 1] rtol = 1e-3 atol = eps(FT) * 4 * π * Router^2
        @test (4 * π * Rinner^2 - Surfintot) ≈ expected_result[l, 2] rtol = 1e-3 atol = eps(FT) * 4 * π * Rinner^2
    end
end

# Test for 2D integral
function run_test4(mpicomm, dim, Ne, N, FT, ArrayType)
    brickrange = ntuple(j -> range(FT(0); length = Ne + 1, stop = 1), 2)
    topl = StackedBrickTopology(
        mpicomm,
        brickrange,
        periodicity = ntuple(j -> true, 2),
        connectivity = :face,
    )
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )
    nrealelem = length(topl.realelems)
    Nq = N + 1
    vgeo = grid.vgeo
    localvgeo = array_device(vgeo) isa CPU ? vgeo : Array(vgeo)
    S = zeros(Nq)
    for e in 1:nrealelem
        for i in 1:Nq
            for j in 1:Nq
                ij = i + Nq * (j - 1)
                S[j] +=
                    localvgeo[ij, grid.x1id, e] * localvgeo[ij, grid.MHid, e]
            end
        end
    end
    Err = 0
    for j in 1:Nq
        Err += (S[j] - 0.5)^2
    end
    Err = sqrt(Err / Nq)
    @test Err <= 1e-15
end

# 2D warped variant: expected level mean of x1 shifts by cos(π x2)/16.
function run_test5(mpicomm, dim, Ne, N, FT, ArrayType)
    warpfun =
        (ξ1, ξ2, ξ3) -> begin
            x1 = cos(π * ξ2) / 16 + abs(ξ1)
            x2 = ξ2
            x3 = ξ3
            return (x1, x2, x3)
        end
    brickrange = ntuple(j -> range(FT(0); length = Ne + 1, stop = 1), 2)
    topl = StackedBrickTopology(
        mpicomm,
        brickrange,
        periodicity = ntuple(j -> true, 2),
        connectivity = :face,
    )
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = warpfun,
    )
    nrealelem = length(topl.realelems)
    Nq = N + 1
    vgeo = grid.vgeo
    localvgeo = array_device(vgeo) isa CPU ? vgeo : Array(vgeo)
    S = zeros(Nq)
    J = zeros(Nq)
    for e in 1:nrealelem
        for i in 1:Nq
            for j in 1:Nq
                ij = i + Nq * (j - 1)
                S[j] +=
                    localvgeo[ij, grid.x1id, e] * localvgeo[ij, grid.MHid, e]
                # last-written x2 value per level (x2 is not warped here)
                J[j] = localvgeo[ij, grid.x2id, e]
            end
        end
    end
    Err = 0
    for j in 1:Nq
        Err += (S[j] - 0.5 - cos(π * J[j]) / 16)^2
    end
    Err = sqrt(Err / Nq)
    @test Err < 1e-15
end

let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD
    FT = Float64
    dim = 3
    Ne = 1
    polynomialorder = 4
    @info (ArrayType, FT, dim)
    @testset "horizontal_integral" begin
        run_test1(mpicomm, dim, Ne, polynomialorder, FT, ArrayType)
        run_test2(mpicomm, dim, Ne, polynomialorder, FT, ArrayType)
        run_test3(mpicomm, dim, Ne, polynomialorder, FT, ArrayType)
        run_test4(mpicomm, dim, Ne, polynomialorder, FT, ArrayType)
        run_test5(mpicomm, dim, Ne, polynomialorder, FT, ArrayType)
    end
end

================================================
FILE: test/Numerics/DGMethods/integral_test.jl
================================================
using MPI
using StaticArrays
using ClimateMachine
using ClimateMachine.VariableTemplates
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.MPIStateArrays
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using Printf
using LinearAlgebra
using Logging

using ClimateMachine.BalanceLaws
import ClimateMachine.BalanceLaws:
    vars_state,
    flux_first_order!,
    flux_second_order!,
    source!,
    wavespeed,
    boundary_state!,
    nodal_init_state_auxiliary!,
    init_state_prognostic!,
    update_auxiliary_state!,
    indefinite_stack_integral!,
    reverse_indefinite_stack_integral!,
    integral_load_auxiliary_state!,
    integral_set_auxiliary_state!,
    reverse_integral_load_auxiliary_state!,
    reverse_integral_set_auxiliary_state!
import ClimateMachine.DGMethods: init_ode_state
using ClimateMachine.Mesh.Geometry: LocalGeometry

# Balance law exercising upward/downward stack integrals: the auxiliary state
# stores both the computed integrals (int, rev_int) and the analytic values
# (a, b, rev_a, rev_b) they are compared against.
struct IntegralTestModel{dim} <: BalanceLaw end

vars_state(::IntegralTestModel, ::DownwardIntegrals, T) = @vars(a::T, b::T)
vars_state(::IntegralTestModel, ::UpwardIntegrals, T) = @vars(a::T, b::T)
vars_state(m::IntegralTestModel, ::Auxiliary, T) = @vars(
    int::vars_state(m, UpwardIntegrals(), T),
    rev_int::vars_state(m, DownwardIntegrals(), T),
    coord::SVector{3, T},
    a::T,
    b::T,
    rev_a::T,
    rev_b::T
)
vars_state(::IntegralTestModel, ::AbstractStateType, T) = @vars()

# The dynamics are irrelevant for this test: all tendencies are trivial.
flux_first_order!(::IntegralTestModel, _...) = nothing
flux_second_order!(::IntegralTestModel, _...) = nothing
source!(::IntegralTestModel, _...) = nothing
boundary_state!(_, ::IntegralTestModel, _...) = nothing
init_state_prognostic!(::IntegralTestModel, _...) = nothing
wavespeed(::IntegralTestModel, _...) = 1

# Store the analytic antiderivatives of the integrands defined in
# integral_load_auxiliary_state! (integrated along y in 2D, z in 3D), plus
# their reverse (top-down) counterparts evaluated from the domain top at 3.
function nodal_init_state_auxiliary!(
    ::IntegralTestModel{dim},
    aux::Vars,
    tmp::Vars,
    g::LocalGeometry,
) where {dim}
    x, y, z = aux.coord = g.coord
    if dim == 2
        aux.a = x * y
        aux.b = 2 * x * y + sin(x) * y^2 / 2 - (z - 1)^2 * y^3 / 3
        y_top = 3
        a_top = x * y_top
        b_top = 2 * x * y_top + sin(x) * y_top^2 / 2 - (z - 1)^2 * y_top^3 / 3
        aux.rev_a = a_top - aux.a
        aux.rev_b = b_top - aux.b
    else
        aux.a = x * z + y * z
        aux.b = 2 * x * z + sin(x) * y * z - (1 + (z - 1)^3) * y^2 / 3
        zz_top = 3
        a_top = x * zz_top + y * zz_top
        b_top =
            2 * x * zz_top + sin(x) * y * zz_top -
            (1 + (zz_top - 1)^3) * y^2 / 3
        aux.rev_a = a_top - aux.a
        aux.rev_b = b_top - aux.b
    end
end

# Recompute the forward and reverse stack integrals on every update.
function update_auxiliary_state!(
    dg::DGModel,
    m::IntegralTestModel,
    Q::MPIStateArray,
    t::Real,
    elems::UnitRange,
)
    indefinite_stack_integral!(dg, m, Q, dg.state_auxiliary, t, elems)
    reverse_indefinite_stack_integral!(dg, m, Q, dg.state_auxiliary, t, elems)
    return true
end

# Integrands whose vertical antiderivatives are the analytic a and b above.
@inline function integral_load_auxiliary_state!(
    m::IntegralTestModel{dim},
    integrand::Vars,
    state::Vars,
    aux::Vars,
) where {dim}
    x, y, z = aux.coord
    integrand.a = x + (dim == 3 ? y : 0)
    integrand.b = 2 * x + sin(x) * y - (z - 1)^2 * y^2
end

@inline function integral_set_auxiliary_state!(
    m::IntegralTestModel,
    aux::Vars,
    integral::Vars,
)
    aux.int.a = integral.a
    aux.int.b = integral.b
end

# The reverse integral re-loads the forward result so that
# rev_int = int(top) - int.
@inline function reverse_integral_load_auxiliary_state!(
    m::IntegralTestModel,
    integral::Vars,
    state::Vars,
    aux::Vars,
)
    integral.a = aux.int.a
    integral.b = aux.int.b
end

@inline function reverse_integral_set_auxiliary_state!(
    m::IntegralTestModel,
    aux::Vars,
    integral::Vars,
)
    aux.rev_int.a = integral.a
    aux.rev_int.b = integral.b
end

using Test
# Compare computed stack integrals (auxiliary fields 1-4) against the analytic
# values (fields 8-11); the N = 0 vertical-FV case needs face-to-center
# averaging before comparison.
function test_run(mpicomm, dim, Ne, N, FT, ArrayType)
    connectivity = dim == 3 ? :full : :face
    brickrange = ntuple(j -> range(FT(0); length = Ne[j] + 1, stop = 3), dim)
    topl = StackedBrickTopology(
        mpicomm,
        brickrange,
        periodicity = ntuple(j -> true, dim),
        connectivity = connectivity,
    )
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )
    dg = DGModel(
        IntegralTestModel{dim}(),
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )
    Q = init_ode_state(dg, FT(0))
    dQdt = similar(Q)
    # evaluating the tendency triggers update_auxiliary_state! and hence the
    # stack integrals
    dg(dQdt, Q, nothing, 0.0)
    # Wrapping in Array ensures both GPU and CPU code use same approx
    if N[end] > 0
        # Forward integral a
        @test Array(dg.state_auxiliary.data[:, 1, :]) ≈
              Array(dg.state_auxiliary.data[:, 8, :])
        # Forward integral b
        @test Array(dg.state_auxiliary.data[:, 2, :]) ≈
              Array(dg.state_auxiliary.data[:, 9, :])
        # Reverse integral a
        @test Array(dg.state_auxiliary.data[:, 3, :]) ≈
              Array(dg.state_auxiliary.data[:, 10, :])
        # Reverse integral b
        @test Array(dg.state_auxiliary.data[:, 4, :]) ≈
              Array(dg.state_auxiliary.data[:, 11, :])
    else
        # For N = 0 we only compare the first integral which is the integral of
        # a vertical constant function; N = 0 can also integrate linears exactly
        # since we use the midpoint rule to compute the face value, but the
        # averaging procedure we use below does not work in this case
        Nq = polynomialorders(grid) .+ 1
        # Reshape the data array to be (dofs, vertical elem, horizontal elem)
        A_faces = reshape(
            Array(dg.state_auxiliary.data[:, 1, :]),
            prod(Nq),
            Ne[end], # Vertical element is fastest on stacked meshes
            prod(Ne[1:(end - 1)]), # Horizontal elements
        )
        A_center_exact = reshape(
            Array(dg.state_auxiliary.data[:, 8, :]),
            prod(Nq),
            Ne[end], # Vertical element is fastest on stacked meshes
            prod(Ne[1:(end - 1)]), # Horizontal elements
        )
        # With N = 0, the integral will return the values in the faces. Namely,
        # vertical element index `eV` will be the value of the integral on the
        # face ABOVE element `eV`. Namely,
        #    A_faces[n, eV, eH]
        # will be degree of freedom `n`, in horizontal element stack `eH`, and
        # face `eV + 1/2`.
        #
        # The exact values stored in `A_center_exact` are actually at the cell
        # centers because these are computed using the `init_state_auxiliary!`
        # which evaluates using the cell centers.
        #
        # This mismatch means we need to convert from faces to cell centers for
        # comparison, and we do this using averaging to go from faces to cell
        # centers.
        # Storage for the averaging
        A_center = similar(A_faces)
        # Bottom cell value is average of 0 and top face of cell
        A_center[:, 1, :] .= A_faces[:, 1, :] / 2
        # Remaining cells are average of the two faces
        A_center[:, 2:end, :, :] .=
            (A_faces[:, 1:(end - 1), :] + A_faces[:, 2:end, :]) / 2
        # Compare the exact and computed
        @test A_center ≈ A_center_exact
        # We do the same things for the reverse integral, the only difference is
        # now the values
        #    RA_faces[n, eV, eH]
        # will be degree of freedom `n`, in horizontal element stack `eH`, and
        # face `eV - 1/2` (e.g., the face below element `eV`)
        # Reshape the data array to be (dofs, vertical elm, horizontal elm)
        RA_faces = reshape(
            Array(dg.state_auxiliary.data[:, 3, :]),
            prod(Nq),
            Ne[end], # Vertical element is fastest on stacked meshes
            prod(Ne[1:(end - 1)]), # Horizontal elements
        )
        RA_center_exact = reshape(
            Array(dg.state_auxiliary.data[:, 10, :]),
            prod(Nq),
            Ne[end], # Vertical element is fastest on stacked meshes
            prod(Ne[1:(end - 1)]), # Horizontal elements
        )
        # Storage for the averaging
        RA_center = similar(RA_faces)
        # Top cell value is average of 0 and bottom face of cell
        RA_center[:, end, :] .= RA_faces[:, end, :] / 2
        # Remaining cells are average of the two faces
        RA_center[:, 1:(end - 1), :, :] .=
            (RA_faces[:, 1:(end - 1), :] + RA_faces[:, 2:end, :]) / 2
        # Compare the exact and computed
        @test RA_center ≈ RA_center_exact
    end
end

let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD
    numelem = (5, 6, 7)
    lvls = 1
    for polynomialorder in ((4, 4),
(4, 3), (4, 0)) for FT in (Float64,) for dim in 2:3 err = zeros(FT, lvls) for l in 1:lvls @info (ArrayType, FT, dim, polynomialorder) test_run( mpicomm, dim, ntuple(j -> 2^(l - 1) * numelem[j], dim), polynomialorder, FT, ArrayType, ) end end end end end nothing ================================================ FILE: test/Numerics/DGMethods/integral_test_sphere.jl ================================================ using MPI using StaticArrays using ClimateMachine using ClimateMachine.VariableTemplates using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.MPIStateArrays using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using Printf using LinearAlgebra using Logging using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, GradientFlux, UpwardIntegrals, DownwardIntegrals import ClimateMachine.BalanceLaws: vars_state, integral_load_auxiliary_state!, flux_first_order!, flux_second_order!, source!, wavespeed, update_auxiliary_state!, indefinite_stack_integral!, reverse_indefinite_stack_integral!, boundary_conditions, boundary_state!, compute_gradient_argument!, nodal_init_state_auxiliary!, init_state_prognostic!, integral_set_auxiliary_state!, reverse_integral_load_auxiliary_state!, reverse_integral_set_auxiliary_state! 
import ClimateMachine.DGMethods: init_ode_state
using ClimateMachine.Mesh.Geometry: LocalGeometry

# Balance law with no prognostic dynamics; it exercises the radial stack
# integrals on a stacked cubed-sphere shell between radii Rinner and Router.
struct IntegralTestSphereModel{T} <: BalanceLaw
    Rinner::T
    Router::T
end

# Recompute both radial stack integrals whenever the auxiliary state updates.
function update_auxiliary_state!(
    dg::DGModel,
    m::IntegralTestSphereModel,
    Q::MPIStateArray,
    t::Real,
    elems::UnitRange,
)
    indefinite_stack_integral!(dg, m, Q, dg.state_auxiliary, t, elems)
    reverse_indefinite_stack_integral!(dg, m, Q, dg.state_auxiliary, t, elems)
    return true
end

vars_state(::IntegralTestSphereModel, ::UpwardIntegrals, T) = @vars(v::T, r::T)
vars_state(::IntegralTestSphereModel, ::DownwardIntegrals, T) =
    @vars(v::T, r::T)
# Auxiliary state: computed integrals plus the radius `r` and the analytic
# profile `v` used to build the integrand.
vars_state(m::IntegralTestSphereModel, ::Auxiliary, T) = @vars(
    int::vars_state(m, UpwardIntegrals(), T),
    rev_int::vars_state(m, DownwardIntegrals(), T),
    r::T,
    v::T
)
vars_state(::IntegralTestSphereModel, ::Prognostic, T) = @vars()
vars_state(::IntegralTestSphereModel, ::GradientFlux, T) = @vars()

# No-op physics: only the stack integrals matter for this test.
flux_first_order!(::IntegralTestSphereModel, _...) = nothing
flux_second_order!(::IntegralTestSphereModel, _...) = nothing
source!(::IntegralTestSphereModel, _...) = nothing
boundary_conditions(::IntegralTestSphereModel) = (nothing,)
boundary_state!(_, ::Nothing, ::IntegralTestSphereModel, _...) = nothing
init_state_prognostic!(::IntegralTestSphereModel, _...) = nothing
wavespeed(::IntegralTestSphereModel, _...) = 1

# Store radius, angular profile `v`, and the analytic values of the upward and
# downward integrals of the integrand defined below.
function nodal_init_state_auxiliary!(
    m::IntegralTestSphereModel,
    aux::Vars,
    tmp::Vars,
    g::LocalGeometry,
)
    x, y, z = g.coord
    aux.r = hypot(x, y, z)
    θ = atan(y, x)
    ϕ = asin(z / aux.r)
    # Exact integral
    aux.v = 1 + cos(ϕ)^2 * sin(θ)^2 + sin(ϕ)^2
    aux.int.v = exp(-aux.v * aux.r^2) - exp(-aux.v * m.Rinner^2)
    aux.int.r = aux.r - m.Rinner
    aux.rev_int.v = exp(-aux.v * m.Router^2) - exp(-aux.v * aux.r^2)
    aux.rev_int.r = m.Router - aux.r
end

# Integrand: d/dr of exp(-v r^2) for `v`, and the constant 1 for `r`.
@inline function integral_load_auxiliary_state!(
    m::IntegralTestSphereModel,
    integrand::Vars,
    state::Vars,
    aux::Vars,
)
    integrand.v = -2 * aux.r * aux.v * exp(-aux.v * aux.r^2)
    integrand.r = 1
end

@inline function integral_set_auxiliary_state!(
    m::IntegralTestSphereModel,
    aux::Vars,
    integral::Vars,
)
    aux.int.v = integral.v
    aux.int.r = integral.r
end

# The reverse integral integrates the already-computed upward integral.
@inline function reverse_integral_load_auxiliary_state!(
    m::IntegralTestSphereModel,
    integral::Vars,
    state::Vars,
    aux::Vars,
)
    integral.v = aux.int.v
    integral.r = aux.int.r
end

@inline function reverse_integral_set_auxiliary_state!(
    m::IntegralTestSphereModel,
    aux::Vars,
    integral::Vars,
)
    aux.rev_int.v = integral.v
    aux.rev_int.r = integral.r
end

if !@isdefined integration_testing
    const integration_testing = parse(
        Bool,
        lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")),
    )
end

using Test
# Run the sphere integral test on the given topology and return the euclidean
# distance between the computed and exact auxiliary states.
function test_run(mpicomm, topl, ArrayType, N, FT, Rinner, Router)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = Topologies.equiangular_cubed_sphere_warp,
    )
    dg = DGModel(
        IntegralTestSphereModel(Rinner, Router),
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q = init_ode_state(dg, FT(0))
    dQdt = similar(Q)

    exact_aux = copy(dg.state_auxiliary)
    dg(dQdt, Q, nothing, 0.0)
    (int_r_ind, rev_int_r_ind) = varsindices(
        vars_state(dg.balance_law, Auxiliary(), FT),
        ("int.r", "rev_int.r"),
    )

    # Since N = 0 integrals live at the faces we need to average values to the
    # cell centers for comparison
    if N == 0
        nvertelem = topl.stacksize
        nhorzelem = div(length(topl.elems), nvertelem)
        naux = size(exact_aux, 2)
        ndof = size(exact_aux, 1)
        # Reshape the data array to be (dofs, naux, vertical elm, horizontal elm)
        aux =
            reshape(dg.state_auxiliary.data, (ndof, naux, nvertelem, nhorzelem))

        # average forward integrals to cell centers
        for ind in varsindices(
            vars_state(dg.balance_law, Auxiliary(), FT),
            ("int.r", "int.v"),
        )
            # Store the computed face values
            A_faces = aux[:, ind, :, :]
            # Bottom cell value is average of 0 and top face of cell
            aux[:, ind, 1, :] .= A_faces[:, 1, :] / 2
            # Remaining cells are average of the two faces
            aux[:, ind, 2:end, :] .=
                (A_faces[:, 1:(end - 1), :] + A_faces[:, 2:end, :]) / 2
        end

        # average reverse integrals to cell centers
        for ind in varsindices(
            vars_state(dg.balance_law, Auxiliary(), FT),
            ("rev_int.r", "rev_int.v"),
        )
            # Store the computed face values
            RA_faces = aux[:, ind, :, :]
            # Top cell value is average of 0 and the bottom face of the cell
            aux[:, ind, end, :] .= RA_faces[:, end, :] / 2
            # Remaining cells are average of the two faces
            aux[:, ind, 1:(end - 1), :] .=
                (RA_faces[:, 1:(end - 1), :] + RA_faces[:, 2:end, :]) / 2
        end
    end

    # We should be exact for the integral of ∫_{R_{inner}}^{r} 1
    @test exact_aux[:, int_r_ind, :] ≈ dg.state_auxiliary[:, int_r_ind, :]
    @test exact_aux[:, rev_int_r_ind, :] ≈
          dg.state_auxiliary[:, rev_int_r_ind, :]
    euclidean_distance(exact_aux, dg.state_auxiliary)
end

# Driver: refinement study against the recorded expected errors per order N.
let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()

    mpicomm = MPI.COMM_WORLD

    base_Nhorz = 4
    base_Nvert = 2
    Rinner = 1 // 2
    Router = 1

    expected_result = Dict()
    expected_result[0] = [
        2.2259657670562167e-02
        5.6063943176909315e-03
        1.4042479532664005e-03
        3.5122834187695408e-04
    ]
    expected_result[1] = [
        1.5934735012225074e-02
        4.0030667455285352e-03
        1.0020652111566574e-03
        2.5059856392475887e-04
    ]
    expected_result[4] = [
        4.662884229467401e-7,
        7.218989778540723e-9,
        1.1258613174916711e-10,
        1.7587739986848968e-12,
    ]

    lvls = integration_testing ? length(expected_result[4]) : 1

    for N in (0, 1, 4)
        for FT in (Float64,)
            err = zeros(FT, lvls)
            for l in 1:lvls
                @info (ArrayType, FT, "sphere", N, l)
                Nhorz = 2^(l - 1) * base_Nhorz
                Nvert = 2^(l - 1) * base_Nvert
                Rrange = grid1d(FT(Rinner), FT(Router); nelem = Nvert)
                topl = StackedCubedSphereTopology(mpicomm, Nhorz, Rrange)
                err[l] = test_run(
                    mpicomm,
                    topl,
                    ArrayType,
                    N,
                    FT,
                    FT(Rinner),
                    FT(Router),
                )
                @test expected_result[N][l] ≈ err[l] rtol = 1e-3 atol = eps(FT)
            end
            if lvls > 1
                @info begin
                    msg = "polynomialorder order $N"
                    for l in 1:(lvls - 1)
                        rate = log2(err[l]) - log2(err[l + 1])
                        msg *= @sprintf("\n rate for level %d = %e\n", l, rate)
                    end
                    msg
                end
            end
        end
    end
end

nothing

================================================
FILE: test/Numerics/DGMethods/remainder_model.jl
================================================
using Test
using LinearAlgebra
using MPI
using Random
using StaticArrays

using ClimateMachine
using ClimateMachine.Atmos
using ClimateMachine.BalanceLaws
using ClimateMachine.ConfigTypes
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Orientations
using Thermodynamics.TemperatureProfiles
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates

using CLIMAParameters
using CLIMAParameters.Planet: planet_radius
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

"""
    main()

Run this test problem
"""
function main()
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    polynomialorder = 5
    numelem_horz = 10
    numelem_vert = 5

    @testset "remainder model" begin
        for FT in (Float64,)# Float32)
            result = test_run(
                mpicomm,
                polynomialorder,
                numelem_horz,
                numelem_vert,
                ArrayType,
                FT,
            )
        end
    end
end

# Verify that splitting the full atmosphere tendency into acoustic pieces plus
# a remainder model reproduces the full tendency, and that the remainder's
# wavespeeds subtract the removed models' wavespeeds per direction.
function test_run(
    mpicomm,
    polynomialorder,
    numelem_horz,
    numelem_vert,
    ArrayType,
    FT,
)
    # Structure to pass around to setup the simulation
    setup = RemainderTestSetup{FT}()

    # Create the cubed sphere mesh
    _planet_radius::FT = planet_radius(param_set)
    vert_range = grid1d(
        _planet_radius,
        FT(_planet_radius + setup.domain_height),
        nelem = numelem_vert,
    )
    topology = StackedCubedSphereTopology(mpicomm, numelem_horz, vert_range)

    grid = DiscontinuousSpectralElementGrid(
        topology,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = polynomialorder,
        meshwarp = equiangular_cubed_sphere_warp,
    )

    T_profile = IsothermalProfile(param_set, setup.T_ref)

    # This is the base model which defines all the data (all other DGModels
    # for substepping components will piggy-back off of this models data)
    fullphysics = AtmosPhysics{FT}(
        param_set;
        ref_state = HydrostaticState(T_profile),
        turbulence = Vreman(FT(0.23)),
        moisture = DryModel(),
    )
    fullmodel = AtmosModel{FT}(
        AtmosLESConfigType,
        fullphysics;
        orientation = SphericalOrientation(),
        init_state_prognostic = setup,
        source = (Gravity(),),
    )
    dg = DGModel(
        fullmodel,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Random.seed!(1235)
    Q = init_ode_state(dg, FT(0); init_on_cpu = true)

    acousticmodel = AtmosAcousticGravityLinearModel(fullmodel)

    # Acoustic DGModels in every / vertical-only / horizontal-only directions,
    # all sharing the full model's auxiliary state.
    acoustic_dg = DGModel(
        acousticmodel,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient();
        direction = EveryDirection(),
        state_auxiliary = dg.state_auxiliary,
    )
    vacoustic_dg = DGModel(
        acousticmodel,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient();
        direction = VerticalDirection(),
        state_auxiliary = dg.state_auxiliary,
    )
    hacoustic_dg = DGModel(
        acousticmodel,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient();
        direction = HorizontalDirection(),
        state_auxiliary = dg.state_auxiliary,
    )

    # Create some random data to check the wavespeed function with
    nM = rand(3)
    nM /= norm(nM)
    state_prognostic =
        Vars{vars_state(dg.balance_law, Prognostic(), FT)}(rand(
            FT,
            number_states(dg.balance_law, Prognostic()),
        ))
    state_auxiliary =
        Vars{vars_state(dg.balance_law, Auxiliary(), FT)}(rand(
            FT,
            number_states(dg.balance_law, Auxiliary()),
        ))
    full_wavespeed = wavespeed(
        dg.balance_law,
        nM,
        state_prognostic,
        state_auxiliary,
        FT(0),
        (EveryDirection(),),
    )
    acoustic_wavespeed = wavespeed(
        acoustic_dg.balance_law,
        nM,
        state_prognostic,
        state_auxiliary,
        FT(0),
        (EveryDirection(),),
    )

    # Evaluate the full tendency
    full_tendency = similar(Q)
    dg(full_tendency, Q, nothing, 0; increment = false)

    # Evaluate various splittings
    split_tendency = similar(Q)

    # Check pulling acoustic model out
    @testset "full acoustic" begin
        rem_dg = remainder_DGModel(
            dg,
            (acoustic_dg,);
            numerical_flux_first_order = (
                dg.numerical_flux_first_order,
                (acoustic_dg.numerical_flux_first_order,),
            ),
        )
        rem_dg(split_tendency, Q, nothing, 0; increment = false)
        acoustic_dg(split_tendency, Q, nothing, 0; increment = true)

        A = Array(full_tendency.data)
        B = Array(split_tendency.data)

        # Test that we have a fully discrete splitting
        @test all(isapprox.(
            A,
            B,
            rtol = sqrt(eps(FT)),
            atol = 10 * sqrt(eps(FT)),
        ))

        # Test that wavespeeds are split by direction
        every_wavespeed = full_wavespeed .- acoustic_wavespeed
        horz_wavespeed = -zero(FT)
        vert_wavespeed = -zero(FT)
        @test all(
            every_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(),),
            ),
        )
        @test all(
            horz_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (HorizontalDirection(),),
            ),
        )
        @test all(
            vert_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (VerticalDirection(),),
            ),
        )
        @test all(
            every_wavespeed .+ horz_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(), HorizontalDirection()),
            ),
        )
        @test all(
            every_wavespeed .+ vert_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(), VerticalDirection()),
            ),
        )
    end

    # Check pulling acoustic model but as two pieces
    @testset "horizontal and vertical acoustic" begin
        rem_dg = remainder_DGModel(
            dg,
            (hacoustic_dg, vacoustic_dg);
            numerical_flux_first_order = (
                dg.numerical_flux_first_order,
                (
                    hacoustic_dg.numerical_flux_first_order,
                    vacoustic_dg.numerical_flux_first_order,
                ),
            ),
        )
        rem_dg(split_tendency, Q, nothing, 0; increment = false)
        vacoustic_dg(split_tendency, Q, nothing, 0; increment = true)
        hacoustic_dg(split_tendency, Q, nothing, 0; increment = true)

        A = Array(full_tendency.data)
        B = Array(split_tendency.data)

        # Test that we have a fully discrete splitting
        @test all(isapprox.(
            A,
            B,
            rtol = sqrt(eps(FT)),
            atol = 10 * sqrt(eps(FT)),
        ))

        # Test that wavespeeds are split by direction
        every_wavespeed = full_wavespeed
        horz_wavespeed = -acoustic_wavespeed
        vert_wavespeed = -acoustic_wavespeed
        @test all(
            every_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(),),
            ),
        )
        @test all(
            horz_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (HorizontalDirection(),),
            ),
        )
        @test all(
            vert_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (VerticalDirection(),),
            ),
        )
        @test all(
            every_wavespeed .+ horz_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(), HorizontalDirection()),
            ),
        )
        @test all(
            every_wavespeed .+ vert_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(), VerticalDirection()),
            ),
        )
    end

    # Check pulling horizontal acoustic model
    @testset "horizontal acoustic" begin
        rem_dg = remainder_DGModel(
            dg,
            (hacoustic_dg,);
            numerical_flux_first_order = (
                dg.numerical_flux_first_order,
                (hacoustic_dg.numerical_flux_first_order,),
            ),
        )
        rem_dg(split_tendency, Q, nothing, 0; increment = false)
        hacoustic_dg(split_tendency, Q, nothing, 0; increment = true)

        A = Array(full_tendency.data)
        B = Array(split_tendency.data)

        # Test that we have a fully discrete splitting
        @test all(isapprox.(
            A,
            B,
            rtol = sqrt(eps(FT)),
            atol = 10 * sqrt(eps(FT)),
        ))

        # Test that wavespeeds are split by direction
        every_wavespeed = full_wavespeed
        horz_wavespeed = -acoustic_wavespeed
        vert_wavespeed = -zero(eltype(FT))
        @test all(
            every_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(),),
            ),
        )
        @test all(
            horz_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (HorizontalDirection(),),
            ),
        )
        @test all(
            vert_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (VerticalDirection(),),
            ),
        )
        @test all(
            every_wavespeed .+ horz_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(), HorizontalDirection()),
            ),
        )
        @test all(
            every_wavespeed .+ vert_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(), VerticalDirection()),
            ),
        )
    end

    # Check pulling vertical acoustic model
    @testset "vertical acoustic" begin
        rem_dg = remainder_DGModel(
            dg,
            (vacoustic_dg,);
            numerical_flux_first_order = (
                dg.numerical_flux_first_order,
                (vacoustic_dg.numerical_flux_first_order,),
            ),
        )
        rem_dg(split_tendency, Q, nothing, 0; increment = false)
        vacoustic_dg(split_tendency, Q, nothing, 0; increment = true)

        A = Array(full_tendency.data)
        B = Array(split_tendency.data)

        # Test that we have a fully discrete splitting
        @test all(isapprox.(
            A,
            B,
            rtol = sqrt(eps(FT)),
            atol = 10 * sqrt(eps(FT)),
        ))

        # Test that wavespeeds are split by direction
        every_wavespeed = full_wavespeed
        horz_wavespeed = -zero(eltype(FT))
        vert_wavespeed = -acoustic_wavespeed
        @test all(
            every_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(),),
            ),
        )
        @test all(
            horz_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (HorizontalDirection(),),
            ),
        )
        @test all(
            vert_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (VerticalDirection(),),
            ),
        )
        @test all(
            every_wavespeed .+ horz_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(), HorizontalDirection()),
            ),
        )
        @test all(
            every_wavespeed .+ vert_wavespeed .≈ wavespeed(
                rem_dg.balance_law,
                nM,
                state_prognostic,
                state_auxiliary,
                FT(0),
                (EveryDirection(), VerticalDirection()),
            ),
        )
    end
end

# Setup parameters and the initial-condition functor for the test problem.
Base.@kwdef struct RemainderTestSetup{FT}
    domain_height::FT = 10e3
    T_ref::FT = 300
end

function (setup::RemainderTestSetup)(problem, bl, state, aux, localgeo, t)
    FT = eltype(state)
    # Vary around the reference state by 10% and a random velocity field
    state.ρ = (4 + rand(FT)) / 5 + aux.ref_state.ρ
    state.ρu = 2 * (@SVector rand(FT, 3)) .- 1
    state.energy.ρe = (4 + rand(FT)) / 5 + aux.ref_state.ρe
    nothing
end

main()

================================================
FILE: test/Numerics/DGMethods/vars_test.jl
================================================
using MPI
using StaticArrays
using ClimateMachine
using ClimateMachine.VariableTemplates
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.MPIStateArrays
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using Printf
using LinearAlgebra
using Logging

using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, GradientFlux

import ClimateMachine.BalanceLaws:
    vars_state,
    flux_first_order!,
    flux_second_order!,
    source!,
    wavespeed,
    update_auxiliary_state!,
    boundary_state!,
    nodal_init_state_auxiliary!,
    init_state_prognostic!
import ClimateMachine.DGMethods: init_ode_state
using ClimateMachine.Mesh.Geometry: LocalGeometry

# Balance law with no dynamics; it checks that `Vars` views into prognostic
# and auxiliary state arrays line up with the underlying data layout.
struct VarsTestModel{dim} <: BalanceLaw end

vars_state(::VarsTestModel, ::Prognostic, T) = @vars(x::T, coord::SVector{3, T})
vars_state(m::VarsTestModel, ::Auxiliary, T) =
    @vars(coord::SVector{3, T}, polynomial::T)
vars_state(m::VarsTestModel, ::GradientFlux, T) = @vars()

# No-op physics: only the variable layout matters for this test.
flux_first_order!(::VarsTestModel, _...) = nothing
flux_second_order!(::VarsTestModel, _...) = nothing
source!(::VarsTestModel, _...) = nothing
boundary_state!(_, ::VarsTestModel, _...) = nothing
wavespeed(::VarsTestModel, _...) = 1

# Prognostic state: first coordinate in `x` and the full coordinate vector.
function init_state_prognostic!(
    m::VarsTestModel,
    state::Vars,
    aux::Vars,
    localgeo,
    t::Real,
)
    @inbounds state.x = localgeo.coord[1]
    state.coord = localgeo.coord
end

# Auxiliary state: node coordinates and a simple symmetric polynomial of them.
function nodal_init_state_auxiliary!(
    ::VarsTestModel{dim},
    aux::Vars,
    tmp::Vars,
    g::LocalGeometry,
) where {dim}
    x, y, z = aux.coord = g.coord
    aux.polynomial = x * y + x * z + y * z
end

using Test
# Build a periodic brick grid and check that the named `Vars` views of `Q` and
# the auxiliary state match the expected raw data columns.
function test_run(mpicomm, dim, Ne, N, FT, ArrayType)
    brickrange = ntuple(j -> range(FT(0); length = Ne[j] + 1, stop = 3), dim)
    connectivity = dim == 3 ? :full : :face
    topl = StackedBrickTopology(
        mpicomm,
        brickrange,
        periodicity = ntuple(j -> true, dim),
        connectivity = connectivity,
    )

    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )
    dg = DGModel(
        VarsTestModel{dim}(),
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q = init_ode_state(dg, FT(0))

    @test Array(Q.x)[:, 1, :] == Array(Q.coord)[:, 1, :]
    @test Array(dg.state_auxiliary.coord) == Array(Q.coord)
    x = Array(Q.coord)[:, 1, :]
    y = Array(Q.coord)[:, 2, :]
    z = Array(Q.coord)[:, 3, :]
    @test Array(dg.state_auxiliary.polynomial)[:, 1, :] ≈
          x .* y + x .* z + y .* z
end

# Driver: run the vars layout test in 2-D and 3-D.
let
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    numelem = (5, 5, 5)
    lvls = 1
    polynomialorder = 4

    for FT in (Float64,) #Float32)
        for dim in 2:3
            err = zeros(FT, lvls)
            for l in 1:lvls
                @info (ArrayType, FT, dim)
                test_run(
                    mpicomm,
                    dim,
                    ntuple(j -> 2^(l - 1) * numelem[j], dim),
                    polynomialorder,
                    FT,
                    ArrayType,
                )
            end
        end
    end
end

nothing

================================================
FILE: test/Numerics/ESDGMethods/DryAtmos/DryAtmos.jl
================================================
using ClimateMachine.VariableTemplates: Vars, Grad, @vars
using ClimateMachine.BalanceLaws
import ClimateMachine.BalanceLaws:
    BalanceLaw,
    vars_state,
    state_to_entropy_variables!,
    entropy_variables_to_state!,
    nodal_init_state_auxiliary!,
    init_state_prognostic!,
    state_to_entropy,
    boundary_conditions,
    boundary_state!,
    wavespeed,
    flux_first_order!,
    source!
using StaticArrays
using LinearAlgebra: dot, I
using ClimateMachine.DGMethods.NumericalFluxes: NumericalFluxFirstOrder
import ClimateMachine.DGMethods.NumericalFluxes:
    EntropyConservative,
    numerical_volume_conservative_flux_first_order!,
    numerical_volume_fluctuation_flux_first_order!,
    ave,
    logave,
    numerical_flux_first_order!,
    numerical_flux_second_order!,
    numerical_boundary_flux_second_order!
using ClimateMachine.Orientations: Orientation, FlatOrientation, SphericalOrientation using ClimateMachine.Atmos: NoReferenceState using ClimateMachine.Grids using CLIMAParameters: AbstractEarthParameterSet using CLIMAParameters.Planet: grav, R_d, cp_d, cv_d, planet_radius, MSLP, Omega struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() const total_energy = false const fluctuation_gravity = false @inline gamma(ps::EarthParameterSet) = cp_d(ps) / cv_d(ps) abstract type AbstractDryAtmosProblem end struct DryAtmosModel{D, O, P, RS, S, DS} <: BalanceLaw orientation::O problem::P ref_state::RS sources::S drag_source::DS end function DryAtmosModel{D}( orientation, problem::AbstractDryAtmosProblem; ref_state = NoReferenceState(), sources = (), drag_source = NoDrag(), ) where {D} O = typeof(orientation) P = typeof(problem) RS = typeof(ref_state) S = typeof(sources) DS = typeof(drag_source) DryAtmosModel{D, O, P, RS, S, DS}( orientation, problem, ref_state, sources, drag_source, ) end boundary_conditions(::DryAtmosModel) = (1, 2) # XXX: Hack for Impenetrable. # This is NOT entropy stable / conservative!!!! function boundary_state!( ::NumericalFluxFirstOrder, bctype, ::DryAtmosModel, state⁺, aux⁺, n, state⁻, aux⁻, _..., ) state⁺.ρ = state⁻.ρ state⁺.ρu -= 2 * dot(state⁻.ρu, n) .* SVector(n) state⁺.ρe = state⁻.ρe aux⁺.Φ = aux⁻.Φ end function init_state_prognostic!(m::DryAtmosModel, args...) init_state_prognostic!(m, m.problem, args...) 
end function nodal_init_state_auxiliary!( m::DryAtmosModel, state_auxiliary, tmp, geom, ) init_state_auxiliary!(m, m.orientation, state_auxiliary, geom) init_state_auxiliary!(m, m.ref_state, state_auxiliary, geom) init_state_auxiliary!(m, m.problem, state_auxiliary, geom) end function altitude(::DryAtmosModel{dim}, ::FlatOrientation, geom) where {dim} @inbounds geom.coord[dim] end function altitude(::DryAtmosModel, ::SphericalOrientation, geom) FT = eltype(geom) _planet_radius::FT = planet_radius(param_set) norm(geom.coord) - _planet_radius end """ init_state_auxiliary!( m::DryAtmosModel, aux::Vars, geom::LocalGeometry ) Initialize geopotential for the `DryAtmosModel`. """ function init_state_auxiliary!( ::DryAtmosModel{dim}, ::FlatOrientation, state_auxiliary, geom, ) where {dim} FT = eltype(state_auxiliary) _grav = FT(grav(param_set)) @inbounds r = geom.coord[dim] state_auxiliary.Φ = _grav * r state_auxiliary.∇Φ = dim == 2 ? SVector{3, FT}(0, _grav, 0) : SVector{3, FT}(0, 0, _grav) end function init_state_auxiliary!( ::DryAtmosModel, ::SphericalOrientation, state_auxiliary, geom, ) FT = eltype(state_auxiliary) _grav = FT(grav(param_set)) r = norm(geom.coord) state_auxiliary.Φ = _grav * r state_auxiliary.∇Φ = _grav * geom.coord / r end function init_state_auxiliary!( ::DryAtmosModel, ::NoReferenceState, state_auxiliary, geom, ) end function init_state_auxiliary!( ::DryAtmosModel, ::AbstractDryAtmosProblem, state_auxiliary, geom, ) end struct DryReferenceState{TP} temperature_profile::TP end vars_state(::DryAtmosModel, ::DryReferenceState, ::Auxiliary, FT) = @vars(T::FT, p::FT, ρ::FT, ρe::FT) vars_state(::DryAtmosModel, ::NoReferenceState, ::Auxiliary, FT) = @vars() function init_state_auxiliary!( m::DryAtmosModel, ref_state::DryReferenceState, state_auxiliary, geom, ) FT = eltype(state_auxiliary) z = altitude(m, m.orientation, geom) T, p = ref_state.temperature_profile(param_set, z) _R_d::FT = R_d(param_set) ρ = p / (_R_d * T) Φ = state_auxiliary.Φ ρu = SVector{3, 
FT}(0, 0, 0) state_auxiliary.ref_state.T = T state_auxiliary.ref_state.p = p state_auxiliary.ref_state.ρ = ρ state_auxiliary.ref_state.ρe = totalenergy(ρ, ρu, p, Φ) end @inline function flux_first_order!( m::DryAtmosModel, flux::Grad, state::Vars, aux::Vars, t::Real, direction, ) ρ = state.ρ ρinv = 1 / ρ ρu = state.ρu ρe = state.ρe u = ρinv * ρu Φ = aux.Φ p = pressure(ρ, ρu, ρe, Φ) flux.ρ = ρ * u flux.ρu = p * I + ρ * u .* u' flux.ρe = u * (state.ρe + p) end function wavespeed( ::DryAtmosModel, nM, state::Vars, aux::Vars, t::Real, direction, ) ρ = state.ρ ρu = state.ρu ρe = state.ρe Φ = aux.Φ p = pressure(ρ, ρu, ρe, Φ) u = ρu / ρ uN = abs(dot(nM, u)) return uN + soundspeed(ρ, p) end """ pressure(ρ, ρu, ρe, Φ) Compute the pressure given density `ρ`, momentum `ρu`, total energy `ρe`, and gravitational potential `Φ`. """ function pressure(ρ, ρu, ρe, Φ) FT = eltype(ρ) γ = FT(gamma(param_set)) if total_energy (γ - 1) * (ρe - dot(ρu, ρu) / 2ρ - ρ * Φ) else (γ - 1) * (ρe - dot(ρu, ρu) / 2ρ) end end """ totalenergy(ρ, ρu, p, Φ) Compute the total energy given density `ρ`, momentum `ρu`, pressure `p`, and gravitational potential `Φ`. """ function totalenergy(ρ, ρu, p, Φ) FT = eltype(ρ) γ = FT(gamma(param_set)) if total_energy return p / (γ - 1) + dot(ρu, ρu) / 2ρ + ρ * Φ else return p / (γ - 1) + dot(ρu, ρu) / 2ρ end end """ soundspeed(ρ, p) Compute the speed of sound from the density `ρ` and pressure `p`. 
""" function soundspeed(ρ, p) FT = eltype(ρ) γ = FT(gamma(param_set)) sqrt(γ * p / ρ) end """ vars_state(::DryAtmosModel, ::Prognostic, FT) The prognostic state variables for the `DryAtmosModel` are density `ρ`, momentum `ρu`, and total energy `ρe` """ function vars_state(::DryAtmosModel, ::Prognostic, FT) @vars begin ρ::FT ρu::SVector{3, FT} ρe::FT end end """ vars_state(::DryAtmosModel, ::Auxiliary, FT) The auxiliary variables for the `DryAtmosModel` is gravitational potential `Φ` """ function vars_state(m::DryAtmosModel, st::Auxiliary, FT) @vars begin Φ::FT ∇Φ::SVector{3, FT} # TODO: only needed for the linear model ref_state::vars_state(m, m.ref_state, st, FT) problem::vars_state(m, m.problem, st, FT) end end vars_state(::DryAtmosModel, ::AbstractDryAtmosProblem, ::Auxiliary, FT) = @vars() """ vars_state(::DryAtmosModel, ::Entropy, FT) The entropy variables for the `DryAtmosModel` correspond to the state variables density `ρ`, momentum `ρu`, and total energy `ρe` as well as the auxiliary variable gravitational potential `Φ` """ function vars_state(::DryAtmosModel, ::Entropy, FT) @vars begin ρ::FT ρu::SVector{3, FT} ρe::FT Φ::FT end end """ state_to_entropy_variables!( ::DryAtmosModel, entropy::Vars, state::Vars, aux::Vars, ) See [`BalanceLaws.state_to_entropy_variables!`](@ref) """ function state_to_entropy_variables!( ::DryAtmosModel, entropy::Vars, state::Vars, aux::Vars, ) ρ, ρu, ρe, Φ = state.ρ, state.ρu, state.ρe, aux.Φ FT = eltype(state) γ = FT(gamma(param_set)) p = pressure(ρ, ρu, ρe, Φ) s = log(p / ρ^γ) b = ρ / 2p u = ρu / ρ if total_energy entropy.ρ = (γ - s) / (γ - 1) - (dot(u, u) - 2Φ) * b else entropy.ρ = (γ - s) / (γ - 1) - (dot(u, u)) * b end entropy.ρu = 2b * u entropy.ρe = -2b entropy.Φ = 2ρ * b end """ entropy_variables_to_state!( ::DryAtmosModel, state::Vars, aux::Vars, entropy::Vars, ) See [`BalanceLaws.entropy_variables_to_state!`](@ref) """ function entropy_variables_to_state!( ::DryAtmosModel, state::Vars, aux::Vars, entropy::Vars, ) FT = 
eltype(state)
    β = entropy
    γ = FT(gamma(param_set))

    # Invert the entropy-variable map (cf. `state_to_entropy_variables!`):
    # recover b, then ρ, ρu, p, s, Φ, ρe in sequence.
    b = -β.ρe / 2
    ρ = β.Φ / (2b)
    ρu = ρ * β.ρu / (2b)
    p = ρ / (2b)
    s = log(p / ρ^γ)
    Φ =
        dot(ρu, ρu) / (2 * ρ^2) - ((γ - s) / (γ - 1) - β.ρ) / (2b)
    ρe = p / (γ - 1) + dot(ρu, ρu) / (2ρ) + ρ * Φ

    state.ρ = ρ
    state.ρu = ρu
    state.ρe = ρe
    aux.Φ = Φ
end

# Pointwise mathematical entropy η = -ρ s / (γ - 1) with s = log(p / ρ^γ).
function state_to_entropy(::DryAtmosModel, state::Vars, aux::Vars)
    FT = eltype(state)
    ρ, ρu, ρe, Φ = state.ρ, state.ρu, state.ρe, aux.Φ
    p = pressure(ρ, ρu, ρe, Φ)
    γ = FT(gamma(param_set))
    s = log(p / ρ^γ)
    η = -ρ * s / (γ - 1)
    return η
end

# Entropy-conservative two-point volume flux: accumulates into `F` the flux
# built from arithmetic averages (`ave`) and logarithmic averages (`logave`)
# of the two states.
function numerical_volume_conservative_flux_first_order!(
    ::EntropyConservative,
    ::DryAtmosModel,
    F::Grad,
    state_1::Vars,
    aux_1::Vars,
    state_2::Vars,
    aux_2::Vars,
)
    FT = eltype(F)
    ρ_1, ρu_1, ρe_1 = state_1.ρ, state_1.ρu, state_1.ρe
    ρ_2, ρu_2, ρe_2 = state_2.ρ, state_2.ρu, state_2.ρe
    Φ_1, Φ_2 = aux_1.Φ, aux_2.Φ
    u_1 = ρu_1 / ρ_1
    u_2 = ρu_2 / ρ_2
    p_1 = pressure(ρ_1, ρu_1, ρe_1, Φ_1)
    p_2 = pressure(ρ_2, ρu_2, ρe_2, Φ_2)
    b_1 = ρ_1 / 2p_1
    b_2 = ρ_2 / 2p_2

    # Averaged quantities used by the flux.
    ρ_avg = ave(ρ_1, ρ_2)
    u_avg = ave(u_1, u_2)
    b_avg = ave(b_1, b_2)
    Φ_avg = ave(Φ_1, Φ_2)
    usq_avg = ave(dot(u_1, u_1), dot(u_2, u_2))
    ρ_log = logave(ρ_1, ρ_2)
    b_log = logave(b_1, b_2)
    # NOTE(review): α is computed here but never used in this function.
    α = b_avg * ρ_log / 2b_1

    γ = FT(gamma(param_set))
    Fρ = u_avg * ρ_log
    # Outer product u_avg ⊗ Fρ plus isotropic pressure-like part.
    Fρu = u_avg * Fρ' + ρ_avg / 2b_avg * I
    # `total_energy` (global flag) controls whether Φ_avg enters the energy
    # flux.
    if total_energy
        Fρe =
            (1 / (2 * (γ - 1) * b_log) - usq_avg / 2 + Φ_avg) * Fρ +
            Fρu * u_avg
    else
        Fρe = (1 / (2 * (γ - 1) * b_log) - usq_avg / 2) * Fρ + Fρu * u_avg
    end

    F.ρ += Fρ
    F.ρu += Fρu
    F.ρe += Fρe
end

# Fluctuation (nonconservative geopotential) contribution; active only when
# the global flag `fluctuation_gravity` is set.
function numerical_volume_fluctuation_flux_first_order!(
    ::NumericalFluxFirstOrder,
    ::DryAtmosModel,
    D::Grad,
    state_1::Vars,
    aux_1::Vars,
    state_2::Vars,
    aux_2::Vars,
)
    if fluctuation_gravity
        FT = eltype(D)
        ρ_1, ρu_1, ρe_1 = state_1.ρ, state_1.ρu, state_1.ρe
        ρ_2, ρu_2, ρe_2 = state_2.ρ, state_2.ρu, state_2.ρe
        Φ_1, Φ_2 = aux_1.Φ, aux_2.Φ
        p_1 = pressure(ρ_1, ρu_1, ρe_1, Φ_1)
        p_2 = pressure(ρ_2, ρu_2, ρe_2, Φ_2)
        b_1 = ρ_1 / 2p_1
        b_2 = ρ_2 / 2p_2
        ρ_log = logave(ρ_1, ρ_2)
        b_avg = ave(b_1, b_2)
        α = b_avg * ρ_log / 2b_1
D.ρu -= α * (Φ_1 - Φ_2) * I
    end
end

# Simple average of the two states' analytic fluxes (for comparison against
# the split-form fluxes below).
struct CentralVolumeFlux <: NumericalFluxFirstOrder end
function numerical_volume_conservative_flux_first_order!(
    ::CentralVolumeFlux,
    m::DryAtmosModel,
    F::Grad,
    state_1::Vars,
    aux_1::Vars,
    state_2::Vars,
    aux_2::Vars,
)
    FT = eltype(F)
    F_1 = similar(F)
    flux_first_order!(m, F_1, state_1, aux_1, FT(0), EveryDirection())
    F_2 = similar(F)
    flux_first_order!(m, F_2, state_2, aux_2, FT(0), EveryDirection())
    # Arithmetic mean of the two pointwise fluxes.
    parent(F) .= (parent(F_1) .+ parent(F_2)) ./ 2
end

# Split-form volume flux built from arithmetic averages of ρ, u, e, and p
# (presumably Kennedy–Gruber-type, per the name — verify against reference).
struct KGVolumeFlux <: NumericalFluxFirstOrder end
function numerical_volume_conservative_flux_first_order!(
    ::KGVolumeFlux,
    m::DryAtmosModel,
    F::Grad,
    state_1::Vars,
    aux_1::Vars,
    state_2::Vars,
    aux_2::Vars,
)
    Φ_1 = aux_1.Φ
    ρ_1 = state_1.ρ
    ρu_1 = state_1.ρu
    ρe_1 = state_1.ρe
    u_1 = ρu_1 / ρ_1
    e_1 = ρe_1 / ρ_1
    p_1 = pressure(ρ_1, ρu_1, ρe_1, Φ_1)

    Φ_2 = aux_2.Φ
    ρ_2 = state_2.ρ
    ρu_2 = state_2.ρu
    ρe_2 = state_2.ρe
    u_2 = ρu_2 / ρ_2
    e_2 = ρe_2 / ρ_2
    p_2 = pressure(ρ_2, ρu_2, ρe_2, Φ_2)

    ρ_avg = ave(ρ_1, ρ_2)
    u_avg = ave(u_1, u_2)
    e_avg = ave(e_1, e_2)
    p_avg = ave(p_1, p_2)

    F.ρ = ρ_avg * u_avg
    F.ρu = p_avg * I + ρ_avg * u_avg .* u_avg'
    F.ρe = ρ_avg * u_avg * e_avg + p_avg * u_avg
end

# Coriolis source term: -2Ω × ρu with Ω aligned with the third coordinate.
struct Coriolis end
function source!(
    m::DryAtmosModel,
    ::Coriolis,
    source,
    state_prognostic,
    state_auxiliary,
)
    FT = eltype(state_prognostic)
    _Omega::FT = Omega(param_set)
    # note: this assumes a SphericalOrientation
    source.ρu -= SVector(0, 0, 2 * _Omega) × state_prognostic.ρu
end

# Dispatch over the model's tuple of source terms; the ntuple/Val pattern
# unrolls the loop so each source call can inline.
function source!(m::DryAtmosModel, source, state_prognostic, state_auxiliary)
    ntuple(Val(length(m.sources))) do s
        Base.@_inline_meta
        source!(m, m.sources[s], source, state_prognostic, state_auxiliary)
    end
end

# Entropy-conservative surface flux plus a Rusanov-style wavespeed penalty
# (assembled in the method body that continues below).
struct EntropyConservativeWithPenalty <: NumericalFluxFirstOrder end
function numerical_flux_first_order!(
    numerical_flux::EntropyConservativeWithPenalty,
    balance_law::BalanceLaw,
    fluxᵀn::Vars{S},
    normal_vector::SVector,
    state_prognostic⁻::Vars{S},
    state_auxiliary⁻::Vars{A},
    state_prognostic⁺::Vars{S},
    state_auxiliary⁺::Vars{A},
    t,
direction, ) where {S, A} FT = eltype(fluxᵀn) numerical_flux_first_order!( EntropyConservative(), balance_law, fluxᵀn, normal_vector, state_prognostic⁻, state_auxiliary⁻, state_prognostic⁺, state_auxiliary⁺, t, direction, ) fluxᵀn = parent(fluxᵀn) wavespeed⁻ = wavespeed( balance_law, normal_vector, state_prognostic⁻, state_auxiliary⁻, t, direction, ) wavespeed⁺ = wavespeed( balance_law, normal_vector, state_prognostic⁺, state_auxiliary⁺, t, direction, ) max_wavespeed = max.(wavespeed⁻, wavespeed⁺) penalty = max_wavespeed .* (parent(state_prognostic⁻) - parent(state_prognostic⁺)) fluxᵀn .+= penalty / 2 end Base.@kwdef struct MatrixFlux{FT} <: NumericalFluxFirstOrder Mcut::FT = 0 low_mach::Bool = false kinetic_energy_preserving::Bool = false end function numerical_flux_first_order!( numerical_flux::MatrixFlux, balance_law::BalanceLaw, fluxᵀn::Vars{S}, normal_vector::SVector, state_prognostic⁻::Vars{S}, state_auxiliary⁻::Vars{A}, state_prognostic⁺::Vars{S}, state_auxiliary⁺::Vars{A}, t, direction, ) where {S, A} FT = eltype(fluxᵀn) numerical_flux_first_order!( EntropyConservative(), balance_law, fluxᵀn, normal_vector, state_prognostic⁻, state_auxiliary⁻, state_prognostic⁺, state_auxiliary⁺, t, direction, ) fluxᵀn = parent(fluxᵀn) γ = FT(gamma(param_set)) low_mach = numerical_flux.low_mach Mcut = numerical_flux.Mcut kinetic_energy_preserving = numerical_flux.kinetic_energy_preserving ω = FT(π) / 3 δ = FT(π) / 5 random_unit_vector = SVector(sin(ω) * cos(δ), cos(ω) * cos(δ), sin(δ)) # tangent space basis τ1 = random_unit_vector × normal_vector τ2 = τ1 × normal_vector ρ⁻ = state_prognostic⁻.ρ ρu⁻ = state_prognostic⁻.ρu ρe⁻ = state_prognostic⁻.ρe Φ⁻ = state_auxiliary⁻.Φ u⁻ = ρu⁻ / ρ⁻ p⁻ = pressure(ρ⁻, ρu⁻, ρe⁻, Φ⁻) β⁻ = ρ⁻ / 2p⁻ Φ⁺ = state_auxiliary⁺.Φ ρ⁺ = state_prognostic⁺.ρ ρu⁺ = state_prognostic⁺.ρu ρe⁺ = state_prognostic⁺.ρe u⁺ = ρu⁺ / ρ⁺ p⁺ = pressure(ρ⁺, ρu⁺, ρe⁺, Φ⁺) β⁺ = ρ⁺ / 2p⁺ ρ_log = logave(ρ⁻, ρ⁺) β_log = logave(β⁻, β⁺) if total_energy Φ_avg = ave(Φ⁻, Φ⁺) 
else Φ_avg = 0 end u_avg = ave.(u⁻, u⁺) p_avg = ave(ρ⁻, ρ⁺) / 2ave(β⁻, β⁺) u²_bar = 2 * sum(u_avg .^ 2) - sum(ave(u⁻ .^ 2, u⁺ .^ 2)) h_bar = γ / (2 * β_log * (γ - 1)) + u²_bar / 2 + Φ_avg c_bar = sqrt(γ * p_avg / ρ_log) umc = u_avg - c_bar * normal_vector upc = u_avg + c_bar * normal_vector u_avgᵀn = u_avg' * normal_vector R = hcat( SVector(1, umc[1], umc[2], umc[3], h_bar - c_bar * u_avgᵀn), SVector(1, u_avg[1], u_avg[2], u_avg[3], u²_bar / 2 + Φ_avg), SVector(0, τ1[1], τ1[2], τ1[3], τ1' * u_avg), SVector(0, τ2[1], τ2[2], τ2[3], τ2' * u_avg), SVector(1, upc[1], upc[2], upc[3], h_bar + c_bar * u_avgᵀn), ) if low_mach M = abs(u_avg' * normal_vector) / c_bar c_bar *= max(min(M, FT(1)), Mcut) end if kinetic_energy_preserving λl = abs(u_avgᵀn) + c_bar λr = λl else λl = abs(u_avgᵀn - c_bar) λr = abs(u_avgᵀn + c_bar) end Λ = SDiagonal(λl, abs(u_avgᵀn), abs(u_avgᵀn), abs(u_avgᵀn), λr) #Ξ = sqrt(abs((p⁺ - p⁻) / (p⁺ + p⁻))) #Λ = Ξ * abs(u_avgᵀn + c_bar) * I + (1 - Ξ) * ΛM T = SDiagonal(ρ_log / 2γ, ρ_log * (γ - 1) / γ, p_avg, p_avg, ρ_log / 2γ) entropy⁻ = similar(parent(state_prognostic⁻), Size(6)) state_to_entropy_variables!( balance_law, Vars{vars_state(balance_law, Entropy(), FT)}(entropy⁻), state_prognostic⁻, state_auxiliary⁻, ) entropy⁺ = similar(parent(state_prognostic⁺), Size(6)) state_to_entropy_variables!( balance_law, Vars{vars_state(balance_law, Entropy(), FT)}(entropy⁺), state_prognostic⁺, state_auxiliary⁺, ) Δentropy = parent(entropy⁺) - parent(entropy⁻) fluxᵀn .-= R * Λ * T * R' * Δentropy[SOneTo(5)] / 2 end function vertical_unit_vector(m::DryAtmosModel, aux::Vars) FT = eltype(aux) aux.∇Φ / FT(grav(param_set)) end norm_u(state::Vars, k̂::AbstractVector, ::VerticalDirection) = abs(dot(state.ρu, k̂)) / state.ρ norm_u(state::Vars, k̂::AbstractVector, ::HorizontalDirection) = norm((state.ρu .- dot(state.ρu, k̂) * k̂) / state.ρ) norm_u(state::Vars, k̂::AbstractVector, ::Direction) = norm(state.ρu / state.ρ) function advective_courant( m::DryAtmosModel, state::Vars, 
aux::Vars,
    diffusive::Vars,
    Δx,
    Δt,
    t,
    direction,
)
    # Advective CFL number: Δt * |u| / Δx, with |u| measured in the
    # requested direction (vertical / horizontal / full).
    k̂ = vertical_unit_vector(m, aux)
    normu = norm_u(state, k̂, direction)
    return Δt * normu / Δx
end

# Acoustic CFL number: Δt * (|u| + c) / Δx, where c is the local sound speed.
function nondiffusive_courant(
    m::DryAtmosModel,
    state::Vars,
    aux::Vars,
    diffusive::Vars,
    Δx,
    Δt,
    t,
    direction,
)
    ρ = state.ρ
    ρu = state.ρu
    ρe = state.ρe
    Φ = aux.Φ
    p = pressure(ρ, ρu, ρe, Φ)
    k̂ = vertical_unit_vector(m, aux)
    normu = norm_u(state, k̂, direction)
    ss = soundspeed(ρ, p)
    return Δt * (normu + ss) / Δx
end

# Forward to the model's configured drag source; `NoDrag` is the no-op
# default.
function drag_source!(m::DryAtmosModel, args...)
    drag_source!(m, m.drag_source, args...)
end
struct NoDrag end
drag_source!(m::DryAtmosModel, ::NoDrag, args...) = nothing

# Gravity source: momentum forcing -ρ∇Φ unless the geopotential is handled
# via the fluctuation flux (`fluctuation_gravity`), and an energy forcing
# -ρu·∇Φ when the energy variable excludes geopotential (`!total_energy`).
struct Gravity end
function source!(m::DryAtmosModel, ::Gravity, source, state, aux)
    ∇Φ = aux.∇Φ
    if !fluctuation_gravity
        source.ρu -= state.ρ * ∇Φ
    end
    if !total_energy
        source.ρe -= state.ρu' * ∇Φ
    end
end

# Numerical Flux
#=
numerical_flux_first_order!(::Nothing, _...) = nothing
numerical_boundary_flux_first_order!(::Nothing, _...) = nothing
numerical_flux_second_order!(::Nothing, _...) = nothing
numerical_boundary_flux_second_order!(::Nothing, _...) = nothing
=#
# No-op flux stubs so a flux slot may be disabled by passing `nothing`.
numerical_flux_first_order!(::Nothing, ::DryAtmosModel, _...) = nothing
numerical_flux_second_order!(::Nothing, ::DryAtmosModel, _...) = nothing
numerical_boundary_flux_second_order!(::Nothing, a, ::DryAtmosModel, _...)
= nothing include("linear.jl") # Throwing this in for convenience function cubedshellwarp(a, b, c, R = max(abs(a), abs(b), abs(c))) function f(sR, ξ, η) X, Y = tan(π * ξ / 4), tan(π * η / 4) x1 = sR / sqrt(X^2 + Y^2 + 1) x2, x3 = X * x1, Y * x1 x1, x2, x3 end fdim = argmax(abs.((a, b, c))) if fdim == 1 && a < 0 # (-R, *, *) : Face I from Ronchi, Iacono, Paolucci (1996) x1, x2, x3 = f(-R, b / a, c / a) elseif fdim == 2 && b < 0 # ( *,-R, *) : Face II from Ronchi, Iacono, Paolucci (1996) x2, x1, x3 = f(-R, a / b, c / b) elseif fdim == 1 && a > 0 # ( R, *, *) : Face III from Ronchi, Iacono, Paolucci (1996) x1, x2, x3 = f(R, b / a, c / a) elseif fdim == 2 && b > 0 # ( *, R, *) : Face IV from Ronchi, Iacono, Paolucci (1996) x2, x1, x3 = f(R, a / b, c / b) elseif fdim == 3 && c > 0 # ( *, *, R) : Face V from Ronchi, Iacono, Paolucci (1996) x3, x2, x1 = f(R, b / c, a / c) elseif fdim == 3 && c < 0 # ( *, *,-R) : Face VI from Ronchi, Iacono, Paolucci (1996) x3, x2, x1 = f(-R, b / c, a / c) else error("invalid case for cubedshellwarp: $a, $b, $c") end return x1, x2, x3 end ================================================ FILE: test/Numerics/ESDGMethods/DryAtmos/baroclinic_wave.jl ================================================ using ClimateMachine using ClimateMachine.ConfigTypes using ClimateMachine.Mesh.Topologies: StackedCubedSphereTopology, grid1d using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Filters using ClimateMachine.Atmos: AtmosFilterPerturbations using ClimateMachine.DGMethods: ESDGModel, init_ode_state, courant using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.ODESolvers using ClimateMachine.SystemSolvers using ClimateMachine.VTK: writevtk, writepvtu using ClimateMachine.GenericCallbacks: EveryXWallTimeSeconds, EveryXSimulationSteps using Thermodynamics: soundspeed_air using Thermodynamics.TemperatureProfiles using ClimateMachine.VariableTemplates: flattenednames using CLIMAParameters using CLIMAParameters.Planet: R_d, cv_d, Omega, 
planet_radius, MSLP import CLIMAParameters using MPI, Logging, StaticArrays, LinearAlgebra, Printf, Dates, Test using CUDA const output_vtk = false struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet(); #const X = 20 const X = 1 CLIMAParameters.Planet.planet_radius(::EarthParameterSet) = 6.371e6 / X CLIMAParameters.Planet.Omega(::EarthParameterSet) = 7.2921159e-5 * X # No this isn't great but w/e include("DryAtmos.jl") function sphr_to_cart_vec(vec, lat, lon) FT = eltype(vec) slat, clat = sin(lat), cos(lat) slon, clon = sin(lon), cos(lon) u = MVector{3, FT}( -slon * vec[1] - slat * clon * vec[2] + clat * clon * vec[3], clon * vec[1] - slat * slon * vec[2] + clat * slon * vec[3], clat * vec[2] + slat * vec[3], ) return u end struct BaroclinicWave <: AbstractDryAtmosProblem end function init_state_prognostic!( bl::DryAtmosModel, ::BaroclinicWave, state, aux, localgeo, t, ) coords = localgeo.coord FT = eltype(state) # parameters _grav::FT = grav(param_set) _R_d::FT = R_d(param_set) _cv_d::FT = cv_d(param_set) _Ω::FT = Omega(param_set) _a::FT = planet_radius(param_set) _p_0::FT = MSLP(param_set) k::FT = 3 T_E::FT = 310 T_P::FT = 240 T_0::FT = 0.5 * (T_E + T_P) Γ::FT = 0.005 A::FT = 1 / Γ B::FT = (T_0 - T_P) / T_0 / T_P C::FT = 0.5 * (k + 2) * (T_E - T_P) / T_E / T_P b::FT = 2 H::FT = _R_d * T_0 / _grav z_t::FT = 15e3 λ_c::FT = π / 9 φ_c::FT = 2 * π / 9 d_0::FT = _a / 6 V_p::FT = 1 M_v::FT = 0.608 p_w::FT = 34e3 ## Pressure width parameter for specific humidity η_crit::FT = 10 * _p_0 / p_w ## Critical pressure coordinate q_0::FT = 0 ## Maximum specific humidity (default: 0.018) q_t::FT = 1e-12 ## Specific humidity above artificial tropopause φ_w::FT = 2π / 9 ## Specific humidity latitude wind parameter # grid λ = @inbounds atan(coords[2], coords[1]) φ = @inbounds asin(coords[3] / norm(coords, 2)) z = norm(coords) - _a r::FT = z + _a γ::FT = 1 # set to 0 for shallow-atmosphere case and to 1 for deep atmosphere case # convenience 
functions for temperature and pressure τ_z_1::FT = exp(Γ * z / T_0) τ_z_2::FT = 1 - 2 * (z / b / H)^2 τ_z_3::FT = exp(-(z / b / H)^2) τ_1::FT = 1 / T_0 * τ_z_1 + B * τ_z_2 * τ_z_3 τ_2::FT = C * τ_z_2 * τ_z_3 τ_int_1::FT = A * (τ_z_1 - 1) + B * z * τ_z_3 τ_int_2::FT = C * z * τ_z_3 I_T::FT = (cos(φ) * (1 + γ * z / _a))^k - k / (k + 2) * (cos(φ) * (1 + γ * z / _a))^(k + 2) # base state virtual temperature, pressure, specific humidity, density T_v::FT = (τ_1 - τ_2 * I_T)^(-1) p::FT = _p_0 * exp(-_grav / _R_d * (τ_int_1 - τ_int_2 * I_T)) # base state velocity U::FT = _grav * k / _a * τ_int_2 * T_v * ( (cos(φ) * (1 + γ * z / _a))^(k - 1) - (cos(φ) * (1 + γ * z / _a))^(k + 1) ) u_ref::FT = -_Ω * (_a + γ * z) * cos(φ) + sqrt((_Ω * (_a + γ * z) * cos(φ))^2 + (_a + γ * z) * cos(φ) * U) v_ref::FT = 0 w_ref::FT = 0 # velocity perturbations F_z::FT = 1 - 3 * (z / z_t)^2 + 2 * (z / z_t)^3 if z > z_t F_z = FT(0) end d::FT = _a * acos(sin(φ) * sin(φ_c) + cos(φ) * cos(φ_c) * cos(λ - λ_c)) c3::FT = cos(π * d / 2 / d_0)^3 s1::FT = sin(π * d / 2 / d_0) if 0 < d < d_0 && d != FT(_a * π) u′::FT = -16 * V_p / 3 / sqrt(3) * F_z * c3 * s1 * (-sin(φ_c) * cos(φ) + cos(φ_c) * sin(φ) * cos(λ - λ_c)) / sin(d / _a) v′::FT = 16 * V_p / 3 / sqrt(3) * F_z * c3 * s1 * cos(φ_c) * sin(λ - λ_c) / sin(d / _a) else u′ = FT(0) v′ = FT(0) end w′::FT = 0 u_sphere = SVector{3, FT}(u_ref + u′, v_ref + v′, w_ref + w′) #u_sphere = SVector{3, FT}(u_ref, v_ref, w_ref) u_cart = sphr_to_cart_vec(u_sphere, φ, λ) ## temperature & density T::FT = T_v ρ::FT = p / (_R_d * T) ## potential & kinetic energy e_pot = aux.Φ e_kin::FT = 0.5 * u_cart' * u_cart e_int = _cv_d * T ## Assign state variables state.ρ = ρ state.ρu = ρ * u_cart if total_energy state.ρe = ρ * (e_int + e_kin + e_pot) else state.ρe = ρ * (e_int + e_kin) end nothing end function main() ClimateMachine.init(parse_clargs = true) ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 3 numelem_horz = 8 numelem_vert = 5 timeend = 10 
* 24 * 3600 outputtime = 24 * 3600 FT = Float64 result = run( mpicomm, polynomialorder, numelem_horz, numelem_vert, timeend, outputtime, ArrayType, FT, ) end function run( mpicomm, polynomialorder, numelem_horz, numelem_vert, timeend, outputtime, ArrayType, FT, ) _planet_radius::FT = planet_radius(param_set) domain_height = FT(30e3) vert_range = grid1d( _planet_radius, FT(_planet_radius + domain_height), nelem = numelem_vert, ) topology = StackedCubedSphereTopology(mpicomm, numelem_horz, vert_range) grid = DiscontinuousSpectralElementGrid( topology, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, meshwarp = cubedshellwarp, ) T_profile = DecayingTemperatureProfile{FT}(param_set, FT(290), FT(220), FT(8e3)) if total_energy sources = (Coriolis(),) else sources = (Coriolis(), Gravity()) end problem = BaroclinicWave() model = DryAtmosModel{FT}( SphericalOrientation(), problem, ref_state = DryReferenceState(T_profile), sources = sources, ) esdg = ESDGModel( model, grid, #volume_numerical_flux_first_order = CentralVolumeFlux(), #volume_numerical_flux_first_order = EntropyConservative(), volume_numerical_flux_first_order = KGVolumeFlux(), #surface_numerical_flux_first_order = MatrixFlux(), surface_numerical_flux_first_order = RusanovNumericalFlux(), ) linearmodel = DryAtmosAcousticGravityLinearModel(model) lineardg = DGModel( linearmodel, grid, RusanovNumericalFlux(), #CentralNumericalFluxFirstOrder(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); direction = VerticalDirection(), state_auxiliary = esdg.state_auxiliary, ) # determine the time step element_size = (domain_height / numelem_vert) acoustic_speed = soundspeed_air(param_set, FT(330)) dx = min_node_distance(grid) cfl = 3 dt = cfl * dx / acoustic_speed Q = init_ode_state(esdg, FT(0)) #odesolver = LSRK144NiegemannDiehlBusch(esdg, Q; dt = dt, t0 = 0) linearsolver = ManyColumnLU() odesolver = ARK2GiraldoKellyConstantinescu( esdg, lineardg, 
LinearBackwardEulerSolver(linearsolver; isadjustable = false), Q; dt = dt, t0 = 0, split_explicit_implicit = false, ) eng0 = norm(Q) @info @sprintf """Starting ArrayType = %s FT = %s polynomialorder = %d numelem_horz = %d numelem_vert = %d dt = %.16e norm(Q₀) = %.16e """ "$ArrayType" "$FT" polynomialorder numelem_horz numelem_vert dt eng0 # Set up the information callback starttime = Ref(now()) cbinfo = EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q) runtime = Dates.format( convert(DateTime, now() - starttime[]), dateformat"HH:MM:SS", ) @info @sprintf """Update simtime = %.16e runtime = %s norm(Q) = %.16e """ gettime(odesolver) runtime energy end end cbcfl = EveryXSimulationSteps(100) do simtime = gettime(odesolver) @views begin ρ = Array(Q.data[:, 1, :]) ρu = Array(Q.data[:, 2, :]) ρv = Array(Q.data[:, 3, :]) ρw = Array(Q.data[:, 4, :]) end u = ρu ./ ρ v = ρv ./ ρ w = ρw ./ ρ ue = extrema(u) ve = extrema(v) we = extrema(w) @info @sprintf """CFL simtime = %.16e u = (%.4e, %.4e) v = (%.4e, %.4e) w = (%.4e, %.4e) """ simtime ue... ve... we... end callbacks = (cbinfo, cbcfl) #filterorder = 32 #filter = ExponentialFilter(grid, 0, filterorder) #cbfilter = EveryXSimulationSteps(1) do # Filters.apply!( # Q, # #AtmosFilterPerturbations(model), # :, # grid, # filter, # # state_auxiliary = esdg.state_auxiliary, # ) # nothing #end #callbacks = (callbacks..., cbfilter) if output_vtk # create vtk dir vtkdir = "vtk_esdg_total_KG_ncg_hires_baroclinic" * "_poly$(polynomialorder)_horz$(numelem_horz)_vert$(numelem_vert)" * "_$(ArrayType)_$(FT)" mkpath(vtkdir) vtkstep = 0 # output initial step do_output(mpicomm, vtkdir, vtkstep, esdg, Q, model) # setup the output callback cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do vtkstep += 1 do_output(mpicomm, vtkdir, vtkstep, esdg, Q, model) end callbacks = (callbacks..., cbvtk) end ## Create a callback to report state statistics for main MPIStateArrays ## every ntFreq timesteps. 
nt_freq = floor(Int, 1 // 10 * timeend / dt) cbsc = ClimateMachine.StateCheck.sccreate([(Q, "state")], nt_freq; prec = 12) callbacks = (callbacks..., cbsc) solve!( Q, odesolver; timeend = timeend, adjustfinalstep = false, callbacks = callbacks, ) # final statistics engf = norm(Q) @info @sprintf """Finished norm(Q) = %.16e norm(Q) / norm(Q₀) = %.16e norm(Q) - norm(Q₀) = %.16e """ engf engf / eng0 engf - eng0 engf ## Check results against reference if present ClimateMachine.StateCheck.scprintref(cbsc) #! format: off refDat = ( [ [ "state", "ρ", 1.26085994139264381125e-02, 1.51502814805562069367e+00, 3.46330071392505767225e-01, 3.42221017037761976454e-01 ], [ "state", "ρu[1]", -1.26747868050267172180e+02, 1.22735648852872344605e+02, -6.56484249582622303443e-02, 1.07588365672914481053e+01 ], [ "state", "ρu[2]", -1.44635478251794808102e+02, 1.11383888659731638882e+02, -1.53109264073002888581e-03, 1.06376480052955262323e+01 ], [ "state", "ρu[3]", -1.50479775987282266669e+02, 1.62284843398145170568e+02, 5.84596035289077428643e-02, 9.73728925076320095400e+00 ], [ "state", "ρe", 1.73490214503025617887e+03, 2.70352534694924892392e+05, 6.42401799036320589948e+04, 7.10054852130306971958e+04 ], ], [ [ "state", "ρ", 12, 12, 12, 12 ], [ "state", "ρu[1]", 12, 12, 12, 12 ], [ "state", "ρu[2]", 12, 12, 12, 12 ], [ "state", "ρu[3]", 12, 12, 12, 12 ], [ "state", "ρe", 12, 12, 12, 12 ], ], ) #! 
format: on if length(refDat) > 0 @test ClimateMachine.StateCheck.scdocheck(cbsc, refDat) end end function do_output( mpicomm, vtkdir, vtkstep, dg, Q, model, testname = "baroclinicwave", ) ## name of the file that this MPI rank will write filename = @sprintf( "%s/%s_mpirank%04d_step%04d", vtkdir, testname, MPI.Comm_rank(mpicomm), vtkstep ) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q))) writevtk( filename, Q, dg, statenames, dg.state_auxiliary, auxnames; number_sample_points = 10, ) ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep) end writepvtu(pvtuprefix, prefixes, (statenames..., auxnames...), eltype(Q)) @info "Done writing VTK: $pvtuprefix" end end @testset "$(@__FILE__)" begin tic = Base.time() main() toc = Base.time() time = toc - tic println(time) end ================================================ FILE: test/Numerics/ESDGMethods/DryAtmos/linear.jl ================================================ using ClimateMachine.DGMethods: DGModel using ClimateMachine.MPIStateArrays: MPIStateArray using ClimateMachine.DGMethods.NumericalFluxes: NumericalFluxSecondOrder using ClimateMachine.Mesh.Geometry: LocalGeometry using ClimateMachine.Mesh.Grids: Direction import ClimateMachine.BalanceLaws: flux_second_order!, indefinite_stack_integral!, reverse_indefinite_stack_integral!, integral_load_auxiliary_state!, integral_set_auxiliary_state!, reverse_integral_load_auxiliary_state!, reverse_integral_set_auxiliary_state! 
# Pressure linearized about the reference state; with `total_energy` the
# geopotential contribution ρΦ is removed from ρe first.
@inline function linearized_pressure(ρ, ρe, Φ)
    FT = eltype(ρ)
    γ = FT(gamma(param_set))
    if total_energy
        (γ - 1) * (ρe - ρ * Φ)
    else
        (γ - 1) * ρe
    end
end

# Base type for linearizations of `DryAtmosModel`; concrete subtypes wrap
# the nonlinear model in an `atmos` field.
abstract type DryAtmosLinearModel <: BalanceLaw end

# Same prognostic variables as the nonlinear model: ρ, ρu, ρe.
function vars_state(lm::DryAtmosLinearModel, ::Prognostic, FT)
    @vars begin
        ρ::FT
        ρu::SVector{3, FT}
        ρe::FT
    end
end
# Auxiliary variables are shared with the wrapped nonlinear model.
vars_state(lm::DryAtmosLinearModel, st::Auxiliary, FT) =
    vars_state(lm.atmos, st, FT)

# The linear model performs no auxiliary-state update.
function update_auxiliary_state!(
    dg::DGModel,
    lm::DryAtmosLinearModel,
    Q::MPIStateArray,
    t::Real,
    elems::UnitRange,
)
    return false
end

# No second-order (diffusive) fluxes in the linear model.
function flux_second_order!(
    lm::DryAtmosLinearModel,
    flux::Grad,
    state::Vars,
    diffusive::Vars,
    hyperdiffusive::Vars,
    aux::Vars,
    t::Real,
)
    nothing
end

# No stack integrals are needed; all hooks are no-ops.
integral_load_auxiliary_state!(
    lm::DryAtmosLinearModel,
    integ::Vars,
    state::Vars,
    aux::Vars,
) = nothing
integral_set_auxiliary_state!(lm::DryAtmosLinearModel, aux::Vars, integ::Vars) =
    nothing
reverse_integral_load_auxiliary_state!(
    lm::DryAtmosLinearModel,
    integ::Vars,
    state::Vars,
    aux::Vars,
) = nothing
reverse_integral_set_auxiliary_state!(
    lm::DryAtmosLinearModel,
    aux::Vars,
    integ::Vars,
) = nothing
flux_second_order!(
    lm::DryAtmosLinearModel,
    flux::Grad,
    state::Vars,
    diffusive::Vars,
    aux::Vars,
    t::Real,
) = nothing

# Wavespeed estimate from the reference state's sound speed (state-independent).
function wavespeed(
    lm::DryAtmosLinearModel,
    nM,
    state::Vars,
    aux::Vars,
    t::Real,
    direction,
)
    ref = aux.ref_state
    return soundspeed(ref.ρ, ref.p)
end

# Two boundary-condition tags; first-order BCs delegate to the wrapped
# nonlinear model.
boundary_conditions(lm::DryAtmosLinearModel) = (1, 2)
function boundary_state!(
    nf::NumericalFluxFirstOrder,
    bc,
    lm::DryAtmosLinearModel,
    args...,
)
    boundary_state!(nf, bc, lm.atmos, args...)
end function boundary_state!( nf::NumericalFluxSecondOrder, bc, lm::DryAtmosLinearModel, args..., ) nothing end init_state_auxiliary!(lm::DryAtmosLinearModel, aux::Vars, geom::LocalGeometry) = nothing init_state_prognostic!( lm::DryAtmosLinearModel, state::Vars, aux::Vars, coords, t, ) = nothing struct DryAtmosAcousticGravityLinearModel{M} <: DryAtmosLinearModel atmos::M function DryAtmosAcousticGravityLinearModel(atmos::M) where {M} if atmos.ref_state === NoReferenceState() error("DryAtmosAcousticGravityLinearModel needs a model with a reference state") end new{M}(atmos) end end function flux_first_order!( lm::DryAtmosAcousticGravityLinearModel, flux::Grad, state::Vars, aux::Vars, t::Real, direction, ) FT = eltype(state) ref = aux.ref_state flux.ρ = state.ρu pL = linearized_pressure(state.ρ, state.ρe, aux.Φ) flux.ρu += pL * I flux.ρe = ((ref.ρe + ref.p) / ref.ρ) * state.ρu nothing end function source!( lm::DryAtmosAcousticGravityLinearModel, source::Vars, state::Vars, diffusive::Vars, aux::Vars, t::Real, ::NTuple{1, Dir}, ) where {Dir <: Direction} if Dir === VerticalDirection || Dir === EveryDirection ∇Φ = aux.∇Φ source.ρu -= state.ρ * ∇Φ if !total_energy source.ρe -= state.ρu' * ∇Φ end end nothing end ================================================ FILE: test/Numerics/ESDGMethods/DryAtmos/run_tests.jl ================================================ using Test using ClimateMachine import ClimateMachine.BalanceLaws import ClimateMachine.BalanceLaws: boundary_state! using ClimateMachine.DGMethods: ESDGModel, init_ode_state using ClimateMachine.DGMethods.NumericalFluxes: numerical_volume_flux_first_order! 
using ClimateMachine.Mesh.Topologies: BrickTopology using ClimateMachine.Mesh.Grids: DiscontinuousSpectralElementGrid using ClimateMachine.VariableTemplates: varsindex using StaticArrays: MArray, @SVector using KernelAbstractions: wait using Random using DoubleFloats using MPI using ClimateMachine.MPIStateArrays Random.seed!(7) include("DryAtmos.jl") boundary_state!(::Nothing, _...) = nothing struct TestProblem <: AbstractDryAtmosProblem end # Random initialization function function init_state_prognostic!( ::DryAtmosModel, ::TestProblem, state_prognostic, state_auxiliary, _..., ) where {dim} FT = eltype(state_prognostic) ρ = state_prognostic.ρ = rand(FT) + 1 ρu = state_prognostic.ρu = 2 * (@SVector rand(FT, 3)) .- 1 p = rand(FT) + 1 Φ = state_auxiliary.Φ state_prognostic.ρe = totalenergy(ρ, ρu, p, Φ) end function check_operators(FT, dim, mpicomm, N, ArrayType) # Create a warped mesh so the metrics are not constant Ne = (8, 9, 10) brickrange = ( range(FT(-1); length = Ne[1] + 1, stop = 1), range(FT(-1); length = Ne[2] + 1, stop = 1), range(FT(-1); length = Ne[3] + 1, stop = 1), ) topl = BrickTopology( mpicomm, ntuple(k -> brickrange[k], dim); periodicity = ntuple(k -> true, dim), ) warpfun = (x1, x2, x3) -> begin α = (4 / π) * (1 - x1^2) * (1 - x2^2) * (1 - x3^2) # Rotate by α with x1 and x2 x1, x2 = cos(α) * x1 - sin(α) * x2, sin(α) * x1 + cos(α) * x2 # Rotate by α with x1 and x3 if dim == 3 x1, x3 = cos(α) * x1 - sin(α) * x3, sin(α) * x1 + cos(α) * x3 end return (x1, x2, x3) end grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, meshwarp = warpfun, ) # Orientation does not matter since we will be setting the geopotential to a # random field model = DryAtmosModel{dim}(FlatOrientation(), TestProblem()) ################################################################## # check that the volume terms lead to only surface contributions # ################################################################## # Create 
the ES model esdg = ESDGModel( model, grid; volume_numerical_flux_first_order = EntropyConservative(), surface_numerical_flux_first_order = nothing, ) # Make the Geopotential random esdg.state_auxiliary .= ArrayType(2rand(FT, size(esdg.state_auxiliary))) start_exchange = MPIStateArrays.begin_ghost_exchange!(esdg.state_auxiliary) end_exchange = MPIStateArrays.end_ghost_exchange!( esdg.state_auxiliary, dependencies = start_exchange, ) wait(end_exchange) # Create a random state state_prognostic = init_ode_state(esdg; init_on_cpu = true) # Storage for the tendency volume_tendency = similar(state_prognostic) # Compute the tendency function esdg(volume_tendency, state_prognostic, nothing, 0) # Check that the volume terms only lead to surface integrals of # ∑_{j} n_j ψ_j # where Ψ_j = β^T f_j - ζ_j = ρu_j Np, K = (N + 1)^dim, length(esdg.grid.topology.realelems) # Get the mass matrix on the host _M = ClimateMachine.Grids._M M = Array(grid.vgeo[:, _M:_M, 1:K]) # Get the state, tendency, and aux on the host Q = Array(state_prognostic.data[:, :, 1:K]) dQ = Array(volume_tendency.data[:, :, 1:K]) A = Array(esdg.state_auxiliary.data[:, :, 1:K]) # Compute the entropy variables β = similar(Q, Np, number_states(model, Entropy()), K) @views for e in 1:K for i in 1:Np state_to_entropy_variables!( model, β[i, :, e], Q[i, :, e], A[i, :, e], ) end end # Get the unit normals and surface mass matrix sgeo = Array(grid.sgeo) n1 = sgeo[ClimateMachine.Grids._n1, :, :, 1:K] n2 = sgeo[ClimateMachine.Grids._n2, :, :, 1:K] n3 = sgeo[ClimateMachine.Grids._n3, :, :, 1:K] sM = sgeo[ClimateMachine.Grids._sM, :, :, 1:K] # Get the Ψs fmask = Array(grid.vmap⁻[:, :, 1]) _ρu = varsindex(vars_state(model, Prognostic(), FT), :ρu) Ψ1 = Q[fmask, _ρu[1], 1:K] Ψ2 = Q[fmask, _ρu[2], 1:K] Ψ3 = Q[fmask, _ρu[3], 1:K] # Compute the surface integral: # ∫_Ωf ∑_j n_j * Ψ_j surface = sum(sM .* (n1 .* Ψ1 + n2 .* Ψ2 + n3 .* Ψ3), dims = (1, 2))[:] # Compute the volume integral: # -∫_Ω ∑_j β^T (dq/dt) # (tendency is -dq / 
dt) num_state = number_states(model, Prognostic()) volume = sum(β[:, 1:num_state, :] .* M .* dQ, dims = (1, 2))[:] @test all(isapprox.( surface, volume; atol = 10eps(FT), rtol = sqrt(eps(FT)), )) ########################################### # check that the volume and surface match # ########################################### esdg = ESDGModel( model, grid; state_auxiliary = esdg.state_auxiliary, volume_numerical_flux_first_order = nothing, surface_numerical_flux_first_order = EntropyConservative(), ) surface_tendency = similar(state_prognostic) # Compute the tendency function esdg(surface_tendency, state_prognostic, nothing, 0) # Surface integral should be equal and opposite to the volume integral dQ = Array(surface_tendency.data[:, :, 1:K]) volume_integral = MPI.Allreduce(sum(volume), +, mpicomm) surface_integral = MPI.Allreduce(sum(β[:, 1:num_state, :] .* M .* dQ), +, mpicomm) @test volume_integral ≈ -surface_integral ######################################################## # check that the full tendency is entropy conservative # ######################################################## esdg = ESDGModel( model, grid; state_auxiliary = esdg.state_auxiliary, volume_numerical_flux_first_order = EntropyConservative(), surface_numerical_flux_first_order = EntropyConservative(), ) tendency = similar(state_prognostic) # Compute the tendency function esdg(tendency, state_prognostic, nothing, 0) # Check for entropy conservation dQ = Array(tendency.data[:, :, 1:K]) integral = MPI.Allreduce(sum(β[:, 1:num_state, :] .* M .* dQ), +, mpicomm) @test isapprox(integral, 0, atol = sqrt(eps(sum(volume)))) end let model = DryAtmosModel{3}(FlatOrientation(), TestProblem()) num_state = number_states(model, Prognostic()) num_aux = number_states(model, Auxiliary()) num_entropy = number_states(model, Entropy()) @testset "state to entropy variable transforms" begin for FT in (Float32, Float64) state_in = [1, 2, 2, 2, 1] .* rand(FT, num_state) + [3, -1, -1, -1, 100] aux_in = rand(FT, 
num_aux) state_out = similar(state_in) aux_out = similar(aux_in) entropy = similar(state_in, num_entropy) state_to_entropy_variables!(model, entropy, state_in, aux_in) entropy_variables_to_state!(model, state_out, aux_out, entropy) @test all(state_in .≈ state_out) #@test all(aux_in .≈ aux_out) end end @testset "test numerical flux for Tadmor shuffle" begin for FT in (Float32, Float64) # Create some random states state_1 = [1, 2, 2, 2, 1] .* rand(FT, num_state) + [3, -1, -1, -1, 100] aux_1 = 0 * rand(FT, num_aux) state_2 = [1, 2, 2, 2, 1] .* rand(FT, num_state) + [3, -1, -1, -1, 100] aux_2 = 0 * rand(FT, num_aux) # Get the entropy variables for the two states entropy_1 = similar(state_1, num_entropy) state_to_entropy_variables!(model, entropy_1, state_1, aux_1) entropy_2 = similar(state_1, num_entropy) state_to_entropy_variables!(model, entropy_2, state_2, aux_2) # Get the values of Ψ_j = β^T f_j - ζ_j = ρu_j where β is the # entropy variables, f_j is the conservative flux, and ζ_j is the # entropy flux. For conservation laws this is the entropy potential. 
Ψ_1 = Vars{vars_state(model, Prognostic(), FT)}(state_1).ρu Ψ_2 = Vars{vars_state(model, Prognostic(), FT)}(state_2).ρu # Evaluate the flux with both orders of the two states H_12 = fill!(MArray{Tuple{3, num_state}, FT}(undef), -zero(FT)) numerical_volume_flux_first_order!( EntropyConservative(), model, H_12, state_1, aux_1, state_2, aux_2, ) H_21 = fill!(MArray{Tuple{3, num_state}, FT}(undef), -zero(FT)) numerical_volume_flux_first_order!( EntropyConservative(), model, H_21, state_2, aux_2, state_1, aux_1, ) # Check that we satisfy the Tadmor shuffle @test all( H_12 * entropy_1[1:num_state] - H_21 * entropy_2[1:num_state] .≈ Ψ_1 - Ψ_2, ) end end ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 test_types = (Float32, Float64) for FT in test_types for dim in 2:3 @testset "check ESDGMethods relations for dim = $dim and FT = $FT" begin check_operators(FT, dim, mpicomm, polynomialorder, ArrayType) end end end end ================================================ FILE: test/Numerics/ESDGMethods/DryAtmos/run_tests_mpo.jl ================================================ using Test using ClimateMachine import ClimateMachine.BalanceLaws import ClimateMachine.BalanceLaws: boundary_state! using ClimateMachine.DGMethods: ESDGModel, init_ode_state using ClimateMachine.DGMethods.NumericalFluxes: numerical_volume_flux_first_order! using ClimateMachine.Mesh.Topologies: BrickTopology using ClimateMachine.Mesh.Grids: DiscontinuousSpectralElementGrid using ClimateMachine.VariableTemplates: varsindex using StaticArrays: MArray, @SVector using KernelAbstractions: wait using Random using DoubleFloats using MPI using ClimateMachine.MPIStateArrays Random.seed!(7) include("DryAtmos.jl") boundary_state!(::Nothing, _...) 
= nothing

# Minimal problem type used to drive the ESDG operator checks with random data.
struct TestProblem <: AbstractDryAtmosProblem end

# Random initialization function
# Fills the prognostic state with random but admissible values:
# ρ ∈ (1, 2), each component of ρu ∈ (-1, 1), and pressure p ∈ (1, 2); the
# total energy ρe is then derived from (ρ, ρu, p, Φ) via `totalenergy` so the
# state is thermodynamically consistent with the (random) geopotential Φ.
# NOTE(review): the `where {dim}` type variable is never used in the
# signature; it appears to be a leftover from a dimensioned method.
function init_state_prognostic!(
    ::DryAtmosModel,
    ::TestProblem,
    state_prognostic,
    state_auxiliary,
    _...,
) where {dim}
    FT = eltype(state_prognostic)
    ρ = state_prognostic.ρ = rand(FT) + 1
    ρu = state_prognostic.ρu = 2 * (@SVector rand(FT, 3)) .- 1
    p = rand(FT) + 1
    Φ = state_auxiliary.Φ
    # Consistent total energy for the sampled (ρ, ρu, p) and geopotential Φ
    state_prognostic.ρe = totalenergy(ρ, ρu, p, Φ)
end

# Check the discrete entropy identities of the ESDG operators on a warped,
# fully periodic brick mesh:
#   * volume terms alone reduce to surface contributions,
#   * surface terms are equal and opposite to the volume terms,
#   * the full tendency is entropy conservative.
# Arguments: FT float type, `dim` spatial dimension, `mpicomm` MPI
# communicator, `N` polynomial order(s), `ArrayType` device array type.
function check_operators(FT, dim, mpicomm, N, ArrayType)
    # Create a warped mesh so the metrics are not constant
    Ne = (8, 9, 10)
    brickrange = (
        range(FT(-1); length = Ne[1] + 1, stop = 1),
        range(FT(-1); length = Ne[2] + 1, stop = 1),
        range(FT(-1); length = Ne[3] + 1, stop = 1),
    )
    topl = BrickTopology(
        mpicomm,
        ntuple(k -> brickrange[k], dim);
        periodicity = ntuple(k -> true, dim),
    )
    # Smooth rotation-based warp; α vanishes on the domain boundary so the
    # periodic faces still match up.
    warpfun =
        (x1, x2, x3) -> begin
            α = (4 / π) * (1 - x1^2) * (1 - x2^2) * (1 - x3^2)
            # Rotate by α with x1 and x2
            x1, x2 = cos(α) * x1 - sin(α) * x2, sin(α) * x1 + cos(α) * x2
            # Rotate by α with x1 and x3
            if dim == 3
                x1, x3 = cos(α) * x1 - sin(α) * x3, sin(α) * x1 + cos(α) * x3
            end
            return (x1, x2, x3)
        end
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
        meshwarp = warpfun,
    )

    # Orientation does not matter since we will be setting the geopotential to a
    # random field
    model = DryAtmosModel{dim}(FlatOrientation(), TestProblem())

    ##################################################################
    # check that the volume terms lead to only surface contributions #
    ##################################################################
    # Create the ES model
    esdg = ESDGModel(
        model,
        grid;
        volume_numerical_flux_first_order = EntropyConservative(),
        surface_numerical_flux_first_order = nothing,
    )

    # Make the Geopotential random
    esdg.state_auxiliary .= ArrayType(2rand(FT, size(esdg.state_auxiliary)))
    # Ghost exchange so neighboring ranks agree on the random geopotential
    start_exchange = MPIStateArrays.begin_ghost_exchange!(esdg.state_auxiliary)
    end_exchange = MPIStateArrays.end_ghost_exchange!(
esdg.state_auxiliary, dependencies = start_exchange, ) wait(end_exchange) # Create a random state state_prognostic = init_ode_state(esdg; init_on_cpu = true) # Storage for the tendency volume_tendency = similar(state_prognostic) # Compute the tendency function esdg(volume_tendency, state_prognostic, nothing, 0) # Check that the volume terms only lead to surface integrals of # ∑_{j} n_j ψ_j # where Ψ_j = β^T f_j - ζ_j = ρu_j Np, K = prod(N .+ 1), length(esdg.grid.topology.realelems) # Get the mass matrix on the host _M = ClimateMachine.Grids._M M = Array(grid.vgeo[:, _M:_M, 1:K]) # Get the state, tendency, and aux on the host Q = Array(state_prognostic.data[:, :, 1:K]) dQ = Array(volume_tendency.data[:, :, 1:K]) A = Array(esdg.state_auxiliary.data[:, :, 1:K]) # Compute the entropy variables β = similar(Q, Np, number_states(model, Entropy()), K) @views for e in 1:K for i in 1:Np state_to_entropy_variables!( model, β[i, :, e], Q[i, :, e], A[i, :, e], ) end end # Get the unit normals and surface mass matrix sgeo = Array(grid.sgeo) n1 = sgeo[ClimateMachine.Grids._n1, :, :, 1:K] n2 = sgeo[ClimateMachine.Grids._n2, :, :, 1:K] n3 = sgeo[ClimateMachine.Grids._n3, :, :, 1:K] sM = sgeo[ClimateMachine.Grids._sM, :, :, 1:K] num_state = number_states(model, Prognostic()) volume = sum(β[:, 1:num_state, :] .* M .* dQ, dims = (1, 2))[:] ########################################### # check that the volume and surface match # ########################################### esdg = ESDGModel( model, grid; state_auxiliary = esdg.state_auxiliary, volume_numerical_flux_first_order = nothing, surface_numerical_flux_first_order = EntropyConservative(), ) surface_tendency = similar(state_prognostic) # Compute the tendency function esdg(surface_tendency, state_prognostic, nothing, 0) # Surface integral should be equal and opposite to the volume integral dQ = Array(surface_tendency.data[:, :, 1:K]) volume_integral = MPI.Allreduce(sum(volume), +, mpicomm) surface_integral = MPI.Allreduce(sum(β[:, 
1:num_state, :] .* M .* dQ), +, mpicomm) @test volume_integral ≈ -surface_integral ######################################################## # check that the full tendency is entropy conservative # ######################################################## esdg = ESDGModel( model, grid; state_auxiliary = esdg.state_auxiliary, volume_numerical_flux_first_order = EntropyConservative(), surface_numerical_flux_first_order = EntropyConservative(), ) tendency = similar(state_prognostic) # Compute the tendency function esdg(tendency, state_prognostic, nothing, 0) # Check for entropy conservation dQ = Array(tendency.data[:, :, 1:K]) integral = MPI.Allreduce(sum(β[:, 1:num_state, :] .* M .* dQ), +, mpicomm) @test isapprox(integral, 0, atol = sqrt(eps(sum(volume)))) end let model = DryAtmosModel{3}(FlatOrientation(), TestProblem()) num_state = number_states(model, Prognostic()) num_aux = number_states(model, Auxiliary()) num_entropy = number_states(model, Entropy()) @testset "state to entropy variable transforms" begin for FT in (Float32, Float64) state_in = [1, 2, 2, 2, 1] .* rand(FT, num_state) + [3, -1, -1, -1, 100] aux_in = rand(FT, num_aux) state_out = similar(state_in) aux_out = similar(aux_in) entropy = similar(state_in, num_entropy) state_to_entropy_variables!(model, entropy, state_in, aux_in) entropy_variables_to_state!(model, state_out, aux_out, entropy) @test all(state_in .≈ state_out) #@test all(aux_in .≈ aux_out) end end @testset "test numerical flux for Tadmor shuffle" begin for FT in (Float32, Float64) # Create some random states state_1 = [1, 2, 2, 2, 1] .* rand(FT, num_state) + [3, -1, -1, -1, 100] aux_1 = 0 * rand(FT, num_aux) state_2 = [1, 2, 2, 2, 1] .* rand(FT, num_state) + [3, -1, -1, -1, 100] aux_2 = 0 * rand(FT, num_aux) # Get the entropy variables for the two states entropy_1 = similar(state_1, num_entropy) state_to_entropy_variables!(model, entropy_1, state_1, aux_1) entropy_2 = similar(state_1, num_entropy) state_to_entropy_variables!(model, 
entropy_2, state_2, aux_2)

                # Get the values of Ψ_j = β^T f_j - ζ_j = ρu_j where β is the
                # entropy variables, f_j is the conservative flux, and ζ_j is the
                # entropy flux. For conservation laws this is the entropy potential.
                Ψ_1 = Vars{vars_state(model, Prognostic(), FT)}(state_1).ρu
                Ψ_2 = Vars{vars_state(model, Prognostic(), FT)}(state_2).ρu

                # Evaluate the flux with both orders of the two states
                H_12 = fill!(MArray{Tuple{3, num_state}, FT}(undef), -zero(FT))
                numerical_volume_flux_first_order!(
                    EntropyConservative(),
                    model,
                    H_12,
                    state_1,
                    aux_1,
                    state_2,
                    aux_2,
                )
                H_21 = fill!(MArray{Tuple{3, num_state}, FT}(undef), -zero(FT))
                numerical_volume_flux_first_order!(
                    EntropyConservative(),
                    model,
                    H_21,
                    state_2,
                    aux_2,
                    state_1,
                    aux_1,
                )

                # Check that we satisfy the Tadmor shuffle
                @test all(
                    H_12 * entropy_1[1:num_state] -
                    H_21 * entropy_2[1:num_state] .≈ Ψ_1 - Ψ_2,
                )
            end
        end

    # Driver: run the operator checks for 3-D meshes with two mixed
    # polynomial-order combinations and both single and double precision.
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD
    polynomialorders = ((2, 2, 1), (1, 1, 2))
    test_types = (Float32, Float64)
    for FT in test_types
        for dim in (3,)
            for polynomialorder in polynomialorders
                # (A redundant `dim = 3` reassignment was removed here: the
                # loop variable `dim` is already 3 for every iteration.)
                @testset "check ESDGMethods relations for dim = $dim, FT = $FT, and polynomial order $polynomialorder" begin
                    check_operators(
                        FT,
                        dim,
                        mpicomm,
                        polynomialorder,
                        ArrayType,
                    )
                end
            end
        end
    end
end

================================================
FILE: test/Numerics/ESDGMethods/diagnostics.jl
================================================
using KernelAbstractions
using ClimateMachine.MPIStateArrays: array_device, weightedsum
using KernelAbstractions.Extras: @unroll

# Integrate the balance law's entropy over the whole grid: launch a
# point-wise entropy kernel on the real elements, then take the
# quadrature-weighted sum of the result stored in `entropy`.
function entropy_integral(dg, entropy, state_prognostic)
    balance_law = dg.balance_law
    state_auxiliary = dg.state_auxiliary

    device = array_device(state_prognostic)

    grid = dg.grid
    topology = grid.topology

    Np = dofs_per_element(grid)

    dim = dimensionality(grid)
    # XXX: Needs updating for multiple polynomial orders
    N = polynomialorders(grid)
    # Currently only support single polynomial order
    @assert all(N[1]
.== N)
    N = N[1]

    realelems = topology.realelems

    event = Event(device)
    # Launch the point-wise entropy kernel over all real (non-ghost) elements;
    # work group size is capped at 1024 threads.
    event = esdg_compute_entropy!(device, min(Np, 1024))(
        balance_law,
        Val(dim),
        Val(N),
        entropy.data,
        state_prognostic.data,
        state_auxiliary.data,
        realelems,
        ndrange = Np * length(realelems),
        dependencies = event,
    )
    wait(event)

    # Quadrature-weighted sum over the grid gives the entropy integral.
    weightedsum(entropy)
end

# Kernel: evaluate the balance law's entropy function `state_to_entropy` at
# every nodal point of every element in `elems` and store the scalar result
# in `entropy[n, 1, e]`.  One work item handles one (node, element) pair.
@kernel function esdg_compute_entropy!(
    balance_law::BalanceLaw,
    ::Val{dim},
    ::Val{N},
    entropy,
    state_prognostic,
    state_auxiliary,
    elems,
) where {dim, N}
    FT = eltype(state_prognostic)
    num_state_prognostic = number_states(balance_law, Prognostic())
    num_state_auxiliary = number_states(balance_law, Auxiliary())

    # Nodes per element for a single polynomial order N (2-D has one node in
    # the third direction)
    Nq = N + 1
    Nqk = dim == 2 ? 1 : Nq
    Np = Nq * Nq * Nqk

    # Thread-local copies of the state so `Vars` views can wrap them
    local_state_prognostic = MArray{Tuple{num_state_prognostic}, FT}(undef)
    local_state_auxiliary = MArray{Tuple{num_state_auxiliary}, FT}(undef)

    I = @index(Global, Linear)
    # Decode the flat index into (element slot, node) — eI indexes `elems`,
    # n is the node within the element
    eI = (I - 1) ÷ Np + 1
    n = (I - 1) % Np + 1
    @inbounds begin
        e = elems[eI]

        @unroll for s in 1:num_state_prognostic
            local_state_prognostic[s] = state_prognostic[n, s, e]
        end

        @unroll for s in 1:num_state_auxiliary
            local_state_auxiliary[s] = state_auxiliary[n, s, e]
        end

        entropy[n, 1, e] = state_to_entropy(
            balance_law,
            Vars{vars_state(balance_law, Prognostic(), FT)}(
                local_state_prognostic,
            ),
            Vars{vars_state(balance_law, Auxiliary(), FT)}(
                local_state_auxiliary,
            ),
        )
    end
end

# Compute the global inner product of the entropy variables β(Q) with the
# tendency dQ/dt (per node, then quadrature-weighted over the grid).  For an
# entropy-conservative scheme this product should vanish.
function entropy_product(dg, entropy, state_prognostic, tendency)
    balance_law = dg.balance_law
    state_auxiliary = dg.state_auxiliary

    device = array_device(state_prognostic)

    grid = dg.grid
    topology = grid.topology

    Np = dofs_per_element(grid)

    dim = dimensionality(grid)
    # XXX: Needs updating for multiple polynomial orders
    N = polynomialorders(grid)
    # Currently only support single polynomial order
    @assert all(N[1] .== N)
    N = N[1]

    realelems = topology.realelems

    event = Event(device)
    event = esdg_compute_entropy_product!(device, min(Np, 1024))(
        balance_law,
        Val(dim),
        Val(N),
        entropy.data,
        state_prognostic.data,
        tendency.data,
        state_auxiliary.data,
        realelems,
        ndrange =
Np * length(realelems),
        dependencies = event,
    )
    wait(event)

    # Quadrature-weighted sum of the per-node products gives ⟨β, dQ/dt⟩.
    weightedsum(entropy)
end

# Kernel: at every nodal point, convert the prognostic state to entropy
# variables and store the dot product of those entropy variables with the
# local tendency in `entropy[n, 1, e]`.  One work item handles one
# (node, element) pair.
@kernel function esdg_compute_entropy_product!(
    balance_law::BalanceLaw,
    ::Val{dim},
    ::Val{N},
    entropy,
    state_prognostic,
    tendency,
    state_auxiliary,
    elems,
) where {dim, N}
    FT = eltype(state_prognostic)
    num_state_prognostic = number_states(balance_law, Prognostic())
    num_state_entropy = number_states(balance_law, Entropy())
    num_state_auxiliary = number_states(balance_law, Auxiliary())

    # Nodes per element for a single polynomial order N
    Nq = N + 1
    Nqk = dim == 2 ? 1 : Nq
    Np = Nq * Nq * Nqk

    local_state_entropy = MArray{Tuple{num_state_entropy}, FT}(undef)
    local_state_prognostic = MArray{Tuple{num_state_prognostic}, FT}(undef)
    local_tendency = MArray{Tuple{num_state_prognostic}, FT}(undef)
    local_state_auxiliary = MArray{Tuple{num_state_auxiliary}, FT}(undef)

    I = @index(Global, Linear)
    # Decode the flat index into (element slot, node)
    eI = (I - 1) ÷ Np + 1
    n = (I - 1) % Np + 1
    @inbounds begin
        e = elems[eI]

        @unroll for s in 1:num_state_prognostic
            local_state_prognostic[s] = state_prognostic[n, s, e]
        end

        @unroll for s in 1:num_state_prognostic
            local_tendency[s] = tendency[n, s, e]
        end

        @unroll for s in 1:num_state_auxiliary
            local_state_auxiliary[s] = state_auxiliary[n, s, e]
        end

        state_to_entropy_variables!(
            balance_law,
            Vars{vars_state(balance_law, Entropy(), FT)}(local_state_entropy),
            Vars{vars_state(balance_law, Prognostic(), FT)}(
                local_state_prognostic,
            ),
            Vars{vars_state(balance_law, Auxiliary(), FT)}(
                local_state_auxiliary,
            ),
        )

        local_product = -zero(FT)
        # note that the tendency related to the last entropy variable is assumed zero
        @unroll for s in 1:num_state_prognostic
            local_product += local_state_entropy[s] * local_tendency[s]
        end
        entropy[n, 1, e] = local_product
    end
end

================================================
FILE: test/Numerics/Mesh/BrickMesh.jl
================================================
using ClimateMachine.Mesh.BrickMesh
using ClimateMachine.Mesh.Grids: mappings, commmapping
using Test
using MPI

MPI.Initialized() || MPI.Init()

@testset "Linear Partition" begin
    @test
BrickMesh.linearpartition(1, 1, 1) == 1:1 @test BrickMesh.linearpartition(20, 1, 1) == 1:20 @test BrickMesh.linearpartition(10, 1, 2) == 1:5 @test BrickMesh.linearpartition(10, 2, 2) == 6:10 end @testset "Hilbert Code" begin @test BrickMesh.hilbertcode([0, 0], bits = 1) == [0, 0] @test BrickMesh.hilbertcode([0, 1], bits = 1) == [0, 1] @test BrickMesh.hilbertcode([1, 1], bits = 1) == [1, 0] @test BrickMesh.hilbertcode([1, 0], bits = 1) == [1, 1] @test BrickMesh.hilbertcode([0, 0], bits = 2) == [0, 0] @test BrickMesh.hilbertcode([1, 0], bits = 2) == [0, 1] @test BrickMesh.hilbertcode([1, 1], bits = 2) == [0, 2] @test BrickMesh.hilbertcode([0, 1], bits = 2) == [0, 3] @test BrickMesh.hilbertcode([0, 2], bits = 2) == [1, 0] @test BrickMesh.hilbertcode([0, 3], bits = 2) == [1, 1] @test BrickMesh.hilbertcode([1, 3], bits = 2) == [1, 2] @test BrickMesh.hilbertcode([1, 2], bits = 2) == [1, 3] @test BrickMesh.hilbertcode([2, 2], bits = 2) == [2, 0] @test BrickMesh.hilbertcode([2, 3], bits = 2) == [2, 1] @test BrickMesh.hilbertcode([3, 3], bits = 2) == [2, 2] @test BrickMesh.hilbertcode([3, 2], bits = 2) == [2, 3] @test BrickMesh.hilbertcode([3, 1], bits = 2) == [3, 0] @test BrickMesh.hilbertcode([2, 1], bits = 2) == [3, 1] @test BrickMesh.hilbertcode([2, 0], bits = 2) == [3, 2] @test BrickMesh.hilbertcode([3, 0], bits = 2) == [3, 3] @test BrickMesh.hilbertcode(UInt64.([14, 3, 4])) == UInt64.([0x0, 0x0, 0xe25]) end @testset "Mesh to Hilbert Code" begin let etc = Array{Float64}(undef, 2, 4, 6) etc[:, :, 1] = [2.0 3.0 2.0 3.0; 4.0 4.0 5.0 5.0] etc[:, :, 2] = [3.0 4.0 3.0 4.0; 4.0 4.0 5.0 5.0] etc[:, :, 3] = [4.0 5.0 4.0 5.0; 4.0 4.0 5.0 5.0] etc[:, :, 4] = [2.0 3.0 2.0 3.0; 5.0 5.0 6.0 6.0] etc[:, :, 5] = [3.0 4.0 3.0 4.0; 5.0 5.0 6.0 6.0] etc[:, :, 6] = [4.0 5.0 4.0 5.0; 5.0 5.0 6.0 6.0] code_exect = UInt64[ 0x0000000000000000 0x1555555555555555 0xffffffffffffffff 0x5555555555555555 0x6aaaaaaaaaaaaaaa 0xaaaaaaaaaaaaaaaa 0x0000000000000000 0x5555555555555555 0xffffffffffffffff 
0x5555555555555555 0xaaaaaaaaaaaaaaaa 0xaaaaaaaaaaaaaaaa ] code = centroidtocode(MPI.COMM_SELF, etc) @test code == code_exect end let nelem = 1 d = 2 etc = Array{Float64}(undef, d, d^2, nelem) etc[:, :, 1] = [2.0 3.0 2.0 3.0; 4.0 4.0 5.0 5.0] code = centroidtocode(MPI.COMM_SELF, etc) @test code == zeros(eltype(code), d, nelem) end end @testset "Vertex Ordering" begin @test ((1,), 1) == BrickMesh.vertsortandorder(1) @test ((1, 2), 1) == BrickMesh.vertsortandorder(1, 2) @test ((1, 2), 2) == BrickMesh.vertsortandorder(2, 1) @test ((1, 2, 3), 1) == BrickMesh.vertsortandorder(1, 2, 3) @test ((1, 2, 3), 2) == BrickMesh.vertsortandorder(3, 1, 2) @test ((1, 2, 3), 3) == BrickMesh.vertsortandorder(2, 3, 1) @test ((1, 2, 3), 4) == BrickMesh.vertsortandorder(2, 1, 3) @test ((1, 2, 3), 5) == BrickMesh.vertsortandorder(3, 2, 1) @test ((1, 2, 3), 6) == BrickMesh.vertsortandorder(1, 3, 2) @test_throws ErrorException BrickMesh.vertsortandorder(2, 1, 1) @test ((1, 2, 3, 4), 1) == BrickMesh.vertsortandorder(1, 2, 3, 4) @test ((1, 2, 3, 4), 2) == BrickMesh.vertsortandorder(1, 3, 2, 4) @test ((1, 2, 3, 4), 3) == BrickMesh.vertsortandorder(2, 1, 3, 4) @test ((1, 2, 3, 4), 4) == BrickMesh.vertsortandorder(2, 4, 1, 3) @test ((1, 2, 3, 4), 5) == BrickMesh.vertsortandorder(3, 1, 4, 2) @test ((1, 2, 3, 4), 6) == BrickMesh.vertsortandorder(3, 4, 1, 2) @test ((1, 2, 3, 4), 7) == BrickMesh.vertsortandorder(4, 2, 3, 1) @test ((1, 2, 3, 4), 8) == BrickMesh.vertsortandorder(4, 3, 2, 1) @test_throws ErrorException BrickMesh.vertsortandorder(1, 3, 3, 1) end @testset "Mesh" begin let (etv, etc, etb, fc) = brickmesh((4:7,), (false,)) etv_expect = [ 1 2 3 2 3 4 ] etb_expect = [ 1 0 0 0 0 1 ] fc_expect = Array{Int64, 1}[] @test etv == etv_expect @test etb == etb_expect @test fc == fc_expect @test etc[:, :, 1] == [4 5] @test etc[:, :, 2] == [5 6] @test etc[:, :, 3] == [6 7] end let (etv, etc, etb, fc) = brickmesh((4:7,), (true,)) etv_expect = [ 1 2 3 2 3 4 ] etb_expect = [ 0 0 0 0 0 0 ] fc_expect = 
Array{Int64, 1}[[3, 2, 1]] @test etv == etv_expect @test etb == etb_expect @test fc == fc_expect @test etc[:, :, 1] == [4 5] @test etc[:, :, 2] == [5 6] @test etc[:, :, 3] == [6 7] end let (etv, etc, etb, fc) = brickmesh((2:5, 4:6), (false, true)) etv_expect = [ 1 2 5 6 2 3 6 7 3 4 7 8 5 6 9 10 6 7 10 11 7 8 11 12 ]' etb_expect = [ 1 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 ] fc_expect = Array{Int64, 1}[[4, 4, 1, 2], [5, 4, 2, 3], [6, 4, 3, 4]] @test etv == etv_expect @test etb == etb_expect @test fc == fc_expect @test etc[:, :, 1] == [ 2 3 2 3 4 4 5 5 ] @test etc[:, :, 5] == [ 3 4 3 4 5 5 6 6 ] end let (etv, etc, etb, fc) = brickmesh((-1:2:1, -1:2:1, -1:1:1), (true, true, true)) etv_expect = [ 1 5 2 6 3 7 4 8 5 9 6 10 7 11 8 12 ] etb_expect = zeros(Int64, 6, 2) fc_expect = Array{Int64, 1}[ [1, 2, 1, 3, 5, 7], [1, 4, 1, 2, 5, 6], [2, 2, 5, 7, 9, 11], [2, 4, 5, 6, 9, 10], [2, 6, 1, 2, 3, 4], ] @test etv == etv_expect @test etb == etb_expect @test fc == fc_expect @test etc[:, :, 1] == [ -1 1 -1 1 -1 1 -1 1 -1 -1 1 1 -1 -1 1 1 -1 -1 -1 -1 0 0 0 0 ] @test etc[:, :, 2] == [ -1 1 -1 1 -1 1 -1 1 -1 -1 1 1 -1 -1 1 1 0 0 0 0 1 1 1 1 ] end let (etv, etc, etb, fc) = brickmesh( (-1:1, -1:1, -1:1), (false, false, false), boundary = ((11, 12), (13, 14), (15, 16)), ) @test etb == [ 11 0 11 0 11 0 11 0 0 12 0 12 0 12 0 12 13 13 0 0 13 13 0 0 0 0 14 14 0 0 14 14 15 15 15 15 0 0 0 0 0 0 0 0 16 16 16 16 ] end let x = (1:1000,) p = (false,) b = ((1, 2),) (etv, etc, etb, fc) = brickmesh(x, p, boundary = b) n = 50 (etv_parts, etc_parts, etb_parts, fc_parts) = brickmesh(x, p, boundary = b, part = 1, numparts = n) for j in 2:n (etv_j, etc_j, etb_j, fc_j) = brickmesh(x, p, boundary = b, part = j, numparts = n) etv_parts = cat(etv_parts, etv_j; dims = 2) etc_parts = cat(etc_parts, etc_j; dims = 3) etb_parts = cat(etb_parts, etb_j; dims = 2) end @test etv == etv_parts @test etc == etc_parts @test etb == etb_parts end let x = (-1:2:10, -1:1:1, -4:1:1) p = (true, false, true) b = ((1, 2), 
(3, 4), (5, 6)) (etv, etc, etb, fc) = brickmesh(x, p, boundary = b) n = 50 (etv_parts, etc_parts, etb_parts, fc_parts) = brickmesh(x, p, boundary = b, part = 1, numparts = n) for j in 2:n (etv_j, etc_j, etb_j, fc_j) = brickmesh(x, p, boundary = b, part = j, numparts = n) etv_parts = cat(etv_parts, etv_j; dims = 2) etc_parts = cat(etc_parts, etc_j; dims = 3) etb_parts = cat(etb_parts, etb_j; dims = 2) end @test etv == etv_parts @test etc == etc_parts @test etb == etb_parts end end @testset "Connect" begin let comm = MPI.COMM_SELF mesh = connectmesh( comm, partition(comm, brickmesh((0:10,), (true,))...)[1:4]..., ) nelem = 10 @test mesh[:elemtocoord][:, :, 1] == [0 1] @test mesh[:elemtocoord][:, :, 2] == [1 2] @test mesh[:elemtocoord][:, :, 3] == [2 3] @test mesh[:elemtocoord][:, :, 4] == [3 4] @test mesh[:elemtocoord][:, :, 5] == [4 5] @test mesh[:elemtocoord][:, :, 6] == [5 6] @test mesh[:elemtocoord][:, :, 7] == [6 7] @test mesh[:elemtocoord][:, :, 8] == [7 8] @test mesh[:elemtocoord][:, :, 9] == [8 9] @test mesh[:elemtocoord][:, :, 10] == [9 10] @test mesh[:elemtoelem] == [ 10 1 2 3 4 5 6 7 8 9 2 3 4 5 6 7 8 9 10 1 ] @test mesh[:elemtoface] == [ 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 ] @test mesh[:elemtoordr] == ones(Int, size(mesh[:elemtoordr])) @test mesh[:elemtobndy] == zeros(Int, size(mesh[:elemtoordr])) @test mesh[:elems] == 1:nelem @test mesh[:realelems] == 1:nelem @test mesh[:ghostelems] == nelem .+ (1:0) @test length(mesh[:sendelems]) == 0 @test mesh[:nabrtorank] == Int[] @test mesh[:nabrtorecv] == UnitRange{Int}[] @test mesh[:nabrtosend] == UnitRange{Int}[] end let comm = MPI.COMM_SELF mesh = connectmesh( comm, partition(comm, brickmesh((0:4, 5:9), (false, true))...)[1:4]..., ) nelem = 16 @test mesh[:elemtocoord][:, :, 1] == [0 1 0 1; 5 5 6 6] @test mesh[:elemtocoord][:, :, 2] == [1 2 1 2; 5 5 6 6] @test mesh[:elemtocoord][:, :, 3] == [1 2 1 2; 6 6 7 7] @test mesh[:elemtocoord][:, :, 4] == [0 1 0 1; 6 6 7 7] @test mesh[:elemtocoord][:, :, 5] == [0 1 0 1; 
7 7 8 8] @test mesh[:elemtocoord][:, :, 6] == [0 1 0 1; 8 8 9 9] @test mesh[:elemtocoord][:, :, 7] == [1 2 1 2; 8 8 9 9] @test mesh[:elemtocoord][:, :, 8] == [1 2 1 2; 7 7 8 8] @test mesh[:elemtocoord][:, :, 9] == [2 3 2 3; 7 7 8 8] @test mesh[:elemtocoord][:, :, 10] == [2 3 2 3; 8 8 9 9] @test mesh[:elemtocoord][:, :, 11] == [3 4 3 4; 8 8 9 9] @test mesh[:elemtocoord][:, :, 12] == [3 4 3 4; 7 7 8 8] @test mesh[:elemtocoord][:, :, 13] == [3 4 3 4; 6 6 7 7] @test mesh[:elemtocoord][:, :, 14] == [2 3 2 3; 6 6 7 7] @test mesh[:elemtocoord][:, :, 15] == [2 3 2 3; 5 5 6 6] @test mesh[:elemtocoord][:, :, 16] == [3 4 3 4; 5 5 6 6] @test mesh[:elemtoelem] == [ 1 1 4 4 5 6 6 5 8 7 10 9 14 3 2 15 2 15 14 3 8 7 10 9 12 11 11 12 13 13 16 16 6 7 2 1 4 5 8 3 14 9 12 13 16 15 10 11 4 3 8 5 6 1 2 7 10 15 16 11 12 9 14 13 ] @test mesh[:elemtoface] == [ 1 2 2 1 1 1 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 2 2 2 1 1 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 ] @test mesh[:elemtoordr] == ones(Int, size(mesh[:elemtoordr])) @test mesh[:elems] == 1:nelem @test mesh[:realelems] == 1:nelem @test mesh[:ghostelems] == nelem .+ (1:0) @test length(mesh[:sendelems]) == 0 @test mesh[:nabrtorank] == Int[] @test mesh[:nabrtorecv] == UnitRange{Int}[] @test mesh[:nabrtosend] == UnitRange{Int}[] end let comm = MPI.COMM_SELF mesh = connectmeshfull( comm, partition(comm, brickmesh((0:4, 5:9), (false, true))...)[1:4]..., ) nelem = 16 @test mesh[:elemtocoord][:, :, 1] == [0 1 0 1; 5 5 6 6] @test mesh[:elemtocoord][:, :, 2] == [1 2 1 2; 5 5 6 6] @test mesh[:elemtocoord][:, :, 3] == [1 2 1 2; 6 6 7 7] @test mesh[:elemtocoord][:, :, 4] == [0 1 0 1; 6 6 7 7] @test mesh[:elemtocoord][:, :, 5] == [0 1 0 1; 7 7 8 8] @test mesh[:elemtocoord][:, :, 6] == [0 1 0 1; 8 8 9 9] @test mesh[:elemtocoord][:, :, 7] == [1 2 1 2; 8 8 9 9] @test mesh[:elemtocoord][:, :, 8] == [1 2 1 2; 7 7 8 8] @test mesh[:elemtocoord][:, :, 9] == [2 3 2 3; 7 7 8 8] @test mesh[:elemtocoord][:, :, 10] == [2 3 2 3; 8 8 9 
9] @test mesh[:elemtocoord][:, :, 11] == [3 4 3 4; 8 8 9 9] @test mesh[:elemtocoord][:, :, 12] == [3 4 3 4; 7 7 8 8] @test mesh[:elemtocoord][:, :, 13] == [3 4 3 4; 6 6 7 7] @test mesh[:elemtocoord][:, :, 14] == [2 3 2 3; 6 6 7 7] @test mesh[:elemtocoord][:, :, 15] == [2 3 2 3; 5 5 6 6] @test mesh[:elemtocoord][:, :, 16] == [3 4 3 4; 5 5 6 6] @test mesh[:elemtoelem] == [ 1 1 4 4 5 6 6 5 8 7 10 9 14 3 2 15 2 15 14 3 8 7 10 9 12 11 11 12 13 13 16 16 6 7 2 1 4 5 8 3 14 9 12 13 16 15 10 11 4 3 8 5 6 1 2 7 10 15 16 11 12 9 14 13 ] @test mesh[:elemtoface] == [ 1 2 2 1 1 1 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 2 2 2 1 1 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 ] @test mesh[:elemtoordr] == ones(Int, size(mesh[:elemtoordr])) @test mesh[:elems] == 1:nelem @test mesh[:realelems] == 1:nelem @test mesh[:ghostelems] == nelem .+ (1:0) @test length(mesh[:sendelems]) == 0 @test mesh[:nabrtorank] == Int[] @test mesh[:nabrtorecv] == UnitRange{Int}[] @test mesh[:nabrtosend] == UnitRange{Int}[] end end @testset "Mappings" begin let comm = MPI.COMM_SELF x = (0:4,) mesh = connectmesh(comm, partition(comm, brickmesh(x, (true,))...)[1:4]...) N = 3 d = length(x) nelem = prod(length.(x) .- 1) nface = 2d Nfp = (N + 1)^(d - 1) vmap⁻, vmap⁺ = mappings( ntuple(j -> N, d), mesh[:elemtoelem], mesh[:elemtoface], mesh[:elemtoordr], ) @test vmap⁻ == reshape([1, 4, 5, 8, 9, 12, 13, 16], Nfp, nface, nelem) @test vmap⁺ == reshape([16, 5, 4, 9, 8, 13, 12, 1], Nfp, nface, nelem) end # Single polynomial order let comm = MPI.COMM_SELF x = (-1:1, 0:1) p = (false, true) mesh = connectmesh(comm, partition(comm, brickmesh(x, p)...)[1:4]...) N = 2 d = length(x) nelem = prod(length.(x) .- 1) nface = 2d Nfp = (N + 1)^(d - 1) vmap⁻, vmap⁺ = mappings( ntuple(j -> N, d), mesh[:elemtoelem], mesh[:elemtoface], mesh[:elemtoordr], ) #! 
format: off @test vmap⁻ == reshape( [ 1, 4, 7, # f=1 e=1 3, 6, 9, # f=2 e=1 1, 2, 3, # f=3 e=1 7, 8, 9, # f=4 e=1 10, 13, 16, # f=1 e=2 12, 15, 18, # f=2 e=2 10, 11, 12, # f=3 e=2 16, 17, 18, # f=4 e=2 ], Nfp, nface, nelem, ) @test vmap⁺ == reshape( [ 1, 4, 7, # f=1 e=1 10, 13, 16, # f=1 e=2 7, 8, 9, # f=4 e=1 1, 2, 3, # f=3 e=1 3, 6, 9, # f=2 e=1 12, 15, 18, # f=2 e=2 16, 17, 18, # f=4 e=2 10, 11, 12, # f=3 e=2 ], Nfp, nface, nelem, ) #! format: on end # Two polynomial orders let comm = MPI.COMM_SELF x = (-1:1, 0:1) p = (false, true) mesh = connectmesh(comm, partition(comm, brickmesh(x, p)...)[1:4]...) N = (2, 3) d = length(x) nelem = prod(length.(x) .- 1) nface = 2d Nfp = div.(prod(N .+ 1), ((N .+ 1) .^ (d - 1))) vmap⁻, vmap⁺ = mappings(N, mesh[:elemtoelem], mesh[:elemtoface], mesh[:elemtoordr]) #! format: off @test vmap⁻ == reshape( [ 1, 4, 7, 10, # f=1 e=1 3, 6, 9, 12, # f=2 e=1 1, 2, 3, 0, # f=3 e=1 10, 11, 12, 0, # f=4 e=1 13, 16, 19, 22, # f=1 e=2 15, 18, 21, 24, # f=2 e=2 13, 14, 15, 0, # f=3 e=2 22, 23, 24, 0, # f=4 e=2 ], maximum(Nfp), nface, nelem, ) @test vmap⁺ == reshape( [ 1, 4, 7, 10, # f=1 e=1 13, 16, 19, 22, # f=2 e=1 10, 11, 12, 0, # f=3 e=1 1, 2, 3, 0, # f=4 e=1 3, 6, 9, 12, # f=1 e=2 15, 18, 21, 24, # f=2 e=2 22, 23, 24, 0, # f=3 e=2 13, 14, 15, 0, # f=4 e=2 ], maximum(Nfp), nface, nelem, ) #! format: on end # Single polynomial order let comm = MPI.COMM_SELF x = (0:1, 0:1, -1:1) p = (false, true, false) mesh = connectmesh(comm, partition(comm, brickmesh(x, p)...)[1:4]...) N = 2 d = length(x) nelem = prod(length.(x) .- 1) nface = 2d Np = (N + 1)^d Nfp = (N + 1)^(d - 1) vmap⁻, vmap⁺ = mappings( ntuple(j -> N, d), mesh[:elemtoelem], mesh[:elemtoface], mesh[:elemtoordr], ) #! format: off fmask = [ 1 3 1 7 1 19 4 6 2 8 2 20 7 9 3 9 3 21 10 12 10 16 4 22 13 15 11 17 5 23 16 18 12 18 6 24 19 21 19 25 7 25 22 24 20 26 8 26 25 27 21 27 9 27 ] #! 
format: on @test vmap⁻ == reshape([fmask[:]; fmask[:] .+ Np], Nfp, nface, nelem) @test vmap⁺ == reshape( [ fmask[:, 1] fmask[:, 2] fmask[:, 4] fmask[:, 3] fmask[:, 5] fmask[:, 5] .+ Np fmask[:, 1] .+ Np fmask[:, 2] .+ Np fmask[:, 4] .+ Np fmask[:, 3] .+ Np fmask[:, 6] fmask[:, 6] .+ Np ], Nfp, nface, nelem, ) end # Multiple polynomial orders let comm = MPI.COMM_SELF x = (0:1, 0:1, -1:1) p = (false, true, false) mesh = connectmesh(comm, partition(comm, brickmesh(x, p)...)[1:4]...) N = (1, 2, 3) d = length(x) nelem = prod(length.(x) .- 1) nface = 2d Np = prod(N .+ 1) Nfp = div.(Np, N .+ 1) vmap⁻, vmap⁺ = mappings(N, mesh[:elemtoelem], mesh[:elemtoface], mesh[:elemtoordr]) #! format: off fmask = ( [ 1 3 5 7 9 11 13 15 17 19 21 23], [ 2 4 6 8 10 12 14 16 18 20 22 24], [ 1 2 7 8 13 14 19 20 0 0 0 0], [ 5 6 11 12 17 18 23 24 0 0 0 0], [ 1 2 3 4 5 6 0 0 0 0 0 0], [19 20 21 22 23 24 0 0 0 0 0 0], ) #! format: on @test vmap⁻ == reshape( [ fmask[1] fmask[2] fmask[3] fmask[4] fmask[5] fmask[6] fmask[1] .+ Np .* (fmask[1] .> 0) fmask[2] .+ Np .* (fmask[2] .> 0) fmask[3] .+ Np .* (fmask[3] .> 0) fmask[4] .+ Np .* (fmask[4] .> 0) fmask[5] .+ Np .* (fmask[5] .> 0) fmask[6] .+ Np .* (fmask[6] .> 0) ]', maximum(Nfp), nface, nelem, ) @test vmap⁺ == reshape( [ vmap⁻[:, 1, 1] vmap⁻[:, 2, 1] vmap⁻[:, 4, 1] vmap⁻[:, 3, 1] vmap⁻[:, 5, 1] vmap⁻[:, 5, 2] vmap⁻[:, 1, 2] vmap⁻[:, 2, 2] vmap⁻[:, 4, 2] vmap⁻[:, 3, 2] vmap⁻[:, 6, 1] vmap⁻[:, 6, 2] ], maximum(Nfp), nface, nelem, ) end # Test polynomial order 0 let comm = MPI.COMM_SELF x = (0:1, -1:1) p = (false, true) mesh = connectmesh(comm, partition(comm, brickmesh(x, p)...)[1:4]...) N = (5, 0) Nq = N .+ 1 d = length(x) nelem = prod(length.(x) .- 1) nface = 2d Np = prod(Nq) Nfp = div.(Np, Nq) vmap⁻, vmap⁺ = mappings(N, mesh[:elemtoelem], mesh[:elemtoface], mesh[:elemtoordr]) #! 
format: off p = reshape(1:Np, Nq) fmask = ntuple(j -> zeros(Int, maximum(Nfp)), 2d) fmask[1][1:Nfp[1]] .= p[1, :][:] fmask[2][1:Nfp[1]] .= p[end, :][:] fmask[3][1:Nfp[2]] .= p[:, 1][:] fmask[4][1:Nfp[2]] .= p[:, end][:] #! format: on @test vmap⁻ == reshape( [ fmask[1] fmask[2] fmask[3] fmask[4] fmask[1] .+ Np .* (fmask[1] .> 0) fmask[2] .+ Np .* (fmask[2] .> 0) fmask[3] .+ Np .* (fmask[3] .> 0) fmask[4] .+ Np .* (fmask[4] .> 0) ]', maximum(Nfp), nface, nelem, ) @test vmap⁺ == reshape( [ vmap⁻[:, 1, 1] vmap⁻[:, 2, 1] vmap⁻[:, 4, 2] vmap⁻[:, 3, 2] vmap⁻[:, 1, 2] vmap⁻[:, 2, 2] vmap⁻[:, 4, 1] vmap⁻[:, 3, 1] ], maximum(Nfp), nface, nelem, ) end # Test polynomial order 0 let comm = MPI.COMM_SELF x = (0:1, 0:1, -1:1) p = (false, true, false) mesh = connectmesh(comm, partition(comm, brickmesh(x, p)...)[1:4]...) N = (7, 2, 0) Nq = N .+ 1 d = length(x) nelem = prod(length.(x) .- 1) nface = 2d Np = prod(Nq) Nfp = div.(Np, Nq) vmap⁻, vmap⁺ = mappings(N, mesh[:elemtoelem], mesh[:elemtoface], mesh[:elemtoordr]) #! format: off p = reshape(1:Np, Nq) fmask = ntuple(j -> zeros(Int, maximum(Nfp)), 2d) fmask[1][1:Nfp[1]] .= p[1, :, :][:] fmask[2][1:Nfp[1]] .= p[end, :, :][:] fmask[3][1:Nfp[2]] .= p[:, 1, :][:] fmask[4][1:Nfp[2]] .= p[:, end, :][:] fmask[5][1:Nfp[3]] .= p[:, :, 1][:] fmask[6][1:Nfp[3]] .= p[:, :, end][:] #! 
#! format: on
        # vmap⁻ lists, per element, the volume dof index of each face node
        # (padded faces carry 0, hence the `.> 0` guard when offsetting into
        # element 2's dof range by Np).
        @test vmap⁻ == reshape(
            [
                fmask[1]
                fmask[2]
                fmask[3]
                fmask[4]
                fmask[5]
                fmask[6]
                fmask[1] .+ Np .* (fmask[1] .> 0)
                fmask[2] .+ Np .* (fmask[2] .> 0)
                fmask[3] .+ Np .* (fmask[3] .> 0)
                fmask[4] .+ Np .* (fmask[4] .> 0)
                fmask[5] .+ Np .* (fmask[5] .> 0)
                fmask[6] .+ Np .* (fmask[6] .> 0)
            ]',
            maximum(Nfp),
            nface,
            nelem,
        )
        # vmap⁺ is built by permuting vmap⁻ face panels between the two
        # elements (each entry below is the neighbouring face's vmap⁻ panel).
        @test vmap⁺ == reshape(
            [
                vmap⁻[:, 1, 1]
                vmap⁻[:, 2, 1]
                vmap⁻[:, 4, 1]
                vmap⁻[:, 3, 1]
                vmap⁻[:, 5, 1]
                vmap⁻[:, 5, 2]
                vmap⁻[:, 1, 2]
                vmap⁻[:, 2, 2]
                vmap⁻[:, 4, 2]
                vmap⁻[:, 3, 2]
                vmap⁻[:, 6, 1]
                vmap⁻[:, 6, 2]
            ],
            maximum(Nfp),
            nface,
            nelem,
        )
    end
end

@testset "Get Partition" begin
    # Partitioning on a single rank (COMM_SELF) must return the ordering
    # unchanged, with trivial send/recv element-range starts.
    let
        Nelem = 150
        (so, ss, rs) = BrickMesh.getpartition(MPI.COMM_SELF, Nelem:-1:1)
        @test so == Nelem:-1:1
        @test ss == [1, Nelem + 1]
        @test rs == [1, Nelem + 1]
    end

    let
        Nelem = 111
        # NOTE(review): `code` is constructed but never used below — the call
        # partitions `Nelem:-1:1` again, making this `let` identical to the
        # one above. Verify whether `code` was meant to be the second argument
        # to `getpartition`.
        code = [ones(1, Nelem); collect(Nelem:-1:1)']
        (so, ss, rs) = BrickMesh.getpartition(MPI.COMM_SELF, Nelem:-1:1)
        @test so == Nelem:-1:1
        @test ss == [1, Nelem + 1]
        @test rs == [1, Nelem + 1]
    end
end

@testset "Partition" begin
    # On a single rank, partitioning a fully periodic brick mesh must return
    # the element-to-{vertex,coordinate,boundary} maps and face connections
    # unchanged.
    (etv, etc, etb, fc) =
        brickmesh((-1:2:1, -1:2:1, -2:1:2), (true, true, true))
    (netv, netc, netb, nfc) = partition(MPI.COMM_SELF, etv, etc, etb, fc)[1:4]
    @test etv == netv
    @test etc == netc
    @test etb == netb
    @test fc == nfc
end

@testset "Comm Mappings" begin
    # No faces marked for communication -> empty dof map, empty per-neighbor
    # ranges.
    let
        N = 1
        d = 2
        nface = 2d
        commelems = [1, 2, 5]
        commfaces = BitArray(undef, nface, length(commelems))
        commfaces .= false
        nabrtocomm = [1:2, 3:3]
        vmapC, nabrtovmapC =
            commmapping(ntuple(j -> N, d), commelems, commfaces, nabrtocomm)
        @test vmapC == Int[]
        @test nabrtovmapC == UnitRange{Int64}[1:0, 1:0]
    end

    # commfaces is nface × length(commelems): rows are faces, columns are
    # the elements in `commelems`.
    let
        N = 1
        d = 2
        nface = 2d
        commelems = [1, 2, 5]
        commfaces = BitArray([
            false false false
            false true false
            false true false
            false false true
        ])
        nabrtocomm = [1:2, 3:3]
        vmapC, nabrtovmapC =
            commmapping(ntuple(j -> N, d), commelems, commfaces, nabrtocomm)
        @test vmapC == [5, 6, 8, 19, 20]
        @test nabrtovmapC == UnitRange{Int64}[1:3, 4:5]
    end

    # 2D, single polynomial order
    let
        N = 2
        d = 2
        nface = 2d
        commelems = [2, 4, 5]
        commfaces = BitArray([
            true true false
            false false false
            false true true
            false false true
        ])
        nabrtocomm = [1:1, 2:3]
        vmapC, nabrtovmapC =
            commmapping(ntuple(j -> N, d), commelems, commfaces, nabrtocomm)
        @test vmapC == [10, 13, 16, 28, 29, 30, 31, 34, 37, 38, 39, 43, 44, 45]
        @test nabrtovmapC == UnitRange{Int64}[1:3, 4:14]
    end

    # 2D, multiple polynomial orders
    let
        N = (2, 3)
        Np = prod(N .+ 1)
        d = length(N)
        nface = 2d
        commelems = [2, 4, 5]
        #! format: off
        commfaces = BitArray([
            true  true  false false # faces of commelems[1] to send
            false false false true  # faces of commelems[2] to send
            true  false false true  # faces of commelems[3] to send
        ]')
        #! format: on
        nabrtocomm = [1:1, 2:3]
        vmapC, nabrtovmapC = commmapping(N, commelems, commfaces, nabrtocomm)
        @test vmapC == [
            (commelems[1] - 1) * Np + 1
            (commelems[1] - 1) * Np + 3
            (commelems[1] - 1) * Np + 4
            (commelems[1] - 1) * Np + 6
            (commelems[1] - 1) * Np + 7
            (commelems[1] - 1) * Np + 9
            (commelems[1] - 1) * Np + 10
            (commelems[1] - 1) * Np + 12
            (commelems[2] - 1) * Np + 10
            (commelems[2] - 1) * Np + 11
            (commelems[2] - 1) * Np + 12
            (commelems[3] - 1) * Np + 1
            (commelems[3] - 1) * Np + 4
            (commelems[3] - 1) * Np + 7
            (commelems[3] - 1) * Np + 10
            (commelems[3] - 1) * Np + 11
            (commelems[3] - 1) * Np + 12
        ]
        @test nabrtovmapC == UnitRange{Int64}[1:8, 9:17]
    end

    # 3D, single polynomial order
    let
        N = 2
        d = 3
        nface = 2d
        commelems = [3, 4, 7, 9]
        commfaces = BitArray([
            true true true false
            false true false false
            false true false false
            false true false false
            false true true true
            false true false true
        ])
        nabrtocomm = [1:1, 2:4]
        vmapC, nabrtovmapC =
            commmapping(ntuple(j -> N, d), commelems, commfaces, nabrtocomm)
        #! format: off
        @test vmapC == [
             55,  58,  61,  64,  67,  70,  73,  76,  79,
             82,  83,  84,  85,  86,  87,  88,  89,  90,
             91,  92,  93,  94,  96,  97,  98,  99, 100,
            101, 102, 103, 104, 105, 106, 107, 108, 163,
            164, 165, 166, 167, 168, 169, 170, 171, 172,
            175, 178, 181, 184, 187, 217, 218, 219, 220,
            221, 222, 223, 224, 225, 235, 236, 237, 238,
            239, 240, 241, 242, 243,
        ]
        #! format: on
        @test nabrtovmapC == UnitRange{Int64}[1:9, 10:68]
    end

    # 3D, multiple polynomial order
    let
        N = (2, 3, 4)
        Nq = N .+ 1
        Np = prod(Nq)
        d = length(N)
        nface = 2d
        commelems = [3, 4, 7, 9]
        commfaces = BitArray([
            true true true false
            false true false false
            false true false false
            false true false false
            false true true true
            false true false true
        ])
        # fmask: volume dof indices on each of the 6 faces of the reference
        # element.
        p = reshape(1:Np, Nq)
        fmask = (
            p[1, :, :][:],     # Face 1
            p[Nq[1], :, :][:], # Face 2
            p[:, 1, :][:],     # Face 3
            p[:, Nq[2], :][:], # Face 4
            p[:, :, 1][:],     # Face 5
            p[:, :, Nq[3]][:], # Face 6
        )
        nabrtocomm = [1:1, 2:4]
        vmapC, nabrtovmapC = commmapping(N, commelems, commfaces, nabrtocomm)
        #! format: off
        @test vmapC == [
            sort(unique(vcat(fmask[commfaces[:, 1]]...))) .+ (commelems[1] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 2]]...))) .+ (commelems[2] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 3]]...))) .+ (commelems[3] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 4]]...))) .+ (commelems[4] - 1) * Np
        ]
        #! format: on
        @test nabrtovmapC == UnitRange{Int64}[1:20, 21:126]
    end

    # Test mappings with polyorder 0 in one dimension
    let
        N = (0, 1, 2)
        Nq = N .+ 1
        Np = prod(Nq)
        d = length(N)
        nface = 2d
        commelems = [3, 4, 7, 9]
        commfaces = BitArray([
            true true true false
            false true false false
            false true false false
            false true false false
            false true true true
            false true false true
        ])
        p = reshape(1:Np, Nq)
        fmask = (
            p[1, :, :][:],     # Face 1
            p[Nq[1], :, :][:], # Face 2
            p[:, 1, :][:],     # Face 3
            p[:, Nq[2], :][:], # Face 4
            p[:, :, 1][:],     # Face 5
            p[:, :, Nq[3]][:], # Face 6
        )
        nabrtocomm = [1:1, 2:4]
        vmapC, nabrtovmapC = commmapping(N, commelems, commfaces, nabrtocomm)
        #! format: off
        @test vmapC == [
            sort(unique(vcat(fmask[commfaces[:, 1]]...))) .+ (commelems[1] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 2]]...))) .+ (commelems[2] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 3]]...))) .+ (commelems[3] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 4]]...))) .+ (commelems[4] - 1) * Np
        ]
        #! format: on
        @test nabrtovmapC == UnitRange{Int64}[1:6, 7:22]
    end

    let
        N = (3, 2, 0)
        Nq = N .+ 1
        Np = prod(Nq)
        d = length(N)
        nface = 2d
        commelems = [3, 4, 7, 9, 19]
        commfaces = BitArray([
            true true true false true
            false true false false false
            false true false false true
            false true false false false
            false true true true false
            false true false true false
        ])
        p = reshape(1:Np, Nq)
        fmask = (
            p[1, :, :][:],     # Face 1
            p[Nq[1], :, :][:], # Face 2
            p[:, 1, :][:],     # Face 3
            p[:, Nq[2], :][:], # Face 4
            p[:, :, 1][:],     # Face 5
            p[:, :, Nq[3]][:], # Face 6
        )
        nabrtocomm = [1:1, 2:5]
        vmapC, nabrtovmapC = commmapping(N, commelems, commfaces, nabrtocomm)
        #! format: off
        @test vmapC == [
            sort(unique(vcat(fmask[commfaces[:, 1]]...))) .+ (commelems[1] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 2]]...))) .+ (commelems[2] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 3]]...))) .+ (commelems[3] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 4]]...))) .+ (commelems[4] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 5]]...))) .+ (commelems[5] - 1) * Np
        ]
        #! format: on
        @test nabrtovmapC == UnitRange{Int64}[1:3, 4:45]
    end

    # 2D variants with polyorder 0 in one dimension.
    let
        N = (0, 2)
        Nq = N .+ 1
        Np = prod(Nq)
        d = length(N)
        nface = 2d
        commelems = [3, 4, 7, 9]
        commfaces = BitArray([
            true true true false
            false true false true
            false true false false
            false true false true
        ])
        p = reshape(1:Np, Nq)
        fmask = (
            p[1, :][:],     # Face 1
            p[Nq[1], :][:], # Face 2
            p[:, 1][:],     # Face 3
            p[:, Nq[2]][:], # Face 4
        )
        nabrtocomm = [1:1, 2:4]
        vmapC, nabrtovmapC = commmapping(N, commelems, commfaces, nabrtocomm)
        #! format: off
        @test vmapC == [
            sort(unique(vcat(fmask[commfaces[:, 1]]...))) .+ (commelems[1] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 2]]...))) .+ (commelems[2] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 3]]...))) .+ (commelems[3] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 4]]...))) .+ (commelems[4] - 1) * Np
        ]
        #! format: on
        @test nabrtovmapC == UnitRange{Int64}[1:3, 4:12]
    end

    let
        N = (3, 0)
        Nq = N .+ 1
        Np = prod(Nq)
        d = length(N)
        nface = 2d
        commelems = [3, 4, 7, 9, 19]
        commfaces = BitArray([
            true true true false true
            false true false false true
            false true false true false
            false true false true false
        ])
        p = reshape(1:Np, Nq)
        fmask = (
            p[1, :][:],     # Face 1
            p[Nq[1], :][:], # Face 2
            p[:, 1][:],     # Face 3
            p[:, Nq[2]][:], # Face 4
        )
        nabrtocomm = [1:1, 2:5]
        vmapC, nabrtovmapC = commmapping(N, commelems, commfaces, nabrtocomm)
        #! format: off
        @test vmapC == [
            sort(unique(vcat(fmask[commfaces[:, 1]]...))) .+ (commelems[1] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 2]]...))) .+ (commelems[2] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 3]]...))) .+ (commelems[3] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 4]]...))) .+ (commelems[4] - 1) * Np
            sort(unique(vcat(fmask[commfaces[:, 5]]...))) .+ (commelems[5] - 1) * Np
        ]
        #!
#! format: on
        @test nabrtovmapC == UnitRange{Int64}[1:1, 2:12]
    end
end
================================================ FILE: test/Numerics/Mesh/DSS.jl ================================================
using Test
using MPI
using ClimateMachine
ClimateMachine.init()
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.MPIStateArrays
using ClimateMachine.Mesh.DSS
using KernelAbstractions

DA = ClimateMachine.array_type()

# Serial (single-rank) direct-stiffness-summation check on a 2-element
# stacked brick: each element is filled with globally unique dof values,
# dss! is applied, and the interior / vertex / edge / face dofs are checked
# against hand-built sums of the pre-DSS values.
function test_dss()
    FT = Float64
    comm = MPI.COMM_WORLD
    crank = MPI.Comm_rank(comm)
    csize = MPI.Comm_size(comm)
    @assert csize == 1 # this test is serial-only
    N = (4, 4, 5)
    brickrange = (0:2, 5:6, 0:1) # 2 x 1 x 1 elements
    periodicity = (false, false, false)
    nvars = 1

    topl = StackedBrickTopology(
        comm,
        brickrange,
        periodicity = periodicity,
        boundary = ((1, 2), (3, 4), (5, 6)),
        connectivity = :full,
    )
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = DA,
        polynomialorder = N,
    )
    Nq = N .+ 1
    Np = prod(Nq)
    Q = MPIStateArray{FT}(
        comm,
        DA,
        Np,
        nvars,
        length(topl.elems),
        realelems = topl.realelems,
        ghostelems = topl.ghostelems,
        vmaprecv = grid.vmaprecv,
        vmapsend = grid.vmapsend,
        nabrtorank = topl.nabrtorank,
        nabrtovmaprecv = grid.nabrtovmaprecv,
        nabrtovmapsend = grid.nabrtovmapsend,
    )
    realelems = Q.realelems
    ghostelems = Q.ghostelems
    # Fill each real element with globally unique dof values 1..Np, offset
    # by the element's dof range; ghost elements are zeroed.
    ldof = DA{FT, 1}(reshape(1:Np, Np))
    for ivar in 1:nvars
        for ielem in realelems
            Q.data[:, ivar, ielem] .= ldof .+ FT((ielem - 1) * Np)
        end
        Q.data[:, ivar, ghostelems] .= FT(0)
    end
    pre_dss = Array(Q.data)
    dss!(Q, grid)
    #---------Tests-------------------------
    nodes = reshape(1:Np, Nq)
    interior = nodes[2:(Nq[1] - 1), 2:(Nq[2] - 1), 2:(Nq[3] - 1)][:]
    vertmap = Array(grid.vertmap)
    edgemap = Array(grid.edgemap)
    facemap = Array(grid.facemap)
    post_dss = Array(Q.data)
    # Interior-node counts along each edge / face direction (clamped at 0).
    ne1r = 1:max(Nq[1] - 2, 0)
    ne2r = 1:max(Nq[2] - 2, 0)
    ne3r = 1:max(Nq[3] - 2, 0)
    nf1r = 1:max((Nq[2] - 2) * (Nq[3] - 2), 0)
    nf2r = 1:max((Nq[1] - 2) * (Nq[3] - 2), 0)
    nf3r = 1:max((Nq[1] - 2) * (Nq[2] - 2), 0)
    # 2-element form: post-DSS dofs of (el1, entity i1) equal pre-DSS dofs
    # of (el2, entity i2).
    compare(el1, i1, el2, i2, efmap, rng, post, pre) =
        post[efmap[rng, i1, 1], 1, el1] == pre[efmap[rng, i2, 1], 1, el2]
    # 3-element form: post-DSS dofs of (el1, i1) equal the sum of the
    # pre-DSS contributions from (el2, i2) and (el3, i3).
    compare(el1, i1, el2, i2, el3, i3, efmap, rng, post, pre) =
        post[efmap[rng, i1, 1], 1, el1] ==
        pre[efmap[rng, i2, 1], 1, el2] .+ pre[efmap[rng, i3, 1], 1, el3]
    el1, el2 = 1, 2
    # Element # 1 --------------------------------------------------------------------------
    # interior dof should not be affected
    @test pre_dss[interior, 1, 1] == post_dss[interior, 1, 1]
    # vertex check
    @test post_dss[vertmap, 1, 1] == [
        pre_dss[vertmap[1], 1, 1],
        pre_dss[vertmap[2], 1, 1] + pre_dss[vertmap[1], 1, 2],
        pre_dss[vertmap[3], 1, 1],
        pre_dss[vertmap[4], 1, 1] + pre_dss[vertmap[3], 1, 2],
        pre_dss[vertmap[5], 1, 1],
        pre_dss[vertmap[6], 1, 1] + pre_dss[vertmap[5], 1, 2],
        pre_dss[vertmap[7], 1, 1],
        pre_dss[vertmap[8], 1, 1] + pre_dss[vertmap[7], 1, 2],
    ]
    # edge check
    @test compare(el1, 1, el1, 1, edgemap, ne1r, post_dss, pre_dss) # edge 1 (unaffected)
    @test compare(el1, 2, el1, 2, edgemap, ne1r, post_dss, pre_dss) # edge 2 (unaffected)
    @test compare(el1, 3, el1, 3, edgemap, ne1r, post_dss, pre_dss) # edge 3 (unaffected)
    @test compare(el1, 4, el1, 4, edgemap, ne1r, post_dss, pre_dss) # edge 4 (unaffected)
    @test compare(el1, 5, el1, 5, edgemap, ne2r, post_dss, pre_dss) # edge 5 (unaffected)
    @test compare(el1, 6, el1, 6, el2, 5, edgemap, ne2r, post_dss, pre_dss) # edge 6 (shared edge)
    @test compare(el1, 7, el1, 7, edgemap, ne2r, post_dss, pre_dss) # edge 7 (unaffected)
    @test compare(el1, 8, el1, 8, el2, 7, edgemap, ne2r, post_dss, pre_dss) # edge 8 (shared edge)
    @test compare(el1, 9, el1, 9, edgemap, ne3r, post_dss, pre_dss) # edge 9 (unaffected)
    @test compare(el1, 10, el1, 10, el2, 9, edgemap, ne3r, post_dss, pre_dss) # edge 10 (shared edge)
    @test compare(el1, 11, el1, 11, edgemap, ne3r, post_dss, pre_dss) # edge 11 (unaffected)
    @test compare(el1, 12, el1, 12, el2, 11, edgemap, ne3r, post_dss, pre_dss) # edge 12 (shared edge)
    # face check
    @test compare(el1, 1, el1, 1, facemap, nf1r, post_dss, pre_dss) # face 1 (unaffected)
    @test compare(el1, 2, el1, 2, el2, 1, facemap, nf1r, post_dss, pre_dss) # face 2 (shared face)
    @test compare(el1, 3, el1, 3, facemap, nf2r, post_dss, pre_dss) # face 3 (unaffected)
    @test compare(el1, 4, el1, 4, facemap, nf2r, post_dss, pre_dss) # face 4 (unaffected)
    @test compare(el1, 5, el1, 5, facemap, nf3r, post_dss, pre_dss) # face 5 (unaffected)
    @test compare(el1, 6, el1, 6, facemap, nf3r, post_dss, pre_dss) # face 6 (unaffected)
    # Element # 2 --------------------------------------------------------------------------
    # interior dof should not be affected
    @test pre_dss[interior, 1, 2] == post_dss[interior, 1, 2]
    # vertex check
    @test post_dss[vertmap, 1, 2] == [
        pre_dss[vertmap[1], 1, 2] + pre_dss[vertmap[2], 1, 1],
        pre_dss[vertmap[2], 1, 2],
        pre_dss[vertmap[3], 1, 2] + pre_dss[vertmap[4], 1, 1],
        pre_dss[vertmap[4], 1, 2],
        pre_dss[vertmap[5], 1, 2] + pre_dss[vertmap[6], 1, 1],
        pre_dss[vertmap[6], 1, 2],
        pre_dss[vertmap[7], 1, 2] + pre_dss[vertmap[8], 1, 1],
        pre_dss[vertmap[8], 1, 2],
    ]
    # edge check
    @test compare(el2, 1, el2, 1, edgemap, ne1r, post_dss, pre_dss) # edge 1 (unaffected)
    @test compare(el2, 2, el2, 2, edgemap, ne1r, post_dss, pre_dss) # edge 2 (unaffected)
    @test compare(el2, 3, el2, 3, edgemap, ne1r, post_dss, pre_dss) # edge 3 (unaffected)
    @test compare(el2, 4, el2, 4, edgemap, ne1r, post_dss, pre_dss) # edge 4 (unaffected)
    @test compare(el2, 5, el2, 5, el1, 6, edgemap, ne2r, post_dss, pre_dss) # edge 5 (shared edge)
    @test compare(el2, 6, el2, 6, edgemap, ne2r, post_dss, pre_dss) # edge 6 (unaffected)
    @test compare(el2, 7, el2, 7, el1, 8, edgemap, ne2r, post_dss, pre_dss) # edge 7 (shared edge)
    @test compare(el2, 8, el2, 8, edgemap, ne2r, post_dss, pre_dss) # edge 8 (unaffected)
    @test compare(el2, 9, el2, 9, el1, 10, edgemap, ne3r, post_dss, pre_dss) # edge 9 (shared edge)
    @test compare(el2, 10, el2, 10, edgemap, ne3r, post_dss, pre_dss) # edge 10 (unaffected)
    @test compare(el2, 11, el2, 11, el1, 12, edgemap, ne3r, post_dss, pre_dss) # edge 11 (shared edge)
    @test compare(el2, 12, el2, 12, edgemap, ne3r, post_dss, pre_dss) # edge 12 (unaffected)
    # face check
    @test compare(el2, 1, el2, 1, el1, 2, facemap, nf1r, post_dss, pre_dss) # face 1 (shared face)
    @test compare(el2, 2, el2, 2, facemap, nf1r, post_dss, pre_dss) # face 2 (unaffected)
    @test compare(el2, 3, el2, 3, facemap, nf2r, post_dss, pre_dss) # face 3 (unaffected)
    @test compare(el2, 4, el2, 4, facemap, nf2r, post_dss, pre_dss) # face 4 (unaffected)
    @test compare(el2, 5, el2, 5, facemap, nf3r, post_dss, pre_dss) # face 5 (unaffected)
    @test compare(el2, 6, el2, 6, facemap, nf3r, post_dss, pre_dss) # face 6 (unaffected)
end
test_dss()
================================================ FILE: test/Numerics/Mesh/DSS_mpi.jl ================================================
using Test
using MPI
using ClimateMachine
ClimateMachine.init()
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.MPIStateArrays
using ClimateMachine.Mesh.DSS
using KernelAbstractions

DA = ClimateMachine.array_type()

# Parallel (3-rank) DSS check on a stacked 3-D brick: every real dof is set
# to 1 and ghost dofs to 0, so after dss! each dof value equals the number
# of elements sharing it (checked per rank further below).
function test_dss_stacked_3d()
    FT = Float64
    comm = MPI.COMM_WORLD
    crank = MPI.Comm_rank(comm)
    csize = MPI.Comm_size(comm)
    @assert csize == 3 # this test requires exactly 3 MPI ranks
    N = (4, 4, 5)
    brickrange = (0:4, 5:9, 0:3)
    periodicity = (false, false, false)
    nvars = 3

    topl = StackedBrickTopology(
        comm,
        brickrange,
        periodicity = periodicity,
        boundary = ((1, 2), (3, 4), (5, 6)),
        connectivity = :full,
    )
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = DA,
        polynomialorder = N,
    )
    Nq = N .+ 1
    Np = prod(Nq)
    Q = MPIStateArray{FT}(
        comm,
        DA,
        Np,
        nvars,
        length(topl.elems),
        realelems = topl.realelems,
        ghostelems = topl.ghostelems,
        vmaprecv = grid.vmaprecv,
        vmapsend = grid.vmapsend,
        nabrtorank = topl.nabrtorank,
        nabrtovmaprecv = grid.nabrtovmaprecv,
        nabrtovmapsend = grid.nabrtovmapsend,
    )
    realelems = Q.realelems
    ghostelems = Q.ghostelems
    Q.data[:, :, realelems] .= FT(1)
    Q.data[:, :, ghostelems] .= FT(0)
    dss!(Q, grid)
    #---------Tests-------------------------
nodes = reshape(1:Np, Nq) interior = nodes[2:(Nq[1] - 1), 2:(Nq[2] - 1), 2:(Nq[3] - 1)][:] vertmap = Array(grid.vertmap) edgemap = Array(grid.edgemap) facemap = Array(grid.facemap) data = Array(Q.data) compare(data, ivar, iel, efmap) = unique(data[setdiff(efmap, [-1]), ivar, lel]) lel = 1 # local element number for each process if crank == 0 for ivar in 1:nvars # local element #1, global elememt # 1 # interior dof should not be affected idof = unique(data[interior, ivar, lel]) @test idof == [FT(1)] # vertex dof check @test data[vertmap, ivar, lel] == FT.([1, 2, 2, 4, 2, 4, 4, 8]) # edge dof check @test compare(data, ivar, lel, edgemap[:, 1, 1]) == [FT(1)] # edge # 1 @test compare(data, ivar, lel, edgemap[:, 2, 1]) == [FT(2)] # edge # 2 @test compare(data, ivar, lel, edgemap[:, 3, 1]) == [FT(2)] # edge # 3 @test compare(data, ivar, lel, edgemap[:, 4, 1]) == [FT(4)] # edge # 4 @test compare(data, ivar, lel, edgemap[:, 5, 1]) == [FT(1)] # edge # 5 @test compare(data, ivar, lel, edgemap[:, 6, 1]) == [FT(2)] # edge # 6 @test compare(data, ivar, lel, edgemap[:, 7, 1]) == [FT(2)] # edge # 7 @test compare(data, ivar, lel, edgemap[:, 8, 1]) == [FT(4)] # edge # 8 @test compare(data, ivar, lel, edgemap[:, 9, 1]) == [FT(1)] # edge # 9 @test compare(data, ivar, lel, edgemap[:, 10, 1]) == [FT(2)] # edge # 10 @test compare(data, ivar, lel, edgemap[:, 11, 1]) == [FT(2)] # edge # 11 @test compare(data, ivar, lel, edgemap[:, 12, 1]) == [FT(4)] # edge # 12 # face dof check @test compare(data, ivar, lel, facemap[:, 1, 1]) == [FT(1)] # face # 1 @test compare(data, ivar, lel, facemap[:, 2, 1]) == [FT(2)] # face # 2 @test compare(data, ivar, lel, facemap[:, 3, 1]) == [FT(1)] # face # 3 @test compare(data, ivar, lel, facemap[:, 4, 1]) == [FT(2)] # face # 4 @test compare(data, ivar, lel, facemap[:, 5, 1]) == [FT(1)] # face # 5 @test compare(data, ivar, lel, facemap[:, 6, 1]) == [FT(2)] # face # 6 end elseif crank == 1 for ivar in 1:nvars # local element #1, global elememt # 6 (in 2D) # 
interior dof should not be affected idof = unique(data[interior, ivar, lel]) @test idof == [FT(1)] # vertex dof check @test data[vertmap, ivar, lel] == FT.([2, 4, 1, 2, 4, 8, 2, 4]) # edge dof check @test compare(data, ivar, lel, edgemap[:, 1, 1]) == [FT(2)] # edge # 1 @test compare(data, ivar, lel, edgemap[:, 2, 1]) == [FT(1)] # edge # 2 @test compare(data, ivar, lel, edgemap[:, 3, 1]) == [FT(4)] # edge # 3 @test compare(data, ivar, lel, edgemap[:, 4, 1]) == [FT(2)] # edge # 4 @test compare(data, ivar, lel, edgemap[:, 5, 1]) == [FT(1)] # edge # 5 @test compare(data, ivar, lel, edgemap[:, 6, 1]) == [FT(2)] # edge # 6 @test compare(data, ivar, lel, edgemap[:, 7, 1]) == [FT(2)] # edge # 7 @test compare(data, ivar, lel, edgemap[:, 8, 1]) == [FT(4)] # edge # 8 @test compare(data, ivar, lel, edgemap[:, 9, 1]) == [FT(2)] # edge # 9 @test compare(data, ivar, lel, edgemap[:, 10, 1]) == [FT(4)] # edge # 10 @test compare(data, ivar, lel, edgemap[:, 11, 1]) == [FT(1)] # edge # 11 @test compare(data, ivar, lel, edgemap[:, 12, 1]) == [FT(2)] # edge # 12 # face dof check @test compare(data, ivar, lel, facemap[:, 1, 1]) == [FT(1)] # face # 1 @test compare(data, ivar, lel, facemap[:, 2, 1]) == [FT(2)] # face # 2 @test compare(data, ivar, lel, facemap[:, 3, 1]) == [FT(2)] # face # 3 @test compare(data, ivar, lel, facemap[:, 4, 1]) == [FT(1)] # face # 4 @test compare(data, ivar, lel, facemap[:, 5, 1]) == [FT(1)] # face # 5 @test compare(data, ivar, lel, facemap[:, 6, 1]) == [FT(2)] # face # 6 end else # crank == 2 for ivar in 1:nvars # local element #1, global elememt # 11 (in 2D) # interior dof should not be affected idof = unique(data[interior, ivar, lel]) @test idof == [FT(1)] # vertex dof check @test data[vertmap, ivar, lel] == FT.([4, 2, 2, 1, 8, 4, 4, 2]) # edge dof check @test compare(data, ivar, lel, edgemap[:, 1, 1]) == [FT(2)] # edge # 1 @test compare(data, ivar, lel, edgemap[:, 2, 1]) == [FT(1)] # edge # 2 @test compare(data, ivar, lel, edgemap[:, 3, 1]) == [FT(4)] # edge 
# 3 @test compare(data, ivar, lel, edgemap[:, 4, 1]) == [FT(2)] # edge # 4 @test compare(data, ivar, lel, edgemap[:, 5, 1]) == [FT(2)] # edge # 5 @test compare(data, ivar, lel, edgemap[:, 6, 1]) == [FT(1)] # edge # 6 @test compare(data, ivar, lel, edgemap[:, 7, 1]) == [FT(4)] # edge # 7 @test compare(data, ivar, lel, edgemap[:, 8, 1]) == [FT(2)] # edge # 8 @test compare(data, ivar, lel, edgemap[:, 9, 1]) == [FT(4)] # edge # 9 @test compare(data, ivar, lel, edgemap[:, 10, 1]) == [FT(2)] # edge # 10 @test compare(data, ivar, lel, edgemap[:, 11, 1]) == [FT(2)] # edge # 11 @test compare(data, ivar, lel, edgemap[:, 12, 1]) == [FT(1)] # edge # 12 # face dof check @test compare(data, ivar, lel, facemap[:, 1, 1]) == [FT(2)] # face # 1 @test compare(data, ivar, lel, facemap[:, 2, 1]) == [FT(1)] # face # 2 @test compare(data, ivar, lel, facemap[:, 3, 1]) == [FT(2)] # face # 3 @test compare(data, ivar, lel, facemap[:, 4, 1]) == [FT(1)] # face # 4 @test compare(data, ivar, lel, facemap[:, 5, 1]) == [FT(1)] # face # 5 @test compare(data, ivar, lel, facemap[:, 6, 1]) == [FT(2)] # face # 6 end end end test_dss_stacked_3d() ================================================ FILE: test/Numerics/Mesh/Elements.jl ================================================ using ClimateMachine.Mesh.Elements using GaussQuadrature using LinearAlgebra using Test @testset "GaussQuadrature" begin for T in (Float32, Float64, BigFloat) let x, w = GaussQuadrature.legendre(T, 1) @test iszero(x) @test w ≈ [2 * one(T)] end let endpt = GaussQuadrature.left x, w = GaussQuadrature.legendre(T, 1, endpt) @test x ≈ [-one(T)] @test w ≈ [2 * one(T)] end let endpt = GaussQuadrature.right x, w = GaussQuadrature.legendre(T, 1, endpt) @test x ≈ [one(T)] @test w ≈ [2 * one(T)] end let endpt = GaussQuadrature.left x, w = GaussQuadrature.legendre(T, 2, endpt) @test x ≈ [-one(T); T(1 // 3)] @test w ≈ [T(1 // 2); T(3 // 2)] end let endpt = GaussQuadrature.right x, w = GaussQuadrature.legendre(T, 2, endpt) @test x ≈ [T(-1 // 
3); one(T)] @test w ≈ [T(3 // 2); T(1 // 2)] end end let err = ErrorException("Must have at least two points for both ends.") endpt = GaussQuadrature.both @test_throws err GaussQuadrature.legendre(1, endpt) end let T = Float64 n = 100 endpt = GaussQuadrature.both a, b = GaussQuadrature.legendre_coefs(T, n) err = ErrorException( "No convergence after 1 iterations " * "(try increasing maxits)", ) @test_throws err GaussQuadrature.custom_gauss_rule( -one(T), one(T), a, b, endpt, 1, ) end end @testset "Operators" begin P5(r::AbstractVector{T}) where {T} = T(1) / T(8) * (T(15) * r - T(70) * r .^ 3 + T(63) * r .^ 5) P6(r::AbstractVector{T}) where {T} = T(1) / T(16) * (-T(5) .+ T(105) * r .^ 2 - T(315) * r .^ 4 + T(231) * r .^ 6) DP6(r::AbstractVector{T}) where {T} = T(1) / T(16) * (T(2 * 105) * r - T(4 * 315) * r .^ 3 + T(6 * 231) * r .^ 5) IPN(::Type{T}, N) where {T} = T(2) / T(2 * N + 1) N = 6 for test_type in (Float32, Float64, BigFloat) r, w = Elements.lglpoints(test_type, N) D = Elements.spectralderivative(r) x = LinRange{test_type}(-1, 1, 101) I = Elements.interpolationmatrix(r, x) @test sum(P5(r) .^ 2 .* w) ≈ IPN(test_type, 5) @test D * P6(r) ≈ DP6(r) @test I * P6(r) ≈ P6(x) end for test_type in (Float32, Float64, BigFloat) r, w = Elements.glpoints(test_type, N) D = Elements.spectralderivative(r) @test sum(P5(r) .^ 2 .* w) ≈ IPN(test_type, 5) @test sum(P6(r) .^ 2 .* w) ≈ IPN(test_type, 6) @test D * P6(r) ≈ DP6(r) end end @testset "Jacobip" begin for T in (Float32, Float64, BigFloat) let α, β, N = T(0), T(0), 3 # α, β (for Legendre polynomials) & polynomial order x, wt = Elements.lglpoints(T, N + 1) # lgl points for polynomial order N V = Elements.jacobip(α, β, N, x) # compare with orthonormalized exact solution # https://en.wikipedia.org/wiki/Legendre_polynomials V_exact = similar(V) V_exact[:, 1] .= 1 V_exact[:, 2] .= x V_exact[:, 3] .= (3 * x .^ 2 .- 1) / 2 V_exact[:, 4] .= (5 * x .^ 3 .- 3 * x) / 2 scale = 1 ./ sqrt.(diag(V_exact' * Diagonal(wt) * V_exact)) 
V_exact = V_exact * Diagonal(scale) @test V ≈ V_exact end end end ================================================ FILE: test/Numerics/Mesh/Geometry.jl ================================================ using Test, MPI using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Geometry using StaticArrays MPI.Initialized() || MPI.Init() @testset "LocalGeometry" begin FT = Float64 ArrayType = Array xmin = 0 ymin = 0 zmin = 0 xmax = 2000 ymax = 400 zmax = 2000 Ne = (20, 2, 20) polynomialorder = 4 brickrange = ( range(FT(xmin); length = Ne[1] + 1, stop = xmax), range(FT(ymin); length = Ne[2] + 1, stop = ymax), range(FT(zmin); length = Ne[3] + 1, stop = zmax), ) topl = StackedBrickTopology( MPI.COMM_SELF, brickrange, periodicity = (true, true, false), ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = polynomialorder, ) S = ( (xmax - xmin) / (polynomialorder * Ne[1]), (ymax - ymin) / (polynomialorder * Ne[2]), (zmax - zmin) / (polynomialorder * Ne[3]), ) Savg = cbrt(prod(S)) Shoriavg = (S[1] + S[2]) / 2 M = SDiagonal(S .^ -2) N = (polynomialorder, polynomialorder) Np = (polynomialorder + 1)^3 for e in 1:size(grid.vgeo, 3) for n in 1:size(grid.vgeo, 1) g = LocalGeometry{Np, N}(grid.vgeo, n, e) @test lengthscale(g) ≈ Savg @test lengthscale_horizontal(g) ≈ Shoriavg end end end ================================================ FILE: test/Numerics/Mesh/Grids.jl ================================================ using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Topologies: BrickTopology using Test using MPI MPI.Initialized() || MPI.Init() @testset "2-D Mass matrix" begin topology = BrickTopology(MPI.COMM_WORLD, ntuple(_ -> (-1, 1), 2);) @testset for N in ((2, 2), (2, 3), (3, 2)) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) grid = DiscontinuousSpectralElementGrid( topology, FloatType = Float64, DeviceArray = Array, polynomialorder = N, ) @views begin ω = grid.ω M = grid.vgeo[:, 
Grids._M, 1] MH = grid.vgeo[:, Grids._MH, 1] sM = grid.sgeo[Grids._sM, :, :, 1] M = reshape(M, Nq[1], Nq[2]) @test M ≈ [ω[1][i] * ω[2][j] for i in 1:Nq[1], j in 1:Nq[2]] MH = reshape(MH, Nq[1], Nq[2]) @test MH ≈ [ω[1][i] for i in 1:Nq[1], j in 1:Nq[2]] sM12 = sM[1:Nfp[1], 1:2] @test sM12[:, 1] ≈ [ω[2][j] for j in 1:Nq[2]] @test sM12[:, 2] ≈ [ω[2][j] for j in 1:Nq[2]] sM34 = sM[1:Nfp[2], 3:4] @test sM34[:, 1] ≈ [ω[1][j] for j in 1:Nq[1]] @test sM34[:, 2] ≈ [ω[1][j] for j in 1:Nq[1]] end end end @testset "3-D Mass matrix" begin topology = BrickTopology(MPI.COMM_WORLD, ntuple(_ -> (-1, 1), 3);) @testset for N in ((2, 2, 2), (2, 3, 4), (4, 3, 2), (2, 4, 3)) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) grid = DiscontinuousSpectralElementGrid( topology, FloatType = Float64, DeviceArray = Array, polynomialorder = N, ) @views begin ω = grid.ω M = grid.vgeo[:, Grids._M, 1] MH = grid.vgeo[:, Grids._MH, 1] sM = grid.sgeo[Grids._sM, :, :, 1] M = reshape(M, Nq[1], Nq[2], Nq[3]) @test M ≈ [ ω[1][i] * ω[2][j] * ω[3][k] for i in 1:Nq[1], j in 1:Nq[2], k in 1:Nq[3] ] MH = reshape(MH, Nq[1], Nq[2], Nq[3]) @test MH ≈ [ ω[1][i] * ω[2][j] for i in 1:Nq[1], j in 1:Nq[2], k in 1:Nq[3] ] sM12 = reshape(sM[1:Nfp[1], 1:2], Nq[2], Nq[3], 2) @test sM12[:, :, 1] ≈ [ω[2][j] * ω[3][k] for j in 1:Nq[2], k in 1:Nq[3]] @test sM12[:, :, 2] ≈ [ω[2][j] * ω[3][k] for j in 1:Nq[2], k in 1:Nq[3]] sM34 = reshape(sM[1:Nfp[2], 3:4], Nq[1], Nq[3], 2) @test sM34[:, :, 1] ≈ [ω[1][i] * ω[3][k] for i in 1:Nq[1], k in 1:Nq[3]] @test sM34[:, :, 2] ≈ [ω[1][i] * ω[3][k] for i in 1:Nq[1], k in 1:Nq[3]] sM56 = reshape(sM[1:Nfp[3], 5:6], Nq[1], Nq[2], 2) @test sM56[:, :, 1] ≈ [ω[1][i] * ω[2][j] for i in 1:Nq[1], j in 1:Nq[2]] @test sM56[:, :, 2] ≈ [ω[1][i] * ω[2][j] for i in 1:Nq[1], j in 1:Nq[2]] end end end ================================================ FILE: test/Numerics/Mesh/Metrics.jl ================================================ using ClimateMachine.Mesh.Elements using ClimateMachine.Mesh.Grids using 
ClimateMachine.Mesh.GeometricFactors using ClimateMachine.Mesh.Metrics using LinearAlgebra: I using Test using Random: MersenneTwister const _ξ1x1, _ξ2x1, _ξ3x1 = Grids._ξ1x1, Grids._ξ2x1, Grids._ξ3x1 const _ξ1x2, _ξ2x2, _ξ3x2 = Grids._ξ1x2, Grids._ξ2x2, Grids._ξ3x2 const _ξ1x3, _ξ2x3, _ξ3x3 = Grids._ξ1x3, Grids._ξ2x3, Grids._ξ3x3 const _M, _MI = Grids._M, Grids._MI const _x1, _x2, _x3 = Grids._x1, Grids._x2, Grids._x3 const _JcV = Grids._JcV const _nvgeo = Grids._nvgeo const _n1, _n2, _n3 = Grids._n1, Grids._n2, Grids._n3 const _sM, _vMI = Grids._sM, Grids._vMI const _nsgeo = Grids._nsgeo @testset "1-D Metric terms" begin for FT in (Float32, Float64) #{{{ let N = (4,) Nq = N .+ 1 Np = prod(Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple(j -> Elements.lglpoints(FT, N[j]), dim) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) dim = 1 e2c = Array{FT, 3}(undef, 1, 2, 2) e2c[:, :, 1] = [-1 0] e2c[:, :, 2] = [0 10] nelem = size(e2c, 3) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, (x...) 
-> identity(x)) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) @test vgeo[:, _x1, 1] ≈ (ξ[1] .- 1) / 2 @test vgeo[:, _x1, 2] ≈ 5 * (ξ[1] .+ 1) @test vgeo[:, _M, 1] ≈ ω[1] .* ones(FT, Nq) / 2 @test vgeo[:, _M, 2] ≈ 5 * ω[1] .* ones(FT, Nq) @test vgeo[:, _ξ1x1, 1] ≈ 2 * ones(FT, Nq) @test vgeo[:, _ξ1x1, 2] ≈ ones(FT, Nq) / 5 @test sgeo.n1[1, 1, :] ≈ -ones(FT, nelem) @test sgeo.n1[1, 2, :] ≈ ones(FT, nelem) @test sgeo.sωJ[1, 1, :] ≈ ones(FT, nelem) @test sgeo.sωJ[1, 2, :] ≈ ones(FT, nelem) end #}}} end # N = 0 test for FT in (Float32, Float64) #{{{ let N = (0,) Nq = N .+ 1 Np = prod(Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple(j -> Elements.glpoints(FT, N[j]), dim) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) dim = 1 e2c = Array{FT, 3}(undef, 1, 2, 2) e2c[:, :, 1] = [-1 0] e2c[:, :, 2] = [0 10] nelem = size(e2c, 3) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, (x...) 
-> identity(x)) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) @test vgeo[1, _x1, 1] ≈ sum(e2c[:, :, 1]) / 2 @test vgeo[1, _x1, 2] ≈ sum(e2c[:, :, 2]) / 2 @test vgeo[:, _M, 1] ≈ ω[1] .* ones(FT, Nq) / 2 @test vgeo[:, _M, 2] ≈ 5 * ω[1] .* ones(FT, Nq) @test vgeo[:, _ξ1x1, 1] ≈ 2 * ones(FT, Nq) @test vgeo[:, _ξ1x1, 2] ≈ ones(FT, Nq) / 5 @test sgeo.n1[1, 1, :] ≈ -ones(FT, nelem) @test sgeo.n1[1, 2, :] ≈ ones(FT, nelem) @test sgeo.sωJ[1, 1, :] ≈ ones(FT, nelem) @test sgeo.sωJ[1, 2, :] ≈ ones(FT, nelem) end #}}} end end @testset "2-D Metric terms" begin for FT in (Float32, Float64), N in ((4, 4), (4, 6), (6, 4)) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple(j -> Elements.lglpoints(FT, N[j]), dim) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) # linear and rotation test #{{{ let e2c = Array{FT, 3}(undef, 2, 4, 4) e2c[:, :, 1] = [ 0 2 0 2 0 0 2 2 ] e2c[:, :, 2] = [ 2 2 0 0 0 2 0 2 ] e2c[:, :, 3] = [ 2 0 2 0 2 2 0 0 ] e2c[:, :, 4] = [ 0 0 2 2 2 0 2 0 ] nelem = size(e2c, 3) x_exact = Array{FT, 3}(undef, Nq..., nelem) x_exact[:, :, 1] .= 1 .+ ξ[1] x_exact[:, :, 2] .= 1 .- ξ[2]' x_exact[:, :, 3] .= 1 .- ξ[1] x_exact[:, :, 4] .= 1 .+ ξ[2]' y_exact = Array{FT, 3}(undef, Nq..., nelem) y_exact[:, :, 1] .= 1 .+ ξ[2]' y_exact[:, :, 2] .= 1 .+ ξ[1] y_exact[:, :, 3] .= 1 .- ξ[2]' y_exact[:, :, 4] .= 1 .- ξ[1] M_exact = ones(FT, Nq..., nelem) .* reshape(kron(reverse(ω)...), Nq..., 1) ξ1x1_exact = zeros(FT, Nq..., nelem) ξ1x1_exact[:, :, 1] .= 1 ξ1x1_exact[:, :, 3] .= -1 ξ1x2_exact = zeros(FT, Nq..., nelem) ξ1x2_exact[:, :, 2] .= 1 ξ1x2_exact[:, :, 4] .= -1 ξ2x1_exact = zeros(FT, Nq..., nelem) ξ2x1_exact[:, :, 2] .= -1 ξ2x1_exact[:, :, 4] .= 1 ξ2x2_exact = zeros(FT, Nq..., nelem) ξ2x2_exact[:, :, 1] .= 1 ξ2x2_exact[:, :, 
3] .= -1 sM_exact = fill(FT(NaN), maximum(Nfp), nface, nelem) sM_exact[1:Nfp[1], 1, :] .= 1 .* ω[2] sM_exact[1:Nfp[1], 2, :] .= 1 .* ω[2] sM_exact[1:Nfp[2], 3, :] .= 1 .* ω[1] sM_exact[1:Nfp[2], 4, :] .= 1 .* ω[1] nx_exact = fill(FT(NaN), maximum(Nfp), nface, nelem) nx_exact[1:Nfp[1], 1:2, :] .= 0 nx_exact[1:Nfp[2], 3:4, :] .= 0 nx_exact[1:Nfp[1], 1, 1] .= -1 nx_exact[1:Nfp[1], 2, 1] .= 1 nx_exact[1:Nfp[2], 3, 2] .= 1 nx_exact[1:Nfp[2], 4, 2] .= -1 nx_exact[1:Nfp[1], 1, 3] .= 1 nx_exact[1:Nfp[1], 2, 3] .= -1 nx_exact[1:Nfp[2], 3, 4] .= -1 nx_exact[1:Nfp[2], 4, 4] .= 1 ny_exact = fill(FT(NaN), maximum(Nfp), nface, nelem) ny_exact[1:Nfp[1], 1:2, :] .= 0 ny_exact[1:Nfp[2], 3:4, :] .= 0 ny_exact[1:Nfp[2], 3, 1] .= -1 ny_exact[1:Nfp[2], 4, 1] .= 1 ny_exact[1:Nfp[1], 1, 2] .= -1 ny_exact[1:Nfp[1], 2, 2] .= 1 ny_exact[1:Nfp[2], 3, 3] .= 1 ny_exact[1:Nfp[2], 4, 3] .= -1 ny_exact[1:Nfp[1], 1, 4] .= 1 ny_exact[1:Nfp[1], 2, 4] .= -1 (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, (x...) -> identity(x)) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) @test (@view vgeo[:, :, _x1, :]) ≈ x_exact @test (@view vgeo[:, :, _x2, :]) ≈ y_exact @test (@view vgeo[:, :, _M, :]) ≈ M_exact @test (@view vgeo[:, :, _ξ1x1, :]) ≈ ξ1x1_exact @test (@view vgeo[:, :, _ξ1x2, :]) ≈ ξ1x2_exact @test (@view vgeo[:, :, _ξ2x1, :]) ≈ ξ2x1_exact @test (@view vgeo[:, :, _ξ2x2, :]) ≈ ξ2x2_exact msk = isfinite.(sM_exact) @test sgeo.sωJ[:, :, :][msk] ≈ sM_exact[msk] @test sgeo.n1[:, :, :][msk] ≈ nx_exact[msk] @test sgeo.n2[:, :, :][msk] ≈ ny_exact[msk] nothing end #}}} # Polynomial 2-D test #{{{ let f(ξ1, ξ2) = ( 9 .* ξ1 - (1 .+ ξ1) .* ξ2 .^ 2 + (ξ1 .- 1) .^ 2 .* (1 .- ξ2 .^ 2 .+ ξ2 .^ 3), 10 .* ξ2 .+ ξ1 .^ 4 .* (1 .- ξ2) .+ ξ1 .^ 2 .* ξ2 .* (1 .+ ξ2), ) fx1ξ1(ξ1, ξ2) = 7 .+ ξ2 .^ 2 .- 2 .* ξ2 .^ 3 .+ 2 .* ξ1 .* (1 .- ξ2 .^ 2 .+ ξ2 .^ 3) fx1ξ2(ξ1, ξ2) = -2 .* (1 .+ ξ1) .* ξ2 .+ 
(-1 .+ ξ1) .^ 2 .* ξ2 .* (-2 .+ 3 .* ξ2) fx2ξ1(ξ1, ξ2) = -4 .* ξ1 .^ 3 .* (-1 .+ ξ2) .+ 2 .* ξ1 .* ξ2 .* (1 .+ ξ2) fx2ξ2(ξ1, ξ2) = 10 .- ξ1 .^ 4 .+ ξ1 .^ 2 .* (1 .+ 2 .* ξ2) e2c = Array{FT, 3}(undef, 2, 4, 1) e2c[:, :, 1] = [-1 1 -1 1; -1 -1 1 1] nelem = size(e2c, 3) # Create the metrics (x1ξ1, x1ξ2, x2ξ1, x2ξ2) = let (vgeo, _) = Grids.computegeometry( e2c, D, ξ, ω, (x...) -> identity(x), ) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) ξ1, ξ2 = vgeo[:, :, _x1, :], vgeo[:, :, _x2, :] (fx1ξ1(ξ1, ξ2), fx1ξ2(ξ1, ξ2), fx2ξ1(ξ1, ξ2), fx2ξ2(ξ1, ξ2)) end J = (x1ξ1 .* x2ξ2 - x1ξ2 .* x2ξ1) M = J .* reshape(kron(reverse(ω)...), Nq..., 1) meshwarp(ξ1, ξ2, _) = (f(ξ1, ξ2)..., 0) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, meshwarp) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) x1 = @view vgeo[:, :, _x1, :] x2 = @view vgeo[:, :, _x2, :] @test M ≈ (@view vgeo[:, :, _M, :]) @test (@view vgeo[:, :, _ξ1x1, :]) ≈ x2ξ2 ./ J @test (@view vgeo[:, :, _ξ2x1, :]) ≈ -x2ξ1 ./ J @test (@view vgeo[:, :, _ξ1x2, :]) ≈ -x1ξ2 ./ J @test (@view vgeo[:, :, _ξ2x2, :]) ≈ x1ξ1 ./ J # check the normals? 
sM = @view sgeo.sωJ[:, :, :]
n1 = @view sgeo.n1[:, :, :]
n2 = @view sgeo.n2[:, :, :]
# Every surface normal (n1, n2) must be a unit vector on each face node
# (faces 1:2 are the ξ1 = ±1 faces with Nfp[1] nodes; faces 3:4 the ξ2 = ±1
# faces with Nfp[2] nodes)
@test all(hypot.(n1[1:Nfp[1], 1:2, :], n2[1:Nfp[1], 1:2, :]) .≈ 1)
@test all(hypot.(n1[1:Nfp[2], 3:4, :], n2[1:Nfp[2], 3:4, :]) .≈ 1)
# The surface mass times the normal components must equal the analytic
# covariant metric terms sampled on the face, weighted by the 1-D quadrature
# weights ω of the tangential direction (sign encodes the outward direction)
@test sM[1:Nfp[1], 1, :] .* n1[1:Nfp[1], 1, :] ≈ -x2ξ2[1, :, :] .* ω[2]
@test sM[1:Nfp[1], 1, :] .* n2[1:Nfp[1], 1, :] ≈ x1ξ2[1, :, :] .* ω[2]
@test sM[1:Nfp[1], 2, :] .* n1[1:Nfp[1], 2, :] ≈ x2ξ2[Nq[1], :, :] .* ω[2]
@test sM[1:Nfp[1], 2, :] .* n2[1:Nfp[1], 2, :] ≈ -x1ξ2[Nq[1], :, :] .* ω[2]
@test sM[1:Nfp[2], 3, :] .* n1[1:Nfp[2], 3, :] ≈ x2ξ1[:, 1, :] .* ω[1]
@test sM[1:Nfp[2], 3, :] .* n2[1:Nfp[2], 3, :] ≈ -x1ξ1[:, 1, :] .* ω[1]
@test sM[1:Nfp[2], 4, :] .* n1[1:Nfp[2], 4, :] ≈ -x2ξ1[:, Nq[2], :] .* ω[1]
@test sM[1:Nfp[2], 4, :] .* n2[1:Nfp[2], 4, :] ≈ x1ξ1[:, Nq[2], :] .* ω[1]
end
#}}}

# Constant preserving test
#{{{
# Warp the element by a random (seeded, hence reproducible) perturbation and
# check the discrete metric identities: the divergence of the contravariant
# metric terms, computed with the differentiation matrices D, must vanish to
# roundoff ("free-stream preservation").
let
    rng = MersenneTwister(777)
    f(ξ1, ξ2) = (
        ξ1 + (ξ1 * ξ2 * rand(rng) + rand(rng)) / 10,
        ξ2 + (ξ1 * ξ2 * rand(rng) + rand(rng)) / 10,
    )

    e2c = Array{FT, 3}(undef, 2, 4, 1)
    e2c[:, :, 1] = [-1 1 -1 1; -1 -1 1 1]
    nelem = size(e2c, 3)

    meshwarp(ξ1, ξ2, _) = (f(ξ1, ξ2)..., 0)
    (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, meshwarp)
    vgeo = reshape(
        vgeo.array,
        Nq...,
        # - 1 after fieldcount is to remove the `array` field from the array allocation
        fieldcount(GeometricFactors.VolumeGeometry) - 1,
        nelem,
    )
    x1 = @view vgeo[:, :, _x1, :]
    x2 = @view vgeo[:, :, _x2, :]

    (Cx1, Cx2) = (zeros(FT, Nq...), zeros(FT, Nq...))

    # Recover the Jacobian J from the stored mass matrix M = J .* (ω ⊗ ω)
    J = vgeo[:, :, _M, :] ./ reshape(kron(reverse(ω)...), Nq..., 1)
    ξ1x1 = @view vgeo[:, :, _ξ1x1, :]
    ξ1x2 = @view vgeo[:, :, _ξ1x2, :]
    ξ2x1 = @view vgeo[:, :, _ξ2x1, :]
    ξ2x2 = @view vgeo[:, :, _ξ2x2, :]

    e = 1
    # Accumulate D1 * (J ξ1xk) + D2 * (J ξ2xk); should be ≈ 0 for each k
    for n in 1:Nq[2]
        Cx1[:, n] += D[1] * (J[:, n, e] .* ξ1x1[:, n, e])
        Cx2[:, n] += D[1] * (J[:, n, e] .* ξ1x2[:, n, e])
    end
    for n in 1:Nq[1]
        Cx1[n, :] += D[2] * (J[n, :, e] .* ξ2x1[n, :, e])
        Cx2[n, :] += D[2] * (J[n, :, e] .* ξ2x2[n, :, e])
    end
    @test maximum(abs.(Cx1)) ≤ 100 * eps(FT)
    @test maximum(abs.(Cx2)) ≤ 100 * eps(FT)
end
#}}} end #N = 0 test #{{{ let for FT in (Float32, Float64) N = (2, 0) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple( j -> Nq[j] == 1 ? Elements.glpoints(FT, N[j]) : Elements.lglpoints(FT, N[j]), dim, ) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) fx1(ξ1, ξ2) = ξ1 + (1 + ξ1)^2 * ξ2 / 10 fx1ξ1(ξ1, ξ2) = 1 + 2 * (1 + ξ1) * ξ2 / 10 fx1ξ2(ξ1, ξ2) = (1 + ξ1)^2 / 10 fx2(ξ1, ξ2) = ξ2 - (1 + ξ1)^2 fx2ξ1(ξ1, ξ2) = -2 * (1 + ξ1) fx2ξ2(ξ1, ξ2) = 1 e2c = Array{FT, 3}(undef, 2, 4, 1) e2c[:, :, 1] = [-1 1 -1 1; -1 -1 1 1] nelem = size(e2c, 3) # Create the metrics (x1, x2, x1ξ1, x1ξ2, x2ξ1, x2ξ2) = let vgeo = VolumeGeometry(FT, Nq, nelem) Metrics.creategrid!(vgeo, e2c, ξ) ( fx1.(vgeo.x1, vgeo.x2), fx2.(vgeo.x1, vgeo.x2), fx1ξ1.(vgeo.x1, vgeo.x2), fx1ξ2.(vgeo.x1, vgeo.x2), fx2ξ1.(vgeo.x1, vgeo.x2), fx2ξ2.(vgeo.x1, vgeo.x2), ) end J = (x1ξ1 .* x2ξ2 - x1ξ2 .* x2ξ1) M = J .* reshape(kron(reverse(ω)...), Nq..., 1) meshwarp(ξ1, ξ2, _) = (fx1(ξ1, ξ2), fx2(ξ1, ξ2), 0) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, meshwarp) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) @test x1 ≈ vgeo[:, :, _x1, :] @test x2 ≈ vgeo[:, :, _x2, :] @test M ≈ vgeo[:, :, _M, :] @test (@view vgeo[:, :, _ξ2x1, :]) .* J ≈ -x2ξ1 @test (@view vgeo[:, :, _ξ2x2, :]) .* J ≈ x1ξ1 @test (@view vgeo[:, :, _ξ1x1, :]) .* J ≈ x2ξ2 @test (@view vgeo[:, :, _ξ1x2, :]) .* J ≈ -x1ξ2 # check the normals? 
sM = @view sgeo.sωJ[:, :, :]
n1 = @view sgeo.n1[:, :, :]
n2 = @view sgeo.n2[:, :, :]
# Surface normals must be unit vectors on every face node
@test all(hypot.(n1[1:Nfp[1], 1:2, :], n2[1:Nfp[1], 1:2, :]) .≈ 1)
@test all(hypot.(n1[1:Nfp[2], 3:4, :], n2[1:Nfp[2], 3:4, :]) .≈ 1)
# Faces 1 and 2 (ξ1 = ±1): surface mass × normal against the analytic
# metric terms weighted by the ξ2-direction quadrature weights
@test sM[1:Nfp[1], 1, :] .* n1[1:Nfp[1], 1, :] ≈ -x2ξ2[1, :, :] .* ω[2]
@test sM[1:Nfp[1], 1, :] .* n2[1:Nfp[1], 1, :] ≈ x1ξ2[1, :, :] .* ω[2]
@test sM[1:Nfp[1], 2, :] .* n1[1:Nfp[1], 2, :] ≈ x2ξ2[Nq[1], :, :] .* ω[2]
@test sM[1:Nfp[1], 2, :] .* n2[1:Nfp[1], 2, :] ≈ -x1ξ2[Nq[1], :, :] .* ω[2]
# for these faces we need the N = 1 metrics
# (the ξ2 direction is collapsed to a single Gauss point, Nq[2] == 1, so the
# ξ1-derivatives on the ξ2 = ±1 faces are recomputed on a degree-1 LGL grid
# in that direction)
(x1ξ1, x2ξ1) = let
    @assert Nq[2] == 1 && Nq[1] != 1
    Nq_N1 = max.(2, Nq)
    vgeo_N1 = VolumeGeometry(FT, Nq_N1, nelem)
    Metrics.creategrid!(
        vgeo_N1,
        e2c,
        (ξ[1], Elements.lglpoints(FT, 1)[1]),
    )
    x1 = reshape(vgeo_N1.x1, (Nq_N1..., nelem))
    x2 = reshape(vgeo_N1.x2, (Nq_N1..., nelem))
    (fx1ξ1.(x1, x2), fx2ξ1.(x1, x2))
end
# Faces 3 and 4 (ξ2 = ±1), using the N = 1 metrics from above
@test sM[1:Nfp[2], 3, :] .* n1[1:Nfp[2], 3, :] ≈ x2ξ1[:, 1, :] .* ω[1]
@test sM[1:Nfp[2], 3, :] .* n2[1:Nfp[2], 3, :] ≈ -x1ξ1[:, 1, :] .* ω[1]
@test sM[1:Nfp[2], 4, :] .* n1[1:Nfp[2], 4, :] ≈ -x2ξ1[:, 2, :] .* ω[1]
@test sM[1:Nfp[2], 4, :] .* n2[1:Nfp[2], 4, :] ≈ x1ξ1[:, 2, :] .* ω[1]
end
end
#}}}

# Constant preserving test for N = 0
#{{{
# Same free-stream-preservation identity, but with one direction collapsed
# to a single Gauss point (N = 0); covers both orientations of the
# zero-order direction.
let
    for FT in (Float32, Float64), N in ((4, 0), (0, 4))
        Nq = N .+ 1
        Np = prod(Nq)
        Nfp = div.(Np, Nq)
        dim = length(N)
        nface = 2dim

        # Create element operators for each polynomial order
        # (Gauss points when Nq == 1, LGL points otherwise)
        ξω = ntuple(
            j ->
                Nq[j] == 1 ?
Elements.glpoints(FT, N[j]) : Elements.lglpoints(FT, N[j]), dim, ) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) rng = MersenneTwister(777) fx1(ξ1, ξ2) = ξ1 + (ξ1 * ξ2 * rand(rng) + rand(rng)) / 10 fx2(ξ1, ξ2) = ξ2 + (ξ1 * ξ2 * rand(rng) + rand(rng)) / 10 e2c = Array{FT, 3}(undef, 2, 4, 1) e2c[:, :, 1] = [-1 1 -1 1; -1 -1 1 1] nelem = size(e2c, 3) meshwarp(ξ1, ξ2, _) = (fx1(ξ1, ξ2), fx2(ξ1, ξ2), 0) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, meshwarp) vgeo = reshape( vgeo.array, prod(Nq), # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) M = vgeo[:, _M, :] ξ1x1 = vgeo[:, _ξ1x1, :] ξ2x1 = vgeo[:, _ξ2x1, :] ξ1x2 = vgeo[:, _ξ1x2, :] ξ2x2 = vgeo[:, _ξ2x2, :] I1 = Matrix(I, Nq[1], Nq[1]) I2 = Matrix(I, Nq[2], Nq[2]) D1 = kron(I2, D[1]) D2 = kron(D[2], I1) # Face interpolation operators L = ( kron(I2, I1[1, :]'), kron(I2, I1[Nq[1], :]'), kron(I2[1, :]', I1), kron(I2[Nq[2], :]', I1), ) sM = ntuple(f -> sgeo.sωJ[1:Nfp[cld(f, 2)], f, :], nface) n1 = ntuple(f -> sgeo.n1[1:Nfp[cld(f, 2)], f, :], nface) n2 = ntuple(f -> sgeo.n2[1:Nfp[cld(f, 2)], f, :], nface) # If constant preserving then: # \sum_{j} = D' * M * ξjxk = \sum_{f} L_f' * sM_f * n1_f @test D1' * (M .* ξ1x1) + D2' * (M .* ξ2x1) ≈ mapreduce((L, sM, n1) -> L' * (sM .* n1), +, L, sM, n1) @test D1' * (M .* ξ1x2) + D2' * (M .* ξ2x2) ≈ mapreduce((L, sM, n2) -> L' * (sM .* n2), +, L, sM, n2) end end #}}} end @testset "3-D Metric terms" begin # linear test #{{{ for FT in (Float32, Float64), N in ((2, 2, 2), (2, 3, 4), (4, 3, 2)) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple(j -> Elements.lglpoints(FT, N[j]), dim) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) e2c = Array{FT, 3}(undef, dim, 8, 2) e2c[:, :, 1] = [ 0 2 0 2 0 2 0 2 0 0 2 2 
0 0 2 2 0 0 0 0 2 2 2 2 ] e2c[:, :, 2] = [ 2 2 0 0 2 2 0 0 0 2 0 2 0 2 0 2 0 0 0 0 2 2 2 2 ] nelem = size(e2c, 3) x_exact = Array{FT, 4}(undef, Nq..., nelem) x_exact[:, :, :, 1] .= 1 .+ ξ[1] x_exact[:, :, :, 2] .= 1 .- ξ[2]' ξ1x1_exact = zeros(Int, Nq..., nelem) ξ1x1_exact[:, :, :, 1] .= 1 ξ1x2_exact = zeros(Int, Nq..., nelem) ξ1x2_exact[:, :, :, 2] .= 1 ξ2x1_exact = zeros(Int, Nq..., nelem) ξ2x1_exact[:, :, :, 2] .= -1 ξ2x2_exact = zeros(Int, Nq..., nelem) ξ2x2_exact[:, :, :, 1] .= 1 ξ3x3_exact = ones(Int, Nq..., nelem) y_exact = Array{FT, 4}(undef, Nq..., nelem) y_exact[:, :, :, 1] .= 1 .+ ξ[2]' y_exact[:, :, :, 2] .= 1 .+ ξ[1] z_exact = Array{FT, 4}(undef, Nq..., nelem) z_exact[:, :, :, :] .= reshape(1 .+ ξ[3], 1, 1, Nq[3]) M_exact = ones(Int, Nq..., nelem) .* reshape(kron(reverse(ω)...), Nq..., 1) sJ_exact = ones(Int, maximum(Nfp), nface, nelem) nx_exact = zeros(Int, maximum(Nfp), nface, nelem) nx_exact[:, 1, 1] .= -1 nx_exact[:, 2, 1] .= 1 nx_exact[:, 3, 2] .= 1 nx_exact[:, 4, 2] .= -1 ny_exact = zeros(Int, maximum(Nfp), nface, nelem) ny_exact[:, 3, 1] .= -1 ny_exact[:, 4, 1] .= 1 ny_exact[:, 1, 2] .= -1 ny_exact[:, 2, 2] .= 1 nz_exact = zeros(Int, maximum(Nfp), nface, nelem) nz_exact[:, 5, 1:2] .= -1 nz_exact[:, 6, 1:2] .= 1 (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, (x...) 
-> identity(x)) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) @test (@view vgeo[:, :, :, _x1, :]) ≈ x_exact @test (@view vgeo[:, :, :, _x2, :]) ≈ y_exact @test (@view vgeo[:, :, :, _x3, :]) ≈ z_exact @test (@view vgeo[:, :, :, _M, :]) ≈ M_exact @test (@view vgeo[:, :, :, _ξ1x1, :]) ≈ ξ1x1_exact @test (@view vgeo[:, :, :, _ξ1x2, :]) ≈ ξ1x2_exact @test maximum(abs.(@view vgeo[:, :, :, _ξ1x3, :])) ≤ 100 * eps(FT) @test (@view vgeo[:, :, :, _ξ2x1, :]) ≈ ξ2x1_exact @test (@view vgeo[:, :, :, _ξ2x2, :]) ≈ ξ2x2_exact @test maximum(abs.(@view vgeo[:, :, :, _ξ2x3, :])) ≤ 100 * eps(FT) @test maximum(abs.(@view vgeo[:, :, :, _ξ3x1, :])) ≤ 100 * eps(FT) @test maximum(abs.(@view vgeo[:, :, :, _ξ3x2, :])) ≤ 100 * eps(FT) @test (@view vgeo[:, :, :, _ξ3x3, :]) ≈ ξ3x3_exact for d in 1:dim for f in (2d - 1):(2d) ωf = ntuple(j -> ω[mod1(d + j, dim)], dim - 1) if !(dim == 3 && d == 2) ωf = reverse(ωf) end Mf = kron(1, ωf...) 
@test isapprox( (@view sgeo.sωJ[1:Nfp[d], f, :]), sJ_exact[1:Nfp[d], f, :] .* Mf, atol = √eps(FT), rtol = √eps(FT), ) @test isapprox( (@view sgeo.n1[1:Nfp[d], f, :]), nx_exact[1:Nfp[d], f, :]; atol = √eps(FT), rtol = √eps(FT), ) @test isapprox( (@view sgeo.n2[1:Nfp[d], f, :]), ny_exact[1:Nfp[d], f, :]; atol = √eps(FT), rtol = √eps(FT), ) @test isapprox( (@view sgeo.n3[1:Nfp[d], f, :]), nz_exact[1:Nfp[d], f, :]; atol = √eps(FT), rtol = √eps(FT), ) end end end #}}} # Polynomial 3-D test #{{{ for FT in (Float32, Float64), N in ((9, 9, 9), (9, 9, 10), (10, 9, 11)) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple(j -> Elements.lglpoints(FT, N[j]), dim) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) f(ξ1, ξ2, ξ3) = @.( ( ξ2 + ξ1 * ξ3 - (ξ1^2 * ξ2^2 * ξ3^2) / 4, ξ3 - ((ξ1 * ξ2 * ξ3 + 1) / 2)^3 + 1, ξ1 + 2 * ((ξ1 + 1) / 2)^6 * ((ξ2 + 1) / 2)^6 * ((ξ3 + 1) / 2)^6, )) fx1ξ1(ξ1, ξ2, ξ3) = @.(ξ3 - (ξ1 * ξ2^2 * ξ3^2) / 2) fx1ξ2(ξ1, ξ2, ξ3) = @.(1 - (ξ1^2 * ξ2 * ξ3^2) / 2) fx1ξ3(ξ1, ξ2, ξ3) = @.(ξ1 - (ξ1^2 * ξ2^2 * ξ3) / 2) fx2ξ1(ξ1, ξ2, ξ3) = @.(-(3 * ξ2 * ξ3 * ((ξ1 * ξ2 * ξ3 + 1) / 2)^2) / 2) fx2ξ2(ξ1, ξ2, ξ3) = @.(-(3 * ξ1 * ξ3 * ((ξ1 * ξ2 * ξ3 + 1) / 2)^2) / 2) fx2ξ3(ξ1, ξ2, ξ3) = @.(1 - (3 * ξ1 * ξ2 * ((ξ1 * ξ2 * ξ3 + 1) / 2)^2) / 2) fx3ξ1(ξ1, ξ2, ξ3) = @.(6 * ((ξ1 + 1) / 2)^5 * ((ξ2 + 1) / 2)^6 * ((ξ3 + 1) / 2)^6 + 1) fx3ξ2(ξ1, ξ2, ξ3) = @.(6 * ((ξ1 + 1) / 2)^6 * ((ξ2 + 1) / 2)^5 * ((ξ3 + 1) / 2)^6) fx3ξ3(ξ1, ξ2, ξ3) = @.(6 * ((ξ1 + 1) / 2)^6 * ((ξ2 + 1) / 2)^6 * ((ξ3 + 1) / 2)^5) e2c = Array{FT, 3}(undef, dim, 8, 1) e2c[:, :, 1] = [ -1 1 -1 1 -1 1 -1 1 -1 -1 1 1 -1 -1 1 1 -1 -1 -1 -1 1 1 1 1 ] nelem = size(e2c, 3) # Compute exact metrics (x1ξ1, x1ξ2, x1ξ3, x2ξ1, x2ξ2, x2ξ3, x3ξ1, x3ξ2, x3ξ3) = let (vgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, (x...) 
-> identity(x)) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) ξ1 = vgeo[:, :, :, _x1, :] ξ2 = vgeo[:, :, :, _x2, :] ξ3 = vgeo[:, :, :, _x3, :] ( fx1ξ1(ξ1, ξ2, ξ3), fx1ξ2(ξ1, ξ2, ξ3), fx1ξ3(ξ1, ξ2, ξ3), fx2ξ1(ξ1, ξ2, ξ3), fx2ξ2(ξ1, ξ2, ξ3), fx2ξ3(ξ1, ξ2, ξ3), fx3ξ1(ξ1, ξ2, ξ3), fx3ξ2(ξ1, ξ2, ξ3), fx3ξ3(ξ1, ξ2, ξ3), ) end J = ( x1ξ1 .* (x2ξ2 .* x3ξ3 - x2ξ3 .* x3ξ2) + x2ξ1 .* (x3ξ2 .* x1ξ3 - x3ξ3 .* x1ξ2) + x3ξ1 .* (x1ξ2 .* x2ξ3 - x1ξ3 .* x2ξ2) ) ξ1x1 = (x2ξ2 .* x3ξ3 - x2ξ3 .* x3ξ2) ./ J ξ1x2 = (x3ξ2 .* x1ξ3 - x3ξ3 .* x1ξ2) ./ J ξ1x3 = (x1ξ2 .* x2ξ3 - x1ξ3 .* x2ξ2) ./ J ξ2x1 = (x2ξ3 .* x3ξ1 - x2ξ1 .* x3ξ3) ./ J ξ2x2 = (x3ξ3 .* x1ξ1 - x3ξ1 .* x1ξ3) ./ J ξ2x3 = (x1ξ3 .* x2ξ1 - x1ξ1 .* x2ξ3) ./ J ξ3x1 = (x2ξ1 .* x3ξ2 - x2ξ2 .* x3ξ1) ./ J ξ3x2 = (x3ξ1 .* x1ξ2 - x3ξ2 .* x1ξ1) ./ J ξ3x3 = (x1ξ1 .* x2ξ2 - x1ξ2 .* x2ξ1) ./ J (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, f) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) @test (@view vgeo[:, :, :, _M, :]) ≈ J .* reshape(kron(reverse(ω)...), Nq..., 1) @test (@view vgeo[:, :, :, _ξ1x1, :]) ≈ ξ1x1 @test (@view vgeo[:, :, :, _ξ1x2, :]) ≈ ξ1x2 @test (@view vgeo[:, :, :, _ξ1x3, :]) ≈ ξ1x3 @test (@view vgeo[:, :, :, _ξ2x1, :]) ≈ ξ2x1 @test (@view vgeo[:, :, :, _ξ2x2, :]) ≈ ξ2x2 @test (@view vgeo[:, :, :, _ξ2x3, :]) ≈ ξ2x3 @test (@view vgeo[:, :, :, _ξ3x1, :]) ≈ ξ3x1 @test (@view vgeo[:, :, :, _ξ3x2, :]) ≈ ξ3x2 @test (@view vgeo[:, :, :, _ξ3x3, :]) ≈ ξ3x3 n1 = @view sgeo.n1[:, :, :] n2 = @view sgeo.n2[:, :, :] n3 = @view sgeo.n3[:, :, :] sM = @view sgeo.sωJ[:, :, :] for d in 1:dim for f in (2d - 1):(2d) @test all( hypot.( n1[1:Nfp[d], f, :], n2[1:Nfp[d], f, :], n3[1:Nfp[d], f, :], ) .≈ 1, ) end end d, f = 1, 1 Mf = kron(1, ω[3], ω[2]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], 
f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (-J[1, :, :, :] .* ξ1x1[1, :, :, :])[:] .* Mf, (-J[1, :, :, :] .* ξ1x2[1, :, :, :])[:] .* Mf, (-J[1, :, :, :] .* ξ1x3[1, :, :, :])[:] .* Mf, ] d, f = 1, 2 Mf = kron(1, ω[3], ω[2]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (J[Nq[d], :, :, :] .* ξ1x1[Nq[d], :, :, :])[:] .* Mf, (J[Nq[d], :, :, :] .* ξ1x2[Nq[d], :, :, :])[:] .* Mf, (J[Nq[d], :, :, :] .* ξ1x3[Nq[d], :, :, :])[:] .* Mf, ] d, f = 2, 3 Mf = kron(1, ω[3], ω[1]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (-J[:, 1, :, :] .* ξ2x1[:, 1, :, :])[:] .* Mf, (-J[:, 1, :, :] .* ξ2x2[:, 1, :, :])[:] .* Mf, (-J[:, 1, :, :] .* ξ2x3[:, 1, :, :])[:] .* Mf, ] d, f = 2, 4 Mf = kron(1, ω[3], ω[1]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (J[:, Nq[d], :, :] .* ξ2x1[:, Nq[d], :, :])[:] .* Mf, (J[:, Nq[d], :, :] .* ξ2x2[:, Nq[d], :, :])[:] .* Mf, (J[:, Nq[d], :, :] .* ξ2x3[:, Nq[d], :, :])[:] .* Mf, ] d, f = 3, 5 Mf = kron(1, ω[2], ω[1]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (-J[:, :, 1, :] .* ξ3x1[:, :, 1, :])[:] .* Mf, (-J[:, :, 1, :] .* ξ3x2[:, :, 1, :])[:] .* Mf, (-J[:, :, 1, :] .* ξ3x3[:, :, 1, :])[:] .* Mf, ] d, f = 3, 6 Mf = kron(1, ω[2], ω[1]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (J[:, :, Nq[d], :] .* ξ3x1[:, :, Nq[d], :])[:] .* Mf, (J[:, :, Nq[d], :] .* ξ3x2[:, :, Nq[d], :])[:] .* Mf, (J[:, :, Nq[d], :] .* ξ3x3[:, :, Nq[d], :])[:] .* Mf, ] end 
#}}} # Constant preserving test #{{{ for FT in (Float32, Float64), N in ((5, 5, 5), (3, 4, 5), (4, 4, 5)) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple(j -> Elements.lglpoints(FT, N[j]), dim) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) f(ξ1, ξ2, ξ3) = @.( ( ξ2 + ξ1 * ξ3 - (ξ1^2 * ξ2^2 * ξ3^2) / 4, ξ3 - ((ξ1 * ξ2 * ξ3 + 1) / 2)^3 + 1, ξ1 + ((ξ1 + 1) / 2)^6 * ((ξ2 + 1) / 2)^6 * ((ξ3 + 1) / 2)^6, )) e2c = Array{FT, 3}(undef, dim, 8, 1) e2c[:, :, 1] = [ -1 1 -1 1 -1 1 -1 1 -1 -1 1 1 -1 -1 1 1 -1 -1 -1 -1 1 1 1 1 ] nelem = size(e2c, 3) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, f) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) (Cx1, Cx2, Cx3) = (zeros(FT, Nq...), zeros(FT, Nq...), zeros(FT, Nq...)) J = (@view vgeo[:, :, :, _M, :]) ./ reshape(kron(reverse(ω)...), Nq..., 1) ξ1x1 = @view vgeo[:, :, :, _ξ1x1, :] ξ1x2 = @view vgeo[:, :, :, _ξ1x2, :] ξ1x3 = @view vgeo[:, :, :, _ξ1x3, :] ξ2x1 = @view vgeo[:, :, :, _ξ2x1, :] ξ2x2 = @view vgeo[:, :, :, _ξ2x2, :] ξ2x3 = @view vgeo[:, :, :, _ξ2x3, :] ξ3x1 = @view vgeo[:, :, :, _ξ3x1, :] ξ3x2 = @view vgeo[:, :, :, _ξ3x2, :] ξ3x3 = @view vgeo[:, :, :, _ξ3x3, :] e = 1 for k in 1:Nq[3] for j in 1:Nq[2] Cx1[:, j, k] += D[1] * (J[:, j, k, e] .* ξ1x1[:, j, k, e]) Cx2[:, j, k] += D[1] * (J[:, j, k, e] .* ξ1x2[:, j, k, e]) Cx3[:, j, k] += D[1] * (J[:, j, k, e] .* ξ1x3[:, j, k, e]) end end for k in 1:Nq[3] for i in 1:Nq[1] Cx1[i, :, k] += D[2] * (J[i, :, k, e] .* ξ2x1[i, :, k, e]) Cx2[i, :, k] += D[2] * (J[i, :, k, e] .* ξ2x2[i, :, k, e]) Cx3[i, :, k] += D[2] * (J[i, :, k, e] .* ξ2x3[i, :, k, e]) end end for j in 1:Nq[2] for i in 1:Nq[1] Cx1[i, j, :] += D[3] * (J[i, j, :, e] .* ξ3x1[i, j, :, e]) Cx2[i, j, :] += D[3] * (J[i, j, :, e] .* ξ3x2[i, j, :, e]) Cx3[i, 
j, :] += D[3] * (J[i, j, :, e] .* ξ3x3[i, j, :, e]) end end @test maximum(abs.(Cx1)) ≤ 300 * eps(FT) @test maximum(abs.(Cx2)) ≤ 300 * eps(FT) @test maximum(abs.(Cx3)) ≤ 300 * eps(FT) end #}}} #N = 0 test #{{{ let for FT in (Float32, Float64) N = (4, 4, 0) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple( j -> Nq[j] == 1 ? Elements.glpoints(FT, N[j]) : Elements.lglpoints(FT, N[j]), dim, ) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) fx1(ξ1, ξ2, ξ3) = ξ1 + (1 + ξ1)^2 * (1 + ξ2)^2 + ξ3 / 10 fx1ξ1(ξ1, ξ2, ξ3) = 1 + 2 * (1 + ξ1) * (1 + ξ2)^2 fx1ξ2(ξ1, ξ2, ξ3) = (1 + ξ1)^2 * 2 * (1 + ξ2) fx1ξ3(ξ1, ξ2, ξ3) = 1 / 10 fx2(ξ1, ξ2, ξ3) = ξ2 - (1 + ξ1)^2 + (2 + ξ3) / 2 fx2ξ1(ξ1, ξ2, ξ3) = -2 * (1 + ξ1) fx2ξ2(ξ1, ξ2, ξ3) = 1 fx2ξ3(ξ1, ξ2, ξ3) = 1 / 2 fx3(ξ1, ξ2, ξ3) = ξ3 + (1 + ξ1)^2 * (1 + ξ2)^2 / 10 fx3ξ1(ξ1, ξ2, ξ3) = 2 * (1 + ξ1) * (1 + ξ2)^2 / 10 fx3ξ2(ξ1, ξ2, ξ3) = (1 + ξ1)^2 * 2 * (1 + ξ2) / 10 fx3ξ3(ξ1, ξ2, ξ3) = 1 e2c = Array{FT, 3}(undef, 3, 8, 1) e2c[:, :, 1] = [ -1 +1 -1 +1 -1 +1 -1 +1 -1 -1 +1 +1 -1 -1 +1 +1 -1 -1 -1 -1 +1 +1 +1 +1 ] nelem = size(e2c, 3) # Create the metrics (x1, x2, x3, x1ξ1, x1ξ2, x1ξ3, x2ξ1, x2ξ2, x2ξ3, x3ξ1, x3ξ2, x3ξ3) = let vgeo = VolumeGeometry(FT, Nq, nelem) Metrics.creategrid!(vgeo, e2c, ξ) x1 = reshape(vgeo.x1, (Nq..., nelem)) x2 = reshape(vgeo.x2, (Nq..., nelem)) x3 = reshape(vgeo.x3, (Nq..., nelem)) ( fx1.(x1, x2, x3), fx2.(x1, x2, x3), fx3.(x1, x2, x3), fx1ξ1.(x1, x2, x3), fx1ξ2.(x1, x2, x3), fx1ξ3.(x1, x2, x3), fx2ξ1.(x1, x2, x3), fx2ξ2.(x1, x2, x3), fx2ξ3.(x1, x2, x3), fx3ξ1.(x1, x2, x3), fx3ξ2.(x1, x2, x3), fx3ξ3.(x1, x2, x3), ) end J = @.( x1ξ1 * (x2ξ2 * x3ξ3 - x3ξ2 * x2ξ3) + x2ξ1 * (x3ξ2 * x1ξ3 - x1ξ2 * x3ξ3) + x3ξ1 * (x1ξ2 * x2ξ3 - x2ξ2 * x1ξ3) ) ξ1x1 = (x2ξ2 .* x3ξ3 - x2ξ3 .* x3ξ2) ./ J ξ1x2 = (x3ξ2 .* x1ξ3 - x3ξ3 .* x1ξ2) ./ J ξ1x3 = (x1ξ2 .* x2ξ3 - x1ξ3 .* x2ξ2) ./ J ξ2x1 = (x2ξ3 .* x3ξ1 
- x2ξ1 .* x3ξ3) ./ J ξ2x2 = (x3ξ3 .* x1ξ1 - x3ξ1 .* x1ξ3) ./ J ξ2x3 = (x1ξ3 .* x2ξ1 - x1ξ1 .* x2ξ3) ./ J ξ3x1 = (x2ξ1 .* x3ξ2 - x2ξ2 .* x3ξ1) ./ J ξ3x2 = (x3ξ1 .* x1ξ2 - x3ξ2 .* x1ξ1) ./ J ξ3x3 = (x1ξ1 .* x2ξ2 - x1ξ2 .* x2ξ1) ./ J M = J .* reshape(kron(reverse(ω)...), Nq..., 1) meshwarp(ξ1, ξ2, ξ3) = (fx1(ξ1, ξ2, ξ3), fx2(ξ1, ξ2, ξ3), fx3(ξ1, ξ2, ξ3)) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, meshwarp) vgeo = reshape( vgeo.array, Nq..., # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) @test x1 ≈ vgeo[:, :, :, _x1, :] @test x2 ≈ vgeo[:, :, :, _x2, :] @test x3 ≈ vgeo[:, :, :, _x3, :] @test M ≈ vgeo[:, :, :, _M, :] @test (@view vgeo[:, :, :, _ξ1x1, :]) ≈ ξ1x1 @test (@view vgeo[:, :, :, _ξ1x2, :]) ≈ ξ1x2 @test (@view vgeo[:, :, :, _ξ1x3, :]) ≈ ξ1x3 @test (@view vgeo[:, :, :, _ξ2x1, :]) ≈ ξ2x1 @test (@view vgeo[:, :, :, _ξ2x2, :]) ≈ ξ2x2 @test (@view vgeo[:, :, :, _ξ2x3, :]) ≈ ξ2x3 @test (@view vgeo[:, :, :, _ξ3x1, :]) ≈ ξ3x1 @test (@view vgeo[:, :, :, _ξ3x2, :]) ≈ ξ3x2 @test (@view vgeo[:, :, :, _ξ3x3, :]) ≈ ξ3x3 # check the normals? 
sM = @view sgeo.sωJ[:, :, :] n1 = @view sgeo.n1[:, :, :] n2 = @view sgeo.n2[:, :, :] n3 = @view sgeo.n3[:, :, :] @test all( hypot.( n1[1:Nfp[1], 1:2, :], n2[1:Nfp[1], 1:2, :], n3[1:Nfp[1], 1:2, :], ) .≈ 1, ) @test all( hypot.( n1[1:Nfp[2], 3:4, :], n2[1:Nfp[2], 3:4, :], n3[1:Nfp[2], 3:4, :], ) .≈ 1, ) @test all( hypot.( n1[1:Nfp[3], 5:6, :], n2[1:Nfp[3], 5:6, :], n3[1:Nfp[3], 5:6, :], ) .≈ 1, ) d, f = 1, 1 Mf = kron(1, ω[3], ω[2]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (-J[1, :, :, :] .* ξ1x1[1, :, :, :])[:] .* Mf, (-J[1, :, :, :] .* ξ1x2[1, :, :, :])[:] .* Mf, (-J[1, :, :, :] .* ξ1x3[1, :, :, :])[:] .* Mf, ] d, f = 1, 2 Mf = kron(1, ω[3], ω[2]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (J[Nq[d], :, :, :] .* ξ1x1[Nq[d], :, :, :])[:] .* Mf, (J[Nq[d], :, :, :] .* ξ1x2[Nq[d], :, :, :])[:] .* Mf, (J[Nq[d], :, :, :] .* ξ1x3[Nq[d], :, :, :])[:] .* Mf, ] d, f = 2, 3 Mf = kron(1, ω[3], ω[1]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (-J[:, 1, :, :] .* ξ2x1[:, 1, :, :])[:] .* Mf, (-J[:, 1, :, :] .* ξ2x2[:, 1, :, :])[:] .* Mf, (-J[:, 1, :, :] .* ξ2x3[:, 1, :, :])[:] .* Mf, ] d, f = 2, 4 Mf = kron(1, ω[3], ω[1]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (J[:, Nq[d], :, :] .* ξ2x1[:, Nq[d], :, :])[:] .* Mf, (J[:, Nq[d], :, :] .* ξ2x2[:, Nq[d], :, :])[:] .* Mf, (J[:, Nq[d], :, :] .* ξ2x3[:, Nq[d], :, :])[:] .* Mf, ] # for these faces we need the N = 1 metrics (x1ξ1, x1ξ2, x1ξ3, x2ξ1, x2ξ2, x2ξ3, x3ξ1, x3ξ2, x3ξ3) = let @assert Nq[1] != 1 && Nq[2] != 1 && Nq[3] == 1 Nq_N1 = max.(2, Nq) vgeo_N1 = VolumeGeometry(FT, Nq_N1, nelem) 
Metrics.creategrid!( vgeo_N1, e2c, (ξ[1], ξ[2], Elements.lglpoints(FT, 1)[1]), ) x1 = reshape(vgeo_N1.x1, (Nq_N1..., nelem)) x2 = reshape(vgeo_N1.x2, (Nq_N1..., nelem)) x3 = reshape(vgeo_N1.x3, (Nq_N1..., nelem)) ( fx1ξ1.(x1, x2, x3), fx1ξ2.(x1, x2, x3), fx1ξ3.(x1, x2, x3), fx2ξ1.(x1, x2, x3), fx2ξ2.(x1, x2, x3), fx2ξ3.(x1, x2, x3), fx3ξ1.(x1, x2, x3), fx3ξ2.(x1, x2, x3), fx3ξ3.(x1, x2, x3), ) end J = @.( x1ξ1 * (x2ξ2 * x3ξ3 - x3ξ2 * x2ξ3) + x2ξ1 * (x3ξ2 * x1ξ3 - x1ξ2 * x3ξ3) + x3ξ1 * (x1ξ2 * x2ξ3 - x2ξ2 * x1ξ3) ) ξ3x1 = (x2ξ1 .* x3ξ2 - x2ξ2 .* x3ξ1) ./ J ξ3x2 = (x3ξ1 .* x1ξ2 - x3ξ2 .* x1ξ1) ./ J ξ3x3 = (x1ξ1 .* x2ξ2 - x1ξ2 .* x2ξ1) ./ J d, f = 3, 5 Mf = kron(1, ω[2], ω[1]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (-J[:, :, 1, :] .* ξ3x1[:, :, 1, :])[:] .* Mf, (-J[:, :, 1, :] .* ξ3x2[:, :, 1, :])[:] .* Mf, (-J[:, :, 1, :] .* ξ3x3[:, :, 1, :])[:] .* Mf, ] d, f = 3, 6 Mf = kron(1, ω[2], ω[1]) @test [ (sM[1:Nfp[d], f, :] .* n1[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n2[1:Nfp[d], f, :])[:], (sM[1:Nfp[d], f, :] .* n3[1:Nfp[d], f, :])[:], ] ≈ [ (J[:, :, 2, :] .* ξ3x1[:, :, 2, :])[:] .* Mf, (J[:, :, 2, :] .* ξ3x2[:, :, 2, :])[:] .* Mf, (J[:, :, 2, :] .* ξ3x3[:, :, 2, :])[:] .* Mf, ] end end #}}} # Constant preserving test for N = 0 #{{{ let for FT in (Float64, Float32), N in ((4, 4, 0), (0, 0, 2), (0, 3, 4), (2, 0, 3)) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple( j -> Nq[j] == 1 ? 
Elements.glpoints(FT, N[j]) : Elements.lglpoints(FT, N[j]), dim, ) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) rng = MersenneTwister(777) fx1(ξ1, ξ2, ξ3) = ξ1 + (ξ1 * ξ2 * ξ3 * rand(rng) + rand(rng)) / 10 fx2(ξ1, ξ2, ξ3) = ξ2 + (ξ1 * ξ2 * ξ3 * rand(rng) + rand(rng)) / 10 fx3(ξ1, ξ2, ξ3) = ξ3 + (ξ1 * ξ2 * ξ3 * rand(rng) + rand(rng)) / 10 e2c = Array{FT, 3}(undef, 3, 8, 1) e2c[:, :, 1] = [ -1 +1 -1 +1 -1 +1 -1 +1 -1 -1 +1 +1 -1 -1 +1 +1 -1 -1 -1 -1 +1 +1 +1 +1 ] nelem = size(e2c, 3) meshwarp(ξ1, ξ2, ξ3) = (fx1(ξ1, ξ2, ξ3), fx2(ξ1, ξ2, ξ3), fx2(ξ1, ξ2, ξ3)) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, meshwarp) vgeo = reshape( vgeo.array, prod(Nq), # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) M = vgeo[:, _M, :] ξ1x1 = vgeo[:, _ξ1x1, :] ξ2x1 = vgeo[:, _ξ2x1, :] ξ3x1 = vgeo[:, _ξ3x1, :] ξ1x2 = vgeo[:, _ξ1x2, :] ξ2x2 = vgeo[:, _ξ2x2, :] ξ3x2 = vgeo[:, _ξ3x2, :] ξ1x3 = vgeo[:, _ξ1x3, :] ξ2x3 = vgeo[:, _ξ2x3, :] ξ3x3 = vgeo[:, _ξ3x3, :] M = vgeo[:, _M, :] ξ1x1 = vgeo[:, _ξ1x1, :] ξ2x1 = vgeo[:, _ξ2x1, :] ξ1x2 = vgeo[:, _ξ1x2, :] ξ2x2 = vgeo[:, _ξ2x2, :] I1 = Matrix(I, Nq[1], Nq[1]) I2 = Matrix(I, Nq[2], Nq[2]) I3 = Matrix(I, Nq[3], Nq[3]) D1 = kron(I3, I2, D[1]) D2 = kron(I3, D[2], I1) D3 = kron(D[3], I2, I1) # Face interpolation operators L = ( kron(I3, I2, I1[1, :]'), kron(I3, I2, I1[Nq[1], :]'), kron(I3, I2[1, :]', I1), kron(I3, I2[Nq[2], :]', I1), kron(I3[1, :]', I2, I1), kron(I3[Nq[3], :]', I2, I1), ) sM = ntuple(f -> sgeo.sωJ[1:Nfp[cld(f, 2)], f, :], nface) n1 = ntuple(f -> sgeo.n1[1:Nfp[cld(f, 2)], f, :], nface) n2 = ntuple(f -> sgeo.n2[1:Nfp[cld(f, 2)], f, :], nface) n3 = ntuple(f -> sgeo.n3[1:Nfp[cld(f, 2)], f, :], nface) # If constant preserving then: # \sum_{j} = D' * M * ξjxk = \sum_{f} L_f' * sM_f * n1_f @test D1' * (M .* ξ1x1) + D2' * (M .* ξ2x1) + D3' * (M .* ξ3x1) ≈ mapreduce((L, sM, n1) -> L' * (sM .* 
n1), +, L, sM, n1) @test D1' * (M .* ξ1x2) + D2' * (M .* ξ2x2) + D3' * (M .* ξ3x2) ≈ mapreduce((L, sM, n2) -> L' * (sM .* n2), +, L, sM, n2) @test D1' * (M .* ξ1x3) + D2' * (M .* ξ2x3) + D3' * (M .* ξ3x3) ≈ mapreduce((L, sM, n3) -> L' * (sM .* n3), +, L, sM, n3) end end #}}} # Constant preserving test with all N = 0 #{{{ let for FT in (Float64, Float32) N = (0, 0, 0) Nq = N .+ 1 Np = prod(Nq) Nfp = div.(Np, Nq) dim = length(N) nface = 2dim # Create element operators for each polynomial order ξω = ntuple( j -> Nq[j] == 1 ? Elements.glpoints(FT, N[j]) : Elements.lglpoints(FT, N[j]), dim, ) ξ, ω = ntuple(j -> map(x -> x[j], ξω), 2) D = ntuple(j -> Elements.spectralderivative(ξ[j]), dim) rng = MersenneTwister(777) fx1(ξ1, ξ2, ξ3) = ξ1 + (ξ1 * ξ2 * ξ3 * rand(rng) + rand(rng)) / 10 fx2(ξ1, ξ2, ξ3) = ξ2 + (ξ1 * ξ2 * ξ3 * rand(rng) + rand(rng)) / 10 fx3(ξ1, ξ2, ξ3) = ξ3 + (ξ1 * ξ2 * ξ3 * rand(rng) + rand(rng)) / 10 e2c = Array{FT, 3}(undef, 3, 8, 1) e2c[:, :, 1] = [ -1 +1 -1 +1 -1 +1 -1 +1 -1 -1 +1 +1 -1 -1 +1 +1 -1 -1 -1 -1 +1 +1 +1 +1 ] nelem = size(e2c, 3) meshwarp(ξ1, ξ2, ξ3) = (fx1(ξ1, ξ2, ξ3), fx2(ξ1, ξ2, ξ3), fx2(ξ1, ξ2, ξ3)) (vgeo, sgeo, _) = Grids.computegeometry(e2c, D, ξ, ω, meshwarp) vgeo = reshape( vgeo.array, prod(Nq), # - 1 after fieldcount is to remove the `array` field from the array allocation fieldcount(GeometricFactors.VolumeGeometry) - 1, nelem, ) M = vgeo[:, _M, :] ξ1x1 = vgeo[:, _ξ1x1, :] ξ2x1 = vgeo[:, _ξ2x1, :] ξ3x1 = vgeo[:, _ξ3x1, :] ξ1x2 = vgeo[:, _ξ1x2, :] ξ2x2 = vgeo[:, _ξ2x2, :] ξ3x2 = vgeo[:, _ξ3x2, :] ξ1x3 = vgeo[:, _ξ1x3, :] ξ2x3 = vgeo[:, _ξ2x3, :] ξ3x3 = vgeo[:, _ξ3x3, :] M = vgeo[:, _M, :] ξ1x1 = vgeo[:, _ξ1x1, :] ξ2x1 = vgeo[:, _ξ2x1, :] ξ1x2 = vgeo[:, _ξ1x2, :] ξ2x2 = vgeo[:, _ξ2x2, :] I1 = Matrix(I, Nq[1], Nq[1]) I2 = Matrix(I, Nq[2], Nq[2]) I3 = Matrix(I, Nq[3], Nq[3]) D1 = kron(I3, I2, D[1]) D2 = kron(I3, D[2], I1) D3 = kron(D[3], I2, I1) # Face interpolation operators L = ( kron(I3, I2, I1[1, :]'), kron(I3, I2, I1[Nq[1], :]'), 
kron(I3, I2[1, :]', I1), kron(I3, I2[Nq[2], :]', I1), kron(I3[1, :]', I2, I1), kron(I3[Nq[3], :]', I2, I1), ) sM = ntuple(f -> sgeo.sωJ[1:Nfp[cld(f, 2)], f, :], nface) n1 = ntuple(f -> sgeo.n1[1:Nfp[cld(f, 2)], f, :], nface) n2 = ntuple(f -> sgeo.n2[1:Nfp[cld(f, 2)], f, :], nface) n3 = ntuple(f -> sgeo.n3[1:Nfp[cld(f, 2)], f, :], nface) # If constant preserving then \sum_{f} L_f' * sM_f * n1_f ≈ 0 @test abs(mapreduce( (L, sM, n1) -> L' * (sM .* n1), +, L, sM, n1, )[1]) < 10 * eps(FT) @test abs(mapreduce( (L, sM, n2) -> L' * (sM .* n2), +, L, sM, n2, )[1]) < 10 * eps(FT) @test abs(mapreduce( (L, sM, n3) -> L' * (sM .* n3), +, L, sM, n3, )[1]) < 10 * eps(FT) end end #}}} end ================================================ FILE: test/Numerics/Mesh/filter.jl ================================================ using Test import ClimateMachine using ClimateMachine.VariableTemplates: @vars, Vars using ClimateMachine.Mesh.Grids: EveryDirection, HorizontalDirection, VerticalDirection using ClimateMachine.MPIStateArrays: weightedsum using ClimateMachine.BalanceLaws import GaussQuadrature using MPI using LinearAlgebra ClimateMachine.init() @testset "Exponential and Cutoff filter matrix" begin let # Values computed with: # https://github.com/tcew/nodal-dg/blob/master/Codes1.1/Codes1D/Filter1D.m #! format: off W = [0x3fe98f3cd0d725e8 0x3fddfd863c6c9a44 0xbfe111110d0fd334 0x3fddbe357bce0b5c 0xbfc970267f929618 0x3fb608a150f6f927 0x3fe99528b1a1cd8d 0x3fcd41d41f8bae45 0xbfc987d5fabab8d5 0x3fb5da1cd858af87 0xbfb333332eb1cd92 0x3fc666666826f178 0x3fe999999798faaa 0x3fc666666826f176 0xbfb333332eb1cd94 0x3fb5da1cd858af84 0xbfc987d5fabab8d4 0x3fcd41d41f8bae46 0x3fe99528b1a1cd8e 0x3fb608a150f6f924 0xbfc970267f929618 0x3fddbe357bce0b5c 0xbfe111110d0fd333 0x3fddfd863c6c9a44 0x3fe98f3cd0d725e8] #! 
format: on W = reinterpret.(Float64, W) N = size(W, 1) - 1 topology = ClimateMachine.Mesh.Topologies.BrickTopology( MPI.COMM_SELF, -1.0:2.0:1.0, ) grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid( topology; polynomialorder = N, FloatType = Float64, DeviceArray = Array, ) filter = ClimateMachine.Mesh.Filters.ExponentialFilter(grid, 0, 32) nf = length(filter.filter_matrices) @test all(ntuple(i -> filter.filter_matrices[i] ≈ W, nf)) end let # Values computed with: # https://github.com/tcew/nodal-dg/blob/master/Codes1.1/Codes1D/Filter1D.m #! format: off W = [0x3fd822e5f54ecb62 0x3fedd204a0f08ef8 0xbfc7d3aa58fd6968 0xbfbf74682ac4d276 0x3fc7db36e726d8c1 0x3fe59d16feee478b 0x3fc6745bfbb91e20 0xbfa30fbb7a645448 0xbfa30fbb7a645455 0x3fc6745bfbb91e26 0x3fe59d16feee478a 0x3fc7db36e726d8c4 0xbfbf74682ac4d280 0xbfc7d3aa58fd6962 0x3fedd204a0f08ef7 0x3fd822e5f54ecb62] #! format: on W = reinterpret.(Float64, W) N = size(W, 1) - 1 topology = ClimateMachine.Mesh.Topologies.BrickTopology( MPI.COMM_SELF, -1.0:2.0:1.0, ) grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid( topology; polynomialorder = N, FloatType = Float64, DeviceArray = Array, ) filter = ClimateMachine.Mesh.Filters.ExponentialFilter(grid, 1, 4) nf = length(filter.filter_matrices) @test all(ntuple(i -> filter.filter_matrices[i] ≈ W, nf)) end let T = Float64 N = (5, 3) Nc = (4, 2) topology = ClimateMachine.Mesh.Topologies.BrickTopology( MPI.COMM_SELF, -1.0:2.0:1.0, ) grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid( topology; polynomialorder = N, FloatType = T, DeviceArray = Array, ) ξ = ClimateMachine.Mesh.Grids.referencepoints(grid) ξ1 = ξ[1] ξ2 = ξ[2] a1, b1 = GaussQuadrature.legendre_coefs(T, N[1]) a2, b2 = GaussQuadrature.legendre_coefs(T, N[2]) V1 = GaussQuadrature.orthonormal_poly(ξ1, a1, b1) V2 = GaussQuadrature.orthonormal_poly(ξ2, a2, b2) Σ1 = ones(T, N[1] + 1) Σ2 = ones(T, N[2] + 1) Σ1[(Nc[1]:N[1]) .+ 1] .= 0 Σ2[(Nc[2]:N[2]) .+ 1] .= 0 W1 = V1 * Diagonal(Σ1) / 
V1 W2 = V2 * Diagonal(Σ2) / V2 filter = ClimateMachine.Mesh.Filters.CutoffFilter(grid, Nc) @test filter.filter_matrices[1] ≈ W1 @test filter.filter_matrices[2] ≈ W2 end let T = Float64 N = (5, 3) Nc = (4, 2) topology = ClimateMachine.Mesh.Topologies.BrickTopology( MPI.COMM_SELF, -1.0:2.0:1.0, ) grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid( topology; polynomialorder = N, FloatType = T, DeviceArray = Array, ) ξ = ClimateMachine.Mesh.Grids.referencepoints(grid) ξ1 = ξ[1] ξ2 = ξ[2] a1, b1 = GaussQuadrature.legendre_coefs(T, N[1]) a2, b2 = GaussQuadrature.legendre_coefs(T, N[2]) V1 = GaussQuadrature.orthonormal_poly(ξ1, a1, b1) V2 = GaussQuadrature.orthonormal_poly(ξ2, a2, b2) Σ1 = ones(T, N[1] + 1) Σ2 = ones(T, N[2] + 1) Σ1[(Nc[1]:N[1]) .+ 1] .= 0 Σ2[(Nc[2]:N[2]) .+ 1] .= 0 W1 = V1 * Diagonal(Σ1) / V1 W2 = V2 * Diagonal(Σ2) / V2 filter = ClimateMachine.Mesh.Filters.MassPreservingCutoffFilter(grid, Nc) @test filter.filter_matrices[1] ≈ W1 @test filter.filter_matrices[2] ≈ W2 end end struct FilterTestModel{N} <: ClimateMachine.BalanceLaws.BalanceLaw end ClimateMachine.BalanceLaws.vars_state(::FilterTestModel, ::Auxiliary, FT) = @vars() ClimateMachine.BalanceLaws.init_state_auxiliary!(::FilterTestModel, _...) = nothing # Legendre Polynomials l0(r) = 1 l1(r) = r l2(r) = (3 * r^2 - 1) / 2 l3(r) = (5 * r^3 - 3r) / 2 low(x, y, z) = l0(x) * l0(y) + 4 * l1(x) * l1(y) + 5 * l1(z) + 6 * l1(z) * l1(x) high(x, y, z) = l2(x) * l3(y) + l3(x) + l2(y) + l3(z) * l1(y) filtered(::EveryDirection, dim, x, y, z) = high(x, y, z) filtered(::VerticalDirection, dim, x, y, z) = (dim == 2) ? l2(x) * l3(y) + l2(y) : l3(z) * l1(y) filtered(::HorizontalDirection, dim, x, y, z) = (dim == 2) ? 
l2(x) * l3(y) + l3(x) : l2(x) * l3(y) + l3(x) + l2(y) ClimateMachine.BalanceLaws.vars_state(::FilterTestModel{4}, ::Prognostic, FT) = @vars(q1::FT, q2::FT, q3::FT, q4::FT) function ClimateMachine.BalanceLaws.init_state_prognostic!( ::FilterTestModel{4}, state::Vars, aux::Vars, localgeo, filter_direction, dim, ) (x, y, z) = localgeo.coord state.q1 = low(x, y, z) + high(x, y, z) state.q2 = low(x, y, z) + high(x, y, z) state.q3 = low(x, y, z) + high(x, y, z) state.q4 = low(x, y, z) + high(x, y, z) if !isnothing(filter_direction) state.q1 -= filtered(filter_direction, dim, x, y, z) state.q3 -= filtered(filter_direction, dim, x, y, z) end end @testset "Exponential and Cutoff filter application" begin N = 3 Ne = (1, 1, 1) @testset for FT in (Float64, Float32) @testset for dim in 2:3 @testset for direction in ( EveryDirection, HorizontalDirection, VerticalDirection, ) brickrange = ntuple( j -> range(FT(-1); length = Ne[j] + 1, stop = 1), dim, ) topl = ClimateMachine.Mesh.Topologies.BrickTopology( MPI.COMM_WORLD, brickrange, periodicity = ntuple(j -> true, dim), ) grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ClimateMachine.array_type(), polynomialorder = N, ) filter = ClimateMachine.Mesh.Filters.CutoffFilter(grid, 2) model = FilterTestModel{4}() dg = ClimateMachine.DGMethods.DGModel( model, grid, nothing, nothing, nothing; state_gradient_flux = nothing, ) @testset for target in ((1, 3), (:q1, :q3)) Q = ClimateMachine.DGMethods.init_ode_state( dg, nothing, dim, ) ClimateMachine.Mesh.Filters.apply!( Q, target, grid, filter, direction = direction(), ) P = ClimateMachine.DGMethods.init_ode_state( dg, direction(), dim, ) @test Array(Q.data) ≈ Array(P.data) end end end end end @testset "Mass Preserving Cutoff filter application" begin N = 3 Ne = (1, 1, 1) @testset for FT in (Float64, Float32) @testset for dim in 2:3 @testset for direction in ( EveryDirection, HorizontalDirection, VerticalDirection, ) brickrange = ntuple( 
j -> range(FT(-1); length = Ne[j] + 1, stop = 1),
    dim,
)
topl = ClimateMachine.Mesh.Topologies.BrickTopology(
    MPI.COMM_WORLD,
    brickrange,
    periodicity = ntuple(j -> true, dim),
)
grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid(
    topl,
    FloatType = FT,
    DeviceArray = ClimateMachine.array_type(),
    polynomialorder = N,
)
filter = ClimateMachine.Mesh.Filters.MassPreservingCutoffFilter(
    grid,
    2,
)
model = FilterTestModel{4}()
dg = ClimateMachine.DGMethods.DGModel(
    model,
    grid,
    nothing,
    nothing,
    nothing;
    state_gradient_flux = nothing,
)
# Filtering a state initialized with the high modes included (q1..q4)
# must reproduce the reference state P initialized with those modes
# already removed (init_ode_state with the filter direction).
@testset for target in ((1, 3), (:q1, :q3))
    Q = ClimateMachine.DGMethods.init_ode_state(
        dg,
        nothing,
        dim,
    )
    ClimateMachine.Mesh.Filters.apply!(
        Q,
        target,
        grid,
        filter,
        direction = direction(),
    )
    P = ClimateMachine.DGMethods.init_ode_state(
        dg,
        direction(),
        dim,
    )
    @test Array(Q.data) ≈ Array(P.data)
end
end
end
end
end

# Single-prognostic-variable model used by the TMAR filter test below.
# NOTE: the original declaration carried a spurious `where {N}` clause with an
# unbound (and unused) static parameter — copy-paste residue that only
# produces an unbound-type-parameter warning; it has been removed.
ClimateMachine.BalanceLaws.vars_state(
    ::FilterTestModel{1},
    ::Prognostic,
    FT,
) = @vars(q::FT)

# Initial condition deliberately dips below zero (for |x| < 0.1) so the TMAR
# filter has negative nodal values to clip.
function ClimateMachine.BalanceLaws.init_state_prognostic!(
    ::FilterTestModel{1},
    state::Vars,
    aux::Vars,
    localgeo,
)
    (x, y, z) = localgeo.coord
    state.q = abs(x) - 0.1
end

@testset "TMAR filter application" begin
    N = 4
    Ne = (2, 2, 2)

    @testset for FT in (Float64, Float32)
        @testset for dim in 2:3
            brickrange =
                ntuple(j -> range(FT(-1); length = Ne[j] + 1, stop = 1), dim)
            topl = ClimateMachine.Mesh.Topologies.BrickTopology(
                MPI.COMM_WORLD,
                brickrange,
                periodicity = ntuple(j -> true, dim),
            )
            grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid(
                topl,
                FloatType = FT,
                DeviceArray = ClimateMachine.array_type(),
                polynomialorder = N,
            )

            model = FilterTestModel{1}()
            dg = ClimateMachine.DGMethods.DGModel(
                model,
                grid,
                nothing,
                nothing,
                nothing;
                state_gradient_flux = nothing,
            )

            # TMAR must clip negatives while preserving the weighted sum,
            # for index, symbol, and colon (all-variable) targets alike.
            @testset for target in ((1,), (:q,), :)
                Q = ClimateMachine.DGMethods.init_ode_state(dg)

                initialsumQ = weightedsum(Q)
                @test minimum(Q.realdata) < 0

                ClimateMachine.Mesh.Filters.apply!(
                    Q,
                    target,
                    grid,
ClimateMachine.Mesh.Filters.TMARFilter(), ) sumQ = weightedsum(Q) @test minimum(Q.realdata) >= 0 @test isapprox(initialsumQ, sumQ; rtol = 10 * eps(FT)) end end end end function cubedshellwarp(a, b, c, R = max(abs(a), abs(b), abs(c))) function f(sR, ξ, η) X, Y = tan(π * ξ / 4), tan(π * η / 4) x1 = sR / sqrt(X^2 + Y^2 + 1) x2, x3 = X * x1, Y * x1 x1, x2, x3 end fdim = argmax(abs.((a, b, c))) if fdim == 1 && a < 0 # (-R, *, *) : Face I from Ronchi, Iacono, Paolucci (1996) x1, x2, x3 = f(-R, b / a, c / a) elseif fdim == 2 && b < 0 # ( *,-R, *) : Face II from Ronchi, Iacono, Paolucci (1996) x2, x1, x3 = f(-R, a / b, c / b) elseif fdim == 1 && a > 0 # ( R, *, *) : Face III from Ronchi, Iacono, Paolucci (1996) x1, x2, x3 = f(R, b / a, c / a) elseif fdim == 2 && b > 0 # ( *, R, *) : Face IV from Ronchi, Iacono, Paolucci (1996) x2, x1, x3 = f(R, a / b, c / b) elseif fdim == 3 && c > 0 # ( *, *, R) : Face V from Ronchi, Iacono, Paolucci (1996) x3, x2, x1 = f(R, b / c, a / c) elseif fdim == 3 && c < 0 # ( *, *,-R) : Face VI from Ronchi, Iacono, Paolucci (1996) x3, x2, x1 = f(-R, b / c, a / c) else error("invalid case for cubedshellwarp: $a, $b, $c") end return x1, x2, x3 end @testset "Mass Preserving Cutoff Filter Conservation Test" begin N = 3 Ne = (1, 1, 1) dim = 3 # dim, direction @testset for FT in (Float64,) Rrange = [FT(1.0), FT(1.2)] topl = ClimateMachine.Mesh.Grids.StackedCubedSphereTopology( MPI.COMM_WORLD, 1, Rrange, boundary = (5, 6), ) grid = ClimateMachine.Mesh.Grids.DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ClimateMachine.array_type(), polynomialorder = (N, N), meshwarp = cubedshellwarp, ) mp_filter = ClimateMachine.Mesh.Filters.MassPreservingCutoffFilter(grid, 2) reg_filter = ClimateMachine.Mesh.Filters.CutoffFilter(grid, 2) model = FilterTestModel{4}() dg = ClimateMachine.DGMethods.DGModel( model, grid, nothing, nothing, nothing; state_gradient_flux = nothing, ) # test mp filter filter = mp_filter Q = 
ClimateMachine.DGMethods.init_ode_state(dg, nothing, dim) sum_before_1 = weightedsum(Q, 1) sum_before_2 = weightedsum(Q, 2) sum_before_3 = weightedsum(Q, 3) target = 1:3 ClimateMachine.Mesh.Filters.apply!(Q, target, grid, filter) sum_after_1 = weightedsum(Q, 1) sum_after_2 = weightedsum(Q, 2) sum_after_3 = weightedsum(Q, 3) @test sum_before_1 ≈ sum_after_1 @test sum_before_2 ≈ sum_after_2 @test sum_before_3 ≈ sum_after_3 # test regular filter filter = reg_filter Q = ClimateMachine.DGMethods.init_ode_state(dg, nothing, dim) sum_before_1 = weightedsum(Q, 1) sum_before_2 = weightedsum(Q, 2) sum_before_3 = weightedsum(Q, 3) target = 1:3 ClimateMachine.Mesh.Filters.apply!(Q, target, grid, filter) sum_after_1 = weightedsum(Q, 1) sum_after_2 = weightedsum(Q, 2) sum_after_3 = weightedsum(Q, 3) @test !(sum_before_1 ≈ sum_after_1) @test !(sum_before_2 ≈ sum_after_2) @test !(sum_before_3 ≈ sum_after_3) end end ================================================ FILE: test/Numerics/Mesh/filter_TMAR.jl ================================================ # This tutorial uses the TMAR Filter from [Light2016](@cite) # # to reproduce the tutorial in section 4b. It is a shear swirling # flow deformation of a transported quantity from LeVeque 1996. The exact # solution at the final time is the same as the initial condition. 
using MPI
using Test
using ClimateMachine
ClimateMachine.init()
using Logging
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Filters
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using LinearAlgebra
using Printf
using Dates
# NOTE(review): EveryXWallTimeSeconds appears unused in this file — confirm.
using ClimateMachine.GenericCallbacks:
    EveryXWallTimeSeconds, EveryXSimulationSteps
using ClimateMachine.VTK: writevtk, writepvtu

using ClimateMachine
const clima_dir = dirname(dirname(pathof(ClimateMachine)));
include(joinpath(
    clima_dir,
    "test",
    "Numerics",
    "DGMethods",
    "advection_diffusion",
    "advection_diffusion_model.jl",
))

# Swirling shear-flow deformation problem (LeVeque 1996, as used in
# Light & Durran 2016 §4b): the velocity is modulated by cos(πt/period), so
# the flow reverses and the exact solution at t = period equals the initial
# condition.
Base.@kwdef struct SwirlingFlow{FT} <: AdvectionDiffusionProblem
    period::FT = 5
end

# No velocity/diffusion setup at initialization; the (time-dependent)
# velocity is set each step in `update_velocity_diffusion!`.
init_velocity_diffusion!(::SwirlingFlow, aux::Vars, geom::LocalGeometry) =
    nothing

# Smooth cosine bell: ((1 + cos(πτ))/2)^q inside the unit radius, 0 outside.
cosbell(τ, q) = τ ≤ 1 ? ((1 + cospi(τ)) / 2)^q : zero(τ)

# Initial tracer: cosine bell of radius 1/4 centered at (1/4, 1/4).
function initial_condition!(::SwirlingFlow, state, aux, localgeo, t)
    FT = eltype(state)
    x, y, _ = aux.coord
    x0, y0 = FT(1 // 4), FT(1 // 4)
    τ = 4 * hypot(x - x0, y - y0)
    state.ρ = cosbell(τ, 3)
end;

# The advecting velocity varies in time, so coefficients must be refreshed.
has_variable_coefficients(::SwirlingFlow) = true

# Divergence-free swirling velocity field; the cos(πt/period) factor reverses
# the flow so the tracer returns to its initial position at t = period.
function update_velocity_diffusion!(
    problem::SwirlingFlow,
    ::AdvectionDiffusion,
    state::Vars,
    aux::Vars,
    t::Real,
)
    x, y, _ = aux.coord
    sx, cx = sinpi(x), cospi(x)
    sy, cy = sinpi(y), cospi(y)
    ct = cospi(t / problem.period)
    u = 2 * sx^2 * sy * cy * ct
    v = -2 * sy^2 * sx * cx * ct
    aux.advection.u = SVector(u, v, 0)
end;

# Write one VTK snapshot per MPI rank, plus a pvtu index file on rank 0.
function do_output(mpicomm, vtkdir, vtkstep, dg, Q, model, testname)
    ## name of the file that this MPI rank will write
    filename = @sprintf(
        "%s/%s_mpirank%04d_step%04d",
        vtkdir,
        testname,
        MPI.Comm_rank(mpicomm),
        vtkstep
    )
    statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
    writevtk(filename, Q, dg, statenames)

    ## generate the pvtu file for these vtk files
    if MPI.Comm_rank(mpicomm) == 0
        ## name of the pvtu file
        pvtuprefix = @sprintf("%s/%s_step%04d", vtkdir, testname, vtkstep)
        ## name of each of the ranks vtk files
        prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
            @sprintf("%s_mpirank%04d_step%04d", testname, i - 1, vtkstep)
        end
        writepvtu(pvtuprefix, prefixes, statenames, eltype(Q))
        @info "Done writing VTK: $pvtuprefix"
    end
end;

# Advect the cosine bell for one full period with the TMAR filter applied both
# inside every RHS evaluation and after every completed step; checks that the
# final state is nonnegative (conservation is reported via the logged sum
# error and asserted by the caller's tolerance expectations).
function test_run(
    mpicomm,
    ArrayType,
    topl,
    problem,
    dt,
    N,
    timeend,
    FT,
    vtkdir,
    outputtime,
)
    grid = DiscontinuousSpectralElementGrid(
        topl,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )
    bcs = (HomogeneousBC{0}(),)
    model = AdvectionDiffusion{2}(problem, bcs, diffusion = false)
    dg = DGModel(
        model,
        grid,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q = init_ode_state(dg, FT(0))

    initialsumQ = weightedsum(Q)

    ## We integrate so that the final solution is equal to the initial solution
    Qe = copy(Q)

    # Clip negatives (in place) before each RHS evaluation so the DG operator
    # only ever sees a nonnegative state.
    rhs! = function (dQdt, Q, ::Nothing, t; increment = false)
        Filters.apply!(Q, :, grid, TMARFilter())
        dg(dQdt, Q, nothing, t; increment = false)
    end

    odesolver = SSPRK33ShuOsher(rhs!, Q; dt = dt, t0 = 0)

    # Also re-apply the TMAR filter after every completed time step.
    cbTMAR = EveryXSimulationSteps(1) do
        Filters.apply!(Q, :, grid, TMARFilter())
    end

    ## create output directory on first rank of communicator
    if MPI.Comm_rank(mpicomm) == 0
        mkpath(vtkdir)
    end
    MPI.Barrier(mpicomm)

    vtkstep = 0
    ## output initial step
    do_output(mpicomm, vtkdir, vtkstep, dg, Q, model, "nonnegative")

    ## setup the output callback
    cbvtk = EveryXSimulationSteps(floor(outputtime / dt)) do
        vtkstep += 1
        minQ, maxQ = minimum(Q), maximum(Q)
        sumQ = weightedsum(Q)
        sumerror = (initialsumQ - sumQ) / initialsumQ
        @info @sprintf """Step = %d minimum(Q) = %.16e maximum(Q) = %.16e sum error = %.16e """ vtkstep minQ maxQ sumerror
        do_output(mpicomm, vtkdir, vtkstep, dg, Q, model, "nonnegative")
    end

    callbacks = (cbTMAR, cbvtk)
    solve!(Q, odesolver; timeend = timeend, callbacks = callbacks)

    minQ, maxQ = minimum(Q), maximum(Q)
    finalsumQ = weightedsum(Q)
    sumerror = (initialsumQ - finalsumQ) / initialsumQ
    error = euclidean_distance(Q, Qe)

    @test minQ ≥ 0
    @info @sprintf """Finished
minimum(Q) = %.16e maximum(Q) = %.16e L2 error = %.16e sum error = %.16e """ minQ maxQ error sumerror end; let ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD FT = Float64 dim = 2 Ne = 20 polynomialorder = 4 problem = SwirlingFlow() brickrange = ( range(FT(0); length = Ne + 1, stop = 1), range(FT(0); length = Ne + 1, stop = 1), range(FT(0); length = Ne + 1, stop = 1), ) topology = BrickTopology( mpicomm, brickrange[1:dim], boundary = ntuple(d -> (1, 1), dim), ) maxvelocity = 2 elementsize = 1 / Ne dx = elementsize / polynomialorder^2 CFL = 1 dt = CFL * dx / maxvelocity vtkdir = abspath(joinpath(ClimateMachine.Settings.output_dir, "vtk_nonnegative")) outputtime = 0.0625 dt = outputtime / ceil(Int64, outputtime / dt) timeend = problem.period @info @sprintf """Starting FT = %s dim = %d Ne = %d polynomial order = %d final time = %.16e time step = %.16e """ FT dim Ne polynomialorder timeend dt test_run( mpicomm, ArrayType, topology, problem, dt, polynomialorder, timeend, FT, vtkdir, outputtime, ) end; ================================================ FILE: test/Numerics/Mesh/grid_integral.jl ================================================ using Test using ClimateMachine let for N in 1:10 for FloatType in (Float64, Float32) (ξ, ω) = ClimateMachine.Mesh.Elements.lglpoints(FloatType, N) I∫ = ClimateMachine.Mesh.Grids.indefinite_integral_interpolation_matrix( ξ, ω, ) for n in 1:N if N == 1 @test sum(abs.(I∫ * ξ)) < 10 * eps(FloatType) else @test I∫ * ξ .^ n ≈ (ξ .^ (n + 1) .- (-1) .^ (n + 1)) / (n + 1) end end end end end ================================================ FILE: test/Numerics/Mesh/interpolation.jl ================================================ using Dates using LinearAlgebra using Logging using MPI using Printf using StaticArrays using Statistics using Test import GaussQuadrature using KernelAbstractions using ClimateMachine ClimateMachine.init() using ClimateMachine.ConfigTypes using ClimateMachine.Atmos using ClimateMachine.Atmos: 
vars_state using ClimateMachine.Orientations using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Geometry using ClimateMachine.Mesh.Interpolation using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.Writers using CLIMAParameters using CLIMAParameters.Planet: R_d, planet_radius, grav, MSLP struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() #------------------------------------- fcn(x, y, z) = sin(x) * cos(y) * cos(z) # sample function #------------------------------------- function Initialize_Brick_Interpolation_Test!( problem, bl, state::Vars, aux::Vars, localgeo, t, ) FT = eltype(state) # Dummy variables for initial condition function state.ρ = FT(0) state.ρu = SVector{3, FT}(0, 0, 0) state.energy.ρe = FT(0) state.moisture.ρq_tot = FT(0) end #------------------------------------------------ struct TestSphereSetup{DT} p_ground::DT T_initial::DT domain_height::DT function TestSphereSetup( p_ground::DT, T_initial::DT, domain_height::DT, ) where {DT <: AbstractFloat} return new{DT}(p_ground, T_initial, domain_height) end end #---------------------------------------------------------------------------- function (setup::TestSphereSetup)(problem, bl, state, aux, coords, t) # callable to set initial conditions FT = eltype(state) param_set = parameter_set(bl) _grav::FT = grav(param_set) _R_d::FT = R_d(param_set) z = altitude(bl, aux) scale_height::FT = _R_d * setup.T_initial / _grav p::FT = setup.p_ground * exp(-z / scale_height) e_int = internal_energy(param_set, setup.T_initial) e_pot = gravitational_potential(bl.orientation, aux) # TODO: Fix type instability: typeof(setup.T_initial) == typeof(p) fails state.ρ = air_density(param_set, FT(setup.T_initial), p) state.ρu = 
SVector{3, FT}(0, 0, 0) state.energy.ρe = state.ρ * (e_int + e_pot) return nothing end #---------------------------------------------------------------------------- function run_brick_interpolation_test( ::Type{DA}, ::Type{FT}, polynomialorders, toler::FT, ) where {DA, FT <: AbstractFloat} mpicomm = MPI.COMM_WORLD root = 0 pid = MPI.Comm_rank(mpicomm) npr = MPI.Comm_size(mpicomm) xmin, ymin, zmin = FT(0), FT(0), FT(0) # defining domain extent xmax, ymax, zmax = FT(2000), FT(400), FT(2000) xres = [FT(10), FT(10), FT(10)] # resolution of interpolation grid Ne = (20, 4, 20) #------------------------- _x, _y, _z = ClimateMachine.Mesh.Grids.vgeoid.x1id, ClimateMachine.Mesh.Grids.vgeoid.x2id, ClimateMachine.Mesh.Grids.vgeoid.x3id #------------------------- brickrange = ( range(FT(xmin); length = Ne[1] + 1, stop = xmax), range(FT(ymin); length = Ne[2] + 1, stop = ymax), range(FT(zmin); length = Ne[3] + 1, stop = zmax), ) topl = StackedBrickTopology( mpicomm, brickrange, periodicity = (true, true, false), ) grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = DA, polynomialorder = polynomialorders, ) physics = AtmosPhysics{FT}( param_set; ref_state = NoReferenceState(), turbulence = ConstantDynamicViscosity(FT(0)), ) model = AtmosModel{FT}( AtmosLESConfigType, physics; orientation = SphericalOrientation(), init_state_prognostic = Initialize_Brick_Interpolation_Test!, source = (Gravity(),), ) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q = init_ode_state(dg, FT(0)) #------------------------------ x1 = @view grid.vgeo[:, _x:_x, :] x2 = @view grid.vgeo[:, _y:_y, :] x3 = @view grid.vgeo[:, _z:_z, :] #----calling interpolation function on state variable # st_idx-------------------------- nvars = size(Q.data, 2) Q.data .= sin.(x1 ./ xmax) .* cos.(x2 ./ ymax) .* cos.(x3 ./ zmax) xbnd = Array{FT}(undef, 2, 3) xbnd[1, 1] = FT(xmin) xbnd[2, 1] = FT(xmax) xbnd[1, 2] = FT(ymin) xbnd[2, 
2] = FT(ymax) xbnd[1, 3] = FT(zmin) xbnd[2, 3] = FT(zmax) #---------------------------------------------------------- x1g = collect(range(xbnd[1, 1], xbnd[2, 1], step = xres[1])) nx1 = length(x1g) x2g = collect(range(xbnd[1, 2], xbnd[2, 2], step = xres[2])) nx2 = length(x2g) x3g = collect(range(xbnd[1, 3], xbnd[2, 3], step = xres[3])) nx3 = length(x3g) filename = "test.nc" varnames = ("ρ", "ρu", "ρv", "ρw", "e", "other") intrp_brck = InterpolationBrick(grid, xbnd, x1g, x2g, x3g) # sets up the interpolation structure iv = DA(Array{FT}(undef, intrp_brck.Npl, nvars)) # allocating space for the interpolation variable if pid == 0 fiv = DA(Array{FT}(undef, nx1, nx2, nx3, nvars)) # allocating space for the full interpolation variables accumulated on proc# 0 else fiv = DA(Array{FT}(undef, 0, 0, 0, 0)) end interpolate_local!(intrp_brck, Q.data, iv) # interpolation accumulate_interpolated_data!(intrp_brck, iv, fiv) # write interpolation data to file #------------------------------ err_inf_dom = zeros(FT, nvars) x1g = intrp_brck.x1g x2g = intrp_brck.x2g x3g = intrp_brck.x3g if pid == 0 nx1 = length(x1g) nx2 = length(x2g) nx3 = length(x3g) x1 = Array{FT}(undef, nx1, nx2, nx3) x2 = similar(x1) x3 = similar(x1) fiv_cpu = Array(fiv) for k in 1:nx3, j in 1:nx2, i in 1:nx1 x1[i, j, k] = x1g[i] x2[i, j, k] = x2g[j] x3[i, j, k] = x3g[k] end fex = sin.(x1 ./ xmax) .* cos.(x2 ./ ymax) .* cos.(x3 ./ zmax) for vari in 1:nvars err_inf_dom[vari] = maximum(abs.(fiv_cpu[:, :, :, vari] .- fex[:, :, :])) end end MPI.Bcast!(err_inf_dom, root, mpicomm) if maximum(err_inf_dom) > toler if pid == 0 println("err_inf_domain = $(maximum(err_inf_dom)) is larger than prescribed tolerance of $toler") end MPI.Barrier(mpicomm) end @test maximum(err_inf_dom) < toler return nothing #---------------- end #function run_brick_interpolation_test #---------------------------------------------------------------------------- #---------------------------------------------------------------------------- # Cubed 
sphere, lat/long interpolation test #---------------------------------------------------------------------------- function run_cubed_sphere_interpolation_test( ::Type{DA}, ::Type{FT}, polynomialorders, toler::FT, ) where {DA, FT <: AbstractFloat} mpicomm = MPI.COMM_WORLD root = 0 pid = MPI.Comm_rank(mpicomm) npr = MPI.Comm_size(mpicomm) domain_height = FT(30e3) numelem_horz = 6 numelem_vert = 4 #------------------------- _x, _y, _z = ClimateMachine.Mesh.Grids.vgeoid.x1id, ClimateMachine.Mesh.Grids.vgeoid.x2id, ClimateMachine.Mesh.Grids.vgeoid.x3id _ρ, _ρu, _ρv, _ρw = 1, 2, 3, 4 #------------------------- _planet_radius::FT = planet_radius(param_set) vert_range = grid1d( _planet_radius, FT(_planet_radius + domain_height), nelem = numelem_vert, ) lat_res = FT(1) # 1 degree resolution long_res = FT(1) # 1 degree resolution nel_vert_grd = 20 #100 #50 #10#50 rad_res = FT((vert_range[end] - vert_range[1]) / FT(nel_vert_grd)) # 1000 m vertical resolution #---------------------------------------------------------- _MSLP::FT = MSLP(param_set) setup = TestSphereSetup(_MSLP, FT(255), FT(30e3)) topology = StackedCubedSphereTopology(mpicomm, numelem_horz, vert_range) grid = DiscontinuousSpectralElementGrid( topology, FloatType = FT, DeviceArray = DA, polynomialorder = polynomialorders, meshwarp = ClimateMachine.Mesh.Topologies.equiangular_cubed_sphere_warp, ) physics = AtmosPhysics{FT}( param_set; ref_state = NoReferenceState(), turbulence = ConstantDynamicViscosity(FT(0)), moisture = DryModel(), ) model = AtmosModel{FT}( AtmosLESConfigType, physics; init_state_prognostic = setup, source = (), ) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q = init_ode_state(dg, FT(0)) #------------------------------ x1 = @view grid.vgeo[:, _x:_x, :] x2 = @view grid.vgeo[:, _y:_y, :] x3 = @view grid.vgeo[:, _z:_z, :] xmax = _planet_radius ymax = _planet_radius zmax = _planet_radius nvars = size(Q.data, 2) Q.data .= 
sin.(x1 ./ xmax) .* cos.(x2 ./ ymax) .* cos.(x3 ./ zmax) #for ivar in 1:nvars # Q.data[:, ivar, :] .= # sin.(x1[:, 1, :] ./ xmax) .* cos.(x2[:, 1, :] ./ ymax) .* # cos.(x3[:, 1, :] ./ zmax) #end #------------------------------ lat_min, lat_max = FT(-90.0), FT(90.0) # inclination/zeinth angle range long_min, long_max = FT(-180.0), FT(180.0) # azimuthal angle range rad_min, rad_max = vert_range[1], vert_range[end] # radius range lat_grd = collect(range(lat_min, lat_max, step = lat_res)) n_lat = length(lat_grd) long_grd = collect(range(long_min, long_max, step = long_res)) n_long = length(long_grd) rad_grd = collect(range(rad_min, rad_max, step = rad_res)) n_rad = length(rad_grd) _ρu, _ρv, _ρw = 2, 3, 4 filename = "test.nc" varnames = ("ρ", "ρu", "ρv", "ρw", "e") projectv = true intrp_cs = InterpolationCubedSphere( grid, collect(vert_range), numelem_horz, lat_grd, long_grd, rad_grd, ) # sets up the interpolation structure iv = DA(Array{FT}(undef, intrp_cs.Npl, nvars)) # allocating space for the interpolation variable if pid == 0 fiv = DA(Array{FT}(undef, n_long, n_lat, n_rad, nvars)) # allocating space for the full interpolation variables accumulated on proc# 0 else fiv = DA(Array{FT}(undef, 0, 0, 0, 0)) end interpolate_local!(intrp_cs, Q.data, iv) # interpolation project_cubed_sphere!(intrp_cs, iv, (_ρu, _ρv, _ρw)) # project velocity onto unit vectors along rad, lat & long accumulate_interpolated_data!(intrp_cs, iv, fiv) # accumulate interpolated data on to proc# 0 #---------------------------------------------------------- # Testing err_inf_dom = zeros(FT, nvars) rad = Array(intrp_cs.rad_grd) lat = Array(intrp_cs.lat_grd) long = Array(intrp_cs.long_grd) fiv_cpu = Array(fiv) if pid == 0 nrad = length(rad) nlat = length(lat) nlong = length(long) x1g = Array{FT}(undef, nrad, nlat, nlong) x2g = similar(x1g) x3g = similar(x1g) fex = zeros(FT, nlong, nlat, nrad, nvars) for vari in 1:nvars for i in 1:nlong, j in 1:nlat, k in 1:nrad x1g_ijk = rad[k] * cosd(lat[j]) * 
cosd(long[i]) # inclination -> latitude; azimuthal -> longitude. x2g_ijk = rad[k] * cosd(lat[j]) * sind(long[i]) # inclination -> latitude; azimuthal -> longitude. x3g_ijk = rad[k] * sind(lat[j]) fex[i, j, k, vari] = fcn(x1g_ijk / xmax, x2g_ijk / ymax, x3g_ijk / zmax) end end if projectv for i in 1:nlong, j in 1:nlat, k in 1:nrad fex[i, j, k, _ρu] = -fex[i, j, k, _ρ] * sind(long[i]) + fex[i, j, k, _ρ] * cosd(long[i]) fex[i, j, k, _ρv] = -fex[i, j, k, _ρ] * sind(lat[j]) * cosd(long[i]) - fex[i, j, k, _ρ] * sind(lat[j]) * sind(long[i]) + fex[i, j, k, _ρ] * cosd(lat[j]) fex[i, j, k, _ρw] = fex[i, j, k, _ρ] * cosd(lat[j]) * cosd(long[i]) + fex[i, j, k, _ρ] * cosd(lat[j]) * sind(long[i]) + fex[i, j, k, _ρ] * sind(lat[j]) end end for vari in 1:nvars err_inf_dom[vari] = maximum(abs.(fiv_cpu[:, :, :, vari] .- fex[:, :, :, vari])) end end MPI.Bcast!(err_inf_dom, root, mpicomm) if maximum(err_inf_dom) > toler if pid == 0 println("err_inf_domain = $(maximum(err_inf_dom)) is larger than prescribed tolerance of $toler") end MPI.Barrier(mpicomm) end @test maximum(err_inf_dom) < toler return nothing end #---------------------------------------------------------------------------- @testset "Interpolation tests" begin DA = ClimateMachine.array_type() run_brick_interpolation_test(DA, Float32, (0), Float32(1E-1)) run_brick_interpolation_test(DA, Float64, (0), Float64(1E-1)) run_brick_interpolation_test(DA, Float32, (5), Float32(1E-6)) run_brick_interpolation_test(DA, Float64, (5), Float64(1E-9)) run_brick_interpolation_test(DA, Float32, (5, 6), Float32(1E-6)) run_brick_interpolation_test(DA, Float64, (5, 6), Float64(1E-9)) run_cubed_sphere_interpolation_test(DA, Float32, (0), Float32(2e-1)) run_cubed_sphere_interpolation_test(DA, Float64, (0), Float64(2e-1)) run_cubed_sphere_interpolation_test(DA, Float32, (5), Float32(2e-6)) run_cubed_sphere_interpolation_test(DA, Float64, (5), Float64(2e-7)) run_cubed_sphere_interpolation_test(DA, Float32, (5, 6), Float32(2e-6)) 
run_cubed_sphere_interpolation_test(DA, Float64, (5, 6), Float64(2e-7))
end
#------------------------------------------------



================================================
FILE: test/Numerics/Mesh/min_node_distance.jl
================================================
using Test
using MPI
using ClimateMachine
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.VTK
using Logging
using Printf
using LinearAlgebra

# Verify min_node_distance on warped stacked-brick grids: the computed
# minimum node distance in every/horizontal/vertical direction must match
# the value predicted from the reference-element spacing.
let
    # boiler plate MPI stuff
    ClimateMachine.init()
    ArrayType = ClimateMachine.array_type()
    mpicomm = MPI.COMM_WORLD

    # Mesh generation parameters: Neh horizontal and Nev vertical elements.
    Neh = 10
    Nev = 4

    # Polynomial orders to test, indexed by dimension: Ns[dim - 1] is the
    # list of orders for dim == 2 and dim == 3 (scalars or per-direction
    # tuples).
    Ns = ((4, (4, 3), (2, 3)), (4, (2, 3, 4), (4, 2, 3), (4, 2, 3)))

    @testset "$(@__FILE__) DGModel matrix" begin
        for FT in (Float64, Float32)
            for dim in (2, 3)
                for N in Ns[dim - 1]
                    if dim == 2
                        brickrange = (
                            range(FT(0); length = Neh + 1, stop = 1),
                            range(FT(1); length = Nev + 1, stop = 2),
                        )
                    elseif dim == 3
                        brickrange = (
                            range(FT(0); length = Neh + 1, stop = 1),
                            range(FT(0); length = Neh + 1, stop = 1),
                            range(FT(1); length = Nev + 1, stop = 2),
                        )
                    end
                    topl = StackedBrickTopology(mpicomm, brickrange)

                    # Piecewise-linear warp that doubles the spacing in the
                    # upper half of each coordinate, so the minimum node
                    # distance is attained in the unwarped lower half and
                    # remains predictable below.
                    function warpfun(ξ1, ξ2, ξ3)
                        FT = eltype(ξ1)
                        ξ1 ≥ FT(1 // 2) &&
                            (ξ1 = FT(1 // 2) + 2 * (ξ1 - FT(1 // 2)))
                        if dim == 2
                            ξ2 ≥ FT(3 // 2) &&
                                (ξ2 = FT(3 // 2) + 2 * (ξ2 - FT(3 // 2)))
                        elseif dim == 3
                            ξ2 ≥ FT(1 // 2) &&
                                (ξ2 = FT(1 // 2) + 2 * (ξ2 - FT(1 // 2)))
                            ξ3 ≥ FT(3 // 2) &&
                                (ξ3 = FT(3 // 2) + 2 * (ξ3 - FT(3 // 2)))
                        end
                        (ξ1, ξ2, ξ3)
                    end

                    grid = DiscontinuousSpectralElementGrid(
                        topl,
                        FloatType = FT,
                        DeviceArray = ArrayType,
                        polynomialorder = N,
                        meshwarp = warpfun,
                    )

                    # testname = "grid_poly$(N)_dim$(dim)_$(ArrayType)_$(FT)"
                    # filename(rank) = @sprintf("%s_mpirank%04d", testname, rank)
                    # writevtk(filename(MPI.Comm_rank(mpicomm)), grid)
                    # if MPI.Comm_rank(mpicomm) == 0
                    #     writepvtu(testname, filename.(0:MPI.Comm_size(mpicomm)-1), (), FT)
                    # end

                    # Expected minimum node distance: the smallest gap
                    # between the first two reference points, scaled by the
                    # element width (1 / (2 * nelem) because the warp doubles
                    # half the domain).
                    ξ = referencepoints(grid)
                    Δξ = ntuple(d -> ξ[d][2] - ξ[d][1], dim)
                    hmnd = minimum(Δξ[1:(dim - 1)]) / (2Neh)
                    vmnd = Δξ[end] / (2Nev)

                    @test hmnd ≈ min_node_distance(grid, EveryDirection())
                    @test vmnd ≈ min_node_distance(grid, VerticalDirection())
                    @test hmnd ≈ min_node_distance(grid, HorizontalDirection())
                end
            end
        end
    end
end

nothing



================================================
FILE: test/Numerics/Mesh/mpi_centroid.jl
================================================
using Test
using MPI
using ClimateMachine.Mesh.BrickMesh

# Check that centroidtocode produces the expected space-filling-curve codes
# for a small brick mesh partitioned across the MPI ranks; codes are
# gathered to the root rank and compared against a hard-coded table.
function main()
    MPI.Init()
    comm = MPI.COMM_WORLD

    (elemtovert, elemtocorner, facecode) = brickmesh(
        (2.0:5.0, 4.0:6.0),
        (false, true);
        part = MPI.Comm_rank(comm) + 1,
        numparts = MPI.Comm_size(comm),
    )

    code = centroidtocode(comm, elemtocorner)
    (d, nelem) = size(code)

    root = 0
    # Use `root` rather than a literal 0 so the gather destination is
    # declared in exactly one place (matches the Gatherv! call below).
    counts = MPI.Gather(Cint(length(code)), root, comm)
    code_all = MPI.Gatherv!(
        code,
        MPI.Comm_rank(comm) == root ?
        VBuffer(similar(code, sum(counts)), counts) : nothing,
        root,
        comm,
    )

    if MPI.Comm_rank(comm) == root
        # Flattened gather result back to (d, total-elements) layout.
        code_all = reshape(code_all, d, div(sum(counts), d))

        code_expect = UInt64[
            0x0000000000000000 0x1555555555555555 0xffffffffffffffff 0x5555555555555555 0x6aaaaaaaaaaaaaaa 0xaaaaaaaaaaaaaaaa
            0x0000000000000000 0x5555555555555555 0xffffffffffffffff 0x5555555555555555 0xaaaaaaaaaaaaaaaa 0xaaaaaaaaaaaaaaaa
        ]

        @test code_all == code_expect
    end
end

main()



================================================
FILE: test/Numerics/Mesh/mpi_connect.jl
================================================
using Test
using MPI
using ClimateMachine.Mesh.Topologies

function main()
    MPI.Init()
    comm = MPI.COMM_WORLD
    crank = MPI.Comm_rank(comm)
    csize = MPI.Comm_size(comm)

    @assert csize == 3

    topology = BrickTopology(
        comm,
        (0:4, 5:9);
        boundary = ((1, 2), (3, 4)),
        periodicity = (false, true),
        connectivity = :face,
    )

    elems = topology.elems
    realelems = topology.realelems
    ghostelems = topology.ghostelems
    sendelems = topology.sendelems
    elemtocoord = topology.elemtocoord
    elemtoelem = topology.elemtoelem
    elemtoface = topology.elemtoface
    elemtoordr = topology.elemtoordr
    elemtobndy = topology.elemtobndy
    nabrtorank = topology.nabrtorank
nabrtorecv = topology.nabrtorecv nabrtosend = topology.nabrtosend globalelemtoface = [ 1 2 2 1 1 1 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 2 2 2 1 1 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 ] globalelemtoordr = ones(Int, size(globalelemtoface)) globalelemtocoord = Array{Int}(undef, 2, 4, 16) globalelemtocoord[:, :, 1] = [0 1 0 1; 5 5 6 6] globalelemtocoord[:, :, 2] = [1 2 1 2; 5 5 6 6] globalelemtocoord[:, :, 3] = [1 2 1 2; 6 6 7 7] globalelemtocoord[:, :, 4] = [0 1 0 1; 6 6 7 7] globalelemtocoord[:, :, 5] = [0 1 0 1; 7 7 8 8] globalelemtocoord[:, :, 6] = [0 1 0 1; 8 8 9 9] globalelemtocoord[:, :, 7] = [1 2 1 2; 8 8 9 9] globalelemtocoord[:, :, 8] = [1 2 1 2; 7 7 8 8] globalelemtocoord[:, :, 9] = [2 3 2 3; 7 7 8 8] globalelemtocoord[:, :, 10] = [2 3 2 3; 8 8 9 9] globalelemtocoord[:, :, 11] = [3 4 3 4; 8 8 9 9] globalelemtocoord[:, :, 12] = [3 4 3 4; 7 7 8 8] globalelemtocoord[:, :, 13] = [3 4 3 4; 6 6 7 7] globalelemtocoord[:, :, 14] = [2 3 2 3; 6 6 7 7] globalelemtocoord[:, :, 15] = [2 3 2 3; 5 5 6 6] globalelemtocoord[:, :, 16] = [3 4 3 4; 5 5 6 6] globalelemtobndy = [ 1 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ] if crank == 0 nrealelem = 5 globalelems = [1, 2, 3, 4, 5, 6, 7, 8, 14, 15] elemtoelem_expect = [ 1 1 4 2 3 4 7 8 9 10 2 10 9 3 8 6 7 8 9 10 6 7 2 1 4 6 7 8 9 10 4 3 8 5 6 6 7 8 9 10 ] nabrtorank_expect = [1, 2] nabrtorecv_expect = UnitRange{Int}[1:3, 4:5] nabrtosend_expect = UnitRange{Int}[1:4, 5:6] elseif crank == 1 nrealelem = 5 globalelems = [6, 7, 8, 9, 10, 1, 2, 3, 5, 11, 12, 14, 15] elemtoelem_expect = [ 1 1 9 3 2 2 7 8 3 10 11 12 13 2 5 4 11 10 6 7 8 9 1 2 12 13 9 3 8 12 4 6 7 8 9 10 11 12 13 6 7 2 5 13 6 7 8 9 10 11 12 13 ] nabrtorank_expect = [0, 2] nabrtorecv_expect = UnitRange{Int}[1:4, 5:8] nabrtosend_expect = UnitRange{Int}[1:3, 4:5] elseif crank == 2 nrealelem = 6 globalelems = [11, 12, 13, 14, 15, 16, 2, 3, 9, 10] 
elemtoelem_expect = [
            10 9 4 8 7 5 7 8 9 10
            1 2 3 3 6 4 7 8 9 10
            2 3 6 5 10 1 7 8 9 10
            6 1 2 9 4 3 7 8 9 10
        ]
        nabrtorank_expect = [0, 1]
        nabrtorecv_expect = UnitRange{Int}[1:2, 3:4]
        nabrtosend_expect = UnitRange{Int}[1:2, 3:6]
    end

    # Common checks for every rank: element bookkeeping, connectivity,
    # boundary tags, and neighbor send/recv ranges all match expectations.
    @test elems == 1:length(globalelems)
    @test realelems == 1:nrealelem
    @test ghostelems == (nrealelem + 1):length(globalelems)
    @test elemtocoord == globalelemtocoord[:, :, globalelems]
    @test elemtoface[:, realelems] ==
          globalelemtoface[:, globalelems[realelems]]
    @test elemtoelem == elemtoelem_expect
    @test elemtobndy == globalelemtobndy[:, globalelems]
    @test elemtoordr == ones(eltype(elemtoordr), size(elemtoordr))
    @test nabrtorank == nabrtorank_expect
    @test nabrtorecv == nabrtorecv_expect
    @test nabrtosend == nabrtosend_expect
    @test collect(realelems) ==
          sort(union(topology.exteriorelems, topology.interiorelems))
    @test unique(sort(sendelems)) == topology.exteriorelems
    @test length(intersect(topology.exteriorelems, topology.interiorelems)) ==
          0
end

main()



================================================
FILE: test/Numerics/Mesh/mpi_connect_1d.jl
================================================
using Test
using MPI
using ClimateMachine.Mesh.Topologies

# Check BrickTopology face connectivity for a 1-D periodic mesh of 10
# elements distributed over exactly 5 MPI ranks (2 real elements per rank),
# comparing against hand-computed global tables.
function main()
    MPI.Init()
    comm = MPI.COMM_WORLD
    crank = MPI.Comm_rank(comm)
    csize = MPI.Comm_size(comm)

    @assert csize == 5

    topology = BrickTopology(
        comm,
        (0:10,);
        boundary = ((1, 2),),
        periodicity = (true,),
    )

    elems = topology.elems
    realelems = topology.realelems
    ghostelems = topology.ghostelems
    sendelems = topology.sendelems
    elemtocoord = topology.elemtocoord
    elemtoelem = topology.elemtoelem
    elemtoface = topology.elemtoface
    elemtoordr = topology.elemtoordr
    elemtobndy = topology.elemtobndy
    nabrtorank = topology.nabrtorank
    nabrtorecv = topology.nabrtorecv
    nabrtosend = topology.nabrtosend

    # Global (rank-independent) reference tables; rows are the two faces of
    # each 1-D element, columns are the 10 global elements.
    globalelemtoelem = [
        10 1 2 3 4 5 6 7 8 9
        2 3 4 5 6 7 8 9 10 1
    ]
    globalelemtoface = [
        2 2 2 2 2 2 2 2 2 2
        1 1 1 1 1 1 1 1 1 1
    ]
    globalelemtoordr = ones(Int, size(globalelemtoface))
    # Fully periodic, so no element carries a boundary tag.
    globalelemtobndy = zeros(Int, size(globalelemtoface))

    globalelemtocoord = Array{Int}(undef, 1, 2, 10)
    globalelemtocoord[:, :, 1] = [0 1]
    globalelemtocoord[:, :, 2] = [1 2]
    globalelemtocoord[:, :, 3] = [2 3]
    globalelemtocoord[:, :, 4] = [3 4]
    globalelemtocoord[:, :, 5] = [4 5]
    globalelemtocoord[:, :, 6] = [5 6]
    globalelemtocoord[:, :, 7] = [6 7]
    globalelemtocoord[:, :, 8] = [7 8]
    globalelemtocoord[:, :, 9] = [8 9]
    globalelemtocoord[:, :, 10] = [9 10]

    @assert csize == 5
    nrealelem = 2

    # Per-rank expectations: local element numbering (real elements first,
    # then ghosts), local connectivity, and the two neighbor ranks.
    if crank == 0
        globalelems = [1, 2, 3, 10]
        elemtoelem_expect = [4 1 3 4; 2 3 3 4]
        nabrtorank_expect = [1, 4]
    elseif crank == 1
        globalelems = [3, 4, 2, 5]
        elemtoelem_expect = [3 1 3 4; 2 4 3 4]
        nabrtorank_expect = [0, 2]
    elseif crank == 2
        globalelems = [5, 6, 4, 7]
        elemtoelem_expect = [3 1 3 4; 2 4 3 4]
        nabrtorank_expect = [1, 3]
    elseif crank == 3
        globalelems = [7, 8, 6, 9]
        elemtoelem_expect = [3 1 3 4; 2 4 3 4]
        nabrtorank_expect = [2, 4]
    elseif crank == 4
        globalelems = [9, 10, 1, 8]
        elemtoelem_expect = [4 1 3 4; 2 3 3 4]
        nabrtorank_expect = [0, 3]
    end
    # Every rank exchanges exactly one element with each of two neighbors.
    nabrtorecv_expect = UnitRange{Int}[1:1, 2:2]
    nabrtosend_expect = UnitRange{Int}[1:1, 2:2]

    @test elems == 1:length(globalelems)
    @test realelems == 1:nrealelem
    @test ghostelems == (nrealelem + 1):length(globalelems)
    @test elemtocoord == globalelemtocoord[:, :, globalelems]
    @test elemtoface[:, realelems] ==
          globalelemtoface[:, globalelems[realelems]]
    @test elemtoelem == elemtoelem_expect
    @test elemtobndy == globalelemtobndy[:, globalelems]
    @test elemtoordr == globalelemtoordr[:, globalelems]
    @test nabrtorank == nabrtorank_expect
    @test nabrtorecv == nabrtorecv_expect
    @test nabrtosend == nabrtosend_expect
    @test collect(realelems) ==
          sort(union(topology.exteriorelems, topology.interiorelems))
    @test unique(sort(sendelems)) == topology.exteriorelems
    @test length(intersect(topology.exteriorelems, topology.interiorelems)) ==
          0
end

main()



================================================
FILE: test/Numerics/Mesh/mpi_connect_ell.jl
================================================
using Test using MPI using ClimateMachine.Mesh.Topologies function main() MPI.Init() comm = MPI.COMM_WORLD crank = MPI.Comm_rank(comm) csize = MPI.Comm_size(comm) @assert csize == 2 FT = Float64 Nx = 3 Ny = 2 x = range(FT(0); length = Nx + 1, stop = 1) y = range(FT(0); length = Ny + 1, stop = 1) topology = BrickTopology( comm, (x, y); boundary = ((1, 2), (3, 4)), periodicity = (true, true), connectivity = :face, ) elems = topology.elems realelems = topology.realelems ghostelems = topology.ghostelems sendelems = topology.sendelems elemtocoord = topology.elemtocoord elemtoelem = topology.elemtoelem elemtoface = topology.elemtoface elemtoordr = topology.elemtoordr elemtobndy = topology.elemtobndy nabrtorank = topology.nabrtorank nabrtorecv = topology.nabrtorecv nabrtosend = topology.nabrtosend globalelemtoface = [ 2 2 2 2 2 2 1 1 1 1 1 1 4 4 4 4 4 4 3 3 3 3 3 3 ] globalelemtoordr = ones(Int, size(globalelemtoface)) globalelemtobndy = zeros(Int, size(globalelemtoface)) if crank == 0 nrealelem = 3 globalelems = [1, 2, 3, 4, 5, 6] elemtoelem_expect = [ 6 4 2 4 5 6 5 3 4 4 5 6 2 1 5 4 5 6 2 1 5 4 5 6 ] nabrtorank_expect = [1] nabrtorecv_expect = UnitRange{Int}[1:3] nabrtosend_expect = UnitRange{Int}[1:3] elseif crank == 1 nrealelem = 3 globalelems = [4, 5, 6, 1, 2, 3] elemtoelem_expect = [ 6 4 2 4 5 6 5 3 4 4 5 6 3 6 1 4 5 6 3 6 1 4 5 6 ] nabrtorank_expect = [0] nabrtorecv_expect = UnitRange{Int}[1:3] nabrtosend_expect = UnitRange{Int}[1:3] end @test elems == 1:length(globalelems) @test realelems == 1:nrealelem @test ghostelems == (nrealelem + 1):length(globalelems) @test elemtoface[:, realelems] == globalelemtoface[:, globalelems[realelems]] @test elemtoelem == elemtoelem_expect @test elemtobndy == globalelemtobndy[:, globalelems] @test elemtoordr == ones(eltype(elemtoordr), size(elemtoordr)) @test nabrtorank == nabrtorank_expect @test nabrtorecv == nabrtorecv_expect @test nabrtosend == nabrtosend_expect @test collect(realelems) == sort(union(topology.exteriorelems, 
topology.interiorelems)) @test unique(sort(sendelems)) == topology.exteriorelems @test length(intersect(topology.exteriorelems, topology.interiorelems)) == 0 end main() ================================================ FILE: test/Numerics/Mesh/mpi_connect_sphere.jl ================================================ using Test using MPI using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.MPIStateArrays using KernelAbstractions function main() FT = Float64 Nhorz = 3 Nstack = 5 N = 4 DA = Array MPI.Initialized() || MPI.Init() comm = MPI.COMM_WORLD crank = MPI.Comm_rank(comm) csize = MPI.Comm_size(comm) Rrange = FT.(accumulate(+, 1:(Nstack + 1))) topology = StackedCubedSphereTopology( MPI.COMM_SELF, Nhorz, Rrange; boundary = (1, 2), connectivity = :face, ) grid = DiscontinuousSpectralElementGrid( topology; FloatType = FT, DeviceArray = DA, polynomialorder = N, meshwarp = Topologies.equiangular_cubed_sphere_warp, ) let Np = (N + 1)^3 activedofs = Array(grid.activedofs) vmaprecv = Array(grid.vmaprecv) active = union(1:(length(topology.realelems) * Np), vmaprecv) inactive = setdiff(1:(length(topology.elems) * Np), active) @test all(activedofs[active] .== true) @test all(activedofs[inactive] .== false) end #= @show elems = topology.elems @show realelems = topology.realelems @show ghostelems = topology.ghostelems @show sendelems = topology.sendelems @show elemtocoord = topology.elemtocoord @show elemtoelem = topology.elemtoelem @show elemtoface = topology.elemtoface @show elemtoordr = topology.elemtoordr @show elemtobndy = topology.elemtobndy @show nabrtorank = topology.nabrtorank @show nabrtorecv = topology.nabrtorecv @show nabrtosend = topology.nabrtosend =# # Check x1x2x3 matches before comm x1 = @view grid.vgeo[:, Grids._x1, :] x2 = @view grid.vgeo[:, Grids._x2, :] x3 = @view grid.vgeo[:, Grids._x3, :] interior_faces = vec(grid.elemtobndy .== 0) interior_vmap⁻ = reshape(grid.vmap⁻, (size(grid.vmap⁻, 1), :))[:, interior_faces] 
interior_vmap⁺ = reshape(grid.vmap⁺, (size(grid.vmap⁺, 1), :))[:, interior_faces] @test x1[interior_vmap⁻] ≈ x1[interior_vmap⁺] @test x2[interior_vmap⁻] ≈ x2[interior_vmap⁺] @test x3[interior_vmap⁻] ≈ x3[interior_vmap⁺] Np = (N + 1)^3 x1x2x3 = MPIStateArray{FT}( topology.mpicomm, DA, Np, 3, length(topology.elems), realelems = topology.realelems, ghostelems = topology.ghostelems, vmaprecv = grid.vmaprecv, vmapsend = grid.vmapsend, nabrtorank = topology.nabrtorank, nabrtovmaprecv = grid.nabrtovmaprecv, nabrtovmapsend = grid.nabrtovmapsend, ) x1x2x3.data[:, :, topology.realelems] .= @view grid.vgeo[ :, [Grids._x1, Grids._x2, Grids._x3], topology.realelems, ] event = Event(array_device(x1x2x3)) event = MPIStateArrays.begin_ghost_exchange!(x1x2x3, dependencies = event) event = MPIStateArrays.end_ghost_exchange!(x1x2x3, dependencies = event) wait(array_device(x1x2x3), event) # Check x1x2x3 matches after x1 = @view x1x2x3.data[:, 1, :] x2 = @view x1x2x3.data[:, 2, :] x3 = @view x1x2x3.data[:, 3, :] @test x1[interior_vmap⁻] ≈ x1[interior_vmap⁺] @test x2[interior_vmap⁻] ≈ x2[interior_vmap⁺] @test x3[interior_vmap⁻] ≈ x3[interior_vmap⁺] nothing end isinteractive() || main() ================================================ FILE: test/Numerics/Mesh/mpi_connect_stacked.jl ================================================ using Test using MPI using ClimateMachine.Mesh.Topologies function main() MPI.Init() comm = MPI.COMM_WORLD crank = MPI.Comm_rank(comm) csize = MPI.Comm_size(comm) @assert csize == 3 topology = StackedBrickTopology( comm, (2:5, 4:6), periodicity = (false, true), boundary = ((1, 2), (3, 4)), connectivity = :face, ) elems = topology.elems realelems = topology.realelems ghostelems = topology.ghostelems sendelems = topology.sendelems elemtocoord = topology.elemtocoord elemtoelem = topology.elemtoelem elemtoface = topology.elemtoface elemtoordr = topology.elemtoordr elemtobndy = topology.elemtobndy nabrtorank = topology.nabrtorank nabrtorecv = topology.nabrtorecv 
nabrtosend = topology.nabrtosend globalelemtoface = [ 1 1 2 2 2 2 1 1 1 1 2 2 4 4 4 4 4 4 3 3 3 3 3 3 ] globalelemtoordr = ones(Int, size(globalelemtoface)) globalelemtocoord = Array{Int}(undef, 2, 4, 6) globalelemtocoord[:, :, 1] = [2 3 2 3; 4 4 5 5] globalelemtocoord[:, :, 2] = [2 3 2 3; 5 5 6 6] globalelemtocoord[:, :, 3] = [3 4 3 4; 4 4 5 5] globalelemtocoord[:, :, 4] = [3 4 3 4; 5 5 6 6] globalelemtocoord[:, :, 5] = [4 5 4 5; 4 4 5 5] globalelemtocoord[:, :, 6] = [4 5 4 5; 5 5 6 6] globalelemtobndy = [ 1 1 0 0 0 0 0 0 0 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 ] if crank == 0 nrealelem = 2 globalelems = [1, 2, 3, 4] elemtoelem_expect = [ 1 2 3 4 3 4 3 4 2 1 3 4 2 1 3 4 ] nabrtorank_expect = [1] nabrtorecv_expect = UnitRange{Int}[1:2] nabrtosend_expect = UnitRange{Int}[1:2] elseif crank == 1 nrealelem = 2 globalelems = [3, 4, 1, 2, 5, 6] elemtoelem_expect = [ 3 4 1 2 5 6 5 6 3 4 1 2 2 1 3 4 5 6 2 1 3 4 5 6 ] nabrtorank_expect = [0, 2] nabrtorecv_expect = UnitRange{Int}[1:2, 3:4] nabrtosend_expect = UnitRange{Int}[1:2, 3:4] elseif crank == 2 nrealelem = 2 globalelems = [5, 6, 3, 4] elemtoelem_expect = [ 3 4 3 4 1 2 3 4 2 1 3 4 2 1 3 4 ] nabrtorank_expect = [1] nabrtorecv_expect = UnitRange{Int}[1:2] nabrtosend_expect = UnitRange{Int}[1:2] end @test elems == 1:length(globalelems) @test realelems == 1:nrealelem @test ghostelems == (nrealelem + 1):length(globalelems) @test elemtocoord == globalelemtocoord[:, :, globalelems] @test elemtoface[:, realelems] == globalelemtoface[:, globalelems[realelems]] @test elemtoelem == elemtoelem_expect @test elemtobndy == globalelemtobndy[:, globalelems] @test elemtoordr == ones(eltype(elemtoordr), size(elemtoordr)) @test nabrtorank == nabrtorank_expect @test nabrtorecv == nabrtorecv_expect @test nabrtosend == nabrtosend_expect @test collect(realelems) == sort(union(topology.exteriorelems, topology.interiorelems)) @test unique(sort(sendelems)) == topology.exteriorelems @test length(intersect(topology.exteriorelems, topology.interiorelems)) 
== 0 end main() ================================================ FILE: test/Numerics/Mesh/mpi_connect_stacked_3d.jl ================================================ using Test using MPI using ClimateMachine.Mesh.Topologies function main() MPI.Init() comm = MPI.COMM_WORLD crank = MPI.Comm_rank(comm) csize = MPI.Comm_size(comm) @assert csize == 2 topology = StackedBrickTopology( comm, (1:4, 5:8, 9:12), periodicity = (false, true, false), boundary = ((1, 2), (3, 4), (5, 6)), connectivity = :face, ) elems = topology.elems realelems = topology.realelems ghostelems = topology.ghostelems sendelems = topology.sendelems elemtocoord = topology.elemtocoord elemtoelem = topology.elemtoelem elemtoface = topology.elemtoface elemtoordr = topology.elemtoordr elemtobndy = topology.elemtobndy nabrtorank = topology.nabrtorank nabrtorecv = topology.nabrtorecv nabrtosend = topology.nabrtosend globalelemtoface = [ 1 1 1 2 2 2 2 2 2 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 5 6 6 5 6 6 5 6 6 5 6 6 5 6 6 5 6 6 5 6 6 5 6 6 5 6 6 5 5 6 5 5 6 5 5 6 5 5 6 5 5 6 5 5 6 5 5 6 5 5 6 5 5 6 ] globalelemtoordr = ones(Int, size(globalelemtoface)) globalelemtocoord = Array{Int}(undef, 3, 8, 27) globalelemtocoord[:, :, 1] = [1 2 1 2 1 2 1 2; 5 5 6 6 5 5 6 6; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 2] = [1 2 1 2 1 2 1 2; 5 5 6 6 5 5 6 6; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 3] = [1 2 1 2 1 2 1 2; 5 5 6 6 5 5 6 6; 11 11 11 11 12 12 12 12] globalelemtocoord[:, :, 4] = [2 3 2 3 2 3 2 3; 5 5 6 6 5 5 6 6; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 5] = [2 3 2 3 2 3 2 3; 5 5 6 6 5 5 6 6; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 6] = [2 3 2 3 2 3 2 3; 5 5 6 6 5 5 6 6; 11 11 11 11 12 12 12 12] globalelemtocoord[:, :, 7] = [2 3 2 3 2 3 2 3; 6 6 7 7 6 6 7 7; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 8] = [2 3 2 3 2 3 2 3; 6 6 7 7 
6 6 7 7; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 9] = [2 3 2 3 2 3 2 3; 6 6 7 7 6 6 7 7; 11 11 11 11 12 12 12 12] globalelemtocoord[:, :, 10] = [1 2 1 2 1 2 1 2; 6 6 7 7 6 6 7 7; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 11] = [1 2 1 2 1 2 1 2; 6 6 7 7 6 6 7 7; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 12] = [1 2 1 2 1 2 1 2; 6 6 7 7 6 6 7 7; 11 11 11 11 12 12 12 12] globalelemtocoord[:, :, 13] = [1 2 1 2 1 2 1 2; 7 7 8 8 7 7 8 8; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 14] = [1 2 1 2 1 2 1 2; 7 7 8 8 7 7 8 8; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 15] = [1 2 1 2 1 2 1 2; 7 7 8 8 7 7 8 8; 11 11 11 11 12 12 12 12] globalelemtocoord[:, :, 16] = [2 3 2 3 2 3 2 3; 7 7 8 8 7 7 8 8; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 17] = [2 3 2 3 2 3 2 3; 7 7 8 8 7 7 8 8; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 18] = [2 3 2 3 2 3 2 3; 7 7 8 8 7 7 8 8; 11 11 11 11 12 12 12 12] globalelemtocoord[:, :, 19] = [3 4 3 4 3 4 3 4; 7 7 8 8 7 7 8 8; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 20] = [3 4 3 4 3 4 3 4; 7 7 8 8 7 7 8 8; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 21] = [3 4 3 4 3 4 3 4; 7 7 8 8 7 7 8 8; 11 11 11 11 12 12 12 12] globalelemtocoord[:, :, 22] = [3 4 3 4 3 4 3 4; 6 6 7 7 6 6 7 7; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 23] = [3 4 3 4 3 4 3 4; 6 6 7 7 6 6 7 7; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 24] = [3 4 3 4 3 4 3 4; 6 6 7 7 6 6 7 7; 11 11 11 11 12 12 12 12] globalelemtocoord[:, :, 25] = [3 4 3 4 3 4 3 4; 5 5 6 6 5 5 6 6; 9 9 9 9 10 10 10 10] globalelemtocoord[:, :, 26] = [3 4 3 4 3 4 3 4; 5 5 6 6 5 5 6 6; 10 10 10 10 11 11 11 11] globalelemtocoord[:, :, 27] = [3 4 3 4 3 4 3 4; 5 5 6 6 5 5 6 6; 11 11 11 11 12 12 12 12] globalelemtobndy = [ 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 0 0 5 0 0 5 0 0 5 0 0 5 0 0 5 0 0 5 0 0 5 
0 0 5 0 0 0 0 6 0 0 6 0 0 6 0 0 6 0 0 6 0 0 6 0 0 6 0 0 6 0 0 6 ] if crank == 0 nrealelem = 12 globalelems = [ 1, 2, 3, # 1 4, 5, 6, # 2 7, 8, 9, # 3 10, 11, 12, # 4 13, 14, 15, # 5 16, 17, 18, # 6 22, 23, 24, # 8 25, 26, 27, ] # 9 elemtoelem_expect = [ 1 2 3 1 2 3 10 11 12 4 5 6 7 8 9 16 17 18 19 20 21 22 23 24 4 5 6 22 23 24 19 20 21 7 8 9 13 14 15 16 17 18 1 2 3 4 5 6 13 14 15 16 17 18 4 5 6 1 2 3 13 14 15 16 17 18 19 20 21 22 23 24 10 11 12 7 8 9 16 17 18 13 14 15 13 14 15 16 17 18 19 20 21 22 23 24 1 1 2 2 4 5 3 7 8 4 10 11 5 14 15 6 17 18 7 20 21 8 23 24 2 3 1 5 6 2 8 9 3 11 12 4 13 14 5 16 17 6 19 20 7 22 23 8 ] nabrtorank_expect = [1] nabrtorecv_expect = UnitRange{Int}[1:12] nabrtosend_expect = UnitRange{Int}[1:12] elseif crank == 1 nrealelem = 15 globalelems = [ 13, 14, 15, # 5 16, 17, 18, # 6 19, 20, 21, # 7 22, 23, 24, # 8 25, 26, 27, # 9 1, 2, 3, # 1 4, 5, 6, # 2 7, 8, 9, # 3 10, 11, 12, ] # 4 elemtoelem_expect = [ 1 2 3 1 2 3 4 5 6 22 23 24 19 20 21 4 5 6 19 20 21 22 23 24 7 8 9 4 5 6 7 8 9 1 2 3 4 5 6 7 8 9 16 17 18 19 20 21 22 23 24 25 26 27 25 26 27 22 23 24 10 11 12 13 14 15 7 8 9 16 17 18 19 20 21 22 23 24 25 26 27 16 17 18 19 20 21 13 14 15 7 8 9 10 11 12 16 17 18 19 20 21 22 23 24 25 26 27 1 1 2 2 4 5 3 7 8 4 10 11 5 13 14 6 17 18 7 20 21 8 23 24 9 26 27 2 3 1 5 6 2 8 9 3 11 12 4 14 15 5 16 17 6 19 20 7 22 23 8 25 26 9 ] nabrtorank_expect = [0] nabrtorecv_expect = UnitRange{Int}[1:12] nabrtosend_expect = UnitRange{Int}[1:12] end @test elems == 1:length(globalelems) @test realelems == 1:nrealelem @test ghostelems == (nrealelem + 1):length(globalelems) @test elemtocoord == globalelemtocoord[:, :, globalelems] @test elemtoface[:, realelems] == globalelemtoface[:, globalelems[realelems]] @test elemtoelem == elemtoelem_expect @test elemtobndy == globalelemtobndy[:, globalelems] @test elemtoordr == ones(eltype(elemtoordr), size(elemtoordr)) @test nabrtorank == nabrtorank_expect @test nabrtorecv == nabrtorecv_expect @test nabrtosend == 
nabrtosend_expect @test collect(realelems) == sort(union(topology.exteriorelems, topology.interiorelems)) @test unique(sort(sendelems)) == topology.exteriorelems @test length(intersect(topology.exteriorelems, topology.interiorelems)) == 0 end main() ================================================ FILE: test/Numerics/Mesh/mpi_connectfull.jl ================================================ using Test using MPI using ClimateMachine.Mesh.Topologies function test_connectmeshfull() MPI.Init() comm = MPI.COMM_WORLD crank = MPI.Comm_rank(comm) csize = MPI.Comm_size(comm) @assert csize == 3 topology = BrickTopology( comm, (0:4, 5:9); boundary = ((1, 2), (3, 4)), periodicity = (false, true), connectivity = :full, ) elems = topology.elems realelems = topology.realelems ghostelems = topology.ghostelems sendelems = topology.sendelems elemtocoord = topology.elemtocoord elemtoelem = topology.elemtoelem elemtoface = topology.elemtoface elemtoordr = topology.elemtoordr elemtobndy = topology.elemtobndy nabrtorank = topology.nabrtorank nabrtorecv = topology.nabrtorecv nabrtosend = topology.nabrtosend globalelemtoface = [ 1 2 2 1 1 1 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 2 2 2 1 1 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 ] globalelemtoordr = ones(Int, size(globalelemtoface)) globalelemtocoord = Array{Int}(undef, 2, 4, 16) globalelemtocoord[:, :, 1] = [0 1 0 1; 5 5 6 6] globalelemtocoord[:, :, 2] = [1 2 1 2; 5 5 6 6] globalelemtocoord[:, :, 3] = [1 2 1 2; 6 6 7 7] globalelemtocoord[:, :, 4] = [0 1 0 1; 6 6 7 7] globalelemtocoord[:, :, 5] = [0 1 0 1; 7 7 8 8] globalelemtocoord[:, :, 6] = [0 1 0 1; 8 8 9 9] globalelemtocoord[:, :, 7] = [1 2 1 2; 8 8 9 9] globalelemtocoord[:, :, 8] = [1 2 1 2; 7 7 8 8] globalelemtocoord[:, :, 9] = [2 3 2 3; 7 7 8 8] globalelemtocoord[:, :, 10] = [2 3 2 3; 8 8 9 9] globalelemtocoord[:, :, 11] = [3 4 3 4; 8 8 9 9] globalelemtocoord[:, :, 12] = [3 4 3 4; 7 7 8 8] globalelemtocoord[:, :, 13] = [3 4 3 4; 6 6 7 7] 
globalelemtocoord[:, :, 14] = [2 3 2 3; 6 6 7 7] globalelemtocoord[:, :, 15] = [2 3 2 3; 5 5 6 6] globalelemtocoord[:, :, 16] = [3 4 3 4; 5 5 6 6] globalelemtobndy = [ 1 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ] if crank == 0 nrealelem = 5 globalelems = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 14, 15] elemtoelem_expect = [ 1 1 4 2 3 4 6 5 8 7 3 2 2 12 11 3 8 7 10 9 9 10 11 12 6 7 2 1 4 5 8 3 11 9 12 10 4 3 8 5 6 1 2 7 10 12 9 11 ] elemtoface_expect = [ 1 2 2 1 1 1 2 2 2 2 2 2 1 1 1 1 1 1 1 1 2 2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 ] nabrtorank_expect = [1, 2] nabrtorecv_expect = UnitRange{Int}[1:5, 6:7] nabrtosend_expect = UnitRange{Int}[1:5, 6:7] elseif crank == 1 nrealelem = 5 globalelems = [6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 11, 12, 13, 14, 15, 16] elemtoelem_expect = [ 1 1 10 3 2 2 6 9 3 4 5 4 14 8 7 15 2 5 4 12 11 7 15 14 8 3 1 2 3 13 16 4 10 3 8 14 4 1 2 7 6 9 12 13 16 15 5 11 6 7 2 5 15 9 8 3 10 1 16 11 12 4 14 13 ] elemtoface_expect = [ 1 2 2 2 2 1 2 2 1 1 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 2 2 2 1 1 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 ] nabrtorank_expect = [0, 2] nabrtorecv_expect = UnitRange{Int}[1:5, 6:11] nabrtosend_expect = UnitRange{Int}[1:5, 6:9] elseif crank == 2 nrealelem = 6 globalelems = [11, 12, 13, 14, 15, 16, 2, 3, 7, 8, 9, 10] elemtoelem_expect = [ 12 11 4 8 7 5 7 8 9 10 10 9 1 2 3 3 6 4 5 4 12 11 2 1 2 3 6 5 12 1 9 7 10 8 4 11 6 1 2 11 4 3 8 10 7 9 12 5 ] elemtoface_expect = [ 2 2 2 2 2 2 1 1 1 1 2 2 2 2 2 1 1 2 1 1 1 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 ] nabrtorank_expect = [0, 1] nabrtorecv_expect = UnitRange{Int}[1:2, 3:6] nabrtosend_expect = UnitRange{Int}[1:2, 3:8] end @test elems == 1:length(globalelems) @test realelems == 1:nrealelem @test ghostelems == (nrealelem + 1):length(globalelems) @test elemtocoord == globalelemtocoord[:, :, globalelems] @test elemtoface[:, realelems] == globalelemtoface[:, 
globalelems[realelems]]
    @test elemtoelem == elemtoelem_expect
    @test elemtobndy == globalelemtobndy[:, globalelems]
    @test elemtoordr == ones(eltype(elemtoordr), size(elemtoordr))
    @test nabrtorank == nabrtorank_expect
    @test nabrtorecv == nabrtorecv_expect
    @test nabrtosend == nabrtosend_expect
    @test collect(realelems) ==
          sort(union(topology.exteriorelems, topology.interiorelems))
    @test unique(sort(sendelems)) == topology.exteriorelems
    @test length(intersect(topology.exteriorelems, topology.interiorelems)) ==
          0
end

test_connectmeshfull()



================================================
FILE: test/Numerics/Mesh/mpi_getpartition.jl
================================================
using Test
using MPI
using ClimateMachine.Mesh.BrickMesh

# Check that BrickMesh.getpartition repartitions a randomly permuted
# element code from an intentionally unbalanced initial distribution
# (rank 0 starts empty) back to the balanced linear partition.
function main()
    MPI.Init()
    comm = MPI.COMM_WORLD
    crank = MPI.Comm_rank(comm)
    csize = MPI.Comm_size(comm)

    Nelemtotal = 113

    # Fixed seed so every rank generates the same global permutation.
    Random.seed!(1234)
    globalcode = randperm(Nelemtotal)

    @assert csize > 1

    # "Before" ranges: rank 0 holds nothing, the remaining csize - 1 ranks
    # split the elements linearly.  "After" ranges: balanced across all
    # csize ranks.
    bs = [
        (i == 1) ? (1:0) :
        BrickMesh.linearpartition(Nelemtotal, i - 1, csize - 1)
        for i in 1:csize
    ]
    as = [BrickMesh.linearpartition(Nelemtotal, i, csize) for i in 1:csize]

    codeb = globalcode[bs[crank + 1]]
    # so: local sort order, ss: send offsets per destination rank,
    # rs: receive offsets (unused here).
    (so, ss, rs) = BrickMesh.getpartition(comm, codeb)
    codeb = codeb[so]

    codec = []
    # Gather each destination rank's slice; each rank keeps only the
    # gather in which it is the root.
    for r in 0:(csize - 1)
        sendrange = ss[r + 1]:(ss[r + 2] - 1)
        rcounts = MPI.Gather(Cint(length(sendrange)), r, comm)
        c = MPI.Gatherv!(
            view(codeb, sendrange),
            crank == r ?
            VBuffer(similar(codeb, sum(rcounts)), rcounts) : nothing,
            r,
            comm,
        )
        if r == crank
            codec = c
        end
    end

    # Received codes must be exactly this rank's balanced share of 1:N.
    codea = (1:Nelemtotal)[as[crank + 1]]
    @test sort(codec) == codea
end

main()



================================================
FILE: test/Numerics/Mesh/mpi_partition.jl
================================================
using Test
using MPI
using ClimateMachine.Mesh.BrickMesh

# Check BrickMesh.partition on a 4x4 brick mesh over exactly 3 ranks:
# after repartitioning, each rank's element-to-vertex, coordinate,
# boundary, and face-connection tables must match hand-computed values.
function main()
    MPI.Init()
    comm = MPI.COMM_WORLD
    crank = MPI.Comm_rank(comm)
    csize = MPI.Comm_size(comm)

    @assert csize == 3

    (etv, etc, etb, fc) = brickmesh(
        (0:4, 5:9),
        (false, true),
        boundary = ((1, 2), (3, 4), (5, 6)),
        part = crank + 1,
        numparts = csize,
    )
    (etv, etc, etb, fc) = partition(comm, etv, etc, etb, fc)

    if crank == 0
        etv_expect = [
            1 2 7 6 11
            2 3 8 7 12
            6 7 12 11 16
            7 8 13 12 17
        ]
        @test etv == etv_expect

        @test etc[:, :, 1] == [0 1 0 1; 5 5 6 6]
        @test etc[:, :, 2] == [1 2 1 2; 5 5 6 6]
        @test etc[:, :, 3] == [1 2 1 2; 6 6 7 7]
        @test etc[:, :, 4] == [0 1 0 1; 6 6 7 7]
        @test etc[:, :, 5] == [0 1 0 1; 7 7 8 8]

        etb_expect = [
            1 0 0 1 1
            0 0 0 0 0
            0 0 0 0 0
            0 0 0 0 0
        ]
        @test etb == etb_expect

        # No periodic face connections land on rank 0.
        fc_expect = Array{Int64, 1}[]
        @test fc == fc_expect
    elseif crank == 1
        etv_expect = [
            16 17 12 13 18
            17 18 13 14 19
            21 22 17 18 23
            22 23 18 19 24
        ]
        @test etv == etv_expect

        @test etc[:, :, 1] == [0 1 0 1; 8 8 9 9]
        @test etc[:, :, 2] == [1 2 1 2; 8 8 9 9]
        @test etc[:, :, 3] == [1 2 1 2; 7 7 8 8]
        @test etc[:, :, 4] == [2 3 2 3; 7 7 8 8]
        @test etc[:, :, 5] == [2 3 2 3; 8 8 9 9]

        etb_expect = [
            1 0 0 0 0
            0 0 0 0 0
            0 0 0 0 0
            0 0 0 0 0
        ]
        @test etb == etb_expect

        # Each entry is [local elem, face, global elem, global face] for a
        # periodic (y-direction) face connection.
        fc_expect = Array{Int64, 1}[[1, 4, 1, 2], [2, 4, 2, 3], [5, 4, 3, 4]]
        @test fc == fc_expect
    elseif crank == 2
        etv_expect = [
            19 14 9 8 3 4
            20 15 10 9 4 5
            24 19 14 13 8 9
            25 20 15 14 9 10
        ]
        @test etv == etv_expect

        @test etc[:, :, 1] == [3 4 3 4; 8 8 9 9]
        @test etc[:, :, 2] == [3 4 3 4; 7 7 8 8]
        @test etc[:, :, 3] == [3 4 3 4; 6 6 7 7]
        @test etc[:, :, 4] == [2 3 2 3; 6 6 7 7]
        @test etc[:, :, 5] == [2 3 2 3; 5 5 6 6]
        @test etc[:, :, 6] == [3 4 3 4; 5 5 6 6]

        etb_expect = [
            0 0 0 0 0 0
            2 2 2 0 0 2
            0 0 0 0 0 0
            0 0 0 0 0 0
        ]
        @test etb == etb_expect

        fc_expect = Array{Int64, 1}[[1, 4, 4, 5]]
        @test fc == fc_expect
    end
end

main()



================================================
FILE: test/Numerics/Mesh/mpi_sortcolumns.jl
================================================
using Random
using Test
using MPI
using ClimateMachine.Mesh.BrickMesh

# Check BrickMesh.parallelsortcolumns: sorting columns of a distributed
# matrix in parallel must agree with a serial sortslices of the gathered
# matrix.  Ranks hold different column counts (3 * rank) to exercise
# uneven distributions.
function main()
    MPI.Init()
    comm = MPI.COMM_WORLD
    rank = MPI.Comm_rank(comm)

    Random.seed!(1234)

    d = 4
    # 3 * rank columns, so rank 0 contributes an empty matrix.
    A = rand(1:10, d, 3rank)
    B = BrickMesh.parallelsortcolumns(comm, A, rev = true)

    root = 0

    Acounts = MPI.Gather(Cint(length(A)), root, comm)
    A_all = MPI.Gatherv!(
        A,
        MPI.Comm_rank(comm) == root ?
        VBuffer(similar(A, sum(Acounts)), Acounts) : nothing,
        root,
        comm,
    )

    Bcounts = MPI.Gather(Cint(length(B)), root, comm)
    B_all = MPI.Gatherv!(
        B,
        MPI.Comm_rank(comm) == root ?
        VBuffer(similar(B, sum(Bcounts)), Bcounts) : nothing,
        root,
        comm,
    )

    if MPI.Comm_rank(comm) == root
        A_all = reshape(A_all, d, div(length(A_all), d))
        B_all = reshape(B_all, d, div(length(B_all), d))

        # Serial reference: sort the gathered columns descending.
        A_all = sortslices(A_all, dims = 2, rev = true)

        @test A_all == B_all
    end
end

main()



================================================
FILE: test/Numerics/Mesh/topology.jl
================================================
using Test
using ClimateMachine.Mesh.Topologies
using Combinatorics, MPI

MPI.Initialized() || MPI.Init()

@testset "Equiangular cubed_sphere_warp tests" begin
    import ClimateMachine.Mesh.Topologies: equiangular_cubed_sphere_warp

    # Create function alias for shorter formatting
    eacsw = equiangular_cubed_sphere_warp

    @testset "check radius" begin
        @test hypot(eacsw(3.0, -2.2, 1.3)...) ≈ 3.0 rtol = eps()
        @test hypot(eacsw(-3.0, -2.2, 1.3)...) ≈ 3.0 rtol = eps()
        @test hypot(eacsw(1.1, -2.2, 3.0)...) ≈ 3.0 rtol = eps()
        @test hypot(eacsw(1.1, -2.2, -3.0)...) ≈ 3.0 rtol = eps()
        @test hypot(eacsw(1.1, 3.0, 0.0)...) ≈ 3.0 rtol = eps()
        @test hypot(eacsw(1.1, -3.0, 0.0)...)
≈ 3.0 rtol = eps() end @testset "check sign" begin @test sign.(eacsw(3.0, -2.2, 1.3)) == sign.((3.0, -2.2, 1.3)) @test sign.(eacsw(-3.0, -2.2, 1.3)) == sign.((-3.0, -2.2, 1.3)) @test sign.(eacsw(1.1, -2.2, 3.0)) == sign.((1.1, -2.2, 3.0)) @test sign.(eacsw(1.1, -2.2, -3.0)) == sign.((1.1, -2.2, -3.0)) @test sign.(eacsw(1.1, 3.0, 0.0)) == sign.((1.1, 3.0, 0.0)) @test sign.(eacsw(1.1, -3.0, 0.0)) == sign.((1.1, -3.0, 0.0)) end @testset "check continuity" begin for (u, v) in zip( permutations([3.0, 2.999999999, 1.3]), permutations([2.999999999, 3.0, 1.3]), ) @test all(eacsw(u...) .≈ eacsw(v...)) end for (u, v) in zip( permutations([3.0, -2.999999999, 1.3]), permutations([2.999999999, -3.0, 1.3]), ) @test all(eacsw(u...) .≈ eacsw(v...)) end for (u, v) in zip( permutations([-3.0, 2.999999999, 1.3]), permutations([-2.999999999, 3.0, 1.3]), ) @test all(eacsw(u...) .≈ eacsw(v...)) end for (u, v) in zip( permutations([-3.0, -2.999999999, 1.3]), permutations([-2.999999999, -3.0, 1.3]), ) @test all(eacsw(u...) .≈ eacsw(v...)) end end end @testset "Equiangular cubed_sphere_unwarp tests" begin import ClimateMachine.Mesh.Topologies: cubed_sphere_warp, equiangular_cubed_sphere_unwarp # Create function aliases for shorter formatting eacsw = equiangular_cubed_sphere_warp eacsu = equiangular_cubed_sphere_unwarp for u in permutations([3.0, 2.999999999, 1.3]) @test all(eacsu(eacsw(u...)...) .≈ u) end for u in permutations([3.0, -2.999999999, 1.3]) @test all(eacsu(eacsw(u...)...) .≈ u) end for u in permutations([-3.0, 2.999999999, 1.3]) @test all(eacsu(eacsw(u...)...) .≈ u) end for u in permutations([-3.0, -2.999999999, 1.3]) @test all(eacsu(eacsw(u...)...) .≈ u) end end @testset "Equidistant cubed_sphere_warp tests" begin import ClimateMachine.Mesh.Topologies: equidistant_cubed_sphere_warp # Create function alias for shorter formatting edcsw = equidistant_cubed_sphere_warp @testset "check radius" begin @test hypot(edcsw(3.0, -2.2, 1.3)...) 
≈ 3.0 rtol = eps() @test hypot(edcsw(-3.0, -2.2, 1.3)...) ≈ 3.0 rtol = eps() @test hypot(edcsw(1.1, -2.2, 3.0)...) ≈ 3.0 rtol = eps() @test hypot(edcsw(1.1, -2.2, -3.0)...) ≈ 3.0 rtol = eps() @test hypot(edcsw(1.1, 3.0, 0.0)...) ≈ 3.0 rtol = eps() @test hypot(edcsw(1.1, -3.0, 0.0)...) ≈ 3.0 rtol = eps() end @testset "check sign" begin @test sign.(edcsw(3.0, -2.2, 1.3)) == sign.((3.0, -2.2, 1.3)) @test sign.(edcsw(-3.0, -2.2, 1.3)) == sign.((-3.0, -2.2, 1.3)) @test sign.(edcsw(1.1, -2.2, 3.0)) == sign.((1.1, -2.2, 3.0)) @test sign.(edcsw(1.1, -2.2, -3.0)) == sign.((1.1, -2.2, -3.0)) @test sign.(edcsw(1.1, 3.0, 0.0)) == sign.((1.1, 3.0, 0.0)) @test sign.(edcsw(1.1, -3.0, 0.0)) == sign.((1.1, -3.0, 0.0)) end @testset "check continuity" begin for (u, v) in zip( permutations([3.0, 2.999999999, 1.3]), permutations([2.999999999, 3.0, 1.3]), ) @test all(edcsw(u...) .≈ edcsw(v...)) end for (u, v) in zip( permutations([3.0, -2.999999999, 1.3]), permutations([2.999999999, -3.0, 1.3]), ) @test all(edcsw(u...) .≈ edcsw(v...)) end for (u, v) in zip( permutations([-3.0, 2.999999999, 1.3]), permutations([-2.999999999, 3.0, 1.3]), ) @test all(edcsw(u...) .≈ edcsw(v...)) end for (u, v) in zip( permutations([-3.0, -2.999999999, 1.3]), permutations([-2.999999999, -3.0, 1.3]), ) @test all(edcsw(u...) .≈ edcsw(v...)) end end end @testset "Equidistant cubed_sphere_unwarp tests" begin import ClimateMachine.Mesh.Topologies: equidistant_cubed_sphere_warp, equidistant_cubed_sphere_unwarp # Create function aliases for shorter formatting edcsw = equidistant_cubed_sphere_warp edcsu = equidistant_cubed_sphere_unwarp for u in permutations([3.0, 2.999999999, 1.3]) @test all(edcsu(edcsw(u...)...) .≈ u) end for u in permutations([3.0, -2.999999999, 1.3]) @test all(edcsu(edcsw(u...)...) .≈ u) end for u in permutations([-3.0, 2.999999999, 1.3]) @test all(edcsu(edcsw(u...)...) .≈ u) end for u in permutations([-3.0, -2.999999999, 1.3]) @test all(edcsu(edcsw(u...)...) 
.≈ u) end end @testset "Conformal cubed_sphere_warp tests" begin import ClimateMachine.Mesh.Topologies: conformal_cubed_sphere_warp # Create function alias for shorter formatting ccsw = conformal_cubed_sphere_warp @testset "check radius" begin @test hypot(ccsw(3.0, -2.2, 1.3)...) ≈ 3.0 rtol = eps() @test hypot(ccsw(-3.0, -2.2, 1.3)...) ≈ 3.0 rtol = eps() @test hypot(ccsw(1.1, -2.2, 3.0)...) ≈ 3.0 rtol = eps() @test hypot(ccsw(1.1, -2.2, -3.0)...) ≈ 3.0 rtol = eps() @test hypot(ccsw(1.1, 3.0, 0.0)...) ≈ 3.0 rtol = eps() @test hypot(ccsw(1.1, -3.0, 0.0)...) ≈ 3.0 rtol = eps() end @testset "check sign" begin @test sign.(ccsw(3.0, -2.2, 1.3)) == sign.((3.0, -2.2, 1.3)) @test sign.(ccsw(-3.0, -2.2, 1.3)) == sign.((-3.0, -2.2, 1.3)) @test sign.(ccsw(1.1, -2.2, 3.0)) == sign.((1.1, -2.2, 3.0)) @test sign.(ccsw(1.1, -2.2, -3.0)) == sign.((1.1, -2.2, -3.0)) @test sign.(ccsw(1.1, 3.0, -2.2)) == sign.((1.1, 3.0, -2.2)) @test sign.(ccsw(1.1, -3.0, -2.2)) == sign.((1.1, -3.0, -2.2)) end @testset "check continuity" begin for (u, v) in zip( permutations([3.0, 2.999999999, 1.3]), permutations([2.999999999, 3.0, 1.3]), ) @test all(ccsw(u...) .≈ ccsw(v...)) end for (u, v) in zip( permutations([3.0, -2.999999999, 1.3]), permutations([2.999999999, -3.0, 1.3]), ) @test all(ccsw(u...) .≈ ccsw(v...)) end for (u, v) in zip( permutations([-3.0, 2.999999999, 1.3]), permutations([-2.999999999, 3.0, 1.3]), ) @test all(ccsw(u...) .≈ ccsw(v...)) end for (u, v) in zip( permutations([-3.0, -2.999999999, 1.3]), permutations([-2.999999999, -3.0, 1.3]), ) @test all(ccsw(u...) .≈ ccsw(v...)) end end end @testset "Conformal cubed_sphere_unwarp tests" begin import ClimateMachine.Mesh.Topologies: conformal_cubed_sphere_warp, conformal_cubed_sphere_unwarp # Create function aliases for shorter formatting ccsw = conformal_cubed_sphere_warp ccsu = conformal_cubed_sphere_unwarp for u in permutations([3.0, 2.999999999, 1.3]) @test all(ccsu(ccsw(u...)...) 
.≈ u) end for u in permutations([3.0, -2.999999999, 1.3]) @test all(ccsu(ccsw(u...)...) .≈ u) end for u in permutations([-3.0, 2.999999999, 1.3]) @test all(ccsu(ccsw(u...)...) .≈ u) end for u in permutations([-3.0, -2.999999999, 1.3]) @test all(ccsu(ccsw(u...)...) .≈ u) end end @testset "grid1d" begin g = grid1d(0, 10, nelem = 10) @test eltype(g) == Float64 @test length(g) == 11 @test g[1] == 0 @test g[end] == 10 g = grid1d(10.0f0, 20.0f0, elemsize = 0.1) @test eltype(g) == Float32 @test length(g) == 101 @test g[1] == 10 @test g[end] == 20 g = grid1d(10.0f0, 20.0f0, InteriorStretching(0), elemsize = 0.1) @test eltype(g) == Float32 @test length(g) == 101 @test g[1] == 10 @test g[end] == 20 g = grid1d( 10.0f0, 20.0f0, SingleExponentialStretching(2.5f0), elemsize = 0.1, ) @test eltype(g) == Float32 @test length(g) == 101 @test g[1] == 10 @test g[end] == 20 end @testset "BrickTopology tests" begin let comm = MPI.COMM_SELF elemrange = (0:10,) periodicity = (true,) topology = BrickTopology( comm, elemrange, periodicity = periodicity, connectivity = :face, ) nelem = length(elemrange[1]) - 1 for e in 1:nelem @test topology.elemtocoord[:, :, e] == [e - 1 e] end @test topology.elemtoelem == [nelem collect(1:(nelem - 1))'; collect(2:nelem)' 1] @test topology.elemtoface == repeat(2:-1:1, outer = (1, nelem)) @test topology.elemtoordr == ones(Int, size(topology.elemtoordr)) @test topology.elemtobndy == zeros(Int, size(topology.elemtoordr)) @test topology.elems == 1:nelem @test topology.realelems == 1:nelem @test topology.ghostelems == nelem .+ (1:0) @test length(topology.sendelems) == 0 @test length(topology.exteriorelems) == 0 @test collect(topology.realelems) == topology.interiorelems @test topology.nabrtorank == Int[] @test topology.nabrtorecv == UnitRange{Int}[] @test topology.nabrtosend == UnitRange{Int}[] end let comm = MPI.COMM_SELF topology = BrickTopology( comm, (0:4, 5:9), periodicity = (false, true), connectivity = :face, ) nelem = 16 @test topology.elemtocoord[:, :, 
1] == [0 1 0 1; 5 5 6 6] @test topology.elemtocoord[:, :, 2] == [1 2 1 2; 5 5 6 6] @test topology.elemtocoord[:, :, 3] == [1 2 1 2; 6 6 7 7] @test topology.elemtocoord[:, :, 4] == [0 1 0 1; 6 6 7 7] @test topology.elemtocoord[:, :, 5] == [0 1 0 1; 7 7 8 8] @test topology.elemtocoord[:, :, 6] == [0 1 0 1; 8 8 9 9] @test topology.elemtocoord[:, :, 7] == [1 2 1 2; 8 8 9 9] @test topology.elemtocoord[:, :, 8] == [1 2 1 2; 7 7 8 8] @test topology.elemtocoord[:, :, 9] == [2 3 2 3; 7 7 8 8] @test topology.elemtocoord[:, :, 10] == [2 3 2 3; 8 8 9 9] @test topology.elemtocoord[:, :, 11] == [3 4 3 4; 8 8 9 9] @test topology.elemtocoord[:, :, 12] == [3 4 3 4; 7 7 8 8] @test topology.elemtocoord[:, :, 13] == [3 4 3 4; 6 6 7 7] @test topology.elemtocoord[:, :, 14] == [2 3 2 3; 6 6 7 7] @test topology.elemtocoord[:, :, 15] == [2 3 2 3; 5 5 6 6] @test topology.elemtocoord[:, :, 16] == [3 4 3 4; 5 5 6 6] @test topology.elemtoelem == [ 1 1 4 2 3 4 6 5 8 7 10 9 14 3 2 15 2 15 14 3 8 7 10 9 12 11 5 6 7 13 16 8 6 7 2 1 4 5 8 3 14 9 12 13 16 15 10 11 4 3 8 5 6 1 2 7 10 15 16 11 12 9 14 13 ] @test topology.elemtoface == [ 1 2 2 1 1 1 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 2 2 2 1 1 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 ] @test topology.elemtoordr == ones(Int, size(topology.elemtoordr)) @test topology.elemtoelem[topology.elemtobndy .== 1] == 1:8 @test topology.elems == 1:nelem @test topology.realelems == 1:nelem @test topology.ghostelems == nelem .+ (1:0) @test length(topology.sendelems) == 0 @test length(topology.exteriorelems) == 0 @test collect(topology.realelems) == topology.interiorelems @test topology.nabrtorank == Int[] @test topology.nabrtorecv == UnitRange{Int}[] @test topology.nabrtosend == UnitRange{Int}[] end let comm = MPI.COMM_SELF for px in (true, false) topology = BrickTopology( comm, (0:10,), periodicity = (px,), connectivity = :face, ) @test Topologies.hasboundary(topology) == !px if px @test topology.bndytoelem == () @test 
topology.bndytoface == () else @test topology.bndytoelem == ([1, 10],) @test topology.bndytoface == ([1, 2],) end end for py in (true, false), px in (true, false) topology = BrickTopology( comm, (0:10, 0:3), periodicity = (px, py), connectivity = :face, ) @test Topologies.hasboundary(topology) == !(px && py) if px && py @test topology.bndytoelem == () @test topology.bndytoface == () else @test sort(unique(topology.bndytoface[1])) == vcat( px ? Int64[] : Int64[1, 2], py ? Int64[] : Int64[3, 4], ) end end for pz in (true, false), py in (true, false), px in (true, false) topology = BrickTopology( comm, (0:10, 0:3, -1:3), periodicity = (px, py, pz), connectivity = :face, ) @test Topologies.hasboundary(topology) == !(px && py && pz) if px && py && pz @test topology.bndytoelem == () @test topology.bndytoface == () else @test sort(unique(topology.bndytoface[1])) == vcat( px ? Int64[] : Int64[1, 2], py ? Int64[] : Int64[3, 4], pz ? Int64[] : Int64[5, 6], ) end end end end @testset "StackedBrickTopology tests" begin let comm = MPI.COMM_SELF topology = StackedBrickTopology( comm, (2:5, 4:6), periodicity = (false, true), boundary = ((1, 2), (3, 4)), connectivity = :face, ) nelem = 6 @test topology.elemtocoord[:, :, 1] == [2 3 2 3; 4 4 5 5] @test topology.elemtocoord[:, :, 2] == [2 3 2 3; 5 5 6 6] @test topology.elemtocoord[:, :, 3] == [3 4 3 4; 4 4 5 5] @test topology.elemtocoord[:, :, 4] == [3 4 3 4; 5 5 6 6] @test topology.elemtocoord[:, :, 5] == [4 5 4 5; 4 4 5 5] @test topology.elemtocoord[:, :, 6] == [4 5 4 5; 5 5 6 6] @test topology.elemtoelem == [ 1 2 1 2 3 4 3 4 5 6 1 2 2 1 4 3 6 5 2 1 4 3 6 5 ] @test topology.elemtoface == [ 1 1 2 2 2 2 1 1 1 1 2 2 4 4 4 4 4 4 3 3 3 3 3 3 ] @test topology.elemtoordr == ones(Int, size(topology.elemtoordr)) @test topology.elemtobndy == [ 1 1 0 0 0 0 0 0 0 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 ] @test topology.elemtoelem[topology.elemtobndy .== 1] == 1:2 @test topology.elemtoelem[topology.elemtobndy .== 2] == 1:2 @test topology.elems == 1:nelem 
@test topology.realelems == 1:nelem @test topology.ghostelems == nelem .+ (1:0) @test length(topology.sendelems) == 0 @test length(topology.exteriorelems) == 0 @test collect(topology.realelems) == topology.interiorelems @test topology.nabrtorank == Int[] @test topology.nabrtorecv == UnitRange{Int}[] @test topology.nabrtosend == UnitRange{Int}[] @test topology.bndytoelem == ([1, 2], [5, 6]) @test topology.bndytoface == ([1, 1], [2, 2]) end let comm = MPI.COMM_SELF for py in (true, false), px in (true, false) topology = StackedBrickTopology( comm, (0:10, 0:3), periodicity = (px, py), connectivity = :face, ) @test Topologies.hasboundary(topology) == !(px && py) if px && py @test topology.bndytoelem == () @test topology.bndytoface == () else @test sort(unique(topology.bndytoface[1])) == vcat( px ? Int64[] : Int64[1, 2], py ? Int64[] : Int64[3, 4], ) end end for pz in (true, false), py in (true, false), px in (true, false) topology = StackedBrickTopology( comm, (0:10, 0:3, -1:3), periodicity = (px, py, pz), connectivity = :face, ) @test Topologies.hasboundary(topology) == !(px && py && pz) if px && py && pz @test topology.bndytoelem == () @test topology.bndytoface == () else @test sort(unique(topology.bndytoface[1])) == vcat( px ? Int64[] : Int64[1, 2], py ? Int64[] : Int64[3, 4], pz ? 
Int64[] : Int64[5, 6],
                    )
            end
        end
    end
end
@testset "StackedCubedSphereTopology tests" begin
    # boundary = (2, 1) supplies custom boundary tags for the bottom/top of
    # the vertical stack (radial range 1.0:3.0).
    topology = StackedCubedSphereTopology(
        MPI.COMM_SELF,
        3,
        1.0:3.0,
        boundary = (2, 1),
        connectivity = :face,
    )
    @test Topologies.hasboundary(topology)
    # NOTE(review): expected mapping of tags to faces 6 and 5 — confirm
    # against the StackedCubedSphereTopology face-numbering convention.
    @test map(unique, topology.bndytoface) == ([6], [5])
end
@testset "CubedShellTopology tests" begin
    # A shell (2D sphere surface) has no boundary faces at all.
    topology = CubedShellTopology(MPI.COMM_SELF, 3, Float64, connectivity = :face)
    @test !Topologies.hasboundary(topology)
end
================================================ FILE: test/Numerics/ODESolvers/callbacks.jl ================================================
using MPI
using Test
using ClimateMachine.ODESolvers: AbstractODESolver
using ClimateMachine.GenericCallbacks
# Minimal stand-in for a real ODE solver: only carries a simulation time `t`
# (starting at an arbitrary 42.0) and a step counter, which is all the
# generic-callback machinery under test needs.
mutable struct PseudoSolver <: AbstractODESolver
    t::Float64
    steps::Int
    PseudoSolver() = new(42.0, 0)
end
# Expose the pseudo-solver's current time with the solver-interface name.
gettime(ps::PseudoSolver) = ps.t
# Records which lifecycle hooks have fired: `initialized`/`finished` flags for
# init!/fini!, and a counter for call! invocations.
mutable struct MyCallback
    initialized::Bool
    calls::Int
    finished::Bool
end
MyCallback() = MyCallback(false, 0, false)
# Hook implementations: init! marks initialization, call! counts invocations
# (returning `nothing` so the callback never requests a solver stop), and
# fini! marks completion.
GenericCallbacks.init!(cb::MyCallback, _...) = cb.initialized = true
GenericCallbacks.call!(cb::MyCallback, _...) = (cb.calls += 1; nothing)
GenericCallbacks.fini!(cb::MyCallback, _...)
= cb.finished = true MPI.Init() mpicomm = MPI.COMM_WORLD ps = PseudoSolver() wtcb = GenericCallbacks.EveryXWallTimeSeconds(MyCallback(), 2, mpicomm) stcb = GenericCallbacks.EveryXSimulationTime(MyCallback(), 0.5) sscb = GenericCallbacks.EveryXSimulationSteps(MyCallback(), 10) fn_calls = 0 fncb = () -> (global fn_calls += 1) wtfn_calls = 0 wtfncb = GenericCallbacks.EveryXWallTimeSeconds( AtInit(() -> global wtfn_calls += 1), 2, mpicomm, ) stfn_calls = 0 stfncb = GenericCallbacks.EveryXSimulationTime( AtInitAndFini(() -> global stfn_calls += 1), 0.5, ) ssfn_calls = 0 ssfncb = GenericCallbacks.EveryXSimulationSteps( AtInit(() -> global ssfn_calls += 1), 5, ) callbacks = ((wtcb, stcb, sscb), fncb, (wtfncb, stfncb, ssfncb)) @testset "GenericCallbacks" begin GenericCallbacks.init!(callbacks, ps, nothing, nothing, ps.t) @test wtcb.callback.initialized @test stcb.callback.initialized @test sscb.callback.initialized @test wtcb.callback.calls == 0 @test stcb.callback.calls == 0 @test sscb.callback.calls == 0 @test fn_calls == 0 @test wtfn_calls >= 1 @test stfn_calls == 1 @test ssfn_calls == 1 @test GenericCallbacks.call!(callbacks, ps, nothing, nothing, ps.t) in (0, nothing) @test wtcb.callback.calls >= 0 @test stcb.callback.calls == 0 @test sscb.callback.calls == 0 @test fn_calls == 1 @test wtfn_calls >= 1 @test stfn_calls == 1 @test ssfn_calls == 1 ps.t += 0.5 @test GenericCallbacks.call!(callbacks, ps, nothing, nothing, ps.t) in (0, nothing) @test wtcb.callback.calls >= 0 @test stcb.callback.calls == 1 @test sscb.callback.calls == 0 @test fn_calls == 2 @test wtfn_calls >= 1 @test stfn_calls == 2 @test ssfn_calls == 1 sleep(max((2.1 + wtcb.lastcbtime_ns / 1e9 - time_ns() / 1e9), 0.0)) ps.t += 0.5 @test GenericCallbacks.call!(callbacks, ps, nothing, nothing, ps.t) in (0, nothing) @test wtcb.callback.calls >= 1 @test stcb.callback.calls == 2 @test sscb.callback.calls == 0 @test fn_calls == 3 @test wtfn_calls >= 2 @test stfn_calls == 3 @test ssfn_calls == 1 for i in 1:7 @test 
GenericCallbacks.call!(callbacks, ps, nothing, nothing, ps.t) in (0, nothing) end @test wtcb.callback.calls >= 1 @test stcb.callback.calls == 2 @test sscb.callback.calls == 1 @test fn_calls == 10 @test wtfn_calls >= 2 @test stfn_calls == 3 @test ssfn_calls == 3 GenericCallbacks.fini!(callbacks, ps, nothing, nothing, ps.t) @test wtcb.callback.finished @test stcb.callback.finished @test sscb.callback.finished @test wtfn_calls >= 2 @test stfn_calls == 4 @test ssfn_calls == 3 end ================================================ FILE: test/Numerics/ODESolvers/ode_tests_basic.jl ================================================ using Test using ClimateMachine using LinearAlgebra import OrdinaryDiffEq: SSPRK73 include("ode_tests_common.jl") ClimateMachine.init() const ArrayType = ClimateMachine.array_type() a = 100 b = 1 c = 1 / 100 Δ = sqrt(4 * a * c - b^2) α1, α2 = 1 / 4, 3 / 4 β1, β2, β3 = 1 / 3, 3 / 6, 1 / 6 function rhs!(dQ, Q, ::Nothing, t; increment = false) if increment @. dQ += $cos(t) * (a + b * Q + c * Q^2) else @. dQ = $cos(t) * (a + b * Q + c * Q^2) end end function rhs_linear!(dQ, Q, ::Nothing, t; increment = false) if increment @. dQ += $cos(t) * b * Q else @. dQ = $cos(t) * b * Q end end struct ODETestBasicLinBE <: AbstractBackwardEulerSolver end ODESolvers.Δt_is_adjustable(::ODETestBasicLinBE) = true (::ODETestBasicLinBE)(Q, Qhat, α, p, t) = @. Q = Qhat / (1 - α * $cos(t) * b) function rhs_nonlinear!(dQ, Q, ::Nothing, t; increment = false) if increment @. dQ += $cos(t) * (a + c * Q^2) else @. dQ = $cos(t) * (a + c * Q^2) end end function rhs_fast!(dQ, Q, ::Any, t; increment = false) if increment @. dQ += α1 * $cos(t) * (a + b * Q + c * Q^2) else @. dQ = α1 * $cos(t) * (a + b * Q + c * Q^2) end end function rhs_fast_linear!(dQ, Q, ::Nothing, t; increment = false) if increment @. dQ += α1 * $cos(t) * Q else @. dQ = α1 * $cos(t) * Q end end function rhs_slow!(dQ, Q, ::Nothing, t; increment = false) if increment @. 
dQ += α2 * $cos(t) * (a + b * Q + c * Q^2) else @. dQ = α2 * $cos(t) * (a + b * Q + c * Q^2) end end function rhs1!(dQ, Q, ::Any, t; increment = false) if increment @. dQ += β1 * $cos(t) * (a + b * Q + c * Q^2) else @. dQ = β1 * $cos(t) * (a + b * Q + c * Q^2) end end function rhs2!(dQ, Q, ::Any, t; increment = false) if increment @. dQ += β2 * $cos(t) * (a + b * Q + c * Q^2) else @. dQ = β2 * $cos(t) * (a + b * Q + c * Q^2) end end function rhs3!(dQ, Q, ::Nothing, t; increment = false) if increment @. dQ += β3 * $cos(t) * (a + b * Q + c * Q^2) else @. dQ = β3 * $cos(t) * (a + b * Q + c * Q^2) end end function exactsolution(t, q0, t0) k = @. 2 * atan((2 * c * q0 + b) / Δ) / Δ - sin(t0) solution = @. (Δ * tan((k + sin(t)) * Δ / 2) - b) / (2 * c) return ArrayType(solution) end q0 = ArrayType === Array ? [1.0] : range(-1.0, 1.0, length = 303) t0 = 0.1 finaltime = 1.2 Qinit = exactsolution(t0, q0, t0) Q = similar(Qinit) Qexact = exactsolution(finaltime, q0, t0) @testset "Convergence/limited" begin @testset "Explicit methods" begin dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (method, expected_order) in explicit_methods for (n, dt) in enumerate(dts) Q .= Qinit solver = method(rhs!, Q; dt = dt, t0 = t0) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test isapprox(rates[end], expected_order; atol = 0.7) end end @testset "IMEX methods (LowStorageVariant)" begin dts = [2.0^(-k) for k in 4:5] errors = similar(dts) for (method, order) in imex_methods_lowstorage_compatible for split_explicit_implicit in (false, true) for (n, dt) in enumerate(dts) Q .= Qinit rhs_arg! = split_explicit_implicit ? rhs_nonlinear! : rhs! 
solver = method( rhs_arg!, rhs_linear!, LinearBackwardEulerSolver( DivideLinearSolver(); isadjustable = true, ), Q; dt = dt, t0 = t0, split_explicit_implicit = split_explicit_implicit, variant = LowStorageVariant(), ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) if split_explicit_implicit if method === ARK1ForwardBackwardEuler expected_order = 1 else expected_order = 2 end else expected_order = order end @test isapprox(rates[end], expected_order; rtol = 0.3) end end end @testset "IMEX methods (NaiveVariant)" begin dts = [2.0^(-k) for k in 4:5] errors = similar(dts) for (method, expected_order) in imex_methods_naivestorage_compatible for split_explicit_implicit in (false, true) for (n, dt) in enumerate(dts) Q .= Qinit rhs_arg! = split_explicit_implicit ? rhs_nonlinear! : rhs! solver = method( rhs_arg!, rhs_linear!, LinearBackwardEulerSolver( DivideLinearSolver(); isadjustable = true, ), Q; dt = dt, t0 = t0, split_explicit_implicit = split_explicit_implicit, variant = NaiveVariant(), ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test isapprox(rates[end], expected_order; rtol = 0.3) end end end @testset "IMEX methods with direct solver" begin dts = [2.0^(-k) for k in 4:5] errors = similar(dts) for (method, order) in imex_methods_naivestorage_compatible for split_explicit_implicit in (false, true) for (n, dt) in enumerate(dts) Q .= Qinit rhs_arg! = split_explicit_implicit ? rhs_nonlinear! : rhs! 
solver = method( rhs_arg!, rhs_linear!, ODETestBasicLinBE(), Q; dt = dt, t0 = t0, split_explicit_implicit = split_explicit_implicit, variant = NaiveVariant(), ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) expected_order = order @test isapprox(rates[end], expected_order; rtol = 0.3) end end end @testset "MRRK methods with 2 rates" begin dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (slow_method, slow_expected_order) in slow_mrrk_methods for (fast_method, fast_expected_order) in fast_mrrk_methods for nsubsteps in (1, 3) for (n, dt) in enumerate(dts) Q .= Qinit solver = MultirateRungeKutta( ( slow_method(rhs_slow!, Q; dt = dt), fast_method(rhs_fast!, Q; dt = dt / nsubsteps), ); t0 = t0, ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) min_order = min(slow_expected_order, fast_expected_order) max_order = max(slow_expected_order, fast_expected_order) @test ( isapprox(rates[end], min_order; atol = 0.5) || isapprox(rates[end], max_order; atol = 0.5) || min_order <= rates[end] <= max_order ) end end end end @testset "MRRK methods with IMEX" begin dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (slow_method, slow_expected_order) in slow_mrrk_methods for (fast_method, fast_expected_order) in imex_methods_lowstorage_compatible for (n, dt) in enumerate(dts) Q .= Qinit solver = MultirateRungeKutta( ( slow_method(rhs_slow!, Q; dt = dt), fast_method( rhs_fast!, rhs_fast_linear!, LinearBackwardEulerSolver( DivideLinearSolver(); isadjustable = true, ), Q; dt = dt, split_explicit_implicit = false, variant = LowStorageVariant(), ), ), t0 = t0, ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) min_order = min(slow_expected_order, fast_expected_order) max_order = max(slow_expected_order, fast_expected_order) @test ( isapprox(rates[end], 
min_order; atol = 0.5) || isapprox(rates[end], max_order; atol = 0.5) || min_order <= rates[end] <= max_order ) end end end @testset "MRRK methods with 3 rates" begin dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (rate3_method, rate3_order) in slow_mrrk_methods for (rate2_method, rate2_order) in slow_mrrk_methods for (rate1_method, rate1_order) in fast_mrrk_methods for nsubsteps in (1, 2) for (n, dt) in enumerate(dts) Q .= Qinit solver = MultirateRungeKutta( ( rate3_method(rhs3!, Q, dt = dt), rate2_method(rhs2!, Q, dt = dt / nsubsteps), rate1_method( rhs1!, Q; dt = dt / nsubsteps^2, ), ); dt = dt, t0 = t0, ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test 3.8 <= rates[end] end end end end end @testset "MIS methods" begin dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (method, expected_order) in mis_methods for fast_method in (LSRK54CarpenterKennedy,) for (n, dt) in enumerate(dts) Q .= Qinit solver = method( rhs_slow!, rhs_fast!, fast_method, 4, Q; dt = dt, t0 = t0, ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test isapprox(rates[end], expected_order; atol = 0.6) end end end @testset "MRI GARK methods with 2 rates" begin dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (slow_method, expected_order) in mrigark_erk_methods for (fast_method, _) in fast_mrigark_methods for nsubsteps in (1, 3) for (n, dt) in enumerate(dts) dt /= 4 # Need a smaller dt to get convergence rate Q .= Qinit fastsolver = fast_method(rhs_fast!, Q; dt = dt / nsubsteps) solver = slow_method( rhs_slow!, fastsolver, Q; dt = dt, t0 = t0, ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test isapprox(rates[end], expected_order; atol = 0.5) end end end end @testset "MRI GARK methods with 3 rates" begin dts = [2.0^(-k) for k in 3:4] errors 
= similar(dts) for (rate3_method, rate3_order) in mrigark_erk_methods for (rate2_method, rate2_order) in mrigark_erk_methods for (rate1_method, _) in fast_mrigark_methods for nsubsteps in (1, 2) for (n, dt) in enumerate(dts) dt /= 4 # Need a smaller dt to get convergence rate Q .= Qinit solver1 = rate1_method(rhs1!, Q; dt = dt / nsubsteps^2) solver2 = rate2_method( rhs2!, solver1, Q; dt = dt / nsubsteps, ) solver3 = rate3_method( rhs3!, solver2, Q; dt = dt, t0 = t0, ) solve!(Q, solver3; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) expected_order = min(rate3_order, rate2_order) @test isapprox(rates[end], expected_order; atol = 0.5) end end end end end @testset "MRI GARK implicit methods with 2 rates and linear solver" begin dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (slow_method, expected_order) in mrigark_irk_methods for (fast_method, _) in fast_mrigark_methods for nsubsteps in (1, 3) for (n, dt) in enumerate(dts) dt /= 4 Q .= Qinit nsteps = ceil(Int, (finaltime - t0) / dt) dt = (finaltime - t0) / nsteps fastsolver = fast_method(rhs_nonlinear!, Q; dt = dt / nsubsteps) solver = slow_method( rhs_linear!, LinearBackwardEulerSolver( DivideLinearSolver(); isadjustable = true, ), fastsolver, Q; dt = dt, t0 = t0, ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test isapprox(rates[end], expected_order; atol = 0.5) end end end end @testset "MRI GARK implicit methods with 2 rates and custom solver" begin dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (slow_method, expected_order) in mrigark_irk_methods for (fast_method, _) in fast_mrigark_methods for nsubsteps in (1, 3) for (n, dt) in enumerate(dts) dt /= 4 Q .= Qinit nsteps = ceil(Int, (finaltime - t0) / dt) dt = (finaltime - t0) / nsteps fastsolver = fast_method(rhs_nonlinear!, Q; dt = dt / nsubsteps) solver = slow_method( rhs_linear!, ODETestBasicLinBE(), 
fastsolver, Q; dt = dt, t0 = t0, ) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test isapprox(rates[end], expected_order; atol = 0.5) end end end end end @testset "Explicit methods composition of solve!" begin halftime = 1.0 finaltime = 2.0 dt = 0.075 for (method, _) in explicit_methods Q .= Qinit solver1 = method(rhs!, Q; dt = dt, t0 = t0) solve!(Q, solver1; timeend = finaltime) Q2 = similar(Q) Q2 .= Qinit solver2 = method(rhs!, Q2; dt = dt, t0 = t0) solve!(Q2, solver2; timeend = halftime, adjustfinalstep = false) solve!(Q2, solver2; timeend = finaltime) @test Q2 == Q end end @testset "DiffEq methods" begin alg = SSPRK73() expected_order = 3 dts = [2.0^(-k) for k in 3:4] errors = similar(dts) for (n, dt) in enumerate(dts) Q .= Qinit solver = DiffEqJLSolver(rhs!, alg, Q; dt = dt, t0 = t0) solve!(Q, solver; timeend = finaltime) errors[n] = norm(Q - Qexact) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test isapprox(rates[end], expected_order; atol = 0.7) end ================================================ FILE: test/Numerics/ODESolvers/ode_tests_common.jl ================================================ using ClimateMachine.ODESolvers using ClimateMachine.SystemSolvers const slow_mrrk_methods = ((LSRK54CarpenterKennedy, 4), (LSRK144NiegemannDiehlBusch, 4)) const fast_mrrk_methods = ( (LSRK54CarpenterKennedy, 4), (LSRK144NiegemannDiehlBusch, 4), (SSPRK33ShuOsher, 3), (SSPRK34SpiteriRuuth, 3), ) const explicit_methods = ( (LSRK54CarpenterKennedy, 4), (LSRK144NiegemannDiehlBusch, 4), (LS3NRK44Classic, 4), (LS3NRK33Heuns, 3), (SSPRK22Heuns, 2), (SSPRK22Ralstons, 2), (SSPRK33ShuOsher, 3), (SSPRK34SpiteriRuuth, 3), (LSRKEulerMethod, 1), ) const imex_methods_lowstorage_compatible = ( # Low-storage variant methods have an assumption that the # explicit and implicit rhs/time-scaling coefficients (B/C vectors) # in the Butcher tables are the same. 
(ARK1ForwardBackwardEuler, 1),
    (ARK2ImplicitExplicitMidpoint, 2),
    (ARK2GiraldoKellyConstantinescu, 2),
    (ARK437L2SA1KennedyCarpenter, 4),
    (ARK548L2SA2KennedyCarpenter, 5),
    (DBM453VoglEtAl, 3),
)
# (method, expected order) pairs for the NaiveVariant storage scheme: all of
# the low-storage-compatible methods plus those that require naive storage.
const imex_methods_naivestorage_compatible = (
    imex_methods_lowstorage_compatible...,
    # Some methods can only be used with the `NaiveVariant` storage
    # scheme since, in general, ARK methods can have different time-scaling/rhs-scaling
    # coefficients (C/B vectors in the Butcher tables). For future reference,
    # any other ARK-type methods that have more general Butcher tables
    # (but with same number of stages) should be tested here:
    (Trap2LockWoodWeller, 2),
)
# Multirate infinitesimal step methods with their expected convergence orders.
const mis_methods =
    ((MIS2, 2), (MIS3C, 2), (MIS4, 3), (MIS4a, 3), (TVDMISA, 2), (TVDMISB, 2))
# MRI-GARK explicit (slow) methods with expected orders.
const mrigark_erk_methods = ((MRIGARKERK33aSandu, 3), (MRIGARKERK45aSandu, 4))
# MRI-GARK implicit (slow) methods with expected orders.
const mrigark_irk_methods = (
    (MRIGARKESDIRK24LSA, 2),
    (MRIGARKESDIRK23LSA, 2),
    (MRIGARKIRK21aSandu, 2),
    (MRIGARKESDIRK34aSandu, 3),
    (MRIGARKESDIRK46aSandu, 4),
)
# Fast (inner) integrators to pair with the MRI-GARK slow methods.
const fast_mrigark_methods =
    ((LSRK54CarpenterKennedy, 4), (LSRK144NiegemannDiehlBusch, 4))
# Trivial linear "solver" for the scalar test problems: via the reciprocal
# trick in linearsolve! below it solves the system exactly when the operator
# acts elementwise (diagonal), so the IMEX tests need no real linear solver.
struct DivideLinearSolver <: AbstractSystemSolver end
# No factorization is needed; hand the operator back unchanged.
function SystemSolvers.prefactorize(
    linearoperator!,
    ::DivideLinearSolver,
    args...,
)
    linearoperator!
end
# Reciprocal trick: for an elementwise operator L with Q = Qhat / L, computing
# 1 ./ L(1 ./ Qhat) recovers Q exactly. NOTE(review): this mutates Qhat in
# place (overwritten with its reciprocal) — assumed acceptable for the tests.
function SystemSolvers.linearsolve!(
    linearoperator!,
    preconditioner,
    ::DivideLinearSolver,
    Qtt,
    Qhat,
    args...,
)
    @. Qhat = 1 / Qhat
    linearoperator!(Qtt, Qhat, args...)
    @. 
Qtt = 1 / Qtt end ================================================ FILE: test/Numerics/ODESolvers/ode_tests_convergence.jl ================================================ using Test using ClimateMachine using StaticArrays using LinearAlgebra using KernelAbstractions using ClimateMachine.MPIStateArrays: array_device include("ode_tests_common.jl") ClimateMachine.init() const ArrayType = ClimateMachine.array_type() @testset "ODE Solvers" begin @testset "Convergence/extensive" begin @testset "1-rate ODE" begin function rhs!(dQ, Q, ::Nothing, time; increment) if increment dQ .+= Q * cos(time) else dQ .= Q * cos(time) end end exactsolution(q0, time) = q0 * exp(sin(time)) @testset "Explicit methods" begin finaltime = 20.0 dts = [2.0^(-k) for k in 6:7] errors = similar(dts) q0 = ArrayType === Array ? [1.0] : range(-1.0, 1.0, length = 303) for (method, expected_order) in explicit_methods for (n, dt) in enumerate(dts) Q = ArrayType(q0) solver = method(rhs!, Q; dt = dt, t0 = 0.0) solve!(Q, solver; timeend = finaltime) Q = Array(Q) errors[n] = maximum(@. abs(Q - exactsolution(q0, finaltime))) end rates = log2.(errors[1:(end - 1)] ./ errors[2:end]) @test isapprox(rates[end], expected_order; atol = 0.17) end end end @testset "Two-rate ODE with a linear stiff part" begin c = 100.0 function rhs_full!(dQ, Q, ::Nothing, time; increment) if increment dQ .+= im * c * Q .+ exp(im * time) else dQ .= im * c * Q .+ exp(im * time) end end function rhs_nonlinear!(dQ, Q, ::Nothing, time; increment) if increment dQ .+= exp(im * time) else dQ .= exp(im * time) end end rhs_slow! = rhs_nonlinear! function rhs_linear!(dQ, Q, ::Nothing, time; increment) if increment dQ .+= im * c * Q else dQ .= im * c * Q end end rhs_fast! = rhs_linear! 
            # Exact solution of dq/dt = i*c*q + exp(i*t) with q(0) = q0.
            function exactsolution(q0, time)
                q0 * exp(im * c * time) +
                (exp(im * time) - exp(im * c * time)) / (im * (1 - c))
            end

            @testset "IMEX methods (LowStorageVariant)" begin
                finaltime = pi / 2
                dts = [2.0^(-k) for k in 13:14]
                errors = similar(dts)
                q0 = ArrayType <: Array ? [1.0] :
                    range(-1.0, 1.0, length = 303)
                for (method, expected_order) in
                    imex_methods_lowstorage_compatible
                    # Exercise both ways of passing the explicit part: the
                    # nonlinear remainder alone, or the full RHS.
                    for split_explicit_implicit in (false, true)
                        for (n, dt) in enumerate(dts)
                            Q = ArrayType{ComplexF64}(q0)
                            rhs! = split_explicit_implicit ? rhs_nonlinear! :
                                rhs_full!
                            solver = method(
                                rhs!,
                                rhs_linear!,
                                LinearBackwardEulerSolver(
                                    DivideLinearSolver(),
                                    isadjustable = true,
                                ),
                                Q;
                                dt = dt,
                                t0 = 0.0,
                                split_explicit_implicit = split_explicit_implicit,
                                variant = LowStorageVariant(),
                            )
                            solve!(Q, solver; timeend = finaltime)
                            Q = Array(Q)
                            errors[n] = maximum(@. abs(
                                Q - exactsolution(q0, finaltime),
                            ))
                        end
                        rates = log2.(errors[1:(end - 1)] ./ errors[2:end])
                        # Guard against gross blow-up before checking order.
                        @test errors[1] < 2.0
                        @test isapprox(rates[end], expected_order; atol = 0.35)
                    end
                end
            end

            @testset "IMEX methods (NaiveVariant)" begin
                finaltime = pi / 2
                dts = [2.0^(-k) for k in 13:14]
                errors = similar(dts)
                q0 = ArrayType <: Array ? [1.0] :
                    range(-1.0, 1.0, length = 303)
                for (method, expected_order) in
                    imex_methods_naivestorage_compatible
                    for split_explicit_implicit in (false, true)
                        for (n, dt) in enumerate(dts)
                            Q = ArrayType{ComplexF64}(q0)
                            rhs! = split_explicit_implicit ? rhs_nonlinear! :
                                rhs_full!
                            solver = method(
                                rhs!,
                                rhs_linear!,
                                LinearBackwardEulerSolver(
                                    DivideLinearSolver(),
                                    isadjustable = true,
                                ),
                                Q;
                                dt = dt,
                                t0 = 0.0,
                                split_explicit_implicit = split_explicit_implicit,
                                variant = NaiveVariant(),
                            )
                            solve!(Q, solver; timeend = finaltime)
                            Q = Array(Q)
                            errors[n] = maximum(@. abs(
                                Q - exactsolution(q0, finaltime),
                            ))
                        end
                        rates = log2.(errors[1:(end - 1)] ./ errors[2:end])
                        @test errors[1] < 2.0
                        @test isapprox(rates[end], expected_order; atol = 0.35)
                    end
                end
            end

            @testset "MRRK methods (no substeps)" begin
                finaltime = pi / 2
                dts = [2.0^(-k) for k in 10:11]
                errors = similar(dts)
                for (slow_method, slow_expected_order) in slow_mrrk_methods
                    for (fast_method, fast_expected_order) in fast_mrrk_methods
                        q0 = ArrayType === Array ? [1.0] :
                            range(-1.0, 1.0, length = 303)
                        for (n, dt) in enumerate(dts)
                            Q = ArrayType{ComplexF64}(q0)
                            solver = MultirateRungeKutta(
                                (
                                    slow_method(rhs_slow!, Q),
                                    fast_method(rhs_fast!, Q),
                                );
                                dt = dt,
                                t0 = 0.0,
                            )
                            solve!(Q, solver; timeend = finaltime)
                            Q = Array(Q)
                            errors[n] = maximum(@. abs(
                                Q - exactsolution(q0, finaltime),
                            ))
                        end
                        rates = log2.(errors[1:(end - 1)] ./ errors[2:end])
                        min_order = min(slow_expected_order, fast_expected_order)
                        max_order = max(slow_expected_order, fast_expected_order)
                        # The combined order of a multirate pairing is only
                        # bracketed by the component orders.
                        @test (
                            isapprox(rates[end], min_order; atol = 0.1) ||
                            isapprox(rates[end], max_order; atol = 0.1) ||
                            min_order <= rates[end] <= max_order
                        )
                    end
                end
            end

            @testset "MRRK methods (with substeps)" begin
                finaltime = pi / 2
                dts = [2.0^(-k) for k in 14:15]
                errors = similar(dts)
                for (slow_method, slow_expected_order) in slow_mrrk_methods
                    for (fast_method, fast_expected_order) in fast_mrrk_methods
                        q0 = ArrayType === Array ? [1.0] :
                            range(-1.0, 1.0, length = 303)
                        for (n, fast_dt) in enumerate(dts)
                            # Slow component steps c-times coarser than fast.
                            slow_dt = c * fast_dt
                            Q = ArrayType{ComplexF64}(q0)
                            solver = MultirateRungeKutta((
                                slow_method(rhs_slow!, Q; dt = slow_dt),
                                fast_method(rhs_fast!, Q; dt = fast_dt),
                            ))
                            solve!(Q, solver; timeend = finaltime)
                            Q = Array(Q)
                            errors[n] = maximum(@. abs(
                                Q - exactsolution(q0, finaltime),
                            ))
                        end
                        rates = log2.(errors[1:(end - 1)] ./ errors[2:end])
                        min_order = min(slow_expected_order, fast_expected_order)
                        max_order = max(slow_expected_order, fast_expected_order)
                        @test (
                            isapprox(rates[end], min_order; atol = 0.1) ||
                            isapprox(rates[end], max_order; atol = 0.1) ||
                            min_order <= rates[end] <= max_order
                        )
                    end
                end
            end

            @testset "MIS methods (with substeps)" begin
                finaltime = pi / 2
                dts = [2.0^(-k) for k in 8:9]
                errors = similar(dts)
                for (mis_method, mis_expected_order) in mis_methods
                    for fast_method in (LSRK54CarpenterKennedy,)
                        q0 = ArrayType === Array ? [1.0] :
                            range(-1.0, 1.0, length = 303)
                        for (n, dt) in enumerate(dts)
                            Q = ArrayType{ComplexF64}(q0)
                            # 4 = number of fast substeps per slow stage.
                            solver = mis_method(
                                rhs_slow!,
                                rhs_fast!,
                                fast_method,
                                4,
                                Q;
                                dt = dt,
                                t0 = 0.0,
                            )
                            solve!(Q, solver; timeend = finaltime)
                            Q = Array(Q)
                            errors[n] = maximum(@. abs(
                                Q - exactsolution(q0, finaltime),
                            ))
                        end
                        rates = log2.(errors[1:(end - 1)] ./ errors[2:end])
                        @test isapprox(
                            rates[end],
                            mis_expected_order;
                            atol = 0.1,
                        )
                    end
                end
            end
        end

        #=
        Test problem (4.2) from [Roberts2018](@cite)

        Note: The actual rates are all over the place with this test and
        passing largely depends on final dt size
        =#
        @testset "2-rate ODE from Roberts2018" begin
            # Coupled fast/slow system parameters; the coupling matrix Ω is
            # built so the exact solution below satisfies the ODE.
            ω = 100
            λf = -10
            λs = -1
            ξ = 1 // 10
            α = 1
            ηfs = ((1 - ξ) / α) * (λf - λs)
            ηsf = -ξ * α * (λf - λs)
            Ω = @SMatrix [
                λf ηfs
                ηsf λs
            ]
            # Fast tendency: only dQ[1] is updated.
            function rhs_fast!(dQ, Q, param, t; increment)
                @inbounds begin
                    increment || (dQ .= 0)
                    yf = Q[1]
                    ys = Q[2]
                    gf = (-3 + yf^2 - cos(ω * t)) / 2yf
                    gs = (-2 + ys^2 - cos(t)) / 2ys
                    dQ[1] += Ω[1, 1] * gf + Ω[1, 2] * gs - ω * sin(ω * t) / 2yf
                end
            end
            # Slow tendency: only dQ[2] is updated.
            function rhs_slow!(dQ, Q, param, t; increment)
                @inbounds begin
                    increment || (dQ .= 0)
                    yf = Q[1]
                    ys = Q[2]
                    gf = (-3 + yf^2 - cos(ω * t)) / 2yf
                    gs = (-2 + ys^2 - cos(t)) / 2ys
                    dQ[2] += Ω[2, 1] * gf + Ω[2, 2] * gs - sin(t) / 2ys
                end
            end
            exactsolution(t) = [sqrt(3 + cos(ω * t)); sqrt(2 + cos(t))]

            @testset "MRRK methods (no substeps)" begin
                finaltime = 5π / 2
                dts = [2.0^(-k) for k in 7:8]
                error = similar(dts)
                for (slow_method, slow_expected_order) in slow_mrrk_methods
                    for (fast_method, fast_expected_order) in fast_mrrk_methods
                        for (n, dt) in enumerate(dts)
                            Q = exactsolution(0)
                            solver = MultirateRungeKutta(
                                (
                                    slow_method(rhs_slow!, Q),
                                    fast_method(rhs_fast!, Q),
                                );
                                dt = dt,
                                t0 = 0.0,
                            )
                            solve!(Q, solver; timeend = finaltime)
                            error[n] = norm(Q - exactsolution(finaltime))
                        end
                        rate = log2.(error[1:(end - 1)] ./ error[2:end])
                        min_order = min(slow_expected_order, fast_expected_order)
                        max_order = max(slow_expected_order, fast_expected_order)
                        @test (
                            isapprox(rate[end], min_order; atol = 0.3) ||
                            isapprox(rate[end], max_order; atol = 0.3) ||
                            min_order <= rate[end] <= max_order
                        )
                    end
                end
            end

            @testset "MRRK methods (with substeps)" begin
                finaltime = 5π / 2
                dts = [2.0^(-k) for k in 8:9]
                error = similar(dts)
                for (slow_method, slow_expected_order) in slow_mrrk_methods
                    for (fast_method, fast_expected_order) in fast_mrrk_methods
                        for (n, fast_dt) in enumerate(dts)
                            Q = exactsolution(0)
                            slow_dt = ω * fast_dt
                            solver = MultirateRungeKutta((
                                slow_method(rhs_slow!, Q; dt = slow_dt),
                                fast_method(rhs_fast!, Q; dt = fast_dt),
                            ))
                            solve!(Q, solver; timeend = finaltime)
                            error[n] = norm(Q - exactsolution(finaltime))
                        end
                        rate = log2.(error[1:(end - 1)] ./ error[2:end])
                        min_order = min(slow_expected_order, fast_expected_order)
                        max_order = max(slow_expected_order, fast_expected_order)
                        @test (
                            isapprox(rate[end], min_order; atol = 0.3) ||
                            isapprox(rate[end], max_order; atol = 0.3) ||
                            min_order <= rate[end] <= max_order
                        )
                    end
                end
            end

            @testset "MRRK methods with IMEX fast solver" begin
                # Implicit part of the IMEX fast solver is identically zero:
                # only the explicit fast RHS contributes.
                function rhs_zero!(dQ, Q, param, t; increment)
                    if !increment
                        dQ .= 0
                    end
                end

                finaltime = 5π / 2
                dts = [2.0^(-k) for k in 8:9]
                error = similar(dts)
                for (slow_method, slow_expected_order) in slow_mrrk_methods
                    for (fast_method, fast_expected_order) in
                        imex_methods_lowstorage_compatible
                        for (n, fast_dt) in enumerate(dts)
                            Q = exactsolution(0)
                            slow_dt = ω * fast_dt
                            solver = MultirateRungeKutta((
                                slow_method(rhs_slow!, Q; dt = slow_dt),
                                fast_method(
                                    rhs_fast!,
                                    rhs_zero!,
                                    LinearBackwardEulerSolver(
                                        DivideLinearSolver(),
                                        isadjustable = true,
                                    ),
                                    Q;
                                    dt = fast_dt,
                                    split_explicit_implicit = false,
                                    variant = LowStorageVariant(),
                                ),
                            ))
                            solve!(Q, solver; timeend = finaltime)
                            error[n] = norm(Q - exactsolution(finaltime))
                        end
                        rate = log2.(error[1:(end - 1)] ./ error[2:end])
                        min_order = min(slow_expected_order, fast_expected_order)
                        max_order = max(slow_expected_order, fast_expected_order)
                        # ARK2GiraldoKellyConstantinescu needs a looser
                        # tolerance on this problem.
                        atol = fast_method == ARK2GiraldoKellyConstantinescu ?
                            0.5 : 0.37
                        @test (
                            isapprox(rate[end], min_order; atol = atol) ||
                            isapprox(rate[end], max_order; atol = atol) ||
                            min_order <= rate[end] <= max_order
                        )
                    end
                end
            end

            @testset "MIS methods (with substeps)" begin
                finaltime = 5π / 2
                dts = [2.0^(-k) for k in 9:10]
                error = similar(dts)
                for (mis_method, mis_expected_order) in mis_methods
                    for fast_method in (LSRK54CarpenterKennedy,)
                        for (n, dt) in enumerate(dts)
                            Q = exactsolution(0)
                            solver = mis_method(
                                rhs_slow!,
                                rhs_fast!,
                                fast_method,
                                4,
                                Q;
                                dt = dt,
                                t0 = 0.0,
                            )
                            solve!(Q, solver; timeend = finaltime)
                            error[n] = norm(Q - exactsolution(finaltime))
                        end
                        rate = log2.(error[1:(end - 1)] ./ error[2:end])
                        @test isapprox(
                            rate[end],
                            mis_expected_order;
                            atol = 0.1,
                        )
                    end
                end
            end
        end

        # Simple 3-rate problem based on test of [Roberts2018](@cite)
        #
        # NOTE: Since we have no theory to say this ODE solver is accurate,
        # the rates suggest that things are really only 2nd order.
        #
        # TODO: This is not great, but no theory to say we should be accurate!
        @testset "3-rate ODE" begin
            # Three components with widely separated frequencies/decay rates;
            # the coupling matrix Ω is constructed so the square-root exact
            # solution below satisfies the system.
            ω1, ω2, ω3 = 10000, 100, 1
            λ1, λ2, λ3 = -100, -10, -1
            β1, β2, β3 = 2, 3, 4
            ξ12 = λ2 / λ1
            ξ13 = λ3 / λ1
            ξ23 = λ3 / λ2
            α12, α13, α23 = 1, 1, 1
            η12 = ((1 - ξ12) / α12) * (λ1 - λ2)
            η13 = ((1 - ξ13) / α13) * (λ1 - λ3)
            η23 = ((1 - ξ23) / α23) * (λ2 - λ3)
            η21 = ξ12 * α12 * (λ2 - λ1)
            η31 = ξ13 * α13 * (λ3 - λ1)
            η32 = ξ23 * α23 * (λ3 - λ2)
            Ω = @SMatrix [
                λ1 η12 η13
                η21 λ2 η23
                η31 η32 λ3
            ]
            # Fastest tendency: updates dQ[1] only.
            function rhs1!(dQ, Q, param, t; increment)
                @inbounds begin
                    increment || (dQ .= 0)
                    y1, y2, y3 = Q[1], Q[2], Q[3]
                    g = @SVector [
                        (-β1 + y1^2 - cos(ω1 * t)) / 2y1,
                        (-β2 + y2^2 - cos(ω2 * t)) / 2y2,
                        (-β3 + y3^2 - cos(ω3 * t)) / 2y3,
                    ]
                    dQ[1] += Ω[1, :]' * g - ω1 * sin(ω1 * t) / 2y1
                end
            end
            # Intermediate tendency: updates dQ[2] only.
            function rhs2!(dQ, Q, param, t; increment)
                @inbounds begin
                    increment || (dQ .= 0)
                    y1, y2, y3 = Q[1], Q[2], Q[3]
                    g = @SVector [
                        (-β1 + y1^2 - cos(ω1 * t)) / 2y1,
                        (-β2 + y2^2 - cos(ω2 * t)) / 2y2,
                        (-β3 + y3^2 - cos(ω3 * t)) / 2y3,
                    ]
                    dQ[2] += Ω[2, :]' * g - ω2 * sin(ω2 * t) / 2y2
                end
            end
            # Slowest tendency: updates dQ[3] only.
            function rhs3!(dQ, Q, param, t; increment)
                @inbounds begin
                    increment || (dQ .= 0)
                    y1, y2, y3 = Q[1], Q[2], Q[3]
                    g = @SVector [
                        (-β1 + y1^2 - cos(ω1 * t)) / 2y1,
                        (-β2 + y2^2 - cos(ω2 * t)) / 2y2,
                        (-β3 + y3^2 - cos(ω3 * t)) / 2y3,
                    ]
                    dQ[3] += Ω[3, :]' * g - ω3 * sin(ω3 * t) / 2y3
                end
            end
            # Combined fast+intermediate tendency (rhs2! always increments so
            # the rhs1! contribution is not overwritten).
            function rhs12!(dQ, Q, param, t; increment)
                rhs1!(dQ, Q, param, t; increment = increment)
                rhs2!(dQ, Q, param, t; increment = true)
            end

            exactsolution(t) = [
                sqrt(β1 + cos(ω1 * t)),
                sqrt(β2 + cos(ω2 * t)),
                sqrt(β3 + cos(ω3 * t)),
            ]

            @testset "MRRK methods (no substeps)" begin
                finaltime = π / 2
                dts = [2.0^(-k) for k in 9:10]
                error = similar(dts)
                for (rate3_method, rate3_order) in slow_mrrk_methods
                    for (rate2_method, rate2_order) in slow_mrrk_methods
                        for (rate1_method, rate1_order) in fast_mrrk_methods
                            for (n, dt) in enumerate(dts)
                                Q = exactsolution(0)
                                solver = MultirateRungeKutta(
                                    (
                                        rate3_method(rhs3!, Q),
                                        rate2_method(rhs2!, Q),
                                        rate1_method(rhs1!, Q),
                                    );
                                    dt = dt,
                                    t0 = 0.0,
                                )
                                solve!(Q, solver; timeend = finaltime)
                                error[n] =
                                    norm(Q - exactsolution(finaltime))
                            end
                            rate = log2.(error[1:(end - 1)] ./ error[2:end])
                            # min/max orders computed but only a 2nd-order
                            # floor is asserted (no theory for more here).
                            min_order = min(rate3_order, rate2_order, rate1_order)
                            max_order = max(rate3_order, rate2_order, rate1_order)
                            @test 2 <= rate[end]
                        end
                    end
                end
            end

            @testset "MRRK methods (with substeps)" begin
                finaltime = π / 2
                dts = [2.0^(-k) for k in 16:17]
                error = similar(dts)
                for (rate3_method, rate3_order) in slow_mrrk_methods
                    for (rate2_method, rate2_order) in slow_mrrk_methods
                        for (rate1_method, rate1_order) in fast_mrrk_methods
                            for (n, dt1) in enumerate(dts)
                                Q = exactsolution(0)
                                # Step sizes scaled by the frequency ratios.
                                dt2 = (ω1 / ω2) * dt1
                                dt3 = (ω2 / ω3) * dt2
                                solver = MultirateRungeKutta((
                                    rate3_method(rhs3!, Q; dt = dt3),
                                    rate2_method(rhs2!, Q; dt = dt2),
                                    rate1_method(rhs1!, Q; dt = dt1),
                                ))
                                solve!(Q, solver; timeend = finaltime)
                                error[n] =
                                    norm(Q - exactsolution(finaltime))
                            end
                            rate = log2.(error[1:(end - 1)] ./ error[2:end])
                            min_order = min(rate3_order, rate2_order, rate1_order)
                            max_order = max(rate3_order, rate2_order, rate1_order)
                            @test 2 <= rate[end]
                        end
                    end
                end
            end
        end

        #=
        Test problem (8.2) from [Sandu2019](@cite) for MRI-GARK Schemes
        =#
        @testset "2-rate problem" begin
            ω = 20
            λf = -10
            λs = -1
            ξ = 1 // 10
            α = 1
            ηfs = ((1 - ξ) / α) * (λf - λs)
            ηsf = -ξ * α * (λf - λs)
            Ω = @SMatrix [
                λf ηfs
                ηsf λs
            ]
            # Fast tendency evaluated through a KernelAbstractions kernel
            # (single work-item, ndrange = 1) so it runs on the active device.
            function rhs_fast!(dQ, Q, param, t; increment)
                @kernel function knl!(dQ, Q, t, increment)
                    @inbounds begin
                        increment || (dQ .= 0)
                        yf = Q[1]
                        ys = Q[2]
                        gf = (-3 + yf^2 - cos(ω * t)) / 2yf
                        gs = (-2 + ys^2 - cos(t)) / 2ys
                        dQ[1] +=
                            Ω[1, 1] * gf + Ω[1, 2] * gs - ω * sin(ω * t) / 2yf
                    end
                end
                event = Event(array_device(Q))
                event = knl!(array_device(Q), 1)(
                    dQ,
                    Q,
                    t,
                    increment;
                    ndrange = 1,
                    dependencies = (event,),
                )
                wait(array_device(Q), event)
            end
            # Slow tendency, same kernel-launch pattern.
            function rhs_slow!(dQ, Q, param, t; increment)
                @kernel function knl!(dQ, Q, t, increment)
                    @inbounds begin
                        increment || (dQ .= 0)
                        yf = Q[1]
                        ys = Q[2]
                        gf = (-3 + yf^2 - cos(ω * t)) / 2yf
                        gs = (-2 + ys^2 - cos(t)) / 2ys
                        dQ[2] += Ω[2, 1] * gf + Ω[2, 2] * gs - sin(t) / 2ys
                    end
                end
                event = Event(array_device(Q))
                event = knl!(array_device(Q), 1)(
                    dQ,
                    Q,
                    t,
                    increment;
                    ndrange = 1,
                    dependencies = (event,),
                )
                wait(array_device(Q), event)
            end

            # Custom backward Euler solver exploiting the problem structure:
            # the slow RHS does not touch Q[1], and the Q[2] update reduces to
            # a scalar quadratic equation solved in closed form.
            struct ODETestConvNonLinBE <: AbstractBackwardEulerSolver end
            ODESolvers.Δt_is_adjustable(::ODETestConvNonLinBE) = true
            function (::ODETestConvNonLinBE)(Q, Qhat, α, p, t)
                @kernel function knl!(Q, Qhat, α, p, t)
                    @inbounds begin
                        # Slow RHS has zero tendency of yf so just copy Qhat
                        Q[1] = yf = Qhat[1]
                        gf = (-3 + yf^2 - cos(ω * t)) / 2yf
                        # solves: Q = Qhat[2] + α * rhs_slow(Q, t)
                        # (simplifies to a quadratic equation)
                        a = 2 - α * Ω[2, 2]
                        b = -2 * (Qhat[2] + α * Ω[2, 1] * gf)
                        c = α * (Ω[2, 2] * (2 + cos(t)) + sin(t))
                        Q[2] = (-b + sqrt(b^2 - 4 * a * c)) / (2a)
                    end
                end
                event = Event(array_device(Q))
                event = knl!(array_device(Q), 1)(
                    Q,
                    Qhat,
                    α,
                    p,
                    t;
                    ndrange = 1,
                    dependencies = (event,),
                )
                wait(array_device(Q), event)
            end

            exactsolution(t) =
                ArrayType([sqrt(3 + cos(ω * t)); sqrt(2 + cos(t))])

            finaltime = 1
            dts = [2.0^(-k) for k in 7:9]
            error = similar(dts)

            @testset "Explicit" begin
                for (mri_method, mri_expected_order) in mrigark_erk_methods
                    for (fast_method, fast_expected_order) in
                        fast_mrigark_methods
                        for (n, slow_dt) in enumerate(dts)
                            Q = exactsolution(0)
                            fast_dt = slow_dt / ω
                            fastsolver =
                                fast_method(rhs_fast!, Q; dt = fast_dt)
                            solver = mri_method(
                                rhs_slow!,
                                fastsolver,
                                Q,
                                dt = slow_dt,
                            )
                            solve!(Q, solver; timeend = finaltime)
                            error[n] = norm(Q - exactsolution(finaltime))
                        end
                        rate = log2.(error[1:(end - 1)] ./ error[2:end])
                        order = mri_expected_order
                        @test isapprox(
                            rate[end],
                            mri_expected_order;
                            atol = 0.3,
                        )
                    end
                end
            end

            @testset "Implicit" begin
                for (mri_method, mri_expected_order) in mrigark_irk_methods
                    for (fast_method, fast_expected_order) in
                        fast_mrigark_methods
                        for (n, slow_dt) in enumerate(dts)
                            Q = exactsolution(0)
                            fast_dt = slow_dt / ω
                            fastsolver =
                                fast_method(rhs_fast!, Q; dt = fast_dt)
                            solver = mri_method(
                                rhs_slow!,
                                ODETestConvNonLinBE(),
                                fastsolver,
                                Q,
                                dt = slow_dt,
                            )
                            solve!(Q, solver; timeend = finaltime)
                            error[n] = norm(Q - exactsolution(finaltime))
                        end
                        rate = log2.(error[1:(end - 1)] ./ error[2:end])
                        order = mri_expected_order
                        @test isapprox(
                            rate[end],
                            mri_expected_order;
                            atol = 0.3,
                        )
                    end
                end
            end

            @testset "IMEX" begin
                for (method, expected_order) in
                    imex_methods_naivestorage_compatible
                    for (n, dt) in enumerate(dts)
                        Q = exactsolution(0)
                        solver = method(
                            rhs_fast!,
                            rhs_slow!,
                            ODETestConvNonLinBE(),
                            Q;
                            dt = dt,
                            t0 = 0.0,
                            split_explicit_implicit = true,
                            variant = NaiveVariant(),
                        )
                        solve!(Q, solver; timeend = finaltime)
                        error[n] = norm(Q - exactsolution(finaltime))
                    end
                    rate = log2.(error[1:(end - 1)] ./ error[2:end])
                    order = expected_order
                    @test isapprox(rate[end], expected_order; atol = 0.3)
                end
            end
        end

        # 3-rate modification of the above test problem
        @testset "3-rate problem" begin
            ω1, ω2, ω3 = 20, 5, 1
            λ1, λ2, λ3 = -20, -5, -1
            β1, β2, β3 = 2, 2, 2
            ξ12 = λ2 / (λ1 + λ2)
            ξ13 = λ3 / (λ1 + λ3)
            ξ23 = λ3 / (λ2 + λ3)
            α12, α13, α23 = 1, 1, 1
            η12 = ((1 - ξ12) / α12) * (λ1 - λ2)
            # The 1<->3 couplings are zeroed here (original expressions kept
            # in the trailing comments).
            η13 = 0#((1 - ξ13) / α13) * (λ1 - λ3)
            η23 = ((1 - ξ23) / α23) * (λ2 - λ3)
            η21 = ξ12 * α12 * (λ2 - λ1)
            η31 = 0#ξ13 * α13 * (λ3 - λ1)
            η32 = ξ23 * α23 * (λ3 - λ2)
            Ω = @SMatrix [
                λ1 η12 η13
                η21 λ2 η23
                η31 η32 λ3
            ]
            # Fastest tendency, launched as a single-work-item kernel on the
            # active device.
            function rhs1!(dQ, Q, param, t; increment)
                @kernel function knl!(dQ, Q, t, increment)
                    @inbounds begin
                        increment || (dQ .= 0)
                        y1, y2, y3 = Q[1], Q[2], Q[3]
                        g = @SVector [
                            (-β1 + y1^2 - cos(ω1 * t)) / 2y1,
                            (-β2 + y2^2 - cos(ω2 * t)) / 2y2,
                            (-β3 + y3^2 - cos(ω3 * t)) / 2y3,
                        ]
                        dQ[1] += Ω[1, :]' * g - ω1 * sin(ω1 * t) / 2y1
                    end
                end
                event = Event(array_device(Q))
                event = knl!(array_device(Q), 1)(
                    dQ,
                    Q,
                    t,
                    increment;
                    ndrange = 1,
                    dependencies = (event,),
                )
                wait(array_device(Q), event)
            end
            # Intermediate tendency.
            function rhs2!(dQ, Q, param, t; increment)
                @kernel function knl!(dQ, Q, t, increment)
                    @inbounds begin
                        increment || (dQ .= 0)
                        y1, y2, y3 = Q[1], Q[2], Q[3]
                        g = @SVector [
                            (-β1 + y1^2 - cos(ω1 * t)) / 2y1,
                            (-β2 + y2^2 - cos(ω2 * t)) / 2y2,
                            (-β3 + y3^2 - cos(ω3 * t)) / 2y3,
                        ]
                        dQ[2] += Ω[2, :]' * g - ω2 * sin(ω2 * t) / 2y2
                    end
                end
                event = Event(array_device(Q))
                event = knl!(array_device(Q), 1)(
                    dQ,
                    Q,
                    t,
                    increment;
                    ndrange = 1,
                    dependencies = (event,),
                )
                wait(array_device(Q), event)
            end
            # Slowest tendency.
            function rhs3!(dQ, Q, param, t; increment)
                @kernel function knl!(dQ, Q, t, increment)
                    @inbounds begin
                        increment || (dQ .= 0)
                        y1, y2, y3 = Q[1], Q[2], Q[3]
                        g = @SVector [
                            (-β1 + y1^2 - cos(ω1 * t)) / 2y1,
                            (-β2 + y2^2 - cos(ω2 * t)) / 2y2,
                            (-β3 + y3^2 - cos(ω3 * t)) / 2y3,
                        ]
                        dQ[3] += Ω[3, :]' * g - ω3 * sin(ω3 * t) / 2y3
                    end
                end
                event = Event(array_device(Q))
                event = knl!(array_device(Q), 1)(
                    dQ,
                    Q,
                    t,
                    increment;
                    ndrange = 1,
                    dependencies = (event,),
                )
                wait(array_device(Q), event)
            end

            # Backward Euler solver for the slowest component: the slow RHS
            # leaves Q[1], Q[2] unchanged, and the Q[3] update reduces to a
            # scalar quadratic solved in closed form.
            struct ODETestConvNonLinBE3Rate <: AbstractBackwardEulerSolver end
            ODESolvers.Δt_is_adjustable(::ODETestConvNonLinBE3Rate) = true
            function (::ODETestConvNonLinBE3Rate)(Q, Qhat, α, p, t)
                @kernel function knl!(Q, Qhat, α, p, t)
                    @inbounds begin
                        # Slowest RHS has zero tendency for y1 and y2, so just
                        # copy them from Qhat
                        Q[1] = y1 = Qhat[1]
                        Q[2] = y2 = Qhat[2]
                        g = @SVector [
                            (-β1 + y1^2 - cos(ω1 * t)) / 2y1,
                            (-β2 + y2^2 - cos(ω2 * t)) / 2y2,
                        ]
                        # solves: Q = Qhat + α * rhs_slow(Q, t)
                        # (simplifies to a quadratic equation)
                        a = 2 - α * Ω[3, 3]
                        b = -2 * (Qhat[3] + α * Ω[3, 1] * g[1] + α * Ω[3, 2] * g[2])
                        c = α * (Ω[3, 3] * (β3 + cos(t)) + sin(t))
                        Q[3] = (-b + sqrt(b^2 - 4 * a * c)) / (2a)
                    end
                end
                event = Event(array_device(Q))
                event = knl!(array_device(Q), 1)(
                    Q,
                    Qhat,
                    α,
                    p,
                    t;
                    ndrange = 1,
                    dependencies = (event,),
                )
                wait(array_device(Q), event)
            end

            exactsolution(t) = ArrayType([
                sqrt(β1 + cos(ω1 * t)),
                sqrt(β2 + cos(ω2 * t)),
                sqrt(β3 + cos(ω3 * t)),
            ])

            finaltime = 1
            dts = [2.0^(-k) for k in 6:7]
            error = similar(dts)

            @testset "Explicit" begin
                for (slow_method, slow_order) in mrigark_erk_methods
                    for (mid_method, mid_order) in mrigark_erk_methods
                        for (fast_method, fast_order) in fast_mrigark_methods
                            for (n, slow_dt) in enumerate(dts)
                                Q = exactsolution(0)
                                # Nested MRI-GARK: each level substeps by its
                                # frequency ratio.
                                mid_dt = slow_dt / ω2
                                fast_dt = slow_dt / ω1
                                fastsolver =
                                    fast_method(rhs1!, Q; dt = fast_dt)
midsolver = mid_method( rhs2!, fastsolver, Q, dt = mid_dt, ) slowsolver = slow_method( rhs3!, midsolver, Q, dt = slow_dt, ) solve!(Q, slowsolver; timeend = finaltime) error[n] = norm(Q - exactsolution(finaltime)) end rate = log2.(error[1:(end - 1)] ./ error[2:end]) expected_order = min(slow_order, mid_order, fast_order) @test isapprox( rate[end], expected_order; atol = 0.3, ) end end end end @testset "Implicit-Explicit" begin for (slow_method, slow_order) in mrigark_irk_methods for (mid_method, mid_order) in mrigark_erk_methods for (fast_method, fast_order) in fast_mrigark_methods for (n, slow_dt) in enumerate(dts) Q = exactsolution(0) mid_dt = slow_dt / ω2 fast_dt = slow_dt / ω1 fastsolver = fast_method(rhs1!, Q; dt = fast_dt) midsolver = mid_method( rhs2!, fastsolver, Q, dt = mid_dt, ) slowsolver = slow_method( rhs3!, ODETestConvNonLinBE3Rate(), midsolver, Q, dt = slow_dt, ) solve!(Q, slowsolver; timeend = finaltime) error[n] = norm(Q - exactsolution(finaltime)) end rate = log2.(error[1:(end - 1)] ./ error[2:end]) expected_order = min(slow_order, mid_order, fast_order) @test isapprox( rate[end], expected_order; atol = 0.4, ) end end end end end end end ================================================ FILE: test/Numerics/ODESolvers/runtests.jl ================================================ using Test, MPI include(joinpath("..", "..", "testhelpers.jl")) @testset "ODE Solvers" begin runmpi(joinpath(@__DIR__, "callbacks.jl")) end ================================================ FILE: test/Numerics/SystemSolvers/bandedsystem.jl ================================================ using ClimateMachine using MPI using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using Logging using LinearAlgebra using Random using StaticArrays using ClimateMachine.BalanceLaws import ClimateMachine.BalanceLaws: vars_state, number_states using ClimateMachine.DGMethods: DGModel, DGFVModel, init_ode_state, create_state using ClimateMachine.SystemSolvers: banded_matrix, 
banded_matrix_vector_product! using ClimateMachine.DGMethods.FVReconstructions: FVConstant, FVLinear using ClimateMachine.DGMethods.NumericalFluxes: RusanovNumericalFlux, CentralNumericalFluxSecondOrder, CentralNumericalFluxGradient using ClimateMachine.MPIStateArrays: MPIStateArray, euclidean_distance using ClimateMachine.VariableTemplates using ClimateMachine.SystemSolvers using ClimateMachine.SystemSolvers: band_lu!, band_forward!, band_back! using Test include("../DGMethods/advection_diffusion/advection_diffusion_model.jl") struct Pseudo1D{n, α, β, μ, δ} <: AdvectionDiffusionProblem end function init_velocity_diffusion!( ::Pseudo1D{n, α, β}, aux::Vars, geom::LocalGeometry, ) where {n, α, β} # Direction of flow is n with magnitude α aux.advection.u = α * n # diffusion of strength β in the n direction aux.diffusion.D = β * n * n' end struct BigAdvectionDiffusion <: BalanceLaw end function vars_state(::BigAdvectionDiffusion, ::Prognostic, FT) @vars begin ρ::FT X::SVector{3, FT} end end function initial_condition!( ::Pseudo1D{n, α, β, μ, δ}, state, aux, localgeo, t, ) where {n, α, β, μ, δ} ξn = dot(n, localgeo.coord) # ξT = SVector(x) - ξn * n state.ρ = exp(-(ξn - μ - α * t)^2 / (4 * β * (δ + t))) / sqrt(1 + t / δ) end let # boiler plate MPI stuff ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD Random.seed!(777 + MPI.Comm_rank(mpicomm)) # Mesh generation parameters Neh = 10 Nev = 4 @testset "$(@__FILE__) DGModel matrix" begin for FT in (Float64, Float32) for dim in (2, 3) connectivity = dim == 3 ? 
:full : :face for single_column in (false, true) # Setup the topology if dim == 2 brickrange = ( range(FT(0); length = Neh + 1, stop = 1), range(FT(1); length = Nev + 1, stop = 2), ) elseif dim == 3 brickrange = ( range(FT(0); length = Neh + 1, stop = 1), range(FT(0); length = Neh + 1, stop = 1), range(FT(1); length = Nev + 1, stop = 2), ) end topl = StackedBrickTopology( mpicomm, brickrange, connectivity = connectivity, ) # Warp mesh function warpfun(ξ1, ξ2, ξ3) # single column currently requires no geometry warping # Even if the warping is in only the horizontal, the way we # compute metrics causes problems for the single column approach # (possibly need to not use curl-invariant computation) if !single_column ξ1 = ξ1 + sin(2 * FT(π) * ξ1 * ξ2) / 10 ξ2 = ξ2 + sin(2 * FT(π) * ξ1) / 5 if dim == 3 ξ3 = ξ3 + sin(8 * FT(π) * ξ1 * ξ2) / 10 end end (ξ1, ξ2, ξ3) end d = dim == 2 ? FT[1, 10, 0] : FT[1, 1, 10] n = SVector{3, FT}(d ./ norm(d)) α = FT(1) β = FT(1 // 100) μ = FT(-1 // 2) δ = FT(1 // 10) bcs = (HomogeneousBC{0}(),) model = AdvectionDiffusion{dim}(Pseudo1D{n, α, β, μ, δ}(), bcs) for (N, fvmethod) in ( ((4, 4), nothing), ((4, 0), FVConstant()), ((4, 0), FVLinear()), ) # create the actual grid grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, meshwarp = warpfun, ) # the nonlinear model is needed so we can grab the state_auxiliary below dg = (fvmethod === nothing) ? DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) : DGFVModel( model, grid, fvmethod, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) vdg = (fvmethod === nothing) ? 
DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); direction = VerticalDirection(), state_auxiliary = dg.state_auxiliary, ) : DGFVModel( model, grid, fvmethod, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(); direction = VerticalDirection(), state_auxiliary = dg.state_auxiliary, ) A_banded = banded_matrix( vdg, MPIStateArray(dg), MPIStateArray(dg); single_column = single_column, ) Q = init_ode_state(dg, FT(0)) dQ1 = MPIStateArray(dg) dQ2 = MPIStateArray(dg) vdg(dQ1, Q, nothing, 0; increment = false) Q.data .= dQ1.data vdg(dQ1, Q, nothing, 0; increment = false) banded_matrix_vector_product!(A_banded, dQ2, Q) @test all(isapprox.( Array(dQ1.realdata), Array(dQ2.realdata), atol = 100 * eps(FT), )) big_Q = create_state( BigAdvectionDiffusion(), grid, Prognostic(), ) big_dQ = create_state( BigAdvectionDiffusion(), grid, Prognostic(), ) big_Q .= NaN big_dQ .= NaN big_Q.data[:, 1:size(Q, 2), :] .= Q.data vdg(big_dQ, big_Q, nothing, 0; increment = false) @test all(isapprox.( Array(big_dQ.realdata[:, 1:size(Q, 2), :]), Array(dQ1.realdata), atol = 100 * eps(FT), )) @test all(big_dQ[:, (size(Q, 2) + 1):end, :] .== 0) big_dQ.data[:, (size(Q, 2) + 1):end, :] .= -7 vdg(big_dQ, big_Q, nothing, 0; increment = true) @test all(big_dQ[:, (size(Q, 2) + 1):end, :] .== -7) @test all(isapprox.( Array(big_dQ.realdata[:, 1:size(Q, 2), :]), Array(dQ1.realdata), atol = 100 * eps(FT), )) big_A = banded_matrix( vdg, similar(big_Q), similar(big_dQ); single_column = single_column, ) @test all(isapprox.( Array(big_A.data), Array(A_banded.data), atol = 100 * eps(FT), )) α = FT(1 // 10) function op!(LQ, Q) vdg(LQ, Q, nothing, 0; increment = false) @. 
LQ = Q + α * LQ end A_banded = banded_matrix( op!, vdg, MPIStateArray(dg), MPIStateArray(dg); single_column = single_column, ) Q = init_ode_state(dg, FT(0)) dQ1 = MPIStateArray(vdg) dQ2 = MPIStateArray(vdg) op!(dQ1, Q) Q.data .= dQ1.data op!(dQ1, Q) banded_matrix_vector_product!(A_banded, dQ2, Q) @test all(isapprox.( Array(dQ1.realdata), Array(dQ2.realdata), atol = 100 * eps(FT), )) big_Q .= NaN big_Q.data[:, 1:size(Q, 2), :] .= Q.data big_dQ .= NaN op!(big_dQ, big_Q) big_dQ.data[:, (size(Q, 2) + 1):end, :] .= -7 big_A = banded_matrix( op!, vdg, similar(big_Q), similar(big_dQ); single_column = single_column, ) @test all(isapprox.( Array(big_A.data), Array(A_banded.data), atol = 100 * eps(FT), )) band_lu!(big_A) band_forward!(big_dQ, big_A) band_back!(big_dQ, big_A) @test all(isapprox.( Array(big_dQ.realdata[:, 1:size(Q, 2), :]), Array(Q.realdata), atol = 100 * eps(FT), )) @test all(big_dQ[:, (size(Q, 2) + 1):end, :] .== -7) end end end end end end nothing ================================================ FILE: test/Numerics/SystemSolvers/bgmres.jl ================================================ using MPI using Test using LinearAlgebra using Random using StaticArrays using ClimateMachine using ClimateMachine.SystemSolvers using ClimateMachine.MPIStateArrays using CUDA using Random using KernelAbstractions import ClimateMachine.MPIStateArrays: array_device ClimateMachine.init(; fix_rng_seed = true) @kernel function multiply_by_A!(x, A, y, n1, n2) I = @index(Global) for i in 1:n1 tmp = zero(eltype(x)) for j in 1:n2 tmp += A[i, j, I] * y[j, I] end x[i, I] = tmp end end let if CUDA.has_cuda_gpu() Arrays = [Array, CuArray] else Arrays = [Array] end for ArrayType in Arrays for T in [Float32, Float64] ϵ = eps(T) @testset "($ArrayType, $T) Basic Test" begin Random.seed!(42) # Test 1: Basic Functionality n = 100 # size of local (batch) matrix ni = 100 # batch size b = ArrayType(randn(n, ni)) # rhs x = ArrayType(randn(n, ni)) # initial guess x_ref = similar(x) A = 
ArrayType(randn((n, n, ni)) ./ sqrt(n)) for i in 1:n A[i, i, :] .+= 10i end ss = size(b)[1] bgmres = BatchedGeneralizedMinimalResidual( b, n, ni, M = ss, atol = ϵ, rtol = ϵ, ) # Define the linear operator function closure_linear_operator_multi!(A, n1, n2, n3) function linear_operator!(x, y) device = array_device(x) if isa(device, CPU) groupsize = Threads.nthreads() else # isa(device, CUDADevice) groupsize = 256 end event = Event(device) event = multiply_by_A!(device, groupsize)( x, A, y, n1, n2, ndrange = n3, dependencies = (event,), ) wait(device, event) nothing end end linear_operator! = closure_linear_operator_multi!(A, size(A)...) # Now solve linearsolve!( linear_operator!, nothing, bgmres, x, b; max_iters = Inf, ) # reference solution for i in 1:ni x_ref[:, i] = A[:, :, i] \ b[:, i] end @test norm(x - x_ref) < 1000ϵ end ### # Test 2: MPI State Array ### @testset "($ArrayType, $T) MPIStateArray Test" begin Random.seed!(43) n1 = 8 n2 = 3 n3 = 10 mpicomm = MPI.COMM_WORLD mpi_b = MPIStateArray{T}(mpicomm, ArrayType, n1, n2, n3) mpi_x = MPIStateArray{T}(mpicomm, ArrayType, n1, n2, n3) mpi_A = ArrayType(randn(n1 * n2, n1 * n2, n3)) mpi_b.data[:] .= ArrayType(randn(n1 * n2 * n3)) mpi_x.data[:] .= ArrayType(randn(n1 * n2 * n3)) bgmres = BatchedGeneralizedMinimalResidual( mpi_b, n1 * n2, n3, M = n1 * n2, atol = ϵ, rtol = ϵ, ) # Now define the linear operator function closure_linear_operator_mpi!(A, n1, n2, n3) function linear_operator!(x, y) alias_x = reshape(x.data, (n1, n3)) alias_y = reshape(y.data, (n1, n3)) device = array_device(x) if isa(device, CPU) groupsize = Threads.nthreads() else # isa(device, CUDADevice) groupsize = 256 end event = Event(device) event = multiply_by_A!(device, groupsize)( alias_x, A, alias_y, n1, n2, ndrange = n3, dependencies = (event,), ) wait(device, event) nothing end end linear_operator! = closure_linear_operator_mpi!(mpi_A, size(mpi_A)...) 
# Now solve linearsolve!( linear_operator!, nothing, bgmres, mpi_x, mpi_b; max_iters = Inf, ) # check all solutions norms = -zeros(n3) for cidx in 1:n3 sol = Array(mpi_A[:, :, cidx]) \ Array(mpi_b.data[:, :, cidx])[:] norms[cidx] = norm(sol - Array(mpi_x.data[:, :, cidx])[:]) end @test maximum(norms) < 8000ϵ end ### # Test 3: Columnwise test ### @testset "(Array, $T) Columnwise Test" begin Random.seed!(2424) function closure_linear_operator_columwise!(A, tup) function linear_operator!(y, x) alias_x = reshape(x, tup) alias_y = reshape(y, tup) for i6 in 1:tup[6] for i4 in 1:tup[4] for i2 in 1:tup[2] for i1 in 1:tup[1] tmp = alias_x[i1, i2, :, i4, :, i6][:] tmp2 = A[i1, i2, i4, i6] * tmp alias_y[i1, i2, :, i4, :, i6] .= reshape(tmp2, (tup[3], tup[5])) end end end end end end tup = (3, 4, 7, 6, 5, 2) B = [ randn(tup[3] * tup[5], tup[3] * tup[5]) for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6] ] columnwise_A = [ B[i1, i2, i4, i6] + 10I for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6] ] # taking the inverse of A isn't great, but it is convenient columnwise_inv_A = [ inv(columnwise_A[i1, i2, i4, i6]) for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6] ] columnwise_linear_operator! = closure_linear_operator_columwise!(columnwise_A, tup) columnwise_inverse_linear_operator! 
= closure_linear_operator_columwise!(columnwise_inv_A, tup) mpi_tup = (tup[1] * tup[2] * tup[3], tup[4], tup[5] * tup[6]) b = randn(mpi_tup) x = copy(b) columnwise_inverse_linear_operator!(x, b) x += randn((tup[1] * tup[2] * tup[3], tup[4], tup[5] * tup[6])) * 0.1 reshape_tuple_f = tup permute_tuple = (5, 3, 1, 4, 2, 6) bgmres = BatchedGeneralizedMinimalResidual( b, tup[3] * tup[5], tup[1] * tup[2] * tup[4] * tup[6]; M = tup[3] * tup[5], forward_reshape = tup, forward_permute = permute_tuple, atol = 10ϵ, rtol = 10ϵ, ) x_exact = copy(x) linearsolve!( columnwise_linear_operator!, nothing, bgmres, x, b, max_iters = tup[3] * tup[5], ) columnwise_inverse_linear_operator!(x_exact, b) @test norm(x - x_exact) / norm(x_exact) < 1000ϵ columnwise_linear_operator!(x_exact, x) @test norm(x_exact - b) / norm(b) < 1000ϵ end end end end ================================================ FILE: test/Numerics/SystemSolvers/cg.jl ================================================ using CUDA using MPI using Test using LinearAlgebra using Random using StaticArrays using KernelAbstractions: CPU, CUDADevice using ClimateMachine using ClimateMachine.SystemSolvers using ClimateMachine.MPIStateArrays using Random Random.seed!(1235) let ClimateMachine.init() mpicomm = MPI.COMM_WORLD ArrayType = ClimateMachine.array_type() n = 100 T = Float64 A = rand(n, n) scale = 2.0 ϵ = 0.1 # Matrix 1 A = A' * A .* ϵ + scale * I err_thresh = sqrt(eps(T)) # Matrix 2 # A = Diagonal(collect(1:n) * 1.0) positive_definite = minimum(eigvals(A)) > eps(1.0) @test positive_definite b = ones(n) * 1.0 mulbyA!(y, x) = (y .= A * x) tol = sqrt(eps(T)) method(b, tol) = ConjugateGradient(b, max_iter = n) linearsolver = method(b, tol) x = ones(n) * 1.0 x0 = copy(x) iters = linearsolve!(mulbyA!, nothing, linearsolver, x, b; max_iters = Inf) exact = A \ b x0 = copy(x) @testset "Array test" begin @test norm(x - exact) / norm(exact) < err_thresh @test norm(A * x - b) / norm(b) < err_thresh end # Testing for CUDA CuArrays if 
# Close over an array of column matrices `A` and the 6-d shape `tup`,
# returning a mutating operator `linear_operator!(y, x)`.
# Each matrix A[i1, i2, i4, i6] is applied to the flattened (i3, i5)
# slab of x, and the product is written back into the same slab of y.
function closure_linear_operator!(A, tup)
    function linear_operator!(y, x)
        xr = reshape(x, tup)
        yr = reshape(y, tup)
        for i6 in 1:tup[6], i4 in 1:tup[4], i2 in 1:tup[2], i1 in 1:tup[1]
            column = xr[i1, i2, :, i4, :, i6][:]
            product = A[i1, i2, i4, i6] * column
            yr[i1, i2, :, i4, :, i6] .= reshape(product, (tup[3], tup[5]))
        end
    end
    return linear_operator!
end
# Expand LAPACK-style banded storage `B` into the equivalent dense matrix.
# `B` holds the bands in its rows (q superdiagonals, the diagonal, then p
# subdiagonals), one column of the full matrix per column of `B`:
# full[i, j] = B[q + i - j + 1, j]. Entries outside the band are zero.
function band_to_full(B, p, q)
    _, n = size(B)
    full = similar(B, n, n) # assume square
    fill!(full, 0)
    for col in 1:n
        lo = max(1, col - q)
        hi = min(col + p, n)
        for row in lo:hi
            full[row, col] = B[q + row - col + 1, col]
        end
    end
    return full
end
reshape(PermutedDimsArray(x, perm), n, Nq1, Nq2, nhorzelem) d_F = ArrayType(AB) d_F = DGColumnBandedMatrix{ 3, N, nstate, nhorzelem, nvertelem, eband, false, typeof(d_F), }( d_F, ) groupsize = (Nq1, Nq2) ndrange = (Nq1, Nq2, nhorzelem) event = Event(array_device(d_F.data)) event = band_lu_kernel!(array_device(d_F.data), groupsize, ndrange)( d_F, dependencies = (event,), ) wait(array_device(d_F.data), event) F = Array(d_F.data) for h in 1:nhorzelem, j in 1:Nq2, i in 1:Nq1 B = AB[i, j, :, :, h] G = band_to_full(B, p, q) GLU = lu!(G, Val(false)) H = band_to_full(F[i, j, :, :, h], p, q) @assert H ≈ G xp[:, i, j, h] .= GLU \ bp[:, i, j, h] end b = reshape(b, Nq1 * Nq2 * Nqv, nstate, nvertelem * nhorzelem) x = reshape(x, Nq1 * Nq2 * Nqv, nstate, nvertelem * nhorzelem) d_x = ArrayType(b) event = Event(array_device(d_x)) event = band_forward_kernel!(array_device(d_x), groupsize, ndrange)( d_x, d_F, dependencies = (event,), ) event = band_back_kernel!(array_device(d_x), groupsize, ndrange)( d_x, d_F, dependencies = (event,), ) wait(array_device(d_x), event) result = x ≈ Array(d_x) return result end @testset "Columnwise LU test" begin for FT in (Float64, Float32) for N in ((1, 1, 1), (1, 1, 2), (2, 2, 1), (2, 2, 0)) result = run_columnwiselu_test(FT, N) @test result end end end ================================================ FILE: test/Numerics/SystemSolvers/iterativesolvers.jl ================================================ using Test using ClimateMachine using ClimateMachine.SystemSolvers using StaticArrays, LinearAlgebra, Random # this test setup is partly based on IterativeSolvers.jl, see e.g # https://github.com/JuliaMath/IterativeSolvers.jl/blob/master/test/cg.jl @testset "SystemSolvers small full system" begin n = 10 methods = ( (b, tol) -> GeneralizedConjugateResidual(2, b, rtol = tol), (b, tol) -> GeneralizedMinimalResidual(b, M = 3, rtol = tol), (b, tol) -> GeneralizedMinimalResidual(b, M = n, rtol = tol), ) expected_iters = ( Dict(Float32 => 7, Float64 => 11), 
Dict(Float32 => 5, Float64 => 17), Dict(Float32 => 4, Float64 => 10), ) for (m, method) in enumerate(methods), T in [Float32, Float64] Random.seed!(44) A = @MMatrix rand(T, n, n) A = A' * A + I b = @MVector rand(T, n) mulbyA!(y, x) = (y .= A * x) tol = sqrt(eps(T)) linearsolver = method(b, tol) x = @MVector rand(T, n) x0 = copy(x) iters = linearsolve!(mulbyA!, nothing, linearsolver, x, b; max_iters = Inf) @test iters == expected_iters[m][T] @test norm(A * x - b) / norm(A * x0 - b) <= tol # test for convergence in 0 iterations by # initializing with the exact solution x = A \ b iters = linearsolve!(mulbyA!, nothing, linearsolver, x, b; max_iters = Inf) @test iters == 0 @test norm(A * x - b) <= 100 * eps(T) newtol = 1000tol settolerance!(linearsolver, newtol) x = @MVector rand(T, n) x0 = copy(x) linearsolve!(mulbyA!, nothing, linearsolver, x, b; max_iters = Inf) @test norm(A * x - b) / norm(A * x0 - b) <= newtol @test norm(A * x - b) / norm(A * x0 - b) >= tol end end @testset "SystemSolvers large full system" begin n = 1000 methods = ( (b, tol) -> GeneralizedMinimalResidual(b, M = 15, rtol = tol), (b, tol) -> GeneralizedMinimalResidual(b, M = 20, rtol = tol), ) expected_iters = ( Dict(Float32 => (3, 3), Float64 => (9, 8)), Dict(Float32 => (3, 3), Float64 => (9, 8)), ) for (m, method) in enumerate(methods), T in [Float32, Float64] for (i, α) in enumerate(T[1e-2, 5e-3]) Random.seed!(44) A = rand(T, 200, 1000) A = α * A' * A + I b = rand(T, n) mulbyA!(y, x) = (y .= A * x) tol = sqrt(eps(T)) linearsolver = method(b, tol) x = rand(T, n) x0 = copy(x) iters = linearsolve!( mulbyA!, nothing, linearsolver, x, b; max_iters = Inf, ) @test iters == expected_iters[m][T][i] @test norm(A * x - b) / norm(A * x0 - b) <= tol newtol = 1000tol settolerance!(linearsolver, newtol) x = rand(T, n) x0 = copy(x) linearsolve!(mulbyA!, nothing, linearsolver, x, b; max_iters = Inf) @test norm(A * x - b) / norm(A * x0 - b) <= newtol end end end ================================================ 
FILE: test/Numerics/SystemSolvers/poisson.jl ================================================ using MPI using Test using StaticArrays using Logging, Printf using ClimateMachine using ClimateMachine.SystemSolvers using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.VariableTemplates using ClimateMachine.DGMethods using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux import ClimateMachine.BalanceLaws: vars_state, flux_first_order!, flux_second_order!, source!, boundary_conditions, boundary_state!, compute_gradient_argument!, compute_gradient_flux!, nodal_init_state_auxiliary!, init_state_prognostic! import ClimateMachine.DGMethods: numerical_boundary_flux_second_order! using ClimateMachine.Mesh.Geometry: LocalGeometry import ClimateMachine.DGMethods.NumericalFluxes: NumericalFluxSecondOrder, numerical_flux_second_order! if !@isdefined integration_testing const integration_testing = parse( Bool, lowercase(get(ENV, "JULIA_CLIMA_INTEGRATION_TESTING", "false")), ) end struct PoissonModel{dim} <: BalanceLaw end vars_state(::PoissonModel, ::Auxiliary, T) = @vars(rhs_ϕ::T) vars_state(::PoissonModel, ::Prognostic, T) = @vars(ϕ::T) vars_state(::PoissonModel, ::Gradient, T) = @vars(ϕ::T) vars_state(::PoissonModel, ::GradientFlux, T) = @vars(∇ϕ::SVector{3, T}) boundary_conditions(::PoissonModel) = () boundary_state!(nf, bc, bl::PoissonModel, _...) = nothing function flux_first_order!(::PoissonModel, _...) end function flux_second_order!( ::PoissonModel, flux::Grad, state::Vars, diffusive::Vars, hyperdiffusive::Vars, state_auxiliary::Vars, t::Real, ) flux.ϕ = diffusive.∇ϕ end struct PenaltyNumFluxDiffusive <: NumericalFluxSecondOrder end # There is no boundary since we are periodic numerical_boundary_flux_second_order!(nf::PenaltyNumFluxDiffusive, _...) 
# 1-D factor of the manufactured Poisson solution; the -3/8 shift removes
# the mean of sin^4 over one period, so the solution has zero mean.
function sol1d(x)
    s = sin(2pi * x)
    return s^4 - 3 / 8
end

# Analytic second derivative of `sol1d`, used to build the Poisson RHS.
function dxx_sol1d(x)
    s2 = sin(2pi * x)^2
    c2 = cos(2pi * x)^2
    return -16 * pi^2 * s2 * (s2 - 3 * c2)
end
CentralNumericalFluxGradient(), ) Q = init_ode_state(dg, FT(0)) Qrhs = dg.state_auxiliary Qexact = init_ode_state(dg, FT(0)) linearoperator!(y, x) = dg(y, x, nothing, 0; increment = false) linearsolver = linmethod(Q) iters = linearsolve!(linearoperator!, nothing, linearsolver, Q, Qrhs) error = euclidean_distance(Q, Qexact) @info @sprintf """Finished error = %.16e iters = %d """ error iters error, iters end let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD polynomialorder = 4 base_num_elem = 4 tol = 1e-9 linmethods = ( b -> GeneralizedConjugateResidual(3, b, rtol = tol), b -> GeneralizedMinimalResidual(b, M = 7, rtol = tol), ) expected_result = Array{Float64}(undef, 2, 2, 3) # method, dim-1, lvl # GeneralizedConjugateResidual expected_result[1, 1, 1] = 5.0540243616448058e-02 expected_result[1, 1, 2] = 1.4802275366044011e-03 expected_result[1, 1, 3] = 3.3852821775121401e-05 expected_result[1, 2, 1] = 1.4957957657736219e-02 expected_result[1, 2, 2] = 4.7282369781541172e-04 expected_result[1, 2, 3] = 1.4697449643351771e-05 # GeneralizedMinimalResidual expected_result[2, 1, 1] = 5.0540243587512981e-02 expected_result[2, 1, 2] = 1.4802275409186211e-03 expected_result[2, 1, 3] = 3.3852820667079927e-05 expected_result[2, 2, 1] = 1.4957957659220951e-02 expected_result[2, 2, 2] = 4.7282369895963614e-04 expected_result[2, 2, 3] = 1.4697449516628483e-05 lvls = integration_testing ? 
size(expected_result)[end] : 1 for (m, linmethod) in enumerate(linmethods), FT in (Float64,) result = Array{Tuple{FT, Int}}(undef, lvls) for dim in 2:3 for l in 1:lvls Ne = ntuple(d -> 2^(l - 1) * base_num_elem, dim) brickrange = ntuple(d -> range(FT(0), length = Ne[d], stop = 1), dim) periodicity = ntuple(d -> true, dim) @info (ArrayType, FT, m, dim) result[l] = test_run( mpicomm, ArrayType, FT, dim, polynomialorder, brickrange, periodicity, linmethod, ) @test isapprox( result[l][1], FT(expected_result[m, dim - 1, l]), rtol = sqrt(tol), ) end if integration_testing @info begin msg = "" for l in 1:(lvls - 1) rate = log2(result[l][1]) - log2(result[l + 1][1]) msg *= @sprintf("\n rate for level %d = %e\n", l, rate) end msg end end end end end nothing ================================================ FILE: test/Numerics/SystemSolvers/runtests.jl ================================================ using Test, MPI include(joinpath("..", "..", "testhelpers.jl")) include("iterativesolvers.jl") ================================================ FILE: test/Numerics/runtests.jl ================================================ using Test, Pkg @testset "Numerics" begin all_tests = isempty(ARGS) || "all" in ARGS ? 
# Build a SimpleBox hydrostatic-Boussinesq spindown configuration.
#
# Arguments:
# - FT: floating point type for the model
# - N: DG polynomial order
# - resolution: (Nˣ, Nʸ, Nᶻ) element counts
# - dimensions: (Lˣ, Lʸ, H) domain extents
# - BC: optional boundary-condition tuple; when `nothing` the SimpleBox
#   default boundary conditions are used
#
# Returns a `(config, solver_type)` pair.
function config_simple_box(
    ::Type{FT},
    N,
    resolution,
    dimensions;
    BC = nothing,
) where {FT}
    # Use `===` for the nothing-check (idiomatic; `==` dispatches to a
    # generic comparison that user types may overload).
    if BC === nothing
        problem = SimpleBox{FT}(dimensions...)
    else
        problem = SimpleBox{FT}(dimensions...; BC = BC)
    end

    # Spindown test: no thermal expansion, diffusivity, or rotation.
    model = HydrostaticBoussinesqModel{FT}(
        param_set,
        problem;
        cʰ = FT(1),
        αᵀ = FT(0),
        κʰ = FT(0),
        κᶻ = FT(0),
        fₒ = FT(0),
        β = FT(0),
    )

    solver_type =
        ExplicitSolverType(solver_method = LSRK144NiegemannDiehlBusch)

    config = ClimateMachine.OceanBoxGCMConfiguration(
        "hydrostatic_spindown",
        N,
        resolution,
        param_set,
        model;
        periodicity = (true, true, false),
        boundary = ((0, 0), (0, 0), (1, 2)),
    )

    return config, solver_type
end
ClimateMachine.DGMethods.init_ode_state( solver.dg, timeend, solver.init_args...; init_on_cpu = solver.init_on_cpu, ) error = euclidean_distance(solver.Q, Q_exact) / norm(Q_exact) println("error = ", error) @test isapprox(error, FT(0.0); atol = 0.005) ## Check results against reference ClimateMachine.StateCheck.scprintref(cb) if length(refDat) > 0 @test ClimateMachine.StateCheck.scdocheck(cb, refDat) end end @testset "$(@__FILE__)" begin include("../refvals/3D_hydrostatic_spindown_refvals.jl") run_hydrostatic_test(imex = false, refDat = refVals.explicit) # error = 0.0011289879366523504 end ================================================ FILE: test/Ocean/HydrostaticBoussinesq/test_initial_value_problem.jl ================================================ using ClimateMachine ClimateMachine.init() using ClimateMachine.Ocean.OceanProblems: InitialConditions, InitialValueProblem @testset "$(@__FILE__)" begin U = 0.1 L = 0.2 a = 0.3 U = 0.4 Ψ(x, L) = exp(-x^2 / 2 * L^2) # a Gaussian uᵢ(x, y, z) = +U * y / L * Ψ(x, L) vᵢ(x, y, z) = -U * x / L * Ψ(x, L) ηᵢ(x, y, z) = a * Ψ(x, L) θᵢ(x, y, z) = 20.0 + 1e-3 * z initial_conditions = InitialConditions(u = uᵢ, v = vᵢ, η = ηᵢ, θ = θᵢ) problem = InitialValueProblem{Float64}( dimensions = (π, 42, 1.1), initial_conditions = initial_conditions, ) @test problem.Lˣ == Float64(π) @test problem.Lʸ == 42.0 @test problem.H == 1.1 @test problem.initial_conditions.u === uᵢ @test problem.initial_conditions.v === vᵢ @test problem.initial_conditions.η === ηᵢ @test problem.initial_conditions.θ === θᵢ end ================================================ FILE: test/Ocean/HydrostaticBoussinesq/test_ocean_gyre_long.jl ================================================ #!/usr/bin/env julia --project include("../../../experiments/OceanBoxGCM/simple_box.jl") ClimateMachine.init() const FT = Float64 ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin include("../refvals/test_ocean_gyre_refvals.jl") # simulation time 
timestart = FT(0) # s timeend = FT(86400) # s timespan = (timestart, timeend) # DG polynomial order N = Int(4) # Domain resolution Nˣ = Int(20) Nʸ = Int(20) Nᶻ = Int(20) resolution = (N, Nˣ, Nʸ, Nᶻ) # Domain size Lˣ = 4e6 # m Lʸ = 4e6 # m H = 1000 # m dimensions = (Lˣ, Lʸ, H) BC = ( OceanBC(Impenetrable(NoSlip()), Insulating()), OceanBC(Impenetrable(NoSlip()), Insulating()), OceanBC(Penetrable(KinematicStress()), TemperatureFlux()), ) run_simple_box( "ocean_gyre_long", resolution, dimensions, timespan, OceanGyre, imex = false, BC = BC, refDat = refVals.long, ) end ================================================ FILE: test/Ocean/HydrostaticBoussinesq/test_ocean_gyre_short.jl ================================================ #!/usr/bin/env julia --project include("../../../experiments/OceanBoxGCM/simple_box.jl") ClimateMachine.init() const FT = Float64 ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin include("../refvals/test_ocean_gyre_refvals.jl") # simulation time timestart = FT(0) # s timeend = FT(3600) # s timespan = (timestart, timeend) # DG polynomial order N = Int(4) # Domain resolution Nˣ = Int(5) Nʸ = Int(5) Nᶻ = Int(5) resolution = (N, Nˣ, Nʸ, Nᶻ) # Domain size Lˣ = 1e6 # m Lʸ = 1e6 # m H = 1000 # m dimensions = (Lˣ, Lʸ, H) BC = ( OceanBC(Impenetrable(NoSlip()), Insulating()), OceanBC(Impenetrable(NoSlip()), Insulating()), OceanBC(Penetrable(KinematicStress()), TemperatureFlux()), ) run_simple_box( "ocean_gyre_short", resolution, dimensions, timespan, OceanGyre, imex = false, BC = BC, Δt = 120, refDat = refVals.short, ) end ================================================ FILE: test/Ocean/HydrostaticBoussinesq/test_windstress_long.jl ================================================ #!/usr/bin/env julia --project include("../../../experiments/OceanBoxGCM/simple_box.jl") ClimateMachine.init() const FT = Float64 ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin 
include("../refvals/test_windstress_refvals.jl") # simulation time timestart = FT(0) # s timeend = FT(86400) # s timespan = (timestart, timeend) # DG polynomial order N = Int(4) # Domain resolution Nˣ = Int(20) Nʸ = Int(20) Nᶻ = Int(50) resolution = (N, Nˣ, Nʸ, Nᶻ) # Domain size Lˣ = 4e6 # m Lʸ = 4e6 # m H = 400 # m dimensions = (Lˣ, Lʸ, H) BC = ( OceanBC(Impenetrable(NoSlip()), Insulating()), OceanBC(Impenetrable(FreeSlip()), Insulating()), OceanBC(Penetrable(KinematicStress()), Insulating()), ) run_simple_box( "test_windstress_long_imex", resolution, dimensions, timespan, HomogeneousBox, imex = true, BC = BC, Δt = 55, refDat = refVals.long, ) end ================================================ FILE: test/Ocean/HydrostaticBoussinesq/test_windstress_short.jl ================================================ #!/usr/bin/env julia --project include("../../../experiments/OceanBoxGCM/simple_box.jl") ClimateMachine.init() const FT = Float64 ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin include("../refvals/test_windstress_refvals.jl") # simulation time timestart = FT(0) # s timeend = FT(3600) # s timespan = (timestart, timeend) # DG polynomial order N = Int(4) # Domain resolution Nˣ = Int(5) Nʸ = Int(5) Nᶻ = Int(5) resolution = (N, Nˣ, Nʸ, Nᶻ) # Domain size Lˣ = 1e6 # m Lʸ = 1e6 # m H = 400 # m dimensions = (Lˣ, Lʸ, H) BC = ( OceanBC(Impenetrable(NoSlip()), Insulating()), OceanBC(Impenetrable(FreeSlip()), Insulating()), OceanBC(Penetrable(KinematicStress()), Insulating()), ) run_simple_box( "test_windstress_short_imex", resolution, dimensions, timespan, HomogeneousBox, imex = true, BC = BC, Δt = 60, refDat = refVals.imex, ) run_simple_box( "test_windstress_short_explicit", resolution, dimensions, timespan, HomogeneousBox, imex = false, BC = BC, Δt = 180, refDat = ClimateMachine.array_type() == Array ? 
refVals.explicit_cpu : refVals.explicit_gpu, ) end ================================================ FILE: test/Ocean/HydrostaticBoussinesqModel/test_hydrostatic_boussinesq_model.jl ================================================ using ClimateMachine ClimateMachine.init() using ClimateMachine.CartesianDomains using CLIMAParameters: AbstractEarthParameterSet struct DefaultParameters <: AbstractEarthParameterSet end using ClimateMachine.Ocean: HydrostaticBoussinesqSuperModel, current_time, current_step, Δt @testset "$(@__FILE__)" begin domain = RectangularDomain( Ne = (1, 1, 1), Np = 4, x = (-1, 1), y = (-1, 1), z = (-1, 0), ) model = HydrostaticBoussinesqSuperModel( domain = domain, time_step = 0.1, parameters = DefaultParameters(), ) @test model isa HydrostaticBoussinesqSuperModel @test Δt(model) == 0.1 @test current_step(model) == 0 @test current_time(model) == 0.0 end ================================================ FILE: test/Ocean/OceanProblems/test_initial_value_problem.jl ================================================ using ClimateMachine ClimateMachine.init() using ClimateMachine.Ocean.OceanProblems: InitialConditions, InitialValueProblem @testset "$(@__FILE__)" begin U = 0.1 L = 0.2 a = 0.3 U = 0.4 Ψ(x, L) = exp(-x^2 / 2 * L^2) # a Gaussian uᵢ(x, y, z) = +U * y / L * Ψ(x, L) vᵢ(x, y, z) = -U * x / L * Ψ(x, L) ηᵢ(x, y, z) = a * Ψ(x, L) θᵢ(x, y, z) = 20.0 + 1e-3 * z initial_conditions = InitialConditions(u = uᵢ, v = vᵢ, η = ηᵢ, θ = θᵢ) problem = InitialValueProblem{Float64}( dimensions = (π, 42, 1.1), initial_conditions = initial_conditions, ) @test problem.Lˣ == Float64(π) @test problem.Lʸ == 42.0 @test problem.H == 1.1 @test problem.initial_conditions.u === uᵢ @test problem.initial_conditions.v === vᵢ @test problem.initial_conditions.η === ηᵢ @test problem.initial_conditions.θ === θᵢ end ================================================ FILE: test/Ocean/ShallowWater/GyreDriver.jl ================================================ using MPI using Test using 
# Construct the shallow-water gyre model.
#
# - stommel: use LinearDrag (Stommel gyre) instead of ConstantViscosity (Munk)
# - linear:  drop the nonlinear advection term
# - τₒ, fₒ, β: wind stress and Coriolis parameters
# - γ: linear drag coefficient (used when `stommel` is true)
# - ν: viscosity (used when `stommel` is false)
# - Lˣ, Lʸ, H: domain extents
#
# NOTE(review): relies on the module-level gravity-wave speed `c`.
function setup_model(
    ::Type{FT},
    stommel,
    linear,
    τₒ,
    fₒ,
    β,
    γ,
    ν,
    Lˣ,
    Lʸ,
    H,
) where {FT}
    problem = HomogeneousBox{FT}(
        Lˣ,
        Lʸ,
        H,
        τₒ = τₒ,
        BC = (OceanBC(Impenetrable(FreeSlip()), Insulating()),),
    )

    if stommel
        # Bug fix: previously read the global `λ` instead of the `γ`
        # argument, silently ignoring whatever the caller passed here.
        turbulence = LinearDrag{FT}(γ)
    else
        turbulence = ConstantViscosity{FT}(ν)
    end

    advection = linear ? nothing : NonLinearAdvectionTerm()

    model = ShallowWaterModel{FT}(
        param_set,
        problem,
        turbulence,
        advection,
        c = c,
        fₒ = fₒ,
        β = β,
    )
end
ArrayType, N, dt, ::Type{FT}, model, test, ) where {FT} grid = DiscontinuousSpectralElementGrid( topl, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) dg = DGModel( model, grid, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q = init_ode_state(dg, FT(0)) Qe = init_ode_state(dg, FT(timeend)) lsrk = LSRK144NiegemannDiehlBusch(dg, Q; dt = dt, t0 = 0) nt_freq = floor(Int, 1 // 10 * timeend / dt) cbcs_dg = ClimateMachine.StateCheck.sccreate( [(Q, "2D state")], nt_freq; prec = 12, ) cb = (cbcs_dg,) if test > 2 outprefix = @sprintf("ic_mpirank%04d_ic", MPI.Comm_rank(mpicomm)) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q))) writevtk(outprefix, Q, dg, statenames, dg.state_auxiliary, auxnames) outprefix = @sprintf("exact_mpirank%04d", MPI.Comm_rank(mpicomm)) statenames = flattenednames(vars_state(model, Prognostic(), eltype(Qe))) auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Qe))) writevtk(outprefix, Qe, dg, statenames, dg.state_auxiliary, auxnames) vtkstep = [0] vtkpath = outname mkpath(vtkpath) cbvtk = GenericCallbacks.EveryXSimulationSteps(1000) do outprefix = @sprintf( "%s/mpirank%04d_step%04d", vtkpath, MPI.Comm_rank(mpicomm), vtkstep[1] ) @debug "doing VTK output" outprefix statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) auxiliarynames = flattenednames(vars_state(model, Auxiliary(), eltype(Q))) writevtk( outprefix, Q, dg, statenames, dg.state_auxiliary, auxiliarynames, ) vtkstep[1] += 1 nothing end cb = (cb..., cbvtk) end solve!(Q, lsrk; timeend = timeend, callbacks = cb) error = euclidean_distance(Q, Qe) / norm(Qe) @info @sprintf( """Finished error = %.16e """, error ) return error end ################ # Start Driver # ################ let ClimateMachine.init() ArrayType = ClimateMachine.array_type() mpicomm = MPI.COMM_WORLD model = setup_model(FT, stommel, linear, τₒ, fₒ, 
β, λ, ν, Lˣ, Lʸ, H) if test == 1 cellsrange = 10:10 orderrange = 4:4 testval = 1.6068814534535144e-03 elseif test == 2 cellsrange = 5:5:10 orderrange = 3:4 elseif test > 2 cellsrange = 5:5:25 orderrange = 6:10 end errors = zeros(FT, length(cellsrange), length(orderrange)) for (i, Ne) in enumerate(cellsrange) brickrange = ( range(FT(0); length = Ne + 1, stop = Lˣ), range(FT(0); length = Ne + 1, stop = Lʸ), ) topl = BrickTopology( mpicomm, brickrange, periodicity = (false, false), boundary = ((1, 1), (1, 1)), ) for (j, N) in enumerate(orderrange) @info "running Ne $Ne and N $N with" dt = (Lˣ / c) / Ne / N^2 @info @sprintf("\n dt = %f", dt) errors[i, j] = test_run(mpicomm, topl, ArrayType, N, dt, FT, model, test) end end @test errors[end, end] ≈ testval #= msg = "" for i in length(cellsrange)-1 rate = log2(errors[i, end] - log2(errors[i+1, end])) msg *= @sprintf("\n rate for Ne %d = %e", cellsrange[i], rate) end @info msg msg = "" for j in length(orderrange)-1 rate = log2(errors[end, j] - log2(errors[end, j+1])) msg *= @sprintf("\n rate for N %d = %e", orderrange[j], rate) end @info msg =# end ================================================ FILE: test/Ocean/ShallowWater/test_2D_spindown.jl ================================================ #!/usr/bin/env julia --project using Test using ClimateMachine ClimateMachine.init() using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.Mesh.Filters using ClimateMachine.VariableTemplates using ClimateMachine.Mesh.Grids: polynomialorders using ClimateMachine.Ocean.ShallowWater using ClimateMachine.Ocean.OceanProblems using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.BalanceLaws: vars_state, Prognostic, Auxiliary using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.VTK using MPI using LinearAlgebra using StaticArrays using Logging, Printf, Dates using CLIMAParameters using 
CLIMAParameters.Planet: grav

# Minimal parameter set for this test (uses CLIMAParameters defaults).
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Spin-down test for the 2D shallow-water model on a doubly-periodic box:
# integrate the SimpleBox problem with LSRK54 from t = 0 to `timeend` and
# compare the result against the state produced by `init_ode_state` at
# `timeend` (relative L2 error must be below 0.005). Optionally checks the
# StateCheck statistics against `refDat`.
# NOTE(review): FT, N, Lˣ, Lʸ, H, xrange, yrange, tout, timeend and vtkpath
# are module-level globals defined below in this file.
function run_hydrostatic_spindown(; refDat = ())
    mpicomm = MPI.COMM_WORLD
    ArrayType = ClimateMachine.array_type()

    # Logging level is controlled by the JULIA_LOG_LEVEL environment variable;
    # only rank 0 logs to stderr, other ranks are silenced.
    ll = uppercase(get(ENV, "JULIA_LOG_LEVEL", "INFO"))
    loglevel = ll == "DEBUG" ? Logging.Debug :
        ll == "WARN" ? Logging.Warn :
        ll == "ERROR" ? Logging.Error : Logging.Info
    logger_stream = MPI.Comm_rank(mpicomm) == 0 ? stderr : devnull
    global_logger(ConsoleLogger(logger_stream, loglevel))

    # Doubly-periodic 2D brick topology and spectral-element grid.
    brickrange_2D = (xrange, yrange)
    topl_2D = BrickTopology(
        mpicomm,
        brickrange_2D,
        periodicity = (true, true),
        boundary = ((0, 0), (0, 0)),
    )
    grid_2D = DiscontinuousSpectralElementGrid(
        topl_2D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    problem = SimpleBox{FT}(Lˣ, Lʸ, H)

    # Shallow-water model with constant horizontal viscosity and no rotation
    # (fₒ = β = 0).
    model_2D = ShallowWaterModel{FT}(
        param_set,
        problem,
        ShallowWater.ConstantViscosity{FT}(5e3),
        nothing;
        c = FT(1),
        fₒ = FT(0),
        β = FT(0),
    )

    # Adjust dt_fast so that an integer number of steps fits in one output
    # interval `tout`.
    dt_fast = 300
    nout = ceil(Int64, tout / dt_fast)
    dt_fast = tout / nout

    dg_2D = DGModel(
        model_2D,
        grid_2D,
        CentralNumericalFluxFirstOrder(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q_2D = init_ode_state(dg_2D, FT(0); init_on_cpu = true)

    lsrk_2D = LSRK54CarpenterKennedy(dg_2D, Q_2D, dt = dt_fast, t0 = 0)
    odesolver = lsrk_2D

    vtkstep = [0, 0]
    cbvector = make_callbacks(
        vtkpath,
        vtkstep,
        nout,
        mpicomm,
        odesolver,
        dg_2D,
        model_2D,
        Q_2D,
    )

    eng0 = norm(Q_2D)
    @info @sprintf """Starting norm(Q₀) = %.16e ArrayType = %s""" eng0 ArrayType

    solve!(Q_2D, odesolver; timeend = timeend, callbacks = cbvector)

    # Reference state at `timeend` from the problem's initial-condition
    # routine; compare via relative L2 (euclidean) distance.
    Qe_2D = init_ode_state(dg_2D, timeend, init_on_cpu = true)

    error_2D = euclidean_distance(Q_2D, Qe_2D) / norm(Qe_2D)

    println("2D error = ", error_2D)
    @test isapprox(error_2D, FT(0.0); atol = 0.005)

    ## Check results against reference
    ClimateMachine.StateCheck.scprintref(cbvector[end])
    if length(refDat) > 0
        @test ClimateMachine.StateCheck.scdocheck(cbvector[end], refDat)
    end

    return nothing
end

# Build the callback tuple for the run: VTK output every `nout` steps
# (disabled for CI — see the commented return), a wall-clock info logger,
# and a StateCheck accumulator (always the last element of the tuple).
function make_callbacks(
    vtkpath,
    vtkstep,
    nout,
    mpicomm,
    odesolver,
    dg_fast,
    model_fast,
    Q_fast,
)
    # Start from a clean VTK output directory.
    if isdir(vtkpath)
        rm(vtkpath, recursive = true)
    end
    mkpath(vtkpath)
    mkpath(vtkpath * "/fast")

    # Write one VTK snapshot of the prognostic + auxiliary state.
    function do_output(span, vtkstep, model, dg, Q)
        outprefix = @sprintf(
            "%s/%s/mpirank%04d_step%04d",
            vtkpath,
            span,
            MPI.Comm_rank(mpicomm),
            vtkstep
        )
        @info "doing VTK output" outprefix
        statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
        auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q)))
        writevtk(outprefix, Q, dg, statenames, dg.state_auxiliary, auxnames)
    end

    do_output("fast", vtkstep[2], model_fast, dg_fast, Q_fast)
    cbvtk_fast = GenericCallbacks.EveryXSimulationSteps(nout) do (init = false)
        do_output("fast", vtkstep[2], model_fast, dg_fast, Q_fast)
        vtkstep[2] += 1
        nothing
    end

    # Periodic progress report: simulated time, wall-clock runtime, and the
    # norm of the state (a NaN/blow-up indicator).
    starttime = Ref(now())
    cbinfo = GenericCallbacks.EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q_fast)
            @info @sprintf(
                """Update simtime = %8.2f / %8.2f runtime = %s norm(Q) = %.16e""",
                ODESolvers.gettime(odesolver),
                timeend,
                Dates.format(
                    convert(Dates.DateTime, Dates.now() - starttime[]),
                    Dates.dateformat"HH:MM:SS",
                ),
                energy
            )
        end
    end

    cbcs_dg = ClimateMachine.StateCheck.sccreate(
        [(Q_fast, "2D state")],
        nout;
        prec = 12,
    )

    # don't write vtk during CI testing
    # return (cbvtk_fast, cbinfo, cbcs_dg)
    return (cbinfo, cbcs_dg)
end

#################
# RUN THE TESTS #
#################

FT = Float64
vtkpath = abspath(joinpath(
    ClimateMachine.Settings.output_dir,
    "vtk_shallow_spindown",
))

# Run length and output interval.
const timeend = FT(24 * 3600) # s
const tout = FT(2 * 3600) # s
# const timeend = 1200 # s
# const tout = 600 # s

# Polynomial order, element counts, and domain dimensions.
const N = 4
const Nˣ = 5
const Nʸ = 5
const Lˣ = 1e6 # m
const Lʸ = 1e6 # m
const H = 400 # m

xrange = range(FT(0); length = Nˣ + 1, stop = Lˣ)
yrange = range(FT(0); length = Nʸ + 1, stop = Lʸ)

@testset "$(@__FILE__)" begin
    include("../refvals/2D_hydrostatic_spindown_refvals.jl")

    run_hydrostatic_spindown(refDat = refVals.explicit) # error
# error = 0.00011327920483879001
end # (closes the @testset opened above)

================================================ FILE: test/Ocean/SplitExplicit/hydrostatic_spindown.jl ================================================

include("split_explicit.jl")

# Build a SplitConfig for a split-explicit ocean test: constructs matching 2D
# (barotropic) and 3D (baroclinic) grids over the same horizontal box, sets up
# the SimpleBox problem, and delegates model construction to `setup_models`
# (dispatched on the solver type).
# NOTE(review): FT, param_set and the 6-argument SplitConfig constructor are
# expected to come from split_explicit.jl included above — confirm there.
function SplitConfig(
    name,
    resolution,
    dimensions,
    coupling,
    rotation = Fixed();
    boundary_conditions = (
        OceanBC(Impenetrable(FreeSlip()), Insulating()),
        OceanBC(Penetrable(FreeSlip()), Insulating()),
    ),
    solver = SplitExplicitSolver,
    dt_slow = 90 * 60,
)
    mpicomm = MPI.COMM_WORLD
    ArrayType = ClimateMachine.array_type()

    N, Nˣ, Nʸ, Nᶻ = resolution
    Lˣ, Lʸ, H = dimensions

    xrange = range(FT(0); length = Nˣ + 1, stop = Lˣ)
    yrange = range(FT(0); length = Nʸ + 1, stop = Lʸ)
    zrange = range(FT(-H); length = Nᶻ + 1, stop = 0)

    # Horizontally periodic 2D grid for the barotropic model.
    brickrange_2D = (xrange, yrange)
    topl_2D = BrickTopology(
        mpicomm,
        brickrange_2D,
        periodicity = (true, true),
        boundary = ((0, 0), (0, 0)),
    )
    grid_2D = DiscontinuousSpectralElementGrid(
        topl_2D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    # Stacked 3D grid, periodic horizontally, bounded vertically (tags 1/2).
    brickrange_3D = (xrange, yrange, zrange)
    topl_3D = StackedBrickTopology(
        mpicomm,
        brickrange_3D;
        periodicity = (true, true, false),
        boundary = ((0, 0), (0, 0), (1, 2)),
    )
    grid_3D = DiscontinuousSpectralElementGrid(
        topl_3D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    problem = SimpleBox{FT}(
        dimensions...;
        BC = boundary_conditions,
        rotation = rotation,
    )

    dg_3D, dg_2D = setup_models(
        solver,
        problem,
        grid_3D,
        grid_2D,
        param_set,
        coupling,
        dt_slow,
    )

    return SplitConfig(name, dg_3D, dg_2D, solver, mpicomm, ArrayType)
end

# Model setup for the SplitExplicitSolver variant: HydrostaticBoussinesqModel
# (3D) + ShallowWaterModel (2D) with matched horizontal viscosity; the trailing
# positional argument (dt_slow) is unused for this solver.
function setup_models(
    ::Type{SplitExplicitSolver},
    problem,
    grid_3D,
    grid_2D,
    param_set,
    coupling,
    _,
)
    model_3D = HydrostaticBoussinesqModel{FT}(
        param_set,
        problem;
        coupling = coupling,
        cʰ = FT(1),
        αᵀ = FT(0),
        κʰ = FT(0),
        κᶻ = FT(0),
    )

    # The 2D model reuses the 3D model's horizontal viscosity νʰ.
    model_2D = ShallowWaterModel{FT}(
        param_set,
        problem,
        ShallowWater.ConstantViscosity{FT}(model_3D.νʰ),
        nothing;
        coupling = coupling,
        c = FT(1),
    )

    # Filters act on the vertical (last) polynomial order.
    N = polynomialorders(grid_3D)
    Nvert = N[end]
    vert_filter = CutoffFilter(grid_3D, Nvert - 1)
    exp_filter = ExponentialFilter(grid_3D, 1, 8)

    # Auxiliary DG model used to compute vertical integrals of the 3D state.
    integral_model = DGModel(
        VerticalIntegralModel(model_3D),
        grid_3D,
        CentralNumericalFluxFirstOrder(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    dg_2D = DGModel(
        model_2D,
        grid_2D,
        CentralNumericalFluxFirstOrder(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q_2D = init_ode_state(dg_2D, FT(0); init_on_cpu = true)

    # The 3D model carries the 2D model, its state and the filters as
    # modeldata so the split-explicit coupling can reach them.
    modeldata = (
        dg_2D = dg_2D,
        Q_2D = Q_2D,
        vert_filter = vert_filter,
        exp_filter = exp_filter,
        integral_model = integral_model,
    )

    dg_3D = DGModel(
        model_3D,
        grid_3D,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient();
        modeldata = modeldata,
    )

    return dg_3D, dg_2D
end

# Model setup for the SplitExplicitLSRK2nSolver variant: OceanModel (3D) with
# optional implicit vertical diffusion substeps + BarotropicModel (2D); the
# `coupling` positional argument is unused for this solver.
function setup_models(
    ::Type{SplitExplicitLSRK2nSolver},
    problem,
    grid_3D,
    grid_2D,
    param_set,
    _,
    dt_slow,
)
    add_fast_substeps = 2
    numImplSteps = 5
    # Implicit vertical-diffusion sub-time-step; falls back to dt_slow when
    # implicit stepping is disabled (numImplSteps == 0).
    numImplSteps > 0 ? ivdc_dt = dt_slow / FT(numImplSteps) : ivdc_dt = dt_slow

    model_3D = OceanModel{FT}(
        param_set,
        problem,
        cʰ = FT(1),
        αᵀ = FT(0),
        κʰ = FT(0),
        κᶻ = FT(0),
        add_fast_substeps = add_fast_substeps,
        numImplSteps = numImplSteps,
        ivdc_dt = ivdc_dt,
    )

    model_2D = BarotropicModel(model_3D)

    dg_2D = DGModel(
        model_2D,
        grid_2D,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q_2D = init_ode_state(dg_2D, FT(0); init_on_cpu = true)

    dg_3D = OceanDGModel(
        model_3D,
        grid_3D,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient();
        modeldata = (dg_2D, Q_2D),
    )

    return dg_3D, dg_2D
end

================================================ FILE: test/Ocean/SplitExplicit/simple_box_2dt.jl ================================================
#!/usr/bin/env julia --project

using ClimateMachine
ClimateMachine.init(parse_clargs = true)

using ClimateMachine.BalanceLaws: vars_state, Prognostic, Auxiliary
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Filters
using ClimateMachine.DGMethods
using
ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates: flattenednames
using ClimateMachine.Ocean.SplitExplicit01
using ClimateMachine.GenericCallbacks
using ClimateMachine.VTK
using ClimateMachine.Checkpoint

using Test
using MPI
using LinearAlgebra
using StaticArrays
using Logging, Printf, Dates

using CLIMAParameters
using CLIMAParameters.Planet: grav

# Minimal parameter set for this test (uses CLIMAParameters defaults).
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

import ClimateMachine.Ocean.SplitExplicit01:
    ocean_init_aux!,
    ocean_init_state!,
    ocean_boundary_state!,
    CoastlineFreeSlip,
    CoastlineNoSlip,
    OceanFloorFreeSlip,
    OceanFloorNoSlip,
    OceanSurfaceNoStressNoForcing,
    OceanSurfaceStressNoForcing,
    OceanSurfaceNoStressForcing,
    OceanSurfaceStressForcing

import ClimateMachine.DGMethods:
    update_auxiliary_state!, update_auxiliary_state_gradient!, VerticalDirection

# using GPUifyLoops

const ArrayType = ClimateMachine.array_type()

# Rectangular ocean-box problem description: domain extents, wind stress τₒ,
# surface-restoring coefficient λʳ, restoring temperature θᴱ, and the
# boundary-condition tuple.
struct SimpleBox{T, BC} <: AbstractOceanProblem
    Lˣ::T
    Lʸ::T
    H::T
    τₒ::T
    λʳ::T
    θᴱ::T
    boundary_conditions::BC
end

# Initial prognostic state: fluid at rest, flat free surface, and a
# temperature field varying as a cosine in y and decaying linearly with depth.
function ocean_init_state!(p::SimpleBox, Q, A, localgeo, t)
    coords = localgeo.coord
    @inbounds y = coords[2]
    @inbounds z = coords[3]
    @inbounds H = p.H

    Q.u = @SVector [-0, -0]
    Q.η = -0
    Q.θ = (5 + 4 * cos(y * π / p.Lʸ)) * (1 + z / H)

    return nothing
end

# Zero-initialize the 3D (baroclinic) model's auxiliary variables and record
# the y coordinate.
function ocean_init_aux!(m::OceanModel, p::SimpleBox, A, geom)
    FT = eltype(A)
    @inbounds A.y = geom.coord[2]

    # not sure if this is needed but getting weird intialization stuff
    A.w = -0
    A.pkin = -0
    A.wz0 = -0

    A.u_d = @SVector [-0, -0]
    A.ΔGu = @SVector [-0, -0]

    return nothing
end

# A is Filled afer the state
# Zero-initialize the 2D (barotropic) model's auxiliary variables.
function ocean_init_aux!(m::BarotropicModel, P::SimpleBox, A, geom)
    @inbounds A.y = geom.coord[2]

    A.Gᵁ = @SVector [-0, -0]
    A.U_c = @SVector [-0, -0]
    A.η_c = -0

    A.U_s = @SVector [-0, -0]
    A.η_s = -0

    A.Δu = @SVector [-0, -0]
    A.η_diag = -0
    A.Δη = -0

    return nothing
end

# Driver: build 2D/3D grids and models, pick time steps, optionally restart
# from a checkpoint numbered `restart`, run the split-explicit solve, and
# check StateCheck statistics against stored reference values.
function main(; restart = 0)
    mpicomm = MPI.COMM_WORLD

    # Logging level from JULIA_LOG_LEVEL; only rank 0 writes to stderr.
    ll = uppercase(get(ENV, "JULIA_LOG_LEVEL", "INFO"))
    loglevel = ll == "DEBUG" ? Logging.Debug :
        ll == "WARN" ? Logging.Warn :
        ll == "ERROR" ? Logging.Error : Logging.Info
    logger_stream = MPI.Comm_rank(mpicomm) == 0 ? stderr : devnull
    global_logger(ConsoleLogger(logger_stream, loglevel))

    # Fresh runs wipe any stale output directory (rank 0 only).
    if restart == 0 && MPI.Comm_rank(mpicomm) == 0 && isdir(vtkpath)
        @info @sprintf("""Remove old dir: %s and make new one""", vtkpath)
        rm(vtkpath, recursive = true)
    end

    # Non-periodic 2D grid for the barotropic model.
    brickrange_2D = (xrange, yrange)
    topl_2D =
        BrickTopology(mpicomm, brickrange_2D, periodicity = (false, false))
    grid_2D = DiscontinuousSpectralElementGrid(
        topl_2D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    # Closed-box 3D grid; boundary tags: 1 = coastline, 2 = floor, 3 = surface.
    brickrange_3D = (xrange, yrange, zrange)
    topl_3D = StackedBrickTopology(
        mpicomm,
        brickrange_3D;
        periodicity = (false, false, false),
        boundary = ((1, 1), (1, 1), (2, 3)),
    )
    grid_3D = DiscontinuousSpectralElementGrid(
        topl_3D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    BC = (
        ClimateMachine.Ocean.SplitExplicit01.CoastlineNoSlip(),
        ClimateMachine.Ocean.SplitExplicit01.OceanFloorNoSlip(),
        ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceStressForcing(),
    )
    prob = SimpleBox{FT, typeof(BC)}(Lˣ, Lʸ, H, τₒ, λʳ, θᴱ, BC)
    gravity::FT = grav(param_set)

    #- set model time-step:
    dt_fast = 240
    dt_slow = 5400
    # dt_fast = 300
    # dt_slow = 300

    # Fit an integer number of slow steps into the checkpoint interval (or the
    # full run when checkpointing is disabled, in which case n_chkp ends at 0).
    if t_chkp > 0
        n_chkp = ceil(Int64, t_chkp / dt_slow)
        dt_slow = t_chkp / n_chkp
    else
        n_chkp = ceil(Int64, runTime / dt_slow)
        dt_slow = runTime / n_chkp
        n_chkp = 0
    end
    n_outp = t_outp > 0 ? floor(Int64, t_outp / dt_slow) :
        ceil(Int64, runTime / dt_slow)

    # Implicit vertical-diffusion substep; with numImplSteps == 0 this file
    # computes ivdc_dt but does not pass it to the model below.
    ivdc_dt = numImplSteps > 0 ? dt_slow / FT(numImplSteps) : dt_slow

    model = OceanModel{FT}(
        prob,
        grav = gravity,
        cʰ = cʰ,
        add_fast_substeps = add_fast_substeps,
    )
    # model = OceanModel{FT}(prob, cʰ = cʰ, fₒ = FT(0), β = FT(0) )
    # model = OceanModel{FT}(prob, cʰ = cʰ, νʰ = FT(1e3), νᶻ = FT(1e-3) )
    # model = OceanModel{FT}(prob, cʰ = cʰ, νʰ = FT(0), fₒ = FT(0), β = FT(0) )

    barotropicmodel = BarotropicModel(model)

    # Report CFL-style stability limits for the chosen time steps.
    minΔx = min_node_distance(grid_3D, HorizontalDirection())
    minΔz = min_node_distance(grid_3D, VerticalDirection())
    #- 2 horiz directions
    gravity_max_dT = 1 / (2 * sqrt(gravity * H) / minΔx)
    # dt_fast = minimum([gravity_max_dT])

    #- 2 horiz directions + harmonic visc or diffusion: 2^2 factor in CFL:
    viscous_max_dT = 1 / (2 * model.νʰ / minΔx^2 + model.νᶻ / minΔz^2) / 4
    diffusive_max_dT = 1 / (2 * model.κʰ / minΔx^2 + model.κᶻ / minΔz^2) / 4
    # dt_slow = minimum([diffusive_max_dT, viscous_max_dT])

    @info @sprintf(
        """Update Gravity Max-dT = %.1f Timestep = %.1f""",
        gravity_max_dT,
        dt_fast
    )
    @info @sprintf(
        """Update Viscous Max-dT = %.1f Diffusive Max-dT = %.1f Timestep = %.1f""",
        viscous_max_dT,
        diffusive_max_dT,
        dt_slow
    )

    if restart > 0
        # Resume both states from numbered checkpoint files.
        direction = EveryDirection()
        Q_3D, _, t0 =
            read_checkpoint(vtkpath, "baroclinic", ArrayType, mpicomm, restart)
        Q_2D, _, _ =
            read_checkpoint(vtkpath, "barotropic", ArrayType, mpicomm, restart)

        dg = OceanDGModel(
            model,
            grid_3D,
            RusanovNumericalFlux(),
            CentralNumericalFluxSecondOrder(),
            CentralNumericalFluxGradient(),
        )
        barotropic_dg = DGModel(
            barotropicmodel,
            grid_2D,
            RusanovNumericalFlux(),
            CentralNumericalFluxSecondOrder(),
            CentralNumericalFluxGradient(),
        )

        Q_3D = restart_ode_state(dg, Q_3D; init_on_cpu = true)
        Q_2D = restart_ode_state(barotropic_dg, Q_2D; init_on_cpu = true)
    else
        # Cold start from the problem's initial conditions.
        t0 = 0
        dg = OceanDGModel(
            model,
            grid_3D,
            # CentralNumericalFluxFirstOrder(),
            RusanovNumericalFlux(),
            CentralNumericalFluxSecondOrder(),
            CentralNumericalFluxGradient(),
        )
        barotropic_dg = DGModel(
            barotropicmodel,
            grid_2D,
            # CentralNumericalFluxFirstOrder(),
            RusanovNumericalFlux(),
            CentralNumericalFluxSecondOrder(),
            CentralNumericalFluxGradient(),
        )

        Q_3D = init_ode_state(dg, FT(0); init_on_cpu = true)
        Q_2D = init_ode_state(barotropic_dg, FT(0); init_on_cpu = true)
    end

    timeend = runTime + t0

    # Separate LSRK integrators for the slow (3D) and fast (2D) systems,
    # combined by the split-explicit driver.
    lsrk_ocean = LSRK54CarpenterKennedy(dg, Q_3D, dt = dt_slow, t0 = t0)
    lsrk_barotropic =
        LSRK54CarpenterKennedy(barotropic_dg, Q_2D, dt = dt_fast, t0 = t0)

    odesolver = SplitExplicitLSRK2nSolver(lsrk_ocean, lsrk_barotropic)

    #-- Set up State Check call back for config state arrays, called every ntFrq_SC time steps
    cbcs_dg = ClimateMachine.StateCheck.sccreate(
        [
            (Q_3D, "oce Q_3D"),
            (dg.state_auxiliary, "oce aux"),
            # (dg.diffstate,"oce diff",),
            # (lsrk_ocean.dQ,"oce_dQ",),
            # (dg.modeldata.tendency_dg.state_auxiliary,"tend Int aux",),
            # (dg.modeldata.conti3d_Q,"conti3d_Q",),
            (Q_2D, "baro Q_2D"),
            (barotropic_dg.state_auxiliary, "baro aux"),
        ],
        ntFrq_SC;
        prec = 12,
    )
    # (barotropic_dg.diffstate,"baro diff",),
    # (lsrk_barotropic.dQ,"baro_dQ",)
    #--

    # step = [vtk slow counter, vtk fast counter, checkpoint number].
    cb_ntFrq = [n_outp, n_chkp]
    outp_nb = round(Int64, restart * n_chkp / n_outp)
    step = [outp_nb, outp_nb, restart + 1]
    cbvector = make_callbacks(
        vtkpath,
        step,
        cb_ntFrq,
        timeend,
        mpicomm,
        odesolver,
        dg,
        model,
        Q_3D,
        barotropic_dg,
        barotropicmodel,
        Q_2D,
    )

    eng0 = norm(Q_3D)
    @info @sprintf """Starting norm(Q₀) = %.16e ArrayType = %s""" eng0 ArrayType

    # slow fast state tuple
    Qvec = (slow = Q_3D, fast = Q_2D)
    # solve!(Qvec, odesolver; timeend = timeend, callbacks = cbvector)
    cbv = (cbvector..., cbcs_dg)
    solve!(Qvec, odesolver; timeend = timeend, callbacks = cbv)

    ## Enable the code block below to print table for use in reference value code
    ## reference value code sits in a file named $(@__FILE__)_refvals.jl. It is hand
    ## edited using code generated by block below when reference values are updated.
    regenRefVals = false
    if regenRefVals
        ## Print state statistics in format for use as reference values
        println(
            "# SC ========== Test number ",
            1,
            " reference values and precision match template. =======",
        )
        println("# SC ========== $(@__FILE__) test reference values ======================================")
        ClimateMachine.StateCheck.scprintref(cbcs_dg)
        println("# SC ====================================================================================")
    end

    ## Check results against reference if present
    checkRefVals = true
    if checkRefVals
        include("../refvals/simple_box_2dt_refvals.jl")
        refDat = (refVals[1], refPrecs[1])
        checkPass = ClimateMachine.StateCheck.scdocheck(cbcs_dg, refDat)
        checkPass ? checkRep = "Pass" : checkRep = "Fail"
        @info @sprintf("""Compare vs RefVals: %s""", checkRep)
        @test checkPass
    end

    return nothing
end

# Build the callback tuple: per-rank VTK output (plus a rank-0 pvtu index) for
# slow and fast states every n_outp steps, a wall-clock info logger, and —
# when n_chkp > 0 — periodic checkpoint writing for both states.
function make_callbacks(
    vtkpath,
    step,
    ntFrq,
    timeend,
    mpicomm,
    odesolver,
    dg_slow,
    model_slow,
    Q_slow,
    dg_fast,
    model_fast,
    Q_fast,
)
    n_outp = ntFrq[1]
    n_chkp = ntFrq[2]

    mkpath(vtkpath)
    mkpath(vtkpath * "/slow")
    mkpath(vtkpath * "/fast")

    # Write one VTK snapshot (per rank) and, on rank 0 of multi-rank runs,
    # the pvtu file that stitches the rank files together.
    function do_output(span, step, model, dg, Q)
        outprefix = @sprintf(
            "%s/%s/mpirank%04d_step%04d",
            vtkpath,
            span,
            MPI.Comm_rank(mpicomm),
            step
        )
        @info "doing VTK output" outprefix
        statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
        auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q)))
        writevtk(outprefix, Q, dg, statenames, dg.state_auxiliary, auxnames)

        mycomm = Q.mpicomm
        ## Generate the pvtu file for these vtk files
        if MPI.Comm_rank(mpicomm) == 0 && MPI.Comm_size(mpicomm) > 1
            ## name of the pvtu file
            pvtuprefix = @sprintf("%s/%s/step%04d", vtkpath, span, step)
            ## name of each of the ranks vtk files
            prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
                @sprintf("mpirank%04d_step%04d", i - 1, step)
            end
            writepvtu(
                pvtuprefix,
                prefixes,
                (statenames..., auxnames...),
                eltype(Q),
            )
            @info "Done writing VTK: $pvtuprefix"
        end
    end

    do_output("slow", step[1], model_slow, dg_slow, Q_slow)
    step[1] += 1
    cbvtk_slow = GenericCallbacks.EveryXSimulationSteps(n_outp) do (init = false)
        do_output("slow", step[1], model_slow, dg_slow, Q_slow)
        step[1] += 1
        nothing
    end

    do_output("fast", step[2], model_fast, dg_fast, Q_fast)
    step[2] += 1
    cbvtk_fast = GenericCallbacks.EveryXSimulationSteps(n_outp) do (init = false)
        do_output("fast", step[2], model_fast, dg_fast, Q_fast)
        step[2] += 1
        nothing
    end

    # Periodic progress report: simulated time, wall-clock runtime, and the
    # norm of the slow state.
    starttime = Ref(now())
    cbinfo = GenericCallbacks.EveryXWallTimeSeconds(60, mpicomm) do (s = false)
        if s
            starttime[] = now()
        else
            energy = norm(Q_slow)
            @info @sprintf(
                """Update simtime = %8.2f / %8.2f runtime = %s norm(Q) = %.16e""",
                ODESolvers.gettime(odesolver),
                timeend,
                Dates.format(
                    convert(Dates.DateTime, Dates.now() - starttime[]),
                    Dates.dateformat"HH:MM:SS",
                ),
                energy
            )
        end
    end

    if n_chkp > 0
        # Note: write zeros instead of Aux vars (not needed to restart); would be
        # better just to write state vars (once write_checkpoint() can handle it)
        cb_checkpoint = GenericCallbacks.EveryXSimulationSteps(n_chkp) do
            write_checkpoint(
                Q_slow,
                zero(Q_slow),
                odesolver,
                vtkpath,
                "baroclinic",
                mpicomm,
                step[3],
            )
            write_checkpoint(
                Q_fast,
                zero(Q_fast),
                odesolver,
                vtkpath,
                "barotropic",
                mpicomm,
                step[3],
            )
            step[3] += 1
            nothing
        end
        return (cbvtk_slow, cbvtk_fast, cbinfo, cb_checkpoint)
    else
        return (cbvtk_slow, cbvtk_fast, cbinfo)
    end
end

#################
# RUN THE TESTS #
#################

FT = Float64
vtkpath = "vtk_split"

# Run length, output interval, and checkpoint interval.
const runTime = 5 * 24 * 3600 # s
const t_outp = 24 * 3600 # s
const t_chkp = runTime # s
#const runTime = 6 * 3600 # s
#const t_outp = 6 * 3600 # s
#const t_chkp = 0

const ntFrq_SC = 1 # frequency (in time-step) for State-Check output

# Polynomial order, element counts, and domain dimensions.
const N = 4
const Nˣ = 20
const Nʸ = 20
const Nᶻ = 20
const Lˣ = 4e6 # m
const Lʸ = 4e6 # m
const H = 1000 # m

xrange = range(FT(0); length = Nˣ + 1, stop = Lˣ)
yrange = range(FT(0); length = Nʸ + 1, stop = Lʸ)
zrange = range(FT(-H); length = Nᶻ + 1, stop = 0)

#const cʰ = sqrt(gravity * H)
const cʰ = 1 # typical of ocean internal-wave speed
const cᶻ = 0

#- inverse ratio of additional fast time steps (for weighted average)
# --> do 1/add more time-steps and average from: 1 - 1/add up to: 1 + 1/add
# e.g., = 1 --> 100% more ; = 2
# --> 50% more ; = 3 --> 33% more ...
add_fast_substeps = 2

#- number of Implicit vertical-diffusion sub-time-steps within one model full time-step
# default = 0 : disable implicit vertical diffusion
numImplSteps = 0

# Wind stress, surface restoring, and restoring temperature.
const τₒ = 2e-1 # (Pa = N/m^2)
const λʳ = 20 // 86400 # m/s
#- since we are using old BC (with factor of 2), take only half:
#const τₒ = 1e-1
#const λʳ = 10 // 86400
const θᴱ = 10 # deg.C

@testset "$(@__FILE__)" begin
    main(restart = 0)
end

================================================ FILE: test/Ocean/SplitExplicit/simple_box_ivd.jl ================================================
#!/usr/bin/env julia --project

using ClimateMachine
ClimateMachine.init(parse_clargs = true)

using ClimateMachine.BalanceLaws: vars_state, Prognostic, Auxiliary
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Filters
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates: flattenednames
using ClimateMachine.Ocean.SplitExplicit01
using ClimateMachine.GenericCallbacks
using ClimateMachine.VTK
using ClimateMachine.Checkpoint

using Test
using MPI
using LinearAlgebra
using StaticArrays
using Logging, Printf, Dates

using CLIMAParameters
using CLIMAParameters.Planet: grav

# Minimal parameter set for this test (uses CLIMAParameters defaults).
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

import ClimateMachine.Ocean.SplitExplicit01:
    ocean_init_aux!,
    ocean_init_state!,
    ocean_boundary_state!,
    CoastlineFreeSlip,
    CoastlineNoSlip,
    OceanFloorFreeSlip,
    OceanFloorNoSlip,
    OceanSurfaceNoStressNoForcing,
    OceanSurfaceStressNoForcing,
    OceanSurfaceNoStressForcing,
    OceanSurfaceStressForcing

import ClimateMachine.DGMethods:
    update_auxiliary_state!, update_auxiliary_state_gradient!, VerticalDirection

# using GPUifyLoops

const ArrayType = ClimateMachine.array_type()

# Rectangular ocean-box problem description: domain extents, wind stress τₒ,
# surface-restoring coefficient λʳ, restoring temperature θᴱ, and the
# boundary-condition tuple.
struct SimpleBox{T, BC} <: AbstractOceanProblem
    Lˣ::T
    Lʸ::T
    H::T
    τₒ::T
    λʳ::T
    θᴱ::T
    boundary_conditions::BC
end

# Initial prognostic state: fluid at rest, flat free surface, and a
# temperature field varying as a cosine in y and decaying linearly with depth.
function ocean_init_state!(p::SimpleBox, Q, A, localgeo, t)
    coords = localgeo.coord
    @inbounds y = coords[2]
    @inbounds z = coords[3]
    @inbounds H = p.H

    Q.u = @SVector [-0, -0]
    Q.η = -0
    Q.θ = (5 + 4 * cos(y * π / p.Lʸ)) * (1 + z / H)

    return nothing
end

# Zero-initialize the 3D (baroclinic) model's auxiliary variables and record
# the y coordinate.
function ocean_init_aux!(m::OceanModel, p::SimpleBox, A, geom)
    FT = eltype(A)
    @inbounds A.y = geom.coord[2]

    # not sure if this is needed but getting weird intialization stuff
    A.w = -0
    A.pkin = -0
    A.wz0 = -0

    A.u_d = @SVector [-0, -0]
    A.ΔGu = @SVector [-0, -0]

    return nothing
end

# A is Filled afer the state
# Zero-initialize the 2D (barotropic) model's auxiliary variables.
function ocean_init_aux!(m::BarotropicModel, P::SimpleBox, A, geom)
    @inbounds A.y = geom.coord[2]

    A.Gᵁ = @SVector [-0, -0]
    A.U_c = @SVector [-0, -0]
    A.η_c = -0

    A.U_s = @SVector [-0, -0]
    A.η_s = -0

    A.Δu = @SVector [-0, -0]
    A.η_diag = -0
    A.Δη = -0

    return nothing
end

# Driver (implicit-vertical-diffusion variant): same structure as
# simple_box_2dt.jl but passes numImplSteps/ivdc_dt/κᶜ to the OceanModel.
function main(; restart = 0)
    mpicomm = MPI.COMM_WORLD

    # Logging level from JULIA_LOG_LEVEL; only rank 0 writes to stderr.
    ll = uppercase(get(ENV, "JULIA_LOG_LEVEL", "INFO"))
    loglevel = ll == "DEBUG" ? Logging.Debug :
        ll == "WARN" ? Logging.Warn :
        ll == "ERROR" ? Logging.Error : Logging.Info
    logger_stream = MPI.Comm_rank(mpicomm) == 0 ? stderr : devnull
    global_logger(ConsoleLogger(logger_stream, loglevel))

    # Fresh runs wipe any stale output directory (rank 0 only).
    if restart == 0 && MPI.Comm_rank(mpicomm) == 0 && isdir(vtkpath)
        @info @sprintf("""Remove old dir: %s and make new one""", vtkpath)
        rm(vtkpath, recursive = true)
    end

    # Non-periodic 2D grid for the barotropic model.
    brickrange_2D = (xrange, yrange)
    topl_2D =
        BrickTopology(mpicomm, brickrange_2D, periodicity = (false, false))
    grid_2D = DiscontinuousSpectralElementGrid(
        topl_2D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    # Closed-box 3D grid; boundary tags: 1 = coastline, 2 = floor, 3 = surface.
    brickrange_3D = (xrange, yrange, zrange)
    topl_3D = StackedBrickTopology(
        mpicomm,
        brickrange_3D;
        periodicity = (false, false, false),
        boundary = ((1, 1), (1, 1), (2, 3)),
    )
    grid_3D = DiscontinuousSpectralElementGrid(
        topl_3D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    BC = (
        ClimateMachine.Ocean.SplitExplicit01.CoastlineNoSlip(),
        ClimateMachine.Ocean.SplitExplicit01.OceanFloorNoSlip(),
        ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceStressForcing(),
    )
    prob = SimpleBox{FT, typeof(BC)}(Lˣ, Lʸ, H, τₒ, λʳ, θᴱ, BC)
    gravity::FT = grav(param_set)

    #- set model time-step:
    dt_fast = 240
    dt_slow = 5400
    # dt_fast = 300
    # dt_slow = 300

    # Fit an integer number of slow steps into the checkpoint interval (or the
    # full run when checkpointing is disabled, in which case n_chkp ends at 0).
    if t_chkp > 0
        n_chkp = ceil(Int64, t_chkp / dt_slow)
        dt_slow = t_chkp / n_chkp
    else
        n_chkp = ceil(Int64, runTime / dt_slow)
        dt_slow = runTime / n_chkp
        n_chkp = 0
    end
    n_outp = t_outp > 0 ? floor(Int64, t_outp / dt_slow) :
        ceil(Int64, runTime / dt_slow)

    # Implicit vertical-diffusion substep length.
    ivdc_dt = numImplSteps > 0 ? dt_slow / FT(numImplSteps) : dt_slow

    model = OceanModel{FT}(
        prob,
        grav = gravity,
        cʰ = cʰ,
        add_fast_substeps = add_fast_substeps,
        numImplSteps = numImplSteps,
        ivdc_dt = ivdc_dt,
        κᶜ = FT(0.1),
    )
    # model = OceanModel{FT}(prob, cʰ = cʰ, fₒ = FT(0), β = FT(0) )
    # model = OceanModel{FT}(prob, cʰ = cʰ, νʰ = FT(1e3), νᶻ = FT(1e-3) )
    # model = OceanModel{FT}(prob, cʰ = cʰ, νʰ = FT(0), fₒ = FT(0), β = FT(0) )

    barotropicmodel = BarotropicModel(model)

    # Report CFL-style stability limits for the chosen time steps.
    minΔx = min_node_distance(grid_3D, HorizontalDirection())
    minΔz = min_node_distance(grid_3D, VerticalDirection())
    #- 2 horiz directions
    gravity_max_dT = 1 / (2 * sqrt(gravity * H) / minΔx)
    # dt_fast = minimum([gravity_max_dT])

    #- 2 horiz directions + harmonic visc or diffusion: 2^2 factor in CFL:
    viscous_max_dT = 1 / (2 * model.νʰ / minΔx^2 + model.νᶻ / minΔz^2) / 4
    diffusive_max_dT = 1 / (2 * model.κʰ / minΔx^2 + model.κᶻ / minΔz^2) / 4
    # dt_slow = minimum([diffusive_max_dT, viscous_max_dT])

    @info @sprintf(
        """Update Gravity Max-dT = %.1f Timestep = %.1f""",
        gravity_max_dT,
        dt_fast
    )
    @info @sprintf(
        """Update Viscous Max-dT = %.1f Diffusive Max-dT = %.1f Timestep = %.1f""",
        viscous_max_dT,
        diffusive_max_dT,
        dt_slow
    )

    if restart > 0
        # Resume both states from numbered checkpoint files.
        direction = EveryDirection()
        Q_3D, _, t0 =
            read_checkpoint(vtkpath, "baroclinic", ArrayType, mpicomm, restart)
        Q_2D, _, _ =
            read_checkpoint(vtkpath, "barotropic", ArrayType, mpicomm, restart)

        dg = OceanDGModel(
            model,
            grid_3D,
            RusanovNumericalFlux(),
            CentralNumericalFluxSecondOrder(),
            CentralNumericalFluxGradient(),
        )
        barotropic_dg = DGModel(
            barotropicmodel,
            grid_2D,
            RusanovNumericalFlux(),
            CentralNumericalFluxSecondOrder(),
            CentralNumericalFluxGradient(),
        )

        Q_3D = restart_ode_state(dg, Q_3D; init_on_cpu = true)
        Q_2D = restart_ode_state(barotropic_dg, Q_2D; init_on_cpu = true)
    else
        # Cold start from the problem's initial conditions.
        t0 = 0
        dg = OceanDGModel(
            model,
            grid_3D,
            # CentralNumericalFluxFirstOrder(),
            RusanovNumericalFlux(),
            CentralNumericalFluxSecondOrder(),
            CentralNumericalFluxGradient(),
        )
        barotropic_dg = DGModel(
            barotropicmodel,
            grid_2D,
            # CentralNumericalFluxFirstOrder(),
            RusanovNumericalFlux(),
            CentralNumericalFluxSecondOrder(),
            CentralNumericalFluxGradient(),
        )

        Q_3D = init_ode_state(dg, FT(0); init_on_cpu = true)
        Q_2D = init_ode_state(barotropic_dg, FT(0); init_on_cpu = true)
    end

    timeend = runTime + t0

    # Separate LSRK integrators for the slow (3D) and fast (2D) systems,
    # combined by the split-explicit driver.
    lsrk_ocean = LSRK54CarpenterKennedy(dg, Q_3D, dt = dt_slow, t0 = t0)
    lsrk_barotropic =
        LSRK54CarpenterKennedy(barotropic_dg, Q_2D, dt = dt_fast, t0 = t0)

    odesolver = SplitExplicitLSRK2nSolver(lsrk_ocean, lsrk_barotropic)

    #-- Set up State Check call back for config state arrays, called every ntFrq_SC time steps
    cbcs_dg = ClimateMachine.StateCheck.sccreate(
        [
            (Q_3D, "oce Q_3D"),
            (dg.state_auxiliary, "oce aux"),
            # (dg.diffstate,"oce diff",),
            # (lsrk_ocean.dQ,"oce_dQ",),
            # (dg.modeldata.tendency_dg.state_auxiliary,"tend Int aux",),
            # (dg.modeldata.conti3d_Q,"conti3d_Q",),
            (Q_2D, "baro Q_2D"),
            (barotropic_dg.state_auxiliary, "baro aux"),
        ],
        ntFrq_SC;
        prec = 12,
    )
    # (barotropic_dg.diffstate,"baro diff",),
    # (lsrk_barotropic.dQ,"baro_dQ",)
    #--

    # step = [vtk slow counter, vtk fast counter, checkpoint number].
    cb_ntFrq = [n_outp, n_chkp]
    outp_nb = round(Int64, restart * n_chkp / n_outp)
    step = [outp_nb, outp_nb, restart + 1]
    cbvector = make_callbacks(
        vtkpath,
        step,
        cb_ntFrq,
        timeend,
        mpicomm,
        odesolver,
        dg,
        model,
        Q_3D,
        barotropic_dg,
        barotropicmodel,
        Q_2D,
    )

    eng0 = norm(Q_3D)
    @info @sprintf """Starting norm(Q₀) = %.16e ArrayType = %s""" eng0 ArrayType

    # slow fast state tuple
    Qvec = (slow = Q_3D, fast = Q_2D)
    # solve!(Qvec, odesolver; timeend = timeend, callbacks = cbvector)
    cbv = (cbvector..., cbcs_dg)
    solve!(Qvec, odesolver; timeend = timeend, callbacks = cbv)

    ## Enable the code block below to print table for use in reference value code
    ## reference value code sits in a file named $(@__FILE__)_refvals.jl. It is hand
    ## edited using code generated by block below when reference values are updated.
    regenRefVals = false
    if regenRefVals
        ## Print state statistics in format for use as reference values
        println(
            "# SC ========== Test number ",
            1,
            " reference values and precision match template. =======",
        )
        println("# SC ========== $(@__FILE__) test reference values ======================================")
        ClimateMachine.StateCheck.scprintref(cbcs_dg)
        println("# SC ====================================================================================")
    end

    ## Check results against reference if present
    checkRefVals = true
    if checkRefVals
        include("../refvals/simple_box_ivd_refvals.jl")
        refDat = (refVals[1], refPrecs[1])
        checkPass = ClimateMachine.StateCheck.scdocheck(cbcs_dg, refDat)
        checkPass ? checkRep = "Pass" : checkRep = "Fail"
        @info @sprintf("""Compare vs RefVals: %s""", checkRep)
        @test checkPass
    end

    return nothing
end

# Build the callback tuple: per-rank VTK output (plus a rank-0 pvtu index) for
# slow and fast states every n_outp steps, a wall-clock info logger, and —
# when n_chkp > 0 — periodic checkpoint writing for both states.
function make_callbacks(
    vtkpath,
    step,
    ntFrq,
    timeend,
    mpicomm,
    odesolver,
    dg_slow,
    model_slow,
    Q_slow,
    dg_fast,
    model_fast,
    Q_fast,
)
    n_outp = ntFrq[1]
    n_chkp = ntFrq[2]

    mkpath(vtkpath)
    mkpath(vtkpath * "/slow")
    mkpath(vtkpath * "/fast")

    # Write one VTK snapshot (per rank) and, on rank 0 of multi-rank runs,
    # the pvtu file that stitches the rank files together.
    function do_output(span, step, model, dg, Q)
        outprefix = @sprintf(
            "%s/%s/mpirank%04d_step%04d",
            vtkpath,
            span,
            MPI.Comm_rank(mpicomm),
            step
        )
        @info "doing VTK output" outprefix
        statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q)))
        auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q)))
        writevtk(outprefix, Q, dg, statenames, dg.state_auxiliary, auxnames)

        mycomm = Q.mpicomm
        ## Generate the pvtu file for these vtk files
        if MPI.Comm_rank(mpicomm) == 0 && MPI.Comm_size(mpicomm) > 1
            ## name of the pvtu file
            pvtuprefix = @sprintf("%s/%s/step%04d", vtkpath, span, step)
            ## name of each of the ranks vtk files
            prefixes = ntuple(MPI.Comm_size(mpicomm)) do i
                @sprintf("mpirank%04d_step%04d", i - 1, step)
            end
            writepvtu(
                pvtuprefix,
                prefixes,
                (statenames..., auxnames...),
                eltype(Q),
            )
            @info "Done writing VTK: $pvtuprefix"
        end
    end

    do_output("slow", step[1], model_slow, dg_slow, Q_slow)
    step[1] += 1
cbvtk_slow = GenericCallbacks.EveryXSimulationSteps(n_outp) do (init = false)
    do_output("slow", step[1], model_slow, dg_slow, Q_slow)
    step[1] += 1
    nothing
end

do_output("fast", step[2], model_fast, dg_fast, Q_fast)
step[2] += 1
cbvtk_fast = GenericCallbacks.EveryXSimulationSteps(n_outp) do (init = false)
    do_output("fast", step[2], model_fast, dg_fast, Q_fast)
    step[2] += 1
    nothing
end

# Periodic progress report: simulated time, wall-clock runtime, and the
# norm of the slow state.
starttime = Ref(now())
cbinfo = GenericCallbacks.EveryXWallTimeSeconds(60, mpicomm) do (s = false)
    if s
        starttime[] = now()
    else
        energy = norm(Q_slow)
        @info @sprintf(
            """Update simtime = %8.2f / %8.2f runtime = %s norm(Q) = %.16e""",
            ODESolvers.gettime(odesolver),
            timeend,
            Dates.format(
                convert(Dates.DateTime, Dates.now() - starttime[]),
                Dates.dateformat"HH:MM:SS",
            ),
            energy
        )
    end
end

if n_chkp > 0
    # Note: write zeros instead of Aux vars (not needed to restart); would be
    # better just to write state vars (once write_checkpoint() can handle it)
    cb_checkpoint = GenericCallbacks.EveryXSimulationSteps(n_chkp) do
        write_checkpoint(
            Q_slow,
            zero(Q_slow),
            odesolver,
            vtkpath,
            "baroclinic",
            mpicomm,
            step[3],
        )
        write_checkpoint(
            Q_fast,
            zero(Q_fast),
            odesolver,
            vtkpath,
            "barotropic",
            mpicomm,
            step[3],
        )
        step[3] += 1
        nothing
    end
    return (cbvtk_slow, cbvtk_fast, cbinfo, cb_checkpoint)
    # return (cbinfo, cb_checkpoint)
else
    return (cbvtk_slow, cbvtk_fast, cbinfo)
    # return (cbinfo)
end
end

#################
# RUN THE TESTS #
#################

FT = Float64
vtkpath = "vtk_split"

# Run length, output interval, and checkpoint interval.
const runTime = 5 * 24 * 3600 # s
const t_outp = 24 * 3600 # s
const t_chkp = runTime # s
#const runTime = 6 * 3600 # s
#const t_outp = 6 * 3600 # s
#const t_chkp = 0

const ntFrq_SC = 1 # frequency (in time-step) for State-Check output

# Polynomial order, element counts, and domain dimensions.
const N = 4
const Nˣ = 20
const Nʸ = 20
const Nᶻ = 20
const Lˣ = 4e6 # m
const Lʸ = 4e6 # m
const H = 1000 # m

xrange = range(FT(0); length = Nˣ + 1, stop = Lˣ)
yrange = range(FT(0); length = Nʸ + 1, stop = Lʸ)
zrange = range(FT(-H); length = Nᶻ + 1, stop = 0)

#const cʰ = sqrt(gravity * H)
const cʰ = 1 # typical of ocean internal-wave speed
const cᶻ = 0

#- inverse ratio of additional fast time steps (for weighted average)
# --> do 1/add more time-steps and average from: 1 - 1/add up to: 1 + 1/add
# e.g., = 1 --> 100% more ; = 2 --> 50% more ; = 3 --> 33% more ...
add_fast_substeps = 2

#- number of Implicit vertical-diffusion sub-time-steps within one model full time-step
# default = 0 : disable implicit vertical diffusion
numImplSteps = 5

# Wind stress, surface restoring, and restoring temperature.
const τₒ = 2e-1 # (Pa = N/m^2)
const λʳ = 20 // 86400 # m/s
#- since we are using old BC (with factor of 2), take only half:
#const τₒ = 1e-1
#const λʳ = 10 // 86400
const θᴱ = 10 # deg.C

@testset "$(@__FILE__)" begin
    main(restart = 0)
end

================================================ FILE: test/Ocean/SplitExplicit/simple_box_rk3.jl ================================================
#!/usr/bin/env julia --project

using ClimateMachine
ClimateMachine.init(parse_clargs = true)

using ClimateMachine.BalanceLaws: vars_state, Prognostic, Auxiliary
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Filters
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates: flattenednames
using ClimateMachine.Ocean.SplitExplicit01
using ClimateMachine.GenericCallbacks
using ClimateMachine.VTK
using ClimateMachine.Checkpoint

using Test
using MPI
using LinearAlgebra
using StaticArrays
using Logging, Printf, Dates

using CLIMAParameters
using CLIMAParameters.Planet: grav

# Minimal parameter set for this test (uses CLIMAParameters defaults).
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

import ClimateMachine.Ocean.SplitExplicit01:
    ocean_init_aux!,
    ocean_init_state!,
    ocean_boundary_state!,
    CoastlineFreeSlip,
    CoastlineNoSlip,
    OceanFloorFreeSlip,
    OceanFloorNoSlip,
    OceanSurfaceNoStressNoForcing,
    OceanSurfaceStressNoForcing,
    OceanSurfaceNoStressForcing,
    OceanSurfaceStressForcing

import ClimateMachine.DGMethods:
    update_auxiliary_state!, update_auxiliary_state_gradient!, VerticalDirection

# using GPUifyLoops

const ArrayType = ClimateMachine.array_type()

# Rectangular ocean-box problem description: domain extents, wind stress τₒ,
# surface-restoring coefficient λʳ, restoring temperature θᴱ, and the
# boundary-condition tuple.
struct SimpleBox{T, BC} <: AbstractOceanProblem
    Lˣ::T
    Lʸ::T
    H::T
    τₒ::T
    λʳ::T
    θᴱ::T
    boundary_conditions::BC
end

# Initial prognostic state: fluid at rest, flat free surface, and a
# temperature field varying as a cosine in y and decaying linearly with depth.
function ocean_init_state!(p::SimpleBox, Q, A, localgeo, t)
    coords = localgeo.coord
    @inbounds y = coords[2]
    @inbounds z = coords[3]
    @inbounds H = p.H

    Q.u = @SVector [-0, -0]
    Q.η = -0
    Q.θ = (5 + 4 * cos(y * π / p.Lʸ)) * (1 + z / H)

    return nothing
end

# Zero-initialize the 3D (baroclinic) model's auxiliary variables and record
# the y coordinate.
function ocean_init_aux!(m::OceanModel, p::SimpleBox, A, geom)
    FT = eltype(A)
    @inbounds A.y = geom.coord[2]

    # not sure if this is needed but getting weird intialization stuff
    A.w = -0
    A.pkin = -0
    A.wz0 = -0

    A.u_d = @SVector [-0, -0]
    A.ΔGu = @SVector [-0, -0]

    return nothing
end

# A is Filled afer the state
# Zero-initialize the 2D (barotropic) model's auxiliary variables.
function ocean_init_aux!(m::BarotropicModel, P::SimpleBox, A, geom)
    @inbounds A.y = geom.coord[2]

    A.Gᵁ = @SVector [-0, -0]
    A.U_c = @SVector [-0, -0]
    A.η_c = -0

    A.U_s = @SVector [-0, -0]
    A.η_s = -0

    A.Δu = @SVector [-0, -0]
    A.η_diag = -0
    A.Δη = -0

    return nothing
end

# Driver (rk3 variant; continues beyond this chunk).
function main(; restart = 0)
    mpicomm = MPI.COMM_WORLD

    # Logging level from JULIA_LOG_LEVEL; only rank 0 writes to stderr.
    ll = uppercase(get(ENV, "JULIA_LOG_LEVEL", "INFO"))
    loglevel = ll == "DEBUG" ? Logging.Debug :
        ll == "WARN" ? Logging.Warn :
        ll == "ERROR" ? Logging.Error : Logging.Info
    logger_stream = MPI.Comm_rank(mpicomm) == 0 ?
stderr : devnull global_logger(ConsoleLogger(logger_stream, loglevel)) if restart == 0 && MPI.Comm_rank(mpicomm) == 0 && isdir(vtkpath) @info @sprintf("""Remove old dir: %s and make new one""", vtkpath) rm(vtkpath, recursive = true) end brickrange_2D = (xrange, yrange) topl_2D = BrickTopology(mpicomm, brickrange_2D, periodicity = (false, false)) grid_2D = DiscontinuousSpectralElementGrid( topl_2D, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) brickrange_3D = (xrange, yrange, zrange) topl_3D = StackedBrickTopology( mpicomm, brickrange_3D; periodicity = (false, false, false), boundary = ((1, 1), (1, 1), (2, 3)), ) grid_3D = DiscontinuousSpectralElementGrid( topl_3D, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) BC = ( ClimateMachine.Ocean.SplitExplicit01.CoastlineNoSlip(), ClimateMachine.Ocean.SplitExplicit01.OceanFloorNoSlip(), ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceStressForcing(), ) prob = SimpleBox{FT, typeof(BC)}(Lˣ, Lʸ, H, τₒ, λʳ, θᴱ, BC) gravity::FT = grav(param_set) #- set model time-step: dt_fast = 120 dt_slow = 2400 # dt_fast = 240 # dt_slow = 5400 if t_chkp > 0 n_chkp = ceil(Int64, t_chkp / dt_slow) dt_slow = t_chkp / n_chkp else n_chkp = ceil(Int64, runTime / dt_slow) dt_slow = runTime / n_chkp n_chkp = 0 end n_outp = t_outp > 0 ? floor(Int64, t_outp / dt_slow) : ceil(Int64, runTime / dt_slow) ivdc_dt = numImplSteps > 0 ? 
dt_slow / FT(numImplSteps) : dt_slow model = OceanModel{FT}( prob, grav = gravity, cʰ = cʰ, add_fast_substeps = add_fast_substeps, numImplSteps = numImplSteps, ivdc_dt = ivdc_dt, κᶜ = FT(0.1), ) # model = OceanModel{FT}(prob, cʰ = cʰ, fₒ = FT(0), β = FT(0) ) # model = OceanModel{FT}(prob, cʰ = cʰ, νʰ = FT(1e3), νᶻ = FT(1e-3) ) # model = OceanModel{FT}(prob, cʰ = cʰ, νʰ = FT(0), fₒ = FT(0), β = FT(0) ) barotropicmodel = BarotropicModel(model) minΔx = min_node_distance(grid_3D, HorizontalDirection()) minΔz = min_node_distance(grid_3D, VerticalDirection()) #- 2 horiz directions gravity_max_dT = 1 / (2 * sqrt(gravity * H) / minΔx) # dt_fast = minimum([gravity_max_dT]) #- 2 horiz directions + harmonic visc or diffusion: 2^2 factor in CFL: viscous_max_dT = 1 / (2 * model.νʰ / minΔx^2 + model.νᶻ / minΔz^2) / 4 diffusive_max_dT = 1 / (2 * model.κʰ / minΔx^2 + model.κᶻ / minΔz^2) / 4 # dt_slow = minimum([diffusive_max_dT, viscous_max_dT]) @info @sprintf( """Update Gravity Max-dT = %.1f Timestep = %.1f""", gravity_max_dT, dt_fast ) @info @sprintf( """Update Viscous Max-dT = %.1f Diffusive Max-dT = %.1f Timestep = %.1f""", viscous_max_dT, diffusive_max_dT, dt_slow ) if restart > 0 direction = EveryDirection() Q_3D, _, t0 = read_checkpoint(vtkpath, "baroclinic", ArrayType, mpicomm, restart) Q_2D, _, _ = read_checkpoint(vtkpath, "barotropic", ArrayType, mpicomm, restart) dg = OceanDGModel( model, grid_3D, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) barotropic_dg = DGModel( barotropicmodel, grid_2D, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q_3D = restart_ode_state(dg, Q_3D; init_on_cpu = true) Q_2D = restart_ode_state(barotropic_dg, Q_2D; init_on_cpu = true) else t0 = 0 dg = OceanDGModel( model, grid_3D, # CentralNumericalFluxFirstOrder(), RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) barotropic_dg = DGModel( barotropicmodel, 
grid_2D, # CentralNumericalFluxFirstOrder(), RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q_3D = init_ode_state(dg, FT(0); init_on_cpu = true) Q_2D = init_ode_state(barotropic_dg, FT(0); init_on_cpu = true) end timeend = runTime + t0 lsrk_ocean = LS3NRK33Heuns(dg, Q_3D, dt = dt_slow, t0 = t0) lsrk_barotropic = LS3NRK33Heuns(barotropic_dg, Q_2D, dt = dt_fast, t0 = t0) odesolver = SplitExplicitLSRK3nSolver(lsrk_ocean, lsrk_barotropic) #-- Set up State Check call back for config state arrays, called every ntFrq_SC time steps cbcs_dg = ClimateMachine.StateCheck.sccreate( [ (Q_3D, "oce Q_3D"), (dg.state_auxiliary, "oce aux"), # (dg.diffstate,"oce diff",), # (lsrk_ocean.dQ,"oce_dQ",), # (dg.modeldata.tendency_dg.state_auxiliary,"tend Int aux",), # (dg.modeldata.conti3d_Q,"conti3d_Q",), (Q_2D, "baro Q_2D"), (barotropic_dg.state_auxiliary, "baro aux"), ], ntFrq_SC; prec = 12, ) # (barotropic_dg.diffstate,"baro diff",), # (lsrk_barotropic.dQ,"baro_dQ",) #-- cb_ntFrq = [n_outp, n_chkp] outp_nb = round(Int64, restart * n_chkp / n_outp) step = [outp_nb, outp_nb, restart + 1] cbvector = make_callbacks( vtkpath, step, cb_ntFrq, timeend, mpicomm, odesolver, dg, model, Q_3D, barotropic_dg, barotropicmodel, Q_2D, ) eng0 = norm(Q_3D) @info @sprintf """Starting norm(Q₀) = %.16e ArrayType = %s""" eng0 ArrayType # slow fast state tuple Qvec = (slow = Q_3D, fast = Q_2D) # solve!(Qvec, odesolver; timeend = timeend, callbacks = cbvector) cbv = (cbvector..., cbcs_dg) solve!(Qvec, odesolver; timeend = timeend, callbacks = cbv) ## Enable the code block below to print table for use in reference value code ## reference value code sits in a file named $(@__FILE__)_refvals.jl. It is hand ## edited using code generated by block below when reference values are updated. 
regenRefVals = false if regenRefVals ## Print state statistics in format for use as reference values println( "# SC ========== Test number ", 1, " reference values and precision match template. =======", ) println("# SC ========== $(@__FILE__) test reference values ======================================") ClimateMachine.StateCheck.scprintref(cbcs_dg) println("# SC ====================================================================================") end ## Check results against reference if present checkRefVals = true if checkRefVals include("../refvals/simple_box_rk3_refvals.jl") refDat = (refVals[1], refPrecs[1]) checkPass = ClimateMachine.StateCheck.scdocheck(cbcs_dg, refDat) checkPass ? checkRep = "Pass" : checkRep = "Fail" @info @sprintf("""Compare vs RefVals: %s""", checkRep) @test checkPass end return nothing end function make_callbacks( vtkpath, step, ntFrq, timeend, mpicomm, odesolver, dg_slow, model_slow, Q_slow, dg_fast, model_fast, Q_fast, ) n_outp = ntFrq[1] n_chkp = ntFrq[2] mkpath(vtkpath) mkpath(vtkpath * "/slow") mkpath(vtkpath * "/fast") function do_output(span, step, model, dg, Q) outprefix = @sprintf( "%s/%s/mpirank%04d_step%04d", vtkpath, span, MPI.Comm_rank(mpicomm), step ) @info "doing VTK output" outprefix statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q))) writevtk(outprefix, Q, dg, statenames, dg.state_auxiliary, auxnames) mycomm = Q.mpicomm ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 && MPI.Comm_size(mpicomm) > 1 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s/step%04d", vtkpath, span, step) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("mpirank%04d_step%04d", i - 1, step) end writepvtu( pvtuprefix, prefixes, (statenames..., auxnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end do_output("slow", step[1], model_slow, dg_slow, Q_slow) step[1] += 1 
cbvtk_slow = GenericCallbacks.EveryXSimulationSteps(n_outp) do (init = false) do_output("slow", step[1], model_slow, dg_slow, Q_slow) step[1] += 1 nothing end do_output("fast", step[2], model_fast, dg_fast, Q_fast) step[2] += 1 cbvtk_fast = GenericCallbacks.EveryXSimulationSteps(n_outp) do (init = false) do_output("fast", step[2], model_fast, dg_fast, Q_fast) step[2] += 1 nothing end starttime = Ref(now()) cbinfo = GenericCallbacks.EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q_slow) @info @sprintf( """Update simtime = %8.2f / %8.2f runtime = %s norm(Q) = %.16e""", ODESolvers.gettime(odesolver), timeend, Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end if n_chkp > 0 # Note: write zeros instead of Aux vars (not needed to restart); would be # better just to write state vars (once write_checkpoint() can handle it) cb_checkpoint = GenericCallbacks.EveryXSimulationSteps(n_chkp) do write_checkpoint( Q_slow, zero(Q_slow), odesolver, vtkpath, "baroclinic", mpicomm, step[3], ) write_checkpoint( Q_fast, zero(Q_fast), odesolver, vtkpath, "barotropic", mpicomm, step[3], ) step[3] += 1 nothing end return (cbvtk_slow, cbvtk_fast, cbinfo, cb_checkpoint) else return (cbvtk_slow, cbvtk_fast, cbinfo) end end ################# # RUN THE TESTS # ################# FT = Float64 vtkpath = "vtk_split" const runTime = 3 * 24 * 3600 # s const t_outp = 24 * 3600 # s const t_chkp = runTime # s #const runTime = 6 * 3600 # s #const t_outp = 6 * 3600 # s #const t_chkp = 0 const ntFrq_SC = 1 # frequency (in time-step) for State-Check output const N = 4 const Nˣ = 20 const Nʸ = 20 const Nᶻ = 20 const Lˣ = 4e6 # m const Lʸ = 4e6 # m const H = 1000 # m xrange = range(FT(0); length = Nˣ + 1, stop = Lˣ) yrange = range(FT(0); length = Nʸ + 1, stop = Lʸ) zrange = range(FT(-H); length = Nᶻ + 1, stop = 0) #const cʰ = sqrt(gravity * H) const cʰ = 1 # typical of ocean internal-wave speed const 
cᶻ = 0 #- inverse ratio of additional fast time steps (for weighted average) # --> do 1/add more time-steps and average from: 1 - 1/add up to: 1 + 1/add # e.g., = 1 --> 100% more ; = 2 --> 50% more ; = 3 --> 33% more ... add_fast_substeps = 3 #- number of Implicit vertical-diffusion sub-time-steps within one model full time-step # default = 0 : disable implicit vertical diffusion numImplSteps = 5 const τₒ = 2e-1 # (Pa = N/m^2) const λʳ = 20 // 86400 # m/s #- since we are using old BC (with factor of 2), take only half: #const τₒ = 1e-1 #const λʳ = 10 // 86400 const θᴱ = 10 # deg.C @testset "$(@__FILE__)" begin main(restart = 0) end ================================================ FILE: test/Ocean/SplitExplicit/simple_dbl_gyre.jl ================================================ #!/usr/bin/env julia --project using ClimateMachine ClimateMachine.init(parse_clargs = true) using ClimateMachine.BalanceLaws: vars_state, Prognostic, Auxiliary using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.Mesh.Filters using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates: flattenednames using ClimateMachine.Ocean.SplitExplicit01 using ClimateMachine.GenericCallbacks using ClimateMachine.VTK using ClimateMachine.Checkpoint using Test using MPI using LinearAlgebra using StaticArrays using Logging, Printf, Dates using CLIMAParameters using CLIMAParameters.Planet: grav struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() import ClimateMachine.Ocean.SplitExplicit01: ocean_init_aux!, ocean_init_state!, ocean_boundary_state!, CoastlineFreeSlip, CoastlineNoSlip, OceanFloorFreeSlip, OceanFloorNoSlip, OceanSurfaceNoStressNoForcing, OceanSurfaceStressNoForcing, OceanSurfaceNoStressForcing, OceanSurfaceStressForcing, velocity_flux, temperature_flux import ClimateMachine.DGMethods: 
update_auxiliary_state!,
    update_auxiliary_state_gradient!,
    VerticalDirection

# using GPUifyLoops

const ArrayType = ClimateMachine.array_type()

"""
    DoubleGyreBox{T, BC} <: AbstractOceanProblem

Wind-driven double-gyre test problem on a rectangular box domain.

# Fields
- `Lˣ`, `Lʸ`: horizontal domain extents (m)
- `H`: domain depth (m)
- `τₒ`: surface wind-stress magnitude (Pa)
- `λʳ`: surface temperature relaxation rate (m/s)
- `θᴱ`: surface relaxation temperature (deg C)
- `boundary_conditions`: tuple of boundary-condition objects
"""
struct DoubleGyreBox{T, BC} <: AbstractOceanProblem
    Lˣ::T
    Lʸ::T
    H::T
    τₒ::T
    λʳ::T
    θᴱ::T
    boundary_conditions::BC
end

"""
    velocity_flux(p::DoubleGyreBox, y, ρ)

Kinematic surface wind-stress (momentum flux): a cosine profile in `y`
with one full period over `Lʸ` and amplitude `τₒ / ρ`, which drives the
double gyre.
"""
@inline velocity_flux(p::DoubleGyreBox, y, ρ) =
    -(p.τₒ / ρ) * cos(2 * π * y / p.Lʸ)

"""
    temperature_flux(p::DoubleGyreBox, y, θ)

Surface temperature restoring flux: relax `θ` toward the linear-in-`y`
profile `θʳ = θᴱ * (1 - y / Lʸ)` at rate `λʳ`.
"""
@inline function temperature_flux(p::DoubleGyreBox, y, θ)
    θʳ = p.θᴱ * (1 - y / p.Lʸ)
    return p.λʳ * (θʳ - θ)
end

"""
    ocean_init_state!(p::DoubleGyreBox, Q, A, localgeo, t)

Initialize the prognostic state `Q`: zero horizontal velocity and
sea-surface height, and a temperature that varies as a cosine in `y`
and linearly in `z` (surface value 2–22 deg C, zero at depth `-H`).
"""
function ocean_init_state!(p::DoubleGyreBox, Q, A, localgeo, t)
    coords = localgeo.coord
    @inbounds y = coords[2]
    @inbounds z = coords[3]
    @inbounds H = p.H

    Q.u = @SVector [-0, -0]
    Q.η = -0
    Q.θ = (12 + 10 * cos(π * y / p.Lʸ)) * (1 + z / H)

    return nothing
end

"""
    ocean_init_aux!(m::OceanModel, p::DoubleGyreBox, A, geom)

Initialize the auxiliary state `A` of the 3D (baroclinic) model:
store the `y` coordinate and zero every other auxiliary field.
"""
function ocean_init_aux!(m::OceanModel, p::DoubleGyreBox, A, geom)
    FT = eltype(A)
    @inbounds A.y = geom.coord[2]

    # not sure if this is needed but getting weird initialization stuff
    A.w = -0
    A.pkin = -0
    A.wz0 = -0

    A.u_d = @SVector [-0, -0]
    A.ΔGu = @SVector [-0, -0]

    return nothing
end

# A is filled after the state
"""
    ocean_init_aux!(m::BarotropicModel, P::DoubleGyreBox, A, geom)

Initialize the auxiliary state `A` of the 2D (barotropic) model:
store the `y` coordinate and zero every other auxiliary field.
"""
function ocean_init_aux!(m::BarotropicModel, P::DoubleGyreBox, A, geom)
    @inbounds A.y = geom.coord[2]

    A.Gᵁ = @SVector [-0, -0]
    A.U_c = @SVector [-0, -0]
    A.η_c = -0

    A.U_s = @SVector [-0, -0]
    A.η_s = -0

    A.Δu = @SVector [-0, -0]
    A.η_diag = -0
    A.Δη = -0

    return nothing
end

"""
    main(; restart = 0)

Set up and run the split-explicit double-gyre experiment;
`restart > 0` resumes from checkpoint number `restart`.
"""
function main(; restart = 0)
    mpicomm = MPI.COMM_WORLD

    # Log level is taken from the environment; only MPI rank 0 logs to stderr,
    # all other ranks are silenced via devnull.
    ll = uppercase(get(ENV, "JULIA_LOG_LEVEL", "INFO"))
    loglevel =
        ll == "DEBUG" ? Logging.Debug :
        ll == "WARN" ? Logging.Warn :
        ll == "ERROR" ? Logging.Error : Logging.Info
    logger_stream = MPI.Comm_rank(mpicomm) == 0 ?
stderr : devnull global_logger(ConsoleLogger(logger_stream, loglevel)) if restart == 0 && MPI.Comm_rank(mpicomm) == 0 && isdir(vtkpath) @info @sprintf("""Remove old dir: %s and make new one""", vtkpath) rm(vtkpath, recursive = true) end brickrange_2D = (xrange, yrange) topl_2D = BrickTopology(mpicomm, brickrange_2D, periodicity = (false, false)) grid_2D = DiscontinuousSpectralElementGrid( topl_2D, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) brickrange_3D = (xrange, yrange, zrange) topl_3D = StackedBrickTopology( mpicomm, brickrange_3D; periodicity = (false, false, false), boundary = ((1, 1), (1, 1), (2, 3)), ) grid_3D = DiscontinuousSpectralElementGrid( topl_3D, FloatType = FT, DeviceArray = ArrayType, polynomialorder = N, ) BC = ( ClimateMachine.Ocean.SplitExplicit01.CoastlineNoSlip(), ClimateMachine.Ocean.SplitExplicit01.OceanFloorNoSlip(), ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceStressForcing(), ) prob = DoubleGyreBox{FT, typeof(BC)}(Lˣ, Lʸ, H, τₒ, λʳ, θᴱ, BC) gravity::FT = grav(param_set) #- set model time-step: dt_fast = 96 dt_slow = 3456 if t_chkp > 0 n_chkp = ceil(Int64, t_chkp / dt_slow) dt_slow = t_chkp / n_chkp else n_chkp = ceil(Int64, runTime / dt_slow) dt_slow = runTime / n_chkp n_chkp = 0 end n_outp = t_outp > 0 ? floor(Int64, t_outp / dt_slow) : ceil(Int64, runTime / dt_slow) ivdc_dt = numImplSteps > 0 ? 
dt_slow / FT(numImplSteps) : dt_slow model = OceanModel{FT}( prob, grav = gravity, cʰ = cʰ, add_fast_substeps = add_fast_substeps, numImplSteps = numImplSteps, ivdc_dt = ivdc_dt, νʰ = FT(15e3), νᶻ = FT(5e-3), κᶜ = FT(1.0), fₒ = FT(3.8e-5), β = FT(1.7e-11), ) # model = OceanModel{FT}(prob, cʰ = cʰ, fₒ = FT(0), β = FT(0) ) # model = OceanModel{FT}(prob, cʰ = cʰ, νʰ = FT(1e3), νᶻ = FT(1e-3) ) # model = OceanModel{FT}(prob, cʰ = cʰ, νʰ = FT(0), fₒ = FT(0), β = FT(0) ) barotropicmodel = BarotropicModel(model) minΔx = min_node_distance(grid_3D, HorizontalDirection()) minΔz = min_node_distance(grid_3D, VerticalDirection()) #- 2 horiz directions gravity_max_dT = 1 / (2 * sqrt(gravity * H) / minΔx) # dt_fast = minimum([gravity_max_dT]) #- 2 horiz directions + harmonic visc or diffusion: 2^2 factor in CFL: viscous_max_dT = 1 / (2 * model.νʰ / minΔx^2 + model.νᶻ / minΔz^2) / 4 diffusive_max_dT = 1 / (2 * model.κʰ / minΔx^2 + model.κᶻ / minΔz^2) / 4 # dt_slow = minimum([diffusive_max_dT, viscous_max_dT]) @info @sprintf( """Update Gravity Max-dT = %.1f Timestep = %.1f""", gravity_max_dT, dt_fast ) @info @sprintf( """Update Viscous Max-dT = %.1f Diffusive Max-dT = %.1f Timestep = %.1f""", viscous_max_dT, diffusive_max_dT, dt_slow ) if restart > 0 direction = EveryDirection() Q_3D, _, t0 = read_checkpoint(vtkpath, "baroclinic", ArrayType, mpicomm, restart) Q_2D, _, _ = read_checkpoint(vtkpath, "barotropic", ArrayType, mpicomm, restart) dg = OceanDGModel( model, grid_3D, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) barotropic_dg = DGModel( barotropicmodel, grid_2D, RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q_3D = restart_ode_state(dg, Q_3D; init_on_cpu = true) Q_2D = restart_ode_state(barotropic_dg, Q_2D; init_on_cpu = true) else t0 = 0 dg = OceanDGModel( model, grid_3D, # CentralNumericalFluxFirstOrder(), RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), 
CentralNumericalFluxGradient(), ) barotropic_dg = DGModel( barotropicmodel, grid_2D, # CentralNumericalFluxFirstOrder(), RusanovNumericalFlux(), CentralNumericalFluxSecondOrder(), CentralNumericalFluxGradient(), ) Q_3D = init_ode_state(dg, FT(0); init_on_cpu = true) Q_2D = init_ode_state(barotropic_dg, FT(0); init_on_cpu = true) end timeend = runTime + t0 lsrk_ocean = LS3NRK33Heuns(dg, Q_3D, dt = dt_slow, t0 = t0) lsrk_barotropic = LS3NRK33Heuns(barotropic_dg, Q_2D, dt = dt_fast, t0 = t0) odesolver = SplitExplicitLSRK3nSolver(lsrk_ocean, lsrk_barotropic) #-- Set up State Check call back for config state arrays, called every ntFrq_SC time steps cbcs_dg = ClimateMachine.StateCheck.sccreate( [ (Q_3D, "oce Q_3D"), (dg.state_auxiliary, "oce aux"), # (dg.diffstate,"oce diff",), # (lsrk_ocean.dQ,"oce_dQ",), # (dg.modeldata.tendency_dg.state_auxiliary,"tend Int aux",), # (dg.modeldata.conti3d_Q,"conti3d_Q",), (Q_2D, "baro Q_2D"), (barotropic_dg.state_auxiliary, "baro aux"), ], ntFrq_SC; prec = 12, ) # (barotropic_dg.diffstate,"baro diff",), # (lsrk_barotropic.dQ,"baro_dQ",) #-- cb_ntFrq = [n_outp, n_chkp] outp_nb = round(Int64, restart * n_chkp / n_outp) step = [outp_nb, outp_nb, restart + 1] cbvector = make_callbacks( vtkpath, step, cb_ntFrq, timeend, mpicomm, odesolver, dg, model, Q_3D, barotropic_dg, barotropicmodel, Q_2D, ) eng0 = norm(Q_3D) @info @sprintf """Starting norm(Q₀) = %.16e ArrayType = %s""" eng0 ArrayType # slow fast state tuple Qvec = (slow = Q_3D, fast = Q_2D) # solve!(Qvec, odesolver; timeend = timeend, callbacks = cbvector) cbv = (cbvector..., cbcs_dg) solve!(Qvec, odesolver; timeend = timeend, callbacks = cbv) ## Enable the code block below to print table for use in reference value code ## reference value code sits in a file named $(@__FILE__)_refvals.jl. It is hand ## edited using code generated by block below when reference values are updated. 
regenRefVals = false if regenRefVals ## Print state statistics in format for use as reference values println( "# SC ========== Test number ", 1, " reference values and precision match template. =======", ) println("# SC ========== $(@__FILE__) test reference values ======================================") ClimateMachine.StateCheck.scprintref(cbcs_dg) println("# SC ====================================================================================") end ## Check results against reference if present checkRefVals = true if checkRefVals include("../refvals/simple_dbl_gyre_refvals.jl") refDat = (refVals[1], refPrecs[1]) checkPass = ClimateMachine.StateCheck.scdocheck(cbcs_dg, refDat) checkPass ? checkRep = "Pass" : checkRep = "Fail" @info @sprintf("""Compare vs RefVals: %s""", checkRep) @test checkPass end return nothing end function make_callbacks( vtkpath, step, ntFrq, timeend, mpicomm, odesolver, dg_slow, model_slow, Q_slow, dg_fast, model_fast, Q_fast, ) n_outp = ntFrq[1] n_chkp = ntFrq[2] mkpath(vtkpath) mkpath(vtkpath * "/slow") mkpath(vtkpath * "/fast") function do_output(span, step, model, dg, Q) outprefix = @sprintf( "%s/%s/mpirank%04d_step%04d", vtkpath, span, MPI.Comm_rank(mpicomm), step ) @info "doing VTK output" outprefix statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q))) writevtk(outprefix, Q, dg, statenames, dg.state_auxiliary, auxnames) mycomm = Q.mpicomm ## Generate the pvtu file for these vtk files if MPI.Comm_rank(mpicomm) == 0 && MPI.Comm_size(mpicomm) > 1 ## name of the pvtu file pvtuprefix = @sprintf("%s/%s/step%04d", vtkpath, span, step) ## name of each of the ranks vtk files prefixes = ntuple(MPI.Comm_size(mpicomm)) do i @sprintf("mpirank%04d_step%04d", i - 1, step) end writepvtu( pvtuprefix, prefixes, (statenames..., auxnames...), eltype(Q), ) @info "Done writing VTK: $pvtuprefix" end end do_output("slow", step[1], model_slow, dg_slow, Q_slow) step[1] += 1 
cbvtk_slow = GenericCallbacks.EveryXSimulationSteps(n_outp) do (init = false) do_output("slow", step[1], model_slow, dg_slow, Q_slow) step[1] += 1 nothing end do_output("fast", step[2], model_fast, dg_fast, Q_fast) step[2] += 1 cbvtk_fast = GenericCallbacks.EveryXSimulationSteps(n_outp) do (init = false) do_output("fast", step[2], model_fast, dg_fast, Q_fast) step[2] += 1 nothing end starttime = Ref(now()) cbinfo = GenericCallbacks.EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q_slow) @info @sprintf( """Update simtime = %8.2f / %8.2f runtime = %s norm(Q) = %.16e""", ODESolvers.gettime(odesolver), timeend, Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end if n_chkp > 0 # Note: write zeros instead of Aux vars (not needed to restart); would be # better just to write state vars (once write_checkpoint() can handle it) cb_checkpoint = GenericCallbacks.EveryXSimulationSteps(n_chkp) do write_checkpoint( Q_slow, zero(Q_slow), odesolver, vtkpath, "baroclinic", mpicomm, step[3], ) write_checkpoint( Q_fast, zero(Q_fast), odesolver, vtkpath, "barotropic", mpicomm, step[3], ) step[3] += 1 nothing end return (cbvtk_slow, cbvtk_fast, cbinfo, cb_checkpoint) else return (cbvtk_slow, cbvtk_fast, cbinfo) end end ################# # RUN THE TESTS # ################# FT = Float64 vtkpath = "vtk_split" const runTime = 3 * 24 * 3600 # s const t_outp = 24 * 3600 # s const t_chkp = runTime # s #const runTime = 6 * 3600 # s #const t_outp = 6 * 3600 # s #const t_chkp = 0 const ntFrq_SC = 1 # frequency (in time-step) for State-Check output const N = 4 const Nˣ = 20 const Nʸ = 30 const Nᶻ = 15 const Lˣ = 4e6 # m const Lʸ = 6e6 # m const H = 3000 # m xrange = range(FT(0); length = Nˣ + 1, stop = Lˣ) yrange = range(FT(0); length = Nʸ + 1, stop = Lʸ) zrange = range(FT(-H); length = Nᶻ + 1, stop = 0) # dz = [576, 540, 433, 339, 266, 208, 162, 128, 99, 75, 58, 43, 31, 22, 20] # zrange = 
zeros(FT, Nᶻ + 1) # zrange[2:(Nᶻ + 1)] .= cumsum(dz) # zrange .-= H #const cʰ = sqrt(gravity * H) const cʰ = 1 # typical of ocean internal-wave speed const cᶻ = 0 #- inverse ratio of additional fast time steps (for weighted average) # --> do 1/add more time-steps and average from: 1 - 1/add up to: 1 + 1/add # e.g., = 1 --> 100% more ; = 2 --> 50% more ; = 3 --> 33% more ... add_fast_substeps = 3 #- number of Implicit vertical-diffusion sub-time-steps within one model full time-step # default = 0 : disable implicit vertical diffusion numImplSteps = 5 const τₒ = 1e-1 # (Pa = N/m^2) const λʳ = 20 // 86400 # m/s #- since we are using old BC (with factor of 2), take only half: #const τₒ = 5e-2 #const λʳ = 10 // 86400 const θᴱ = 25 # deg.C @testset "$(@__FILE__)" begin main(restart = 0) end ================================================ FILE: test/Ocean/SplitExplicit/split_explicit.jl ================================================ #!/usr/bin/env julia --project using Test using ClimateMachine using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.Mesh.Filters using ClimateMachine.VariableTemplates using ClimateMachine.Mesh.Grids: polynomialorders using ClimateMachine.Ocean using ClimateMachine.Ocean.HydrostaticBoussinesq using ClimateMachine.Ocean.ShallowWater using ClimateMachine.Ocean.SplitExplicit: VerticalIntegralModel using ClimateMachine.Ocean.SplitExplicit01 using ClimateMachine.Ocean.OceanProblems using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.BalanceLaws: vars_state, Prognostic, Auxiliary using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.MPIStateArrays using ClimateMachine.VTK using ClimateMachine.Checkpoint using ClimateMachine.SystemSolvers using MPI using LinearAlgebra using StaticArrays using Logging, Printf, Dates using CLIMAParameters using CLIMAParameters.Planet: grav struct EarthParameterSet <: AbstractEarthParameterSet end 
const param_set = EarthParameterSet()

"""
    SplitConfig

Bundle of everything `run_split_explicit` needs: a run `name` (used for the
output directory), the 3D and 2D DG models, the split-explicit `solver`
constructor, the MPI communicator, and the array backend type.
"""
struct SplitConfig{N, D3, D2, S, M, AT}
    name::N
    dg_3D::D3
    dg_2D::D2
    solver::S
    mpicomm::M
    ArrayType::AT
end

"""
    run_split_explicit(config, timespan; kwargs...)

Run the split-explicit solver described by `config` over
`timespan = (tout, timeend)`, where `tout` is the output interval and
`timeend` the simulation length (both in seconds).

# Keyword arguments
- `dt_fast`, `dt_slow`: barotropic / baroclinic time steps (s); `dt_slow`
  is adjusted so the output interval is an integer number of slow steps
- `refDat`: reference-value data; when non-empty, results are checked
  with `StateCheck.scdocheck`
- `restart`: checkpoint number to resume from (`0` = fresh start)
- `analytic_solution`: when `true`, compare the final states against the
  states produced by `init_ode_state` at the end time (relative error
  tolerance 0.005)
"""
function run_split_explicit(
    config::SplitConfig,
    timespan;
    dt_fast = 300,
    dt_slow = 300,
    refDat = (),
    restart = 0,
    analytic_solution = false,
)
    Q_3D, Q_2D, t0 = init_states(config, Val(restart))
    tout, timeend = timespan

    # @show dt_fast = floor(Int, 1 / (2 * sqrt(gravity * H) / minΔx)) # / 4
    # @show dt_slow = floor(Int, minΔx / 15) # / 4

    # Round dt_slow down so an integer number of slow steps fits in tout.
    nout = ceil(Int64, tout / dt_slow)
    dt_slow = tout / nout
    timeendlocal = timeend + t0

    lsrk_3D = LSRK54CarpenterKennedy(config.dg_3D, Q_3D, dt = dt_slow, t0 = t0)
    lsrk_2D = LSRK54CarpenterKennedy(config.dg_2D, Q_2D, dt = dt_fast, t0 = t0)

    odesolver = config.solver(lsrk_3D, lsrk_2D;)

    # vtkstep = [slow output #, fast output #, baroclinic ckpt #, barotropic ckpt #]
    vtkstep = [restart, restart, restart + 1, restart + 1]
    cbvector = make_callbacks(
        abspath(joinpath(ClimateMachine.Settings.output_dir, config.name)),
        vtkstep,
        nout,
        config.mpicomm,
        odesolver,
        config.dg_3D,
        config.dg_3D.balance_law,
        Q_3D,
        config.dg_2D,
        config.dg_2D.balance_law,
        Q_2D,
        timeendlocal,
    )

    eng0 = norm(Q_3D)
    @info @sprintf """Starting
    norm(Q₀) = %.16e
    ArrayType = %s""" eng0 config.ArrayType

    # slow fast state tuple
    Qvec = (slow = Q_3D, fast = Q_2D)
    # NOTE(review): Qvec is constructed but solve! is called with Q_3D only —
    # sibling drivers in this test suite pass the (slow, fast) tuple; confirm
    # which form this solver type expects.
    solve!(Q_3D, odesolver; timeend = timeendlocal, callbacks = cbvector)

    if analytic_solution
        Qe_3D = init_ode_state(config.dg_3D, timeendlocal, init_on_cpu = true)
        Qe_2D = init_ode_state(config.dg_2D, timeendlocal, init_on_cpu = true)

        error_3D = euclidean_distance(Q_3D, Qe_3D) / norm(Qe_3D)
        error_2D = euclidean_distance(Q_2D, Qe_2D) / norm(Qe_2D)

        println("3D error = ", error_3D)
        println("2D error = ", error_2D)
        @test isapprox(error_3D, FT(0.0); atol = 0.005)
        @test isapprox(error_2D, FT(0.0); atol = 0.005)
    end

    ## Check results against reference
    ClimateMachine.StateCheck.scprintref(cbvector[end])
    if length(refDat) > 0
        @test ClimateMachine.StateCheck.scdocheck(cbvector[end], refDat)
    end

    return nothing
end

"""
    init_states(config, ::Val{0})

Fresh start: build the initial 3D state at `t = 0` and take the 2D state
from the 3D model's `modeldata`; returns `(Q_3D, Q_2D, t0 = 0)`.
"""
function init_states(config, ::Val{0})
    Q_3D =
init_ode_state(config.dg_3D, FT(0); init_on_cpu = true) Q_2D = config.dg_3D.modeldata.Q_2D return Q_3D, Q_2D, 0 end function init_states(config, ::Val{restart}) where {restart} Q_3D, A_3D, t0 = read_checkpoint( abspath(joinpath(ClimateMachine.Settings.output_dir, config.name)), "baroclinic", config.ArrayType, config.mpicomm, restart, ) Q_2D_restart, A_2D, _ = read_checkpoint( abspath(joinpath(ClimateMachine.Settings.output_dir, config.name)), "barotropic", config.ArrayType, config.mpicomm, restart, ) direction = EveryDirection() A_3D = restart_auxiliary_state( config.dg_3D.balance_law, config.dg_3D.grid, A_3D, direction, ) A_2D = restart_auxiliary_state( config.dg_2D.balance_law, config.dg_2D.grid, A_2D, direction, ) config.dg_3D.state_auxiliary .= A_3D config.dg_2D.state_auxiliary .= A_2D Q_3D = restart_ode_state(config.dg_3D, Q_3D; init_on_cpu = true) Q_2D = config.dg_3D.modeldata.Q_2D Q_2D .= Q_2D_restart return Q_3D, Q_2D, t0 end function make_callbacks( vtkpath, vtkstep, nout, mpicomm, odesolver, dg_slow, model_slow, Q_slow, dg_fast, model_fast, Q_fast, timeend, ) mkpath(vtkpath) mkpath(vtkpath * "/slow") mkpath(vtkpath * "/fast") A_slow = dg_slow.state_auxiliary A_fast = dg_fast.state_auxiliary function do_output(span, vtkstep, model, dg, Q, A) outprefix = @sprintf( "%s/%s/mpirank%04d_step%04d", vtkpath, span, MPI.Comm_rank(mpicomm), vtkstep ) @info "doing VTK output" outprefix statenames = flattenednames(vars_state(model, Prognostic(), eltype(Q))) auxnames = flattenednames(vars_state(model, Auxiliary(), eltype(Q))) writevtk(outprefix, Q, dg, statenames, A, auxnames) end do_output("slow", vtkstep[1], model_slow, dg_slow, Q_slow, A_slow) vtkstep[1] += 1 cbvtk_slow = GenericCallbacks.EveryXSimulationSteps(nout) do (init = false) do_output("slow", vtkstep[1], model_slow, dg_slow, Q_slow, A_slow) vtkstep[1] += 1 nothing end do_output("fast", vtkstep[2], model_fast, dg_fast, Q_fast, A_fast) vtkstep[2] += 1 cbvtk_fast = GenericCallbacks.EveryXSimulationSteps(nout) 
do (init = false) do_output("fast", vtkstep[2], model_fast, dg_fast, Q_fast, A_fast) vtkstep[2] += 1 nothing end starttime = Ref(now()) cbinfo = GenericCallbacks.EveryXWallTimeSeconds(60, mpicomm) do (s = false) if s starttime[] = now() else energy = norm(Q_slow) @info @sprintf( """Update simtime = %8.2f / %8.2f runtime = %s norm(Q) = %.16e""", ODESolvers.gettime(odesolver), timeend, Dates.format( convert(Dates.DateTime, Dates.now() - starttime[]), Dates.dateformat"HH:MM:SS", ), energy ) end end cbcs_dg = ClimateMachine.StateCheck.sccreate( [ (Q_slow, "3D state"), (A_slow, "3D aux"), (Q_fast, "2D state"), (A_fast, "2D aux"), ], nout; prec = 12, ) cb_checkpoint = GenericCallbacks.EveryXSimulationSteps(nout) do write_checkpoint( Q_slow, A_slow, odesolver, vtkpath, "baroclinic", mpicomm, vtkstep[3], ) write_checkpoint( Q_fast, A_fast, odesolver, vtkpath, "barotropic", mpicomm, vtkstep[4], ) rm_checkpoint(vtkpath, "baroclinic", mpicomm, vtkstep[3] - 1) rm_checkpoint(vtkpath, "barotropic", mpicomm, vtkstep[4] - 1) vtkstep[3] += 1 vtkstep[4] += 1 nothing end return (cbvtk_slow, cbvtk_fast, cbinfo, cb_checkpoint, cbcs_dg) end ================================================ FILE: test/Ocean/SplitExplicit/test_coriolis.jl ================================================ #!/usr/bin/env julia --project using Test include("hydrostatic_spindown.jl") ClimateMachine.init() const FT = Float64 ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin include("../refvals/hydrostatic_spindown_refvals.jl") # simulation time timeend = FT(15 * 24 * 3600) # s tout = FT(24 * 3600) # s timespan = (tout, timeend) # DG polynomial order N = Int(4) # Domain resolution Nˣ = Int(5) Nʸ = Int(5) Nᶻ = Int(8) resolution = (N, Nˣ, Nʸ, Nᶻ) # Domain size Lˣ = 1e6 # m Lʸ = 1e6 # m H = 400 # m dimensions = (Lˣ, Lʸ, H) BC = ( OceanBC(Impenetrable(FreeSlip()), Insulating()), OceanBC(Penetrable(FreeSlip()), Insulating()), ) config = SplitConfig( "rotating_bla", resolution, 
dimensions, Coupled(), Rotating(); solver = SplitExplicitSolver, boundary_conditions = BC, ) #= BC = ( ClimateMachine.Ocean.SplitExplicit01.OceanFloorFreeSlip(), ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceNoStressNoForcing(), ) config = SplitConfig( "rotating_jmc", resolution, dimensions, Coupled(), Rotating(); solver = SplitExplicitLSRK2nSolver, boundary_conditions = BC, ) =# run_split_explicit( config, timespan, dt_fast = 300, dt_slow = 300, # 90 * 60, # refDat = refVals.ninety_minutes, analytic_solution = true, ) end ================================================ FILE: test/Ocean/SplitExplicit/test_restart.jl ================================================ #!/usr/bin/env julia --project using Test include("hydrostatic_spindown.jl") ClimateMachine.init() const FT = Float64 ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin include("../refvals/hydrostatic_spindown_refvals.jl") # simulation time timeend = FT(24 * 3600) # s tout = FT(3 * 3600) # s timespan = (tout, timeend) # DG polynomial order N = Int(4) # Domain resolution Nˣ = Int(5) Nʸ = Int(5) Nᶻ = Int(8) resolution = (N, Nˣ, Nʸ, Nᶻ) # Domain size Lˣ = 1e6 # m Lʸ = 1e6 # m H = 400 # m dimensions = (Lˣ, Lʸ, H) config = SplitConfig("test_restart", resolution, dimensions, Coupled()) midpoint = timeend / 2 timespan = (tout, midpoint) run_split_explicit(config, timespan; dt_slow = 90 * 60) run_split_explicit( config, timespan; dt_slow = 90 * 60, refDat = refVals.ninety_minutes, analytic_solution = true, restart = Int(midpoint / tout), ) end ================================================ FILE: test/Ocean/SplitExplicit/test_simple_box.jl ================================================ include("../../../experiments/OceanSplitExplicit/simple_box.jl") ClimateMachine.init() # Float type const FT = Float64 ################# # RUN THE TESTS # ################# @testset "$(@__FILE__)" begin include("../refvals/simple_box_ivd_refvals.jl") refDat = (refVals[1], refPrecs[1]) # 
simulation time
timestart = FT(0)       # s
timeend = FT(5 * 86400) # s
timespan = (timestart, timeend)

# DG polynomial order
N = Int(4)

# Domain resolution (number of elements per direction)
Nˣ = Int(20)
Nʸ = Int(20)
Nᶻ = Int(20)
resolution = (N, Nˣ, Nʸ, Nᶻ)

# Domain size
Lˣ = 4e6  # m
Lʸ = 4e6  # m
H = 1000  # m
dimensions = (Lˣ, Lʸ, H)

# no-slip coastline and floor; wind-stress forcing applied at the surface
BC = (
    ClimateMachine.Ocean.SplitExplicit01.CoastlineNoSlip(),
    ClimateMachine.Ocean.SplitExplicit01.OceanFloorNoSlip(),
    ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceStressForcing(),
)

# build the split-explicit configuration; dt_slow/dt_fast are the
# baroclinic (3D) and barotropic (2D) substep sizes
config, solver_type = config_simple_box(
    "test_simple_box",
    resolution,
    dimensions,
    BC;
    dt_slow = FT(90 * 60),
    dt_fast = FT(240),
)

run_simple_box(config, timespan, solver_type.dt_slow; refDat = refDat)
end

================================================
FILE: test/Ocean/SplitExplicit/test_spindown_long.jl
================================================
#!/usr/bin/env julia --project

using Test

include("hydrostatic_spindown.jl")

ClimateMachine.init()

const FT = Float64

#################
# RUN THE TESTS #
#################
# Hydrostatic spin-down, long run: single-rate (uncoupled/coupled) and
# multi-rate (several slow time steps) cases, each checked against
# stored reference values and the analytic solution.
@testset "$(@__FILE__)" begin
    include("../refvals/hydrostatic_spindown_refvals.jl")

    # simulation time
    timeend = FT(24 * 3600) # s
    tout = FT(3 * 3600)     # s
    timespan = (tout, timeend)

    # DG polynomial order
    N = Int(4)

    # Domain resolution (number of elements per direction)
    Nˣ = Int(5)
    Nʸ = Int(5)
    Nᶻ = Int(8)
    resolution = (N, Nˣ, Nʸ, Nᶻ)

    # Domain size
    Lˣ = 1e6 # m
    Lʸ = 1e6 # m
    H = 400  # m
    dimensions = (Lˣ, Lʸ, H)

    @testset "Single-Rate" begin
        @testset "Not Coupled" begin
            config =
                SplitConfig("uncoupled", resolution, dimensions, Uncoupled())

            run_split_explicit(
                config,
                timespan,
                refDat = refVals.uncoupled,
                analytic_solution = true,
            )
        end

        @testset "Fully Coupled" begin
            config = SplitConfig("coupled", resolution, dimensions, Coupled())

            run_split_explicit(
                config,
                timespan,
                refDat = refVals.coupled,
                analytic_solution = true,
            )
        end
    end

    @testset "Multi-rate" begin
        @testset "Δt = 30 mins" begin
            config = SplitConfig("multirate", resolution, dimensions, Coupled())

            run_split_explicit(
                config,
                timespan,
                dt_slow = 30 * 60,
                refDat = refVals.thirty_minutes,
                analytic_solution = true,
            )
        end

        @testset "Δt = 60 mins" begin
            config = SplitConfig("multirate", resolution, dimensions, Coupled())

            run_split_explicit(
                config,
                timespan,
                dt_slow = 60 * 60,
                refDat = refVals.sixty_minutes,
                analytic_solution = true,
            )
        end

        @testset "Δt = 90 mins" begin
            config = SplitConfig("multirate", resolution, dimensions, Coupled())

            run_split_explicit(
                config,
                timespan,
                dt_slow = 90 * 60,
                refDat = refVals.ninety_minutes,
                analytic_solution = true,
            )
        end
    end
end

================================================
FILE: test/Ocean/SplitExplicit/test_spindown_short.jl
================================================
#!/usr/bin/env julia --project

include("hydrostatic_spindown.jl")

ClimateMachine.init()

const FT = Float64

#################
# RUN THE TESTS #
#################
# Hydrostatic spin-down, short run: a single coupled multi-rate case.
# NOTE(review): the refDat check is commented out below, so this script
# only exercises the run and the analytic-solution comparison.
@testset "$(@__FILE__)" begin
    include("../refvals/hydrostatic_spindown_refvals.jl")

    # simulation time
    timeend = FT(24 * 3600) # s
    tout = FT(1.5 * 3600)   # s
    timespan = (tout, timeend)

    # DG polynomial order
    N = Int(4)

    # Domain resolution (number of elements per direction)
    Nˣ = Int(5)
    Nʸ = Int(5)
    Nᶻ = Int(8)
    resolution = (N, Nˣ, Nʸ, Nᶻ)

    # Domain size
    Lˣ = 1e6 # m
    Lʸ = 1e6 # m
    H = 400  # m
    dimensions = (Lˣ, Lʸ, H)

    # one impenetrable and one penetrable free-slip boundary, both insulating
    BC = (
        OceanBC(Impenetrable(FreeSlip()), Insulating()),
        OceanBC(Penetrable(FreeSlip()), Insulating()),
    )

    config = SplitConfig(
        "spindown_bla",
        resolution,
        dimensions,
        Coupled();
        solver = SplitExplicitSolver,
        boundary_conditions = BC,
    )

    # alternative configuration kept for reference (disabled)
    #=
    BC = (
        ClimateMachine.Ocean.SplitExplicit01.OceanFloorFreeSlip(),
        ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceNoStressNoForcing(),
    )

    config = SplitConfig(
        "spindown_jmc",
        resolution,
        dimensions,
        Coupled();
        solver = SplitExplicitLSRK2nSolver,
        boundary_conditions = BC,
    )
    =#

    run_split_explicit(
        config,
        timespan;
        dt_fast = 300,     # seconds
        dt_slow = 90 * 60, # seconds
        # refDat = refVals.ninety_minutes,
        analytic_solution = true,
    )
end

================================================
FILE: test/Ocean/SplitExplicit/test_vertical_integral_model.jl
================================================
#!/usr/bin/env julia --project

using Test
using ClimateMachine
ClimateMachine.init()

using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.Mesh.Filters
using ClimateMachine.VariableTemplates
using ClimateMachine.Mesh.Grids: polynomialorders
using ClimateMachine.Ocean.HydrostaticBoussinesq
using ClimateMachine.Ocean.ShallowWater
using ClimateMachine.Ocean.SplitExplicit: VerticalIntegralModel
using ClimateMachine.Ocean.OceanProblems
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.BalanceLaws
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.MPIStateArrays
using ClimateMachine.VTK

using MPI
using LinearAlgebra
using StaticArrays
using Logging, Printf, Dates

using CLIMAParameters
using CLIMAParameters.Planet: grav
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

"""
    test_vertical_integral_model(FT, time; refDat = ())

Build matching 2D/3D simple-box setups, initialize both at `time`, run the
`VerticalIntegralModel` auxiliary-state update, and record the vertically
integrated 3D velocity ("∫u") next to the 2D transport ("U") with StateCheck.
When `refDat` is non-empty, the recorded statistics are `@test`ed against it.
Relies on the module-level grid/domain constants defined below
(`xrange`, `yrange`, `zrange`, `N`, `Lˣ`, `Lʸ`, `H`).
"""
function test_vertical_integral_model(::Type{FT}, time; refDat = ()) where {FT}
    mpicomm = MPI.COMM_WORLD
    ArrayType = ClimateMachine.array_type()

    # horizontally periodic 2D grid for the shallow-water (barotropic) model
    brickrange_2D = (xrange, yrange)
    topl_2D = BrickTopology(
        mpicomm,
        brickrange_2D,
        periodicity = (true, true),
        boundary = ((0, 0), (0, 0)),
    )
    grid_2D = DiscontinuousSpectralElementGrid(
        topl_2D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    # stacked 3D grid, periodic horizontally, walls (tags 1/2) top and bottom
    brickrange_3D = (xrange, yrange, zrange)
    topl_3D = StackedBrickTopology(
        mpicomm,
        brickrange_3D;
        periodicity = (true, true, false),
        boundary = ((0, 0), (0, 0), (1, 2)),
    )
    grid_3D = DiscontinuousSpectralElementGrid(
        topl_3D,
        FloatType = FT,
        DeviceArray = ArrayType,
        polynomialorder = N,
    )

    problem = SimpleBox{FT}(Lˣ, Lʸ, H)

    # 3D model with rotation, diffusion, and thermal expansion switched off
    model_3D = HydrostaticBoussinesqModel{FT}(
        param_set,
        problem;
        cʰ = FT(1),
        αᵀ = FT(0),
        κʰ = FT(0),
        κᶻ = FT(0),
        fₒ = FT(0),
        β = FT(0),
    )

    # 2D model reuses the 3D horizontal viscosity so the two stay consistent
    model_2D = ShallowWaterModel{FT}(
        param_set,
        problem,
        ShallowWater.ConstantViscosity{FT}(model_3D.νʰ),
        nothing;
        c = FT(1),
        fₒ = FT(0),
        β = FT(0),
    )

    integral_bl = VerticalIntegralModel(model_3D)

    integral_model = DGModel(
        integral_bl,
        grid_3D,
        CentralNumericalFluxFirstOrder(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    dg_3D = DGModel(
        model_3D,
        grid_3D,
        RusanovNumericalFlux(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    dg_2D = DGModel(
        model_2D,
        grid_2D,
        CentralNumericalFluxFirstOrder(),
        CentralNumericalFluxSecondOrder(),
        CentralNumericalFluxGradient(),
    )

    Q_3D = init_ode_state(dg_3D, FT(time); init_on_cpu = true)
    Q_2D = init_ode_state(dg_2D, FT(time); init_on_cpu = true)
    # the vertical integral is stored in the integral model's auxiliary state
    Q_int = integral_model.state_auxiliary

    # NOTE(review): the two entries are juxtaposed (vcat-style array literal),
    # not comma-separated — confirm against sccreate's expected argument shape
    state_check = ClimateMachine.StateCheck.sccreate(
        [
            (Q_int, "∫u")
            (Q_2D, "U")
        ],
        1;
        prec = 12,
    )

    # compute the vertical integral of the 3D state
    update_auxiliary_state!(integral_model, integral_bl, Q_3D, time)

    GenericCallbacks.call!(state_check, nothing, nothing, nothing, nothing)
    ClimateMachine.StateCheck.scprintref(state_check)
    if length(refDat) > 0
        @test ClimateMachine.StateCheck.scdocheck(state_check, refDat)
    end

    return nothing
end

#################
# RUN THE TESTS #
#################
const FT = Float64

# DG polynomial order and element counts
const N = 4
const Nˣ = 5
const Nʸ = 5
const Nᶻ = 8

# Domain size
const Lˣ = 1e6 # m
const Lʸ = 1e6 # m
const H = 400  # m

xrange = range(FT(0); length = Nˣ + 1, stop = Lˣ)
yrange = range(FT(0); length = Nʸ + 1, stop = Lʸ)
zrange = range(FT(-H); length = Nᶻ + 1, stop = 0)

const cʰ = 1 # typical of ocean internal-wave speed
const cᶻ = 0

# check the vertical-integral model at several initialization times,
# one stored reference-value set per time
@testset "$(@__FILE__)" begin
    include("../refvals/test_vertical_integral_model_refvals.jl")

    times =
        [0, 86400, 30 * 86400, 365 * 86400, 10 * 365 * 86400, 100 * 365 * 86400]
    for (index, time) in enumerate(times)
        @testset "$(time)" begin
            test_vertical_integral_model(FT, time, refDat = refVals[index])
        end
    end
end

================================================
FILE: test/Ocean/refvals/2D_hydrostatic_spindown_refvals.jl
================================================
# [
# [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ],
# [ :                   :           :        :        :     :                  ],
# ]
parr = [ ["2D state", "η", 12, 12, 0, 12], ["2D state", "U[1]", 12, 12, 0, 12], ["2D state", "U[2]", 0, 0, 0, 0], ] explicit = [ [ "2D state", "η", -8.52722969951589915283e-01, 8.52846676313531282254e-01, -2.49578135935735214742e-16, 6.03454239990563690021e-01, ], [ "2D state", "U[1]", -3.15431401945821825450e+01, 3.15431401945818628008e+01, 6.11504145930918957291e-15, 2.24273815174625497093e+01, ], [ "2D state", "U[2]", -7.62224398365580242501e-13, 9.72156930292624284356e-13, 1.39269607441935025982e-14, 1.95606703846656748360e-13, ], ] refVals = (explicit = (explicit, parr),) ================================================ FILE: test/Ocean/refvals/3D_hydrostatic_spindown_refvals.jl ================================================ # [ # [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ], # [ : : : : : : ], # ] parr = [ ["Q", "u[1]", 12, 12, 0, 12], ["Q", "u[2]", 0, 0, 0, 0], ["Q", "η", 12, 12, 0, 12], ["Q", "θ", 15, 15, 15, 15], ["s_aux", "y", 15, 15, 15, 15], ["s_aux", "w", 12, 12, 0, 12], ["s_aux", "pkin", 15, 15, 15, 15], ["s_aux", "wz0", 12, 12, 0, 12], ["aux", "uᵈ[1]", 15, 15, 15, 15], ["aux", "uᵈ[2]", 15, 15, 15, 15], ["aux", "ΔGᵘ[1]", 15, 15, 15, 15], ["aux", "ΔGᵘ[2]", 15, 15, 15, 15], ] ### fully explicit explicit = [ [ "state", "u[1]", -9.58544066049463849843e-01, 9.58544066049465071089e-01, -6.13908923696726568442e-17, 4.45400263687296238402e-01, ], [ "state", "u[2]", -4.33260855197780914020e-14, 1.99397359064900580713e-14, -1.30343101521973575132e-16, 2.95525689323845820606e-15, ], [ "state", "η", -8.52732886154656810618e-01, 8.52845586939211197652e-01, 2.20052243093959998331e-14, 6.02992088522925295813e-01, ], [ "state", "θ", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "aux", "w", -4.04553460063758398447e-04, 
4.04714358463272711169e-04, 4.75730566051879549438e-19, 1.63958655681888576441e-04, ], [ "aux", "pkin", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "wz0", -2.01164684799271339293e-04, 2.01041968159484089494e-04, -2.10942374678779754294e-20, 1.42228420244455277133e-04, ], [ "aux", "uᵈ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "uᵈ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "ΔGᵘ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "ΔGᵘ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] imex = [ [ "state", "u[1]", -9.57705359685807500192e-01, 9.57705359685806500991e-01, 4.54747350886464140750e-17, 4.45323727947873004851e-01, ], [ "state", "u[2]", -4.07596731389788922865e-14, 2.53992382675900717845e-14, 9.88087105439151860723e-18, 2.63742927063789745855e-15, ], [ "state", "η", -8.55941716778505390373e-01, 8.56014908798837681481e-01, 2.16732587432488803528e-14, 6.05260814296588400829e-01, ], [ "state", "θ", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "aux", "w", -4.03419301824637895407e-04, 4.03626959596159180614e-04, -3.33066907387546970565e-21, 1.63816237485524198066e-04, ], [ "aux", "pkin", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "wz0", -1.97016630922629948884e-04, 1.97066086934914155761e-04, -7.49400541621980656312e-19, 1.39393318574472925390e-04, ], [ "aux", "uᵈ[1]", 
0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "uᵈ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "ΔGᵘ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "aux", "ΔGᵘ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] refVals = (explicit = (explicit, parr), imex = (imex, parr)) ================================================ FILE: test/Ocean/refvals/hydrostatic_spindown_refvals.jl ================================================ # [ # [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ], # [ : : : : : : ], # ] parr = [ ["3D state", "u[1]", 12, 12, 0, 12], ["3D state", "u[2]", 0, 0, 0, 0], ["3D state", "η", 12, 12, 0, 12], ["3D state", "θ", 15, 15, 15, 15], ["3D aux", "y", 15, 15, 15, 15], ["3D aux", "w", 12, 12, 0, 12], ["3D aux", "pkin", 15, 15, 15, 15], ["3D aux", "wz0", 12, 12, 0, 12], ["3D aux", "uᵈ[1]", 12, 12, 0, 12], ["3D aux", "uᵈ[2]", 0, 0, 0, 0], ["3D aux", "ΔGᵘ[1]", 11, 11, 0, 12], ["3D aux", "ΔGᵘ[2]", 0, 0, 0, 0], ["2D state", "η", 12, 12, 0, 12], ["2D state", "U[1]", 12, 12, 0, 12], ["2D state", "U[2]", 0, 0, 0, 0], ["2D aux", "y", 15, 15, 15, 2], ["2D aux", "Gᵁ[1]", 11, 11, 0, 12], ["2D aux", "Gᵁ[2]", 0, 0, 0, 0], ["2D aux", "Δu[1]", 12, 12, 0, 12], ["2D aux", "Δu[2]", 0, 0, 0, 0], ] uncoupled = [ [ "3D state", "u[1]", -9.58547428246830479637e-01, 9.58547428246829258391e-01, -6.82121026329696195717e-18, 4.45400655325317695876e-01, ], [ "3D state", "u[2]", -4.13574641907576004779e-14, 1.84325494853789287544e-14, 1.94567057477529826177e-17, 2.12815907156924710074e-15, ], [ "3D state", "η", -8.52721734395889496838e-01, 8.52834259382379111791e-01, 2.18869899981655176688e-14, 6.02983573168680453414e-01, ], [ "3D state", "θ", 
0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "3D aux", "w", -4.04489222828922911912e-04, 4.04649979048336849198e-04, 7.79376563286859913379e-19, 1.63949585575251901345e-04, ], [ "3D aux", "pkin", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "wz0", -2.00933099660541601228e-04, 2.00809859469744893647e-04, 2.62012633811536956698e-19, 1.42064682697776552755e-04, ], [ "3D aux", "uᵈ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "uᵈ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "ΔGᵘ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "ΔGᵘ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "2D state", "η", -8.52715894965927923010e-01, 8.52861218874714666072e-01, -7.70938868299708696148e-16, 6.03458382295490314284e-01, ], [ "2D state", "U[1]", -3.15423945317149438949e+01, 3.15423945317147342848e+01, 1.42724412149908289268e-14, 2.24262219407601044452e+01, ], [ "2D state", "U[2]", -9.38673083374999972374e-13, 1.16534781393478256382e-12, 1.93385973223830695204e-14, 2.16634164433978071172e-13, ], [ "2D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "2D aux", "Gᵁ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "2D aux", "Gᵁ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 
0.00000000000000000000e+00, ], [ "2D aux", "Δu[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "2D aux", "Δu[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] coupled = [ [ "3D state", "u[1]", -9.58544854429326798062e-01, 9.58544854429326798062e-01, -4.09272615797817732838e-17, 4.45400401208546903309e-01, ], [ "3D state", "u[2]", -4.09119364541065205420e-15, 3.41954410407034240802e-15, 1.57855184364031418231e-19, 7.69982545727116047928e-16, ], [ "3D state", "η", -8.52733075123627615177e-01, 8.52843573070954374948e-01, 7.27595761418342649852e-17, 6.02992194663175995473e-01, ], [ "3D state", "θ", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "3D aux", "w", -4.04456725919283483547e-04, 4.04616610112968017130e-04, 1.33892896769793873666e-18, 1.63945200913093809460e-04, ], [ "3D aux", "pkin", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "wz0", -2.00813442787548966564e-04, 2.00686991710668984502e-04, 1.68642877440561282675e-18, 1.41979510156587984725e-04, ], [ "3D aux", "uᵈ[1]", -8.79713597641291755735e-01, 8.79713597641291200624e-01, -9.26547727431170607429e-17, 4.41871673548253185437e-01, ], [ "3D aux", "uᵈ[2]", -5.88884665747485312740e-28, 6.47654803186450692243e-28, 1.40221555861782666734e-30, 4.22950073030020620100e-29, ], [ "3D aux", "ΔGᵘ[1]", -1.50292385646252865781e-09, 1.50292385640701573972e-09, -3.03178502810731714103e-21, 6.72141424487058536370e-10, ], [ "3D aux", "ΔGᵘ[2]", -2.50330922252795548175e-19, 2.79421319642066487612e-19, 8.87468518373638336981e-36, 5.19465794381941235415e-20, ], [ "2D state", "η", -8.52733075123627615177e-01, 
8.52843573070954374948e-01, 1.61115565333602722195e-16, 6.03463098440266798583e-01, ], [ "2D state", "U[1]", -3.15402749945522060671e+01, 3.15402749945527389741e+01, 2.12974776703234167348e-14, 2.24262621677375086904e+01, ], [ "2D state", "U[2]", -1.63647745816416607002e-12, 1.36781764162808076476e-12, 6.31420737450488849608e-17, 3.08233543917742288569e-13, ], [ "2D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "2D aux", "Gᵁ[1]", -6.01169542562806285963e-07, 6.01169542585011466433e-07, 1.21270571032004377760e-18, 2.69066532005499711718e-07, ], [ "2D aux", "Gᵁ[2]", -1.11768527856826595815e-16, 1.00132368901118217729e-16, -3.50057026691823957451e-33, 2.07948587451660775232e-17, ], [ "2D aux", "Δu[1]", -6.53936429129693404076e-04, 6.53936429129569046087e-04, -2.70378337774435083119e-18, 4.64975945389895016328e-04, ], [ "2D aux", "Δu[2]", -3.76049176138251460297e-17, 3.57641761985933683411e-17, -4.12443309266433447246e-19, 7.09162842149286857838e-18, ], ] thirty_minutes = [ [ "3D state", "u[1]", -9.58527737426365766815e-01, 9.58527737426364989659e-01, -2.95585778076201651428e-17, 4.45397585011924779241e-01, ], [ "3D state", "u[2]", -3.91776258115697290002e-15, 3.71573344368902785991e-15, 9.99453214450878842160e-18, 7.55533641450465537744e-16, ], [ "3D state", "η", -8.52729582431237531637e-01, 8.52830944991449846349e-01, -2.54658516496419884307e-16, 6.02990141900936249542e-01, ], [ "3D state", "θ", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "3D aux", "w", -4.06712181304362465229e-04, 4.06869755985816939844e-04, 9.07052211118752913371e-19, 1.64290385445136678539e-04, ], [ "3D aux", "pkin", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 
0.00000000000000000000e+00, ], [ "3D aux", "wz0", -2.09003099782708142351e-04, 2.08868143992394606845e-04, 1.33781874467331362536e-18, 1.47768608464728741676e-04, ], [ "3D aux", "uᵈ[1]", -8.79792191807299950312e-01, 8.79792191807298729067e-01, -9.54969436861574689412e-17, 4.41911149114936674387e-01, ], [ "3D aux", "uᵈ[2]", -4.49650715975976729085e-28, 3.22939933074851707840e-28, 1.28559371735991545201e-30, 3.12162656228539824003e-29, ], [ "3D aux", "ΔGᵘ[1]", -1.52356619979685413105e-09, 1.52356619973800994984e-09, -3.00415481336788178489e-21, 6.81373145706173027634e-10, ], [ "3D aux", "ΔGᵘ[2]", -2.17115289587171316651e-19, 2.99271337844227832879e-19, 3.57452597678270976423e-36, 5.23982958192948877053e-20, ], [ "2D state", "η", -8.52729582431237531637e-01, 8.52830944991449846349e-01, -3.20454773827805192348e-16, 6.03461044074932395631e-01, ], [ "2D state", "U[1]", -3.15407643291233448224e+01, 3.15407643291237640426e+01, 2.30678864898692384736e-14, 2.24265574817805202201e+01, ], [ "2D state", "U[2]", -1.56710503246274425174e-12, 1.48629337747559491236e-12, 3.99781285780299048032e-15, 3.02449468686897625244e-13, ], [ "2D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "2D aux", "Gᵁ[1]", -6.09426479895203986555e-07, 6.09426479918741655731e-07, 1.20165633492970096897e-18, 2.72762104279987029506e-07, ], [ "2D aux", "Gᵁ[2]", -1.19708535137691129300e-16, 8.68461158348685235787e-17, -1.50869648123518501517e-33, 2.09756864038773538444e-17, ], [ "2D aux", "Δu[1]", -3.89449877638695009935e-03, 3.89449877638627485824e-03, -2.02396849009311295664e-17, 2.76903473119681107703e-03, ], [ "2D aux", "Δu[2]", -2.13991366909841745553e-16, 2.02991354131742782465e-16, -1.26781868574011981865e-18, 4.34606820130057938426e-17, ], ] sixty_minutes = [ [ "3D state", "u[1]", -9.58507705628042327994e-01, 9.58507705628042550039e-01, -5.22959453519433752618e-17, 4.45394100308371843067e-01, ], [ "3D state", "u[2]", 
-4.09893498131451878178e-15, 3.66595063838075513859e-15, 3.52058149555652693147e-18, 7.65972478919525466697e-16, ], [ "3D state", "η", -8.52715753705294066123e-01, 8.52819886810596838878e-01, 0.00000000000000000000e+00, 6.02986351734545844572e-01, ], [ "3D state", "θ", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "3D aux", "w", -4.09355343664679748733e-04, 4.09526445665209497954e-04, 8.01581023779363006131e-19, 1.64782726593626366188e-04, ], [ "3D aux", "pkin", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "wz0", -2.18597874938533713813e-04, 2.18512725277962983460e-04, 1.16684439888103955116e-18, 1.54581525183602542031e-04, ], [ "3D aux", "uᵈ[1]", -8.79886113674857694988e-01, 8.79886113674856806810e-01, -8.81072992342524199517e-17, 4.41958323665245178535e-01, ], [ "3D aux", "uᵈ[2]", -5.04476548888837049561e-28, 4.09418809809705127009e-28, 1.53142189509630563206e-30, 3.27579623353337388378e-29, ], [ "3D aux", "ΔGᵘ[1]", -1.12461707299293835875e-09, 1.12461707291060059378e-09, -2.89803852573586336546e-21, 5.02954103857782471789e-10, ], [ "3D aux", "ΔGᵘ[2]", -2.29220407034248886795e-19, 3.23795504633739099693e-19, 4.19082355898662551731e-36, 5.50021671320874203932e-20, ], [ "2D state", "η", -8.52715753705294066123e-01, 8.52819886810596838878e-01, -2.04281036531028799987e-17, 6.03457250948630341547e-01, ], [ "2D state", "U[1]", -3.15415180428826182890e+01, 3.15415180428829948767e+01, 1.77902803599749859788e-14, 2.24265293235027023400e+01, ], [ "2D state", "U[2]", -1.63957399252576030728e-12, 1.46638025535227397199e-12, 1.40823259822193190848e-15, 3.06628264537953242560e-13, ], [ "2D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 
2.92775877460665535182e+05, ], [ "2D aux", "Gᵁ[1]", -4.49846829164240224277e-07, 4.49846829197175353425e-07, 1.15921837490966064206e-18, 2.01338753352722549629e-07, ], [ "2D aux", "Gᵁ[2]", -1.29518201853495637566e-16, 9.16881628136995516365e-17, -2.08062063752041870574e-33, 2.20180483211723137251e-17, ], [ "2D aux", "Δu[1]", -7.71677156339491132631e-03, 7.71677156339359640591e-03, -2.97775205101297201914e-17, 5.48673197909435757247e-03, ], [ "2D aux", "Δu[2]", -5.01794629524824969143e-16, 5.51655281127317078031e-16, 3.20748218455203547025e-18, 9.36761976343254920594e-17, ], ] ninety_minutes = [ [ "3D state", "u[1]", -9.58488051495743675900e-01, 9.58488051495742121588e-01, 2.50111042987555274331e-17, 4.45390687454859546257e-01, ], [ "3D state", "u[2]", -1.23989905776825655281e-15, 1.61970487656192169626e-15, 3.83419479315953585092e-17, 3.55219724018798369983e-16, ], [ "3D state", "η", -8.52731405452109680887e-01, 8.52809353848989926128e-01, -2.45563569478690627377e-16, 6.02988963894262375298e-01, ], [ "3D state", "θ", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92775877460665535182e+05, ], [ "3D aux", "w", -4.11959288170071617242e-04, 4.12126623290091649143e-04, -2.00395255944840751533e-19, 1.65355949740609838115e-04, ], [ "3D aux", "pkin", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "3D aux", "wz0", -2.28048874139989487609e-04, 2.27949849600796259491e-04, -3.35287353436797287965e-19, 1.61271245461141072295e-04, ], [ "3D aux", "uᵈ[1]", -8.79979607451056078382e-01, 8.79979607451055745315e-01, -2.84217094304040087969e-18, 4.42005283454923181274e-01, ], [ "3D aux", "uᵈ[2]", -2.89610559829263959062e-28, 2.76495747279964637797e-28, -2.23087062731398024379e-31, 2.03879747098428489165e-29, ], [ "3D aux", "ΔGᵘ[1]", 
-8.86991425991573654091e-10, 8.86991425890194811807e-10, -2.95809316169619294608e-22, 3.96682558403758137461e-10, ], [ "3D aux", "ΔGᵘ[2]", -1.46453678699776496163e-19, 1.78310815453102811486e-19, 3.45126646034192634633e-36, 3.36942772885638802169e-20, ], [ "2D state", "η", -8.52731405452109680887e-01, 8.52809353848989926128e-01, -2.24886775868071691631e-16, 6.03459865148300189652e-01, ], [ "2D state", "U[1]", -3.15423820746574818941e+01, 3.15423820746572900475e+01, 1.25987324739451618828e-14, 2.24266774102466719398e+01, ], [ "2D state", "U[2]", -4.95959623107294663097e-13, 6.47881950624733081946e-13, 1.53367791726381670695e-14, 1.42198852443335733848e-13, ], [ "2D aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.93004519336559635121e+05, ], [ "2D aux", "Gᵁ[1]", -3.54796570356077906525e-07, 3.54796570396629450056e-07, 1.18322201808542668922e-19, 1.58796938275634264439e-07, ], [ "2D aux", "Gᵁ[2]", -7.13243261812411242092e-17, 5.85814714799106030876e-17, -1.27696859032651283100e-33, 1.34882362672174716756e-17, ], [ "2D aux", "Δu[1]", -1.14629123365972158261e-02, 1.14629123365972678678e-02, 2.41175081207994926868e-18, 8.15149713200666835300e-03, ], [ "2D aux", "Δu[2]", -7.92579521344275918936e-16, 6.08374854985514025679e-16, -3.41928350044524220733e-18, 1.39041335489639128848e-16, ], ] refVals = ( uncoupled = (uncoupled, parr), coupled = (coupled, parr), thirty_minutes = (thirty_minutes, parr), sixty_minutes = (sixty_minutes, parr), ninety_minutes = (ninety_minutes, parr), ) ================================================ FILE: test/Ocean/refvals/simple_box_2dt_refvals.jl ================================================ refVals = [] refPrecs = [] #! format: off # SC ========== Test number 1 reference values and precision match template. 
======= # SC ========== /home/jmc/cliMa/cliMa_update/test/Ocean/SplitExplicit/simple_box_2dt.jl test reference values ====================================== # BEGIN SCPRINT # varr - reference values (from reference run) # parr - digits match precision (hand edit as needed) # # [ # [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ], # [ : : : : : : ], # ] varr = [ [ "oce Q_3D", "u[1]", -1.65970172440775332046e-01, 1.61603740424529351838e-01, 4.57235954923597420763e-03, 2.21382850057524928344e-02 ], [ "oce Q_3D", "u[2]", -2.16238066786232502325e-01, 2.44156974607258908661e-01, -2.57322116506966246802e-03, 2.49380331699317753236e-02 ], [ "oce Q_3D", "η", -7.98711302045854054654e-01, 3.47810107484795683064e-01, -2.92595480887335743312e-04, 2.91360229800833869795e-01 ], [ "oce Q_3D", "θ", 3.55385987992295690474e-03, 9.91576780951165659417e+00, 2.49883145933055006438e+00, 2.17846939512263038097e+00 ], [ "oce aux", "w", -1.66960462888799841472e-04, 1.65914321067276468273e-04, 5.22671200400887584530e-07, 1.42481192436579197488e-05 ], [ "oce aux", "pkin", -8.84676906676938301644e+00, 0.00000000000000000000e+00, -3.26734804711110893294e+00, 2.49685702267286213640e+00 ], [ "oce aux", "wz0", -2.12828008736777837579e-05, 3.08227212254399767725e-05, -1.55080158053877423592e-10, 7.82426303256959697840e-06 ], [ "oce aux", "u_d[1]", -1.56747674354107358052e-01, 1.23337664405837849069e-01, -7.85428994855595028748e-05, 1.15205292893568473495e-02 ], [ "oce aux", "u_d[2]", -2.12928714468491042666e-01, 2.30595749843429731474e-01, 2.75559843466661209680e-05, 1.75135782183724539318e-02 ], [ "oce aux", "ΔGu[1]", -2.33711927426000212944e-06, 3.31423592619396043514e-06, -4.04240250953691765080e-08, 3.37695011150041160627e-07 ], [ "oce aux", "ΔGu[2]", -2.38751859804088027546e-06, 2.23171788862484417825e-06, 1.26953194368922374152e-06, 7.64696321260357341382e-07 ], [ "oce aux", "y", 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 
1.15573163901915703900e+06 ], [ "baro Q_2D", "U[1]", -1.77749182159153633620e+01, 6.89466489062268976795e+01, 4.64951906527867731000e+00, 1.81233032109943827948e+01 ], [ "baro Q_2D", "U[2]", -3.48869693974309527107e+01, 8.56944551257798678989e+01, -2.60002834745782607229e+00, 1.80715674809898771969e+01 ], [ "baro Q_2D", "η", -7.98718706462308802863e-01, 3.47811046235604659493e-01, -2.92587313874264492736e-04, 2.91375274868600042666e-01 ], [ "baro aux", "Gᵁ[1]", -3.31423592619396051306e-03, 2.33711927426000221075e-03, 4.04240250953691941898e-05, 3.37711728311069743838e-04 ], [ "baro aux", "Gᵁ[2]", -2.23171788862484409693e-03, 2.38751859804088020431e-03, -1.26953194368922394480e-03, 7.64734176576895561574e-04 ], [ "baro aux", "U_c[1]", -1.77747948208394639380e+01, 6.89456621278419703458e+01, 4.64949958798823903550e+00, 1.81231282979409442646e+01 ], [ "baro aux", "U_c[2]", -3.48859937354993405734e+01, 8.56944690523459655651e+01, -2.59996531383233087098e+00, 1.80715328159406638520e+01 ], [ "baro aux", "η_c", -7.98711302045854054654e-01, 3.47810107484795683064e-01, -2.92595480887348482688e-04, 2.91374653217579937525e-01 ], [ "baro aux", "U_s[1]", -1.77749182159153633620e+01, 6.89466489062268976795e+01, 4.64951906527867731000e+00, 1.81233032109943827948e+01 ], [ "baro aux", "U_s[2]", -3.48869693974309527107e+01, 8.56944551257798678989e+01, -2.60002834745782607229e+00, 1.80715674809898771969e+01 ], [ "baro aux", "η_s", -7.98718706462308802863e-01, 3.47811046235604659493e-01, -2.92587313874264492736e-04, 2.91375274868600042666e-01 ], [ "baro aux", "Δu[1]", -4.52063405565574774267e-04, 3.99219316002806577735e-04, -1.09219330063404869891e-05, 1.02341200736279461128e-04 ], [ "baro aux", "Δu[2]", -2.42431689759263984492e-04, 3.83040345117153786629e-04, 5.18877254212526286387e-05, 7.47027376508314802477e-05 ], [ "baro aux", "η_diag", -7.98659935505348972384e-01, 3.47918275378131136577e-01, -2.91845106891554467807e-04, 2.91369719089321743688e-01 ], [ "baro aux", "Δη", 
-5.74411984127720653959e-04, 7.42725686140199847785e-04, -7.50373995767764799686e-07, 1.62142594044870064609e-04 ], [ "baro aux", "y", 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.15578885204060329124e+06 ], ] parr = [ [ "oce Q_3D", "u[1]", 12, 12, 12, 12 ], [ "oce Q_3D", "u[2]", 12, 12, 12, 12 ], [ "oce Q_3D", "η", 12, 12, 8, 12 ], [ "oce Q_3D", "θ", 12, 12, 12, 12 ], [ "oce aux", "w", 12, 12, 8, 12 ], [ "oce aux", "pkin", 12, 12, 12, 12 ], [ "oce aux", "wz0", 12, 12, 8, 12 ], [ "oce aux", "u_d[1]", 12, 12, 12, 12 ], [ "oce aux", "u_d[2]", 12, 12, 12, 12 ], [ "oce aux", "ΔGu[1]", 12, 12, 12, 12 ], [ "oce aux", "ΔGu[2]", 12, 12, 12, 12 ], [ "oce aux", "y", 12, 12, 12, 12 ], [ "baro Q_2D", "U[1]", 12, 12, 12, 12 ], [ "baro Q_2D", "U[2]", 12, 12, 12, 12 ], [ "baro Q_2D", "η", 12, 12, 8, 12 ], [ "baro aux", "Gᵁ[1]", 12, 12, 12, 12 ], [ "baro aux", "Gᵁ[2]", 12, 12, 12, 12 ], [ "baro aux", "U_c[1]", 12, 12, 12, 12 ], [ "baro aux", "U_c[2]", 12, 12, 12, 12 ], [ "baro aux", "η_c", 12, 12, 8, 12 ], [ "baro aux", "U_s[1]", 12, 12, 12, 12 ], [ "baro aux", "U_s[2]", 12, 12, 12, 12 ], [ "baro aux", "η_s", 12, 12, 8, 12 ], [ "baro aux", "Δu[1]", 12, 12, 12, 12 ], [ "baro aux", "Δu[2]", 12, 12, 12, 12 ], [ "baro aux", "η_diag", 12, 12, 8, 12 ], [ "baro aux", "Δη", 9, 9, 6, 10 ], [ "baro aux", "y", 12, 12, 12, 12 ], ] # END SCPRINT # SC ==================================================================================== append!(refVals ,[ varr ] ) append!(refPrecs,[ parr ] ) #! format: on ================================================ FILE: test/Ocean/refvals/simple_box_ivd_refvals.jl ================================================ refVals = [] refPrecs = [] #! format: off # SC ========== Test number 1 reference values and precision match template. 
======= # SC ========== /home/jmc/cliMa/cliMa_update/test/Ocean/SplitExplicit/simple_box_ivd.jl test reference values ====================================== # BEGIN SCPRINT # varr - reference values (from reference run) # parr - digits match precision (hand edit as needed) # # [ # [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ], # [ : : : : : : ], # ] varr = [ [ "oce Q_3D", "u[1]", -1.66327525125104291881e-01, 1.63223287218068308091e-01, 4.57054199141500999692e-03, 2.21420008866952226778e-02 ], [ "oce Q_3D", "u[2]", -2.16296814964942768489e-01, 2.44105976363460708267e-01, -2.57418981108877616831e-03, 2.49256704872522875938e-02 ], [ "oce Q_3D", "η", -8.06721661019456748321e-01, 3.47844179114868090608e-01, -3.03380420083551127861e-04, 2.92158779168222249023e-01 ], [ "oce Q_3D", "θ", 3.58991077492186536763e-03, 9.91901429055192807027e+00, 2.49592172215209329167e+00, 2.17949780843674956188e+00 ], [ "oce aux", "w", -1.66963788550371410252e-04, 1.64989216196191281925e-04, 5.43118005323253843601e-07, 1.44011197008218777164e-05 ], [ "oce aux", "pkin", -8.84700789178211977060e+00, 0.00000000000000000000e+00, -3.26178716377690980366e+00, 2.50117471654691314598e+00 ], [ "oce aux", "wz0", -2.12922215345145814233e-05, 3.08186566412866553883e-05, -1.53993475195290930982e-10, 7.82327443240865498639e-06 ], [ "oce aux", "u_d[1]", -1.57099271619415420398e-01, 1.22268940698781983234e-01, -7.84250724128123411372e-05, 1.15452721709468873745e-02 ], [ "oce aux", "u_d[2]", -2.12964763257990491452e-01, 2.30578860790304179806e-01, 2.76337873040526178152e-05, 1.75119371638010334902e-02 ], [ "oce aux", "ΔGu[1]", -2.33686060871737704252e-06, 3.34281015324516805469e-06, -4.07662146058456868485e-08, 3.37377005914367152159e-07 ], [ "oce aux", "ΔGu[2]", -2.46095864522509354306e-06, 2.23139964389116549296e-06, 1.29016375934756163675e-06, 7.46140899703298511983e-07 ], [ "oce aux", "y", 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 
1.15573163901915703900e+06 ], [ "baro Q_2D", "U[1]", -1.77642579751613922667e+01, 6.90024701711891168543e+01, 4.64758402961474637038e+00, 1.81125164737756669808e+01 ], [ "baro Q_2D", "U[2]", -3.48668457221064898022e+01, 8.56043954389619301537e+01, -2.60107324488959568143e+00, 1.80557567005594421516e+01 ], [ "baro Q_2D", "η", -8.06729084084832237522e-01, 3.47845108442749906263e-01, -3.03371430044830942430e-04, 2.92173862421732766226e-01 ], [ "baro aux", "Gᵁ[1]", -3.34281015324516816989e-03, 2.33686060871737691716e-03, 4.07662146058456834074e-05, 3.37393707332951826861e-04 ], [ "baro aux", "Gᵁ[2]", -2.23139964389116544213e-03, 2.46095864522509347530e-03, -1.29016375934756159609e-03, 7.46177836457347174598e-04 ], [ "baro aux", "U_c[1]", -1.77641349600227265171e+01, 6.90014815427443437557e+01, 4.64756464081321674087e+00, 1.81123414932683921563e+01 ], [ "baro aux", "U_c[2]", -3.48658691554727866446e+01, 8.56044062434442594167e+01, -2.60101037971774573521e+00, 1.80557220201615180599e+01 ], [ "baro aux", "η_c", -8.06721661019456748321e-01, 3.47844179114868090608e-01, -3.03380420083561807253e-04, 2.92173242116136766544e-01 ], [ "baro aux", "U_s[1]", -1.77642579751613922667e+01, 6.90024701711891168543e+01, 4.64758402961474637038e+00, 1.81125164737756669808e+01 ], [ "baro aux", "U_s[2]", -3.48668457221064898022e+01, 8.56043954389619301537e+01, -2.60107324488959568143e+00, 1.80557567005594421516e+01 ], [ "baro aux", "η_s", -8.06729084084832237522e-01, 3.47845108442749906263e-01, -3.03371430044830942430e-04, 2.92173862421732766226e-01 ], [ "baro aux", "Δu[1]", -4.51957131810988945505e-04, 3.99130692959488437947e-04, -1.09363921539353876238e-05, 1.02327771113594037156e-04 ], [ "baro aux", "Δu[2]", -2.42568998424025800446e-04, 3.82991529986384212410e-04, 5.18845385039475763891e-05, 7.47269801857268440920e-05 ], [ "baro aux", "η_diag", -8.06666755113148337131e-01, 3.47953196151883525911e-01, -3.02575448959868274160e-04, 2.92168272060050748795e-01 ], [ "baro aux", "Δη", 
-5.84450730272745300198e-04, 7.45283859099332701703e-04, -8.04971123704421912570e-07, 1.64194305918131369573e-04 ], [ "baro aux", "y", 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.15578885204060329124e+06 ], ] parr = [ [ "oce Q_3D", "u[1]", 12, 12, 12, 12 ], [ "oce Q_3D", "u[2]", 12, 12, 12, 12 ], [ "oce Q_3D", "η", 12, 12, 8, 12 ], [ "oce Q_3D", "θ", 12, 12, 12, 12 ], [ "oce aux", "w", 12, 12, 8, 12 ], [ "oce aux", "pkin", 12, 12, 12, 12 ], [ "oce aux", "wz0", 12, 12, 8, 12 ], [ "oce aux", "u_d[1]", 12, 12, 12, 12 ], [ "oce aux", "u_d[2]", 12, 12, 12, 12 ], [ "oce aux", "ΔGu[1]", 12, 12, 12, 12 ], [ "oce aux", "ΔGu[2]", 12, 12, 12, 12 ], [ "oce aux", "y", 12, 12, 12, 12 ], [ "baro Q_2D", "U[1]", 12, 12, 12, 12 ], [ "baro Q_2D", "U[2]", 12, 12, 12, 12 ], [ "baro Q_2D", "η", 12, 12, 8, 12 ], [ "baro aux", "Gᵁ[1]", 12, 12, 12, 12 ], [ "baro aux", "Gᵁ[2]", 12, 12, 12, 12 ], [ "baro aux", "U_c[1]", 12, 12, 12, 12 ], [ "baro aux", "U_c[2]", 12, 12, 12, 12 ], [ "baro aux", "η_c", 12, 12, 8, 12 ], [ "baro aux", "U_s[1]", 12, 12, 12, 12 ], [ "baro aux", "U_s[2]", 12, 12, 12, 12 ], [ "baro aux", "η_s", 12, 12, 8, 12 ], [ "baro aux", "Δu[1]", 12, 12, 12, 12 ], [ "baro aux", "Δu[2]", 12, 12, 12, 12 ], [ "baro aux", "η_diag", 12, 12, 8, 12 ], [ "baro aux", "Δη", 9, 9, 6, 10 ], [ "baro aux", "y", 12, 12, 12, 12 ], ] # END SCPRINT # SC ==================================================================================== append!(refVals ,[ varr ] ) append!(refPrecs,[ parr ] ) #! format: on ================================================ FILE: test/Ocean/refvals/simple_box_rk3_refvals.jl ================================================ refVals = [] refPrecs = [] #! format: off # SC ========== Test number 1 reference values and precision match template. 
======= # SC ========== /home/jmc/cliMa/cliMa_new_jmc/test/Ocean/SplitExplicit/simple_box_rk3.jl test reference values ====================================== # BEGIN SCPRINT # varr - reference values (from reference run) # parr - digits match precision (hand edit as needed) # # [ # [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ], # [ : : : : : : ], # ] varr = [ [ "oce Q_3D", "u[1]", -2.16721345244414054232e-01, 1.57840557320998220447e-01, -5.59921465916523013878e-03, 1.73941108901450175450e-02 ], [ "oce Q_3D", "u[2]", -2.19220347308738905401e-01, 2.31784478500299123693e-01, -2.15429258177158092225e-03, 2.12517725248480282563e-02 ], [ "oce Q_3D", "η", -4.69394735091986536890e-01, 2.06288744860751438459e-01, -2.16765734563377927271e-04, 1.47004018586567947180e-01 ], [ "oce Q_3D", "θ", 2.38571451036062890869e-03, 9.91209376647090323331e+00, 2.49760287312251527680e+00, 2.18031198350304133982e+00 ], [ "oce aux", "w", -1.61734977485578068895e-04, 1.50833500332270227301e-04, 5.43794052142437598582e-07, 1.38648560985387203702e-05 ], [ "oce aux", "pkin", -8.85948574009684719499e+00, 0.00000000000000000000e+00, -3.26449934601666758027e+00, 2.50111590039020725840e+00 ], [ "oce aux", "wz0", -3.66555541768163618680e-05, 1.94813350674614766593e-05, -4.61824895762852330113e-10, 9.37524239840742993843e-06 ], [ "oce aux", "u_d[1]", -1.56194066602061892857e-01, 1.21967899279333547025e-01, -2.05350727093651255306e-05, 1.15672828345751727702e-02 ], [ "oce aux", "u_d[2]", -2.09729915133787636616e-01, 2.37375904307950719163e-01, 1.58965003169083721666e-06, 1.81625473749615351515e-02 ], [ "oce aux", "ΔGu[1]", -2.09811375971404995481e-06, 3.23018047166822386688e-06, -4.22417846605267206689e-08, 2.80615396594168210400e-07 ], [ "oce aux", "ΔGu[2]", -2.18036232525426524906e-06, 2.31188192991968871615e-06, 1.27801756611837186519e-06, 7.19811396028193828918e-07 ], [ "oce aux", "y", 0.00000000000000000000e+00, 4.00000000000000046566e+06, 
2.00000000000000000000e+06, 1.15573163901915703900e+06 ], [ "baro Q_2D", "U[1]", -3.61200008824836729104e+01, 2.32336820840376567787e+01, -5.58234606367367547364e+00, 1.11708003103345792084e+01 ], [ "baro Q_2D", "U[2]", -4.02361402227773581330e+01, 4.41897774443185085147e+01, -2.15561232944511127485e+00, 1.17915802114324606009e+01 ], [ "baro Q_2D", "η", -4.69794642977587939559e-01, 2.06346018349864157582e-01, -2.16567930962150958151e-04, 1.47057680237089760666e-01 ], [ "baro aux", "Gᵁ[1]", -3.23018047166822403968e-03, 2.09811375971404989044e-03, 4.22417846605267028812e-05, 2.80629288101644008488e-04 ], [ "baro aux", "Gᵁ[2]", -2.31188192991968856707e-03, 2.18036232525426528633e-03, -1.27801756611837201427e-03, 7.19847029373728297674e-04 ], [ "baro aux", "U_c[1]", -3.60967490653019851266e+01, 2.32266714099544557826e+01, -5.58248511658658941315e+00, 1.11690207251244704167e+01 ], [ "baro aux", "U_c[2]", -4.02002845377460147347e+01, 4.41870183291104439149e+01, -2.15756908928005142201e+00, 1.17881994981494511165e+01 ], [ "baro aux", "η_c", -4.69394735091986536890e-01, 2.06288744860751438459e-01, -2.16765734563369958385e-04, 1.47011295833105265496e-01 ], [ "baro aux", "U_s[1]", -3.61200008824836729104e+01, 2.32336820840376567787e+01, -5.58234606367367547364e+00, 1.11708003103345792084e+01 ], [ "baro aux", "U_s[2]", -4.02361402227773581330e+01, 4.41897774443185085147e+01, -2.15561232944511127485e+00, 1.17915802114324606009e+01 ], [ "baro aux", "η_s", -4.69794642977587939559e-01, 2.06346018349864157582e-01, -2.16567930962150958151e-04, 1.47057680237089760666e-01 ], [ "baro aux", "Δu[1]", -1.12410191785971484528e-03, 1.98901388437166311979e-03, 1.77992355967350647386e-04, 4.24716110102865956194e-04 ], [ "baro aux", "Δu[2]", -1.40780586763535704373e-03, 1.04939722938436467460e-03, -2.11977336567820129603e-04, 2.73797449735710581031e-04 ], [ "baro aux", "η_diag", -4.69273762873680611030e-01, 2.06327225515284040647e-01, -2.15810591775846216884e-04, 1.47008240438936316208e-01 ], 
[ "baro aux", "Δη", -2.62922736242607313351e-04, 2.37963172744791451318e-04, -9.55142787533847028187e-07, 6.94341553655788138637e-05 ], [ "baro aux", "y", 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.15578885204060329124e+06 ], ] parr = [ [ "oce Q_3D", "u[1]", 12, 12, 12, 12 ], [ "oce Q_3D", "u[2]", 12, 12, 12, 12 ], [ "oce Q_3D", "η", 12, 12, 8, 12 ], [ "oce Q_3D", "θ", 12, 12, 12, 12 ], [ "oce aux", "w", 12, 12, 8, 12 ], [ "oce aux", "pkin", 12, 12, 12, 12 ], [ "oce aux", "wz0", 12, 12, 8, 12 ], [ "oce aux", "u_d[1]", 12, 12, 12, 12 ], [ "oce aux", "u_d[2]", 12, 12, 12, 12 ], [ "oce aux", "ΔGu[1]", 12, 12, 12, 12 ], [ "oce aux", "ΔGu[2]", 12, 12, 12, 12 ], [ "oce aux", "y", 12, 12, 12, 12 ], [ "baro Q_2D", "U[1]", 12, 12, 12, 12 ], [ "baro Q_2D", "U[2]", 12, 12, 12, 12 ], [ "baro Q_2D", "η", 12, 12, 8, 12 ], [ "baro aux", "Gᵁ[1]", 12, 12, 12, 12 ], [ "baro aux", "Gᵁ[2]", 12, 12, 12, 12 ], [ "baro aux", "U_c[1]", 12, 12, 12, 12 ], [ "baro aux", "U_c[2]", 12, 12, 12, 12 ], [ "baro aux", "η_c", 12, 12, 8, 12 ], [ "baro aux", "U_s[1]", 12, 12, 12, 12 ], [ "baro aux", "U_s[2]", 12, 12, 12, 12 ], [ "baro aux", "η_s", 12, 12, 8, 12 ], [ "baro aux", "Δu[1]", 12, 12, 12, 12 ], [ "baro aux", "Δu[2]", 12, 12, 12, 12 ], [ "baro aux", "η_diag", 12, 12, 8, 12 ], [ "baro aux", "Δη", 9, 9, 6, 10 ], [ "baro aux", "y", 12, 12, 12, 12 ], ] # END SCPRINT # SC ==================================================================================== append!(refVals ,[ varr ] ) append!(refPrecs,[ parr ] ) #! format: on ================================================ FILE: test/Ocean/refvals/simple_dbl_gyre_refvals.jl ================================================ refVals = [] refPrecs = [] #! format: off # SC ========== Test number 1 reference values and precision match template. 
======= # SC ========== /home/jmc/cliMa/cliMa_new_jmc/test/Ocean/SplitExplicit/simple_dbl_gyre.jl test reference values ====================================== # BEGIN SCPRINT # varr - reference values (from reference run) # parr - digits match precision (hand edit as needed) # # [ # [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ], # [ : : : : : : ], # ] varr = [ [ "oce Q_3D", "u[1]", -1.41235234130601461366e-01, 2.44137911410471170059e-01, -1.17638792697003554538e-02, 4.76422973396204429974e-02 ], [ "oce Q_3D", "u[2]", -3.71526973402696303328e-01, 2.63129391123315958811e-01, -3.84758873236049556144e-02, 4.34631279346227028526e-02 ], [ "oce Q_3D", "η", -3.62315954238604787108e+00, 3.25374835544518203889e+00, -1.15530630772209001104e-04, 1.69499420692026547819e+00 ], [ "oce Q_3D", "θ", -1.95989738813091111946e-02, 2.41980669630830433903e+01, 6.00291945213983790808e+00, 5.36502088796993170661e+00 ], [ "oce aux", "w", -1.42436775039925483770e-03, 1.17093883909037276177e-03, -4.27143547438785408186e-07, 1.58700821328606480297e-04 ], [ "oce aux", "pkin", -6.47863971232045656734e+01, 0.00000000000000000000e+00, -2.35470923626465094003e+01, 1.84884052655973327717e+01 ], [ "oce aux", "wz0", -2.03056757204373681007e-04, 2.51217746755789830913e-04, -1.04288746719621225719e-08, 9.09212328801199519022e-05 ], [ "oce aux", "u_d[1]", -1.29380048170506856131e-01, 2.41847160349357936937e-01, 1.50957373152410783811e-04, 4.55063940872466113352e-02 ], [ "oce aux", "u_d[2]", -2.31151064740037659462e-01, 3.17638906635839823878e-01, 7.38946153332456550028e-05, 3.35035482074258969543e-02 ], [ "oce aux", "ΔGu[1]", -2.28025960163159889874e-05, 3.46723882659755934413e-06, -5.56600796147506152338e-07, 2.61685583178905381934e-06 ], [ "oce aux", "ΔGu[2]", -3.00923384704360015996e-06, 1.24960695834485056166e-05, 6.54681130814259427502e-06, 3.13708534178777629812e-06 ], [ "oce aux", "y", 0.00000000000000000000e+00, 6.00000000000000093132e+06, 
3.00000000000000000000e+06, 1.73273876310492726043e+06 ], [ "baro Q_2D", "U[1]", -1.37905164694259781299e+02, 1.09296887114357033965e+02, -3.59834173192416955089e+01, 4.24653520384661575804e+01 ], [ "baro Q_2D", "U[2]", -4.93706839547863808093e+02, 5.71157285218092027890e+01, -1.15735478409127139798e+02, 8.23847078816430951065e+01 ], [ "baro Q_2D", "η", -3.62624136658312767878e+00, 3.25919860855558907176e+00, -1.15147341274860083972e-04, 1.69658109382101818241e+00 ], [ "baro aux", "Gᵁ[1]", -1.04017164797926778969e-02, 6.84077880489479678294e-02, 1.66980238844251878058e-03, 7.85082570477715034618e-03 ], [ "baro aux", "Gᵁ[2]", -3.74882087503455169175e-02, 9.02770154113080071367e-03, -1.96404339244277831300e-02, 9.41156556666298931002e-03 ], [ "baro aux", "U_c[1]", -1.36958883839043295438e+02, 1.09074110263089167461e+02, -3.57794851972332565992e+01, 4.23235946471143051895e+01 ], [ "baro aux", "U_c[2]", -4.92966277666792962009e+02, 5.72050394052077919582e+01, -1.15636183420627688179e+02, 8.22648167942200103653e+01 ], [ "baro aux", "η_c", -3.62315954238604787108e+00, 3.25374835544518203889e+00, -1.15530630772203336148e-04, 1.69504995619627174541e+00 ], [ "baro aux", "U_s[1]", -1.37905164694259781299e+02, 1.09296887114357033965e+02, -3.59834173192416955089e+01, 4.24653520384661575804e+01 ], [ "baro aux", "U_s[2]", -4.93706839547863808093e+02, 5.71157285218092027890e+01, -1.15735478409127139798e+02, 8.23847078816430951065e+01 ], [ "baro aux", "η_s", -3.62624136658312767878e+00, 3.25919860855558907176e+00, -1.15147341274860083972e-04, 1.69658109382101818241e+00 ], [ "baro aux", "Δu[1]", -4.66680666769807577128e-04, 1.05054947340137826844e-02, 3.56177517668052924515e-03, 2.34394235403509801352e-03 ], [ "baro aux", "Δu[2]", -2.98072019929583121103e-03, 4.75363215770287662193e-03, 1.22307915357422513150e-03, 1.31189027778619380499e-03 ], [ "baro aux", "η_diag", -3.62078008618114477457e+00, 3.25151939956774960194e+00, -1.15589924206263583179e-04, 1.69498993030730304987e+00 ], 
[ "baro aux", "Δη", -8.83137470915729139165e-03, 5.97876216292370088468e-03, 5.92934342415881362251e-08, 2.08693123903101827171e-03 ], [ "baro aux", "y", 0.00000000000000000000e+00, 6.00000000000000093132e+06, 3.00000000000000000000e+06, 1.73279575381979602389e+06 ], ] parr = [ [ "oce Q_3D", "u[1]", 12, 12, 12, 12 ], [ "oce Q_3D", "u[2]", 12, 12, 12, 12 ], [ "oce Q_3D", "η", 12, 12, 8, 12 ], [ "oce Q_3D", "θ", 12, 12, 12, 12 ], [ "oce aux", "w", 12, 12, 8, 12 ], [ "oce aux", "pkin", 12, 12, 12, 12 ], [ "oce aux", "wz0", 12, 12, 8, 12 ], [ "oce aux", "u_d[1]", 12, 12, 12, 12 ], [ "oce aux", "u_d[2]", 12, 12, 12, 12 ], [ "oce aux", "ΔGu[1]", 12, 12, 12, 12 ], [ "oce aux", "ΔGu[2]", 12, 12, 12, 12 ], [ "oce aux", "y", 12, 12, 12, 12 ], [ "baro Q_2D", "U[1]", 12, 12, 12, 12 ], [ "baro Q_2D", "U[2]", 12, 12, 12, 12 ], [ "baro Q_2D", "η", 12, 12, 8, 12 ], [ "baro aux", "Gᵁ[1]", 12, 12, 12, 12 ], [ "baro aux", "Gᵁ[2]", 12, 12, 12, 12 ], [ "baro aux", "U_c[1]", 12, 12, 12, 12 ], [ "baro aux", "U_c[2]", 12, 12, 12, 12 ], [ "baro aux", "η_c", 12, 12, 8, 12 ], [ "baro aux", "U_s[1]", 12, 12, 12, 12 ], [ "baro aux", "U_s[2]", 12, 12, 12, 12 ], [ "baro aux", "η_s", 12, 12, 8, 12 ], [ "baro aux", "Δu[1]", 12, 12, 12, 12 ], [ "baro aux", "Δu[2]", 12, 12, 12, 12 ], [ "baro aux", "η_diag", 12, 12, 8, 12 ], [ "baro aux", "Δη", 9, 9, 6, 10 ], [ "baro aux", "y", 12, 12, 12, 12 ], ] # END SCPRINT # SC ==================================================================================== append!(refVals ,[ varr ] ) append!(refPrecs,[ parr ] ) #! 
format: on ================================================ FILE: test/Ocean/refvals/test_ocean_gyre_refvals.jl ================================================ parr = [ ["Q", "u[1]", 12, 12, 12, 12], ["Q", "u[2]", 12, 12, 12, 12], ["Q", "η", 12, 12, 11, 12], ["Q", "θ", 12, 12, 12, 12], ["s_aux", "y", 12, 12, 12, 12], ["s_aux", "w", 12, 12, 11, 12], ["s_aux", "pkin", 12, 12, 12, 12], ["s_aux", "wz0", 12, 11, 10, 12], ["s_aux", "uᵈ[1]", 12, 12, 12, 12], ["s_aux", "uᵈ[2]", 12, 12, 12, 12], ["s_aux", "ΔGᵘ[1]", 12, 12, 12, 12], ["s_aux", "ΔGᵘ[2]", 12, 12, 12, 12], ] #! format: off short = [ [ "Q", "u[1]", -1.56752732465427965791e-02, 1.68514505893861757380e-02, -2.29247099512640793717e-03, 3.25160701902235671490e-03 ], [ "Q", "u[2]", -3.79775189267773510826e-02, 6.43003815969073189152e-03, -1.34097283250433057383e-02, 1.20337368194613283934e-02 ], [ "Q", "η", -1.53249855976142324021e-01, 1.57769367725846931805e-01, -6.47997524778634250456e-06, 1.03985821375781786746e-01 ], [ "Q", "θ", 1.07842251824037485168e-05, 9.00370181779731204585e+00, 2.49971752106072075961e+00, 2.19711465681760964586e+00 ], [ "s_aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92779390974978974555e+05 ], [ "s_aux", "w", -9.71167446044889304474e-05, 8.57892392958965760040e-05, 7.22305481630802806057e-08, 3.80815409312154189983e-05 ], [ "s_aux", "pkin", -8.99913049767501638243e-01, 0.00000000000000000000e+00, -3.32109083459310172604e-01, 2.56226532893150116266e-01 ], [ "s_aux", "wz0", -8.17753668537623428815e-05, 8.25631396299614581233e-05, -1.10719884491561329431e-09, 5.14580958965247714965e-05 ], [ "s_aux", "uᵈ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "uᵈ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 
0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], ] long = [ [ "Q", "u[1]", -7.88803234115370982549e-02, 6.54328011157656042052e-02, 2.01451525264662988784e-03, 1.24588716783014998718e-02 ], [ "Q", "u[2]", -8.71605274310923855419e-02, 1.47505267452481658719e-01, 5.63158201082450144553e-03, 1.28959719954838351180e-02 ], [ "Q", "η", -4.73491408442001826540e-01, 4.02693687285959778244e-01, -6.49059970677390036392e-05, 2.21689928090663096460e-01 ], [ "Q", "θ", 4.24292935192232840286e-04, 9.24539353455579338004e+00, 2.49938206627401893201e+00, 2.17986626392490689952e+00 ], [ "s_aux", "y", 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.15573163901915703900e+06 ], [ "s_aux", "w", -2.22086406767006932887e-04, 2.00575090959222558738e-04, 2.53168866096380013512e-07, 1.66257132341935760973e-05 ], [ "s_aux", "pkin", -9.00869877619916104017e-01, 0.00000000000000000000e+00, -3.33171488779369751043e-01, 2.54740287894525019308e-01 ], [ "s_aux", "wz0", -2.96608015795817572195e-05, 3.66759042312928121082e-05, 3.78116102337067175813e-10, 1.07073256826410757734e-05 ], [ "s_aux", "uᵈ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "uᵈ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], ] #! 
format: on refVals = (short = (short, parr), long = (long, parr)) ================================================ FILE: test/Ocean/refvals/test_vertical_integral_model_refvals.jl ================================================ # [ # [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ], # [ : : : : : : ], # ] parr = [ ["∫u", "∫x[1]", 12, 12, 0, 12], ["∫u", "∫x[2]", 15, 15, 15, 15], ["U", "η", 12, 12, 0, 12], ["U", "U[1]", 12, 12, 0, 12], ["U", "U[2]", 15, 15, 15, 15], ] initial = [ [ "∫u", "∫x[1]", -6.36104748927323058183e+01, 6.36104748927324763486e+01, 3.59432306140661235747e-14, 3.16776758596447187699e+01, ], [ "∫u", "∫x[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "U", "η", -1.00000000000000000000e+00, 1.00000000000000000000e+00, 3.73034936274052574533e-18, 7.07673146340372483110e-01, ], [ "U", "U[1]", -9.95282537582876658533e-01, 9.95282537582876547511e-01, -8.82958196287834196438e-18, 7.07673146340372150043e-01, ], [ "U", "U[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] day = [ [ "∫u", "∫x[1]", -6.40437964821475844701e+01, 6.40437964821476981570e+01, 2.21189111471176159638e-14, 2.60887556680598677872e+01, ], [ "∫u", "∫x[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "U", "η", -8.52761706341222169847e-01, 8.52761706341222169847e-01, -1.86517468137026310378e-17, 6.03476559805077306109e-01, ], [ "U", "U[1]", -3.15397076561132330141e+01, 3.15397076561132294614e+01, -2.82332564752296143530e-15, 2.24255960582435314166e+01, ], [ "U", "U[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] month = [ [ "∫u", "∫x[1]", -3.51564130159214158766e+01, 3.51564130159213874549e+01, -1.16415321826934820032e-14, 1.41614248818105643579e+01, ], [ "∫u", "∫x[2]", 
0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "U", "η", -5.30601964901526002016e-01, 5.30601964901526002016e-01, -1.24344978758017535116e-17, 3.75492761956246756672e-01, ], [ "U", "U[1]", -3.51564130159208829696e+01, 3.51564130159208758641e+01, -2.47942593560761348802e-15, 2.49971726354604832920e+01, ], [ "U", "U[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] year = [ [ "∫u", "∫x[1]", -4.25110151351051179791e-01, 4.25110151351050846724e-01, -1.36424205265939223736e-16, 1.74619680058462317662e-01, ], [ "∫u", "∫x[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "U", "η", -4.39687965160425187072e-02, 4.39687965160425187072e-02, -6.10622663543836061796e-19, 3.11155365713074068268e-02, ], [ "U", "U[1]", -4.25110151351044573964e-01, 4.25110151351044518453e-01, -1.99959679826973105365e-17, 3.02264961945818255717e-01, ], [ "U", "U[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] decade = [ [ "∫u", "∫x[1]", -1.88298126764002322427e-12, 1.88298126764002483985e-12, 5.62482816536058859320e-28, 7.73459738532225749582e-13, ], [ "∫u", "∫x[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "U", "η", -3.38352596570510824820e-15, 3.38352596570510824820e-15, 6.31088724176809474572e-34, 2.39443046587488032416e-15, ], [ "U", "U[1]", -1.88298126763999575929e-12, 1.88298126763999575929e-12, 3.78999175038214983828e-29, 1.33885125866565214937e-12, ], [ "U", "U[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] century = [ [ "∫u", "∫x[1]", -3.97994692621354800600e-134, 3.97994692621354409568e-134, -1.20124985710986044942e-149, 1.63481642745144852479e-134, ], [ 
"∫u", "∫x[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], [ "U", "η", -2.07116984864437321907e-136, 2.07116984864437321907e-136, -5.62108290883927252001e-153, 1.46571128339547670108e-136, ], [ "U", "U[1]", -3.97994692621348592969e-134, 3.97994692621348544091e-134, -4.15669815960076969677e-150, 2.82985128060348631090e-134, ], [ "U", "U[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, ], ] refVals = ( intitial = (initial, parr), day = (day, parr), month = (month, parr), year = (year, parr), decade = (decade, parr), century = (century, parr), ) ================================================ FILE: test/Ocean/refvals/test_windstress_refvals.jl ================================================ parr = [ ["Q", "u[1]", 12, 12, 12, 12], ["Q", "u[2]", 12, 12, 12, 12], ["Q", "η", 12, 12, 11, 12], ["Q", "θ", 12, 12, 12, 0], ["s_aux", "y", 12, 12, 12, 12], ["s_aux", "w", 12, 12, 12, 12], ["s_aux", "pkin", 12, 12, 12, 12], ["s_aux", "wz0", 12, 12, 10, 12], ["s_aux", "uᵈ[1]", 12, 12, 12, 12], ["s_aux", "uᵈ[2]", 12, 12, 12, 12], ["s_aux", "ΔGᵘ[1]", 12, 12, 12, 12], ["s_aux", "ΔGᵘ[2]", 12, 12, 12, 12], ] #! 
format: off imex = [ [ "Q", "u[1]", -3.79026877309730850230e-02, 3.77520487392776424307e-02, -1.40391503429245286812e-06, 5.33572816364154614566e-03 ], [ "Q", "u[2]", -7.13128520899746973227e-03, 6.54348134460207026680e-03, -5.44654209456798120995e-06, 9.83670484102493409076e-04 ], [ "Q", "η", -5.75156897888350494147e-03, 5.06823909948787842961e-03, -1.61345029906403004787e-05, 1.58599716799621352596e-03 ], [ "Q", "θ", 1.99999999999999928946e+01, 2.00000000000002948752e+01, 2.00000000000000426326e+01, 6.30374859517183302003e-14 ], [ "s_aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92779390974978974555e+05 ], [ "s_aux", "w", -1.93125254703910637023e-05, 1.89413928194037212366e-05, 6.96287201311761918867e-08, 1.82962329428446588302e-06 ], [ "s_aux", "pkin", -1.60000000000000408562e+00, 0.00000000000000000000e+00, -8.00000000000002042810e-01, 4.68447025559967145103e-01 ], [ "s_aux", "wz0", -1.78460606500689411704e-06, 1.42096532423882541587e-06, -3.15575167929213483841e-09, 7.00511144160156445126e-07 ], [ "s_aux", "uᵈ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "uᵈ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], ] explicit_cpu = [ [ "Q", "u[1]", -3.74270752639261211625e-02, 3.72763215301363109999e-02, -1.40392694287316997219e-06, 5.29521849931491837837e-03 ], [ "Q", "u[2]", -7.13776376792300999707e-03, 6.54949226335132476257e-03, -5.45004642311143043000e-06, 9.84283148803805074678e-04 ], [ "Q", "η", -5.75146380759523553894e-03, 5.06819905742867966164e-03, -1.61399463692112299070e-05, 
1.58594255118538803723e-03 ], [ "Q", "θ", 1.99999999999996518341e+01, 2.00000000000013891110e+01, 2.00000000000004192202e+01, 2.61373866244148208458e-13 ], [ "s_aux", "y", 0.00000000000000000000e+00, 1.00000000000000011642e+06, 5.00000000000000000000e+05, 2.92779390974978974555e+05 ], [ "s_aux", "w", -1.91903846873268650749e-05, 1.88201368003043060118e-05, 6.88331165208082022114e-08, 1.81657738735220659856e-06 ], [ "s_aux", "pkin", -1.60000000000003339551e+00, 0.00000000000000000000e+00, -8.00000000000017919000e-01, 4.68447025559975749331e-01 ], [ "s_aux", "wz0", -1.79359066322475932405e-06, 1.42978124248321576704e-06, -3.01455602004428728418e-09, 6.97246485535118604352e-07 ], [ "s_aux", "uᵈ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "uᵈ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], ] explicit_gpu = deepcopy(explicit_cpu) explicit_gpu[3][5] = -1.61399463694379806270e-05 # CPU value: -1.61399463692112299070e-05 -> Δ = 2.267507199799762e-16 explicit_gpu[6][5] = 6.88331165196929377526e-08 # CPU value: 6.88331165208082022114e-08 -> Δ = -1.1152644588480957e-18 long = [ [ "Q", "u[1]", -1.26047572708648997208e-01, 8.73949752192989676169e-02, 4.77293205335180186562e-05, 7.01620954203814161526e-03 ], [ "Q", "u[2]", -7.74039642996040555545e-02, 1.17079049312627345159e-01, -1.81789027475552530900e-04, 1.05959299008928087282e-02 ], [ "Q", "η", -7.88344390091704622092e-02, 3.98295576513588017731e-02, -9.41579560194082235179e-05, 2.80408708312616314351e-02 ], [ "Q", "θ", 1.99999999999542694695e+01, 2.00000000000501145792e+01, 2.00000000000023661073e+01, 
1.20202839114602287074e-12 ], [ "s_aux", "y", 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.15573129229947482236e+06 ], [ "s_aux", "w", -2.39010107630067514077e-05, 6.21800814928817288142e-05, 2.57411032722755278831e-07, 4.26276984128505141513e-06 ], [ "s_aux", "pkin", -1.60000000000220565788e+00, 0.00000000000000000000e+00, -8.00000000000101851860e-01, 4.61946285916540189120e-01 ], [ "s_aux", "wz0", -2.51713525692209742640e-06, 9.22709404863107399641e-07, -8.84750036743458214697e-10, 5.52781925119928323386e-07 ], [ "s_aux", "uᵈ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "uᵈ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[1]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], [ "s_aux", "ΔGᵘ[2]", 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00, 0.00000000000000000000e+00 ], ] #! 
format: on
# Bundle each solver-variant's reference table with the shared precision
# table `parr`; consumers select a variant by NamedTuple key.
refVals = (
    explicit_cpu = (explicit_cpu, parr),
    explicit_gpu = (explicit_gpu, parr),
    imex = (imex, parr),
    long = (long, parr),
)
================================================ FILE: test/Ocean/runtests.jl ================================================
using MPI, Test

include("../testhelpers.jl")

# Ocean test suite entry point: MPI-driven scripts are launched through
# `runmpi` (from testhelpers.jl); the remaining tests run in-process via
# plain `include`.
@testset "Ocean" begin
    runmpi(joinpath(@__DIR__, "HydrostaticBoussinesq/test_ocean_gyre_short.jl"))
    runmpi(joinpath(@__DIR__, "SplitExplicit/test_spindown_short.jl"))
    include(joinpath("OceanProblems", "test_initial_value_problem.jl"))
    include(joinpath(
        "HydrostaticBoussinesqModel",
        "test_hydrostatic_boussinesq_model.jl",
    ))
end
================================================ FILE: test/Utilities/SingleStackUtils/runtests.jl ================================================
# SingleStackUtils test entry point; wrapped in a module so the suite's
# definitions do not leak into `Main`.
module TestSingleStackUtils
using MPI, Test

include(joinpath("..", "..", "testhelpers.jl"))

@testset "SingleStackUtils" begin
    runmpi(joinpath(@__DIR__, "ssu_tests.jl"))
end
end
================================================ FILE: test/Utilities/SingleStackUtils/ssu_tests.jl ================================================
using OrderedCollections
using StaticArrays
using Test

using CLIMAParameters
# Minimal concrete parameter set for the test balance law defined later in
# this file.
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using ClimateMachine
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.Grids
using ClimateMachine.MPIStateArrays
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils
using ClimateMachine.DGMethods: LocalGeometry
using ClimateMachine.BalanceLaws:
    BalanceLaw, Auxiliary, Prognostic, Gradient, GradientFlux
# `import` (not `using`) because this file adds methods to these functions
# for its own balance law type.
import ClimateMachine.BalanceLaws:
    vars_state, nodal_init_state_auxiliary!, init_state_prognostic!
# Minimal balance law used to exercise SingleStackUtils: the auxiliary state
# stores the nodal coordinates and the prognostic state carries a single
# density ρ; no tendencies are defined.
struct EmptyBalLaw{FT, PS} <: BalanceLaw
    "Parameters"
    param_set::PS
    "Domain height"
    zmax::FT
end
EmptyBalLaw(param_set, zmax) =
    EmptyBalLaw{typeof(zmax), typeof(param_set)}(param_set, zmax)

vars_state(::EmptyBalLaw, ::Auxiliary, FT) = @vars(x::FT, y::FT, z::FT)
vars_state(::EmptyBalLaw, ::Prognostic, FT) = @vars(ρ::FT)
vars_state(::EmptyBalLaw, ::Gradient, FT) = @vars()
vars_state(::EmptyBalLaw, ::GradientFlux, FT) = @vars()

# Store the nodal coordinates in the auxiliary state so the initial
# condition below can be written in terms of (x, y, z).
function nodal_init_state_auxiliary!(
    m::EmptyBalLaw,
    aux::Vars,
    tmp::Vars,
    geom::LocalGeometry,
)
    aux.x = geom.coord[1]
    aux.y = geom.coord[2]
    aux.z = geom.coord[3]
end

# ρ(x, y, z) = (1 - 4 (z - zmax/2)^2) * (2 - x - y): a parabola in z
# (vanishing at z = 0 and z = zmax when zmax = 1) scaled by a plane in x, y.
function init_state_prognostic!(
    m::EmptyBalLaw,
    state::Vars,
    aux::Vars,
    coords,
    t::Real,
)
    z = aux.z
    x = aux.x
    y = aux.y
    state.ρ = (1 - 4 * (z - m.zmax / 2)^2) * (2 - x - y)
end

# The horizontal mean of ρ must match the analytic profile from
# `target_meanprof`.
function test_hmean(
    grid::DiscontinuousSpectralElementGrid{T, dim, Ns},
    Q::MPIStateArray,
    vars,
) where {T, dim, Ns}
    state_vars_avg = get_horizontal_mean(grid, Q, vars)
    target = target_meanprof(grid)
    @test state_vars_avg["ρ"] ≈ target
end

# The horizontal variance of ρ must match the analytic profile from
# `target_varprof`.
function test_hvar(
    grid::DiscontinuousSpectralElementGrid{T, dim, Ns},
    Q::MPIStateArray,
    vars,
) where {T, dim, Ns}
    state_vars_var = get_horizontal_variance(grid, Q, vars)
    target = target_varprof(grid)
    @test state_vars_var["ρ"] ≈ target
end

# After `horizontally_average!`, the horizontal variance of ρ must vanish
# (to within roundoff). Works on a deep copy so the caller's state is
# untouched.
function test_horizontally_ave(
    grid::DiscontinuousSpectralElementGrid{T, dim, Ns},
    Q_in::MPIStateArray,
    vars,
) where {T, dim, Ns}
    Q = deepcopy(Q_in)
    state_vars_var = get_horizontal_variance(grid, Q, vars)
    i_vars = varsindex(vars, :ρ)
    horizontally_average!(grid, Q, i_vars)
    FT = eltype(Q)
    state_vars_var = get_horizontal_variance(grid, Q, vars)
    @test all(isapprox.(state_vars_var["ρ"], 0, atol = 10 * eps(FT)))
end

# Analytic horizontal-mean profile of ρ: only the vertical factor of the
# initial condition survives (the horizontal factor (2 - x - y) is assumed
# to average to 1 over the horizontal domain — TODO confirm against the
# SingleStackConfiguration used in `main`).
function target_meanprof(
    grid::DiscontinuousSpectralElementGrid{T, dim, Ns},
) where {T, dim, Ns}
    Nqs = Ns .+ 1
    Nq_v = Nqs[end]
    Ntot = Nq_v * grid.topology.stacksize
    z = Array(get_z(grid))
    target = SVector{Ntot, T}([1.0 - 4.0 * (z_i - z[Ntot] / 2.0)^2 for z_i in z])
    return target
end

# Analytic horizontal-variance profile of ρ at each vertical node:
# (vertical factor)^2 * (mean of squares of the horizontal factor - 1).
function target_varprof(
    grid::DiscontinuousSpectralElementGrid{T, dim, Ns},
) where {T, dim, Ns}
    Nqs = Ns .+ 1
    Nq_v = Nqs[end]
    nvertelem = grid.topology.stacksize
    Ntot = Nq_v * nvertelem
    z = Array(get_z(grid))
    # Rescale the first element's nodal z-coordinates to serve as the
    # horizontal nodal coordinates — presumes a unit-square horizontal
    # cross-section matching the vertical element layout; TODO confirm.
    x = z[1:Nq_v] * nvertelem
    # Accumulate Σ_{i,j} (2 - x_i - x_j)^2 over the Nq_v × Nq_v horizontal
    # nodal points.
    scaled_var = 0.0
    for i in 1:Nq_v
        for j in 1:Nq_v
            scaled_var = scaled_var + (2 - x[i] - x[j]) * (2 - x[i] - x[j])
        end
    end
    target = SVector{Ntot, Float64}([
        (1.0 - 4.0 * (z_i - z[Ntot] / 2.0)^2) *
        (1.0 - 4.0 * (z_i - z[Ntot] / 2.0)^2) *
        (scaled_var / Nq_v / Nq_v - 1) for z_i in z
    ])
    return target
end

# Build a single-stack configuration, solve, and run all SingleStackUtils
# checks. The literal values asserted below are regression values for
# N_poly = 5, nelem_vert = 20.
function main()
    FT = Float64
    ClimateMachine.init()
    m = EmptyBalLaw(param_set, FT(1))
    # Prescribe polynomial order of basis functions in finite elements
    N_poly = 5
    # Specify the number of vertical elements
    nelem_vert = 20
    # Specify the domain height
    zmax = m.zmax
    # Initial and final times
    t0 = 0.0
    timeend = 1.0
    dt = 0.1
    # Establish a `ClimateMachine` single stack configuration
    driver_config = ClimateMachine.SingleStackConfiguration(
        "SingleStackUtilsTest",
        N_poly,
        nelem_vert,
        zmax,
        param_set,
        m,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
    )
    # tests
    test_hmean(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
    )
    test_hvar(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
    )
    # Reduce ρ over the stack at nodal point (i = 6, j = 6) with `max`.
    r1, z1 = reduce_nodal_stack(
        max,
        solver_config.dg.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
        "ρ",
        i = 6,
        j = 6,
    )
    @test r1 ≈ 8.880558532968455e-16 && z1 == 10
    # Reduce ρ over the stack at nodal point (i = 3, j = 3) with `+`.
    r2, z2 = reduce_nodal_stack(
        +,
        solver_config.dg.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
        "ρ",
        i = 3,
        j = 3,
    )
    @test r2 ≈ 102.73283921735293 && z2 == 20
    # Per-nodal-point stack reductions, then fold the sums together.
    ns = reduce_element_stack(
        +,
        solver_config.dg.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
        "ρ",
    )
    (r3, z3) = let
        f(a, b) = (a[1] + b[1], b[2])
        reduce(f, ns)
    end
    @test r3 ≈ FT(2877.6) && z3 == 20
    test_horizontally_ave(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
    )
    # Test NodalStack iterator (without interpolation)
    nodal_stack = NodalStack(solver_config; interp = false)
    ρ_iter = [local_states.prog.ρ for local_states in nodal_stack]
    dons = get_vars_from_nodal_stack(
        solver_config.dg.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT);
        interp = false,
    )
    ρ_vfns = dons["ρ"]
    @test all(ρ_iter .≈ ρ_vfns)
    # Test NodalStack iterator (with interpolation)
    nodal_stack = NodalStack(solver_config; interp = true)
    ρ_iter = [local_states.prog.ρ for local_states in nodal_stack]
    dons = get_vars_from_nodal_stack(
        solver_config.dg.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT);
        interp = true,
    )
    ρ_vfns_interp = dons["ρ"]
    @test all(ρ_iter .≈ ρ_vfns_interp)
    return nothing
end

@testset "Single Stack Utils" begin
    main()
end

================================================
FILE: test/Utilities/TicToc/runtests.jl
================================================
module TestTicToc
using Test
using ClimateMachine.TicToc

# Two instrumented functions with known sleep durations, so the recorded
# call counts and cumulative times (in nanoseconds) can be bounded below.
function foo()
    @tic foo
    sleep(0.25)
    @toc foo
end
function bar()
    @tic bar
    sleep(1)
    @toc bar
end

# The whole testset is conditional: timers only register when the TicToc
# machinery is compiled in.
if TicToc.tictoc_enabled
    @testset "TicToc" begin
        @test tictoc() >= 2
        foo_i = findfirst(s -> s == :tictoc__foo, TicToc.timing_info_names)
        bar_i = findfirst(s -> s == :tictoc__bar, TicToc.timing_info_names)
        @test foo_i != nothing
        @test bar_i != nothing
        foo()
        foo()
        @test TicToc.timing_infos[foo_i].ncalls == 2
        @test TicToc.timing_infos[bar_i].ncalls == 0
        # two 0.25 s sleeps >= 5e8 ns
        @test TicToc.timing_infos[foo_i].time >= 5e8
        bar()
        @test TicToc.timing_infos[bar_i].ncalls == 1
        # one 1 s sleep >= 1e9 ns
        @test TicToc.timing_infos[bar_i].time >= 1e9
        # Capture print_timing_info()'s stdout to check both timer names
        # appear in the report.
        buf = IOBuffer()
        old_stdout = stdout
        try
            rd, = redirect_stdout()
            TicToc.print_timing_info()
            Libc.flush_cstdio()
            flush(stdout)
            write(buf, readavailable(rd))
        finally
            redirect_stdout(old_stdout)
        end
        str = String(take!(buf))
        @test findnext("tictoc__foo", str, 1) != nothing
        @test findnext("tictoc__bar", str, 1) != nothing
    end
end
end # module TestTicToc

================================================
FILE: test/Utilities/VariableTemplates/complex_models.jl
================================================
using Test
using StaticArrays

# Model hierarchy used to exercise VariableTemplates on nested states:
# one-layer "leaf" models hold a scalar/vector/matrix variable; two-layer
# models compose leaves (including an NTuple of sub-models).
abstract type AbstractModel end
abstract type OneLayerModel <: AbstractModel end

# Leaf with no state variables.
struct EmptyModel <: OneLayerModel end
state(m::EmptyModel, T) = @vars()

# Leaf with a single scalar variable `x`.
struct ScalarModel <: OneLayerModel end
state(m::ScalarModel, T) = @vars(x::T)

# Leaf with a length-N static vector variable `x`.
struct VectorModel{N} <: OneLayerModel end
state(m::VectorModel{N}, T) where {N} = @vars(x::SVector{N, T})

# Leaf with an N×N symmetric (compact, M stored entries) matrix variable `x`.
struct MatrixModel{N, M} <: OneLayerModel end
state(m::MatrixModel{N, M}, T) where {N, M} =
    @vars(x::SHermitianCompact{N, T, M})

abstract type TwoLayerModel <: AbstractModel end

# One instance of each leaf kind, nested one level deep.
struct CompositModel{EM, SM, VM, MM} <: TwoLayerModel
    empty_model::EM
    scalar_model::SM
    vector_model::VM
    matrix_model::MM
end
function CompositModel(
    Nv,
    N,
    M;
    empty_model = EmptyModel(),
    scalar_model = ScalarModel(),
    vector_model = VectorModel{Nv}(),
    matrix_model = MatrixModel{N, M}(),
)
    args = (empty_model, scalar_model, vector_model, matrix_model)
    return CompositModel{typeof.(args)...}(args...)
end
function state(m::CompositModel, T)
    @vars begin
        empty_model::state(m.empty_model, T)
        scalar_model::state(m.scalar_model, T)
        vector_model::state(m.vector_model, T)
        matrix_model::state(m.matrix_model, T)
    end
end

# Scalar + vector pair; used as the element type of an NTuple of models.
Base.@kwdef struct NTupleModel{S, V, Nv} <: OneLayerModel
    scalar_model::S = ScalarModel()
    vector_model::V = VectorModel{Nv}()
end
function NTupleModel(
    Nv;
    scalar_model::S = ScalarModel(),
    vector_model::V = VectorModel{Nv}(),
) where {S, V}
    return NTupleModel{S, V, Nv}(scalar_model, vector_model)
end
function state(m::NTupleModel, T)
    @vars begin
        scalar_model::state(m.scalar_model, T)
        vector_model::state(m.vector_model, T)
    end
end
# State of an NTuple of models is the Tuple of the per-element states.
state(m::NTuple{N, NTupleModel}, FT) where {N} =
    Tuple{ntuple(i -> state(m[i], FT), N)...}

# Two-layer model containing an NTuple of N NTupleModels plus a vector and
# a scalar leaf.
struct NTupleContainingModel{N, NTM, VM, SM} <: TwoLayerModel
    ntuple_model::NTM
    vector_model::VM
    scalar_model::SM
end
function NTupleContainingModel(
    N,
    Nv;
    ntuple_model = ntuple(i -> NTupleModel(Nv), N),
    vector_model = VectorModel{Nv}(),
    scalar_model = ScalarModel(),
)
    args = (ntuple_model, vector_model, scalar_model)
    return NTupleContainingModel{N, typeof.(args)...}(args...)
end
function state(m::NTupleContainingModel, T)
    @vars begin
        ntuple_model::state(m.ntuple_model, T)
        vector_model::state(m.vector_model, T)
        scalar_model::state(m.scalar_model, T)
    end
end

================================================
FILE: test/Utilities/VariableTemplates/runtests.jl
================================================
module TestVariableTemplates
using Test
using StaticArrays
using ClimateMachine.VariableTemplates
@testset "VariableTemplates" begin
    include("test_base_functionality.jl")
    include("varsindex.jl")
    include("test_complex_models.jl")
end
end

================================================
FILE: test/Utilities/VariableTemplates/runtests_gpu.jl
================================================
module TestVariableTemplatesGPU
using Test
using StaticArrays
using ClimateMachine.VariableTemplates
@testset "VariableTemplates - GPU" begin
    include("test_complex_models_gpu.jl")
end
end

================================================
FILE: test/Utilities/VariableTemplates/test_base_functionality.jl
================================================
using Test
using StaticArrays
using ClimateMachine.VariableTemplates

# Small model tree for testing Vars/Grad views over a flat array.
struct TestModel{A, B, C}
    a::A
    b::B
    c::C
end
struct SubModelA end
struct SubModelB end
struct SubModelC{N} end
function state(m::TestModel, T)
    @vars begin
        ρ::T
        ρu::SVector{3, T}
        ρe::T
        a::state(m.a, T)
        b::state(m.b, T)
        c::state(m.c, T)
        S::SHermitianCompact{3, T, 6}
    end
end
state(m::SubModelA, T) = @vars()
state(m::SubModelB, T) = @vars(ρqt::T)
state(m::SubModelC{N}, T) where {N} = @vars(ρk::SVector{N, T})

model = TestModel(SubModelA(), SubModelB(), SubModelC{5}())
st = state(model, Float64)
# 17 = ρ(1) + ρu(3) + ρe(1) + a(0) + b.ρqt(1) + c.ρk(5) + S(6 stored entries)
@test varsize(st) == 17
@test varsize(typeof(())) == 0
v = Vars{st}(zeros(MVector{varsize(st), Float64}))
g = Grad{st}(zeros(MMatrix{3, varsize(st), Float64}))
@test v.ρ === 0.0
@test v.ρu === SVector(0.0, 0.0, 0.0)
v.ρu = SVector(1, 2, 3)
@test v.ρu === SVector(1.0, 2.0, 3.0)
@test v.b.ρqt === 0.0
v.b.ρqt = 12.0 @test v.b.ρqt === 12.0 @test v.S === zeros(SHermitianCompact{3, Float64, 6}) v.S = SHermitianCompact{3, Float64, 6}(1, 2, 3, 2, 3, 4, 3, 4, 5) @test v.S[1, 1] === 1.0 @test v.S[1, 3] === 3.0 @test v.S[3, 1] === 3.0 @test v.S[3, 3] === 5.0 v.S = ones(SMatrix{3, 3, Int64}) @test v.S[1, 1] === 1.0 @test v.S[1, 3] === 1.0 @test v.S[3, 1] === 1.0 @test v.S[3, 3] === 1.0 @test propertynames(v.a) == () @test propertynames(g.a) == () @test g.ρu == zeros(SMatrix{3, 3, Float64}) g.ρu = SMatrix{3, 3}(1:9) @test g.ρu == SMatrix{3, 3, Float64}(1:9) @test size(v.c.ρk) == (5,) @test size(g.c.ρk) == (3, 5) sv = similar(v) @test typeof(sv) == typeof(v) @test size(parent(sv)) == size(parent(v)) sg = similar(g) @test typeof(sg) == typeof(g) @test size(parent(sg)) == size(parent(g)) @test flattenednames(st) == [ "ρ", "ρu[1]", "ρu[2]", "ρu[3]", "ρe", "b.ρqt", "c.ρk[1]", "c.ρk[2]", "c.ρk[3]", "c.ρk[4]", "c.ρk[5]", "S[1,1]", "S[2,1]", "S[3,1]", "S[2,2]", "S[3,2]", "S[3,3]", ] ================================================ FILE: test/Utilities/VariableTemplates/test_complex_models.jl ================================================ using Test using StaticArrays using Printf using ClimateMachine.VariableTemplates using ClimateMachine.VariableTemplates: wrap_val import ClimateMachine.VariableTemplates VT = VariableTemplates @testset "Test complex models" begin include("complex_models.jl") FT = Float32 # test getproperty m = ScalarModel() st = state(m, FT) vs = varsize(st) a_global = collect(1:vs) v = Vars{st}(a_global) @test v.x == FT(1) Nv = 4 m = VectorModel{Nv}() st = state(m, FT) vs = varsize(st) a_global = collect(1:vs) v = Vars{st}(a_global) @test v.x == SVector{Nv, FT}(1:Nv) N = 3 M = 6 m = MatrixModel{N, M}() st = state(m, FT) vs = varsize(st) a_global = collect(1:vs) v = Vars{st}(a_global) @test v.x == SHermitianCompact{N, FT, M}(collect(1:(1 + M - 1))) Nv = 3 N = 3 M = 6 m = CompositModel(Nv, N, M) st = state(m, FT) vs = varsize(st) a_global = collect(1:vs) v = 
Vars{st}(a_global) scalar_model = v.scalar_model @test v.scalar_model.x == FT(1) vector_model = v.vector_model @test v.vector_model.x == SVector{Nv, FT}([2, 3, 4]) matrix_model = v.matrix_model @test v.matrix_model.x == SHermitianCompact{N, FT, M}(collect(5:(5 + M - 1))) Nv = 3 N = 5 m = NTupleContainingModel(N, Nv) st = state(m, FT) vs = varsize(st) a_global = collect(1:vs) v = Vars{st}(a_global) offset = (Nv + 1) * N @test v.vector_model.x == SVector{Nv, FT}(collect(1:Nv) .+ offset) @test v.scalar_model.x == FT(1 + Nv) + offset # Make sure we bounds error for NTupleModel's: @test_throws BoundsError m.ntuple_model[0] @test_throws BoundsError m.ntuple_model[N + 1] unval(::Val{i}) where {i} = i @unroll_map(N) do i @test m.ntuple_model[i] isa NTupleModel @test m.ntuple_model[i].scalar_model isa ScalarModel @test v.ntuple_model[i].scalar_model.x == FT(unval(i)) + (Nv) * (unval(i) - 1) @test v.vector_model.x == SVector{Nv, FT}(1:Nv) .+ offset @test v.scalar_model.x == FT(Nv + 1) + offset end @test vuntuple(x -> x, 5) == ntuple(i -> Val(i), Val(5)) # test flattenednames fn = flattenednames(st) j = 1 for i in 1:N @test fn[j] === "ntuple_model[$i].scalar_model.x" j += 1 for k in 1:Nv @test fn[j] === "ntuple_model[$i].vector_model.x[$k]" j += 1 end end for k in 1:Nv @test fn[j] === "vector_model.x[$k]" j += 1 end @test fn[j] === "scalar_model.x" # flattened_tup_chain - empty/generic cases struct Foo end @test flattened_tup_chain(NamedTuple{(), Tuple{}}) == () @test flattened_tup_chain(Foo, RetainArr()) == ((Symbol(),),) @test flattened_tup_chain(Foo, FlattenArr()) == ((Symbol(),),) # flattened_tup_chain - SHermitianCompact Nv, M = 3, 6 A = SHermitianCompact{Nv, FT, M}(collect(1:(1 + M - 1))) ftc = flattened_tup_chain(typeof(A), FlattenArr()) @test ftc == ntuple(i -> (Symbol(), i), M) ftc = flattened_tup_chain(typeof(A), RetainArr()) @test ftc == ((Symbol(),),) # flattened_tup_chain - Retain arrays ftc = flattened_tup_chain(st, RetainArr()) j = 1 for i in 1:N @test ftc[j] 
=== (:ntuple_model, i, :scalar_model, :x) j += 1 @test ftc[j] === (:ntuple_model, i, :vector_model, :x) j += 1 end @test ftc[j] === (:vector_model, :x) j += 1 @test ftc[j] === (:scalar_model, :x) # test varsindex ntuple(N) do i i_val = Val(i) i_sm = varsindex(st, :ntuple_model, i_val, :scalar_model, :x) i_vm = varsindex(st, :ntuple_model, i_val, :vector_model, :x) nt_offset = (Nv + 1) - 1 i_sm_correct = (i + nt_offset * (i - 1)):(i + nt_offset * (i - 1)) @test i_sm == i_sm_correct offset = 1 i_start = i + nt_offset * (i - 1) + offset i_vm_correct = (i_start):(i_start + Nv - 1) @test i_vm == i_vm_correct end # test that getproperty matches varsindex ntuple(N) do i i_ϕ = varsindex(st, wrap_val.(ftc[i])...) ϕ = getproperty(v, wrap_val.(ftc[i])) @test all(parent(v)[i_ϕ] .≈ ϕ) end # test getproperty with tup-chain @unroll_map(N) do i @test v.scalar_model.x == getproperty(v, (:scalar_model, :x)) @test v.vector_model.x == getproperty(v, (:vector_model, :x)) @test v.ntuple_model[i] == getproperty(v, (:ntuple_model, i)) @test v.ntuple_model[i].scalar_model == getproperty(v, (:ntuple_model, i, :scalar_model)) @test v.ntuple_model[i].scalar_model.x == getproperty(v, (:ntuple_model, i, :scalar_model, :x)) end # Test converting to flattened NamedTuple fnt = flattened_named_tuple(v, RetainArr()) @test fnt.ntuple_model_1_scalar_model_x == 1.0f0 @test fnt.ntuple_model_1_vector_model_x == Float32[2.0, 3.0, 4.0] @test fnt.ntuple_model_2_scalar_model_x == 5.0f0 @test fnt.ntuple_model_2_vector_model_x == Float32[6.0, 7.0, 8.0] @test fnt.ntuple_model_3_scalar_model_x == 9.0f0 @test fnt.ntuple_model_3_vector_model_x == Float32[10.0, 11.0, 12.0] @test fnt.ntuple_model_4_scalar_model_x == 13.0f0 @test fnt.ntuple_model_4_vector_model_x == Float32[14.0, 15.0, 16.0] @test fnt.ntuple_model_5_scalar_model_x == 17.0f0 @test fnt.ntuple_model_5_vector_model_x == Float32[18.0, 19.0, 20.0] @test fnt.vector_model_x == Float32[21.0, 22.0, 23.0] @test fnt.scalar_model_x == 24.0f0 # flattened_tup_chain 
- Flatten arrays ftc = flattened_tup_chain(st, FlattenArr()) j = 1 for i in 1:N @test ftc[j] === (:ntuple_model, i, :scalar_model, :x) j += 1 for k in 1:Nv @test ftc[j] === (:ntuple_model, i, :vector_model, :x, k) j += 1 end end for i in 1:Nv @test ftc[j] === (:vector_model, :x, i) j += 1 end @test ftc[j] === (:scalar_model, :x) # test varsindex (flatten arrays) ntuple(N) do i i_val = Val(i) i_sm = varsindex(st, :ntuple_model, i_val, :scalar_model, :x) nt_offset = (Nv + 1) - 1 i_sm_correct = (i + nt_offset * (i - 1)):(i + nt_offset * (i - 1)) @test i_sm == i_sm_correct for j in 1:Nv i_vm = varsindex(st, :ntuple_model, i_val, :vector_model, :x, Val(j)) offset = 1 i_start = i + nt_offset * (i - 1) + offset i_vm_correct = i_start + j - 1 @test i_vm == i_vm_correct:i_vm_correct end end # test that getproperty matches varsindex ntuple(N) do i i_ϕ = varsindex(st, wrap_val.(ftc[i])...) ϕ = getproperty(v, wrap_val.(ftc[i])) @test all(parent(v)[i_ϕ] .≈ ϕ) end # test getproperty with tup-chain for k in 1:Nv @test v.vector_model.x[k] == getproperty(v, (:vector_model, :x, Val(k))) end # test getindex with Val @test getindex((1, 2), Val(1)) == 1 @test getindex((1, 2), Val(2)) == 2 @test getindex(SVector(1, 2), Val(1)) == 1 @test getindex(SVector(1, 2), Val(2)) == 2 nt = (; a = ((; x = 1), (; x = 2))) fnt = VT.flattened_tuple(FlattenArr(), nt) vg = Grad{typeof(nt)}(zeros(MMatrix{3, length(fnt), FT})) parent(vg)[1, :] .= fnt parent(vg)[2, :] .= fnt parent(vg)[3, :] .= fnt for i in 1:2 @test getindex(vg.a, Val(i)).x[1] == i @test getindex(vg.a, Val(i)).x[2] == i @test getindex(vg.a, Val(i)).x[3] == i end # getpropertyorindex @test VT.getpropertyorindex((1, 2), Val(1)) == 1 @test VT.getpropertyorindex((1, 2), Val(2)) == 2 @test VT.getpropertyorindex([1, 2], Val(1)) == 1 @test VT.getpropertyorindex([1, 2], Val(2)) == 2 @test VT.getpropertyorindex(v, :scalar_model) == v.scalar_model for i in 1:N @test VT.getpropertyorindex(v.ntuple_model, Val(i)) == v.ntuple_model[Val(i)] @test 
VT.getpropertyorindex(v.ntuple_model, (Val(i),)) == v.ntuple_model[Val(i)] @test getindex(v.ntuple_model, (Val(i),)) == VT.getpropertyorindex(v.ntuple_model, (Val(i),)) end # Test converting to flattened NamedTuple fnt = flattened_named_tuple(v, FlattenArr()) @test fnt.ntuple_model_1_scalar_model_x == 1.0f0 @test fnt.ntuple_model_1_vector_model_x_1 == 2.0 @test fnt.ntuple_model_1_vector_model_x_2 == 3.0 @test fnt.ntuple_model_1_vector_model_x_3 == 4.0 @test fnt.ntuple_model_2_scalar_model_x == 5.0f0 @test fnt.ntuple_model_2_vector_model_x_1 == 6.0 @test fnt.ntuple_model_2_vector_model_x_2 == 7.0 @test fnt.ntuple_model_2_vector_model_x_3 == 8.0 @test fnt.ntuple_model_3_scalar_model_x == 9.0f0 @test fnt.ntuple_model_3_vector_model_x_1 == 10.0 @test fnt.ntuple_model_3_vector_model_x_2 == 11.0 @test fnt.ntuple_model_3_vector_model_x_3 == 12.0 @test fnt.ntuple_model_4_scalar_model_x == 13.0f0 @test fnt.ntuple_model_4_vector_model_x_1 == 14.0 @test fnt.ntuple_model_4_vector_model_x_2 == 15.0 @test fnt.ntuple_model_4_vector_model_x_3 == 16.0 @test fnt.ntuple_model_5_scalar_model_x == 17.0f0 @test fnt.ntuple_model_5_vector_model_x_1 == 18.0 @test fnt.ntuple_model_5_vector_model_x_2 == 19.0 @test fnt.ntuple_model_5_vector_model_x_3 == 20.0 @test fnt.vector_model_x_1 == 21.0 @test fnt.vector_model_x_2 == 22.0 @test fnt.vector_model_x_3 == 23.0 @test fnt.scalar_model_x == 24.0f0 struct Foo end nt = (; nest = (; v = SVector(1, 2, 3), nt = (; shc = SHermitianCompact{3, FT, 6}(collect(1:6)), f = FT(1.0), ), d = SDiagonal(collect(1:3)...), tt = (Foo(), Foo()), t = Foo(), ), ) # Test flattened_tuple: @test VT.flattened_tuple(RetainArr(), NamedTuple()) == () @test VT.flattened_tuple(FlattenArr(), NamedTuple()) == () @test VT.flattened_tuple(RetainArr(), Tuple(NamedTuple())) == () @test VT.flattened_tuple(FlattenArr(), Tuple(NamedTuple())) == () ft = FlattenArr() @test VT.flattened_tuple(ft, nt.nest.nt.f) == (1.0f0,) @test VT.flattened_tuple(ft, nt.nest.nt) == (1.0f0, 2.0f0, 3.0f0, 
4.0f0, 5.0f0, 6.0f0, 1.0f0)
    @test VT.flattened_tuple(ft, nt.nest.d) == (1, 2, 3)
    @test VT.flattened_tuple(ft, nt.nest.t) == (Foo(),)
    @test VT.flattened_tuple(ft, nt.nest.tt) == (Foo(), Foo())
    # RetainArr keeps static arrays as single (unflattened) entries.
    ft = RetainArr()
    @test VT.flattened_tuple(ft, nt.nest.nt.f) == (1.0f0,)
    @test VT.flattened_tuple(ft, nt.nest.nt)[1] == nt.nest.nt.shc.lowertriangle
    @test VT.flattened_tuple(ft, nt.nest.nt)[2] == 1.0f0
    @test VT.flattened_tuple(ft, nt.nest.d) == (nt.nest.d.diag,)
    @test VT.flattened_tuple(ft, nt.nest.t) == (Foo(),)
    @test VT.flattened_tuple(ft, nt.nest.tt) == (Foo(), Foo())
    # Test flattened_named_tuple for NamedTuples
    fnt = flattened_named_tuple(nt, FlattenArr())
    @test fnt.nest_v_1 == 1
    @test fnt.nest_v_2 == 2
    @test fnt.nest_v_3 == 3
    @test fnt.nest_nt_shc_1 == 1.0
    @test fnt.nest_nt_shc_2 == 2.0
    @test fnt.nest_nt_shc_3 == 3.0
    @test fnt.nest_nt_shc_4 == 4.0
    @test fnt.nest_nt_shc_5 == 5.0
    @test fnt.nest_nt_shc_6 == 6.0
    @test fnt.nest_nt_f == 1.0
    @test fnt.nest_tt_1 == Foo()
    @test fnt.nest_tt_2 == Foo()
    @test fnt.nest_t == Foo()
    fnt = flattened_named_tuple(nt, RetainArr())
    @test fnt.nest_v == SVector(1, 2, 3)
    @test fnt.nest_nt_shc == nt.nest.nt.shc.lowertriangle
    @test fnt.nest_nt_f == 1.0
    @test fnt.nest_tt_1 == Foo()
    @test fnt.nest_tt_2 == Foo()
    @test fnt.nest_t == Foo()
    # Test that show doesn't break
    sprint(show, v)
    sprint(show, vg)
end

================================================
FILE: test/Utilities/VariableTemplates/test_complex_models_gpu.jl
================================================
using Test
using StaticArrays
using ClimateMachine.VariableTemplates
using CUDA
using KernelAbstractions
using KernelAbstractions.Extras: @unroll

include("complex_models.jl")

# Allow scalar indexing only when no GPU is available (CPU fallback).
run_gpu = CUDA.has_cuda_gpu()
if run_gpu
    CUDA.allowscalar(false)
else
    CUDA.allowscalar(true)
end
# Select the KernelAbstractions backend matching the hardware.
# BUG FIX: the ternary branches were inverted — with a GPU present this
# returned `CPU()` (so the GPU path was never exercised), and without one
# it returned `CUDADevice()`, making `device_array` construct a `CuArray`
# on a machine with no GPU.
get_device() = run_gpu ? CUDADevice() : CPU()
# Move a host array to the chosen device.
device_array(a, ::CUDADevice) = CuArray(a)
device_array(a, ::CPU) = Array(a)
# Random-fill on the chosen device.
device_rand(::CUDADevice, args...) = CUDA.rand(args...)
device_rand(::CPU, args...) = rand(args...)
# Number of scalar storage slots in a model's state (element type is
# irrelevant to the count, so `Int` suffices).
number_states(m) = varsize(state(m, Int))

# Copy `src` into `dst` one workgroup (column `i`) at a time, staging the
# column through MArray-backed `Vars` views so that `Vars` getproperty /
# setproperty! is exercised inside a device kernel.
@kernel function mem_copy_kernel!(
    m::AbstractModel,
    dst::AbstractArray{FT, N},
    src::AbstractArray{FT, N},
) where {FT, N}
    @uniform begin
        ns = number_states(m)
        vs = state(m, FT)
        local_src = MArray{Tuple{ns}, FT}(undef)
        local_dst = MArray{Tuple{ns}, FT}(undef)
    end
    i = @index(Group, Linear)
    @inbounds begin
        @unroll for s in 1:ns
            local_src[s] = src[s, i]
        end
        mem_copy!(m, Vars{vs}(local_dst), Vars{vs}(local_src))
        @unroll for s in 1:ns
            dst[s, i] = local_dst[s]
        end
    end
end

# Field-by-field copy for the single-scalar model.
function mem_copy!(m::ScalarModel, dst::Vars, src::Vars)
    dst.x = src.x
end

# Field-by-field copy for the nested model; deliberately writes the
# ntuple scalar twice through two access paths (plain tuple and SVector)
# to test both indexing forms on-device.
function mem_copy!(m::NTupleContainingModel{N}, dst::Vars, src::Vars) where {N}
    dst.vector_model.x = src.vector_model.x
    dst.scalar_model.x = src.scalar_model.x
    up = vuntuple(i -> src.ntuple_model[i].scalar_model.x, N)
    up_v = vuntuple(i -> src.ntuple_model[i].vector_model.x, N)
    up_sv = SVector(up...)
    @unroll_map(N) do i
        dst.ntuple_model[i].scalar_model.x = up[i] # index into tuple
        dst.ntuple_model[i].scalar_model.x = up_sv[i] # index into SArray
        dst.ntuple_model[i].vector_model.x = up_v[i]
    end
end

# Round-trip a random state through the kernel; dst must equal src exactly.
@testset "ScalarModel" begin
    FT = Float32
    device = get_device()
    n_elem = 10
    m = ScalarModel()
    ns = number_states(m)
    a_src = Array{FT}(undef, ns, n_elem)
    a_dst = Array{FT}(undef, ns, n_elem)
    d_src = device_array(a_src, device)
    d_dst = device_array(a_dst, device)
    d_src .= device_rand(device, FT, ns, n_elem)
    fill!(d_dst, 0)
    work_groups = (1,)
    ndrange = (n_elem,)
    kernel! = mem_copy_kernel!(device, work_groups)
    event = kernel!(m, d_dst, d_src, ndrange = ndrange)
    wait(device, event)
    a_src = Array(d_src)
    a_dst = Array(d_dst)
    @test a_src == a_dst
end

# Same round-trip for the nested NTuple-containing model.
@testset "NTupleContainingModel" begin
    FT = Float32
    device = get_device()
    n_elem = 10
    Nv = 4
    N = 3
    m = NTupleContainingModel(N, Nv)
    ns = number_states(m)
    a_src = Array{FT}(undef, ns, n_elem)
    a_dst = Array{FT}(undef, ns, n_elem)
    d_src = device_array(a_src, device)
    d_dst = device_array(a_dst, device)
    d_src .= device_rand(device, FT, ns, n_elem)
    fill!(d_dst, 0)
    work_groups = (1,)
    ndrange = (n_elem,)
    kernel! = mem_copy_kernel!(device, work_groups)
    event = kernel!(m, d_dst, d_src, ndrange = ndrange)
    wait(device, event)
    a_src = Array(d_src)
    a_dst = Array(d_dst)
    @test a_src == a_dst
end

================================================
FILE: test/Utilities/VariableTemplates/varsindex.jl
================================================
using Test, StaticArrays
using ClimateMachine.VariableTemplates: varsindex, @vars, varsindices
using StaticArrays

@testset "varsindex" begin
    # Two-level model tree whose flat-state index layout is asserted below.
    struct TestMoistureModel{FT} end
    struct TestAtmosModel{FT}
        moisture::TestMoistureModel{FT}
    end
    function vars_state(::TestMoistureModel, FT)
        @vars begin
            ρq_tot::FT
            ρq_x::SVector{5, FT}
            ρq_liq::FT
            ρq_vap::FT
        end
    end
    function vars_state(m::TestAtmosModel, FT)
        @vars begin
            ρ::FT
            ρu::SVector{3, FT}
            ρe::FT
            moisture::vars_state(m.moisture, FT)
            S::SHermitianCompact{3, FT, 6}
        end
    end
    FT = Float64
    m = TestAtmosModel(TestMoistureModel{FT}())
    @test 1:1 === varsindex(vars_state(m, FT), :ρ)
    @test 2:4 === varsindex(vars_state(m, FT), :ρu)
    @test 5:5 === varsindex(vars_state(m, FT), :ρe)
    # Since moisture is defined recursively this will get all the fields
    moist = varsindex(vars_state(m, FT), :moisture)
    @test 6:13 === moist
    # To get the specific ones we can do
    @test 6:6 === varsindex(vars_state(m, FT), :moisture, :ρq_tot)
    @test 7:11 === varsindex(vars_state(m, FT), :moisture, :ρq_x)
    @test 12:12 === varsindex(vars_state(m, FT), :moisture, :ρq_liq)
@test 13:13 === varsindex(vars_state(m, FT), :moisture, :ρq_vap) # or @test 6:6 === moist[varsindex(vars_state(m.moisture, FT), :ρq_tot)] @test 7:11 === moist[varsindex(vars_state(m.moisture, FT), :ρq_x)] @test 12:12 === moist[varsindex(vars_state(m.moisture, FT), :ρq_liq)] @test 13:13 === moist[varsindex(vars_state(m.moisture, FT), :ρq_vap)] @test 14:19 == varsindex(vars_state(m, FT), :S) @test (1,) === varsindices(vars_state(m, FT), :ρ) @test (2, 3, 4) === varsindices(vars_state(m, FT), :ρu) @test (1, 5) === varsindices(vars_state(m, FT), :ρ, :ρe) @test (12,) === varsindices(vars_state(m, FT), :(moisture.ρq_liq)) let vars = ("ρe", "moisture.ρq_x", "moisture.ρq_vap") @test (5, 7, 8, 9, 10, 11, 13) === varsindices(vars_state(m, FT), vars) end end ================================================ FILE: test/Utilities/runtests.jl ================================================ using Test, Pkg @testset "Utilities" begin all_tests = isempty(ARGS) || "all" in ARGS ? true : false for submodule in ["TicToc", "VariableTemplates", "SingleStackUtils"] if all_tests || "$submodule" in ARGS || "Utilities/$submodule" in ARGS || "Utilities" in ARGS include_test(submodule) end end end ================================================ FILE: test/runtests.jl ================================================ using Test, Pkg ENV["DATADEPS_ALWAYS_ACCEPT"] = true ENV["JULIA_LOG_LEVEL"] = "WARN" function include_test(_module) println("Starting tests for $_module") t = @elapsed include(joinpath(_module, "runtests.jl")) println("Completed tests for $_module, $(round(Int, t)) seconds elapsed") return nothing end @testset "ClimateMachine" begin all_tests = isempty(ARGS) || "all" in ARGS ? 
true : false function has_submodule(sm) any(ARGS) do a a == sm && return true first(split(a, '/')) == sm && return true return false end end for submodule in [ "InputOutput", "Utilities", "Common", "Arrays", "BalanceLaws", "Atmos", "Land", "Numerics", "Diagnostics", "Ocean", "Driver", ] if all_tests || has_submodule(submodule) || "ClimateMachine" in ARGS include_test(submodule) end end end ================================================ FILE: test/runtests_gpu.jl ================================================ using Test, Pkg, CUDA ENV["JULIA_LOG_LEVEL"] = "WARN" @test CUDA.functional() for submodule in [ "Arrays", #"Numerics/Mesh", #"Numerics/DGMethods", "Numerics/ODESolvers", ] println("Starting tests for $submodule") t = @elapsed include(joinpath(submodule, "runtests.jl")) println("Completed tests for $submodule, $(round(Int, t)) seconds elapsed") end ================================================ FILE: test/testhelpers.jl ================================================ using MPI function runmpi(file; ntasks = 1, localhost = false) localhostenv = try parse(Bool, get(ENV, "CLIMATEMACHINE_TEST_RUNMPI_LOCALHOST", "false")) catch false end # Force mpiexec to exec on the localhost node # TODO: MicrosoftMPI mpiexec has issues if mpiexec.exe is in a different # folder as the MPI script to run so ignore for now. if (localhost || localhostenv) && (MPI.MPI_LIBRARY != MPI.MicrosoftMPI) localhostonly = `-host localhost` else localhostonly = `` end # by default some mpi runtimes will # complain if more resources (processes) # are requested than available on the node if MPI.MPI_LIBRARY == MPI.OpenMPI oversubscribe = `--oversubscribe` else oversubscribe = `` end @info "Running MPI test..." 
file ntasks
    # Running this way prevents:
    #   Balance Law Solver | No tests
    # since external tests are not returned as passed/fail
    @time @test MPI.mpiexec() do cmd
        Base.run(
            `$cmd $localhostonly $oversubscribe -np $ntasks $(Base.julia_cmd()) --startup-file=no --project=$(Base.active_project()) $file`;
            wait = true,
        )
        true
    end
end

================================================
FILE: tutorials/Atmos/agnesi_hs_lin.jl
================================================
# # [Linear HS mountain waves (Topography)](@id EX-LIN_HS-docs)
#
# ## Description of experiment
# 1) Dry linear Hydrostatic Mountain Waves
# The atmosphere is dry and the flow impinges on a witch-of-Agnesi mountain of height $h_{m}=1$ m
# and base parameter $a=10000$ m, centered on $x_{c} = 120$ km, in a 2D domain
# $\Omega = 240km \times 50km$. The mountain is defined as
#
# ```math
# z = \frac{h_m}{1 + \left(\frac{x - x_c}{a}\right)^2}
# ```
#
# The 2D problem is set up in 3D by using 1 element in the y direction.
# To damp the upward-moving gravity waves, a Rayleigh absorbing layer is added at $z = 15000 m$.
#
# The initial atmosphere is defined such that it has a stability frequency $N=g/\sqrt{c_p T_0}$, where
#
# $T_0 = 250 K$
# so that
#
# ```math
# \theta = \theta_0 \exp\left(\frac{N^2 z}{g}\right)
# ```
#
# ```math
# \pi = 1 + \frac{g^2}{c_p \theta_0 N^2}\left(\exp\left(\frac{-N^2 z}{g}\right) - 1\right)
# ```
#
# where $\theta_0 = T_0$.
#
# so that
#
# ```math
# ρ = \frac{p_{sfc}}{R_{gas}\theta}\pi^{c_v/R_{gas}}
# ```
# and
# ```math
# T = \theta \pi
# ```
#
# 2) Boundaries
# - `Impenetrable(FreeSlip())` - Top and bottom: no momentum flux, no mass flux through
#   walls.
# - `Impermeable()` - non-porous walls, i.e. no diffusive fluxes through
#   walls.
# - Agnesi topography built via meshwarp.
# - Laterally periodic # 3) Domain - 240,000 m (horizontal) x 4000 m (horizontal) x 30,000m (vertical) # 4) Resolution - 1000m X 240 m effective resolution # 5) Total simulation time - 15,000 s # 6) Overrides defaults for # - CPU Initialisation # - Time integrator # - Sources # #md # !!! note #md # This experiment setup assumes that you have installed the #md # `ClimateMachine` according to the instructions on the landing page. #md # We assume the users' familiarity with the conservative form of the #md # equations of motion for a compressible fluid (see the #md # [AtmosModel](@ref AtmosModel-docs) page). #md # #md # The following topics are covered in this example #md # - Defining the initial conditions #md # - Applying source terms #md # - Add an idealized topography defined by a warping function # # ## Boilerplate (Using Modules) # # The setup of this problem is taken from Case 6 of [giraldoRestelli2008a](@cite) # using ClimateMachine ClimateMachine.init(parse_clargs = true); nothing # Setting `parse_clargs=true` allows the use of command-line arguments (see API > Driver docs) # to control simulation update and output intervals. using ClimateMachine.Atmos using ClimateMachine.Orientations using ClimateMachine.ConfigTypes using ClimateMachine.Diagnostics using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.Mesh.Filters using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using Thermodynamics.TemperatureProfiles using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VariableTemplates using StaticArrays using Test using CLIMAParameters using CLIMAParameters.Atmos.SubgridScale: C_smag using CLIMAParameters.Planet: R_d, cp_d, cv_d, MSLP, grav struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() # ## [Initial Conditions](@id init) #md # !!! 
note
#md #     The following variables are assigned in the initial condition
#md #     - `state.ρ` = Scalar quantity for initial density profile
#md #     - `state.ρu`= 3-component vector for initial momentum profile
#md #     - `state.energy.ρe`= Scalar quantity for initial total-energy profile

# Initial condition for the linear hydrostatic mountain-wave problem: a dry,
# isothermal (T = 250 K) atmosphere in hydrostatic balance with a uniform
# 20 m/s horizontal wind.
function init_agnesi_hs_lin!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord

    ## Problem float-type
    FT = eltype(state)
    param_set = parameter_set(bl)

    ## Unpack constant parameters
    R_gas::FT = R_d(param_set)
    c_p::FT = cp_d(param_set)
    c_v::FT = cv_d(param_set)
    p0::FT = MSLP(param_set)
    _grav::FT = grav(param_set)
    c::FT = c_v / R_gas

    ## Isothermal background temperature and surface potential temperature
    Tiso::FT = 250.0
    θ0::FT = Tiso

    ## Calculate the Brunt-Väisälä frequency for an isothermal field,
    ## N = g / sqrt(cp * T0)
    Brunt::FT = _grav / sqrt(c_p * Tiso)
    Brunt2::FT = Brunt * Brunt

    ## Exner pressure, potential temperature and density of the
    ## hydrostatically balanced background
    π_exner::FT = exp(-_grav * z / (c_p * Tiso))
    θ::FT = θ0 * exp(Brunt2 * z / _grav)
    ρ::FT = p0 / (R_gas * θ) * (π_exner)^c

    ## Compute perturbed thermodynamic state:
    T = θ * π_exner
    e_int = internal_energy(param_set, T)
    ts = PhaseDry(param_set, e_int, ρ)

    ## initial velocity
    u = FT(20.0)

    ## State (prognostic) variable assignment
    e_kin = FT(0)                                        # kinetic energy
    e_pot = gravitational_potential(bl.orientation, aux) # potential energy
    ρe_tot = ρ * total_energy(e_kin, e_pot, ts)          # total energy

    state.ρ = ρ
    state.ρu = SVector{3, FT}(ρ * u, 0, 0)
    state.energy.ρe = ρe_tot
end

# Define a `setmax` method: curry the domain extents (xmax, ymax, zmax) into
# the warp function `f`, yielding the 3-argument signature expected by
# `meshwarp`.
function setmax(f, xmax, ymax, zmax)
    function setmaxima(xin, yin, zin)
        return f(xin, yin, zin; xmax = xmax, ymax = ymax, zmax = zmax)
    end
    return setmaxima
end

# Define a warping function to build an analytic witch-of-Agnesi topography,
# h(x) = hm / (1 + ((x - xc) / ac)^2), with the surface displacement relaxed
# linearly to zero at the domain top z = zmax.
function warp_agnesi(xin, yin, zin; xmax = 1000.0, ymax = 1000.0, zmax = 1000.0)
    FT = eltype(xin)
    ac = FT(10000)      # mountain half-width parameter [m]
    hm = FT(1)          # mountain height [m]
    xc = FT(0.5) * xmax # mountain center [m]
    zdiff = hm / (FT(1) + ((xin - xc) / ac)^2)
    ## Linear relaxation towards domain maximum height
    x, y, z = xin, yin, zin + zdiff * (zmax - zin) / zmax
    return x, y, z
end

# ## [Model Configuration](@id config-helper)
# We define a configuration function to assist in prescribing the physical
# model. The purpose of this is to populate the
# `AtmosLESConfiguration` with arguments
# appropriate to the problem being considered.
function config_agnesi_hs_lin(
    ::Type{FT},
    N,
    resolution,
    xmax,
    ymax,
    zmax,
) where {FT}
    ##
    ## Explicit Rayleigh damping:
    ##
    ## ``
    ## \tau_s = \alpha * \sin\left(0.5\pi \frac{z - z_s}{zmax - z_s} \right)^2,
    ## ``
    ## where
    ## ``sponge_ampz`` is the wave damping coefficient (1/s)
    ## ``z_s`` is the level where the Rayleigh sponge starts
    ## ``zmax`` is the domain top
    ##
    ## Setup the parameters for the gravity wave absorbing layer
    ## at the top of the domain
    ##
    ## `u_relaxation` contains the background velocity values to which
    ## the sponge relaxes the vertically moving wave
    u_relaxation = SVector(FT(20), FT(0), FT(0))

    ## Wave damping coefficient (1/s)
    sponge_ampz = FT(0.5)

    ## Vertical level where the absorbing layer starts
    z_s = FT(25000.0)

    ## Pass the sponge parameters to the sponge calculator
    rayleigh_sponge =
        RayleighSponge{FT}(zmax, z_s, sponge_ampz, u_relaxation, 2)

    ## Setup the source terms for this problem:
    source = (Gravity(), rayleigh_sponge)

    ## Define the reference state:
    T_virt = FT(250)
    temp_profile_ref = IsothermalProfile(param_set, T_virt)
    ref_state = HydrostaticState(temp_profile_ref)
    nothing # hide

    _C_smag = FT(0.21)
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = ref_state,
        turbulence = Vreman(_C_smag),
        moisture = DryModel(),
        tracers = NoTracers(),
    )
    model = AtmosModel{FT}(
        AtmosLESConfigType,
        physics;
        init_state_prognostic = init_agnesi_hs_lin!,
        source = source,
    )

    config = ClimateMachine.AtmosLESConfiguration(
        "Agnesi_HS_LINEAR",  # Problem title [String]
        N,                   # Polynomial order [Int]
        resolution,          # (Δx, Δy, Δz) effective resolution [m]
        xmax,                # Domain maximum size [m]
        ymax,                # Domain maximum size [m]
        zmax,                # Domain maximum size [m]
        param_set,           # Parameter set.
        init_agnesi_hs_lin!, # Function specifying initial condition
        model = model,       # Model type
        meshwarp = setmax(warp_agnesi, xmax, ymax, zmax),
    )
    return config
end

# Define a `main` method (entry point)
function main()
    FT = Float64

    ## Define the polynomial order and effective grid spacings:
    N = 4

    ## Define the domain size and spatial resolution
    Nx = 20
    Ny = 20
    Nz = 20
    xmax = FT(244000)
    ymax = FT(4000)
    zmax = FT(50000)
    Δx = xmax / FT(Nx)
    Δy = ymax / FT(Ny)
    Δz = zmax / FT(Nz)
    resolution = (Δx, Δy, Δz)
    t0 = FT(0)
    timeend = FT(150) #FT(hrs * 60 * 60)

    ## Define the max Courant for the time integrator (ode_solver).
    ## The default value is 1.7 for LSRK144:
    CFL = FT(1.5)

    ## Assign configurations so they can be passed to the `invoke!` function
    driver_config = config_agnesi_hs_lin(FT, N, resolution, xmax, ymax, zmax)

    ## Define the time integrator:
    ## We chose an explicit single-rate LSRK144 for this problem
    ode_solver_type = ClimateMachine.ExplicitSolverType(
        solver_method = LSRK144NiegemannDiehlBusch,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        init_on_cpu = true,
        Courant_number = CFL,
    )

    ## Set up the spectral filter to remove the solutions spurious modes
    ## Define the order of the exponential filter: use 32 or 64 for this problem.
    ## The larger the value, the less dissipation you get:
    filterorder = 64
    filter = ExponentialFilter(solver_config.dg.grid, 0, filterorder)
    cbfilter = GenericCallbacks.EveryXSimulationSteps(1) do
        Filters.apply!(
            solver_config.Q,
            AtmosFilterPerturbations(driver_config.bl),
            solver_config.dg.grid,
            filter,
            state_auxiliary = solver_config.dg.state_auxiliary,
        )
        nothing
    end
    ## End exponential filter

    ## Invoke solver (calls `solve!` function for time-integrator),
    ## pass the driver, solver and diagnostic config information.
    result = ClimateMachine.invoke!(
        solver_config;
        user_callbacks = (cbfilter,),
        check_euclidean_distance = true,
    )

    ## Check that the solution norm is reasonable.
    @test isapprox(result, FT(1); atol = 1.5e-3)
end

# Call `main`
main()
================================================
FILE: tutorials/Atmos/agnesi_nh_lin.jl
================================================
# # [Linear NH mountain waves (Topography)](@id EX-LIN_NH-docs)
#
# ## Description of experiment
# 1) Dry linear Non-hydrostatic Mountain Waves
# This example of a linear non-hydrostatic mountain wave can be classified as an initial value
# problem.
#
# The atmosphere is dry and the flow impinges against a witch of Agnesi mountain of height $h_m=1$m
# and base parameter $a=1000$m and centered at $x_c = 72km$ in a 2D domain
# $\Omega = 144km \times 30 km$. The mountain is defined as
#
# ```math
# z = \frac{h_m}{1 + \left(\frac{x - x_c}{a}\right)^2}
# ```
# The 2D problem is setup in 3D by using 1 element in the y direction.
# To damp the upward moving gravity waves, a Rayleigh absorbing layer is added at $z = 10,000m$.
#
# The initial atmosphere is defined such that it has a stability frequency $N=0.01 s^{-1}$, where
#
# ```math
# N^2 = g\frac{\rm d \ln \theta}{ \rm dz}
# ```
# so that
#
# ```math
# \theta = \theta_0 \exp\left(\frac{N^2 z}{g} \right),
# ```
# ```math
# \pi = 1 + \frac{g^2}{c_p \theta_0 N^2}\left(\exp\left(\frac{-N^2 z}{g} \right)\right)
# ```
#
# where $\theta_0 = 280K$.
#
# so that
#
# $ρ = \frac{p_{sfc}}{R_{gas}\theta}\pi^{c_v/R_{gas}}$
# and $T = \theta \pi$
#
# 2) Boundaries
# - `Impenetrable(FreeSlip())` - Top and bottom: no momentum flux, no mass flux through
#   walls.
# - `Impermeable()` - non-porous walls, i.e. no diffusive fluxes through
#   walls.
# - Agnesi topography built via meshwarp.
# - Laterally periodic
# 3) Domain - 144,000 m (horizontal) x 1360 m (horizontal) x 30,000m (vertical) (infinite domain in y)
# 4) Resolution - 340 m X 200 m effective resolution
# 5) Total simulation time - 18,000 s
# 6) Overrides defaults for
#    - CPU Initialisation
#    - Time integrator
#    - Sources
#
#md # !!!
note #md # This experiment setup assumes that you have installed the #md # `ClimateMachine` according to the instructions on the landing page. #md # We assume the users' familiarity with the conservative form of the #md # equations of motion for a compressible fluid (see the #md # [AtmosModel](@ref AtmosModel-docs) page). #md # #md # The following topics are covered in this example #md # - Defining the initial conditions #md # - Applying source terms #md # - Add an idealized topography defined by a warping function # # ## Boilerplate (Using Modules) # # The setup of this problem is taken from Case 6 of [giraldoRestelli2008a](@cite) # using ClimateMachine ClimateMachine.init(parse_clargs = true) using ClimateMachine.Atmos using ClimateMachine.Orientations using ClimateMachine.ConfigTypes using ClimateMachine.Diagnostics using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.Mesh.Filters using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using Thermodynamics.TemperatureProfiles using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VariableTemplates using StaticArrays using Test using CLIMAParameters using CLIMAParameters.Atmos.SubgridScale: C_smag using CLIMAParameters.Planet: R_d, cp_d, cv_d, MSLP, grav struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() # ## [Initial Conditions](@id init) #md # !!! 
note
#md #     The following variables are assigned in the initial condition
#md #     - `state.ρ` = Scalar quantity for initial density profile
#md #     - `state.ρu`= 3-component vector for initial momentum profile
#md #     - `state.energy.ρe`= Scalar quantity for initial total-energy profile

# Initial condition for the linear non-hydrostatic mountain-wave problem: a
# dry atmosphere with prescribed stability frequency N = 0.01 1/s and a
# uniform 10 m/s horizontal wind.
function init_agnesi_hs_lin!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord

    ## Problem float-type
    FT = eltype(state)
    param_set = parameter_set(bl)

    ## Unpack constant parameters
    R_gas::FT = R_d(param_set)
    c_p::FT = cp_d(param_set)
    c_v::FT = cv_d(param_set)
    p0::FT = MSLP(param_set)
    _grav::FT = grav(param_set)
    c::FT = c_v / R_gas

    ## Define initial thermal field as isothermal
    Tiso::FT = 250.0
    θ0::FT = Tiso

    ## Assign a value to the Brunt-Väisälä frequency:
    Brunt::FT = 0.01
    Brunt2::FT = Brunt * Brunt

    ## Exner pressure, potential temperature and density of the background
    π_exner::FT = exp(-_grav * z / (c_p * Tiso))
    θ::FT = θ0 * exp(Brunt2 * z / _grav)
    ρ::FT = p0 / (R_gas * θ) * (π_exner)^c

    ## Compute perturbed thermodynamic state:
    T = θ * π_exner
    e_int = internal_energy(param_set, T)
    ts = PhaseDry(param_set, e_int, ρ)

    ## initial velocity
    u = FT(10.0)

    ## State (prognostic) variable assignment
    e_kin = FT(0)                                        # kinetic energy
    e_pot = gravitational_potential(bl.orientation, aux) # potential energy
    ρe_tot = ρ * total_energy(e_kin, e_pot, ts)          # total energy

    state.ρ = ρ
    state.ρu = SVector{3, FT}(ρ * u, 0, 0)
    state.energy.ρe = ρe_tot
end

# `setmax` curries the domain extents (xmax, ymax, zmax) into the warp
# function `f`, yielding the 3-argument signature expected by `meshwarp`.
function setmax(f, xmax, ymax, zmax)
    function setmaxima(xin, yin, zin)
        return f(xin, yin, zin; xmax = xmax, ymax = ymax, zmax = zmax)
    end
    return setmaxima
end

# Define a warping function to build an analytic witch-of-Agnesi topography,
# h(x) = hm / (1 + ((x - xc) / ac)^2), with the surface displacement relaxed
# linearly to zero at the domain top z = zmax.
function warp_agnesi(xin, yin, zin; xmax = 1000.0, ymax = 1000.0, zmax = 1000.0)
    FT = eltype(xin)
    ac = FT(1000)       # mountain half-width parameter [m]
    hm = FT(1)          # mountain height [m]
    xc = FT(0.5) * xmax # mountain center [m]
    zdiff = hm / (FT(1) + ((xin - xc) / ac)^2)
    ## Linear relaxation towards domain maximum height
    x, y, z = xin, yin, zin + zdiff * (zmax - zin) / zmax
    return x, y, z
end

# ## [Model Configuration](@id config-helper)
# We define a configuration function to assist in prescribing the physical
# model. The purpose of this is to populate the
# `AtmosLESConfiguration` with arguments
# appropriate to the problem being considered.
function config_agnesi_hs_lin(
    ::Type{FT},
    N,
    resolution,
    xmax,
    ymax,
    zmax,
) where {FT}
    ##
    ## Explicit Rayleigh damping:
    ##
    ## ``
    ## \tau_s = \alpha * \sin\left(0.5\pi \frac{z - z_s}{zmax - z_s} \right)^2,
    ## ``
    ## where
    ## ``sponge_ampz`` is the wave damping coefficient (1/s)
    ## ``z_s`` is the level where the Rayleigh sponge starts
    ## ``zmax`` is the domain top
    ##
    ## Setup the parameters for the gravity wave absorbing layer
    ## at the top of the domain
    ##
    ## `u_relaxation` contains the background velocity values to which
    ## the sponge relaxes the vertically moving wave
    u_relaxation = SVector(FT(10), FT(0), FT(0))

    ## Wave damping coefficient (1/s)
    sponge_ampz = FT(0.5)

    ## Vertical level where the absorbing layer starts
    z_s = FT(10000.0)

    ## Pass the sponge parameters to the sponge calculator
    rayleigh_sponge =
        RayleighSponge{FT}(zmax, z_s, sponge_ampz, u_relaxation, 2)

    ## Setup the source terms for this problem:
    source = (Gravity(), rayleigh_sponge)

    ## Define the reference state:
    T_virt = FT(280)
    temp_profile_ref = IsothermalProfile(param_set, T_virt)
    ref_state = HydrostaticState(temp_profile_ref)
    nothing # hide

    _C_smag = FT(0.0)
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = ref_state,
        turbulence = Vreman(_C_smag),
        moisture = DryModel(),
        tracers = NoTracers(),
    )
    model = AtmosModel{FT}(
        AtmosLESConfigType,
        physics;
        init_state_prognostic = init_agnesi_hs_lin!,
        source = source,
    )

    config = ClimateMachine.AtmosLESConfiguration(
        "Agnesi_NH_LINEAR",  # Problem title [String]
        N,                   # Polynomial order [Int]
        resolution,          # (Δx, Δy, Δz) effective resolution [m]
        xmax,                # Domain maximum size [m]
        ymax,                # Domain maximum size [m]
        zmax,                # Domain maximum size [m]
        param_set,           # Parameter set.
        init_agnesi_hs_lin!, # Function specifying initial condition
        model = model,       # Model type
        meshwarp = setmax(warp_agnesi, xmax, ymax, zmax),
    )
    return config
end

# Define a `main` method (entry point)
function main()
    FT = Float64

    ## Define the polynomial order and effective grid spacings:
    N = 4

    ## Define the domain size and spatial resolution
    Nx = 20
    Ny = 20
    Nz = 20
    xmax = FT(144000)
    ymax = FT(4000)
    zmax = FT(30000)
    Δx = xmax / FT(Nx)
    Δy = ymax / FT(Ny)
    Δz = zmax / FT(Nz)
    resolution = (Δx, Δy, Δz)
    t0 = FT(0)
    timeend = FT(100)

    ## Define the max Courant for the time integrator (ode_solver).
    ## The default value is 1.7 for LSRK144:
    CFL = FT(1.5)

    ## Assign configurations so they can be passed to the `invoke!` function
    driver_config = config_agnesi_hs_lin(FT, N, resolution, xmax, ymax, zmax)

    ## Define the time integrator:
    ## We chose an explicit single-rate LSRK144 for this problem
    ode_solver_type = ClimateMachine.ExplicitSolverType(
        solver_method = LSRK144NiegemannDiehlBusch,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        init_on_cpu = true,
        Courant_number = CFL,
    )

    ## Set up the spectral filter to remove the solutions spurious modes
    ## Define the order of the exponential filter: use 32 or 64 for this problem.
    ## The larger the value, the less dissipation you get:
    filterorder = 64
    filter = ExponentialFilter(solver_config.dg.grid, 0, filterorder)
    cbfilter = GenericCallbacks.EveryXSimulationSteps(1) do
        Filters.apply!(
            solver_config.Q,
            AtmosFilterPerturbations(driver_config.bl),
            solver_config.dg.grid,
            filter,
            state_auxiliary = solver_config.dg.state_auxiliary,
        )
        nothing
    end
    ## End exponential filter

    ## Invoke solver (calls `solve!` function for time-integrator),
    ## pass the driver, solver and diagnostic config information.
    result = ClimateMachine.invoke!(
        solver_config;
        user_callbacks = (cbfilter,),
        check_euclidean_distance = true,
    )

    ## Check that the solution norm is reasonable.
    @test FT(0.9) < result < FT(1)
    ## Some energy is lost from the sponge, so we cannot
    ## expect perfect energy conservation.
end

# Call `main`
main();
================================================
FILE: tutorials/Atmos/burgers_single_stack.jl
================================================
# # Single stack tutorial based on the 3D Burgers + tracer equations
# This tutorial implements the Burgers equations with a tracer field
# in a single element stack. The flow is initialized with a horizontally
# uniform profile of horizontal velocity and uniform initial temperature. The fluid
# is heated from the bottom surface. Gaussian noise is imposed to the horizontal
# velocity field at each node at the start of the simulation. The tutorial demonstrates how to
#
# * Initialize a [`BalanceLaw`](@ref ClimateMachine.BalanceLaws.BalanceLaw) in a single stack configuration;
# * Return the horizontal velocity field to a given profile (e.g., large-scale advection);
# * Remove any horizontal inhomogeneities or noise from the flow.
#
# The second and third bullet points are demonstrated imposing Rayleigh friction, horizontal
# diffusion and 2D divergence damping to the horizontal momentum prognostic equation.
# Equations solved in balance law form: # ```math # \begin{align} # \frac{∂ ρ}{∂ t} =& - ∇ ⋅ (ρ\mathbf{u}) \\ # \frac{∂ ρ\mathbf{u}}{∂ t} =& - ∇ ⋅ (-μ ∇\mathbf{u}) - ∇ ⋅ (ρ\mathbf{u} \mathbf{u}') - γ[ (ρ\mathbf{u}-ρ̄\mathbf{ū}) - (ρ\mathbf{u}-ρ̄\mathbf{ū})⋅ẑ ẑ] - ν_d ∇_h (∇_h ⋅ ρ\mathbf{u}) \\ # \frac{∂ ρcT}{∂ t} =& - ∇ ⋅ (-α ∇ρcT) - ∇ ⋅ (\mathbf{u} ρcT) # \end{align} # ``` # Boundary conditions: # ```math # \begin{align} # z_{\mathrm{min}}: & ρ = 1 \\ # z_{\mathrm{min}}: & ρ\mathbf{u} = \mathbf{0} \\ # z_{\mathrm{min}}: & ρcT = ρc T_{\mathrm{fixed}} \\ # z_{\mathrm{max}}: & ρ = 1 \\ # z_{\mathrm{max}}: & ρ\mathbf{u} = \mathbf{0} \\ # z_{\mathrm{max}}: & -α∇ρcT = 0 # \end{align} # ``` # where # - ``t`` is time # - ``ρ`` is the density # - ``\mathbf{u}`` is the velocity (vector) # - ``\mathbf{ū}`` is the horizontally averaged velocity (vector) # - ``μ`` is the dynamic viscosity tensor # - ``γ`` is the Rayleigh friction frequency # - ``ν_d`` is the horizontal divergence damping coefficient # - ``T`` is the temperature # - ``α`` is the thermal diffusivity tensor # - ``c`` is the heat capacity # - ``ρcT`` is the thermal energy # Solving these equations is broken down into the following steps: # 1) Preliminary configuration # 2) PDEs # 3) Space discretization # 4) Time discretization # 5) Solver hooks / callbacks # 6) Solve # 7) Post-processing # # Preliminary configuration # ## [Loading code](@id Loading-code-burgers) # First, we'll load our pre-requisites # - load external packages: using MPI using Distributions using OrderedCollections using Plots using StaticArrays using LinearAlgebra: Diagonal, tr # - load CLIMAParameters and set up to use it: using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() # - load necessary ClimateMachine modules: using ClimateMachine using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.Writers using ClimateMachine.DGMethods using 
ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, parameter_set
using ClimateMachine.Mesh.Geometry: LocalGeometry
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils

# - import necessary ClimateMachine modules: (`import`ing enables us to
# provide implementations of these structs/methods)
using ClimateMachine.Orientations:
    Orientation,
    FlatOrientation,
    init_aux!,
    vertical_unit_vector,
    projection_tangential

import ClimateMachine.BalanceLaws:
    vars_state,
    source!,
    flux_second_order!,
    flux_first_order!,
    compute_gradient_argument!,
    compute_gradient_flux!,
    init_state_auxiliary!,
    init_state_prognostic!,
    BoundaryCondition,
    boundary_conditions,
    boundary_state!

# ## Initialization

# Define the float type (`Float64` or `Float32`)
const FT = Float64;
# Initialize ClimateMachine for CPU.
ClimateMachine.init(; disable_gpu = true);

const clima_dir = dirname(dirname(pathof(ClimateMachine)));

# Load some helper functions for plotting
include(joinpath(clima_dir, "docs", "plothelpers.jl"));

# # Define the set of Partial Differential Equations (PDEs)

# ## Define the model

# Model parameters can be stored in the particular [`BalanceLaw`](@ref
# ClimateMachine.BalanceLaws.BalanceLaw), in this case, the `BurgersEquation`.
# All fields below (except `param_set` and `orientation`) carry defaults via
# `Base.@kwdef`:
Base.@kwdef struct BurgersEquation{FT, APS, O} <: BalanceLaw
    "Parameters"
    param_set::APS
    "Orientation model"
    orientation::O
    "Heat capacity"
    c::FT = 1
    "Vertical dynamic viscosity"
    μv::FT = 1e-4
    "Horizontal dynamic viscosity"
    μh::FT = 1
    "Vertical thermal diffusivity"
    αv::FT = 1e-2
    "Horizontal thermal diffusivity"
    αh::FT = 1
    "IC Gaussian noise standard deviation"
    σ::FT = 5e-2
    "Rayleigh damping frequency"
    γ::FT = 5
    "Domain height"
    zmax::FT = 1
    "Initial conditions for temperature"
    initialT::FT = 295.15
    "Bottom boundary value for temperature (Dirichlet boundary conditions)"
    T_bottom::FT = 300.0
    "Top flux (α∇ρcT) at top boundary (Neumann boundary conditions)"
    flux_top::FT = 0.0
    "Divergence damping coefficient (horizontal)"
    νd::FT = 1
end

# Create an instance of the `BurgersEquation`:
orientation = FlatOrientation()
m = BurgersEquation{FT, typeof(param_set), typeof(orientation)}(
    param_set = param_set,
    orientation = orientation,
);

# This model dictates the flow control, using [Dynamic Multiple
# Dispatch](https://en.wikipedia.org/wiki/Multiple_dispatch), for which
# kernels are executed.

# ## Define the variables

# All of the methods defined in this section were `import`ed in
# [Loading code](@ref Loading-code-burgers) to let us provide
# implementations for our `BurgersEquation` as they will be used
# by the solver.

# Specify auxiliary variables for `BurgersEquation`
function vars_state(m::BurgersEquation, st::Auxiliary, FT)
    @vars begin
        coord::SVector{3, FT}
        orientation::vars_state(m.orientation, st, FT)
    end
end

# Specify prognostic variables, the variables solved for in the PDEs, for
# `BurgersEquation`
vars_state(::BurgersEquation, ::Prognostic, FT) =
    @vars(ρ::FT, ρu::SVector{3, FT}, ρcT::FT);

# Specify state variables whose gradients are needed for `BurgersEquation`
vars_state(::BurgersEquation, ::Gradient, FT) =
    @vars(u::SVector{3, FT}, ρcT::FT, ρu::SVector{3, FT});

# Specify gradient variables for `BurgersEquation`
vars_state(::BurgersEquation, ::GradientFlux, FT) = @vars(
    μ∇u::SMatrix{3, 3, FT, 9},
    α∇ρcT::SVector{3, FT},
    νd∇D::SMatrix{3, 3, FT, 9}
);

# ## Define the compute kernels

# Specify the initial values in `aux::Vars`, which are available in
# `init_state_prognostic!`. Note that
# - this method is only called at `t=0`.
# - `aux.coord` is available here because we've specified `coord` in `vars_state(m, ::Auxiliary, FT)`.
function nodal_init_state_auxiliary!(
    m::BurgersEquation,
    aux::Vars,
    tmp::Vars,
    geom::LocalGeometry,
)
    ## Cache the nodal coordinates so the other kernels can read them.
    aux.coord = geom.coord
end;

# `init_aux!` initializes the auxiliary gravitational potential field needed
# for vertical projections; the nodal pass above then fills the coordinates.
function init_state_auxiliary!(
    m::BurgersEquation,
    state_auxiliary::MPIStateArray,
    grid,
    direction,
)
    ## Orientation-related auxiliary data first, then the nodal coordinates.
    init_aux!(m, m.orientation, state_auxiliary, grid, direction)
    init_state_auxiliary!(
        m,
        nodal_init_state_auxiliary!,
        state_auxiliary,
        grid,
        direction,
    )
end;

# Specify the initial values in `state::Vars`. Note that
# - this method is only called at `t=0`.
# - `state.ρ`, `state.ρu` and `state.ρcT` are available here because we've
#   specified `ρ`, `ρu` and `ρcT` in `vars_state(m, state, FT)`.
function init_state_prognostic!(
    m::BurgersEquation,
    state::Vars,
    aux::Vars,
    localgeo,
    t::Real,
)
    z = aux.coord[3]
    ## Parabolic base profile (vanishing at z = 0 and z = zmax), perturbed by
    ## Gaussian noise of standard deviation `m.σ`, drawn independently for
    ## each horizontal component.
    base = 1 - 4 * (z - m.zmax / 2)^2
    noise_u = rand(Normal(0, m.σ))
    noise_v = rand(Normal(0, m.σ))
    state.ρ = 1
    state.ρu = SVector(base + noise_u, base + noise_v, 0)
    state.ρcT = state.ρ * m.c * m.initialT
end;

# The remaining methods, defined in this section, are called at every
# time-step in the solver by the [`BalanceLaw`](@ref
# ClimateMachine.BalanceLaws.BalanceLaw) framework.

# Since we have second-order fluxes, we must tell `ClimateMachine` to compute
# the gradients of `ρcT`, `u` and `ρu`. Here, we specify how they are
# computed. Note that e.g. `transform.ρcT` is available here because we've
# specified `ρcT` in `vars_state(m, ::Gradient, FT)`.
function compute_gradient_argument!(
    m::BurgersEquation,
    transform::Vars,
    state::Vars,
    aux::Vars,
    t::Real,
)
    ## Velocity is reconstructed from momentum and density.
    transform.u = state.ρu / state.ρ
    transform.ρu = state.ρu
    transform.ρcT = state.ρcT
end;

# Specify where in `diffusive::Vars` to store the computed gradient from
# `compute_gradient_argument!`. Note that:
# - `diffusive.μ∇u` is available here because we've specified `μ∇u` in `vars_state(m, ::GradientFlux, FT)`.
# - `∇transform.u` is available here because we've specified `u` in `vars_state(m, ::Gradient, FT)`.
# - `diffusive.μ∇u` is built using an anisotropic diffusivity tensor.
# - The `divergence` may be computed from the trace of tensor `∇ρu`.
function compute_gradient_flux!(
    m::BurgersEquation{FT},
    diffusive::Vars,
    ∇transform::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
) where {FT}
    param_set = parameter_set(m)
    ∇ρu = ∇transform.ρu
    ẑ = vertical_unit_vector(m.orientation, param_set, aux)
    ## Horizontal divergence: the full trace minus the vertical contribution
    divergence = tr(∇ρu) - ẑ' * ∇ρu * ẑ
    ## Anisotropic (horizontal/vertical) diffusive fluxes
    diffusive.α∇ρcT = Diagonal(SVector(m.αh, m.αh, m.αv)) * ∇transform.ρcT
    diffusive.μ∇u = Diagonal(SVector(m.μh, m.μh, m.μv)) * ∇transform.u
    ## Divergence damping acts on the horizontal components only
    diffusive.νd∇D =
        Diagonal(SVector(m.νd, m.νd, FT(0))) *
        Diagonal(SVector(divergence, divergence, FT(0)))
end;

# Introduce Rayleigh friction towards a target profile as a source.
# Note that:
# - Rayleigh damping is only applied in the horizontal using the `projection_tangential` method.
function source!(
    m::BurgersEquation{FT},
    source::Vars,
    state::Vars,
    diffusive::Vars,
    aux::Vars,
    args...,
) where {FT}
    param_set = parameter_set(m)
    z = aux.coord[3]
    ## Target (horizontally homogeneous) momentum profile
    ρ̄ū =
        state.ρ * SVector{3, FT}(
            0.5 - 2 * (z - m.zmax / 2)^2,
            0.5 - 2 * (z - m.zmax / 2)^2,
            0.0,
        )
    ## Momentum perturbation about the target profile
    ρu_p = state.ρu - ρ̄ū
    ## Damp only the tangential (horizontal) component of the perturbation
    source.ρu -=
        m.γ * projection_tangential(m.orientation, param_set, aux, ρu_p)
end;

# Compute advective flux.
# Note that:
# - `state.ρu` is available here because we've specified `ρu` in `vars_state(m, state, FT)`.
function flux_first_order!(
    m::BurgersEquation,
    flux::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
    _...,
)
    ## Mass, momentum and thermal-energy advection
    flux.ρ = state.ρu
    u = state.ρu / state.ρ
    flux.ρu = state.ρu * u'
    flux.ρcT = u * state.ρcT
end;

# Compute diffusive flux (e.g. ``F(μ, \mathbf{u}, t) = -μ∇\mathbf{u}`` in the original PDE).
# Note that:
# - `diffusive.μ∇u` is available here because we've specified `μ∇u` in `vars_state(m, ::GradientFlux, FT)`.
# - The divergence gradient can be written as a diffusive flux using a divergence diagonal tensor.
function flux_second_order!(
    m::BurgersEquation,
    flux::Grad,
    state::Vars,
    diffusive::Vars,
    hyperdiffusive::Vars,
    aux::Vars,
    t::Real,
)
    ## Accumulate thermal diffusion, viscous stress and divergence damping
    flux.ρcT -= diffusive.α∇ρcT
    flux.ρu -= diffusive.μ∇u
    flux.ρu -= diffusive.νd∇D
end;

# ### Boundary conditions

# Second-order terms in our equations, ``∇⋅(G)`` where ``G = μ∇\mathbf{u}``, are
# internally reformulated to first-order unknowns.
# Boundary conditions must be specified for all unknowns, both first-order and
# second-order unknowns which have been reformulated.

# Marker type dispatching the top-boundary methods
struct TopBC <: BoundaryCondition end;
# Marker type dispatching the bottom-boundary methods
struct BottomBC <: BoundaryCondition end;
# Tuple order matters: (bottom, top)
boundary_conditions(::BurgersEquation) = (BottomBC(), TopBC());

# The boundary conditions for `ρ`, `ρu` and `ρcT` (first order unknowns)
function boundary_state!(
    nf,
    bc::BottomBC,
    m::BurgersEquation,
    state⁺::Vars,
    aux⁺::Vars,
    n⁻,
    _...,
)
    ## Dirichlet: fixed density, zero momentum, fixed bottom thermal energy
    state⁺.ρ = 1
    state⁺.ρu = SVector(0, 0, 0)
    state⁺.ρcT = state⁺.ρ * m.c * m.T_bottom
end;
function boundary_state!(
    nf,
    bc::TopBC,
    m::BurgersEquation,
    state⁺::Vars,
    aux⁺::Vars,
    n⁻,
    _...,
)
    state⁺.ρ = 1
    state⁺.ρu = SVector(0, 0, 0)
end;

# The boundary conditions for `ρ`, `ρu` and `ρcT` are specified here for
# second-order unknowns
function boundary_state!(
    nf,
    bc::BottomBC,
    m::BurgersEquation,
    state⁺::Vars,
    diff⁺::Vars,
    hyperdiff⁺::Vars,
    aux⁺::Vars,
    n⁻,
    _...,
)
    state⁺.ρ = 1
    state⁺.ρu = SVector(0, 0, 0)
    state⁺.ρcT = state⁺.ρ * m.c * m.T_bottom
end;
function boundary_state!(
    nf,
    bc::TopBC,
    m::BurgersEquation,
    state⁺::Vars,
    diff⁺::Vars,
    hyperdiff⁺::Vars,
    aux⁺::Vars,
    n⁻,
    _...,
)
    state⁺.ρ = 1
    state⁺.ρu = SVector(0, 0, 0)
    ## Neumann: prescribed thermal flux `flux_top` through the top boundary
    diff⁺.α∇ρcT = -n⁻ * m.flux_top
end;

# # Spatial discretization

# Prescribe polynomial order of basis functions in finite elements
N_poly = 5;

# Specify the number of vertical elements
nelem_vert = 10;

# Specify the domain height
zmax = m.zmax;

# Establish a `ClimateMachine` single stack configuration
driver_config = ClimateMachine.SingleStackConfiguration(
    "BurgersEquation",
    N_poly,
    nelem_vert,
    zmax,
    param_set,
    m,
    numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
);

# # Time discretization

# Specify simulation time (SI units)
t0 = FT(0);
timeend = FT(1);

# We'll define the time-step based on the Fourier
# number and the [Courant number](https://en.wikipedia.org/wiki/Courant–Friedrichs–Lewy_condition)
# of the flow
Δ = min_node_distance(driver_config.grid)
given_Fourier = FT(0.5);
## Diffusive bound uses the largest (horizontal) diffusion coefficient
Fourier_bound = given_Fourier * Δ^2 / max(m.αh, m.μh, m.νd);
Courant_bound = FT(0.5) * Δ;
dt = min(Fourier_bound, Courant_bound)

# # Configure a `ClimateMachine` solver.

# This initializes the state vector and allocates memory for the solution in
# space (`dg` has the model `m`, which describes the PDEs as well as the
# function used for initialization). This additionally initializes the ODE
# solver, by default an explicit Low-Storage
# [Runge-Kutta](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods)
# method.
solver_config =
    ClimateMachine.SolverConfiguration(t0, timeend, driver_config, ode_dt = dt);

# ## Inspect the initial conditions for a single nodal stack

# Let's export plots of the initial state
output_dir = @__DIR__;
mkpath(output_dir);

z_scale = 100 # convert from meters to cm
z_key = "z"
z_label = "z [cm]"
z = get_z(driver_config.grid; z_scale = z_scale)
state_vars = get_vars_from_nodal_stack(
    driver_config.grid,
    solver_config.Q,
    vars_state(m, Prognostic(), FT),
);
# Create an array to store the solution:
state_data = Dict[state_vars] # store initial condition at ``t=0``
time_data = FT[0] # store time data

# Generate plots of initial conditions for the southwest nodal stack
export_plot(
    z,
    time_data,
    state_data,
    ("ρcT",),
    joinpath(output_dir, "initial_condition_T_nodal.png");
    xlabel = "ρcT at southwest node",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    state_data,
    ("ρu[1]",),
    joinpath(output_dir, "initial_condition_u_nodal.png");
    xlabel = "ρu at southwest node",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    state_data,
    ("ρu[2]",),
    joinpath(output_dir, "initial_condition_v_nodal.png");
    xlabel = "ρv at southwest node",
    ylabel = z_label,
);
# ![](initial_condition_T_nodal.png)
# ![](initial_condition_u_nodal.png)

# ## Inspect the initial conditions for the horizontal averages

# Horizontal statistics of variables
state_vars_var = get_horizontal_variance(
    driver_config.grid,
    solver_config.Q,
    vars_state(m, Prognostic(), FT),
);
state_vars_avg = get_horizontal_mean(
    driver_config.grid,
    solver_config.Q,
    vars_state(m, Prognostic(), FT),
);
data_avg = Dict[state_vars_avg]
data_var = Dict[state_vars_var]

export_plot(
    z,
    time_data,
    data_avg,
    ("ρu[1]",),
    joinpath(output_dir, "initial_condition_avg_u.png");
    xlabel = "Horizontal mean of ρu",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_var,
    ("ρu[1]",),
    joinpath(output_dir, "initial_condition_variance_u.png");
    xlabel = "Horizontal variance of ρu",
    ylabel = z_label,
);
# ![](initial_condition_avg_u.png)
# ![](initial_condition_variance_u.png)

# # Solver hooks / callbacks

# Define the number of outputs from `t0` to `timeend`
const n_outputs = 5;
const every_x_simulation_time = timeend / n_outputs;

# Collect the `z` coordinate (converted to cm above), keyed by `z_key`:
dims = OrderedDict(z_key => collect(z));

# Create dictionaries to store outputs:
data_var = Dict[Dict([k => Dict() for k in 0:n_outputs]...),]
data_var[1] = state_vars_var

data_avg = Dict[Dict([k => Dict() for k in 0:n_outputs]...),]
data_avg[1] = state_vars_avg

data_nodal = Dict[Dict([k => Dict() for k in 0:n_outputs]...),]
data_nodal[1] = state_vars

# The `ClimateMachine`'s time-steppers provide hooks, or callbacks, which
# allow users to inject code to be executed at specified intervals. In this
# callback, the horizontal statistics and the southwest nodal stack of the
# state variables are collected and appended (together with the simulation
# time) to the in-memory arrays above, for plotting in post-processing.
callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do
    state_vars_var = get_horizontal_variance(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
    )
    state_vars_avg = get_horizontal_mean(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
    )
    state_vars = get_vars_from_nodal_stack(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
        i = 1,
        j = 1,
    )
    push!(data_var, state_vars_var)
    push!(data_avg, state_vars_avg)
    push!(data_nodal, state_vars)
    push!(time_data, gettime(solver_config.solver))
    nothing
end;

# # Solve

# This is the main `ClimateMachine` solver invocation. While users do not have
# access to the time-stepping loop, code may be injected via `user_callbacks`,
# which is a `Tuple` of [`GenericCallbacks`](@ref ClimateMachine.GenericCallbacks).
ClimateMachine.invoke!(solver_config; user_callbacks = (callback,))

# # Post-processing

# Our solution has now been calculated; the collected data are plotted below
# and the figures written to `output_dir`.
# Let's plot the horizontal statistics of `ρu` and `ρcT`, as well as the evolution of
# `ρu` for the southwest nodal stack:
# NOTE: the variable-name argument must be a 1-tuple — in Julia `("ρu[1]")`
# is just a parenthesized String; the trailing comma (`("ρu[1]",)`) matches
# the tuple form used for the initial-condition plots above.
export_plot(
    z,
    time_data,
    data_avg,
    ("ρu[1]",),
    joinpath(output_dir, "solution_vs_time_u_avg.png");
    xlabel = "Horizontal mean of ρu",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_var,
    ("ρu[1]",),
    joinpath(output_dir, "variance_vs_time_u.png");
    xlabel = "Horizontal variance of ρu",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_avg,
    ("ρcT",),
    joinpath(output_dir, "solution_vs_time_T_avg.png");
    xlabel = "Horizontal mean of ρcT",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_var,
    ("ρcT",),
    joinpath(output_dir, "variance_vs_time_T.png");
    xlabel = "Horizontal variance of ρcT",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_nodal,
    ("ρu[1]",),
    joinpath(output_dir, "solution_vs_time_u_nodal.png");
    xlabel = "ρu at southwest node",
    ylabel = z_label,
);
# ![](solution_vs_time_u_avg.png)
# ![](variance_vs_time_u.png)
# ![](solution_vs_time_T_avg.png)
# ![](variance_vs_time_T.png)
# ![](solution_vs_time_u_nodal.png)

# Rayleigh friction returns the horizontal velocity to the objective
# profile on the timescale of the simulation (1 second), since `γ`∼1. The horizontal viscosity
# and 2D divergence damping act to reduce the horizontal variance over the same timescale.
# The initial Gaussian noise is propagated to the temperature field through advection.
# The horizontal diffusivity acts to reduce this `ρcT` variance in time, although in a longer
# timescale.
# To run this file, and # inspect the solution, include this tutorial in the Julia REPL # with: # ```julia # include(joinpath("tutorials", "Atmos", "burgers_single_stack.jl")) # ``` ================================================ FILE: tutorials/Atmos/burgers_single_stack_bjfnk.jl ================================================ # # Single stack with HEVI solver tutorial based on the 3D Burgers + tracer equations # This tutorial implements the Burgers equations with a tracer field # in a single element stack. The flow is initialized with a horizontally # uniform profile of horizontal velocity and uniform initial temperature. The fluid # is heated from the bottom surface. Gaussian noise is imposed to the horizontal # velocity field at each node at the start of the simulation. The tutorial demonstrates how to # # * Initialize a [`BalanceLaw`](@ref ClimateMachine.BalanceLaws.BalanceLaw) in a single stack configuration; # * Return the horizontal velocity field to a given profile (e.g., large-scale advection); # * Remove any horizontal inhomogeneities or noise from the flow. # * Use horizontal explicit vertical implicit (HEVI) solver in the Single stack setup # # The second and third bullet points are demonstrated imposing Rayleigh friction, horizontal # diffusion and 2D divergence damping to the horizontal momentum prognostic equation. 
# Equations solved in balance law form: # ```math # \begin{align} # \frac{∂ ρ}{∂ t} =& - ∇ ⋅ (ρ\mathbf{u}) \\ # \frac{∂ ρ\mathbf{u}}{∂ t} =& - ∇ ⋅ (-μ ∇\mathbf{u}) - ∇ ⋅ (ρ\mathbf{u} \mathbf{u}') - γ[ (ρ\mathbf{u}-ρ̄\mathbf{ū}) - (ρ\mathbf{u}-ρ̄\mathbf{ū})⋅ẑ ẑ] - ν_d ∇_h (∇_h ⋅ ρ\mathbf{u}) \\ # \frac{∂ ρcT}{∂ t} =& - ∇ ⋅ (-α ∇ρcT) - ∇ ⋅ (\mathbf{u} ρcT) # \end{align} # ``` # Boundary conditions: # ```math # \begin{align} # z_{\mathrm{min}}: & ρ = 1 \\ # z_{\mathrm{min}}: & ρ\mathbf{u} = \mathbf{0} \\ # z_{\mathrm{min}}: & ρcT = ρc T_{\mathrm{fixed}} \\ # z_{\mathrm{max}}: & ρ = 1 \\ # z_{\mathrm{max}}: & ρ\mathbf{u} = \mathbf{0} \\ # z_{\mathrm{max}}: & -α∇ρcT = 0 # \end{align} # ``` # where # - ``t`` is time # - ``ρ`` is the density # - ``\mathbf{u}`` is the velocity (vector) # - ``\mathbf{ū}`` is the horizontally averaged velocity (vector) # - ``μ`` is the dynamic viscosity tensor # - ``γ`` is the Rayleigh friction frequency # - ``ν_d`` is the horizontal divergence damping coefficient # - ``T`` is the temperature # - ``α`` is the thermal diffusivity tensor # - ``c`` is the heat capacity # - ``ρcT`` is the thermal energy # Solving these equations is broken down into the following steps: # 1) Preliminary configuration # 2) PDEs # 3) Space discretization # 4) Time discretization # 5) Solver hooks / callbacks # 6) Solve # 7) Post-processing # # Preliminary configuration # ## [Loading code](@id Loading-code-burgers-bjfnk) # First, we'll load our pre-requisites # - load external packages: using MPI using Distributions using OrderedCollections using Plots using StaticArrays using LinearAlgebra: Diagonal, tr # - load CLIMAParameters and set up to use it: using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() # - load necessary ClimateMachine modules: using ClimateMachine using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.Writers using ClimateMachine.DGMethods using 
ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, parameter_set using ClimateMachine.Mesh.Geometry: LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.SystemSolvers using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils # - import necessary ClimateMachine modules: (`import`ing enables us to # provide implementations of these structs/methods) using ClimateMachine.Orientations: Orientation, FlatOrientation, init_aux!, vertical_unit_vector, projection_tangential import ClimateMachine.BalanceLaws: vars_state, source!, flux_second_order!, flux_first_order!, compute_gradient_argument!, compute_gradient_flux!, init_state_auxiliary!, init_state_prognostic!, BoundaryCondition, boundary_conditions, boundary_state! # ## Initialization # Define the float type (`Float64` or `Float32`) const FT = Float64; # Initialize ClimateMachine for CPU. 
ClimateMachine.init(; disable_gpu = true);

const clima_dir = dirname(dirname(pathof(ClimateMachine)));

# Load some helper functions for plotting
include(joinpath(clima_dir, "docs", "plothelpers.jl"));

# # Define the set of Partial Differential Equations (PDEs)

# ## Define the model

# Model parameters can be stored in the particular [`BalanceLaw`](@ref
# ClimateMachine.BalanceLaws.BalanceLaw), in this case, the `BurgersEquation`:
Base.@kwdef struct BurgersEquation{FT, APS, O} <: BalanceLaw
    "Parameters"
    param_set::APS
    "Orientation model"
    orientation::O
    "Heat capacity"
    c::FT = 1
    "Vertical dynamic viscosity"
    μv::FT = 1e-4
    "Horizontal dynamic viscosity"
    μh::FT = 1
    "Vertical thermal diffusivity"
    αv::FT = 1e-2
    "Horizontal thermal diffusivity"
    αh::FT = 1
    "IC Gaussian noise standard deviation"
    σ::FT = 5e-2
    "Rayleigh damping"
    γ::FT = 5
    "Domain height"
    zmax::FT = 1
    "Initial conditions for temperature"
    initialT::FT = 295.15
    "Bottom boundary value for temperature (Dirichlet boundary conditions)"
    T_bottom::FT = 300.0
    "Top flux (α∇ρcT) at top boundary (Neumann boundary conditions)"
    flux_top::FT = 0.0
    "Divergence damping coefficient (horizontal)"
    νd::FT = 1
end

# Create an instance of the `BurgersEquation`:
orientation = FlatOrientation()
m = BurgersEquation{FT, typeof(param_set), typeof(orientation)}(
    param_set = param_set,
    orientation = orientation,
);

# This model dictates the flow control, using [Dynamic Multiple
# Dispatch](https://en.wikipedia.org/wiki/Multiple_dispatch), for which
# kernels are executed.

# ## Define the variables

# All of the methods defined in this section were `import`ed in
# [Loading code](@ref Loading-code-burgers-bjfnk) to let us provide
# implementations for our `BurgersEquation` as they will be used
# by the solver.
# Specify auxiliary variables for `BurgersEquation` function vars_state(m::BurgersEquation, st::Auxiliary, FT) @vars begin coord::SVector{3, FT} orientation::vars_state(m.orientation, st, FT) end end # Specify prognostic variables, the variables solved for in the PDEs, for # `BurgersEquation` vars_state(::BurgersEquation, ::Prognostic, FT) = @vars(ρ::FT, ρu::SVector{3, FT}, ρcT::FT); # Specify state variables whose gradients are needed for `BurgersEquation` vars_state(::BurgersEquation, ::Gradient, FT) = @vars(u::SVector{3, FT}, ρcT::FT, ρu::SVector{3, FT}); # Specify gradient variables for `BurgersEquation` vars_state(::BurgersEquation, ::GradientFlux, FT) = @vars( μ∇u::SMatrix{3, 3, FT, 9}, α∇ρcT::SVector{3, FT}, νd∇D::SMatrix{3, 3, FT, 9} ); # ## Define the compute kernels # Specify the initial values in `aux::Vars`, which are available in # `init_state_prognostic!`. Note that # - this method is only called at `t=0`. # - `aux.coord` is available here because we've specified `coord` in `vars_state(m, aux, FT)`. function nodal_init_state_auxiliary!( m::BurgersEquation, aux::Vars, tmp::Vars, geom::LocalGeometry, ) aux.coord = geom.coord end; # `init_aux!` initializes the auxiliary gravitational potential field needed for vertical projections function init_state_auxiliary!( m::BurgersEquation, state_auxiliary::MPIStateArray, grid, direction, ) init_aux!(m, m.orientation, state_auxiliary, grid, direction) init_state_auxiliary!( m, nodal_init_state_auxiliary!, state_auxiliary, grid, direction, ) end; # Specify the initial values in `state::Vars`. Note that # - this method is only called at `t=0`. # - `state.ρ`, `state.ρu` and`state.ρcT` are available here because we've specified `ρ`, `ρu` and `ρcT` in `vars_state(m, state, FT)`. 
function init_state_prognostic!( m::BurgersEquation, state::Vars, aux::Vars, localgeo, t::Real, ) z = aux.coord[3] ε1 = rand(Normal(0, m.σ)) ε2 = rand(Normal(0, m.σ)) state.ρ = 1 ρu = 1 - 4 * (z - m.zmax / 2)^2 + ε1 ρv = 1 - 4 * (z - m.zmax / 2)^2 + ε2 ρw = 0 state.ρu = SVector(ρu, ρv, ρw) state.ρcT = state.ρ * m.c * m.initialT end; # The remaining methods, defined in this section, are called at every # time-step in the solver by the [`BalanceLaw`](@ref # ClimateMachine.BalanceLaws.BalanceLaw) framework. # Since we have second-order fluxes, we must tell `ClimateMachine` to compute # the gradient of `ρcT`, `u` and `ρu`. Here, we specify how `ρcT`, `u` and `ρu` are computed. Note that # e.g. `transform.ρcT` is available here because we've specified `ρcT` in `vars_state(m, ::Gradient, FT)`. function compute_gradient_argument!( m::BurgersEquation, transform::Vars, state::Vars, aux::Vars, t::Real, ) transform.ρcT = state.ρcT transform.u = state.ρu / state.ρ transform.ρu = state.ρu end; # Specify where in `diffusive::Vars` to store the computed gradient from # `compute_gradient_argument!`. Note that: # - `diffusive.μ∇u` is available here because we've specified `μ∇u` in `vars_state(m, ::GradientFlux, FT)`. # - `∇transform.u` is available here because we've specified `u` in `vars_state(m, ::Gradient, FT)`. # - `diffusive.μ∇u` is built using an anisotropic diffusivity tensor. # - The `divergence` may be computed from the trace of tensor `∇ρu`. 
function compute_gradient_flux!( m::BurgersEquation{FT}, diffusive::Vars, ∇transform::Grad, state::Vars, aux::Vars, t::Real, ) where {FT} param_set = parameter_set(m) ∇ρu = ∇transform.ρu ẑ = vertical_unit_vector(m.orientation, param_set, aux) divergence = tr(∇ρu) - ẑ' * ∇ρu * ẑ diffusive.α∇ρcT = Diagonal(SVector(m.αh, m.αh, m.αv)) * ∇transform.ρcT diffusive.μ∇u = Diagonal(SVector(m.μh, m.μh, m.μv)) * ∇transform.u diffusive.νd∇D = Diagonal(SVector(m.νd, m.νd, FT(0))) * Diagonal(SVector(divergence, divergence, FT(0))) end; # Introduce Rayleigh friction towards a target profile as a source. # Note that: # - Rayleigh damping is only applied in the horizontal using the `projection_tangential` method. function source!( m::BurgersEquation{FT}, source::Vars, state::Vars, diffusive::Vars, aux::Vars, args..., ) where {FT} param_set = parameter_set(m) ẑ = vertical_unit_vector(m.orientation, param_set, aux) z = aux.coord[3] ρ̄ū = state.ρ * SVector{3, FT}( 0.5 - 2 * (z - m.zmax / 2)^2, 0.5 - 2 * (z - m.zmax / 2)^2, 0.0, ) ρu_p = state.ρu - ρ̄ū source.ρu -= m.γ * projection_tangential(m.orientation, param_set, aux, ρu_p) end; # Compute advective flux. # Note that: # - `state.ρu` is available here because we've specified `ρu` in `vars_state(m, state, FT)`. function flux_first_order!( m::BurgersEquation, flux::Grad, state::Vars, aux::Vars, t::Real, _..., ) flux.ρ = state.ρu u = state.ρu / state.ρ flux.ρu = state.ρu * u' flux.ρcT = u * state.ρcT end; # Compute diffusive flux (e.g. ``F(μ, \mathbf{u}, t) = -μ∇\mathbf{u}`` in the original PDE). # Note that: # - `diffusive.μ∇u` is available here because we've specified `μ∇u` in `vars_state(m, ::GradientFlux, FT)`. # - The divergence gradient can be written as a diffusive flux using a divergence diagonal tensor. 
function flux_second_order!( m::BurgersEquation, flux::Grad, state::Vars, diffusive::Vars, hyperdiffusive::Vars, aux::Vars, t::Real, ) flux.ρcT -= diffusive.α∇ρcT flux.ρu -= diffusive.μ∇u flux.ρu -= diffusive.νd∇D end; # ### Boundary conditions # Second-order terms in our equations, ``∇⋅(G)`` where ``G = μ∇\mathbf{u}``, are # internally reformulated to first-order unknowns. # Boundary conditions must be specified for all unknowns, both first-order and # second-order unknowns which have been reformulated. struct TopBC <: BoundaryCondition end; struct BottomBC <: BoundaryCondition end; boundary_conditions(::BurgersEquation) = (BottomBC(), TopBC()); # The boundary conditions for `ρ`, `ρu` and `ρcT` (first order unknowns) function boundary_state!( nf, bc::BottomBC, m::BurgersEquation, state⁺::Vars, aux⁺::Vars, n⁻, _..., ) state⁺.ρ = 1 state⁺.ρu = SVector(0, 0, 0) state⁺.ρcT = state⁺.ρ * m.c * m.T_bottom end; function boundary_state!( nf, bc::TopBC, m::BurgersEquation, state⁺::Vars, aux⁺::Vars, n⁻, _..., ) state⁺.ρ = 1 state⁺.ρu = SVector(0, 0, 0) end; # The boundary conditions for `ρ`, `ρu` and `ρcT` are specified here for # second-order unknowns function boundary_state!( nf, bc::BottomBC, m::BurgersEquation, state⁺::Vars, diff⁺::Vars, hyperdiff⁺::Vars, aux⁺::Vars, n⁻, _..., ) state⁺.ρ = 1 state⁺.ρu = SVector(0, 0, 0) state⁺.ρcT = state⁺.ρ * m.c * m.T_bottom end; function boundary_state!( nf, bc::TopBC, m::BurgersEquation, state⁺::Vars, diff⁺::Vars, hyperdiff⁺::Vars, aux⁺::Vars, n⁻, _..., ) state⁺.ρ = 1 state⁺.ρu = SVector(0, 0, 0) diff⁺.α∇ρcT = -n⁻ * m.flux_top end; # # Spatial discretization # Prescribe polynomial order of basis functions in finite elements N_poly = 5; # Specify the number of vertical elements nelem_vert = 10; # Specify the domain height zmax = m.zmax; # # Temporal discretization # This initializes the ODE # solver, the horizontal explicit vertical implicit scheme # with ARK2GiraldoKellyConstantinescu method. 
# HEVI treats the vertical (stiff) direction implicitly while stepping the
# horizontal direction explicitly, which is what permits the enlarged time
# step chosen below.
ode_solver_type = ClimateMachine.HEVISolverType(
    FT;
    solver_method = ARK2GiraldoKellyConstantinescu,
    linear_max_subspace_size = Int(30), # Krylov subspace size for the linear solver
    linear_atol = FT(-1.0), # NOTE(review): negative value — presumably disables the absolute tolerance; confirm against SystemSolvers
    linear_rtol = FT(1e-5),
    nonlinear_max_iterations = Int(10),
    nonlinear_rtol = FT(1e-4),
    nonlinear_ϵ = FT(1.e-10), # finite-difference perturbation for the Jacobian-free Newton-Krylov action
    preconditioner_update_freq = Int(50),
)

# Establish a `ClimateMachine` single stack configuration
driver_config = ClimateMachine.SingleStackConfiguration(
    "BurgersEquation",
    N_poly,
    nelem_vert,
    zmax,
    param_set,
    m,
    numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
);

# # Time discretization

# Specify simulation time (SI units)
t0 = FT(0);
timeend = FT(1);

# We'll define the time-step based on the Fourier
# number and the [Courant number](https://en.wikipedia.org/wiki/Courant–Friedrichs–Lewy_condition)
# of the flow
Δ = min_node_distance(driver_config.grid)
given_Fourier = FT(0.5);
Fourier_bound = given_Fourier * Δ^2 / max(m.αh, m.μh, m.νd);
Courant_bound = FT(0.5) * Δ;
# We define the time step 50 times larger than that defined by CFL law
# (presumably affordable because the vertical direction is treated
# implicitly by the HEVI scheme above)
dt = FT(50.0) * min(Fourier_bound, Courant_bound)

# # Configure a `ClimateMachine` solver.

# This initializes the state vector and allocates memory for the solution in
# space (`dg` has the model `m`, which describes the PDEs as well as the
# function used for initialization).
solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config, ode_dt = dt, ode_solver_type = ode_solver_type, ); # ## Inspect the initial conditions for a single nodal stack # Let's export plots of the initial state output_dir = @__DIR__; mkpath(output_dir); z_scale = 100 # convert from meters to cm z_key = "z" z_label = "z [cm]" z = get_z(driver_config.grid; z_scale = z_scale) state_vars = get_vars_from_nodal_stack( driver_config.grid, solver_config.Q, vars_state(m, Prognostic(), FT), ); # Create an array to store the solution: state_data = Dict[state_vars] # store initial condition at ``t=0`` time_data = FT[0] # store time data # Generate plots of initial conditions for the southwest nodal stack export_plot( z, time_data, state_data, ("ρcT",), joinpath(output_dir, "initial_condition_T_nodal_bjfnk.png"); xlabel = "ρcT at southwest node", ylabel = z_label, ); export_plot( z, time_data, state_data, ("ρu[1]",), joinpath(output_dir, "initial_condition_u_nodal_bjfnk.png"); xlabel = "ρu at southwest node", ylabel = z_label, ); export_plot( z, time_data, state_data, ("ρu[2]",), joinpath(output_dir, "initial_condition_v_nodal_bjfnk.png"); xlabel = "ρv at southwest node", ylabel = z_label, ); # ![](initial_condition_T_nodal_bjfnk.png) # ![](initial_condition_u_nodal_bjfnk.png) # ## Inspect the initial conditions for the horizontal averages # Horizontal statistics of variables state_vars_var = get_horizontal_variance( driver_config.grid, solver_config.Q, vars_state(m, Prognostic(), FT), ); state_vars_avg = get_horizontal_mean( driver_config.grid, solver_config.Q, vars_state(m, Prognostic(), FT), ); data_avg = Dict[state_vars_avg] data_var = Dict[state_vars_var] export_plot( z, time_data, data_avg, ("ρu[1]",), joinpath(output_dir, "initial_condition_avg_u_bjfnk.png"); xlabel = "Horizontal mean of ρu", ylabel = z_label, ); export_plot( z, time_data, data_var, ("ρu[1]",), joinpath(output_dir, "initial_condition_variance_u_bjfnk.png"); xlabel = "Horizontal 
variance of ρu", ylabel = z_label, ); # ![](initial_condition_avg_u_bjfnk.png) # ![](initial_condition_variance_u_bjfnk.png) # # Solver hooks / callbacks # Define the number of outputs from `t0` to `timeend` const n_outputs = 5; const every_x_simulation_time = timeend / n_outputs; # Create a dictionary for `z` coordinate (and convert to cm) NCDatasets IO: dims = OrderedDict(z_key => collect(z)); # Create dictionaries to store outputs: data_var = Dict[Dict([k => Dict() for k in 0:n_outputs]...),] data_var[1] = state_vars_var data_avg = Dict[Dict([k => Dict() for k in 0:n_outputs]...),] data_avg[1] = state_vars_avg data_nodal = Dict[Dict([k => Dict() for k in 0:n_outputs]...),] data_nodal[1] = state_vars # The `ClimateMachine`'s time-steppers provide hooks, or callbacks, which # allow users to inject code to be executed at specified intervals. In this # callback, the state variables are collected, combined into a single # `OrderedDict` and written to a NetCDF file (for each output step `step`). step = [0]; callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do state_vars_var = get_horizontal_variance( driver_config.grid, solver_config.Q, vars_state(m, Prognostic(), FT), ) state_vars_avg = get_horizontal_mean( driver_config.grid, solver_config.Q, vars_state(m, Prognostic(), FT), ) state_vars = get_vars_from_nodal_stack( driver_config.grid, solver_config.Q, vars_state(m, Prognostic(), FT), i = 1, j = 1, ) step[1] += 1 push!(data_var, state_vars_var) push!(data_avg, state_vars_avg) push!(data_nodal, state_vars) push!(time_data, gettime(solver_config.solver)) nothing end; # # Solve # This is the main `ClimateMachine` solver invocation. While users do not have # access to the time-stepping loop, code may be injected via `user_callbacks`, # which is a `Tuple` of [`GenericCallbacks`](@ref ClimateMachine.GenericCallbacks). 
ClimateMachine.invoke!(solver_config; user_callbacks = (callback,))

# # Post-processing

# Our solution has now been calculated; the callback stored the statistics in
# the in-memory dictionaries, and the plots below are written as PNG files to
# `output_dir`.

# Let's plot the horizontal statistics of `ρu` and `ρcT`, as well as the evolution of
# `ρu` for the southwest nodal stack:
# NOTE: the variable-name argument must be a 1-tuple — in Julia `("ρu[1]")`
# is just a parenthesized String; the trailing comma (`("ρu[1]",)`) matches
# the tuple form used for the initial-condition plots above.
export_plot(
    z,
    time_data,
    data_avg,
    ("ρu[1]",),
    joinpath(output_dir, "solution_vs_time_u_avg_bjfnk.png");
    xlabel = "Horizontal mean of ρu",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_var,
    ("ρu[1]",),
    joinpath(output_dir, "variance_vs_time_u_bjfnk.png");
    xlabel = "Horizontal variance of ρu",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_avg,
    ("ρcT",),
    joinpath(output_dir, "solution_vs_time_T_avg_bjfnk.png");
    xlabel = "Horizontal mean of ρcT",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_var,
    ("ρcT",),
    joinpath(output_dir, "variance_vs_time_T_bjfnk.png");
    xlabel = "Horizontal variance of ρcT",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_nodal,
    ("ρu[1]",),
    joinpath(output_dir, "solution_vs_time_u_nodal_bjfnk.png");
    xlabel = "ρu at southwest node",
    ylabel = z_label,
);
# ![](solution_vs_time_u_avg_bjfnk.png)
# ![](variance_vs_time_u_bjfnk.png)
# ![](solution_vs_time_T_avg_bjfnk.png)
# ![](variance_vs_time_T_bjfnk.png)
# ![](solution_vs_time_u_nodal_bjfnk.png)

# Rayleigh friction returns the horizontal velocity to the objective
# profile well within the simulated time (1 second): with `γ` = 5 the damping
# timescale is `1/γ` = 0.2 s. The horizontal viscosity
# and 2D divergence damping act to reduce the horizontal variance over the same timescale.
# The initial Gaussian noise is propagated to the temperature field through advection.
# The horizontal diffusivity acts to reduce this `ρcT` variance in time, although in a longer
# timescale.
# To run this file, and
# inspect the solution, include this tutorial in the Julia REPL
# with:

# ```julia
# include(joinpath("tutorials", "Atmos", "burgers_single_stack_bjfnk.jl"))
# ```

================================================
FILE: tutorials/Atmos/burgers_single_stack_fvm.jl
================================================

# # Finite volume single stack tutorial based on the 3D Burgers + tracer equations

# This tutorial implements the Burgers equations with a tracer field
# in a single element stack. The flow is initialized with a horizontally
# uniform profile of horizontal velocity and uniform initial temperature. The fluid
# is heated from the bottom surface. Gaussian noise is imposed to the horizontal
# velocity field at each node at the start of the simulation. The tutorial demonstrates how to
#
# * Initialize a [`BalanceLaw`](@ref ClimateMachine.BalanceLaws.BalanceLaw) in a single stack configuration;
# * Return the horizontal velocity field to a given profile (e.g., large-scale advection);
# * Remove any horizontal inhomogeneities or noise from the flow.
#
# The second and third bullet points are demonstrated imposing Rayleigh friction, horizontal
# diffusion and 2D divergence damping to the horizontal momentum prognostic equation.
# Equations solved in balance law form: # ```math # \begin{align} # \frac{∂ ρ}{∂ t} =& - ∇ ⋅ (ρ\mathbf{u}) \\ # \frac{∂ ρ\mathbf{u}}{∂ t} =& - ∇ ⋅ (-μ ∇\mathbf{u}) - ∇ ⋅ (ρ\mathbf{u} \mathbf{u}') - γ[ (ρ\mathbf{u}-ρ̄\mathbf{ū}) - (ρ\mathbf{u}-ρ̄\mathbf{ū})⋅ẑ ẑ] - ν_d ∇_h (∇_h ⋅ ρ\mathbf{u}) \\ # \frac{∂ ρcT}{∂ t} =& - ∇ ⋅ (-α ∇ρcT) - ∇ ⋅ (\mathbf{u} ρcT) # \end{align} # ``` # Boundary conditions: # ```math # \begin{align} # z_{\mathrm{min}}: & ρ = 1 \\ # z_{\mathrm{min}}: & ρ\mathbf{u} = \mathbf{0} \\ # z_{\mathrm{min}}: & ρcT = ρc T_{\mathrm{fixed}} \\ # z_{\mathrm{max}}: & ρ = 1 \\ # z_{\mathrm{max}}: & ρ\mathbf{u} = \mathbf{0} \\ # z_{\mathrm{max}}: & -α∇ρcT = 0 # \end{align} # ``` # where # - ``t`` is time # - ``ρ`` is the density # - ``\mathbf{u}`` is the velocity (vector) # - ``\mathbf{ū}`` is the horizontally averaged velocity (vector) # - ``μ`` is the dynamic viscosity tensor # - ``γ`` is the Rayleigh friction frequency # - ``ν_d`` is the horizontal divergence damping coefficient # - ``T`` is the temperature # - ``α`` is the thermal diffusivity tensor # - ``c`` is the heat capacity # - ``ρcT`` is the thermal energy # Solving these equations is broken down into the following steps: # 1) Preliminary configuration # 2) PDEs # 3) Space discretization # 4) Time discretization # 5) Solver hooks / callbacks # 6) Solve # 7) Post-processing # # Preliminary configuration # ## [Loading code](@id Loading-code-burgers-fvm) # First, we'll load our pre-requisites # - load external packages: using MPI using Distributions using OrderedCollections using Plots using StaticArrays using LinearAlgebra: Diagonal, tr # - load CLIMAParameters and set up to use it: using CLIMAParameters using CLIMAParameters.Planet: grav struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() # - load necessary ClimateMachine modules: using ClimateMachine using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.Writers using 
ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, parameter_set using ClimateMachine.Mesh.Geometry: LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils import ClimateMachine.DGMethods.FVReconstructions: FVLinear # - import necessary ClimateMachine modules: (`import`ing enables us to # provide implementations of these structs/methods) using ClimateMachine.Orientations: Orientation, NoOrientation, FlatOrientation, init_aux!, vertical_unit_vector, projection_tangential import ClimateMachine.BalanceLaws: vars_state, source!, flux_second_order!, flux_first_order!, compute_gradient_argument!, compute_gradient_flux!, init_state_auxiliary!, init_state_prognostic!, construct_face_auxiliary_state!, BoundaryCondition, boundary_conditions, boundary_state! # ## Initialization # Define the float type (`Float64` or `Float32`) const FT = Float64; # Initialize ClimateMachine for CPU. 
ClimateMachine.init(; disable_gpu = true);

const clima_dir = dirname(dirname(pathof(ClimateMachine)));

# Load some helper functions for plotting
include(joinpath(clima_dir, "docs", "plothelpers.jl"));

# # Define the set of Partial Differential Equations (PDEs)

# ## Define the model

# Model parameters can be stored in the particular [`BalanceLaw`](@ref
# ClimateMachine.BalanceLaws.BalanceLaw), in this case, the `BurgersEquation`:
Base.@kwdef struct BurgersEquation{FT, APS, O} <: BalanceLaw
    "Parameters"
    param_set::APS
    "Orientation model"
    orientation::O
    "Heat capacity"
    c::FT = 1
    "Vertical dynamic viscosity"
    μv::FT = 1e-4
    "Horizontal dynamic viscosity"
    μh::FT = 1
    "Vertical thermal diffusivity"
    αv::FT = 1e-2
    "Horizontal thermal diffusivity"
    αh::FT = 1
    "IC Gaussian noise standard deviation"
    σ::FT = 5e-2
    "Rayleigh damping"
    γ::FT = 5
    "Domain height"
    zmax::FT = 1
    "Initial conditions for temperature"
    initialT::FT = 295.15
    "Bottom boundary value for temperature (Dirichlet boundary conditions)"
    T_bottom::FT = 300.0
    "Top flux (α∇ρcT) at top boundary (Neumann boundary conditions)"
    flux_top::FT = 0.0
    "Divergence damping coefficient (horizontal)"
    νd::FT = 1
end

# Create an instance of the `BurgersEquation`:
orientation = FlatOrientation()
m = BurgersEquation{FT, typeof(param_set), typeof(orientation)}(
    param_set = param_set,
    orientation = orientation,
);

# This model dictates the flow control, using [Dynamic Multiple
# Dispatch](https://en.wikipedia.org/wiki/Multiple_dispatch), for which
# kernels are executed.

# ## Define the variables

# All of the methods defined in this section were `import`ed in
# [Loading code](@ref Loading-code-burgers-fvm) to let us provide
# implementations for our `BurgersEquation` as they will be used
# by the solver.
# Specify auxiliary variables for `BurgersEquation` function vars_state(m::BurgersEquation, st::Auxiliary, FT) @vars begin coord::SVector{3, FT} orientation::vars_state(m.orientation, st, FT) end end # Specify prognostic variables, the variables solved for in the PDEs, for # `BurgersEquation` vars_state(::BurgersEquation, ::Prognostic, FT) = @vars(ρ::FT, ρu::SVector{3, FT}, ρcT::FT); # Specify state variables whose gradients are needed for `BurgersEquation` vars_state(::BurgersEquation, ::Gradient, FT) = @vars(u::SVector{3, FT}, ρcT::FT, ρu::SVector{3, FT}); # Specify gradient variables for `BurgersEquation` vars_state(::BurgersEquation, ::GradientFlux, FT) = @vars( μ∇u::SMatrix{3, 3, FT, 9}, α∇ρcT::SVector{3, FT}, νd∇D::SMatrix{3, 3, FT, 9} ); # ## Define the compute kernels # Specify the initial values in `aux::Vars`, which are available in # `init_state_prognostic!`. Note that # - this method is only called at `t=0`. # - `aux.coord` is available here because we've specified `coord` in `vars_state(m, aux, FT)`. function nodal_init_state_auxiliary!( m::BurgersEquation, aux::Vars, tmp::Vars, geom::LocalGeometry, ) aux.coord = geom.coord end; # `init_aux!` initializes the auxiliary gravitational potential field needed for vertical projections function init_state_auxiliary!( m::BurgersEquation, state_auxiliary::MPIStateArray, grid, direction, ) init_aux!(m, m.orientation, state_auxiliary, grid, direction) init_state_auxiliary!( m, nodal_init_state_auxiliary!, state_auxiliary, grid, direction, ) end; # Specify the initial values in `state::Vars`. Note that # - this method is only called at `t=0`. # - `state.ρ`, `state.ρu` and`state.ρcT` are available here because we've specified `ρ`, `ρu` and `ρcT` in `vars_state(m, state, FT)`. 
# Initial condition: uniform density and temperature, and a parabolic
# horizontal-momentum profile in `z` (zero at `z = 0` and `z = zmax`,
# maximal at mid-domain) perturbed by i.i.d. Gaussian noise `ε1`, `ε2`
# with standard deviation `m.σ`; vertical momentum starts at zero.
function init_state_prognostic!(
    m::BurgersEquation,
    state::Vars,
    aux::Vars,
    localgeo,
    t::Real,
)
    z = aux.coord[3]
    ε1 = rand(Normal(0, m.σ))
    ε2 = rand(Normal(0, m.σ))
    state.ρ = 1
    ρu = 1 - 4 * (z - m.zmax / 2)^2 + ε1
    ρv = 1 - 4 * (z - m.zmax / 2)^2 + ε2
    ρw = 0
    state.ρu = SVector(ρu, ρv, ρw)
    state.ρcT = state.ρ * m.c * m.initialT
end;

# Build the auxiliary state at a cell face from the cell-centered auxiliary
# state, as needed by the finite-volume vertical reconstruction: all fields
# are copied verbatim, and (unless there is no orientation model) the
# geopotential `Φ` is offset by `g * Δz / 2`, i.e. half a cell in the
# vertical.
# NOTE(review): the sign convention of `Δz` (whether the face lies above or
# below the cell center) is not visible here — confirm against the
# FVReconstructions caller.
function construct_face_auxiliary_state!(
    bl::BurgersEquation,
    aux_face::AbstractArray,
    aux_cell::AbstractArray,
    Δz::FT,
) where {FT <: Real}
    param_set = parameter_set(bl)
    _grav = FT(grav(param_set))
    # `Vars`-typed view over the raw auxiliary arrays, to address `Φ` by name
    var_aux = Vars{vars_state(bl, Auxiliary(), FT)}
    aux_face .= aux_cell
    if !(bl.orientation isa NoOrientation)
        var_aux(aux_face).orientation.Φ =
            var_aux(aux_cell).orientation.Φ + _grav * Δz / 2
    end
end

# The remaining methods, defined in this section, are called at every
# time-step in the solver by the [`BalanceLaw`](@ref
# ClimateMachine.BalanceLaws.BalanceLaw) framework.

# Since we have second-order fluxes, we must tell `ClimateMachine` to compute
# the gradient of `ρcT`, `u` and `ρu`. Here, we specify how `ρcT`, `u` and `ρu` are computed. Note that
# e.g. `transform.ρcT` is available here because we've specified `ρcT` in `vars_state(m, ::Gradient, FT)`.
function compute_gradient_argument!(
    m::BurgersEquation,
    transform::Vars,
    state::Vars,
    aux::Vars,
    t::Real,
)
    transform.ρcT = state.ρcT
    transform.u = state.ρu / state.ρ
    transform.ρu = state.ρu
end;

# Specify where in `diffusive::Vars` to store the computed gradient from
# `compute_gradient_argument!`. Note that:
# - `diffusive.μ∇u` is available here because we've specified `μ∇u` in `vars_state(m, ::GradientFlux, FT)`.
# - `∇transform.u` is available here because we've specified `u` in `vars_state(m, ::Gradient, FT)`.
# - `diffusive.μ∇u` is built using an anisotropic diffusivity tensor.
# - The `divergence` may be computed from the trace of tensor `∇ρu`.
function compute_gradient_flux!( m::BurgersEquation{FT}, diffusive::Vars, ∇transform::Grad, state::Vars, aux::Vars, t::Real, ) where {FT} param_set = parameter_set(m) ∇ρu = ∇transform.ρu ẑ = vertical_unit_vector(m.orientation, param_set, aux) divergence = tr(∇ρu) - ẑ' * ∇ρu * ẑ diffusive.α∇ρcT = Diagonal(SVector(m.αh, m.αh, m.αv)) * ∇transform.ρcT diffusive.μ∇u = Diagonal(SVector(m.μh, m.μh, m.μv)) * ∇transform.u diffusive.νd∇D = Diagonal(SVector(m.νd, m.νd, FT(0))) * Diagonal(SVector(divergence, divergence, FT(0))) end; # Introduce Rayleigh friction towards a target profile as a source. # Note that: # - Rayleigh damping is only applied in the horizontal using the `projection_tangential` method. function source!( m::BurgersEquation{FT}, source::Vars, state::Vars, diffusive::Vars, aux::Vars, args..., ) where {FT} param_set = parameter_set(m) ẑ = vertical_unit_vector(m.orientation, param_set, aux) z = aux.coord[3] ρ̄ū = state.ρ * SVector{3, FT}( 0.5 - 2 * (z - m.zmax / 2)^2, 0.5 - 2 * (z - m.zmax / 2)^2, 0.0, ) ρu_p = state.ρu - ρ̄ū source.ρu -= m.γ * projection_tangential(m.orientation, param_set, aux, ρu_p) end; # Compute advective flux. # Note that: # - `state.ρu` is available here because we've specified `ρu` in `vars_state(m, state, FT)`. function flux_first_order!( m::BurgersEquation, flux::Grad, state::Vars, aux::Vars, t::Real, _..., ) flux.ρ = state.ρu u = state.ρu / state.ρ flux.ρu = state.ρu * u' flux.ρcT = u * state.ρcT end; # Compute diffusive flux (e.g. ``F(μ, \mathbf{u}, t) = -μ∇\mathbf{u}`` in the original PDE). # Note that: # - `diffusive.μ∇u` is available here because we've specified `μ∇u` in `vars_state(m, ::GradientFlux, FT)`. # - The divergence gradient can be written as a diffusive flux using a divergence diagonal tensor. 
# Accumulate the second-order (diffusive) fluxes computed in
# `compute_gradient_flux!` into the total flux.
function flux_second_order!(
    m::BurgersEquation,
    flux::Grad,
    state::Vars,
    diffusive::Vars,
    hyperdiffusive::Vars,
    aux::Vars,
    t::Real,
)
    flux.ρcT -= diffusive.α∇ρcT
    flux.ρu -= diffusive.μ∇u
    flux.ρu -= diffusive.νd∇D
end;

# ### Boundary conditions

# Second-order terms in our equations, ``∇⋅(G)`` where ``G = μ∇\mathbf{u}``, are
# internally reformulated to first-order unknowns.
# Boundary conditions must be specified for all unknowns, both first-order and
# second-order unknowns which have been reformulated.

# Boundary condition types, used for dispatch:
struct TopBC <: BoundaryCondition end;
struct BottomBC <: BoundaryCondition end;
boundary_conditions(::BurgersEquation) = (BottomBC(), TopBC());

# The boundary conditions for `ρ`, `ρu` and `ρcT` (first order unknowns)
function boundary_state!(
    nf,
    bc::BottomBC,
    m::BurgersEquation,
    state⁺::Vars,
    aux⁺::Vars,
    n⁻,
    _...,
)
    ## No-slip wall held at the prescribed bottom temperature `T_bottom`
    state⁺.ρ = 1
    state⁺.ρu = SVector(0, 0, 0)
    state⁺.ρcT = state⁺.ρ * m.c * m.T_bottom
end;
function boundary_state!(
    nf,
    bc::TopBC,
    m::BurgersEquation,
    state⁺::Vars,
    aux⁺::Vars,
    n⁻,
    _...,
)
    ## No-slip wall; the top energy condition is imposed on the
    ## second-order unknowns below via a prescribed flux
    state⁺.ρ = 1
    state⁺.ρu = SVector(0, 0, 0)
end;

# The boundary conditions for `ρ`, `ρu` and `ρcT` are specified here for
# second-order unknowns
function boundary_state!(
    nf,
    bc::BottomBC,
    m::BurgersEquation,
    state⁺::Vars,
    diff⁺::Vars,
    hyperdiff⁺::Vars,
    aux⁺::Vars,
    n⁻,
    _...,
)
    state⁺.ρ = 1
    state⁺.ρu = SVector(0, 0, 0)
    state⁺.ρcT = state⁺.ρ * m.c * m.T_bottom
end;
function boundary_state!(
    nf,
    bc::TopBC,
    m::BurgersEquation,
    state⁺::Vars,
    diff⁺::Vars,
    hyperdiff⁺::Vars,
    aux⁺::Vars,
    n⁻,
    _...,
)
    state⁺.ρ = 1
    state⁺.ρu = SVector(0, 0, 0)
    ## Prescribed energy flux `flux_top` through the top boundary
    diff⁺.α∇ρcT = -n⁻ * m.flux_top
end;

# # Spatial discretization

# Prescribe polynomial order of basis functions in finite elements
# The second index 0 indicates that finite volume method is
# applied in the vertical direction
N_poly = (1, 0);

# Specify the number of vertical elements
nelem_vert = 50;

# Specify the domain height
zmax = m.zmax;

# Establish a `ClimateMachine` single stack configuration
driver_config = ClimateMachine.SingleStackConfiguration(
    "BurgersEquation",
    N_poly,
    nelem_vert,
    zmax,
    param_set,
    m,
    numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
    fv_reconstruction = FVLinear(),
);

# # Time discretization

# Specify simulation time (SI units)
t0 = FT(0);
timeend = FT(1);

# We'll define the time-step based on the Fourier
# number and the [Courant number](https://en.wikipedia.org/wiki/Courant–Friedrichs–Lewy_condition)
# of the flow
Δ = min_node_distance(driver_config.grid)
given_Fourier = FT(0.5);
Fourier_bound = given_Fourier * Δ^2 / max(m.αh, m.μh, m.νd);
Courant_bound = FT(0.5) * Δ;
dt = min(Fourier_bound, Courant_bound)

# # Configure a `ClimateMachine` solver.

# This initializes the state vector and allocates memory for the solution in
# space (`dg` has the model `m`, which describes the PDEs as well as the
# function used for initialization). This additionally initializes the ODE
# solver, by default an explicit Low-Storage
# [Runge-Kutta](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods)
# method.
solver_config =
    ClimateMachine.SolverConfiguration(t0, timeend, driver_config, ode_dt = dt);

# ## Inspect the initial conditions for a single nodal stack

# Let's export plots of the initial state
output_dir = @__DIR__;
mkpath(output_dir);

z_scale = 100 # convert from meters to cm
z_key = "z"
z_label = "z [cm]"
z = get_z(driver_config.grid; z_scale = z_scale)
state_vars = get_vars_from_nodal_stack(
    driver_config.grid,
    solver_config.Q,
    vars_state(m, Prognostic(), FT),
);

# Create an array to store the solution:
state_data = Dict[state_vars] # store initial condition at ``t=0``
time_data = FT[0] # store time data

# Generate plots of initial conditions for the southwest nodal stack
export_plot(
    z,
    time_data,
    state_data,
    ("ρcT",),
    joinpath(output_dir, "initial_condition_T_nodal_fvm.png");
    xlabel = "ρcT at southwest node",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    state_data,
    ("ρu[1]",),
    joinpath(output_dir, "initial_condition_u_nodal_fvm.png");
    xlabel = "ρu at southwest node",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    state_data,
    ("ρu[2]",),
    joinpath(output_dir, "initial_condition_v_nodal_fvm.png");
    xlabel = "ρv at southwest node",
    ylabel = z_label,
);
# ![](initial_condition_T_nodal_fvm.png)
# ![](initial_condition_u_nodal_fvm.png)

# ## Inspect the initial conditions for the horizontal averages

# Horizontal statistics of variables
state_vars_var = get_horizontal_variance(
    driver_config.grid,
    solver_config.Q,
    vars_state(m, Prognostic(), FT),
);
state_vars_avg = get_horizontal_mean(
    driver_config.grid,
    solver_config.Q,
    vars_state(m, Prognostic(), FT),
);

data_avg = Dict[state_vars_avg]
data_var = Dict[state_vars_var]

export_plot(
    z,
    time_data,
    data_avg,
    ("ρu[1]",),
    joinpath(output_dir, "initial_condition_avg_u_fvm.png");
    xlabel = "Horizontal mean of ρu",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_var,
    ("ρu[1]",),
    joinpath(output_dir, "initial_condition_variance_u_fvm.png");
    xlabel = "Horizontal variance of ρu",
    ylabel = z_label,
);
# ![](initial_condition_avg_u_fvm.png)
# ![](initial_condition_variance_u_fvm.png)

# # Solver hooks / callbacks

# Define the number of outputs from `t0` to `timeend`
const n_outputs = 5;
const every_x_simulation_time = timeend / n_outputs;

# Create a dictionary for the `z` coordinate (in cm), used for NCDatasets IO:
dims = OrderedDict(z_key => collect(z));

# Create dictionaries to store outputs, seeded with the initial condition:
data_var = Dict[Dict([k => Dict() for k in 0:n_outputs]...),]
data_var[1] = state_vars_var

data_avg = Dict[Dict([k => Dict() for k in 0:n_outputs]...),]
data_avg[1] = state_vars_avg

data_nodal = Dict[Dict([k => Dict() for k in 0:n_outputs]...),]
data_nodal[1] = state_vars

# The `ClimateMachine`'s time-steppers provide hooks, or callbacks, which
# allow users to inject code to be executed at specified intervals. In this
# callback, the state variables are collected, combined into a single
# `OrderedDict` and written to a NetCDF file (for each output step).
callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do
    state_vars_var = get_horizontal_variance(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
    )
    state_vars_avg = get_horizontal_mean(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
    )
    ## (i, j) = (1, 1) selects the southwest nodal stack
    state_vars = get_vars_from_nodal_stack(
        driver_config.grid,
        solver_config.Q,
        vars_state(m, Prognostic(), FT),
        i = 1,
        j = 1,
    )
    push!(data_var, state_vars_var)
    push!(data_avg, state_vars_avg)
    push!(data_nodal, state_vars)
    push!(time_data, gettime(solver_config.solver))
    nothing
end;

# # Solve

# This is the main `ClimateMachine` solver invocation. While users do not have
# access to the time-stepping loop, code may be injected via `user_callbacks`,
# which is a `Tuple` of [`GenericCallbacks`](@ref ClimateMachine.GenericCallbacks).
ClimateMachine.invoke!(solver_config; user_callbacks = (callback,))

# # Post-processing

# Our solution has now been calculated and exported to NetCDF files in
# `output_dir`.
# Let's plot the horizontal statistics of `ρu` and `ρcT`, as well as the evolution of
# `ρu` for the southwest nodal stack.
# Note: the variable-name argument of `export_plot` must be a one-element
# tuple, `("name",)`. A bare `("name")` is just a parenthesized `String`,
# which would be iterated character-by-character rather than treated as a
# single variable name (cf. the earlier calls in this tutorial).
export_plot(
    z,
    time_data,
    data_avg,
    ("ρu[1]",),
    joinpath(output_dir, "solution_vs_time_u_avg_fvm.png");
    xlabel = "Horizontal mean of ρu",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_var,
    ("ρu[1]",),
    joinpath(output_dir, "variance_vs_time_u_fvm.png");
    xlabel = "Horizontal variance of ρu",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_avg,
    ("ρcT",),
    joinpath(output_dir, "solution_vs_time_T_avg_fvm.png");
    xlabel = "Horizontal mean of ρcT",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_var,
    ("ρcT",),
    joinpath(output_dir, "variance_vs_time_T_fvm.png");
    xlabel = "Horizontal variance of ρcT",
    ylabel = z_label,
);
export_plot(
    z,
    time_data,
    data_nodal,
    ("ρu[1]",),
    joinpath(output_dir, "solution_vs_time_u_nodal_fvm.png");
    xlabel = "ρu at southwest node",
    ylabel = z_label,
);
# ![](solution_vs_time_u_avg_fvm.png)
# ![](variance_vs_time_u_fvm.png)
# ![](solution_vs_time_T_avg_fvm.png)
# ![](variance_vs_time_T_fvm.png)
# ![](solution_vs_time_u_nodal_fvm.png)

# Rayleigh friction returns the horizontal velocity to the objective
# profile on the timescale of the simulation (1 second), since `γ`∼1. The horizontal viscosity
# and 2D divergence damping act to reduce the horizontal variance over the same timescale.
# The initial Gaussian noise is propagated to the temperature field through advection.
# The horizontal diffusivity acts to reduce this `ρcT` variance in time, although in a longer
# timescale.
# To run this file, and
# inspect the solution, include this tutorial in the Julia REPL
# with:

# ```julia
# include(joinpath("tutorials", "Atmos", "burgers_single_stack.jl"))
# ```

================================================
FILE: tutorials/Atmos/densitycurrent.jl
================================================

# # Density Current
#
# In this example, we demonstrate the usage of the `ClimateMachine`
# to solve the density current test by Straka 1993.
# We solve a flow in a box configuration, which is
# representative of a large-eddy simulation. Several versions of the problem
# setup may be found in the literature, but the general idea is to examine the
# descent and spreading of a cold thermal _bubble_ (we can interpret this as a
# simple representation of a density-driven, or gravity, current).
#
# ## Description of experiment
# The setup described below is such that the simulation reaches completion
# (timeend = 900 s) in approximately 4 minutes of wall-clock time on 1 GPU
#
# 1) Dry Density Current (circular potential temperature perturbation)
# 2) Boundaries
#    - `Impenetrable(FreeSlip())` - no momentum flux, no mass flux through
#      walls.
#    - `Impermeable()` - non-porous walls, i.e. no diffusive fluxes through
#      walls.
# 3) Domain - 25600m (horizontal) x 10000m (horizontal) x 6400m (vertical)
# 4) Resolution - 100m effective resolution
# 5) Total simulation time - 900s
# 6) Mesh Aspect Ratio (Effective resolution) 1:1
# 7) Overrides defaults for
#    - CPU Initialisation
#    - Time integrator
#    - Sources
#    - Smagorinsky Coefficient _C_smag
# 8) Default settings can be found in `src/Driver/Configurations.jl`

#md # !!! note
#md #     This experiment setup assumes that you have installed the
#md #     `ClimateMachine` according to the instructions on the landing page.
#md # We assume the users' familiarity with the conservative form of the #md # equations of motion for a compressible fluid #md # #md # The following topics are covered in this example #md # - Package requirements #md # - Defining a `model` subtype for the set of conservation equations #md # - Defining the initial conditions #md # - Applying boundary conditions #md # - Applying source terms #md # - Choosing a turbulence model #md # - Adding tracers to the model #md # - Choosing a time-integrator #md # #md # The following topics are not covered in this example #md # - Defining new boundary conditions #md # - Defining new turbulence models #md # - Building new time-integrators # # ## Boilerplate (Using Modules) # # #### [Skip Section](@ref init-dc) # # Before setting up our experiment, we recognize that we need to import some # pre-defined functions from other packages. Julia allows us to use existing # modules (variable workspaces), or write our own to do so. Complete # documentation for the Julia module system can be found # [here](https://docs.julialang.org/en/v1/manual/modules/#). # We need to use the `ClimateMachine` module! This imports all functions # specific to atmospheric and ocean flow modeling. While we do not cover the # ins-and-outs of the contents of each of these we provide brief descriptions # of the utility of each of the loaded packages. using ClimateMachine ClimateMachine.init(parse_clargs = true) using ClimateMachine.Atmos using ClimateMachine.Orientations # - Required so that we inherit the appropriate model types for the large-eddy # simulation (LES) and global-circulation-model (GCM) configurations. using ClimateMachine.ConfigTypes # - Required so that we may define diagnostics configurations, e.g. 
choice of # file-writer, choice of output variable sets, output-frequency and directory, using ClimateMachine.Diagnostics # - Required so that we may define (or utilise existing functions) functions # that are `called-back` or executed at frequencies of either timesteps, # simulation-time, or wall-clock time. using ClimateMachine.GenericCallbacks # - Required so we load the appropriate functions for the time-integration # component. Contains ODESolver methods. using ClimateMachine.ODESolvers # - Required for utility of spatial filtering functions (e.g. positivity # preservation) using ClimateMachine.Mesh.Filters # - Required so functions for computation of temperature profiles. using Thermodynamics.TemperatureProfiles # - Required so functions for computation of moist thermodynamic quantities and turbulence closures # are available. using Thermodynamics using ClimateMachine.TurbulenceClosures # - Required so we may access our variable arrays by a sensible naming # convention rather than by numerical array indices. using ClimateMachine.VariableTemplates # - Required so we may access planet parameters # ([CLIMAParameters](https://github.com/CliMA/CLIMAParameters.jl) # specific to this problem include the gas constant, specific heats, # mean-sea-level pressure, gravity and the Smagorinsky coefficient) # In ClimateMachine we use `StaticArrays` for our variable arrays. using StaticArrays # We also use the `Test` package to help with unit tests and continuous # integration systems to design sensible tests for our experiment to ensure new # / modified blocks of code don't damage the fidelity of the physics. The test # defined within this experiment is not a unit test for a specific # subcomponent, but ensures time-integration of the defined problem conditions # within a reasonable tolerance. Immediately useful macros and functions from # this include `@test` and `@testset` which will allow us to define the testing # parameter sets. 
using Test
using CLIMAParameters
using CLIMAParameters.Atmos.SubgridScale: C_smag
using CLIMAParameters.Planet: R_d, cp_d, cv_d, MSLP, grav

struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# ## [Initial Conditions](@id init-dc)
#md # !!! note
#md #     The following variables are assigned in the initial condition
#md #     - `state.ρ` = Scalar quantity for initial density profile
#md #     - `state.ρu`= 3-component vector for initial momentum profile
#md #     - `state.energy.ρe`= Scalar quantity for initial total-energy profile
#md #       humidity
#md #     - `state.tracers.ρχ` = Vector of four tracers (here, for demonstration
#md #       only; we can interpret these as dye injections for visualisation
#md #       purposes)

# Set the initial condition: a dry background in hydrostatic balance with an
# elliptical potential-temperature perturbation of amplitude -15 K centered
# at (x, z) = (0, 3000) m, and zero initial momentum.
function init_densitycurrent!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord

    ## Problem float-type
    FT = eltype(state)
    param_set = parameter_set(bl)

    ## Unpack constant parameters
    R_gas::FT = R_d(param_set)
    c_p::FT = cp_d(param_set)
    c_v::FT = cv_d(param_set)
    p0::FT = MSLP(param_set)
    _grav::FT = grav(param_set)
    γ::FT = c_p / c_v

    ## Define bubble center and background potential temperature
    xc::FT = 0
    yc::FT = 0
    zc::FT = 3000
    rx::FT = 4000
    rz::FT = 2000
    ## Normalized elliptical distance from the bubble center
    r = sqrt(((x - xc)^2) / rx^2 + ((z - zc)^2) / rz^2)

    ## TODO: clean this up, or add convenience function:
    ## This is configured in the reference hydrostatic state
    ref_state = reference_state(bl)
    θ_ref::FT = ref_state.virtual_temperature_profile.T_surface

    Δθ::FT = 0
    θamplitude::FT = -15.0

    ## Compute temperature difference over bubble region
    if r <= 1
        Δθ = 0.5 * θamplitude * (1 + cospi(r))
    end

    ## Compute perturbed thermodynamic state:
    θ = θ_ref + Δθ                                      ## potential temperature
    π_exner = FT(1) - _grav / (c_p * θ) * z             ## exner pressure
    ρ = p0 / (R_gas * θ) * (π_exner)^(c_v / R_gas)      ## density
    T = θ * π_exner
    e_int = internal_energy(param_set, T)
    ts = PhaseDry(param_set, e_int, ρ)
    ρu = SVector(FT(0), FT(0), FT(0))                   ## momentum
    ## State (prognostic) variable assignment
    e_kin = FT(0)                                       ## kinetic energy
    e_pot = gravitational_potential(bl.orientation, aux)## potential energy
    ρe_tot = ρ * total_energy(e_kin, e_pot, ts)         ## total energy
    ## Assign State Variables
    state.ρ = ρ
    state.ρu = ρu
    state.energy.ρe = ρe_tot
end

# ## [Model Configuration](@id config-helper)
# We define a configuration function to assist in prescribing the physical
# model.
function config_densitycurrent(
    ::Type{FT},
    N,
    resolution,
    xmax,
    ymax,
    zmax,
) where {FT}
    ## The model coefficient for the turbulence closure is defined via the
    ## [CLIMAParameters
    ## package](https://CliMA.github.io/CLIMAParameters.jl/dev/). A reference
    ## state for the linearisation step is also defined.
    T_surface = FT(300)
    T_min_ref = FT(0)
    T_profile = DryAdiabaticProfile{FT}(param_set, T_surface, T_min_ref)
    ref_state = HydrostaticState(T_profile)

    ## The fun part! Here we assemble the `AtmosModel`.
    ##md # !!! note
    ##md #     Docs on model subcomponent options can be found here:
    ##md #     - [`param_set`](https://CliMA.github.io/CLIMAParameters.jl/dev/)
    ##md #     - [`turbulence`](@ref Turbulence-Closures-docs)
    ##md #     - [`source`](@ref atmos-sources)
    ##md #     - [`init_state`](@ref init-dc)
    _C_smag = FT(0.21)
    physics = AtmosPhysics{FT}(
        param_set; # Parameter set corresponding to earth parameters
        ref_state = ref_state, # Reference state
        turbulence = Vreman(_C_smag), # Turbulence closure model
        moisture = DryModel(), # Exclude moisture variables
        tracers = NoTracers(), # Tracer model with diffusivity coefficients
    )
    model = AtmosModel{FT}(
        AtmosLESConfigType, # Flow in a box, requires the AtmosLESConfigType
        physics; # Atmos physics
        init_state_prognostic = init_densitycurrent!, # Apply the initial condition
        source = (Gravity(),), # Gravity is the only source term here
    )

    ## Finally, we pass a `Problem Name` string, the mesh information, and the
    ## model type to the [`AtmosLESConfiguration`](@ref ClimateMachine.AtmosLESConfiguration) object.
    config = ClimateMachine.AtmosLESConfiguration(
        "DryDensitycurrent", # Problem title [String]
        N, # Polynomial order [Int]
        resolution, # (Δx, Δy, Δz) effective resolution [m]
        xmax, # Domain maximum size [m]
        ymax, # Domain maximum size [m]
        zmax, # Domain maximum size [m]
        param_set, # Parameter set.
        init_densitycurrent!, # Function specifying initial condition
        model = model, # Model type
        periodicity = (false, false, false),
        boundary = ((1, 1), (1, 1), (1, 1)), # Set all boundaries to solid walls
    )
    return config
end

#md # !!! note
#md #     `Keywords` are used to specify some arguments (see appropriate source
#md #     files).

function main()
    ## These are essentially arguments passed to the
    ## [`config_densitycurrent`](@ref config-helper) function. For type
    ## consistency we explicitly define the problem floating-precision.
    FT = Float64
    ## We need to specify the polynomial order for the DG discretization,
    ## effective resolution, simulation end-time, the domain bounds, and the
    ## courant-number for the time-integrator. Note how the time-integration
    ## components `solver_config` are distinct from the spatial / model
    ## components in `driver_config`. `init_on_cpu` is a helper keyword argument
    ## that forces problem initialisation on CPU (thereby allowing the use of
    ## random seeds, spline interpolants and other special functions at the
    ## initialisation step.)
    N = 4
    Δx = FT(100)
    Δy = FT(250)
    Δv = FT(100)
    resolution = (Δx, Δy, Δv)
    xmax = FT(25600)
    ymax = FT(1000)
    zmax = FT(6400)
    t0 = FT(0)
    timeend = FT(100)
    CFL = FT(1.5)

    ## Assign configurations so they can be passed to the `invoke!` function
    driver_config = config_densitycurrent(FT, N, resolution, xmax, ymax, zmax)
    ## Choose an Explicit Single-rate Solver LSRK144 from the existing [ODESolvers](@ref
    ## ODESolvers-docs) options. Apply the outer constructor to define the
    ## `ode_solver_type`.
    ode_solver_type = ClimateMachine.ExplicitSolverType(
        solver_method = LSRK144NiegemannDiehlBusch,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        init_on_cpu = true,
        Courant_number = CFL,
    )

    ## Invoke solver (calls `solve!` function for time-integrator), pass the driver, solver and diagnostic config
    ## information.
    result =
        ClimateMachine.invoke!(solver_config; check_euclidean_distance = true)

    ## Check that the solution norm is reasonable.
    @test isapprox(result, FT(1); atol = 1.5e-2)
end

# The experiment definition is now complete. Time to run it.

# `julia --project=$CLIMA_HOME tutorials/Atmos/densitycurrent.jl --vtk 1smins`
# to run with VTK output enabled at intervals of 1 simulation minute.
#
# ## References
#
# - [Straka1993](@cite)
# - [Carpenter1990](@cite)

main()

================================================
FILE: tutorials/Atmos/dry_rayleigh_benard.jl
================================================

# # Dry Rayleigh Benard
# ## Problem description
#
# 1) Dry Rayleigh Benard Convection (re-entrant channel configuration)
# 2) Boundaries - `Sides` : Periodic (Default `bctuple` used to identify bot,top walls)
#                 `Top`   : Prescribed temperature, no-slip
#                 `Bottom`: Prescribed temperature, no-slip
# 3) Domain - 250m[horizontal] x 250m[horizontal] x 500m[vertical]
# 4) Timeend - 100s
# 5) Mesh Aspect Ratio (Effective resolution) 1:1
# 6) Random values in initial condition (Requires `init_on_cpu=true` argument)
# 7) Overrides defaults for
#    `C_smag`
#    `Courant_number`
#    `init_on_cpu`
#    `ref_state`
#    `solver_type`
#    `bc`
#    `sources`
# 8) Default settings can be found in src/Driver/Configurations.jl

# ## Loading code
using Distributions
using Random
using StaticArrays
using Test
using DocStringExtensions
using Printf

using ClimateMachine
ClimateMachine.init()
using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.ConfigTypes
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.Diagnostics
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.Mesh.Filters
using Thermodynamics: PhaseEquil_pTq, internal_energy
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates

using CLIMAParameters
using CLIMAParameters.Planet: R_d, cp_d, cv_d, grav, MSLP
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Convenience struct for sharing data between kernels
struct DryRayleighBenardConvectionDataConfig{FT}
    xmin::FT
    ymin::FT
    zmin::FT
    xmax::FT
    ymax::FT
    zmax::FT
    T_bot::FT     # prescribed bottom-wall temperature
    T_lapse::FT   # temperature lapse rate used in the background profile
    T_top::FT     # prescribed top-wall temperature
end

# Define initial condition kernel: a lapse-rate background profile with
# sinusoidal + random perturbations in temperature and vertical velocity,
# plus a single near-surface tracer.
function init_problem!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord
    dc = bl.data_config
    FT = eltype(state)
    param_set = parameter_set(bl)

    _R_d::FT = R_d(param_set)
    _cp_d::FT = cp_d(param_set)
    _grav::FT = grav(param_set)
    _cv_d::FT = cv_d(param_set)
    _MSLP::FT = MSLP(param_set)

    γ::FT = _cp_d / _cv_d
    ## Sinusoidal perturbations in z with added random noise
    δT =
        sinpi(6 * z / (dc.zmax - dc.zmin)) *
        cospi(6 * z / (dc.zmax - dc.zmin)) + rand()
    δw =
        sinpi(6 * z / (dc.zmax - dc.zmin)) *
        cospi(6 * z / (dc.zmax - dc.zmin)) + rand()
    ΔT = _grav / _cv_d * z + δT
    T = dc.T_bot - ΔT
    P = _MSLP * (T / dc.T_bot)^(_grav / _R_d / dc.T_lapse)
    ρ = P / (_R_d * T)

    q_tot = FT(0)
    e_pot = gravitational_potential(bl.orientation, aux)
    ts = PhaseEquil_pTq(param_set, P, T, q_tot)

    ρu, ρv, ρw = FT(0), FT(0), ρ * δw

    e_int = internal_energy(ts)
    e_kin = FT(1 / 2) * δw^2

    ρe_tot = ρ * (e_int + e_pot + e_kin)
    state.ρ = ρ
    state.ρu = SVector(ρu, ρv, ρw)
    state.energy.ρe = ρe_tot
    state.moisture.ρq_tot = FT(0)
    ρχ = zero(FT)
    ## Tracer is only seeded in the lowest 100 m
    if z <= 100
        ρχ += FT(0.1) * (cospi(z / 2 / 100))^2
    end
    state.tracers.ρχ = SVector{1, FT}(ρχ)
end

# Define problem configuration kernel
function config_problem(::Type{FT}, N, resolution, xmax, ymax, zmax) where {FT}
    ## Boundary conditions
    T_bot = FT(299)

    _cp_d::FT = cp_d(param_set)
    _grav::FT = grav(param_set)

    T_lapse = FT(_grav / _cp_d)
    T_top = T_bot - T_lapse * zmax

    ntracers = 1
    δ_χ = SVector{ntracers, FT}(1)

    ## Turbulence
    C_smag = FT(0.23)

    data_config = DryRayleighBenardConvectionDataConfig{FT}(
        0,
        0,
        0,
        xmax,
        ymax,
        zmax,
        T_bot,
        T_lapse,
        FT(T_bot - T_lapse * zmax),
    )

    ## Define the physics
    physics = AtmosPhysics{FT}(
        param_set;
        turbulence = Vreman(C_smag),
        tracers = NTracers{ntracers, FT}(δ_χ),
    )

    ## Set up the problem: no-slip walls with prescribed temperatures at
    ## bottom (first tuple entry) and top (second tuple entry)
    problem = AtmosProblem(;
        physics = physics,
        boundaryconditions = (
            AtmosBC(
                physics;
                momentum = Impenetrable(NoSlip()),
                energy = PrescribedTemperature((state, aux, t) -> T_bot),
            ),
            AtmosBC(
                physics;
                momentum = Impenetrable(NoSlip()),
                energy = PrescribedTemperature((state, aux, t) -> T_top),
            ),
        ),
        init_state_prognostic = init_problem!,
    )

    ## Set up the model
    model = AtmosModel{FT}(
        AtmosLESConfigType,
        physics;
        problem = problem,
        source = (Gravity(),),
        data_config = data_config,
    )

    config = ClimateMachine.AtmosLESConfiguration(
        "DryRayleighBenardConvection",
        N,
        resolution,
        xmax,
        ymax,
        zmax,
        param_set,
        init_problem!,
        model = model,
    )
    return config
end

# Define diagnostics configuration kernel
function config_diagnostics(driver_config)
    interval = "10000steps"
    dgngrp = setup_atmos_default_diagnostics(
        AtmosLESConfigType(),
        interval,
        driver_config.name,
    )
    return ClimateMachine.DiagnosticsConfiguration([dgngrp])
end

# Define main entry point kernel
function main()
    FT = Float64
    ## DG polynomial order
    N = 4
    ## Domain resolution and size
    Δh = FT(10)
    ## Time integrator setup
    t0 = FT(0)
    ## Courant number
    CFLmax = FT(20)
    timeend = FT(1000)
    xmax, ymax, zmax = FT(250), FT(250), FT(500)

    @testset "DryRayleighBenardTest" begin
        for Δh in Δh
            Δv = Δh
            resolution = (Δh, Δh, Δv)
            driver_config = config_problem(FT, N, resolution, xmax, ymax, zmax)
            ## Set up the time-integrator, using a multirate infinitesimal step
            ## method. The option `splitting_type = ClimateMachine.SlowFastSplitting()`
            ## separates fast-slow modes by splitting away the acoustic waves and
            ## treating them via a sub-stepped explicit method.
            ode_solver_type = ClimateMachine.MISSolverType(;
                splitting_type = ClimateMachine.SlowFastSplitting(),
                mis_method = MIS2,
                fast_method = LSRK144NiegemannDiehlBusch,
                nsubsteps = (40,),
            )
            solver_config = ClimateMachine.SolverConfiguration(
                t0,
                timeend,
                driver_config,
                ode_solver_type = ode_solver_type,
                init_on_cpu = true,
                Courant_number = CFLmax,
            )
            dgn_config = config_diagnostics(driver_config)

            ## User defined callbacks (TMAR positivity preserving filter)
            cbtmarfilter = GenericCallbacks.EveryXSimulationSteps(1) do
                Filters.apply!(
                    solver_config.Q,
                    ("moisture.ρq_tot",),
                    solver_config.dg.grid,
                    TMARFilter(),
                )
                nothing
            end
            result = ClimateMachine.invoke!(
                solver_config;
                diagnostics_config = dgn_config,
                user_callbacks = (cbtmarfilter,),
                check_euclidean_distance = true,
            )
            ## result == engf/eng0
            @test isapprox(result, FT(1); atol = 1.5e-2)
        end
    end
end

# Run
main()

================================================
FILE: tutorials/Atmos/heldsuarez.jl
================================================

# # Dry atmosphere GCM with Held-Suarez forcing
#
# The Held-Suarez setup (Held and Suarez, 1994) is a textbook example for a
# simplified atmospheric global circulation model configuration which has been
# used as a benchmark experiment for development of the dynamical cores (i.e.,
# GCMs without continents, moisture or parametrization schemes of the physics)
# for atmospheric models. It is forced by a thermal relaxation to a reference
# state and damped by linear (Rayleigh) friction. This example demonstrates how
#
# * to set up a ClimateMachine-Atmos GCM configuration;
# * to select and save GCM diagnostics output.
#
# To begin, we load ClimateMachine and a few miscellaneous useful Julia packages.
using Distributions
using Random
using StaticArrays
using UnPack

# ClimateMachine specific modules needed to make this example work (e.g., we will need
# spectral filters, etc.).
using ClimateMachine
using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.ConfigTypes
using ClimateMachine.Diagnostics
using ClimateMachine.GenericCallbacks
using ClimateMachine.Mesh.Grids
using ClimateMachine.Mesh.Filters
using Thermodynamics.TemperatureProfiles
using ClimateMachine.SystemSolvers
using ClimateMachine.ODESolvers
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates
using ClimateMachine.BalanceLaws

import ClimateMachine.BalanceLaws: source, prognostic_vars

# [ClimateMachine parameters](https://github.com/CliMA/CLIMAParameters.jl) are
# needed to have access to Earth's physical parameters.
using CLIMAParameters
using CLIMAParameters.Planet: MSLP, R_d, day, grav, cp_d, cv_d, planet_radius

# We need to load the physical parameters for Earth to have an Earth-like simulation :).
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet();

"""
    HeldSuarezForcingTutorial <: TendencyDef{Source}

Defines a forcing that parametrises radiative and frictional effects using
Newtonian relaxation and Rayleigh friction, following Held and Suarez (1994)
"""
struct HeldSuarezForcingTutorial <: TendencyDef{Source} end

## This forcing contributes source terms to the momentum and energy equations.
prognostic_vars(::HeldSuarezForcingTutorial) = (Momentum(), Energy())

## Compute the Held-Suarez relaxation coefficients at the current node:
## `k_v` (Rayleigh friction rate), `k_T` (thermal relaxation rate) and
## `T_equil` (equilibrium temperature toward which the flow is relaxed).
## Note: the previously declared locals `T_ref` and `_grav` were never used
## in this function and have been removed.
function held_suarez_forcing_coefficients(bl, args)
    @unpack state, aux = args
    @unpack ts = args.precomputed
    FT = eltype(state)

    ## Parameters
    param_set = parameter_set(bl)
    _R_d = FT(R_d(param_set))
    _day = FT(day(param_set))
    _cp_d = FT(cp_d(param_set))
    _p0 = FT(MSLP(param_set))

    ## Held-Suarez parameters
    k_a = FT(1 / (40 * _day)) ## free-atmosphere thermal relaxation rate (1/s)
    k_f = FT(1 / _day)        ## boundary-layer friction rate (1/s)
    k_s = FT(1 / (4 * _day))  ## surface thermal relaxation rate (1/s)
    ΔT_y = FT(60)             ## equator-to-pole temperature contrast (K)
    Δθ_z = FT(10)             ## vertical potential temperature contrast (K)
    T_equator = FT(315)       ## equatorial equilibrium temperature (K)
    T_min = FT(200)           ## lower bound on equilibrium temperature (K)
    σ_b = FT(7 / 10)          ## sigma coordinate of the boundary-layer top

    ## Held-Suarez forcing
    φ = latitude(bl, aux)
    p = air_pressure(ts)

    ##TODO: replace _p0 with dynamic surface pressure in Δσ calculations to account
    ##for topography, but leave unchanged for calculations of σ involved in T_equil
    σ = p / _p0
    exner_p = σ^(_R_d / _cp_d)
    Δσ = (σ - σ_b) / (1 - σ_b)
    height_factor = max(0, Δσ)
    T_equil = (T_equator - ΔT_y * sin(φ)^2 - Δθ_z * log(σ) * cos(φ)^2) * exner_p
    T_equil = max(T_min, T_equil)
    k_T = k_a + (k_s - k_a) * height_factor * cos(φ)^4
    k_v = k_f * height_factor

    return (k_v = k_v, k_T = k_T, T_equil = T_equil)
end

## Energy source: Newtonian relaxation of temperature toward `T_equil`.
function source(::Energy, s::HeldSuarezForcingTutorial, atmos, args)
    @unpack state = args
    FT = eltype(state)
    @unpack ts = args.precomputed
    nt = held_suarez_forcing_coefficients(atmos, args)
    param_set = parameter_set(atmos)
    _cv_d = FT(cv_d(param_set))
    @unpack k_T, T_equil = nt
    T = air_temperature(ts)
    return -k_T * state.ρ * _cv_d * (T - T_equil)
end

## Momentum source: Rayleigh friction applied to the component of momentum
## tangential to the planet surface.
function source(::Momentum, s::HeldSuarezForcingTutorial, atmos, args)
    @unpack state, aux = args
    nt = held_suarez_forcing_coefficients(atmos, args)
    return -nt.k_v * projection_tangential(atmos, aux, state.ρu)
end

# ## Set initial condition
# When using ClimateMachine, we need to define a function that sets the initial
# state of our model run. In our case, we use the reference state of the
# simulation (defined below) and add a little bit of noise. Note that the
# initial state includes a zero initial velocity field.
function init_heldsuarez!(problem, balance_law, state, aux, localgeo, time)
    FT = eltype(state)

    ## Set initial state to reference state with random perturbation
    rnd = FT(1 + rand(Uniform(-1e-3, 1e-3)))
    state.ρ = aux.ref_state.ρ
    state.ρu = SVector{3, FT}(0, 0, 0)
    state.energy.ρe = rnd * aux.ref_state.ρe
end;

# ## Initialize ClimateMachine
# Before we do anything further, we need to initialize ClimateMachine. Among
# other things, this will initialize MPI for us.
ClimateMachine.init();

# ## Setting the floating-type precision
# ClimateMachine allows us to run a model with different floating-type
# precisions, with lower precision we get our results faster, and with higher
# precision, we may get more accurate results, depending on the questions we
# are after.
const FT = Float64;

# ## Setup model configuration
# Now that we have defined our forcing and initialization functions, and have
# initialized ClimateMachine, we can set up the model.
#
# ## Set up a reference state
# We start by setting up a reference state. This is simply a vector field that
# we subtract from the solutions to the governing equations to both improve
# numerical stability of the implicit time stepper and enable faster model
# spin-up. The reference state assumes hydrostatic balance and ideal gas law,
# with a pressure $p_r(z)$ and density $\rho_r(z)$ that only depend on altitude
# $z$ and are in hydrostatic balance with each other.
#
# In this example, the reference temperature field smoothly transitions from a
# linearly decaying profile near the surface to a constant temperature profile
# at the top of the domain.
temp_profile_ref = DecayingTemperatureProfile{FT}(param_set)
ref_state = HydrostaticState(temp_profile_ref);

# ## Set up a Rayleigh sponge layer
# To avoid wave reflection at the top of the domain, the model applies a sponge
# layer that linearly damps the momentum equations.
domain_height = FT(30e3)               # height of the computational domain (m)
z_sponge = FT(12e3)                    # height at which sponge begins (m)
α_relax = FT(1 / 60 / 15)              # sponge relaxation rate (1/s)
exponent = FT(2)                       # sponge exponent for squared-sinusoid profile
u_relax = SVector(FT(0), FT(0), FT(0)) # relaxation velocity (m/s)
sponge = RayleighSponge{FT}(domain_height, z_sponge, α_relax, u_relax, exponent);

# ## Set up turbulence models
# In order to produce a stable simulation, we need to dissipate energy and
# enstrophy at the smallest scales of the developed flow field. To achieve this
# we set up diffusive forcing functions.
c_smag = FT(0.21);      # Smagorinsky constant
τ_hyper = FT(4 * 3600); # hyperdiffusion time scale (s)
turbulence_model = SmagorinskyLilly(c_smag);
## Pass the named `τ_hyper` constant (previously the literal `FT(4 * 3600)`
## was repeated here, leaving `τ_hyper` unused) so the hyperdiffusion time
## scale is defined in exactly one place.
hyperdiffusion_model = DryBiharmonic(τ_hyper);

# ## Instantiate the model
# The Held Suarez setup was designed to produce an equilibrated state that is
# comparable to the zonal mean of the Earth’s atmosphere.
physics = AtmosPhysics{FT}(
    param_set;
    ref_state = ref_state,
    turbulence = turbulence_model,
    hyperdiffusion = hyperdiffusion_model,
    moisture = DryModel(),
);
model = AtmosModel{FT}(
    AtmosGCMConfigType,
    physics;
    init_state_prognostic = init_heldsuarez!,
    source = (Gravity(), Coriolis(), HeldSuarezForcingTutorial(), sponge),
);

# This concludes the setup of the physical model!

# ## Set up the driver
# We just need to set up a few parameters that define the resolution of the
# discontinuous Galerkin method and for how long we want to run our model
# setup.
poly_order = 5;                        ## discontinuous Galerkin polynomial order
n_horz = 2;                            ## horizontal element number
n_vert = 2;                            ## vertical element number
resolution = (n_horz, n_vert)
n_days = 0.1;                          ## experiment day number
timestart = FT(0);                     ## start time (s)
timeend = FT(n_days * day(param_set)); ## end time (s);

# The next lines set up the spatial grid.
driver_config = ClimateMachine.AtmosGCMConfiguration(
    "HeldSuarez",
    poly_order,
    resolution,
    domain_height,
    param_set,
    init_heldsuarez!;
    model = model,
);

# The next lines set up the time stepper. Since the resolution
# in the vertical is much finer than in the horizontal,
# the 'stiff' parts of the PDE will be in the vertical.
# Setting `splitting_type = HEVISplitting()` will treat
# vertical acoustic waves implicitly, while all other dynamics
# are treated explicitly.
## Horizontally-explicit, vertically-implicit (HEVI) IMEX time integrator.
ode_solver_type = ClimateMachine.IMEXSolverType(
    splitting_type = HEVISplitting(),
    implicit_model = AtmosAcousticGravityLinearModel,
    implicit_solver = ManyColumnLU,
    solver_method = ARK2GiraldoKellyConstantinescu,
);

solver_config = ClimateMachine.SolverConfiguration(
    timestart,
    timeend,
    driver_config,
    Courant_number = FT(0.1),
    ode_solver_type = ode_solver_type,
    init_on_cpu = true,
    CFL_direction = HorizontalDirection(),
    diffdir = HorizontalDirection(),
);

# ## Set up spectral exponential filter
# After every completed time step we apply a spectral filter to remove
# remaining small-scale noise introduced by the numerical procedures. This
# assures that our run remains stable.
filterorder = 10;
filter = ExponentialFilter(solver_config.dg.grid, 0, filterorder);
cbfilter = GenericCallbacks.EveryXSimulationSteps(1) do
    Filters.apply!(
        solver_config.Q,
        AtmosFilterPerturbations(model),
        solver_config.dg.grid,
        filter,
        state_auxiliary = solver_config.dg.state_auxiliary,
    )
    nothing
end;

# ## Setup diagnostic output
#
# Choose frequency and resolution of output, and a diagnostics group (dgngrp)
# which defines output variables. This needs to be defined in
# [`Diagnostics`](@ref ClimateMachine.Diagnostics).
interval = "1000steps";
_planet_radius = FT(planet_radius(param_set));
info = driver_config.config_info;
## Lat/lon/altitude bounds of the interpolation grid: first row is the lower
## corner, second row the upper corner.
boundaries = [
    FT(-90.0) FT(-180.0) _planet_radius
    FT(90.0) FT(180.0) FT(_planet_radius + info.domain_height)
];
resolution = (FT(10), FT(10), FT(1000)); # in (deg, deg, m)
interpol = ClimateMachine.InterpolationConfiguration(
    driver_config,
    boundaries,
    resolution,
);

## Dump both the prognostic state and the auxiliary state, interpolated onto
## the lat/lon grid defined above.
dgngrps = [
    setup_dump_state_diagnostics(
        AtmosGCMConfigType(),
        interval,
        driver_config.name,
        interpol = interpol,
    ),
    setup_dump_aux_diagnostics(
        AtmosGCMConfigType(),
        interval,
        driver_config.name,
        interpol = interpol,
    ),
];
dgn_config = ClimateMachine.DiagnosticsConfiguration(dgngrps);

# ## Run the model
# Finally, we can run the model using the physical setup and solvers from
# above. We use the spectral filter in our callbacks after every time step, and
# collect the diagnostics output.
result = ClimateMachine.invoke!(
    solver_config;
    diagnostics_config = dgn_config,
    user_callbacks = (cbfilter,),
    check_euclidean_distance = true,
);

# ## References
#
# - [Held1994](@cite)

================================================
FILE: tutorials/Atmos/risingbubble.jl
================================================

# # Rising Thermal Bubble
#
# In this example, we demonstrate the usage of the `ClimateMachine`
# [AtmosModel](@ref AtmosModel-docs) machinery to solve the fluid
# dynamics of a thermal perturbation in a neutrally stratified background state
# defined by its uniform potential temperature. We solve a flow in a box configuration -
# this is representative of a large-eddy simulation. Several versions of the problem
# setup may be found in literature, but the general idea is to examine the
# vertical ascent of a thermal bubble (we can interpret these as simple
# representation of convective updrafts).
#
# ## Description of experiment
# 1) Dry Rising Bubble (circular potential temperature perturbation)
# 2) Boundaries
#    Top and Bottom boundaries:
#    - `Impenetrable(FreeSlip())` - Top and bottom: no momentum flux, no mass flux through
#      walls.
#    - `Impermeable()` - non-porous walls, i.e. no diffusive fluxes through
#      walls.
#    Lateral boundaries
#    - Laterally periodic
# 3) Domain - 10000 m (horizontal) x 500 m (horizontal) x 10000 m (vertical)
#    (NOTE(review): an earlier revision of this text said 2500 m cubed; the
#    values here match the driver `main()` below.)
# 4) Resolution - 125 m effective resolution (see `main()` below)
# 5) Total simulation time - 1000 s (the tutorial default runs 100 s)
# 6) Mesh Aspect Ratio (Effective resolution) 1:1
# 7) Overrides defaults for
#    - CPU Initialisation
#    - Time integrator
#    - Sources
#    - Smagorinsky Coefficient
#md # !!! note
#md #     This experiment setup assumes that you have installed the
#md #     `ClimateMachine` according to the instructions on the landing page.
#md # We assume the users' familiarity with the conservative form of the
#md # equations of motion for a compressible fluid (see the
#md # [AtmosModel](@ref AtmosModel-docs) page).
#md #
#md # The following topics are covered in this example
#md # - Package requirements
#md # - Defining a `model` subtype for the set of conservation equations
#md # - Defining the initial conditions
#md # - Applying source terms
#md # - Choosing a turbulence model
#md # - Adding tracers to the model
#md # - Choosing a time-integrator
#md # - Choosing diagnostics (output) configurations
#md #
#md # The following topics are not covered in this example
#md # - Defining new boundary conditions
#md # - Defining new turbulence models
#md # - Building new time-integrators
#md # - Adding diagnostic variables (beyond a standard pre-defined list of
#md #   variables)

# ## [Loading code](@id Loading-code-rtb)
# Before setting up our experiment, we recognize that we need to import some
# pre-defined functions from other packages. Julia allows us to use existing
# modules (variable workspaces), or write our own to do so. Complete
# documentation for the Julia module system can be found
# [here](https://docs.julialang.org/en/v1/manual/modules/#).

# We need to use the `ClimateMachine` module! This imports all functions
# specific to atmospheric and ocean flow modeling.
using ClimateMachine
ClimateMachine.init()

using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.ConfigTypes
using ClimateMachine.Diagnostics
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using Thermodynamics.TemperatureProfiles
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates

# In ClimateMachine we use `StaticArrays` for our variable arrays.
# We also use the `Test` package to help with unit tests and continuous
# integration systems to design sensible tests for our experiment to ensure new
# / modified blocks of code don't damage the fidelity of the physics. The test
# defined within this experiment is not a unit test for a specific
# subcomponent, but ensures time-integration of the defined problem conditions
# within a reasonable tolerance. Immediately useful macros and functions from
# this include `@test` and `@testset` which will allow us to define the testing
# parameter sets.
using StaticArrays
using Test

using CLIMAParameters
using CLIMAParameters.Atmos.SubgridScale: C_smag
using CLIMAParameters.Planet: R_d, cp_d, cv_d, MSLP, grav
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet();

# ## [Initial Conditions](@id init-rtb)
# This example demonstrates the use of functions defined
# in the [`Thermodynamics`](@ref Thermodynamics) package to
# generate the appropriate initial state for our problem.

#md # !!! note
#md #     The following variables are assigned in the initial condition
#md #     - `state.ρ` = Scalar quantity for initial density profile
#md #     - `state.ρu`= 3-component vector for initial momentum profile
#md #     - `state.energy.ρe`= Scalar quantity for initial total-energy profile
#md #       humidity
#md #     - `state.tracers.ρχ` = Vector of four tracers (here, for demonstration
#md #       only; we can interpret these as dye injections for visualization
#md #       purposes)
function init_risingbubble!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord

    ## Problem float-type
    FT = eltype(state)
    param_set = parameter_set(bl)

    ## Unpack constant parameters
    R_gas::FT = R_d(param_set)
    c_p::FT = cp_d(param_set)
    c_v::FT = cv_d(param_set)
    p0::FT = MSLP(param_set)
    _grav::FT = grav(param_set)

    ## Define bubble center and background potential temperature.
    ## The perturbation is uniform in `y` (the radius `r` depends only on
    ## `x` and `z`); the previously declared locals `yc` and `γ` were unused
    ## and have been removed.
    xc::FT = 5000
    zc::FT = 2000
    r = sqrt((x - xc)^2 + (z - zc)^2)
    rc::FT = 2000
    θamplitude::FT = 2

    ## This is configured in the reference hydrostatic state
    ref_state = reference_state(bl)
    θ_ref::FT = ref_state.virtual_temperature_profile.T_surface

    ## Add the thermal perturbation:
    Δθ::FT = 0
    if r <= rc
        Δθ = θamplitude * (1.0 - r / rc)
    end

    ## Compute perturbed thermodynamic state:
    θ = θ_ref + Δθ                                 ## potential temperature
    π_exner = FT(1) - _grav / (c_p * θ) * z        ## exner pressure
    ρ = p0 / (R_gas * θ) * (π_exner)^(c_v / R_gas) ## density
    T = θ * π_exner
    e_int = internal_energy(param_set, T)
    ts = PhaseDry(param_set, e_int, ρ)
    ρu = SVector(FT(0), FT(0), FT(0))              ## momentum

    ## State (prognostic) variable assignment
    e_kin = FT(0)                                  ## kinetic energy
    e_pot = gravitational_potential(bl, aux)       ## potential energy
    ρe_tot = ρ * total_energy(e_kin, e_pot, ts)    ## total energy
    ρχ = FT(0)                                     ## tracer

    ## We inject tracers at the initial condition at some specified z coordinates
    if 500 < z <= 550
        ρχ += FT(0.05)
    end

    ## We want 4 tracers
    ntracers = 4

    ## Define 4 tracers, (arbitrary scaling for this demo problem)
    ρχ = SVector{ntracers, FT}(ρχ, ρχ / 2, ρχ / 3, ρχ / 4)

    ## Assign State Variables
    state.ρ = ρ
    state.ρu = ρu
    state.energy.ρe = ρe_tot
    state.tracers.ρχ = ρχ
end

# ## [Model Configuration](@id config-helper)
# We define a configuration function to assist in prescribing the physical
# model. The purpose of this is to populate the
# `ClimateMachine.AtmosLESConfiguration` with arguments
# appropriate to the problem being considered.
function config_risingbubble(
    ::Type{FT},
    N,
    resolution,
    xmax,
    ymax,
    zmax,
) where {FT}

    ## Since we want four tracers, we specify this and include the appropriate
    ## diffusivity scaling coefficients (normally these would be physically
    ## informed but for this demonstration we use integers corresponding to the
    ## tracer index identifier)
    ntracers = 4
    δ_χ = SVector{ntracers, FT}(1, 2, 3, 4)

    ## To assemble `AtmosModel` with no tracers, set `tracers = NoTracers()`.
    ## The model coefficient for the turbulence closure is defined via the
    ## [CLIMAParameters
    ## package](https://CliMA.github.io/CLIMAParameters.jl/latest/) A reference
    ## state for the linearisation step is also defined.
    T_surface = FT(300)
    T_min_ref = FT(0)
    T_profile = DryAdiabaticProfile{FT}(param_set, T_surface, T_min_ref)
    ref_state = HydrostaticState(T_profile)

    ## Here we assemble the `AtmosModel`.
    _C_smag = FT(C_smag(param_set))
    physics = AtmosPhysics{FT}(
        param_set;                             ## Parameter set corresponding to earth parameters
        ref_state = ref_state,                 ## Reference state
        turbulence = SmagorinskyLilly(_C_smag), ## Turbulence closure model
        moisture = DryModel(),                 ## Exclude moisture variables
        tracers = NTracers{ntracers, FT}(δ_χ), ## Tracer model with diffusivity coefficients
    )
    model = AtmosModel{FT}(
        AtmosLESConfigType,                        ## Flow in a box, requires the AtmosLESConfigType
        physics;                                   ## Atmos physics
        init_state_prognostic = init_risingbubble!, ## Apply the initial condition
        source = (Gravity(),),                     ## Gravity is the only source term here
    )

    ## Finally, we pass a `Problem Name` string, the mesh information, and the
    ## model type to the [`AtmosLESConfiguration`] object.
    config = ClimateMachine.AtmosLESConfiguration(
        "DryRisingBubble", ## Problem title [String]
        N,                 ## Polynomial order [Int]
        resolution,        ## (Δx, Δy, Δz) effective resolution [m]
        xmax,              ## Domain maximum size [m]
        ymax,              ## Domain maximum size [m]
        zmax,              ## Domain maximum size [m]
        param_set,         ## Parameter set.
        init_risingbubble!, ## Function specifying initial condition
        model = model,     ## Model type
    )
    return config
end

#md # !!! note
#md #     `Keywords` are used to specify some arguments (see appropriate source
#md #     files).

# ## [Diagnostics](@id config_diagnostics)
# Here we define the diagnostic configuration specific to this problem.
function config_diagnostics(driver_config)
    interval = "10000steps"
    dgngrp = setup_atmos_default_diagnostics(
        AtmosLESConfigType(),
        interval,
        driver_config.name,
    )
    return ClimateMachine.DiagnosticsConfiguration([dgngrp])
end

function main()
    ## These are essentially arguments passed to the
    ## [`config_risingbubble`](@ref config-helper) function. For type
    ## consistency we explicitly define the problem floating-precision.
    FT = Float64

    ## We need to specify the polynomial order for the DG discretization,
    ## effective resolution, simulation end-time, the domain bounds, and the
    ## courant-number for the time-integrator. Note how the time-integration
    ## components `solver_config` are distinct from the spatial / model
    ## components in `driver_config`. `init_on_cpu` is a helper keyword argument
    ## that forces problem initialization on CPU (thereby allowing the use of
    ## random seeds, spline interpolants and other special functions at the
    ## initialization step.)
    N = 4
    Δh = FT(125)
    Δv = FT(125)
    resolution = (Δh, Δh, Δv)
    xmax = FT(10000)
    ymax = FT(500)
    zmax = FT(10000)
    t0 = FT(0)
    timeend = FT(100) ## For full simulation set `timeend = 1000`

    ## Use up to 1.7 if ode_solver is the single rate LSRK144.
    CFL = FT(1.7)

    ## Assign configurations so they can be passed to the `invoke!` function
    driver_config = config_risingbubble(FT, N, resolution, xmax, ymax, zmax)

    ## Choose an Explicit Single-rate Solver from the existing [`ODESolvers`](@ref ClimateMachine.ODESolvers) options.
    ## Apply the outer constructor to define the `ode_solver`.
    ## The 1D-IMEX method is less appropriate for the problem given the current
    ## mesh aspect ratio (1:1).
    ode_solver_type = ClimateMachine.ExplicitSolverType(
        solver_method = LSRK144NiegemannDiehlBusch,
    )

    ## If the user prefers a multi-rate explicit time integrator,
    ## the ode_solver above can be replaced with
    ##
    ## `ode_solver = ClimateMachine.MultirateSolverType(
    ##     fast_model = AtmosAcousticGravityLinearModel,
    ##     slow_method = LSRK144NiegemannDiehlBusch,
    ##     fast_method = LSRK144NiegemannDiehlBusch,
    ##     timestep_ratio = 10,
    ## )`
    ## See [ODESolvers](@ref ODESolvers-docs) for all of the available solvers.
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_solver_type = ode_solver_type,
        init_on_cpu = true,
        Courant_number = CFL,
    )
    dgn_config = config_diagnostics(driver_config)

    ## Invoke solver (calls `solve!` function for time-integrator), pass the driver,
    ## solver and diagnostic config information.
    result = ClimateMachine.invoke!(
        solver_config;
        diagnostics_config = dgn_config,
        user_callbacks = (),
        check_euclidean_distance = true,
    )

    ## Check that the solution norm is reasonable.
    @test isapprox(result, FT(1); atol = 1.5e-3)
end

# The experiment definition is now complete. Time to run it.

# ## Running the file
# `julia --project tutorials/Atmos/risingbubble.jl` will run the
# experiment from the main ClimateMachine.jl directory, with diagnostics output
# at the intervals specified in [`config_diagnostics`](@ref
# config_diagnostics). You can also prescribe command line arguments for
# simulation update and output specifications. For
# rapid turnaround, we recommend that you run this experiment on a GPU.
# VTK output can be controlled via command line by
# setting `parse_clargs=true` in the `ClimateMachine.init`
# arguments, and then using `--vtk=`.

# ## [Output Visualisation](@id output-viz)
# See the `ClimateMachine` API interface documentation
# for generating output.
#
#
# - [VisIt](https://wci.llnl.gov/simulation/computer-codes/visit/)
# - [Paraview](https://www.paraview.org/)
# are two commonly used programs for `.vtu` files.
#
# For NetCDF or JLD2 diagnostics you may use any of the following tools:
# Julia's
# [`NCDatasets`](https://github.com/Alexander-Barth/NCDatasets.jl) and
# [`JLD2`](https://github.com/JuliaIO/JLD2.jl) packages with a suitable
#
# or the known and quick NCDF visualization tool:
# [`ncview`](http://meteora.ucsd.edu/~pierce/ncview_home_page.html)
# plotting program.
main()

================================================
FILE: tutorials/BalanceLaws/tendency_specification_layer.jl
================================================

# # A functional tendency specification layer

# In the balance law (mutating) functions, where we specify fluxes and sources,
# - [`flux_first_order!`](@ref ClimateMachine.BalanceLaws.flux_first_order!)
# - [`flux_second_order!`](@ref ClimateMachine.BalanceLaws.flux_second_order!)
# - and [`source!`](@ref ClimateMachine.BalanceLaws.source!),
# an additional (functional) tendency specification
# layer can be placed on-top that has several nice
# properties. The functional layer:
# - Separates tendency definitions from which tendencies are included in a particular model.
# - Reduces duplicate implementations of tendency definitions (e.g., in optional submodel variants)
# - Allows a more flexible combination of tendencies
# - Allows a simple way to loop over all tendencies for all prognostic variables and recover
#   _each_ flux / source term. This will allow us a simple way to evaluate, for example, the energy budget.

# ## Used modules / imports

# Make running locally easier from ClimateMachine.jl/:
if !("." in LOAD_PATH)
    push!(LOAD_PATH, ".")
    nothing
end

# First, using necessary modules:
using ClimateMachine.BalanceLaws
using ClimateMachine.VariableTemplates
using StaticArrays, Test

# Import methods to overload
import ClimateMachine.BalanceLaws: prognostic_vars, eq_tends, flux

# ## Define a balance law
# Here, we define a simple balance law:
struct MyBalanceLaw <: BalanceLaw end

# ## Define prognostic variable types
# Here, we'll define some prognostic variable types,
# by sub-typing [`AbstractPrognosticVariable`](@ref ClimateMachine.BalanceLaws.AbstractPrognosticVariable),
# for mass and energy:
struct Mass <: AbstractPrognosticVariable end
struct Energy <: AbstractPrognosticVariable end

# Define [`prognostic_vars`](@ref ClimateMachine.BalanceLaws.prognostic_vars),
# which returns _all_ prognostic variables
prognostic_vars(::MyBalanceLaw) = (Mass(), Energy());

# ## Define tendency definition types
# Tendency definition types are made by subtyping
# [`TendencyDef`](@ref ClimateMachine.BalanceLaws.TendencyDef).
# `TendencyDef` has one type parameter: the
# `AbstractTendencyType`, which can be either
# `Flux{FirstOrder}`, `Flux{SecondOrder}`, or `Source`.
## Concrete tendency definitions: one first-order flux, two sources, and one
## second-order (diffusive) flux.
struct Advection <: TendencyDef{Flux{FirstOrder}} end
struct Source1 <: TendencyDef{Source} end
struct Source2 <: TendencyDef{Source} end
struct Diffusion <: TendencyDef{Flux{SecondOrder}} end

# Define [`eq_tends`](@ref ClimateMachine.BalanceLaws.eq_tends),
# which returns a tuple of tendency definitions (those sub-typed
# by [`TendencyDef`](@ref ClimateMachine.BalanceLaws.TendencyDef)),
# given
# - the prognostic variable
# - the model (balance law)
# - the tendency type ([`Flux`](@ref ClimateMachine.BalanceLaws.Flux) or
#   [`Source`](@ref ClimateMachine.BalanceLaws.Source))
eq_tends(::Mass, ::MyBalanceLaw, ::Flux{FirstOrder}) = (Advection(),);
eq_tends(::Energy, ::MyBalanceLaw, ::Flux{FirstOrder}) = (Advection(),);
eq_tends(::Mass, ::MyBalanceLaw, ::Flux{SecondOrder}) = ();
eq_tends(::Energy, ::MyBalanceLaw, ::Flux{SecondOrder}) = (Diffusion(),);
eq_tends(::Mass, ::MyBalanceLaw, ::Source) = (Source1(), Source2());
eq_tends(::Energy, ::MyBalanceLaw, ::Source) = (Source1(), Source2());

# ## Testing `prognostic_vars` and `eq_tends`
# To test that `prognostic_vars` and `eq_tends` were
# implemented correctly, we'll create a balance law
# instance and call [`show_tendencies`](@ref ClimateMachine.BalanceLaws.show_tendencies),
# to make sure that the tendency table is accurate.
bl = MyBalanceLaw()
show_tendencies(bl; table_complete = true)

# The table looks correct. Now we're ready to
# add the specification layer.

# ## Adding the tendency specification layer
# For the purpose of this tutorial, we'll only focus
# on adding the layer to the first order flux, since
# doing so for the second order flux and source
# functions follow the same exact pattern. In other words,
# we'll add a layer that tests the `Flux{FirstOrder}` column
# in the table above.
# First, we'll define individual
# [`flux`](@ref ClimateMachine.BalanceLaws.flux) kernels:
flux(::Mass, ::Advection, bl::MyBalanceLaw, args) =
    args.state.ρ * SVector(1, 1, 1);
flux(::Energy, ::Advection, bl::MyBalanceLaw, args) =
    args.state.ρe * SVector(1, 1, 1);

# !!! note
#     - `flux` should return a 3-component vector for scalar equations
#     - `flux` should return a 3xN-component tensor for N-component vector equations
#     - `source` should return a scalar for scalar equations
#     - `source` should return a N-component vector for N-component vector equations

# Define `flux_first_order!` and utilize `eq_tends`
function flux_first_order!(
    bl::MyBalanceLaw,
    flx::Grad,
    state::Vars,
    aux,
    t,
    direction,
)
    tend_type = Flux{FirstOrder}()
    args = (; state, aux, t, direction)

    ## `Σfluxes(Mass(), eq_tends(Mass(), bl, tend_type), bl, args)` calls
    ## `flux(::Mass, ::Advection, ...)` defined above:
    eqt_ρ = eq_tends(Mass(), bl, tend_type)
    flx.ρ = Σfluxes(Mass(), eqt_ρ, bl, args)

    ## `Σfluxes(Energy(), eq_tends(Energy(), bl, tend_type), bl, args)` calls
    ## `flux(::Energy, ::Advection, ...)` defined above:
    eqt_ρe = eq_tends(Energy(), bl, tend_type)
    flx.ρe = Σfluxes(Energy(), eqt_ρe, bl, args)
    return nothing
end;

# ## Testing the tendency specification layer
# To test `flux_first_order!`, we need to initialize
# some dummy data to call it first:
FT = Float64;        # float type
aux = ();            # auxiliary fields
t = 0.0;             # time
direction = nothing; # Direction
state = Vars{@vars(ρ::FT, ρe::FT)}([1, 2]);
flx = Grad{@vars(ρ::FT, ρe::FT)}(zeros(MArray{Tuple{3, 2}, FT}));

# call `flux_first_order!`
flux_first_order!(bl, flx, state, aux, t, direction);

# Test that `flx` has been properly mutated:
@testset "Test results" begin
    @test flx.ρ == [1, 1, 1]
    @test flx.ρe == [2, 2, 2]
end
nothing

================================================
FILE: tutorials/Diagnostics/Debug/StateCheck.jl
================================================

# # State debug statistics
#
# This page shows how to use the `StateCheck`
# functions to get basic
# statistics for nodal values of fields held in ClimateMachine `MPIStateArray`
# data structures. The `StateCheck` functions can be used to
#
# 1. Generate statistics on `MPIStateArray` variables holding the state of a ClimateMachine experiment.
#
# and to
#
# 2. Compare against saved reference statistics from ClimateMachine `MPIStateArray`
#    variables. This can enable simple automated regression test checks for
#    detecting unexpected changes introduced into numerical experiments
#    by code updates.
#
# These two cases are shown below:

# ## 1. Generating statistics for a set of MPIStateArrays
#
# Here we create a callback that can generate statistics for an arbitrary
# set of the MPIStateArray type variables of the sort that hold persistent state for
# ClimateMachine models. We then invoke the callback to show the statistics.
#
# In regular use the `MPIStateArray` variables will come from model configurations.
# Here we create a dummy set of `MPIStateArray` variables for use in stand-alone
# examples.

# ### Create a dummy set of MPIStateArrays
#
# First we set up two `MPIStateArray` variables. This needs a few packages to be in place,
# and utilizes some utility functions to create the array and add named
# persistent state variables.
# This is usually handled automatically as part of model definition in regular
# ClimateMachine activity.
# Calling `ClimateMachine.init()` includes initializing GPU CUDA and MPI parallel
# processing options that match the hardware/software system in use.
# Set up a basic environment
using MPI
using StaticArrays
using Random
using ClimateMachine
using ClimateMachine.VariableTemplates
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.StateCheck
ClimateMachine.init()
FT = Float64

# Define some dummy vector and tensor abstract variables with associated types
# and dimensions
F1 = @vars begin
    ν∇u::SMatrix{3, 2, FT, 6}
    κ∇θ::SVector{3, FT}
end
F2 = @vars begin
    u::SVector{2, FT}
    θ::SVector{1, FT}
end
nothing # hide

# Create `MPIStateArray` variables with arrays to hold elements of the
# vectors and tensors.
## NOTE(review): Q1 is deliberately Float32 while Q2 is Float64, so the
## printed statistics exercise both precisions.
Q1 = MPIStateArray{Float32, F1}(
    MPI.COMM_WORLD,
    ClimateMachine.array_type(),
    4,
    9,
    8,
)
Q2 = MPIStateArray{Float64, F2}(
    MPI.COMM_WORLD,
    ClimateMachine.array_type(),
    4,
    3,
    8,
)
nothing # hide

# ### Create a call-back
#
# Now we can create a `StateCheck` call-back, _cb_, tied to the `MPIStateArray`
# variables _Q1_ and _Q2_. Each `MPIStateArray` in the array
# of `MPIStateArray` variables tracked is paired with a label
# to identify it. The call-back is also given a frequency (in time step numbers) and
# precision for printing summary tables.
cb = ClimateMachine.StateCheck.sccreate(
    [(Q1, "My gradients"), (Q2, "My fields")],
    1;
    prec = 15,
)
GenericCallbacks.init!(cb, nothing, nothing, nothing, nothing)
nothing # hide

# ### Invoke the call-back
#
# The call-back is of type `ClimateMachine.GenericCallbacks.EveryXSimulationSteps`
# and in regular use is designed to be passed to a ClimateMachine timestepping
# solver e.g.
typeof(cb)

# Here, for demonstration purposes, we can invoke
# the call-back after simply initializing the `MPIStateArray` fields to a random
# set of values e.g.
Q1.data .= rand(MersenneTwister(0), Float32, size(Q1.data))
Q2.data .= rand(MersenneTwister(0), Float64, size(Q2.data))
GenericCallbacks.call!(cb, nothing, nothing, nothing, nothing)

# ## 2.
Comparing to reference values # ### Generate arrays of reference values # # StateCheck functions can generate text that can be used to set the value of stored # arrays that can be used in a reference test for subsequent regression testing. This # involves 3 steps. # # **Step 1.** First a reference array setting program code is generated from the latest # state of a given callback e.g. ClimateMachine.StateCheck.scprintref(cb) # **Step 2.** Next the array setting program code is executed (see below). At this stage the _parr[]_ array # context may be hand edited. The parr[] array sets a target number of decimal places for # matching against reference values in _varr[]_. For different experiments and different fields # the degree of precision that constitutes failing a regression test may vary. Choosing the # _parr[]_ values requires some sense as to the stability of the particular numerical # and physical scenario an experiment represents. In the example below some precision # settings have been hand edited from the default of 16 to illustrate the process. #! 
format: off varr = [ [ "My gradients", "ν∇u[1]", 1.34348869323730468750e-04, 9.84732866287231445313e-01, 5.23545503616333007813e-01, 3.08209930764271777814e-01 ], [ "My gradients", "ν∇u[2]", 1.16317868232727050781e-01, 9.92088317871093750000e-01, 4.83800649642944335938e-01, 2.83350456014221541157e-01 ], [ "My gradients", "ν∇u[3]", 1.05845928192138671875e-03, 9.51775908470153808594e-01, 4.65474426746368408203e-01, 2.73615551085745090099e-01 ], [ "My gradients", "ν∇u[4]", 5.97668886184692382813e-02, 9.68048095703125000000e-01, 5.42618036270141601563e-01, 2.81570862027933854765e-01 ], [ "My gradients", "ν∇u[5]", 8.31030607223510742188e-02, 9.35931921005249023438e-01, 5.05405902862548828125e-01, 2.46073509972619536290e-01 ], [ "My gradients", "ν∇u[6]", 3.09681892395019531250e-02, 9.98341441154479980469e-01, 4.54375565052032470703e-01, 3.09461067853178561915e-01 ], [ "My gradients", "κ∇θ[1]", 8.47448110580444335938e-02, 9.94180679321289062500e-01, 5.27157366275787353516e-01, 2.92455951648181833313e-01 ], [ "My gradients", "κ∇θ[2]", 1.20514631271362304688e-02, 9.93527650833129882813e-01, 4.71063584089279174805e-01, 2.96449027197666359346e-01 ], [ "My gradients", "κ∇θ[3]", 8.14980268478393554688e-02, 9.55443382263183593750e-01, 5.05038917064666748047e-01, 2.77201022741208891187e-01 ], [ "My fields", "u[1]", 4.31410233294131639781e-02, 9.97140933049696531754e-01, 4.62139750850942054861e-01, 3.23076684924287371725e-01 ], [ "My fields", "u[2]", 1.01416659908237782872e-02, 9.14712023896926407218e-01, 4.76160523012988778913e-01, 2.71443440757963339038e-01 ], [ "My fields", "θ[1]", 6.58965491052394547467e-02, 9.73216404386510802738e-01, 4.60007166313864512830e-01, 2.87310472114545079059e-01 ], ] parr = [ [ "My gradients", "ν∇u[1]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[2]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[3]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[4]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[5]", 16, 7, 16, 0 ], [ "My gradients", "ν∇u[6]", 16, 7, 16, 0 ], [ "My 
gradients", "κ∇θ[1]", 16, 16, 16, 0 ], [ "My gradients", "κ∇θ[2]", 16, 16, 16, 0 ], [ "My gradients", "κ∇θ[3]", 16, 16, 16, 0 ], [ "My fields", "u[1]", 16, 16, 16, 0 ], [ "My fields", "u[2]", 16, 16, 16, 0 ], [ "My fields", "θ[1]", 16, 16, 16, 0 ], ] #! format: on # **Step 3.** Finally a call-back stored value can be compared for consistency to with _parr[]_ decimal places ClimateMachine.StateCheck.scdocheck(cb, (varr, parr)) nothing # hide # In this trivial case the match is guaranteed. The function will return _true_ to the calling # routine and this can be passed to an `@test` block. # # However we can modify the reference test values to # see the effect of a mismatch e.g. varr[1][3] = varr[1][3] * 10.0 ClimateMachine.StateCheck.scdocheck(cb, (varr, parr)) nothing # hide # Here the mis-matching field is highlighted with _N(0)_ indicating that the precision # was not met and actual match length was (in this case) 0. If any field fails the test returns false # for use in any regression testing control logic. 
================================================ FILE: tutorials/Land/Heat/heat_equation.jl ================================================ # # Heat equation tutorial # In this tutorial, we'll be solving the [heat # equation](https://en.wikipedia.org/wiki/Heat_equation): # `` # \frac{∂ ρcT}{∂ t} + ∇ ⋅ (-α ∇ρcT) = 0 # `` # where # - `t` is time # - `α` is the thermal diffusivity # - `T` is the temperature # - `ρ` is the density # - `c` is the heat capacity # - `ρcT` is the thermal energy # To put this in the form of ClimateMachine's [`BalanceLaw`](@ref # ClimateMachine.BalanceLaws.BalanceLaw), we'll re-write the equation as: # `` # \frac{∂ ρcT}{∂ t} + ∇ ⋅ (F(α, ρcT, t)) = 0 # `` # where # - ``F(α, ρcT, t) = -α ∇ρcT`` is the second-order flux # with boundary conditions # - Fixed temperature ``T_{surface}`` at ``z_{min}`` (non-zero Dirichlet) # - No thermal flux at ``z_{min}`` (zero Neumann) # Solving these equations is broken down into the following steps: # 1) Preliminary configuration # 2) PDEs # 3) Space discretization # 4) Time discretization / solver # 5) Solver hooks / callbacks # 6) Solve # 7) Post-processing # # Preliminary configuration # ## [Loading code](@id Loading-code-heat) # First, we'll load our pre-requisites: # - load external packages: using MPI using OrderedCollections using Plots using StaticArrays using OrdinaryDiffEq using DiffEqBase # - load CLIMAParameters and set up to use it: using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() # - load necessary ClimateMachine modules: using ClimateMachine using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux using ClimateMachine.Mesh.Geometry: LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using 
ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils # - import necessary ClimateMachine modules: (`import`ing enables us to # provide implementations of these structs/methods) import ClimateMachine.BalanceLaws: vars_state, source!, flux_second_order!, flux_first_order!, compute_gradient_argument!, compute_gradient_flux!, nodal_update_auxiliary_state!, nodal_init_state_auxiliary!, init_state_prognostic!, BoundaryCondition, boundary_conditions, boundary_state! import ClimateMachine.DGMethods: calculate_dt # ## Initialization # Define the float type (`Float64` or `Float32`) const FT = Float64; # Initialize ClimateMachine for CPU. ClimateMachine.init(; disable_gpu = true); const clima_dir = dirname(dirname(pathof(ClimateMachine))); # Load some helper functions for plotting include(joinpath(clima_dir, "docs", "plothelpers.jl")); # # Define the set of Partial Differential Equations (PDEs) # ## Define the model # Model parameters can be stored in the particular [`BalanceLaw`](@ref # ClimateMachine.BalanceLaws.BalanceLaw), in this case, a `HeatModel`: Base.@kwdef struct HeatModel{FT, APS} <: BalanceLaw "Parameters" param_set::APS "Heat capacity" ρc::FT = 1 "Thermal diffusivity" α::FT = 0.01 "Initial conditions for temperature" initialT::FT = 295.15 "Bottom boundary value for temperature (Dirichlet boundary conditions)" T_bottom::FT = 300.0 "Top flux (α∇ρcT) at top boundary (Neumann boundary conditions)" flux_top::FT = 0.0 end # Create an instance of the `HeatModel`: m = HeatModel{FT, typeof(param_set)}(; param_set = param_set); # This model dictates the flow control, using [Dynamic Multiple # Dispatch](https://en.wikipedia.org/wiki/Multiple_dispatch), for which # kernels are executed. # ## Define the variables # All of the methods defined in this section were `import`ed in # [Loading code](@ref Loading-code-heat) to let us provide # implementations for our `HeatModel` as they will be used by # the solver. 
# Specify auxiliary variables for `HeatModel`
vars_state(::HeatModel, ::Auxiliary, FT) = @vars(z::FT, T::FT);

# Specify prognostic variables, the variables solved for in the PDEs, for
# `HeatModel`
vars_state(::HeatModel, ::Prognostic, FT) = @vars(ρcT::FT);

# Specify state variables whose gradients are needed for `HeatModel`
vars_state(::HeatModel, ::Gradient, FT) = @vars(ρcT::FT);

# Specify gradient variables for `HeatModel`
vars_state(::HeatModel, ::GradientFlux, FT) = @vars(α∇ρcT::SVector{3, FT});

# ## Define the compute kernels

# Specify the initial values in `aux::Vars`, which are available in
# `init_state_prognostic!`. Note that
# - this method is only called at `t=0`
# - `aux.z` and `aux.T` are available here because we've specified `z` and `T`
# in `vars_state` given `Auxiliary`
function nodal_init_state_auxiliary!(
    m::HeatModel,
    aux::Vars,
    tmp::Vars,
    geom::LocalGeometry,
)
    ## The vertical coordinate of this node, taken from the local geometry
    aux.z = geom.coord[3]
    ## A spatially uniform initial temperature
    aux.T = m.initialT
end;

# Specify the initial values in `state::Vars`. Note that
# - this method is only called at `t=0`
# - `state.ρcT` is available here because we've specified `ρcT` in
# `vars_state` given `Prognostic`
function init_state_prognostic!(
    m::HeatModel,
    state::Vars,
    aux::Vars,
    localgeo,
    t::Real,
)
    ## The prognostic thermal energy follows from the initial temperature in `aux.T`
    state.ρcT = m.ρc * aux.T
end;

# The remaining methods, defined in this section, are called at every
# time-step in the solver by the [`BalanceLaw`](@ref
# ClimateMachine.BalanceLaws.BalanceLaw) framework.

# Compute/update all auxiliary variables at each node. Note that
# - `aux.T` is available here because we've specified `T` in
# `vars_state` given `Auxiliary`
function nodal_update_auxiliary_state!(
    m::HeatModel,
    state::Vars,
    aux::Vars,
    t::Real,
)
    ## Diagnose temperature from the prognostic thermal energy
    aux.T = state.ρcT / m.ρc
end;

# Since we have second-order fluxes, we must tell `ClimateMachine` to compute
# the gradient of `ρcT`. Here, we specify how `ρcT` is computed.
# Note that
# - `transform.ρcT` is available here because we've specified `ρcT` in
# `vars_state` given `Gradient`
function compute_gradient_argument!(
    m::HeatModel,
    transform::Vars,
    state::Vars,
    aux::Vars,
    t::Real,
)
    ## The gradient of `ρcT` (not of `T`) is required, so pass `ρcT` through
    transform.ρcT = state.ρcT
end;

# Specify where in `diffusive::Vars` to store the computed gradient from
# `compute_gradient_argument!`. Note that:
# - `diffusive.α∇ρcT` is available here because we've specified `α∇ρcT` in
# `vars_state` given `GradientFlux`
# - `∇transform.ρcT` is available here because we've specified `ρcT` in
# `vars_state` given `Gradient`
function compute_gradient_flux!(
    m::HeatModel,
    diffusive::Vars,
    ∇transform::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
)
    ## Fourier's law: the stored gradient-flux is -α ∇ρcT
    diffusive.α∇ρcT = -m.α * ∇transform.ρcT
end;

# We have no sources, nor non-diffusive fluxes, so these kernels are no-ops.
function source!(m::HeatModel, _...) end;
function flux_first_order!(m::HeatModel, _...) end;

# Compute diffusive flux (``F(α, ρcT, t) = -α ∇ρcT`` in the original PDE).
# Note that:
# - `diffusive.α∇ρcT` is available here because we've specified `α∇ρcT` in
# `vars_state` given `GradientFlux`
function flux_second_order!(
    m::HeatModel,
    flux::Grad,
    state::Vars,
    diffusive::Vars,
    hyperdiffusive::Vars,
    aux::Vars,
    t::Real,
)
    ## `diffusive.α∇ρcT` already carries the minus sign applied in
    ## `compute_gradient_flux!`, so it is simply accumulated into the flux
    flux.ρcT += diffusive.α∇ρcT
end;

# ### Boundary conditions

# Second-order terms in our equations, ``∇⋅(F)`` where ``F = -α∇ρcT``, are
# internally reformulated to first-order unknowns.
# Boundary conditions must be specified for all unknowns, both first-order and
# second-order unknowns which have been reformulated.
# Two boundary-condition types: `DirichletBC` fixes the state (used with
# `m.T_bottom`), `NeumannBC` prescribes the flux (used with `m.flux_top`).
struct DirichletBC <: BoundaryCondition end;
struct NeumannBC <: BoundaryCondition end;

# The tuple of boundary conditions applied to this model
boundary_conditions(::HeatModel) = (DirichletBC(), NeumannBC())

# The boundary conditions for `ρcT` (first order unknown)
function boundary_state!(
    nf,
    bc::DirichletBC,
    m::HeatModel,
    state⁺::Vars,
    aux⁺::Vars,
    n⁻,
    state⁻::Vars,
    aux⁻::Vars,
    t,
    _...,
)
    ## Apply Dirichlet BCs: fix the boundary thermal energy to `ρc * T_bottom`
    state⁺.ρcT = m.ρc * m.T_bottom
end;

function boundary_state!(
    nf,
    bc::NeumannBC,
    m::HeatModel,
    state⁺::Vars,
    aux⁺::Vars,
    n⁻,
    state⁻::Vars,
    aux⁻::Vars,
    t,
    _...,
)
    ## Nothing to do here: the Neumann condition constrains the flux, not the state
    nothing
end;

# The boundary conditions for `ρcT` are specified here for second-order
# unknowns
function boundary_state!(
    nf,
    bc::DirichletBC,
    m::HeatModel,
    state⁺::Vars,
    diff⁺::Vars,
    hyperdiff⁺::Vars,
    aux⁺::Vars,
    n⁻,
    state⁻::Vars,
    diff⁻::Vars,
    hyperdiff⁻::Vars,
    aux⁻::Vars,
    t,
    _...,
)
    ## Nothing to do here: the Dirichlet condition constrains the state, not the flux
    nothing
end;

function boundary_state!(
    nf,
    bc::NeumannBC,
    m::HeatModel,
    state⁺::Vars,
    diff⁺::Vars,
    hyperdiff⁺::Vars,
    aux⁺::Vars,
    n⁻,
    state⁻::Vars,
    diff⁻::Vars,
    hyperdiff⁻::Vars,
    aux⁻::Vars,
    t,
    _...,
)
    ## Apply Neumann BCs: prescribe the flux along the outward normal `n⁻`
    diff⁺.α∇ρcT = n⁻ * m.flux_top
end;

# # Spatial discretization

# Prescribe polynomial order of basis functions in finite elements
N_poly = 5;

# Specify the number of vertical elements
nelem_vert = 10;

# Specify the domain height
zmax = FT(1);

# Establish a `ClimateMachine` single stack configuration
driver_config = ClimateMachine.SingleStackConfiguration(
    "HeatEquation",
    N_poly,
    nelem_vert,
    zmax,
    param_set,
    m,
    numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
);

# # Time discretization / solver

# Specify simulation time (SI units)
t0 = FT(0)
timeend = FT(40)

# In this section, we initialize the state vector and allocate memory for
# the solution in space (`dg` has the model `m`, which describes the PDEs
# as well as the function used for initialization). `SolverConfiguration`
# initializes the ODE solver, by default an explicit Low-Storage
# [Runge-Kutta](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods)
# method.
# In this tutorial, we prescribe an option for an implicit
# `Kvaerno3` method.

# First, let's define how the time-step is computed, based on the
# [Fourier number](https://en.wikipedia.org/wiki/Fourier_number)
# (i.e., the diffusive Courant number). Because
# the `HeatModel` is a custom model, we must define how both are computed.

# First, we must define our own implementation of `DGMethods.calculate_dt`,
# (which we imported):
function calculate_dt(dg, model::HeatModel, Q, Courant_number, t, direction)
    ## Evaluate the Courant number for a unit time-step, then rescale the
    ## time-step so that the requested Courant (Fourier) number is attained.
    unit_Δt = one(eltype(Q))
    courant_unit = DGMethods.courant(
        diffusive_courant,
        dg,
        model,
        Q,
        unit_Δt,
        t,
        direction,
    )
    return Courant_number / courant_unit
end

# Next, we'll define our implementation of `diffusive_courant`, the nodal
# diffusive Courant (Fourier) number `α Δt / Δx²`:
function diffusive_courant(
    m::HeatModel,
    state::Vars,
    aux::Vars,
    diffusive::Vars,
    Δx,
    Δt,
    t,
    direction,
)
    return m.α * Δt / Δx^2
end

# Finally, we initialize the state vector and solver
# configuration based on the given Fourier number.
# Note that, we can use a much larger Fourier number
# for implicit solvers as compared to explicit solvers.
use_implicit_solver = false if use_implicit_solver given_Fourier = FT(30) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config; ode_solver_type = ImplicitSolverType(OrdinaryDiffEq.Kvaerno3( autodiff = false, linsolve = LinSolveGMRES(), )), Courant_number = given_Fourier, CFL_direction = VerticalDirection(), ) else given_Fourier = FT(0.7) solver_config = ClimateMachine.SolverConfiguration( t0, timeend, driver_config; Courant_number = given_Fourier, CFL_direction = VerticalDirection(), ) end; grid = solver_config.dg.grid; Q = solver_config.Q; aux = solver_config.dg.state_auxiliary; # ## Inspect the initial conditions # Let's export a plot of the initial state output_dir = @__DIR__; mkpath(output_dir); z_scale = 100; # convert from meters to cm z_key = "z"; z_label = "z [cm]"; z = get_z(grid; z_scale = z_scale, rm_dupes = true); # Create an array to store the solution: dons_arr = Dict[dict_of_nodal_states(solver_config; interp = true)] # store initial condition at ``t=0`` time_data = FT[0] # store time data export_plot( z, time_data, dons_arr, ("ρcT",), joinpath(output_dir, "initial_condition.png"); xlabel = "ρcT", ylabel = z_label, xlims = (m.initialT - 1, m.T_bottom + 1), ); # ![](initial_condition.png) # It matches what we have in `init_state_prognostic!(m::HeatModel, ...)`, so # let's continue. # # Solver hooks / callbacks # Define the number of outputs from `t0` to `timeend` const n_outputs = 5; # This equates to exports every ceil(Int, timeend/n_outputs) time-step: const every_x_simulation_time = ceil(Int, timeend / n_outputs); # The `ClimateMachine`'s time-steppers provide hooks, or callbacks, which # allow users to inject code to be executed at specified intervals. In this # callback, a dictionary of prognostic and auxiliary states are appended to # `dons_arr` for time the callback is executed. In addition, time is collected # and appended to `time_data`. 
callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do push!(dons_arr, dict_of_nodal_states(solver_config; interp = true)) push!(time_data, gettime(solver_config.solver)) nothing end; # # Solve # This is the main `ClimateMachine` solver invocation. While users do not have # access to the time-stepping loop, code may be injected via `user_callbacks`, # which is a `Tuple` of callbacks in [`GenericCallbacks`](@ref ClimateMachine.GenericCallbacks). ClimateMachine.invoke!(solver_config; user_callbacks = (callback,)); # Append result at the end of the last time step: push!(dons_arr, dict_of_nodal_states(solver_config; interp = true)); push!(time_data, gettime(solver_config.solver)); # # Post-processing # Our solution is stored in the array of dictionaries `dons_arr` whose keys are # the output interval. The next level keys are the variable names, and the # values are the values along the grid: # To get `T` at ``t=0``, we can use `T_at_t_0 = dons_arr[1]["T"][:]` @show keys(dons_arr[1]) # Let's plot the solution: export_plot( z, time_data, dons_arr, ("ρcT",), joinpath(output_dir, "solution_vs_time.png"); xlabel = "ρcT", ylabel = z_label, ); # ![](solution_vs_time.png) export_contour( z, time_data, dons_arr, "ρcT", joinpath(output_dir, "solution_contour.png"); ylabel = "z [cm]", ) # ![](solution_contour.png) # The results look as we would expect: a fixed temperature at the bottom is # resulting in heat flux that propagates up the domain. 
To run this file, and # inspect the solution in `dons_arr`, include this tutorial in the Julia REPL # with: # ```julia # include(joinpath("tutorials", "Land", "Heat", "heat_equation.jl")) # ``` ================================================ FILE: tutorials/Land/Soil/Artifacts.toml ================================================ [bonan_soil_heat] git-tree-sha1 = "9f0933ccd6d902d0b75cc5bc9eb9dea3aa3708d7" ================================================ FILE: tutorials/Land/Soil/Coupled/equilibrium_test.jl ================================================ # # Coupled heat and water equations tending towards equilibrium # Other tutorials, such as the [soil heat tutorial](../Heat/bonan_heat_tutorial.md) # and [Richards equation tutorial](../Water/equilibrium_test.md) # demonstrate how to solve the heat # equation or Richard's equation without considering # dynamic interactions between the two. As an example, the user could # prescribe a fixed function of space and time for the liquid water content, # and use that to drive the heat equation, but without allowing the water # content to dynamically evolve according to Richard's equation and without # allowing the changing temperature of the soil to affect the water # evolution. # Here we show how to solve the interacting heat and water equations, # in sand, but without phase changes. This allows us to capture # behavior that is not present in the decoupled equations. # The equations # are: # `` # \frac{∂ ρe_{int}}{∂ t} = ∇ ⋅ κ(θ_l, θ_i; ν, ...) ∇T + ∇ ⋅ ρe_{int_{liq}} K (T,θ_l, θ_i; ν, ...) \nabla h( ϑ_l, z; ν, ...) # `` # `` # \frac{ ∂ ϑ_l}{∂ t} = ∇ ⋅ K (T,θ_l, θ_i; ν, ...) ∇h( ϑ_l, z; ν, ...). 
# `` # Here # ``t`` is the time (s), # ``z`` is the location in the vertical (m), # ``ρe_{int}`` is the volumetric internal energy of the soil (J/m^3), # ``T`` is the temperature of the soil (K), # ``κ`` is the thermal conductivity (W/m/K), # ``ρe_{int_{liq}}`` is the volumetric internal energy of liquid water (J/m^3), # ``K`` is the hydraulic conductivity (m/s), # ``h`` is the hydraulic head (m), # ``ϑ_l`` is the augmented volumetric liquid water fraction, # ``θ_i`` is the volumetric ice fraction, and # ``ν, ...`` denotes parameters relating to soil type, such as porosity. # We will solve this equation in an effectively 1-d domain with ``z ∈ [-1,0]``, # and with the following boundary and initial conditions: # ``- κ ∇T(t, z = 0) = 0 ẑ`` # `` -κ ∇T(t, z = -1) = 0 ẑ `` # `` T(t = 0, z) = T_{min} + (T_{max}-T_{min}) e^{Cz}`` # ``- K ∇h(t, z = 0) = 0 ẑ `` # `` -K ∇h(t, z = -1) = 0 ẑ`` # `` ϑ(t = 0, z) = ϑ_{min} + (ϑ_{max}-ϑ_{min}) e^{Cz}, `` # where ``C, T_{min}, T_{max}, ϑ_{min},`` and ``ϑ_{max}`` are # constants. # If we evolve this system for times long compared to the dynamical timescales # of the system, we expect it to reach an equilibrium where # the LHS of these equations tends to zero. # Assuming zero fluxes at the boundaries, the resulting equilibrium state # should satisfy ``∂h/∂z = 0`` and ``∂T/∂z = 0``. Physically, this means that # the water settles into a vertical profile in which # the resulting pressure balances gravity and that the temperature # is constant across the domain. # We verify that the system is approaching this equilibrium, and we also sketch out # an analytic calculation for the final temperature in equilibrium. 
# # Import necessary modules # External (non - CliMA) modules using MPI using OrderedCollections using StaticArrays using Statistics using Plots # CliMA Parameters using CLIMAParameters using CLIMAParameters.Planet: ρ_cloud_liq, ρ_cloud_ice, cp_l, cp_i, T_0, LH_f0 # ClimateMachine modules using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SoilWaterParameterizations using ClimateMachine.Land.SoilHeatParameterizations using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.Diagnostics using ClimateMachine.ConfigTypes using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state, parameter_set # # Preliminary set-up # Get the parameter set, which holds constants used across CliMA models: struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet(); # Initialize and pick a floating point precision: ClimateMachine.init() const FT = Float64; # Load plot helpers: const clima_dir = dirname(dirname(pathof(ClimateMachine))); include(joinpath(clima_dir, "docs", "plothelpers.jl")); # Set soil parameters to be consistent with sand. # Please see e.g. the [soil heat tutorial](../Heat/bonan_heat_tutorial.md) # for other soil type parameters, or [Cosby1984](@cite). # The porosity: porosity = FT(0.395); # Soil solids # are the components of soil besides water, ice, gases, and air. # We specify the soil component fractions, relative to all soil solids. # These should sum to unity; they do not account for pore space. 
ν_ss_quartz = FT(0.92) ν_ss_minerals = FT(0.08) ν_ss_om = FT(0.0) ν_ss_gravel = FT(0.0); # Other parameters include the hydraulic conductivity at saturation, the specific # storage, and the van Genuchten parameters for sand. # We recommend Chapter 8 of [Bonan19a](@cite) for finding parameters # for other soil types. Ksat = FT(4.42 / 3600 / 100) # m/s S_s = FT(1e-3) #inverse meters vg_n = FT(1.89) vg_α = FT(7.5); # inverse meters # Other constants needed: κ_quartz = FT(7.7) # W/m/K κ_minerals = FT(2.5) # W/m/K κ_om = FT(0.25) # W/m/K κ_liq = FT(0.57) # W/m/K κ_ice = FT(2.29); # W/m/K # The particle density of organic material-free soil is # equal to the particle density of quartz and other minerals ([BallandArp2005](@cite)): ρp = FT(2700); # kg/m^3 # We calculate the thermal conductivities for the solid material # and for saturated soil. These functions are taken from [BallandArp2005](@cite). κ_solid = k_solid(ν_ss_om, ν_ss_quartz, κ_quartz, κ_minerals, κ_om) κ_sat_frozen = ksat_frozen(κ_solid, porosity, κ_ice) κ_sat_unfrozen = ksat_unfrozen(κ_solid, porosity, κ_liq); # Next, we calculate the volumetric heat capacity of dry soil. Dry soil # refers to soil that has no water content. ρc_ds = FT((1 - porosity) * 1.926e06); # J/m^3/K # We collect the majority of the parameters needed # for modeling heat and water flow in soil in `soil_param_functions`, # an object of type [`SoilParamFunctions`](@ref ClimateMachine.Land.SoilParamFunctions). # Parameters used only for hydrology are stored in `water`, which # is of type # [`WaterParamFunctions`](@ref ClimateMachine.Land.WaterParamFunctions). 
# Collect the parameters needed for modeling heat and water flow in soil in
# `soil_param_functions`, an object of type
# [`SoilParamFunctions`](@ref ClimateMachine.Land.SoilParamFunctions);
# hydrology-only parameters live in the nested
# [`WaterParamFunctions`](@ref ClimateMachine.Land.WaterParamFunctions) object.
soil_param_functions = SoilParamFunctions(
    FT;
    porosity = porosity,
    ν_ss_gravel = ν_ss_gravel,
    ν_ss_om = ν_ss_om,
    ν_ss_quartz = ν_ss_quartz,
    ρc_ds = ρc_ds,
    ρp = ρp,
    κ_solid = κ_solid,
    κ_sat_unfrozen = κ_sat_unfrozen,
    κ_sat_frozen = κ_sat_frozen,
    water = WaterParamFunctions(FT; Ksat = Ksat, S_s = S_s),
);

# # Initial and Boundary conditions

# As we are not including the equations for phase changes in this tutorial,
# we chose temperatures that are above the freezing point of water.

# The initial temperature profile: an exponential transition from 288 K at
# the bottom of the domain to 289 K at the surface.
function T_init(aux)
    FT = eltype(aux)
    z_top = FT(0)
    z_bot = FT(-1)
    T_top = FT(289.0)
    T_bot = FT(288.0)
    decay = FT(20.0)
    ## Normalized depth: 0 at the surface (`z_top`), 1 at the bottom (`z_bot`)
    ξ = (aux.z - z_top) / (z_bot - z_top)
    return T_bot + (T_top - T_bot) * exp(-ξ * decay)
end;

# The initial water profile: the same exponential shape, transitioning
# between two fractions of the porosity.
function ϑ_l0(aux)
    FT = eltype(aux)
    z_top = FT(0)
    z_bot = FT(-1)
    ϑ_top = FT(porosity * 0.5)
    ϑ_bot = FT(porosity * 0.4)
    decay = FT(20.0)
    ## Normalized depth: 0 at the surface (`z_top`), 1 at the bottom (`z_bot`)
    ξ = (aux.z - z_top) / (z_bot - z_top)
    return ϑ_bot + (ϑ_top - ϑ_bot) * exp(-ξ * decay)
end;

# The boundary value problem in this case
# requires a boundary condition at the top and the bottom of the domain
# for each equation being solved. These conditions can be Dirichlet, or Neumann.
# Dirichlet boundary conditions are on `ϑ_l` and
# `T`, while Neumann boundary conditions are on `-κ∇T` and `-K∇h`. For Neumann
# conditions, the user supplies a scalar, which is multiplied by `ẑ` within the code.
# Water boundary conditions: zero flux at both the surface and the bottom.
surface_water_flux = (aux, t) -> eltype(aux)(0.0)
bottom_water_flux = (aux, t) -> eltype(aux)(0.0);

# The boundary conditions for the heat equation: also zero flux.
surface_heat_flux = (aux, t) -> eltype(aux)(0.0)
bottom_heat_flux = (aux, t) -> eltype(aux)(0.0);

# Collect the per-component conditions into a single domain BC object:
bc = LandDomainBC(
    bottom_bc = LandComponentBC(
        soil_heat = Neumann(bottom_heat_flux),
        soil_water = Neumann(bottom_water_flux),
    ),
    surface_bc = LandComponentBC(
        soil_heat = Neumann(surface_heat_flux),
        soil_water = Neumann(surface_water_flux),
    ),
);

# Next, we define the required `init_soil!` function, which takes the user
# specified functions of space for `T_init` and `ϑ_l0` and initializes the state
# variables of volumetric internal energy and augmented liquid fraction. This requires
# a conversion from `T` to `ρe_int`.
function init_soil!(land, state, aux, localgeo, time)
    myFT = eltype(state)
    ## Evaluate the user-supplied initial profiles at this node
    ϑ_l = myFT(land.soil.water.initialϑ_l(aux))
    θ_i = myFT(land.soil.water.initialθ_i(aux))
    state.soil.water.ϑ_l = ϑ_l
    state.soil.water.θ_i = θ_i

    param_set = parameter_set(land)
    ## Convert the initial temperature into volumetric internal energy:
    ## first compute the volumetric heat capacity of the (partially wet) soil...
    θ_l = volumetric_liquid_fraction(ϑ_l, land.soil.param_functions.porosity)
    ρc_ds = land.soil.param_functions.ρc_ds
    ρc_s = volumetric_heat_capacity(θ_l, θ_i, ρc_ds, param_set)
    ## ...then the internal energy corresponding to `initialT`
    state.soil.heat.ρe_int = volumetric_internal_energy(
        θ_i,
        ρc_s,
        land.soil.heat.initialT(aux),
        param_set,
    )
end;

# # Create the soil model structure

# First, for water (this is where the hydrology parameters
# are supplied):
soil_water_model = SoilWaterModel(
    FT;
    viscosity_factor = TemperatureDependentViscosity{FT}(),
    moisture_factor = MoistureDependent{FT}(),
    hydraulics = vanGenuchten(FT; α = vg_α, n = vg_n),
    initialϑ_l = ϑ_l0,
);

# Note that the viscosity of water depends on temperature.
# We account for the effect that has on the hydraulic conductivity
# by specifying `viscosity_factor = TemperatureDependentViscosity{FT}()`.
# The default, if no `viscosity_factor` keyword argument is supplied,
# is to not include the effect of `T` on viscosity.
More guidance about # specifying the # hydraulic conductivity, and the `hydraulics` model, # can be found in the [`hydraulic functions`](../Water/hydraulic_functions.md) # tutorial. # Repeat for heat: soil_heat_model = SoilHeatModel(FT; initialT = T_init) # Combine into a single soil model: m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model); # We aren't using any sources or sinks in the equations here, but this is where # freeze/thaw terms, runoff, root extraction, etc. would go. sources = (); # Create the LandModel - without other components (canopy, carbon, etc): m = LandModel( param_set, m_soil; boundary_conditions = bc, source = sources, init_state_prognostic = init_soil!, ); # # Specify the numerical details # Choose a resolution, domain boundaries, integration time, # timestep, and ODE solver. N_poly = 1 nelem_vert = 50 zmin = FT(-1) zmax = FT(0) driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ) t0 = FT(0) timeend = FT(60 * 60 * 72) dt = FT(30.0) solver_config = ClimateMachine.SolverConfiguration(t0, timeend, driver_config, ode_dt = dt); # Determine how often you want output: const n_outputs = 4 const every_x_simulation_time = ceil(Int, timeend / n_outputs); # Store initial condition at ``t=0``, # including prognostic, auxiliary, and # gradient flux variables: state_types = (Prognostic(), Auxiliary(), GradientFlux()) dons_arr = Dict[dict_of_nodal_states(solver_config, state_types; interp = true)] time_data = FT[0] # store time data # We specify a function which evaluates `every_x_simulation_time` and returns # the state vector, appending the variables we are interested in into # `dons_arr`. 
callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do dons = dict_of_nodal_states(solver_config, state_types; interp = true) push!(dons_arr, dons) push!(time_data, gettime(solver_config.solver)) nothing end; # # Run the integration ClimateMachine.invoke!(solver_config; user_callbacks = (callback,)); # Get z-coordinate z = get_z(solver_config.dg.grid; rm_dupes = true); # Let's export a plot of the initial state output_dir = @__DIR__; mkpath(output_dir); export_plot( z, time_data ./ (60 * 60 * 24), dons_arr, ("soil.water.ϑ_l",), joinpath(output_dir, "eq_moisture_plot.png"); xlabel = "ϑ_l", ylabel = "z (m)", time_units = "(days)", ) # ![](eq_moisture_plot.png) export_plot( z, time_data[2:end] ./ (60 * 60 * 24), dons_arr[2:end], ("soil.water.K∇h[3]",), joinpath(output_dir, "eq_hydraulic_head_plot.png"); xlabel = "K∇h (m/s)", ylabel = "z (m)", time_units = "(days)", ) # ![](eq_hydraulic_head_plot.png) export_plot( z, time_data ./ (60 * 60 * 24), dons_arr, ("soil.heat.T",), joinpath(output_dir, "eq_temperature_plot.png"); xlabel = "T (K)", ylabel = "z (m)", time_units = "(days)", ) # ![](eq_temperature_plot.png) export_plot( z, time_data[2:end] ./ (60 * 60 * 24), dons_arr[2:end], ("soil.heat.κ∇T[3]",), joinpath(output_dir, "eq_heat_plot.png"); xlabel = "κ∇T", ylabel = "z (m)", time_units = "(days)", ) # ![](eq_heat_plot.png) # # Analytic Expectations # We can determine a priori what we expect the final temperature to be in # equilibrium. # Regardless of the final water profile in equilibrium, we know that # the final temperature `T_f` will be a constant across the domain. All # water that began with a temperature above this point will cool to `T_f`, # and water that began with a temperature below this point will warm to # `T_f`. The initial function `T(z)` is equal to `T_f` at a value of # `z = z̃`. This is the location in space which divides these two groups # (water that warms over time and water that cools over time) spatially. 
# We can solve for `z̃(T_f)` using `T_f = T(z̃)`. # Next, we can determine the change in energy required to cool # the water above `z̃` to `T_f`: it is the integral from `z̃` to the surface # at `z = 0` of ` c θ(z) T(z) `, where `c` is the volumetric heat capacity - # a constant here - and `θ(z)` is the initial water profile. Compute the energy # required to warm the water below `z̃` to `T_f` in a similar way, set equal, and solve # for `T_f`. This results in `T_f = 288.056`, which is very close to the mean `T` we observe # after 3 days, of `288.054`. # One could also solve the equation for `ϑ_l` specified by # ``∂ h/∂ z = 0`` to determine the functional form of the # equilibrium profile of the liquid water. # # References # - [Bonan19a](@cite) # - [BallandArp2005](@cite) # - [Cosby1984](@cite) ================================================ FILE: tutorials/Land/Soil/Heat/bonan_heat_tutorial.jl ================================================ # # Solving the heat equation in soil # This tutorial shows how to use CliMA code to solve the heat # equation in soil. # For background on the heat equation in general, # and how to solve it using CliMA code, please see the # [`heat_equation.jl`](../../Heat/heat_equation.md) # tutorial. # The version of the heat equation we are solving here assumes no # sources or sinks and no flow of liquid water. It takes the form # `` # \frac{∂ ρe_{int}}{∂ t} = ∇ ⋅ κ(θ_l, θ_i; ν, ...) ∇T # `` # Here # ``t`` is the time (s), # ``z`` is the location in the vertical (m), # ``ρe_{int}`` is the volumetric internal energy of the soil (J/m^3), # ``T`` is the temperature of the soil (K), # ``κ`` is the thermal conductivity (W/m/K), # ``ϑ_l`` is the augmented volumetric liquid water fraction, # ``θ_i`` is the volumetric ice fraction, and # ``ν, ...`` denotes parameters relating to soil type, such as porosity. 
# We will solve this equation in an effectively 1-d domain with ``z ∈ [-1,0]``,
# and with the following boundary and initial conditions:

# ``T(t=0, z) = 275.15^\circ K``

# ``T(t, z = 0) = 288.15^\circ K ``

# `` -κ ∇T(t, z = -1) = 0 ẑ``

# The temperature ``T`` and
# volumetric internal energy ``ρe_{int}`` are related as

# ``
# ρe_{int} = ρc_s (θ_l, θ_i; ν, ...) (T - T_0) - θ_i ρ_i LH_{f0}
# ``

# where
# ``ρc_s`` is the volumetric heat capacity of the soil (J/m^3/K),
# ``T_0`` is the freezing temperature of water,
# ``ρ_i`` is the density of ice (kg/m^3), and
# ``LH_{f0}`` is the latent heat of fusion at ``T_0``.

# In this tutorial, we will use a [`PrescribedWaterModel`](@ref
# ClimateMachine.Land.PrescribedWaterModel). This option allows
# the user to specify a function for the spatial and temporal
# behavior of `θ_i` and `θ_l`; it does not solve Richards equation
# for the evolution of moisture. Please see the tutorials
# in the `Soil/Coupled/` folder or the `Soil/Water/`
# folder for information on solving
# Richards equation, either coupled or uncoupled from the heat equation, respectively.
# # Import necessary modules
# External (non - CliMA) modules
using MPI
using OrderedCollections
using StaticArrays
using Statistics
using Dierckx
using Plots
using DelimitedFiles

# CliMA Parameters
using CLIMAParameters
using CLIMAParameters.Planet: ρ_cloud_liq, ρ_cloud_ice, cp_l, cp_i, T_0, LH_f0

# ClimateMachine modules
using ClimateMachine
using ClimateMachine.Land
using ClimateMachine.Land.SoilWaterParameterizations
using ClimateMachine.Land.SoilHeatParameterizations
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state, parameter_set
# `calculate_dt` is imported (not just used) because this tutorial extends it
# with a method for `LandModel` further below.
import ClimateMachine.DGMethods: calculate_dt
using ArtifactWrappers

# # Preliminary set-up
# Get the parameter set, which holds constants used across CliMA models.
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet();
# Initialize and pick a floating point precision.
ClimateMachine.init()
const FT = Float32;
# Load functions that will help with plotting
const clima_dir = dirname(dirname(pathof(ClimateMachine)));
include(joinpath(clima_dir, "docs", "plothelpers.jl"));

# # Determine soil parameters
# Below are the soil component fractions for various soil
# texture classes, from [Cosby1984](@cite) and [Bonan19a](@cite).
# Note that these fractions are volumetric fractions, relative
# to other soil solids, i.e. not including pore space. These are denoted `ν_ss_i`; the CliMA
# Land Documentation uses the symbol `ν_i` to denote the volumetric fraction
# of a soil component `i` relative to the soil, including pore space.
# Volumetric fraction of silt (relative to soil solids) per texture class.
ν_ss_silt_array = FT.(
    [5.0, 12.0, 32.0, 70.0, 39.0, 15.0, 56.0, 34.0, 6.0, 47.0, 20.0] ./ 100.0,
)
# Volumetric fraction of quartz (relative to soil solids) per texture class.
ν_ss_quartz_array = FT.(
    [92.0, 82.0, 58.0, 17.0, 43.0, 58.0, 10.0, 32.0, 52.0, 6.0, 22.0] ./ 100.0,
)
# Volumetric fraction of clay (relative to soil solids) per texture class.
ν_ss_clay_array = FT.(
    [3.0, 6.0, 10.0, 13.0, 18.0, 27.0, 34.0, 34.0, 42.0, 47.0, 58.0] ./ 100.0,
)
# Porosity per texture class.
porosity_array = FT.([
    0.395,
    0.410,
    0.435,
    0.485,
    0.451,
    0.420,
    0.477,
    0.476,
    0.426,
    0.492,
    0.482,
]);
# The soil types that correspond to array elements above are, in order,
# sand, loamy sand, sandy loam, silty loam, loam, sandy clay loam,
# silty clay loam, clay loam, sandy clay, silty clay, and clay.

# Here we choose the soil type to be sandy.
# The soil column is uniform in space and time.
soil_type_index = 1
# "Other minerals" are everything that is not quartz: clay + silt.
ν_ss_minerals =
    ν_ss_clay_array[soil_type_index] + ν_ss_silt_array[soil_type_index]
ν_ss_quartz = ν_ss_quartz_array[soil_type_index]
porosity = porosity_array[soil_type_index];

# This tutorial additionally compares the output of a ClimateMachine simulation with that
# of Supplemental Program 2, Chapter 5, of [Bonan19a](@cite).
# We found this useful as it
# allows us compare results from our code against a published version.

# The simulation code of [Bonan19a](@cite) employs a formalism for the thermal
# conductivity `κ` based on [Johanson1975](@cite). It assumes
# no organic matter, and only requires the volumetric
# fraction of soil solids for quartz and other minerals.
# ClimateMachine employs the formalism of [BallandArp2005](@cite),
# which requires the
# fraction of soil solids for quartz, gravel,
# organic matter, and other minerals. [Dai2019a](@cite) found
# the model of [BallandArp2005](@cite) to better match
# measured soil properties across a range of soil types.

# To compare the output of the two simulations, we set the organic
# matter content and gravel content to zero in the CliMA model.
# The remaining soil components (quartz and other minerals) match between
# the two.
# We also run the simulation for relatively wet soil
# (water content at 80% of porosity). Under these conditions,
# the two formulations for `κ`, though taking different functional forms,
# are relatively consistent.
# The differences between models are important
# for soil with organic material and for soil that is relatively dry.
ν_ss_om = FT(0.0)
ν_ss_gravel = FT(0.0);

# We next calculate a few intermediate quantities needed for the
# determination of the thermal conductivity ([BallandArp2005](@cite)). These include
# the conductivity of the solid material, the conductivity
# of saturated soil, and the conductivity of frozen saturated soil.
κ_quartz = FT(7.7) # W/m/K
κ_minerals = FT(2.5) # W/m/K
κ_om = FT(0.25) # W/m/K
κ_liq = FT(0.57) # W/m/K
κ_ice = FT(2.29); # W/m/K

# The particle density of soil solids in moisture-free soil
# is taken as a constant, across soil types, as in [Bonan19a](@cite).
# This is a good estimate for organic material free soil. The user is referred to
# [BallandArp2005](@cite) for a more general expression.
ρp = FT(2700) # kg/m^3
κ_solid = k_solid(ν_ss_om, ν_ss_quartz, κ_quartz, κ_minerals, κ_om)
κ_sat_frozen = ksat_frozen(κ_solid, porosity, κ_ice)
κ_sat_unfrozen = ksat_unfrozen(κ_solid, porosity, κ_liq);

# The thermal conductivity of dry soil is also required, but this is
# calculated internally using the expression of [BallandArp2005](@cite).

# The volumetric specific heat of dry soil is chosen so as to match Bonan's simulation.
# The user could instead compute this using a volumetric fraction weighted average
# across soil components.
ρc_ds = FT((1 - porosity) * 1.926e06) # J/m^3/K

# Finally, we store the soil-specific parameters and functions
# in a place where they will be accessible to the model
# during integration.
# Bundle the soil parameters computed above for use by the model.
soil_param_functions = SoilParamFunctions(
    FT;
    porosity = porosity,
    ν_ss_gravel = ν_ss_gravel,
    ν_ss_om = ν_ss_om,
    ν_ss_quartz = ν_ss_quartz,
    ρc_ds = ρc_ds,
    ρp = ρp,
    κ_solid = κ_solid,
    κ_sat_unfrozen = κ_sat_unfrozen,
    κ_sat_frozen = κ_sat_frozen,
);

# # Initial and Boundary conditions
# We will be using a [`PrescribedWaterModel`](@ref
# ClimateMachine.Land.PrescribedWaterModel), where the user supplies the augmented
# liquid fraction and ice fraction as functions of space and time. Since we are not
# implementing phase changes, it makes sense to either have entirely liquid or
# frozen water. This tutorial shows liquid water.

# Because the two models for thermal conductivity agree well for wetter soil, we'll
# choose that here. However, the user could also explore how they differ by choosing
# drier soil.

# Please note that if the user uses a mix of liquid and frozen water, that they must
# ensure that the total water content does not exceed porosity.
prescribed_augmented_liquid_fraction = FT(porosity * 0.8)
prescribed_volumetric_ice_fraction = FT(0.0);

# Choose boundary and initial conditions for heat that will not lead to freezing of water:
heat_surface_state = (aux, t) -> eltype(aux)(288.15)
heat_bottom_flux = (aux, t) -> eltype(aux)(0.0)
T_init = (aux) -> eltype(aux)(275.15);

# The boundary value problem in this case, with two spatial derivatives,
# requires a boundary condition at the top of the domain and the bottom.
# Here we choose to specify a bottom flux condition, and a top state condition.
# Our problem is effectively 1D, so we do not need to specify lateral boundary
# conditions.
bc = LandDomainBC(
    bottom_bc = LandComponentBC(soil_heat = Neumann(heat_bottom_flux)),
    surface_bc = LandComponentBC(soil_heat = Dirichlet(heat_surface_state)),
);

# We also need to define a function `init_soil!`, which
# initializes all of the prognostic variables (here, we
# only have `ρe_int`, the volumetric internal energy).
# The initialization is based on user-specified
# initial conditions. Note that the user provides initial
# conditions for heat based on the temperature - `init_soil!` also
# converts between `T` and `ρe_int`.

# Initialize the prognostic state at each node: the (prescribed) water content
# is queried from the water model, and the user-supplied initial temperature
# is converted into the prognostic volumetric internal energy `ρe_int`.
function init_soil!(land, state, aux, localgeo, time)
    param_set = parameter_set(land)
    ϑ_l, θ_i = get_water_content(land.soil.water, aux, state, time)
    θ_l = volumetric_liquid_fraction(ϑ_l, land.soil.param_functions.porosity)
    ρc_ds = land.soil.param_functions.ρc_ds
    # Volumetric heat capacity of the moist soil (dry soil + water + ice).
    ρc_s = volumetric_heat_capacity(θ_l, θ_i, ρc_ds, param_set)

    state.soil.heat.ρe_int = volumetric_internal_energy(
        θ_i,
        ρc_s,
        land.soil.heat.initialT(aux),
        param_set,
    )
end;

# # Create the model structure
soil_water_model = PrescribedWaterModel(
    (aux, t) -> prescribed_augmented_liquid_fraction,
    (aux, t) -> prescribed_volumetric_ice_fraction,
);

soil_heat_model = SoilHeatModel(FT; initialT = T_init);

# The full soil model requires a heat model and a water model, as well as the
# soil parameter functions:
m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model);

# The equations being solved in this tutorial have no sources or sinks:
sources = ();

# Finally, we create the `LandModel`. In more complex land models, this would
# include the canopy, carbon state of the soil, etc.
m = LandModel(
    param_set,
    m_soil;
    boundary_conditions = bc,
    source = sources,
    init_state_prognostic = init_soil!,
);

# # Specify the numerical details
# These include the resolution, domain boundaries, integration time,
# Courant number, and ODE solver.
N_poly = 1
nelem_vert = 100
zmax = FT(0)
zmin = FT(-1)

driver_config = ClimateMachine.SingleStackConfiguration(
    "LandModel",
    N_poly,
    nelem_vert,
    zmax,
    param_set,
    m;
    zmin = zmin,
    numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
);

# In this tutorial, we determine a timestep based on a Courant number (
# also called a Fourier number in the context of the heat equation).
# In short, we can use the parameters of the model (`κ` and the volumetric
# heat capacity), along with the size of
# elements of the grid used for discretizing the PDE, to estimate
# a natural timescale for heat transfer across a grid cell.
# Because we are using an explicit ODE solver, the timestep should
# be a fraction of this in order to resolve the dynamics.
# This allows us to automate, to a certain extent, choosing a value for
# the timestep, even as we switch between soil types.

# Compute the simulation timestep from a target Courant (Fourier) number:
# evaluate the diffusive Courant number with a unit timestep `Δt = 1`,
# then rescale so the realized Courant number equals `Courant_number`.
function calculate_dt(dg, model::LandModel, Q, Courant_number, t, direction)
    Δt = one(eltype(Q))
    CFL = DGMethods.courant(diffusive_courant, dg, model, Q, Δt, t, direction)
    return Courant_number / CFL
end

# Pointwise diffusive (Fourier) number `Δt κ / (Δx² ρc)` for the soil heat
# equation, with `κ` the Balland-Arp thermal conductivity evaluated at the
# local water and ice content. The dry-soil heat capacity `ρc_ds` is used in
# the denominator: it is a lower bound on the full volumetric heat capacity,
# so the diffusivity estimate is an upper bound and the timestep derived from
# it is conservative. (A `ρc_s = volumetric_heat_capacity(...)` value was
# previously computed here but never used; that dead statement is removed.)
function diffusive_courant(
    m::LandModel,
    state::Vars,
    aux::Vars,
    diffusive::Vars,
    Δx,
    Δt,
    t,
    direction,
)
    param_set = parameter_set(m)
    soil = m.soil
    ϑ_l, θ_i = get_water_content(soil.water, aux, state, t)
    θ_l = volumetric_liquid_fraction(ϑ_l, soil.param_functions.porosity)
    κ_dry = k_dry(param_set, soil.param_functions)
    S_r = relative_saturation(θ_l, θ_i, soil.param_functions.porosity)
    kersten = kersten_number(θ_i, S_r, soil.param_functions)
    κ_sat = saturated_thermal_conductivity(
        θ_l,
        θ_i,
        soil.param_functions.κ_sat_unfrozen,
        soil.param_functions.κ_sat_frozen,
    )
    κ = thermal_conductivity(κ_dry, kersten, κ_sat)
    ρc_ds = soil.param_functions.ρc_ds
    return Δt * κ / (Δx * Δx * ρc_ds)
end

t0 = FT(0)
timeend = FT(60 * 60 * 3)
Courant_number = FT(0.5) # much bigger than this leads to domain errors

solver_config = ClimateMachine.SolverConfiguration(
    t0,
    timeend,
    driver_config;
    Courant_number = Courant_number,
    CFL_direction = VerticalDirection(),
);

# # Run the integration
ClimateMachine.invoke!(solver_config);

# Query the final nodal state for plotting.
state_types = (Prognostic(), Auxiliary())
dons = dict_of_nodal_states(solver_config, state_types; interp = true)

# # Plot results and comparison data from [Bonan19a](@cite)
z = get_z(solver_config.dg.grid; rm_dupes = true);
T = dons["soil.heat.T"]
dons["soil.heat.T"]; plot( T, z, label = "ClimateMachine", ylabel = "z (m)", xlabel = "T (K)", title = "Heat transfer in sand", ) plot!(T_init.(z), z, label = "Initial condition") filename = "bonan_heat_data.csv" bonan_dataset = ArtifactWrapper( @__DIR__, isempty(get(ENV, "CI", "")), "bonan_soil_heat", ArtifactFile[ArtifactFile( url = "https://caltech.box.com/shared/static/99vm8q8tlyoulext6c35lnd3355tx6bu.csv", filename = filename, ),], ) bonan_dataset_path = get_data_folder(bonan_dataset) data = joinpath(bonan_dataset_path, filename) ds_bonan = readdlm(data, ',') bonan_T = reverse(ds_bonan[:, 2]) bonan_z = reverse(ds_bonan[:, 1]) bonan_T_continuous = Spline1D(bonan_z, bonan_T) bonan_at_clima_z = bonan_T_continuous.(z) plot!(bonan_at_clima_z, z, label = "Bonan simulation") plot!(legend = :bottomleft) savefig("thermal_conductivity_comparison.png") # ![](thermal_conductivity_comparison.png) # The plot shows that the temperature at the top of the # soil is gradually increasing. This is because the surface # temperature is held fixed at a value larger than # the initial temperature. If we ran this for longer, # we would see that the bottom of the domain would also # increase in temperature because there is no heat # leaving the bottom (due to zero heat flux specified in # the boundary condition). # # References # - [Bonan19a](@cite) # - [Johanson1975](@cite) # - [BallandArp2005](@cite) # - [Dai2019a](@cite) # - [Cosby1984](@cite) ================================================ FILE: tutorials/Land/Soil/PhaseChange/freezing_front.jl ================================================ # # Modeling a freezing front in unsaturated soil # Before reading this tutorial, # we recommend that you look over the coupled energy # and water [tutorial](../Coupled/equilibrium_test.md). 
# That tutorial showed how to solve the heat equation for soil volumetric # internal energy `ρe_int`, simultaneously # with Richards equation for volumetric liquid water fraction `ϑ_l`, assuming zero # volumetric ice fraction `θ_i` for all time, everywhere in the domain[^a]. # In this example, we add in a source term to the right hand side for both `θ_i` # and `ϑ_l` which models freezing and thawing and conserves water mass during the process. # The equations are # `` # \frac{∂ ρe_{int}}{∂ t} = ∇ ⋅ κ(θ_l, θ_i; ν, ...) ∇T + ∇ ⋅ ρe_{int_{liq}} K (T,θ_l, θ_i; ν, ...) \nabla h( ϑ_l, z; ν, ...) # `` # `` # \frac{ ∂ ϑ_l}{∂ t} = ∇ ⋅ K (T,θ_l, θ_i; ν, ...) ∇h( ϑ_l, z; ν, ...) -\frac{F_T}{ρ_l} # `` # `` # \frac{ ∂ θ_i}{∂ t} = \frac{F_T}{ρ_i} # `` # Here # ``t`` is the time (s), # ``z`` is the location in the vertical (m), # ``ρe_{int}`` is the volumetric internal energy of the soil (J/m^3), # ``T`` is the temperature of the soil (K), # ``κ`` is the thermal conductivity (W/m/K), # ``ρe_{int_{liq}}`` is the volumetric internal energy of liquid water (J/m^3), # ``K`` is the hydraulic conductivity (m/s), # ``h`` is the hydraulic head (m), # ``ϑ_l`` is the augmented volumetric liquid water fraction, # ``θ_i`` is the volumetric ice fraction, # ``ν, ...`` denotes parameters relating to soil type, such as porosity, and # ``F_T`` is the freeze-thaw term. # To begin, we will show how to implement adding in this source term. After the results are obtained, # we will [explain](#Discussion-and-Model-Explanation) how our model parameterizes this effect and # compare the results with some analytic expections. # We solve these equations in an effectively 1-d domain with ``z ∈ [-0.2,0]``, # and with the following boundary and initial conditions: # ``- κ ∇T(t, z = 0) = 28 W/m^2/K (T - 267.15K) ẑ`` # ``- κ ∇T(t, z= -0.2) = 0 ẑ `` # `` T(t = 0, z) = 279.85 K`` # ``- K ∇h(t, z = 0) = 0 ẑ `` # `` -K ∇h(t, z = -0.2) = 0 ẑ`` # `` ϑ_l(t = 0, z) = 0.33 ``. 
# The problem setup and soil properties are chosen to match the lab experiment of [Mizoguchi1990](@cite), as detailed in [Hansson2004](@cite) and [DallAmico2011](@cite).

# # Import necessary modules
# External (non - CliMA) modules
using MPI
using OrderedCollections
using StaticArrays
using Statistics
using Test
using DelimitedFiles
using Plots

# CliMA Parameters
using CLIMAParameters
using CLIMAParameters.Planet: ρ_cloud_liq
using CLIMAParameters.Planet: ρ_cloud_ice

# ClimateMachine modules
using ClimateMachine
using ClimateMachine.Land
using ClimateMachine.Land.SoilWaterParameterizations
using ClimateMachine.Land.SoilHeatParameterizations
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.SystemSolvers
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state
using ArtifactWrappers

# # Preliminary set-up
# Get the parameter set, which holds constants used across CliMA models.
# (This struct/const pair was previously also declared a second time,
# wedged between the `using` statements above; the duplicate is removed.)
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet();
# Initialize and pick a floating point precision:
ClimateMachine.init()
const FT = Float64;
# Load plot helpers:
const clima_dir = dirname(dirname(pathof(ClimateMachine)));
include(joinpath(clima_dir, "docs", "plothelpers.jl"));

# # Simulation specific parameters
N_poly = 1
nelem_vert = 20
zmax = FT(0)
zmin = FT(-0.2)
t0 = FT(0)
dt = FT(6)
timeend = FT(3600 * 50)
n_outputs = 50
every_x_simulation_time = ceil(Int, timeend / n_outputs)
# Uniform vertical grid spacing, used by the phase-change source term below.
Δ = abs(zmin - zmax) / FT(nelem_vert);

# # Soil properties.
# All are given in mks units.
ν = FT(0.535)
θ_r = FT(0.05)
S_s = FT(1e-3)
Ksat = FT(3.2e-6)
# van Genuchten retention-curve parameters.
vg_α = 1.11
vg_n = 1.48;

# Volumetric fractions of soil solids (relative to solids, not pore space).
ν_ss_quartz = FT(0.7)
ν_ss_minerals = FT(0.0)
ν_ss_om = FT(0.3)
ν_ss_gravel = FT(0.0);

# Thermal conductivities of the soil constituents, W/m/K.
κ_quartz = FT(7.7)
κ_minerals = FT(2.4)
κ_om = FT(0.25)
κ_liq = FT(0.57)
κ_ice = FT(2.29);

# Derived conductivities: solids, saturated-frozen, saturated-unfrozen.
κ_solid = k_solid(ν_ss_om, ν_ss_quartz, κ_quartz, κ_minerals, κ_om)
κ_sat_frozen = ksat_frozen(κ_solid, ν, κ_ice)
κ_sat_unfrozen = ksat_unfrozen(κ_solid, ν, κ_liq);

# Particle density (kg/m^3) and dry-soil volumetric heat capacity (J/m^3/K).
ρp = FT(3200)
ρc_ds = FT((1 - ν) * 2.3e6);

soil_param_functions = SoilParamFunctions(
    FT;
    porosity = ν,
    ν_ss_gravel = ν_ss_gravel,
    ν_ss_om = ν_ss_om,
    ν_ss_quartz = ν_ss_quartz,
    ρc_ds = ρc_ds,
    ρp = ρp,
    κ_solid = κ_solid,
    κ_sat_unfrozen = κ_sat_unfrozen,
    κ_sat_frozen = κ_sat_frozen,
    water = WaterParamFunctions(FT; Ksat = Ksat, S_s = S_s, θ_r = θ_r),
);

# # Build the model
# Initial and Boundary conditions. The default initial condition for
# `θ_i` is zero everywhere, so we don't modify that. Furthermore, since
# the equation for `θ_i` does not involve spatial derivatives, we don't need
# to supply boundary conditions for it. Note that Neumann fluxes, when chosen,
# are specified by giving the magnitude of the normal flux *into* the domain.
# In this case, the normal vector at the surface n̂ = ẑ. Internally, we multiply
# the flux magnitude by -n̂.
zero_flux = (aux, t) -> eltype(aux)(0.0)
# Surface heat flux proportional to (T - 267.15 K), i.e. a Robin-type forcing.
surface_heat_flux =
    (aux, t) -> eltype(aux)(-28) * (aux.soil.heat.T - eltype(aux)(273.15 - 6))
T_init = aux -> eltype(aux)(279.85)
ϑ_l0 = (aux) -> eltype(aux)(0.33);

bc = LandDomainBC(
    bottom_bc = LandComponentBC(
        soil_heat = Neumann(zero_flux),
        soil_water = Neumann(zero_flux),
    ),
    surface_bc = LandComponentBC(
        soil_heat = Neumann(surface_heat_flux),
        soil_water = Neumann(zero_flux),
    ),
);

# Create the [`SoilWaterModel`](@ref ClimateMachine.Land.SoilWaterModel),
# [`SoilHeatModel`](@ref ClimateMachine.Land.SoilHeatModel),
# and the [`SoilModel`](@ref ClimateMachine.Land.SoilModel) instances.
# Note that we are allowing for the hydraulic conductivity to be affected by
# both temperature and ice fraction by choosing the following
# [`viscosity_factor`](@ref ClimateMachine.Land.SoilWaterParameterizations.viscosity_factor)
# and [`impedance_factor`](@ref ClimateMachine.Land.SoilWaterParameterizations.impedance_factor).
# To turn these off - the default - just remove these lines. These factors are explained more
# [here](../Water/hydraulic_functions.md).
soil_water_model = SoilWaterModel(
    FT;
    viscosity_factor = TemperatureDependentViscosity{FT}(),
    moisture_factor = MoistureDependent{FT}(),
    impedance_factor = IceImpedance{FT}(Ω = 7.0),
    hydraulics = vanGenuchten(FT; α = vg_α, n = vg_n),
    initialϑ_l = ϑ_l0,
)

soil_heat_model = SoilHeatModel(FT; initialT = T_init);

m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model);

# Create the source term instance. Our phase change model requires
# knowledge of the vertical spacing, so we pass
# that information in via an attribute of the
# [`PhaseChange`](@ref ClimateMachine.Land.PhaseChange) structure.
freeze_thaw_source = PhaseChange{FT}(Δz = Δ);

# Sources are added as elements of a list of sources. Here we just add freezing
# and thawing.
sources = (freeze_thaw_source,);

# Next, we define the required `init_soil!` function, which takes the user
# specified functions of space for `T_init` and `ϑ_l0` and initializes the state
# variables of volumetric internal energy and augmented liquid fraction. This requires
# a conversion from `T` to `ρe_int`.
# Initialize the prognostic state: water content from the user-supplied
# profiles, and volumetric internal energy converted from the initial
# temperature via the volumetric heat capacity.
function init_soil!(land, state, aux, localgeo, time)
    myFT = eltype(state)
    ϑ_l = myFT(land.soil.water.initialϑ_l(aux))
    θ_i = myFT(land.soil.water.initialθ_i(aux))
    state.soil.water.ϑ_l = ϑ_l
    state.soil.water.θ_i = θ_i
    param_set = land.param_set

    θ_l = volumetric_liquid_fraction(ϑ_l, land.soil.param_functions.porosity)
    ρc_ds = land.soil.param_functions.ρc_ds
    # Volumetric heat capacity of the moist soil (dry soil + water + ice).
    ρc_s = volumetric_heat_capacity(θ_l, θ_i, ρc_ds, param_set)

    state.soil.heat.ρe_int = volumetric_internal_energy(
        θ_i,
        ρc_s,
        land.soil.heat.initialT(aux),
        param_set,
    )
end;

# Lastly, package it all up in the `LandModel`:
m = LandModel(
    param_set,
    m_soil;
    boundary_conditions = bc,
    source = sources,
    init_state_prognostic = init_soil!,
);

# # Build the simulation domain, solver, and callbacks
driver_config = ClimateMachine.SingleStackConfiguration(
    "LandModel",
    N_poly,
    nelem_vert,
    zmax,
    param_set,
    m;
    zmin = zmin,
    numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
);

solver_config =
    ClimateMachine.SolverConfiguration(t0, timeend, driver_config, ode_dt = dt);
dg = solver_config.dg
Q = solver_config.Q

# Store a snapshot of the nodal state (including gradient fluxes) and the
# simulation time at each output interval; index 1 holds t = 0.
state_types = (Prognostic(), Auxiliary(), GradientFlux())
dons_arr = Dict[dict_of_nodal_states(solver_config, state_types; interp = true)]
time_data = FT[0]
callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do
    dons = dict_of_nodal_states(solver_config, state_types; interp = true)
    push!(dons_arr, dons)
    push!(time_data, gettime(solver_config.solver))
    nothing
end;

# # Run the simulation, and plot the output
ClimateMachine.invoke!(solver_config; user_callbacks = (callback,));

z = get_z(solver_config.dg.grid; rm_dupes = true);
output_dir = @__DIR__;
mkpath(output_dir);

# Plot snapshots at t = 0, 15, 30, 45 hours (indices into hourly output).
export_plot(
    z,
    time_data[[1, 16, 31, 46]] ./ (60 * 60),
    dons_arr[[1, 16, 31, 46]],
    ("soil.water.ϑ_l",),
    joinpath(output_dir, "moisture_plot.png");
    xlabel = "ϑ_l",
    ylabel = "z (m)",
    time_units = "hrs ",
)
# ![](moisture_plot.png)

export_plot(
    z,
    time_data[[1, 16, 31, 46]] ./ (60 * 60),
    dons_arr[[1, 16, 31, 46]],
("soil.water.θ_i",), joinpath(output_dir, "ice_plot.png"); xlabel = "θ_i", ylabel = "z (m)", time_units = "hrs ", legend = :bottomright, ) # ![](ice_plot.png) export_plot( z, time_data[[1, 16, 31, 46]] ./ (60 * 60), dons_arr[[1, 16, 31, 46]], ("soil.heat.T",), joinpath(output_dir, "T_plot.png"); xlabel = "T (K)", ylabel = "z (m)", time_units = "hrs ", ) # ![](T_plot.png) # # Comparison to data # This data was obtained by us from the figures of [Hansson2004](@cite), but was originally obtained # by [Mizoguchi1990](@cite). No error bars were reported, and we haven't quantified the error in our # estimation of the data from images. dataset = ArtifactWrapper( @__DIR__, isempty(get(ENV, "CI", "")), "mizoguchi", ArtifactFile[ArtifactFile( url = "https://caltech.box.com/shared/static/3xbo4rlam8u390vmucc498cao6wmqlnd.csv", filename = "mizoguchi_all_data.csv", ),], ); dataset_path = get_data_folder(dataset); data = joinpath(dataset_path, "mizoguchi_all_data.csv") ds = readdlm(data, ',') hours = ds[:, 1][2:end] vwc = ds[:, 2][2:end] ./ 100.0 depth = ds[:, 3][2:end] mask_12h = hours .== 12 mask_24h = hours .== 24 mask_50h = hours .== 50; plot_12h = scatter(vwc[mask_12h], -depth[mask_12h], label = "", color = "purple") plot!( dons_arr[13]["soil.water.θ_i"] .+ dons_arr[13]["soil.water.ϑ_l"], z, label = "", color = "green", ) plot!(title = "12h") plot!(xlim = [0.2, 0.55]) plot!(xticks = [0.2, 0.3, 0.4, 0.5]) plot!(ylabel = "Depth (m)"); plot_24h = scatter(vwc[mask_24h], -depth[mask_24h], label = "Data", color = "purple") plot!( dons_arr[25]["soil.water.θ_i"] .+ dons_arr[25]["soil.water.ϑ_l"], z, label = "Simulation", color = "green", ) plot!(title = "24h") plot!(legend = :bottomright) plot!(xlim = [0.2, 0.55]) plot!(xticks = [0.2, 0.3, 0.4, 0.5]); plot_50h = scatter(vwc[mask_50h], -depth[mask_50h], label = "", color = "purple") plot!( dons_arr[51]["soil.water.θ_i"] .+ dons_arr[51]["soil.water.ϑ_l"], z, label = "", color = "green", ) plot!(title = "50h") plot!(xlim = [0.2, 0.55]) 
plot!(xticks = [0.2, 0.3, 0.4, 0.5]); plot(plot_12h, plot_24h, plot_50h, layout = (1, 3)) plot!(xlabel = "θ_l+θ_i") savefig("mizoguchi_data_comparison.png") # ![](mizoguchi_data_comparison.png) # # Discussion and Model Explanation # To begin, let's observe that the freeze thaw source term alone conserves water mass, as # it satisfies # `` # ρ_l \partial_tϑ_l + ρ_i \partial_tθ_i = -F_T + F_T = 0 # `` # Next, we describe how we define `F_T`. # The Clausius-Clapeyron (CC) equation defines a pressure-temperature curve along which two # phases can co-exist. It assumes that the phases are at equal temperature and pressures. # For water in soil, however, the liquid water experiences pressure `ρ_l g ψ`, where # `ψ` is the matric potential. A more general form of the CC equation allows for different # pressures in the two phases. Usually the ice pressure is taken to be zero, which is reasonable # for unsaturated freezing soils. In saturated soils, freezing can lead to heaving of the soil which # we do not model. After that assumption is made, we obtain that, below freezing (``T < T_f``) # `` # \frac{dp_l}{ρ_l} = L_f \frac{dT}{T}, # `` # or # `` # p_l = p_{l,0} + L_f ρ_l \frac{T-T_f}{T_f} \mathcal{H}(T_f-T) # `` # where we have assumed that assumed `T` is near the freezing point, and then # performed a Taylor explansion of the logarithm, # and we are ignoring the freezing point depression, which is small (less than one degree) for # non-clay soils. What we have sketched is further explained in [DallAmico2011](@cite) and [KurylykWatanabe2013](@cite). # What this implies is that above the freezing point, the pressure is equal to ``p_{l,0}``, # which is independent of temperature. Once the temperature drops below the freezing point, # the pressure drops. 
Since prior to freezing, the pressure ``p_{l,0}`` is equal to # `ρ_l g ψ(θ_l)`, water undergoing freezing alone (without flowing) should satisfy ([DallAmico2011](@cite)): # `` # p_{l,0} = ρ_l g ψ(θ_l+ρ_iθ_i/ρ_l) # `` # where `ψ` is the matric potential function of van Genuchten. At each step, we know both # the water and ice contents, as well as the temperature, and can then solve for # `` # θ_{l}^* = (ν-θ_r) ψ^{-1}(p_l/(ρ_l g)) + θ_r. # `` # For freezing, the freeze thaw function `F_T` is equal to # `` # F_T = \frac{1}{τ} ρ_l (θ_l-θ_{l}^*) \mathcal{H}(T_f-T) \mathcal{H}(θ_l-θ_{l}^*) # `` # which brings the `θ_l` to a value which satisfies `p_l = ρ_l g ψ(θ_l)`. # This is why, in our simulation, we see the liquid # water fraction approaches a constant around 0.075 in the frozen region, rather than the residual fraction # of 0.019, or 0. This behavior is observed, for example, in the experiments of [Watanabe2011](@cite). # Although this approach may indicate that we should replace the pressure head appearing in the # diffusive water flux term in Richards equation ([DallAmico2011](@cite)), we do not do so at present. As such, we may not be modeling # the flow of water around the freezing front properly. However, we still observe cryosuction, which # is the flow of water towards the freezing front, from the unfrozen side. As the water freezes, the liquid # water content drops, # setting up a larger gradient in matric potential across the freezing front, which generates upward flow # against gravity. This is evident because the total water content at the top is larger at the end of the # simulation # than it was at `t=0` (when it was 0.33). # This model differs from others (e.g. [Painter2011](@cite), [Hansson2004](@cite), [DallAmico2011](@cite)) in that it requires us to set a timescale for the phase change, `τ`. # In a first-order # phase transition, the temperature is fixed while the necessary latent heat is either lost or gained by the # system. 
Ignoring # changes in internal energy due to flowing water, we would expect # `` # \partial_t ρe_{int} \approx (ρ_l c_l \partial_t θ_l + ρ_i c_i \partial_t θ_i) (T-T_0) -ρ_i L_f \partial_t θ_i # `` # `` # = [(c_i-c_l) (T-T_0) -L_f]F_T \approx -L_f F_T # `` # or # `` # F_T ∼ \frac{κ|∇²T|}{L_f} ∼\frac{κ}{c̃ Δz²}\frac{c̃ |∂zT| Δz}{L_f} # `` # suggesting # `` # τ ∼ τ_{LTE}\frac{ρ_lL_f (ν-θ_r)}{c̃ |∂zT| Δz} # `` # with # `` # τ_{LTE}= c̃ Δz²/κ # `` # This is the value we use. This seems to work adequately for modeling freezing front propagation and # cryosuction, via comparisons with [Mizoguchi1990](@cite), but we plan to revisit it in the future. For example, # we do not see a strong temperature plateau at the freezing point ([Watanabe2011](@cite)), # which we would expect while the phase change is occuring. Experimentally, this timescale also affects the abruptness of the freezing front, which our simulation softens. # # References # - [Mizoguchi1990](@cite) # - [Hansson2004](@cite) # - [DallAmico2011](@cite) # - [KurylykWatanabe2013](@cite) # - [Watanabe2011](@cite) # - [Painter2011](@cite) # [^a]: # Note that `θ_i` is always treated as a prognostic variable # in the `SoilWaterModel`, but with # zero terms on the RHS unless freezing and thawing is turn on, as demonstrated in this # tutorial. That means that the user could, in principle, set the initial condition to be nonzero # (`θ_i(x, y, z ,t=0) = 0` is the default), which in turn would allow a nonzero `θ_i` # profile to affect things like thermal conductivity, etc, # in a consistent way. However, it would not be enforced that ``θ_l+θ_i \leq ν``, because there would # be no physics linking the liquid and water content to each other, and they are independent # variables in our model. We don't envision this being a common use case. 
================================================ FILE: tutorials/Land/Soil/PhaseChange/phase_change_analytic_test.jl ================================================ # # Comparison to Neumann analytic solution # Before reading this tutorial, # we recommend that you look over the coupled energy # and water [tutorial](../Coupled/equilibrium_test.md) # and the freezing front [tutorial](freezing_front.md). # The former shows how to solve the heat equation for soil volumetric # internal energy `ρe_int` simultaneously # with Richards equation for volumetric liquid water fraction `ϑ_l`, assuming zero # volumetric ice fraction `θ_i` for all time, everywhere in the domain[^a]. # The latter shows how to include freezing and thawing, and explains the freeze thaw model employed # by CliMA Land. This tutorial compares a simulated temperature profile # from a freezing front with an analytic solution. # The analytic solution applies to a freezing front propagating under certain assumptions. It assumes # that there is no water movement, which we mimic by setting `K_sat=0`. It also assumes a semi-infinite # domain, which we approximate by making the domain larger than the extent to which the front propagates. # The solution also assumes that all water freezes. Our model does not satisfy that assumption, as discussed # [here](freezing_front.md#Discussion-and-Model-Explanation), but as we will see, the model still matches the # analytic expectation well in the frozen region. # As such, our set of equations is # `` # \frac{∂ ρe_{int}}{∂ t} = ∇ ⋅ κ(θ_l, θ_i; ν, ...) 
∇T # `` # `` # \frac{ ∂ ϑ_l}{∂ t} = -\frac{F_T}{ρ_l} # `` # `` # \frac{ ∂ θ_i}{∂ t} = \frac{F_T}{ρ_i} # `` # Here # ``t`` is the time (s), # ``z`` is the location in the vertical (m), # ``ρe_{int}`` is the volumetric internal energy of the soil (J/m^3), # ``T`` is the temperature of the soil (K), # ``κ`` is the thermal conductivity (W/m/K), # ``ϑ_l`` is the augmented volumetric liquid water fraction, # ``θ_i`` is the volumetric ice fraction, # ``ν, ...`` denotes parameters relating to soil type, such as porosity, and # ``F_T`` is the freeze-thaw term. # Our domain is effectively 1-d, with ``z ∈ [-3,0]``, # and with the following boundary and initial conditions: # `` T(t, z=0) = 263.15 K`` # ``- κ ∇T(t, z= -3) = 0 ẑ `` # `` T(t = 0, z) = 275.15 K`` # ``- K ∇h(t, z = 0) = 0 ẑ `` # `` -K ∇h(t, z = -3) = 0 ẑ`` # `` ϑ(t = 0, z) = 0.33 ``. # # Import necessary modules # External (non - CliMA) modules using MPI using OrderedCollections using StaticArrays using Statistics using Test using DelimitedFiles using Plots # CliMA Parameters using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using CLIMAParameters.Planet: ρ_cloud_liq using CLIMAParameters.Planet: ρ_cloud_ice using CLIMAParameters.Planet: LH_f0 # ClimateMachine modules using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SoilWaterParameterizations using ClimateMachine.Land.SoilHeatParameterizations using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.SystemSolvers using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state using 
SpecialFunctions
using ArtifactWrappers

# # Preliminary set-up

# The parameter set, which holds constants used across CliMA models, was
# already created above (`const param_set = EarthParameterSet()`); defining
# `EarthParameterSet` and `param_set` a second time would only trigger a
# constant-redefinition warning in Julia, so we simply reuse the existing one.

# Initialize and pick a floating point precision:
ClimateMachine.init()
const FT = Float64;

# # Simulation specific parameters
N_poly = 1                    # polynomial order of the DG approximation
nelem_vert = 40               # number of vertical elements
zmax = FT(0)                  # top of the domain (m)
zmin = FT(-3)                 # bottom of the domain (m)
t0 = FT(0)                    # simulation start time (s)
dt = FT(50)                   # timestep (s)
timeend = FT(3600 * 24 * 20)  # simulation end time: 20 days (s)
n_outputs = 540
every_x_simulation_time = ceil(Int, timeend / n_outputs)
Δ = abs(zmin - zmax) / FT(nelem_vert); # vertical element spacing (m)

# # Soil properties
# All units are mks.
porosity = FT(0.535)
vg_α = 1.11
vg_n = 1.48
Ksat = 0.0 # zero saturated conductivity is the hack that turns off water flow
ν_ss_quartz = FT(0.2)
ν_ss_minerals = FT(0.6)
ν_ss_om = FT(0.2)
ν_ss_gravel = FT(0.0);
κ_quartz = FT(7.7)
κ_minerals = FT(2.5)
κ_om = FT(0.25)
κ_liq = FT(0.57)
κ_ice = FT(2.29)
ρp = FT(2700)
κ_solid = k_solid(ν_ss_om, ν_ss_quartz, κ_quartz, κ_minerals, κ_om)
κ_sat_frozen = ksat_frozen(κ_solid, porosity, κ_ice)
κ_sat_unfrozen = ksat_unfrozen(κ_solid, porosity, κ_liq)
ρc_ds = FT((1 - porosity) * 2.3e6)
soil_param_functions = SoilParamFunctions(
    FT;
    porosity = porosity,
    ν_ss_gravel = ν_ss_gravel,
    ν_ss_om = ν_ss_om,
    ν_ss_quartz = ν_ss_quartz,
    ρc_ds = ρc_ds,
    ρp = ρp,
    κ_solid = κ_solid,
    κ_sat_unfrozen = κ_sat_unfrozen,
    κ_sat_frozen = κ_sat_frozen,
    water = WaterParamFunctions(FT; Ksat = Ksat, S_s = 1e-3),
);

# # Build the model

# Initial and Boundary conditions. The default initial condition for
# `θ_i` is zero everywhere, so we don't modify that. Furthermore, since
# the equation for `θ_i` does not involve spatial derivatives, we don't need
# to supply boundary conditions for it.
zero_flux = (aux, t) -> eltype(aux)(0.0) ϑ_l0 = (aux) -> eltype(aux)(0.33) surface_state = (aux, t) -> eltype(aux)(273.15 - 10.0) T_init = aux -> eltype(aux)(275.15) bc = LandDomainBC( bottom_bc = LandComponentBC( soil_heat = Neumann(zero_flux), soil_water = Neumann(zero_flux), ), surface_bc = LandComponentBC( soil_heat = Dirichlet(surface_state), soil_water = Neumann(zero_flux), ), ); # Create the [`SoilWaterModel`](@ref ClimateMachine.Land.SoilWaterModel), # [`SoilHeatModel`](@ref ClimateMachine.Land.SoilHeatModel), # and the [`SoilModel`](@ref ClimateMachine.Land.SoilModel) instances. # Note that we are still specifying a hydraulics model, because the matric potential # and hydraulic conductivity functions are still evaluated (though they don't affect # the outcome). Setting `Ksat =0` is just a # hack for turning off water flow. soil_water_model = SoilWaterModel( FT; hydraulics = vanGenuchten(FT; α = vg_α, n = vg_n), initialϑ_l = ϑ_l0, ); soil_heat_model = SoilHeatModel(FT; initialT = T_init); m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model); # Create the source term instance. Our phase change model requires # knowledge of the vertical spacing, so we pass # that information in via an attribute of the # [`PhaseChange`](@ref ClimateMachine.Land.PhaseChange) structure. freeze_thaw_source = PhaseChange{FT}(Δz = Δ); # Sources are added as elements of a list of sources. Here we just add freezing # and thawing. sources = (freeze_thaw_source,); # Next, we define the required `init_soil!` function, which takes the user # specified functions of space for `T_init` and `ϑ_l0` and initializes the state # variables of volumetric internal energy and augmented liquid fraction. This requires # a conversion from `T` to `ρe_int`. 
function init_soil!(land, state, aux, localgeo, time) myFT = eltype(state) ϑ_l = myFT(land.soil.water.initialϑ_l(aux)) θ_i = myFT(land.soil.water.initialθ_i(aux)) state.soil.water.ϑ_l = ϑ_l state.soil.water.θ_i = θ_i param_set = land.param_set θ_l = volumetric_liquid_fraction(ϑ_l, land.soil.param_functions.porosity) ρc_ds = land.soil.param_functions.ρc_ds ρc_s = volumetric_heat_capacity(θ_l, θ_i, ρc_ds, param_set) state.soil.heat.ρe_int = volumetric_internal_energy( θ_i, ρc_s, land.soil.heat.initialT(aux), param_set, ) end; # Lastly, package it all up in the `LandModel`: m = LandModel( param_set, m_soil; boundary_conditions = bc, source = sources, init_state_prognostic = init_soil!, ); # # Set up and run the simulation driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ); solver_config = ClimateMachine.SolverConfiguration(t0, timeend, driver_config, ode_dt = dt); state_types = (Prognostic(), Auxiliary(), GradientFlux()) all_data = Dict[dict_of_nodal_states(solver_config, state_types; interp = true)] time_data = FT[0] callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do dons = dict_of_nodal_states(solver_config, state_types; interp = true) push!(all_data, dons) push!(time_data, gettime(solver_config.solver)) nothing end; ClimateMachine.invoke!(solver_config; user_callbacks = (callback,)); z = get_z(solver_config.dg.grid; rm_dupes = true); # # Analytic Solution of Neumann # All details here are taken from [DallAmico2011](@cite) (see also [CarslawJaeger](@cite)), and the reader is referred to that # for further information on the solution. It takes the form of a function # for T(z) on each side of the freezing front interface, which depends on # the thermal properties in that region, and which is also parameterized by # a parameter (ζ), which we show how to solve for below. 
In computing the # thermal properties, we evaluate the conductivity and heat capacity # assuming that all of the water is either in liquid or frozen form, with total # mass proportional to ``θ_{l,0}ρ_l`` (as we have no water flow). # Compute the thermal conductivity and heat capacity in the frozen region - subscript 1. θ_l0 = FT(0.33) kdry = k_dry(param_set, soil_param_functions) ksat = saturated_thermal_conductivity( FT(0.0), θ_l0 * ρ_cloud_liq(param_set) / ρ_cloud_ice(param_set), κ_sat_unfrozen, κ_sat_frozen, ) kersten = kersten_number( θ_l0 * ρ_cloud_liq(param_set) / ρ_cloud_ice(param_set), θ_l0 * ρ_cloud_liq(param_set) / ρ_cloud_ice(param_set) / porosity, soil_param_functions, ) λ1 = thermal_conductivity(kdry, kersten, ksat) c1 = volumetric_heat_capacity( FT(0.0), θ_l0 * ρ_cloud_liq(param_set) / ρ_cloud_ice(param_set), ρc_ds, param_set, ) d1 = λ1 / c1; # Compute the thermal conductivity and heat capacity in the region # with liquid water - subscript 2. ksat = saturated_thermal_conductivity(θ_l0, FT(0.0), κ_sat_unfrozen, κ_sat_frozen) kersten = kersten_number(FT(0.0), θ_l0 / porosity, soil_param_functions) λ2 = thermal_conductivity(kdry, kersten, ksat) c2 = volumetric_heat_capacity(θ_l0, FT(0.0), ρc_ds, param_set) d2 = λ2 / c2; # Initial T and surface T, in Celsius Ti = FT(2) Ts = FT(-10.0); # The solution requires the root of the implicit equation below # (you'll need to install `Roots` via the package manager). # ``` julia # using Roots # function implicit(ζ) # term1 = exp(-ζ^2) / ζ / erf(ζ) # term2 = # -λ2 * sqrt(d1) * (Ti - 0) / # (λ1 * sqrt(d2) * (0 - Ts) * ζ * erfc(ζ * sqrt(d1 / d2))) * # exp(-d1 / d2 * ζ^2) # term3 = # -LH_f0(param_set) * ρ_cloud_liq(param_set) * θ_l0 * sqrt(π) / c1 / # (0 - Ts) # return (term1 + term2 + term3) # end # find_zero(implicit, (0.25,0.27), Bisection()) # ``` # The root is ζ = 0.26447353269809687; # This function plots the analytic solution # and the simulated result, at the output time index # `k`. 
function f(k; ζ = ζ) T = all_data[k]["soil.heat.T"][:] .- 273.15 plot( T, z[:], xlim = [-10.5, 3], ylim = [zmin, zmax], xlabel = "T", label = "simulation", ) t = time_data[k] zf = 2.0 * ζ * sqrt(d1 * t) myz = zmin:0.001:0 spatially_varying = (erfc.(abs.(myz) ./ (zf / ζ / (d1 / d2)^0.5))) ./ erfc(ζ * (d1 / d2)^0.5) mask = abs.(myz) .>= zf plot!( Ti .- (Ti - 0.0) .* spatially_varying[mask], myz[mask], label = "analytic", color = "green", ) spatially_varying = ((erf.(abs.(myz) ./ (zf / ζ)))) ./ erf(ζ) mask = abs.(myz) .< zf plot!( Ts .+ (0.0 - Ts) .* spatially_varying[mask], myz[mask], label = "", color = "green", ) end # Now we will plot this and compare to other methods for modeling phase change without water movement. # These solutions were produced by modifying the Supplemental Program 5:3 from [Bonan19a](@cite). # Excess heat solution: eh_dataset = ArtifactWrapper( @__DIR__, isempty(get(ENV, "CI", "")), "eh", ArtifactFile[ArtifactFile( url = "https://caltech.box.com/shared/static/6xs1r98wk7u1b0xjhpkdvt80q4sh16un.csv", filename = "bonan_data.csv", ),], ); eh_dataset_path = get_data_folder(eh_dataset); eh_data = joinpath(eh_dataset_path, "bonan_data.csv") ds = readdlm(eh_data, ','); # Apparent heat capacity solution: ahc_dataset = ArtifactWrapper( @__DIR__, isempty(get(ENV, "CI", "")), "ahc", ArtifactFile[ArtifactFile( url = "https://caltech.box.com/shared/static/d6xciskzl2djwi03xxfo8wu4obrptjmy.csv", filename = "bonan_data_ahc.csv", ),], ); ahc_dataset_path = get_data_folder(ahc_dataset); ahc_data = joinpath(ahc_dataset_path, "bonan_data_ahc.csv") ds2 = readdlm(ahc_data, ','); k = n_outputs f(k) plot!(ds[:, 2], ds[:, 1], label = "Excess Heat") plot!(ds2[:, 2], ds2[:, 1], label = "Apparent heat capacity") plot!(legend = :bottomleft) savefig("analytic_comparison.png") # ![](analytic_comparison.png) # # References # - [DallAmico2011](@cite) # - [CarslawJaeger](@cite) # - [Bonan19a](@cite) # [^a]: # Note that `θ_i` is always treated as a prognostic variable # in the 
`SoilWaterModel`, but with
# zero terms on the RHS unless freezing and thawing is turned on, as demonstrated in this
# tutorial. That means that the user could, in principle, set the initial condition to be nonzero
# (`θ_i(x, y, z ,t=0) = 0` is the default), which in turn would allow a nonzero `θ_i`
# profile to affect things like thermal conductivity, etc,
# in a consistent way. However, it would not be enforced that ``θ_l+θ_i \leq ν``, because there would
# be no physics linking the liquid and ice water content to each other, and they are independent
# variables in our model. We don't envision this being a common use case.
================================================ FILE: tutorials/Land/Soil/Water/equilibrium_test.jl ================================================
# # Hydrostatic Equilibrium test for Richards Equation

# This tutorial shows how to use `ClimateMachine` code to solve
# Richards equation in a column of soil. We choose boundary
# conditions of zero flux at the top and bottom of the column,
# and then run the simulation long enough to see that the system
# is approaching hydrostatic equilibrium, where the gradient of the
# pressure head is equal and opposite the gradient of the
# gravitational head. Note that the [`SoilWaterModel`](@ref
# ClimateMachine.Land.SoilWaterModel) includes
# a prognostic equation for the volumetric ice fraction,
# as ice is a form of water that must be accounted for to ensure
# water mass conservation. If freezing and thawing are not turned on
# (the default), the amount of ice in the model is zero for all space and time
# (again by default).

# The equations are:
# ``
# \frac{ ∂ ϑ_l}{∂ t} = ∇ ⋅ K (T, ϑ_l, θ_i; ν, ...) ∇h( ϑ_l, z; ν, ...).
# `` # `` # \frac{ ∂ θ_i}{∂ t} = 0 # `` # Here # ``t`` is the time (s), # ``z`` is the location in the vertical (m), # ``T`` is the temperature of the soil (K), # ``K`` is the hydraulic conductivity (m/s), # ``h`` is the hydraulic head (m), # ``ϑ_l`` is the augmented volumetric liquid water fraction, # ``θ_i`` is the volumetric ice fraction, and # ``ν, ...`` denotes parameters relating to soil type, such as porosity. # We will solve this equation in an effectively 1-d domain with ``z ∈ [-10,0]``, # and with the following boundary and initial conditions: # ``- K ∇h(t, z = 0) = 0 ẑ `` # `` -K ∇h(t, z = -10) = 0 ẑ`` # `` ϑ(t = 0, z) = ν-0.001 `` # `` θ_i(t = 0, z) = 0.0. `` # where ``\nu`` is the porosity. # A word about the hydraulic conductivity: please see the # [`hydraulic functions`](./hydraulic_functions.md) tutorial # for options regarding this function. The user can choose to make it depend # on the temperature and the amount of ice in the soil; the default, which we use # here, is that `K` only depends on the liquid moisture content. # Lastly, our formulation of this equation allows for a continuous solution in both # saturated and unsaturated areas, following [Woodward00a](@cite). 
# # Preliminary setup # - Load external packages using MPI using OrderedCollections using StaticArrays using Statistics # - Load CLIMAParameters and ClimateMachine modules using CLIMAParameters struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet() using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SoilWaterParameterizations using ClimateMachine.Mesh.Topologies using ClimateMachine.Mesh.Grids using ClimateMachine.DGMethods using ClimateMachine.DGMethods.NumericalFluxes using ClimateMachine.DGMethods: BalanceLaw, LocalGeometry using ClimateMachine.MPIStateArrays using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using ClimateMachine.VariableTemplates using ClimateMachine.SingleStackUtils using ClimateMachine.BalanceLaws: BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux, vars_state # - Define the float type desired (`Float64` or `Float32`) const FT = Float64; # - Initialize ClimateMachine for CPU ClimateMachine.init(; disable_gpu = true); # Load plot helpers: const clima_dir = dirname(dirname(pathof(ClimateMachine))); include(joinpath(clima_dir, "docs", "plothelpers.jl")); # # Set up the soil model # We want to solve Richards equation alone, without simultaneously # solving the heat equation. Because of that, we choose a # [`PrescribedTemperatureModel`](@ref # ClimateMachine.Land.PrescribedTemperatureModel). # The user can supply a function for temperature, # depending on time and space; if this option is desired, one could also # choose to model the temperature dependence of viscosity, or to drive # a freeze/thaw cycle, for example. If the user simply wants to model # Richards equation for liquid water, the defaults will allow for that. # Here we ignore the effects of temperature and freezing and thawing, # using the defaults. soil_heat_model = PrescribedTemperatureModel(); # Define the porosity, Ksat, and specific storage values for the soil. 
Note # that all values must be given in mks units. The soil parameters chosen # roughly correspond to Yolo light clay, and are stored in # [`SoilParamFunctions`](@ref ClimateMachine.Land.SoilParamFunctions). # Hydrology specific parameters are further organized and stored in # [`WaterParamFunctions`](@ref ClimateMachine.Land.WaterParamFunctions), # with the exception of the hydraulic model and hydraulic conductivity model - # see the [`hydraulics`](./hydraulic_functions.md) tutorial. wpf = WaterParamFunctions(FT; Ksat = 0.0443 / (3600 * 100), S_s = 1e-3); soil_param_functions = SoilParamFunctions(FT; porosity = 0.495, water = wpf); # Define the boundary conditions. The user can specify two conditions, # either at the top or at the bottom, and they can either be Dirichlet # (on `ϑ_l`) or Neumann (on `-K∇h`). Note that fluxes are supplied as # scalars, inside the code they are multiplied by ẑ. surface_flux = (aux, t) -> eltype(aux)(0.0) bottom_flux = (aux, t) -> eltype(aux)(0.0); # Our problem is effectively 1D, so we do not need to specify lateral boundary # conditions. bc = LandDomainBC( bottom_bc = LandComponentBC(soil_water = Neumann(bottom_flux)), surface_bc = LandComponentBC(soil_water = Neumann(surface_flux)), ); # Define the initial state function. The default for `θ_i` is zero. ϑ_l0 = (aux) -> eltype(aux)(0.494); # Create the SoilWaterModel. The defaults are a temperature independent # viscosity, and no impedance factor due to ice. We choose to make the # hydraulic conductivity a function of the moisture content `ϑ_l`, # and employ the vanGenuchten hydraulic model with `n` = 2.0. The van # Genuchten parameter `m` is calculated from `n`, and we use the default # value for `α`. soil_water_model = SoilWaterModel( FT; moisture_factor = MoistureDependent{FT}(), hydraulics = vanGenuchten(FT; n = 2.0), initialϑ_l = ϑ_l0, ); # Create the soil model - the coupled soil water and soil heat models. 
m_soil = SoilModel(soil_param_functions, soil_water_model, soil_heat_model); # We are ignoring sources and sinks here, like runoff or freezing and thawing. sources = (); # Define the function that initializes the prognostic variables. This # in turn calls the functions supplied to `soil_water_model`. function init_soil_water!(land, state, aux, localgeo, time) state.soil.water.ϑ_l = eltype(state)(land.soil.water.initialϑ_l(aux)) state.soil.water.θ_i = eltype(state)(land.soil.water.initialθ_i(aux)) end # Create the land model - in this tutorial, it only includes the soil. m = LandModel( param_set, m_soil; boundary_conditions = bc, source = sources, init_state_prognostic = init_soil_water!, ); # # Specify the numerical configuration and output data. # Specify the polynomial order and vertical resolution. N_poly = 2; nelem_vert = 20; # Specify the domain boundaries. zmax = FT(0); zmin = FT(-10); # Create the driver configuration. driver_config = ClimateMachine.SingleStackConfiguration( "LandModel", N_poly, nelem_vert, zmax, param_set, m; zmin = zmin, numerical_flux_first_order = CentralNumericalFluxFirstOrder(), ); # Choose the initial and final times, as well as a timestep. t0 = FT(0) timeend = FT(60 * 60 * 24 * 36) dt = FT(100); # Create the solver configuration. solver_config = ClimateMachine.SolverConfiguration(t0, timeend, driver_config, ode_dt = dt); # Determine how often you want output. const n_outputs = 6; const every_x_simulation_time = ceil(Int, timeend / n_outputs); # Create a place to store this output. 
state_types = (Prognostic(), Auxiliary(), GradientFlux()) dons_arr = Dict[dict_of_nodal_states(solver_config, state_types; interp = true)] time_data = FT[0] # store time data callback = GenericCallbacks.EveryXSimulationTime(every_x_simulation_time) do dons = dict_of_nodal_states(solver_config, state_types; interp = true) push!(dons_arr, dons) push!(time_data, gettime(solver_config.solver)) nothing end; # # Run the integration ClimateMachine.invoke!(solver_config; user_callbacks = (callback,)); # Get z-coordinate z = get_z(solver_config.dg.grid; rm_dupes = true); # # Create some plots # We'll plot the moisture content vs depth in the soil, as well as # the expected profile of `ϑ_l` in hydrostatic equilibrium. # For `ϑ_l` values above porosity, the soil is # saturated, and the pressure head changes from being equal to the matric # potential to the pressure generated by compression of water and the soil # matrix. The profile can be solved # for analytically by (1) solving for the form that `ϑ_l(z)` must take # in both the saturated and unsaturated zones to satisfy the steady-state # requirement with zero flux boundary conditions, (2) requiring that # at the interface between saturated and unsaturated zones, the water content # equals porosity, and (3) solving for the location of the interface by # requiring that the integrated water content at the end matches that # at the beginning (yielding an interface location of `z≈-0.56m`). 
output_dir = @__DIR__; t = time_data ./ (60 * 60 * 24); plot( dons_arr[1]["soil.water.ϑ_l"], dons_arr[1]["z"], label = string("t = ", string(t[1]), "days"), xlim = [0.47, 0.501], ylabel = "z", xlabel = "ϑ_l", legend = :bottomleft, title = "Equilibrium test", ); plot!( dons_arr[2]["soil.water.ϑ_l"], dons_arr[2]["z"], label = string("t = ", string(t[2]), "days"), ); plot!( dons_arr[7]["soil.water.ϑ_l"], dons_arr[7]["z"], label = string("t = ", string(t[7]), "days"), ); function expected(z, z_interface) ν = 0.495 S_s = 1e-3 α = 2.6 n = 2.0 m = 0.5 if z < z_interface return -S_s * (z - z_interface) + ν else return ν * (1 + (α * (z - z_interface))^n)^(-m) end end plot!(expected.(dons_arr[1]["z"], -0.56), dons_arr[1]["z"], label = "expected"); plot!( 1e-3 .+ dons_arr[1]["soil.water.ϑ_l"], dons_arr[1]["z"], label = "porosity", ); # save the output. savefig(joinpath(output_dir, "equilibrium_test_ϑ_l_vG.png")) # ![](equilibrium_test_ϑ_l_vG.png) # # References # - [Woodward00a](@cite) ================================================ FILE: tutorials/Land/Soil/Water/hydraulic_functions.jl ================================================ # # Hydraulic functions # This tutorial shows how to specify the hydraulic functions # used in Richard's equation. In particular, # we show how to choose the formalism for matric potential and hydraulic # conductivity, and how to make the hydraulic conductivity account for # the presence of ice as well as the temperature dependence of the # viscosity of liquid water. # # Preliminary setup # External modules using Plots # ClimateMachine modules using ClimateMachine using ClimateMachine.Land using ClimateMachine.Land.SoilWaterParameterizations FT = Float32; # # Specifying a hydraulics model # ClimateMachine's Land model allows the user to pick between two hydraulics models, # that of van Genuchten [vanGenuchten1980](@cite) or that of Brooks and Corey, see [BrooksCorey1964](@cite) or [Corey1977](@cite). 
The # same model is consistently used for the matric potential # and hydraulic conductivity. # The van Genuchten model requires two free parameters, `α` and `n`. # A third parameter, `m`, is computed from `n`. Of these, only `α` carries # units, of inverse meters. The Brooks and Corey model also uses # two free parameters, `ψ_b`, the magnitude of the matric potential at saturation, # and a constant `M`. `ψ_b` carries units of meters. These parameter sets are stored in # either the [`vanGenuchten`](@ref ClimateMachine.Land.SoilWaterParameterizations.vanGenuchten) or the # [`BrooksCorey`](@ref ClimateMachine.Land.SoilWaterParameterizations.BrooksCorey) # hydraulics model structures (more details below). These parameters are enough to compute the matric potential. # The hydraulic conductivity # requires an additional parameter, `Ksat` (m/s), which is the hydraulic conductivity # in saturated soil. This parameter is # not stored in the hydraulics model, but rather as part of the # [`WaterParamFunctions`](@ref ClimateMachine.Land.WaterParamFunctions), which stores # other parameters needed for the soil water modeling. # Below we show how to create two concrete examples of these hydraulics models, # for sandy loam ([Bonan19a](@cite)). Note that the parameters chosen are a function of soil type, # and that the parameters are converted to type `FT` internally. vg_α = 7.5 # m^-1 vg_n = 1.89 hydraulics = vanGenuchten(FT; α = vg_α, n = vg_n); ψ_sat = 0.218 # m Mval = 0.2041 hydraulics_bc = BrooksCorey(FT; ψb = ψ_sat, m = Mval); # # Matric Potential # The matric potential `ψ` reflects the negative pressure of water # in unsaturated soil. The negative pressure (suction) of water arises # because of adhesive forces between water and soil. # The van Genuchten expression for matric potential is # `` # ψ = -\frac{1}{α} S_l^{-1/(nm)}\times (1-S_l^{1/m})^{1/n}, # `` # and the Brooks and Corey expression is # `` # ψ = -ψ_b S_l^{-M}. 
# `` # Here `S_l` is the effective saturation of liquid water, `θ_l/ν`, where `ν` is # porosity of the soil. We generally neglect the residual pore space in the CliMA model, # but the user can set the parameter in the # [`WaterParamFunctions`](@ref ClimateMachine.Land.WaterParamFunctions) structure if it is # desired. # In the CliMA code, we use [multiple dispatch](https://en.wikipedia.org/wiki/Multiple_dispatch). # With multiple dispatch, a function can have many # ways of executing (called methods), depending on the *type* of the # variables passed in. A simple example of multiple dispatch is the division operation. # Integer division takes two numbers as input, and returns an integer - ignoring the decimal. # Float division takes two numbers as input, and returns a floating point number, including the decimal. # In Julia, we might write these as: # ```julia # function division(a::Int, b::Int) # return floor(Int, a/b) # end # ``` # ```julia # function division(a::Float64, b::Float64) # return a/b # end # ``` # We can see that `division` is now a function with two methods. # ```julia # julia> division # division (generic function with 2 methods) # ``` # Now, using the same function signature, we can carry out integer # division or floating point division, depending on the types of the # arguments: # ```julia # julia> division(1,2) # 0 # # julia> division(1.0,2.0) # 0.5 # ``` # Here are more pertinent examples: # Based on our choice of `FT = Float32`, # ```julia # julia> typeof(hydraulics) # vanGenuchten{Float32,Float32,Float32,Float32} # ``` # but meanwhile, # ```julia # julia> typeof(hydraulics_bc) # BrooksCorey{Float32,Float32,Float32} # ``` # The function `matric_potential` will execute different methods # depending on if we pass a hydraulics model of type `vanGenuchten` or # `BrooksCorey`. In both cases, it will return the correct value # for `ψ`. 
# Let's plot the matric potential as a function of the effective saturation `S_l = θ_l/ν`, # which can range from zero to one. S_l = FT.(0.01:0.01:0.99) ψ = matric_potential.(Ref(hydraulics), S_l) ψ_bc = matric_potential.(Ref(hydraulics_bc), S_l) plot( S_l, log10.(-ψ), xlabel = "effective saturation", ylabel = "Log10(|ψ|)", label = "van Genuchten", ) plot!(S_l, log10.(-ψ_bc), label = "Brooks and Corey") savefig("bc_vg_matric_potential.png") # ![](bc_vg_matric_potential.png) # The steep slope in # `ψ` near saturated and completely dry soil are part of the reason # why Richard's equation is such a challenging numerical problem. # # Hydraulic conductivity # The hydraulic conductivity is a more complex function than the matric potential, # as it depends on the temperature of the water, the volumetric ice fraction, and # the volumetric liquid water fraction. It also depends on the hydraulics model # chosen. # We represent the hydraulic conductivity `K` as the product of four factors: # `Ksat`, an impedance factor (which accounts for the effect of ice on conductivity) # a viscosity factor (which accounts for the effect of temperature on the # viscosity of liquid water, and how that in turn affects conductivity) # and a moisture factor (which accounts for the effect of liquid water, and is determined by the hydraulics model). # We are going to calculate `K = Ksat × viscosity factor × impedance factor × moisture factor`. # In the code, each of these factors is # computed by a function with multiple methods, except for `Ksat`. # Like we defined new type # classes for `vanGenuchten` and `BrooksCorey`, we also created new type classes # for the impedance choice, the viscosity choice, and the moisture choice. # The function [`viscosity_factor`](@ref ClimateMachine.Land.SoilWaterParameterizations.viscosity_factor) # takes as arguments the temperature of the soil and the # viscosity model desired, and returns the factor `k_v` by which the hydraulic conductivity is scaled. 
# One option is to account for this effect: # `` # k_v = e^{γ (T-T_{\rm ref})} # `` # where γ = 0.0264/K and ``T_{\rm ref}`` = 288K. # For example, at the freezing point of water, using the default values # for γ and T_ref, viscosity reduces the conductivity by a third: viscous_effect_model = TemperatureDependentViscosity{FT}(); viscosity_factor(viscous_effect_model, FT(273.15)) # The other option is to ignore this effect: # `` # k_v = 1 # `` # This is the default approach. no_viscous_effect_model = ConstantViscosity{FT}(); viscosity_factor(no_viscous_effect_model, FT(273.15)) # Very similarly, the function # [`impedance_factor`](@ref ClimateMachine.Land.SoilWaterParameterizations.impedance_factor) # takes as arguments the liquid water and ice # volumetric fractions in the soil, as well as the impedance model being used, and returns # the factor `k_i` by which the hydraulic conductivity is scaled. # One option is to account for this effect: # `` # k_i = 10^{-Ω f_i}, # `` # where `Ω = 7` is an empirical factor and # `f_i` is the ratio of the volumetric # ice fraction to total volumetric water fraction ([Lundin1990](@cite)). # For example, with ``\theta_i = \theta_l``, or f_i = 0.5, ice reduces the conductivity by over 1000x. impedance_effect_model = IceImpedance{FT}(); impedance_factor(impedance_effect_model, FT(0.5)) # The other option is to ignore this effect: # `` # k_i = 1 # `` # This is the default approach. no_impedance_effect_model = NoImpedance{FT}(); impedance_factor(no_impedance_effect_model, FT(0.5)) # As for the moisture dependence of hydraulic conductivity, it can also be either # independent of moisture, or dependent on moisture. If it is dependent on moisture, # the specific function evaluated is dictated by the hydraulics model. 
# The [`moisture_factor`](@ref ClimateMachine.Land.SoilWaterParameterizations.moisture_factor) # for the van Genuchten model is (denoting it as ``k_m``) # `` # k_m = \sqrt{S_l}[1-(1-S_l^{1/m})^m]^2, # `` # for ``S_l < 1``, # and for the Brooks and Corey model it is # `` # k_m = S_l^{2M+3}, # `` # also for ``S_l<1``. When ``S_l\geq 1``, ``k_m = 1`` for each model. # Let's put all these factors together now. Below # we choose additional parameters, consistent with the hydraulics parameters # for sandy loam ([Bonan19a](@cite)), and show how hydraulic conductivity varies with # liquid water content, in the case without ice impedance or temperature effects. Ksat = FT(4.42 / (3600 * 100)) T = FT(0.0) f_i = FT(0.0) K = hydraulic_conductivity.( Ref(Ksat), Ref(impedance_factor(NoImpedance{FT}(), f_i)), Ref(viscosity_factor(ConstantViscosity{FT}(), T)), moisture_factor.(Ref(MoistureDependent{FT}()), Ref(hydraulics), S_l), ); # Let's also compute `K` when we include the effects of temperature # and ice on the hydraulic conductivity. # In the cases where a # [`TemperatureDependentViscosity`](@ref ClimateMachine.Land.SoilWaterParameterizations.TemperatureDependentViscosity) # or # [`IceImpedance`](@ref ClimateMachine.Land.SoilWaterParameterizations.IceImpedance) # type is passed, the correct factors are calculated, # based on the temperature `T` and volumetric ice fraction `θ_i`. T = FT(273.15) S_i = FT(0.1); # = θ_i/ν # The total volumetric water fraction cannot # exceed unity, so the effective liquid water saturation # should have a max of 1-S_i. 
S_l_accounting_for_ice = FT.(0.01:0.01:(0.99 - S_i)) f_i = S_i ./ (S_l_accounting_for_ice .+ S_i) K_w_factors = hydraulic_conductivity.( Ref(Ksat), impedance_factor.(Ref(NoImpedance{FT}()), f_i), Ref(viscosity_factor(ConstantViscosity{FT}(), T)), moisture_factor.( Ref(MoistureDependent{FT}()), Ref(hydraulics), S_l_accounting_for_ice, ), ); plot( S_l, log10.(K), xlabel = "total effective saturation, (θ_i+θ_l)/ν", ylabel = "Log10(K)", label = "Base case", legend = :bottomright, ) plot!( S_l_accounting_for_ice .+ S_i, log10.(K_w_factors), label = "θ_i = 0.1, T = 273.15", ) savefig("T_ice_K.png") # ![](T_ice_K.png) # If the user is not considering phase transitions # and does not add in Freeze/Thaw source terms, the default is for zero # ice in the model, for all time and space. In this case the ice impedance # factor evaluates to 1 regardless of which type is passed. # # Other features # The user also has the choice of making the conductivity constant by choosing # [`MoistureIndependent`](@ref ClimateMachine.Land.SoilWaterParameterizations.MoistureIndependent) # along with # [`ConstantViscosity`](@ref ClimateMachine.Land.SoilWaterParameterizations.ConstantViscosity) # and # [`NoImpedance`](@ref ClimateMachine.Land.SoilWaterParameterizations.NoImpedance). # This is useful for debugging! no_moisture_dependence = MoistureIndependent{FT}() K_constant = hydraulic_conductivity.( Ref(Ksat), Ref(FT(1.0)), Ref(FT(1.0)), moisture_factor.(Ref(no_moisture_dependence), Ref(hydraulics), S_l), ); # ```julia # julia> unique(K_constant) # 1-element Array{Float32,1}: # 1.2277777f-5 # ``` # Note that choosing this option does not mean the matric potential # is constant, as a hydraulics model is still required and employed. # And, lastly, you might also find it helpful in debugging # to be able to turn off the flow of water by setting `Ksat = 0`. 
# # References
# - [vanGenuchten1980](@cite)
# - [BrooksCorey1964](@cite)
# - [Corey1977](@cite)
# - [Lundin1990](@cite)
# - [Bonan19a](@cite)


================================================
FILE: tutorials/Land/Soil/interpolation_helper.jl
================================================
"""
    function create_interpolation_grid(xbnd, xres, thegrid)

Given boundaries of a domain, and a resolution in each direction, create
an interpolation grid. This is where the interpolation functions for a set
of variables will be evaluated.

`xbnd` is a 2x3 array of lower/upper bounds per direction and `xres` the
grid step per direction; returns a `ClimateMachine.InterpolationBrick`.
"""
function create_interpolation_grid(xbnd, xres, thegrid)
    x1g = collect(range(xbnd[1, 1], xbnd[2, 1], step = xres[1]))
    x2g = collect(range(xbnd[1, 2], xbnd[2, 2], step = xres[2]))
    x3g = collect(range(xbnd[1, 3], xbnd[2, 3], step = xres[3]))
    intrp_brck = ClimateMachine.InterpolationBrick(thegrid, xbnd, x1g, x2g, x3g)
    return intrp_brck
end

"""
    function interpolate_variables(objects, brick)

Create an interpolation function from data and evaluate that function
on the interpolation brick passed.

Each element of `objects` must have a `data` array whose second dimension
indexes the variables; returns a vector with one interpolated array per
object.
"""
function interpolate_variables(objects, brick)
    i_objects = []
    for object in objects
        nvars = size(object.data, 2)
        ## Use the element type of the data itself rather than a global `FT`,
        ## which this helper file does not define.
        i_object = Array{eltype(object.data)}(undef, brick.Npl, nvars)
        ClimateMachine.interpolate_local!(brick, object.data, i_object)
        push!(i_objects, i_object)
    end
    return i_objects
end


================================================
FILE: tutorials/Numerics/DGMethods/Box1D.jl
================================================
# A box advection test to visualise how different filters work

using MPI
using OrderedCollections
using Plots
using StaticArrays
using Printf

using CLIMAParameters
struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

using ClimateMachine
using ClimateMachine.Mesh.Topologies
using ClimateMachine.Mesh.Grids
using ClimateMachine.DGMethods
using ClimateMachine.DGMethods.NumericalFluxes
using ClimateMachine.BalanceLaws:
    BalanceLaw, Prognostic, Auxiliary, Gradient, GradientFlux
using ClimateMachine.Mesh.Geometry: LocalGeometry
using ClimateMachine.Mesh.Filters
using ClimateMachine.MPIStateArrays
using ClimateMachine.GenericCallbacks
using ClimateMachine.ODESolvers
using ClimateMachine.VariableTemplates
using ClimateMachine.SingleStackUtils

import ClimateMachine.BalanceLaws:
    vars_state,
    source!,
    flux_second_order!,
    flux_first_order!,
    compute_gradient_argument!,
    compute_gradient_flux!,
    update_auxiliary_state!,
    nodal_init_state_auxiliary!,
    init_state_prognostic!,
    boundary_conditions,
    wavespeed

ClimateMachine.init(; disable_gpu = true, log_level = "warn");

const clima_dir = dirname(dirname(pathof(ClimateMachine)));
include(joinpath(clima_dir, "docs", "plothelpers.jl"));

# Minimal advection balance law; the initial value, box amplitude and
# advection velocity are carried as type parameters.
Base.@kwdef struct Box1D{FT, _init_q, _amplitude, _velo} <: BalanceLaw
    param_set::AbstractParameterSet = param_set
    init_q::FT = _init_q
    amplitude::FT = _amplitude
    velo::FT = _velo
end

vars_state(::Box1D, ::Auxiliary, FT) = @vars(z_dim::FT);
vars_state(::Box1D, ::Prognostic, FT) = @vars(q::FT);
vars_state(::Box1D, ::Gradient, FT) = @vars();
vars_state(::Box1D, ::GradientFlux, FT) = @vars();

function wavespeed(
    ::Box1D{FT, _init_q, _amplitude, _velo},
    _...,
) where {FT, _init_q, _amplitude, _velo}
    return _velo
end

function nodal_init_state_auxiliary!(
    m::Box1D,
    aux::Vars,
    tmp::Vars,
    geom::LocalGeometry,
)
    # Store the vertical coordinate so the initial box can be placed by height.
    aux.z_dim = geom.coord[3]
end;

function init_state_prognostic!(
    m::Box1D,
    state::Vars,
    aux::Vars,
    localgeo,
    t::Real,
)
    # A box of height `amplitude` on top of the background `init_q`,
    # between z = 75 and z = 125.
    if aux.z_dim >= 75 && aux.z_dim <= 125
        state.q = m.init_q + m.amplitude
    else
        state.q = m.init_q
    end
end;

function update_auxiliary_state!(
    dg::DGModel,
    m::Box1D,
    Q::MPIStateArray,
    t::Real,
    elems::UnitRange,
)
    return true
end;

# No sources for pure advection.
function source!(m::Box1D, _...) end;

@inline function flux_first_order!(
    m::Box1D,
    flux::Grad,
    state::Vars,
    aux::Vars,
    t::Real,
    _...,
)
    FT = eltype(state)
    # Advective flux in the vertical (third) direction only.
    @inbounds begin
        flux.q = SVector(FT(0), FT(0), state.q * m.velo)
    end
end

# No second-order (diffusive) fluxes.
@inline function flux_second_order!(
    m::Box1D,
    flux::Grad,
    state::Vars,
    diffusive::Vars,
    hyperdiffusive::Vars,
    aux::Vars,
    t::Real,
) end

@inline function flux_second_order!(
    m::Box1D,
    flux::Grad,
    state::Vars,
    τ,
    d_h_tot,
) end

# No boundary conditions (fully periodic domain). This extends the imported
# `ClimateMachine.BalanceLaws.boundary_conditions`; the original defined a
# misspelled `boundary_condtions`, which created an unused local function
# instead of extending the balance-law interface.
boundary_conditions(m::Box1D) = ()

"""
    run_box1D(N_poly, init_q, amplitude, velo, plot_name; kwargs...)

Run the 1D box advection test with polynomial order `N_poly`, background
state `init_q`, box `amplitude` and advection velocity `velo`, and export
the resulting profiles to `plot_name`.

Keyword arguments toggle the TMAR, cutoff, exponential and Boyd-Vandeven
filters (each applied every simulation step) and set their parameters;
`numerical_flux_first_order` selects the first-order numerical flux.
"""
function run_box1D(
    N_poly::Int,
    init_q::FT,
    amplitude::FT,
    velo::FT,
    plot_name::String;
    tmar_filter::Bool = false,
    cutoff_filter::Bool = false,
    exp_filter::Bool = false,
    boyd_filter::Bool = false,
    cutoff_param::Int = 1,
    exp_param_1::Int = 0,
    exp_param_2::Int = 32,
    boyd_param_1::Int = 0,
    boyd_param_2::Int = 32,
    numerical_flux_first_order = CentralNumericalFluxFirstOrder(),
) where {FT}
    nelem = 128
    zmax = FT(350)

    m = Box1D{FT, init_q, amplitude, velo}()

    driver_config = ClimateMachine.SingleStackConfiguration(
        "Box1D",
        N_poly,
        nelem,
        zmax,
        param_set,
        m,
        numerical_flux_first_order = numerical_flux_first_order,
        boundary = ((0, 0), (0, 0), (0, 0)),
        periodicity = (true, true, true),
    )

    t0 = FT(0)
    timeend = FT(450)

    # Advective CFL-style time step: smallest vertical node distance over
    # the advection speed.
    Δ = min_node_distance(driver_config.grid, VerticalDirection())
    max_vel = m.velo
    dt = Δ / max_vel
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        ode_dt = dt,
    )
    grid = solver_config.dg.grid

    output_dir = @__DIR__
    mkpath(output_dir)

    z = get_z(grid)

    # store initial condition at ``t=0``
    dons_arr = Dict[dict_of_nodal_states(solver_config)]
    time_data = FT[0] # store time data

    # output
    output_freq = floor(Int, timeend / dt) + 10

    cb_output = GenericCallbacks.EveryXSimulationSteps(output_freq) do
        push!(dons_arr, dict_of_nodal_states(solver_config))
        push!(time_data, gettime(solver_config.solver))
        nothing
    end

    filter_freq = 1
    # tmar filter
    cb_tmar = GenericCallbacks.EveryXSimulationSteps(filter_freq) do (init = false)
        Filters.apply!(
            solver_config.Q,
            (:q,),
            solver_config.dg.grid,
            TMARFilter(),
        )
        nothing
    end
    # cutoff filter
    cb_cutoff = GenericCallbacks.EveryXSimulationSteps(filter_freq) do (init = false)
        Filters.apply!(
            solver_config.Q,
            (:q,),
            solver_config.dg.grid,
            CutoffFilter(solver_config.dg.grid, cutoff_param),
        )
        nothing
    end
    # exponential filter
    cb_exp = GenericCallbacks.EveryXSimulationSteps(filter_freq) do (init = false)
        Filters.apply!(
            solver_config.Q,
            (:q,),
            solver_config.dg.grid,
            ExponentialFilter(
                solver_config.dg.grid,
                exp_param_1,
                exp_param_2,
            ),
        )
        nothing
    end
    # Boyd Vandeven filter
    cb_boyd = GenericCallbacks.EveryXSimulationSteps(filter_freq) do (init = false)
        Filters.apply!(
            solver_config.Q,
            (:q,),
            solver_config.dg.grid,
            BoydVandevenFilter(
                solver_config.dg.grid,
                boyd_param_1,
                boyd_param_2,
            ),
        )
        nothing
    end

    # Assemble the requested callbacks.
    user_cb_arr = [cb_output]
    if tmar_filter
        push!(user_cb_arr, cb_tmar)
    end
    if cutoff_filter
        push!(user_cb_arr, cb_cutoff)
    end
    if exp_filter
        push!(user_cb_arr, cb_exp)
    end
    if boyd_filter
        push!(user_cb_arr, cb_boyd)
    end
    user_cb = (user_cb_arr...,)

    initial_mass = weightedsum(solver_config.Q)
    ClimateMachine.invoke!(solver_config; user_callbacks = user_cb)
    final_mass = weightedsum(solver_config.Q)
    @info @sprintf(
        """
Mass Conservation:
    initial mass = %.16e
    final mass = %.16e
    difference = %.16e
    normalized difference = %.16e""",
        initial_mass,
        final_mass,
        final_mass - initial_mass,
        (final_mass - initial_mass) / initial_mass
    )

    push!(dons_arr, dict_of_nodal_states(solver_config))
    push!(time_data, gettime(solver_config.solver))
    export_plot(
        z,
        time_data,
        dons_arr,
        ("q",),
        joinpath(output_dir, plot_name);
        xlabel = "x", # NOTE(review): label says "x" but the plot is against z — confirm
        ylabel = "q",
        horiz_layout = true,
    )
end


================================================
FILE: tutorials/Numerics/DGMethods/showcase_filters.jl
================================================
# # Filters

# In this tutorial we show the result of applying filters
# available in the CliMA codebase in a 1 dimensional box advection setup.
# See [Filters API](https://clima.github.io/ClimateMachine.jl/latest/APIs/Numerics/Meshes/Mesh/#Filters-1) for filters interface details.

using ClimateMachine
const clima_dir = dirname(dirname(pathof(ClimateMachine)));
include(joinpath(clima_dir, "tutorials", "Numerics", "DGMethods", "Box1D.jl"))

const FT = Float64

output_dir = @__DIR__;
mkpath(output_dir);

# The unfiltered result of the box advection test for order 4 polynomial with
# central flux is
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_no_filter.svg"),
)
# ![](box_1D_4_no_filter.svg)

# The unfiltered result of the box advection test for order 4 polynomial with
# Rusanov flux (aka upwinding for advection) is
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_no_filter_upwind.svg"),
    numerical_flux_first_order = RusanovNumericalFlux(),
)
# ![](box_1D_4_no_filter_upwind.svg)

# Below we show results for the same box advection test
# but using different filters.
#
# As seen in the results, when the TMAR filter is used mass is not necessarily
# conserved (mass increases are possible).
# `TMARFilter()` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_tmar.svg");
    tmar_filter = true,
)
# ![](box_1D_4_tmar.svg)

# Running the TMAR filter with the Rusanov flux improves the mass conservation,
# since some of the oscillations are reduced, but mass is still not conserved.

# `TMARFilter()` with Rusanov numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_tmar_upwind.svg");
    tmar_filter = true,
    numerical_flux_first_order = RusanovNumericalFlux(),
)
# ![](box_1D_4_tmar_upwind.svg)

# `CutoffFilter(grid, Nc=1)` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_cutoff_1.svg");
    cutoff_filter = true,
    cutoff_param = 1,
)
# ![](box_1D_4_cutoff_1.svg)

# `CutoffFilter(grid, Nc=3)` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_cutoff_3.svg");
    cutoff_filter = true,
    cutoff_param = 3,
)
# ![](box_1D_4_cutoff_3.svg)

# `ExponentialFilter(grid, Nc=1, s=4)` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_exp_1_4.svg");
    exp_filter = true,
    exp_param_1 = 1,
    exp_param_2 = 4,
)
# ![](box_1D_4_exp_1_4.svg)

# `ExponentialFilter(grid, Nc=1, s=8)` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_exp_1_8.svg");
    exp_filter = true,
    exp_param_1 = 1,
    exp_param_2 = 8,
)
# ![](box_1D_4_exp_1_8.svg)

# `ExponentialFilter(grid, Nc=1, s=32)` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_exp_1_32.svg");
    exp_filter = true,
    exp_param_1 = 1,
    exp_param_2 = 32,
)
# ![](box_1D_4_exp_1_32.svg)

# `BoydVandevenFilter(grid, Nc=1, s=4)` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_boyd_1_4.svg");
    boyd_filter = true,
    boyd_param_1 = 1,
    boyd_param_2 = 4,
)
# ![](box_1D_4_boyd_1_4.svg)

# `BoydVandevenFilter(grid, Nc=1, s=8)` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_boyd_1_8.svg");
    boyd_filter = true,
    boyd_param_1 = 1,
    boyd_param_2 = 8,
)
# ![](box_1D_4_boyd_1_8.svg)

# `BoydVandevenFilter(grid, Nc=1, s=32)` with central numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_boyd_1_32.svg");
    boyd_filter = true,
    boyd_param_1 = 1,
    boyd_param_2 = 32,
)
# ![](box_1D_4_boyd_1_32.svg)

# `ExponentialFilter(grid, Nc=1, s=8)` and `TMARFilter()` with central numerical
# flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_tmar_exp_1_8.svg");
    exp_filter = true,
    tmar_filter = true,
    exp_param_1 = 1,
    exp_param_2 = 8,
)
# ![](box_1D_4_tmar_exp_1_8.svg)

# `BoydVandevenFilter(grid, Nc=1, s=8)` and `TMARFilter()` with central
# numerical flux:
run_box1D(
    4,
    FT(0.0),
    FT(1.0),
    FT(1.0),
    joinpath(output_dir, "box_1D_4_tmar_boyd_1_8.svg");
    boyd_filter = true,
    tmar_filter = true,
    boyd_param_1 = 1,
    boyd_param_2 = 8,
)
# ![](box_1D_4_tmar_boyd_1_8.svg)


================================================
FILE: tutorials/Numerics/SystemSolvers/bgmres.jl
================================================
# # Batched Generalized Minimal Residual

# In this tutorial we describe the basics of using the batched gmres iterative solver.
# At the end you should be able to
# 1. Use BatchedGeneralizedMinimalResidual to solve batches of linear systems
# 2. Construct a columnwise linear solver with BatchedGeneralizedMinimalResidual

# ## What is the Generalized Minimal Residual Method?

# The Generalized Minimal Residual Method (GMRES) is a [Krylov subspace](https://en.wikipedia.org/wiki/Krylov_subspace) method for solving linear systems:
# ```math
# Ax = b
# ```
# See the [wikipedia](https://en.wikipedia.org/wiki/Generalized_minimal_residual_method) for more details.

# ## What is the Batched Generalized Minimal Residual Method?
# As the name suggests it solves a whole bunch of independent GMRES problems

# ## Basic Example

# First we must load a few things
using ClimateMachine
using ClimateMachine.SystemSolvers
using LinearAlgebra, Random, Plots

# Next we define two linear systems that we would like to solve simultaneously.
# The matrix for the first linear system is
A1 = [
    2.0 -1.0 0.0
    -1.0 2.0 -1.0
    0.0 -1.0 2.0
];
# And the right hand side is
b1 = ones(typeof(1.0), 3);
# The exact solution to the first linear system is
x1_exact = [1.5, 2.0, 1.5];
# The matrix for the second linear system is
A2 = [
    2.0 -1.0 0.0
    0.0 2.0 -1.0
    0.0 0.0 2.0
];
# And the right hand side is
b2 = ones(typeof(1.0), 3);
# The exact solution to the second linear system is
x2_exact = [0.875, 0.75, 0.5];

# We now define a function that performs the action of each linear operator independently.
# Column 1 of the arguments is sent through `A1` and column 2 through `A2`.
function closure_linear_operator(A1, A2)
    function linear_operator!(x, y)
        mul!(view(x, :, 1), A1, view(y, :, 1))
        mul!(view(x, :, 2), A2, view(y, :, 2))
        return nothing
    end
    return linear_operator!
end;

# To understand how this works let us construct an instance
# of the linear operator and apply it to a vector
linear_operator! = closure_linear_operator(A1, A2);
# Let us see what the action of this linear operator is
y1 = ones(typeof(1.0), 3);
y2 = ones(typeof(1.0), 3) * 2.0;
y = [y1 y2];
x = copy(y);
linear_operator!(x, y);
x
# We see that the first column is `A1 * [1 1 1]'`
# and the second column is `A2 * [2 2 2]'`
# that is, [A1 * y1 A2 * y2]

# We are now ready to set up our Batched Generalized Minimal Residual solver
# We must now set up the right hand side of the linear system
b = [b1 b2];
# as well as the exact solution, (to verify convergence)
x_exact = [x1_exact x2_exact];

# !!! warning
#     For BatchedGeneralizedMinimalResidual the assumption is that each column of b is independent and corresponds to a batch. This will come back later.
# We now use an instance of the solver linearsolver = BatchedGeneralizedMinimalResidual(b, size(A1, 1), 2); # As well as an initial guess, denoted by the variable x x1 = ones(typeof(1.0), 3); x2 = ones(typeof(1.0), 3); x = [x1 x2]; # To solve the linear system, we just need to pass to the linearsolve! function iters = linearsolve!(linear_operator!, nothing, linearsolver, x, b) # which is guaranteed to converge in 3 iterations since `length(b1)=length(b2)=3` # We can now check that the solution that we computed, x x # has converged to the exact solution x_exact # Which indeed it has. # ## Advanced Example # We now go through a more advanced application of the Batched Generalized Minimal Residual solver # !!! warning # Iterative methods should be used with preconditioners! # The first thing we do is define a linear operator that mimics # the behavior of a columnwise operator in ClimateMachine function closure_linear_operator!(A, tup) function linear_operator!(y, x) alias_x = reshape(x, tup) alias_y = reshape(y, tup) for i6 in 1:tup[6] for i4 in 1:tup[4] for i2 in 1:tup[2] for i1 in 1:tup[1] tmp = alias_x[i1, i2, :, i4, :, i6][:] tmp2 = A[i1, i2, i4, i6] * tmp alias_y[i1, i2, :, i4, :, i6] .= reshape(tmp2, (tup[3], tup[5])) end end end end end end; # Next we define the array structure of an MPIStateArray # in its true high dimensional form tup = (2, 2, 5, 2, 10, 2); # We define our linear operator as a random matrix Random.seed!(1234); B = [ randn(tup[3] * tup[5], tup[3] * tup[5]) for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6] ]; columnwise_A = [ B[i1, i2, i4, i6] + 3 * (i1 + i2 + i4 + i6) * I for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6] ]; # as well as its inverse columnwise_inv_A = [ inv(columnwise_A[i1, i2, i4, i6]) for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6] ]; columnwise_linear_operator! = closure_linear_operator!(columnwise_A, tup); columnwise_inverse_linear_operator! 
= closure_linear_operator!(columnwise_inv_A, tup); # The structure of an MPIStateArray is related to its true # higher dimensional form as follows: mpi_tup = (tup[1] * tup[2] * tup[3], tup[4], tup[5] * tup[6]); # We now define the right hand side of our Linear system b = randn(mpi_tup); # As well as the initial guess x = copy(b); x += randn(mpi_tup) * 0.1; # In the previous tutorial we mentioned that it is assumed that # the right hand side is an array whose column vectors all independent linear # systems. But right now the array structure of ``x`` and ``b`` do not follow # this requirement. # To handle this case we must pass in additional arguments that tell the # linear solver how to reconcile these differences. # The first thing that the linear solver must know of is the higher tensor # form of the MPIStateArray, which is just the `tup` from before reshape_tuple_f = tup; # The second thing it needs to know is which indices correspond to a column # and we want to make sure that these are the first set of indices that appear # in the permutation tuple (which can be thought of as enacting # a Tensor Transpose). permute_tuple_f = (5, 3, 4, 6, 1, 2); # It has this format since the 3 and 5 index slots # are the ones associated with traversing a column. And the 4 index # slot corresponds to a state. # We also need to tell our solver which kind of Array struct to use ArrayType = Array; # We are now ready to finally define our linear solver, which uses a number # of keyword arguments gmres = BatchedGeneralizedMinimalResidual( b, tup[3] * tup[5] * tup[4], tup[1] * tup[2] * tup[6]; atol = eps(Float64) * 100, rtol = eps(Float64) * 100, forward_reshape = reshape_tuple_f, forward_permute = permute_tuple_f, ); # `m` is the number of gridpoints along a column. As mentioned previously, # this is `tup[3]*tup[5]*tup[4]`. The `n` term corresponds to the batch size # or the number of columns in this case. 
# `atol` and `rtol` are the absolute and
# relative tolerances, respectively.

# All the hard work is done, now we just call our linear solver
iters = linearsolve!(
    columnwise_linear_operator!,
    nothing,
    gmres,
    x,
    b,
    max_iters = tup[3] * tup[5] * tup[4],
)
# We see that it converged in less than `tup[3]*tup[5] = 50` iterations.
# Let us verify that it is indeed correct by computing the exact answer
# numerically and comparing it against the iterative solver.
x_exact = copy(x);
columnwise_inverse_linear_operator!(x_exact, b);
# Now we can compare with some norms
norm(x - x_exact) / norm(x_exact)
columnwise_linear_operator!(x_exact, x);
norm(x_exact - b) / norm(b)
# Which we see are small, given our choice of `atol` and `rtol`.
# The struct also keeps a record of its convergence rate
# in the residual member (max over all batched solvers).
# These can be visualized via
plot(log.(gmres.resnorms[:]) / log(10));
plot!(legend = false, xlims = (1, iters), ylims = (-15, 2));
plot!(ylabel = "log10 residual", xlabel = "iterations")


================================================
FILE: tutorials/Numerics/SystemSolvers/cg.jl
================================================
# # Conjugate Gradient

# In this tutorial we describe the basics of using the conjugate gradient iterative solvers
# At the end you should be able to
# 1. Use Conjugate Gradient to solve a linear system
# 2. Know when to not use it
# 3. Construct a column-wise linear solver with Conjugate Gradient

# ## What is it?

# Conjugate Gradient is an iterative method for solving special kinds of linear systems:
# ```math
# Ax = b
# ```
# via iterative methods.

# !!! warning
#     The linear operator needs to be symmetric positive definite and the preconditioner must be symmetric.

# See the [wikipedia](https://en.wikipedia.org/wiki/Conjugate_gradient_method) for more details.
# ## Basic Example # First we must load a few things using ClimateMachine using ClimateMachine.SystemSolvers using LinearAlgebra, Random # Next we define a 3x3 symmetric positive definite linear system. (In the ClimateMachine code a symmetric positive definite system could arise from treating diffusion implicitly.) A = [ 2.0 -1.0 0.0 -1.0 2.0 -1.0 0.0 -1.0 2.0 ]; # We define the matrix `A` here as a global variable for convenience later. # We can see that it is symmetric. We can check that it is positive definite by checking the spectrum eigvals(A) # The linear operators that are passed into the abstract iterative solvers need to be defined as functions that act on vectors. Let us do that with our matrix. We are using function closures for type stability. function closure_linear_operator!(A) function linear_operator!(x, y) mul!(x, A, y) end return linear_operator! end; # We now define our linear operator using the function closure linear_operator! = closure_linear_operator!(A) # We now define our `b` in the linear system b = ones(typeof(1.0), 3); # The exact solution to the system `Ax = b` is x_exact = [1.5, 2.0, 1.5]; # Now we can set up the ConjugateGradient struct linearsolver = ConjugateGradient(b); # and an initial guess for the iterative solver. x = ones(typeof(1.0), 3); # To solve the linear system we just need to pass to the linearsolve! function iters = linearsolve!(linear_operator!, nothing, linearsolver, x, b) # The variable `x` gets overwritten during the linear solve # The norm of the error is norm(x - x_exact) / norm(x_exact) # The relative norm of the residual is norm(A * x - b) / norm(b) # The number of iterations is iters # Conjugate Gradient is guaranteed to converge in 3 iterations with perfect arithmetic in this case. # ## Non-Example # Conjugate Gradient is not guaranteed to converge with nonsymmetric matrices. Consider A = [ 2.0 -1.0 0.0 0.0 2.0 -1.0 0.0 0.0 2.0 ]; # We define the matrix `A` here as a global variable for convenience later. 
# We can see that it is not symmetric, but it does have all positive eigenvalues
eigvals(A)
# The linear operators that are passed into the abstract iterative solvers need to be defined as functions that act on vectors. Let us do that with our matrix. We are using function closures for type stability.
function closure_linear_operator!(A)
    function linear_operator!(x, y)
        mul!(x, A, y)
    end
    return linear_operator!
end;
# We define the linear operator using our function closure
linear_operator! = closure_linear_operator!(A)
# We now define our `b` in the linear system
b = ones(typeof(1.0), 3);
# The exact solution to the system `Ax = b` is
x_exact = [0.875, 0.75, 0.5];
# Now we can set up the ConjugateGradient struct
linearsolver = ConjugateGradient(b, max_iter = 100);
# We also passed in the keyword argument "max_iter" for the maximum number of iterations of the iterative solver. By default it is assumed to be the size of the vector.
# As before we need to define an initial guess
x = ones(typeof(1.0), 3);
# To (not) solve the linear system we just need to pass to the linearsolve! function
iters = linearsolve!(linear_operator!, nothing, linearsolver, x, b)
# The variable `x` gets overwritten during the linear solve
# The norm of the error is
norm(x - x_exact) / norm(x_exact)
# The relative norm of the residual is
norm(A * x - b) / norm(b)
# The number of iterations is
iters
# Conjugate Gradient is guaranteed to converge in 3 iterations with perfect arithmetic for a symmetric positive definite matrix. Here we see that the matrix is not symmetric and it didn't converge even after 100 iterations.

# ## More Complex Example
# Here we show how to construct a column-wise iterative solver similar to what is in the ClimateMachine code. The following is not for the faint of heart.
# We must first define a linear operator that acts like one in the ClimateMachine
# It reshapes the flat arrays into their 6-dimensional brick form and applies
# one dense matrix per column (per i1, i2, i4, i6 index combination).
function closure_linear_operator!(A, tup)
    function linear_operator!(y, x)
        alias_x = reshape(x, tup)
        alias_y = reshape(y, tup)
        for i6 in 1:tup[6]
            for i4 in 1:tup[4]
                for i2 in 1:tup[2]
                    for i1 in 1:tup[1]
                        tmp = alias_x[i1, i2, :, i4, :, i6][:]
                        tmp2 = A[i1, i2, i4, i6] * tmp
                        alias_y[i1, i2, :, i4, :, i6] .=
                            reshape(tmp2, (tup[3], tup[5]))
                    end
                end
            end
        end
    end
end;

# Now that we have this function, we can define a linear system that we will solve columnwise
# First we define the structure of our array as `tup` in a manner that is similar to a stacked brick topology
tup = (3, 4, 7, 2, 20, 2);
# where
# 1. tup[1] is the number of Gauss–Lobatto points in the x-direction
# 2. tup[2] is the number of Gauss–Lobatto points in the y-direction
# 3. tup[3] is the number of Gauss–Lobatto points in the z-direction
# 4. tup[4] is the number of states
# 5. tup[5] is the number of elements in the vertical direction
# 6. tup[6] is the number of elements in the other directions

# Now we define our linear operator as a random matrix.
Random.seed!(1235);
B = [
    randn(tup[3] * tup[5], tup[3] * tup[5])
    for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6]
];
columnwise_A = [
    B[i1, i2, i4, i6] * B[i1, i2, i4, i6]' + 10I
    for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6]
];
columnwise_inv_A = [
    inv(columnwise_A[i1, i2, i4, i6])
    for i1 in 1:tup[1], i2 in 1:tup[2], i4 in 1:tup[4], i6 in 1:tup[6]
];
columnwise_linear_operator! = closure_linear_operator!(columnwise_A, tup);
columnwise_inverse_linear_operator! =
    closure_linear_operator!(columnwise_inv_A, tup);

# We define our `x` and `b` with matrix structures similar to an MPIStateArray
mpi_tup = (tup[1] * tup[2] * tup[3], tup[4], tup[5] * tup[6]);
b = randn(mpi_tup);
x = randn(mpi_tup);
# Now we solve the linear system columnwise
linearsolver = ConjugateGradient(
    x,
    max_iter = tup[3] * tup[5],
    dims = (3, 5),
    reshape_tuple = tup,
);
# The keyword argument `dims` is the reduction dimension for the linear solver. In this case dims = (3,5) are the ones associated with a column. The reshape_tuple argument is to convert the shapes of the array `x` into a form that is more easily usable for reductions in the linear solver
# Now we can solve it
iters = linearsolve!(columnwise_linear_operator!, nothing, linearsolver, x, b);
x_exact = copy(x);
columnwise_inverse_linear_operator!(x_exact, b);
# The norm of the error is
norm(x - x_exact) / norm(x_exact)
# The number of iterations is
iters
# The algorithm converges within `tup[3]*tup[5] = 140` iterations

# ## Tips
# 1. The convergence criteria should be changed, machine precision is too small and the maximum iterations is often too large
# 2. Use a preconditioner if possible
# 3. Make sure that the linear system really is symmetric and positive-definite


================================================
FILE: tutorials/Numerics/TimeStepping/explicit_lsrk.jl
================================================
# # [Single-rate Explicit Timestepping](@id Single-rate-Explicit-Timestepping)

# In this tutorial, we shall explore the use of explicit Runge-Kutta
# methods for the solution of nonautonomous (or non time-invariant) equations.
# For our model problem, we shall reuse the rising thermal bubble
# tutorial. See its [tutorial page](@ref Rising-Thermal-Bubble-Configuration)
# for details on the model and parameters. For the purposes of this tutorial,
# we will only run the experiment for a total of 100 simulation seconds.
using ClimateMachine
const clima_dir = dirname(dirname(pathof(ClimateMachine)));
include(joinpath(
    clima_dir,
    "tutorials",
    "Numerics",
    "TimeStepping",
    "tutorial_risingbubble_config.jl",
))
FT = Float64;

# After discretizing the spatial terms in the equation, the semi-discretization
# of the governing equations has the form:
# ``
# \begin{aligned}
# \frac{\mathrm{d} \boldsymbol{q}}{ \mathrm{d} t} &= M^{-1}\left(M S +
# D^{T} M (F^{adv} + F^{visc}) + \sum_{f=1}^{N_f} L^T M_f(\widehat{F}^{adv} + \widehat{F}^{visc})
# \right) \equiv \mathcal{T}(\boldsymbol{q}).
# \end{aligned}
# ``
# Referencing the canonical form introduced in [Time integration](@ref
# Time-integration) we have that in any explicit
# formulation ``\mathcal{F}(t, \boldsymbol{q}) \equiv 0`` and, in this particular
# formulation, ``\mathcal{T}(t, \boldsymbol{q}) \equiv \mathcal{G}(t, \boldsymbol{q})``.

# The time step restriction for an explicit method must satisfy the stable
# [Courant number](https://en.wikipedia.org/wiki/Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition)
# for the specific time-integrator and must be selected from the following
# constraints
#
# ``
# \Delta t_{\mathrm{explicit}} = \min \left( \frac{C \Delta x_i}{u_i + a}, \frac{C \Delta x_i^2}{\nu} \right)
# ``
#
# where ``C`` is the stable Courant number, ``u_i`` denotes the velocity components,
# ``a`` the speed of sound, ``\Delta x_i`` the grid spacing (non-uniform in case of
# spectral element methods) along the direction ``(x_1,x_2,x_3)``, and ``\nu`` the
# kinematic viscosity. The first term on the right is the time step condition
# due to the non-dissipative components, while the second term is due to the dissipation.
# For explicit time-integrators, we have to find the minimum time step that
# satisfies this condition along all three spatial directions.
# # ## Runge-Kutta methods # # A single step of an ``s``-stage Runge-Kutta (RK) method for # solving the resulting ODE problem presented above and can be # expressed as the following: # # ```math # \begin{align} # \boldsymbol{q}^{n+1} = \boldsymbol{q}^n + \Delta t \sum_{i=1}^{s} b_i \mathcal{T}(\boldsymbol{Q}^i), # \end{align} # ``` # # where ``\boldsymbol{\mathcal{T}}(\boldsymbol{Q}^i)`` is the evaluation of the # right-hand side tendency at the stage value ``\boldsymbol{Q}^i``, defined at # each stage of the RK method: # # ```math # \begin{align} # \boldsymbol{Q}^i = \boldsymbol{q}^{n} + # \Delta t \sum_{j=1}^{s} a_{i,j} # \mathcal{T}(\boldsymbol{Q}^j). # \end{align} # ``` # # The first stage is initialized using the field at the previous time step: # ``\boldsymbol{Q}^{1} \leftarrow \boldsymbol{q}^n``. # # In the above expressions, we define # ``\boldsymbol{A} = \lbrace a_{i,j} \rbrace \in \mathbb{R}^{s\times s}``, # ``\boldsymbol{b} = \lbrace b_i \rbrace \in \mathbb{R}^s``, and # ``\boldsymbol{c} = \lbrace c_i \rbrace \in \mathbb{R}^s`` as the # characteristic coefficients of a given RK method. This means we can # associate any RK method with its so-called *Butcher tableau*: # # ```math # \begin{align} # \begin{array}{c|c} # \boldsymbol{c} &\boldsymbol{A}\\ # \hline # & \boldsymbol{b}^T # \end{array} = # \begin{array}{c|c c c c} # c_1 & a_{1,1} & a_{1,2} & \cdots & a_{1,s}\\ # c_2 & a_{2,1} & a_{2,2} & \cdots & a_{2,s}\\ # \vdots & \vdots & \vdots & \ddots & \vdots\\ # c_s & a_{s,1} & a_{s,2} & \cdots & a_{s,s}\\ # \hline # & b_1 & b_2 & \cdots & b_s # \end{array}. # \end{align} # ``` # # The vector ``\boldsymbol{c}`` is often called the *consistency vector*, # and is typically subjected to the row-sum condition: # # `` # c_i = \sum_{j=1}^{s} a_{i,j}, \quad \forall i = 1, \cdots, s. # `` # # This simplifies the order conditions for higher-order RK methods. # For more information on general RK methods, we refer the interested reader # to Ch. 
5.2 of [Atkinson2011](@cite).
#
# ### [Low-storage Runge-Kutta (LSRK) methods](@id lsrk)

# `ClimateMachine.jl` contains the following low-storage methods:
# - Forward Euler [`LowStorageRungeKutta2N`](@ref ClimateMachine.ODESolvers.LowStorageRungeKutta2N),
# - A 5-stage 4th-order Runge-Kutta method of Carpenter and Kennedy [`LSRK54CarpenterKennedy`](@ref ClimateMachine.ODESolvers.LSRK54CarpenterKennedy)
# - A 14-stage 4th-order Runge-Kutta method developed by Niegemann, Diehl, and Busch [`LSRK144NiegemannDiehlBusch`](@ref ClimateMachine.ODESolvers.LSRK144NiegemannDiehlBusch).
#
# To start, let's try using the 5-stage method: `LSRK54CarpenterKennedy`.

# As is the case for all explicit methods, we are limited by the fastest
# propagating waves described by our governing equations. In our case,
# these are the acoustic waves (with approximate wave speed given by the
# speed of sound of 343 m/s).

# For the rising bubble example used here, we use 4th order polynomials in
# a discontinuous Galerkin approximation, with a domain resolution of
# 125 meters in each spatial direction. This gives an effective
# minimal nodal distance (distance between LGL nodes) of 86 meters
# over the entire mesh. Using the equation for the explicit time-step above,
# we can determine the ``\Delta t`` by specifying the desired Courant number
# ``C`` (denoted `CFL` in the code below).
# In our case, a heuristically determined value of 0.4 is used.

timeend = FT(100)
ode_solver =
    ClimateMachine.ExplicitSolverType(solver_method = LSRK54CarpenterKennedy)
CFL = FT(0.4)
run_simulation(ode_solver, CFL, timeend);

# What if we wish to take a larger timestep size? We could
# try to increase the target Courant number, say ``C = 1.7``, and
# re-run the simulation. But this would break! In fact, in this case the numerical
# scheme would fall outside of the stability region and the simulation would crash.
# This occurs when the time step _exceeds_ the maximal stable time-step size
# of the method.
For the 5-stage method, one can typically get away with using # time-step sizes corresponding to a Courant number of ``C \approx 0.4`` but # typically not much larger. In contrast, we can use an LSRK method with # a larger stability region. Let's illustrate this by using the 14-stage method # with a ``C = 1.7`` instead. ode_solver = ClimateMachine.ExplicitSolverType( solver_method = LSRK144NiegemannDiehlBusch, ) CFL = FT(1.7) run_simulation(ode_solver, CFL, timeend); # And it successfully completes. Currently, the 14-stage LSRK method # `LSRK144NiegemannDiehlBusch` contains the largest stability region of the # low-storage methods available in `ClimateMachine.jl`. # ### [Strong Stability Preserving Runge--Kutta (SSPRK) methods](@id ssprk) # Just as with the LSRK methods, the SSPRK methods are self-starting, # with ``\boldsymbol{Q}^{1} = \boldsymbol{q}^n``, and stage-values are of the form # ```math # \begin{align} # \boldsymbol{Q}^{i+1} = a_{i,1} \boldsymbol{q}^n # + a_{i,2} \boldsymbol{Q}^{i} # + \Delta t b_i\mathcal{T}(\boldsymbol{Q}^{i}) # \end{align} # ``` # # with the value at the next step being the ``(N+1)``-th stage value # ``\boldsymbol{q}^{n+1} = \boldsymbol{Q}^{(N+1)}``. This allows the updates # to be performed with only three copies of the state vector # (storing ``\boldsymbol{q}^n``, ``\boldsymbol{Q}^{i}`` and ``\mathcal{T}(\boldsymbol{Q}^{i})``). # We illustrate here the use of a [`SSPRK33ShuOsher`](@ref ClimateMachine.ODESolvers.SSPRK33ShuOsher) method. 
ode_solver = ClimateMachine.ExplicitSolverType(solver_method = SSPRK33ShuOsher) CFL = FT(0.2) run_simulation(ode_solver, CFL, timeend); # ## References # - [Shu1988](@cite) # - [Heun1900](@cite) ================================================ FILE: tutorials/Numerics/TimeStepping/imex_ark.jl ================================================ # # [Implicit-Explicit (IMEX) Additively-Partitioned Runge-Kutta Timestepping](@id Single-rate-IMEXARK-Timestepping) # In this tutorial, we shall explore the use of IMplicit-EXplicit (IMEX) methods # for the solution of nonautonomous (or non time-invariant) equations. # For our model problem, we shall reuse the acoustic wave test in the GCM # configuration. See its [code](@ref Acoustic-Wave-Configuration) # for details on the model and parameters. For the purposes of this tutorial, # we will only run the experiment for a total of 3600 simulation seconds. # Details on this test case can be found in Sec. 4.3 of [Giraldo2013](@cite). using ClimateMachine const clima_dir = dirname(dirname(pathof(ClimateMachine))); include(joinpath( clima_dir, "tutorials", "Numerics", "TimeStepping", "tutorial_acousticwave_config.jl", )); # The acoustic wave test case used in this tutorial represents a global-scale # problem with inertia-gravity waves traveling around the entire planet. # It has a hydrostatically balanced initial state that is given a pressure # perturbation. # This initial pressure perturbation causes an acoustic wave to travel to # the antipode, coalesce, and return to the initial position. The exact solution # of this test case is simple in that the (linear) acoustic theory allows one # to verify the analytic speed of sound based on the thermodynamics variables. # The initial condition is defined as a hydrostatically balanced atmosphere # with background (reference) potential temperature. 
# To fully demonstrate the advantages of using an IMEX scheme over fully explicit
# schemes, we start here by going over a simple, fully explicit scheme. The
# reader can refer to the [Single-rate Explicit Timestepping tutorial](@ref Single-rate-Explicit-Timestepping)
# for details on such schemes. Here we use the 14-stage LSRK method
# [`LSRK144NiegemannDiehlBusch`](@ref ClimateMachine.ODESolvers.LSRK144NiegemannDiehlBusch), which contains the largest stability region of
# the low-storage methods available in `ClimateMachine.jl`.

FT = Float64
timeend = FT(100)
ode_solver = ClimateMachine.ExplicitSolverType(
    solver_method = LSRK144NiegemannDiehlBusch,
);

# In the following example, the timestep calculation is based on the CFL condition
# for horizontally-propagating acoustic waves. We use a Courant number ``C = 0.002``
# (denoted by `CFL` in the code below) in the horizontal, which corresponds
# to a timestep size of approximately ``1`` second.

CFL = FT(0.002)
cfl_direction = HorizontalDirection()
run_acousticwave(ode_solver, CFL, cfl_direction, timeend);

# However, as one can imagine, for real-world climate processes a time step
# of 1 second would lead to extremely long time-to-solution simulations.

# How can we do better? To be able to take a larger time step, we can treat the
# most restrictive wave speeds (vertical acoustic) implicitly rather than
# explicitly. This motivates the use of IMplicit-EXplicit (IMEX) methods.
# In general, a single step of an ``s``-stage, ``N``-part additive RK method
# (`ARK_N`) is defined by its generalized Butcher tableau:
# ```math
# \begin{align}
# \begin{array}{c|c|c|c}
# \boldsymbol{c} &\boldsymbol{A}_{1} & \cdots & \boldsymbol{A}_{N}\\
# \hline
# & \boldsymbol{b}_1^T & \cdots & \boldsymbol{b}_N^T\\
# \hline
# & \widehat{\boldsymbol{b}}_1^T & \cdots & \widehat{\boldsymbol{b}}_N^T
# \end{array} =
# \begin{array}{c|c c c | c | c c c }
# c_1 & a^{[ 1 ]}_{1,1} & \cdots & a^{[ 1 ]}_{1,s} & \cdots
# & a^{[ \nu ]}_{1,1} & \cdots & a^{[ \nu ]}_{1,s}\\
# \vdots & \vdots & \ddots & \vdots & \cdots
# & \vdots & \ddots & \vdots \\
# c_s & a^{[ 1 ]}_{s,1} & \cdots & a^{[ 1 ]}_{s,s} & \cdots
# & a^{[ \nu ]}_{s,1} & \cdots & a^{[ \nu ]}_{s,s}\\
# \hline
# & b^{[ 1 ]}_1 & \cdots & b^{[ 1 ]}_s & \cdots
# & b^{[ \nu ]}_1 & \cdots & b^{[ \nu ]}_s\\
# \hline
# & \widehat{b}^{[ 1 ]}_1 & \cdots & \widehat{b}^{[ 1 ]}_s &
# & \widehat{b}^{[ \nu ]}_1 & \cdots & \widehat{b}^{[ \nu ]}_s
# \end{array}
# \end{align}
# ```
# and is given by
# ``
# \boldsymbol{q}^{n+1} = \boldsymbol{q}^n + \Delta t \left( \underbrace{\sum_{i=1}^{s}}_{\textrm{Stages}} \underbrace{\sum_{\nu=1}^{N}}_{\textrm{Components}} b_i^{[ \nu ]} {\mathcal{T}}^{[ \nu ]}(\boldsymbol{Q}^i) \right)
# ``
# where ``s`` denotes the stages and ``N`` the components, and where the stage values are given by:
#
# ``
# \boldsymbol{Q}^i = \boldsymbol{q}^n + \Delta t \sum_{j=1}^{s} \sum_{\nu = 1}^{N} a_{i,j}^{[ \nu ]}
# {\mathcal{T}}^{[ \nu]}(\boldsymbol{Q}^j).
# ``
#
# Similar to standard RK methods, the stage vectors are approximations to the state at each stage
# of the ARK method. Moreover, the temporal coefficients ``c_i`` satisfy a similar
# row-sum condition, holding for all ``\nu = 1, \cdots, N``:
# ``
# c_i = \sum_{j=1}^{s} a_{i, j}^{[ \nu ]}, \quad \forall \nu = 1, \cdots, N.
# ``
#
# The Butcher coefficients ``\boldsymbol{c}``, ``\boldsymbol{b}_{\nu}``, ``\boldsymbol{A}_{\nu}``, and ``\widehat{\boldsymbol{b}}_{\nu}``
# are constrained by certain accuracy and stability requirements, which are summarized in
# [Kennedy2001](@cite).

# A common setting is the case ``N = 2``. This gives the typical context for
# Implicit-Explicit (IMEX) splitting methods, where the tendency ``{\mathcal{T}}``
# is assumed to have the decomposition:
#
# ``
# \dot{\boldsymbol{q}} = \mathcal{T}(\boldsymbol{q}) \equiv
# {\mathcal{T}}_{s}(\boldsymbol{q}) + {\mathcal{T}}_{ns}(\boldsymbol{q}),
# ``
# where the right-hand side has been split into a "stiff" component ``{\mathcal{T}}_{s}``,
# to be treated implicitly, and a non-stiff part ``{\mathcal{T}}_{ns}`` to be treated explicitly.
# Referencing the canonical form introduced in [Time integration](@ref
# Time-integration) we have that in this particular formulation
# ``\mathcal{T}_{ns}(t, \boldsymbol{q}) \equiv \mathcal{G}(t, \boldsymbol{q})`` and
# ``\mathcal{T}_{s}(t, \boldsymbol{q}) \equiv \mathcal{F}(t, \boldsymbol{q})``.

# Two different RK methods are applied to ``{\mathcal{T}}_{s}`` and ``{\mathcal{T}}_{ns}``
# separately, which have been specifically designed and coupled. Examples can be found in
# [Giraldo2013](@cite). The Butcher Tableau for an `ARK_2` method will have the
# form
#
# ```math
# \begin{align}
# \begin{array}{c|c|c}
# \boldsymbol{c} &\boldsymbol{A}_E &\boldsymbol{A}_I\\
# \hline
# & \boldsymbol{b}_E^T & \boldsymbol{b}_I^T \\
# \hline
# & \widehat{\boldsymbol{b}}_E^T & \widehat{\boldsymbol{b}}_I^T
# \end{array},
# \end{align}
# ```
#
# with
#
# ``
# \boldsymbol{A}_O = \left\lbrace a_{i, j}^O \right\rbrace, \quad
# \boldsymbol{b}_O = \left\lbrace b_{i}^O \right\rbrace, \quad
# \widehat{\boldsymbol{b}}_O = \left\lbrace \widehat{b}_{i}^O \right\rbrace,
# ``
#
# where ``O`` denotes the label (either ``E`` for explicit or ``I`` for implicit).
# For the acoustic wave example used here, we use 4th order polynomials in
# our discontinuous Galerkin approximation, with 6 elements in each horizontal
# direction and 4 elements in the vertical direction, on the cubed-sphere.
# This gives an effective minimal node-distance (distance between LGL nodes)
# of roughly 203000 m.

# As in the [previous tutorial](@ref Single-rate-Explicit-Timestepping),
# we can determine our ``\Delta t`` by specifying our desired horizontal
# Courant number ``C`` (the timestep calculation is based on the CFL condition
# for horizontally-propagating acoustic waves). In this very simple test case,
# we can use a value of 0.5, which corresponds to a time-step size of
# around 257 seconds. But for this particular example, even higher values
# might work.

timeend = FT(3600)
ode_solver = ClimateMachine.IMEXSolverType(
    solver_method = ARK2GiraldoKellyConstantinescu,
)
CFL = FT(0.5)
cfl_direction = HorizontalDirection()
run_acousticwave(ode_solver, CFL, cfl_direction, timeend);

# ## References
#
# - [Giraldo2013](@cite)
# - [Kennedy2001](@cite)

================================================ FILE: tutorials/Numerics/TimeStepping/mis.jl ================================================

# # [Multirate Infinitesimal Step (MIS) Timestepping](@id MIS-Timestepping)

# In this tutorial, we shall explore the use of Multirate Infinitesimal Step
# (MIS) methods for the solution of nonautonomous (or non time-invariant) equations.
# For our model problem, we shall reuse the rising thermal bubble
# tutorial. See its [tutorial page](@ref Rising-Thermal-Bubble-Configuration)
# for details on the model and parameters. For the purposes of this tutorial,
# we will only run the experiment for a total of 500 simulation seconds.
using ClimateMachine
const clima_dir = dirname(dirname(pathof(ClimateMachine)));
include(joinpath(
    clima_dir,
    "tutorials",
    "Numerics",
    "TimeStepping",
    "tutorial_risingbubble_config.jl",
))
FT = Float64;

# Referencing the formulation introduced in the previous
# [Multirate RK methods tutorial](@ref Multirate-RK-Timestepping), we can
# describe Multirate Infinitesimal Step (MIS) methods by
# ```math
# \begin{align}
# v_i (0)
# &= q^n + \sum_{j=1}^{i-1} \alpha_{ij} (Q^{(j)} - q^n) \\
# \frac{dv_i}{d\tau}
# &= \sum_{j=1}^{i-1} \frac{\gamma_{ij}}{d_i \Delta t} (Q^{(j)} - q^n)
# + \sum_{j=1}^i \frac{\beta_{ij}}{d_i} \mathcal{T}_S (Q^{(j)}, t + \Delta t c_i)
# + \mathcal{T}_F(v_i, t^n + \Delta t \tilde c_i + \frac{c_i - \tilde c_i}{d_i} \tau),
# \quad \tau \in [0, \Delta t d_i] \\
# Q^{(i)} &= v_i(\Delta t d_i),
# \end{align}
# ```
#
# where we have used the stage values ``Q^{(i)} = v_i(\tau_i)`` as the
# solution to the _inner_ ODE problem, ``{\mathcal{T}_{s}}``
# for the slow component, and ``{\mathcal{T}_{f}}`` for the fast
# one, as in the [Multirate RK methods tutorial](@ref Multirate-RK-Timestepping).
# Referencing the canonical form introduced in [Time integration](@ref
# Time-integration), both ``{\mathcal{T}_{f}}`` and ``{\mathcal{T}_{s}}``
# could be discretized either explicitly or implicitly; hence, they could
# belong to either the ``\mathcal{F}(t, \boldsymbol{q})`` or ``\mathcal{G}(t, \boldsymbol{q})``
# term.
#
# The method is defined in terms of the lower-triangular matrices ``\alpha``,
# ``\beta`` and ``\gamma``, with ``d_i = \sum_j \beta_{ij}``,
# ``c_i = (I - \alpha - \gamma)^{-1} d`` and ``\tilde c = \alpha c``.
# More details can be found in [WenschKnothGalant2009](@cite) and
# [KnothWensch2014](@cite).
ode_solver = ClimateMachine.MISSolverType(;
    mis_method = MIS2,
    fast_method = LSRK144NiegemannDiehlBusch,
    nsubsteps = (40,),
)
timeend = FT(500)
CFL = FT(20)
run_simulation(ode_solver, CFL, timeend);

# The reader can compare the Courant number (denoted by `CFL` in the code snippet)
# used in this example with the one adopted in the
# [single-rate explicit timestepping tutorial page](@ref Single-rate-Explicit-Timestepping),
# in which we use the same scheme as the fast method employed in this case,
# and notice that with this MIS method we are able to take a much larger
# Courant number.

# ## References
#
# - [WenschKnothGalant2009](@cite)
# - [KnothWensch2014](@cite)

================================================ FILE: tutorials/Numerics/TimeStepping/multirate_rk.jl ================================================

# # [Multirate Runge-Kutta Timestepping](@id Multirate-RK-Timestepping)

# In this tutorial, we shall explore the use of Multirate Runge-Kutta
# methods for the solution of nonautonomous (or non time-invariant) equations.
# For our model problem, we shall reuse the acoustic wave test in the GCM
# configuration. See its [code](@ref Acoustic-Wave-Configuration)
# for details on the model and parameters. For the purposes of this tutorial,
# we will only run the experiment for a total of 3600 simulation seconds.
# Details on this test case can be found in Sec. 4.3 of [Giraldo2013](@cite).

using ClimateMachine
const clima_dir = dirname(dirname(pathof(ClimateMachine)));
include(joinpath(
    clima_dir,
    "tutorials",
    "Numerics",
    "TimeStepping",
    "tutorial_acousticwave_config.jl",
))
FT = Float64;

# The typical context for Multirate splitting methods is given by problems
# in which the tendency ``\mathcal{T}`` is assumed to have distinct parts that
# operate on different time rates (such as a slow time scale and a fast time scale).
# A general form is given by # # `` # \dot{\boldsymbol{q}} = \mathcal{T}(\boldsymbol{q}) \equiv # {\mathcal{T}}_{f}(\boldsymbol{q}) + {\mathcal{T}}_{s}(\boldsymbol{q}), # `` # # where the right-hand side has been split into a "fast" component ``{\mathcal{T}_{f}}``, # and a "slow" component ``{\mathcal{T}_{s}}``. # Referencing the canonical form introduced in [Time integration](@ref # Time-integration), both ``{\mathcal{T}_{f}}`` and ``{\mathcal{T}_{s}}`` # could be discretized either explicitly or implicitly, hence, they could # belong to either ``\mathcal{F}(t, \boldsymbol{q})`` or ``\mathcal{G}(t, \boldsymbol{q})`` # term. # # For a given time-step size ``\Delta t``, the two-rate method in [Schlegel2009](@cite) # is summarized as the following: # # ```math # \begin{align} # \boldsymbol{Q}_1 &= \boldsymbol{q}(t_n), \\ # \boldsymbol{r}_{i} &= \sum_{j=1}^{i-1}\tilde{a}^O_{ij} {\mathcal{T}_{s}}(\boldsymbol{Q}_{j}), \\ # \boldsymbol{w}_{i,1} &= \boldsymbol{Q}_{i-1},\\ # \boldsymbol{w}_{i,k} &= \boldsymbol{w}_{i,k-1} + \Delta t \tilde{c}_i^O \sum_{j=1}^{k-1}\tilde{a}^I_{k,j} # \left(\frac{1}{\tilde{c}_i^O}\boldsymbol{r}_i + {\mathcal{T}_{f}}(\boldsymbol{w}_{i,j})\right),\\ # & \quad\quad i = 2, \cdots, s^O + 1 \text{ and } k = 2, \cdots, s^I + 1,\nonumber \\ # \boldsymbol{Q}_i &= \boldsymbol{w}_{i,s^I + 1} # \end{align} # ``` # # where the tilde parameters denote increments per RK stage: # # ```math # \begin{align} # \tilde{a}_{ij} &= \begin{cases} # a_{i,j} - a_{i-1, j} & \text{if } i < s + 1 \\ # b_j - a_{s,j} & \text{if } i = s + 1 # \end{cases},\\ # \tilde{c}_{i} &= \begin{cases} # c_{i} - c_{i-1} & \text{if } i < s + 1 \\ # 1 - c_{s} & \text{if } i = s + 1 # \end{cases}, # \end{align} # ``` # # where the coefficients ``a``, ``b``, and ``c`` correspond to the Butcher # tableau for a given RK method. The superscripts ``O`` and ``I`` denote the # *outer* (slow) and *inner* (fast) components of the multirate method # respectively. 
Thus, tilde coefficients should be associated with the RK # method indicated by the superscripts. In other words, the RK methods # for the slow ``{\mathcal{T}_{s}}`` and fast # ``{\mathcal{T}_{f}}`` components have Butcher tables given by: # # ```math # \begin{align} # \begin{array}{c|c} # \boldsymbol{c}_{O} &\boldsymbol{A}_{O} \\ # \hline # & \boldsymbol{b}_O^T # \end{array}, \quad # \begin{array}{c|c} # \boldsymbol{c}_{I} &\boldsymbol{A}_{I} \\ # \hline # & \boldsymbol{b}_I^T # \end{array}, # \end{align} # ``` # # where ``\boldsymbol{A}_O = \lbrace a_{i,j}^O\rbrace``, ``\boldsymbol{b}_O = \lbrace b_i^O \rbrace``, and # ``c_O = \lbrace c_i^O \rbrace`` (similarly for ``\boldsymbol{A}_I``, ``\boldsymbol{b}_I``, and ``\boldsymbol{c}_I``). # The method described here is for an explicit RK outer method with ``s`` stages. # More details can be found in [Schlegel2012](@cite). # The acoustic wave test case used in this tutorial represents a global-scale # problem with inertia-gravity waves traveling around the entire planet. # It has a hydrostatically balanced initial state that is given a pressure # perturbation. # This initial pressure perturbation causes an acoustic wave to travel to # the antipode, coalesce, and return to the initial position. The exact solution # of this test case is simple in that the (linear) acoustic theory allows one # to verify the analytic speed of sound based on the thermodynamics variables. # The initial condition is defined as a hydrostatically balanced atmosphere # with background (reference) potential temperature. 
ode_solver = ClimateMachine.MultirateSolverType( splitting_type = ClimateMachine.HEVISplitting(), slow_method = LSRK54CarpenterKennedy, fast_method = ARK2GiraldoKellyConstantinescu, implicit_solver_adjustable = true, timestep_ratio = 100, ) timeend = FT(3600) CFL = FT(5) cfl_direction = HorizontalDirection() run_acousticwave(ode_solver, CFL, cfl_direction, timeend); # The interested reader can explore the combination of different slow and # fast methods for Multirate solvers, consulting the ones available in # `ClimateMachine.jl`, such as the # [`Low-Storage-Runge-Kutta-methods`](@ref ClimateMachine.ODESolvers.LowStorageRungeKutta2N), # [`Strong-Stability-Preserving-RungeKutta-methods`](@ref ClimateMachine.ODESolvers.StrongStabilityPreservingRungeKutta), # and [`Additive-Runge-Kutta-methods`](@ref ClimateMachine.ODESolvers.AdditiveRungeKutta). # ## References # - [Giraldo2013](@cite) # - [Schlegel2009](@cite) # - [Schlegel2012](@cite) ================================================ FILE: tutorials/Numerics/TimeStepping/ts_intro.jl ================================================ # # [Time integration](@id Time-integration) # Time integration methods for the numerical solution of Ordinary Differential # Equations (ODEs), also called timesteppers, can be of different nature and # flavor (e.g., explicit, semi-implicit, single-stage, multi-stage, single-step, # multi-step, single-rate, multi-rate, etc). ClimateMachine supports several # of them. Before showing the different nature of some of these methods, let us # introduce some common notation. 
# A commonly used notation for Initial Value Problems (IVPs) is: # ```math # \begin{align} # \frac{\mathrm{d} \boldsymbol{q}}{ \mathrm{d} t} &= \mathcal{T}(t, \boldsymbol{q}),\\ # \boldsymbol{q}(t_0) &= \boldsymbol{q_0}, # \end{align} # ``` # where ``\boldsymbol{q}`` is an unknown function (vector in most of our cases) # of time ``t``, which we would like to approximate, and at the initial time ``t_0`` # the corresponding initial value ``\boldsymbol{q}_0`` is given. # The given general formulation, is suitable for single-step explicit schemes. # Generally, the equation can be represented in the following canonical form: # ```math # \begin{align} # \dot {\boldsymbol{q}} + \mathcal{F}(t, \boldsymbol{q}) &= \mathcal{G}(t, \boldsymbol{q}), # \end{align} # ``` # where we have used ``\dot {\boldsymbol{q}} = d \boldsymbol{q} / dt``. # We refer to the term ``\mathcal{G}`` # as the right-hand-side (RHS) or explicit term, and to the spatial terms of # ``\mathcal{F}`` as the left-hand-side (LHS) or implicit term. ================================================ FILE: tutorials/Numerics/TimeStepping/tutorial_acousticwave_config.jl ================================================ # # [Acoustic Wave Configuration](@id Acoustic-Wave-Configuration) # # In this example, we demonstrate the usage of the `ClimateMachine` # [AtmosModel](@ref AtmosModel-docs) machinery to solve the fluid # dynamics of an acoustic wave. 
using ClimateMachine
ClimateMachine.init()

using ClimateMachine.Atmos
using ClimateMachine.Orientations
using ClimateMachine.Checkpoint
using ClimateMachine.ConfigTypes
using Thermodynamics.TemperatureProfiles
using Thermodynamics
using ClimateMachine.TurbulenceClosures
using ClimateMachine.VariableTemplates
using ClimateMachine.Grids
using ClimateMachine.ODESolvers

using CLIMAParameters
using StaticArrays

struct EarthParameterSet <: AbstractEarthParameterSet end
const param_set = EarthParameterSet()

# Parameters of the acoustic wave test: domain height, reference temperature,
# perturbation width parameter `α`, perturbation amplitude `γ` (Pa), and the
# vertical mode number `nv` of the pressure perturbation.
Base.@kwdef struct AcousticWaveSetup{FT}
    domain_height::FT = 10e3
    T_ref::FT = 300
    α::FT = 3
    γ::FT = 100
    nv::Int = 1
end

function (setup::AcousticWaveSetup)(problem, bl, state, aux, localgeo, t)
    ## callable to set initial conditions
    FT = eltype(state)
    param_set = parameter_set(bl)
    λ = longitude(bl, aux)
    φ = latitude(bl, aux)
    z = altitude(bl, aux)

    ## horizontal shape `f` and vertical shape `g` of the pressure perturbation
    β = min(FT(1), setup.α * acos(cos(φ) * cos(λ)))
    f = (1 + cos(FT(π) * β)) / 2
    g = sin(setup.nv * FT(π) * z / setup.domain_height)
    Δp = setup.γ * f * g
    p = aux.ref_state.p + Δp

    ## build a dry thermodynamic state from the perturbed pressure and derive
    ## the prognostic variables from it
    ts = PhaseDry_pT(param_set, p, setup.T_ref)
    e_pot = gravitational_potential(bl.orientation, aux)
    e_int = internal_energy(ts)

    state.ρ = air_density(ts)
    state.ρu = SVector{3, FT}(0, 0, 0)
    state.energy.ρe = state.ρ * (e_int + e_pot)
    return nothing
end

# Configure and run the acoustic wave experiment on the cubed-sphere GCM grid
# with the given ODE solver type, Courant number `CFL` (measured along
# `CFL_direction`), and final simulation time `timeend`.
function run_acousticwave(
    ode_solver_type,
    CFL::FT,
    CFL_direction,
    timeend::FT,
) where {FT}
    ## DG polynomial orders
    N = (4, 4)

    ## Domain resolution
    nelem_horz = 6
    nelem_vert = 4
    resolution = (nelem_horz, nelem_vert)

    t0 = FT(0)

    setup = AcousticWaveSetup{FT}()
    T_profile = IsothermalProfile(param_set, setup.T_ref)
    ref_state = HydrostaticState(T_profile)
    turbulence = ConstantDynamicViscosity(FT(0))
    physics = AtmosPhysics{FT}(
        param_set;
        ref_state = ref_state,
        turbulence = turbulence,
        moisture = DryModel(),
    )
    model = AtmosModel{FT}(
        AtmosGCMConfigType,
        physics;
        init_state_prognostic = setup,
        source = (Gravity(),),
    )

    driver_config = ClimateMachine.AtmosGCMConfiguration(
        "GCM Driver: Acoustic wave test",
        N,
        resolution,
        setup.domain_height,
        param_set,
        setup;
        model = model,
    )
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        Courant_number = CFL,
        init_on_cpu = true,
        ode_solver_type = ode_solver_type,
        CFL_direction = CFL_direction,
    )
    ClimateMachine.invoke!(solver_config)
end

================================================ FILE: tutorials/Numerics/TimeStepping/tutorial_risingbubble_config.jl ================================================

# # [Rising Thermal Bubble Configuration](@id Rising-Thermal-Bubble-Configuration)
#
# In this example, we demonstrate the usage of the `ClimateMachine`
# [AtmosModel](@ref AtmosModel-docs) machinery to solve the fluid
# dynamics of a thermal perturbation in a neutrally stratified background state
# defined by its uniform potential temperature. We solve a flow in a box configuration -
# this is representative of a large-eddy simulation. Several versions of the problem
# setup may be found in the literature, but the general idea is to examine the
# vertical ascent of a thermal bubble (we can interpret these as simple
# representation of convective updrafts).
#
# ## Description of experiment
# 1) Dry Rising Bubble (circular potential temperature perturbation)
# 2) Boundaries
#    Top and Bottom boundaries:
#    - `Impenetrable(FreeSlip())` - Top and bottom: no momentum flux, no mass flux through
#      walls.
#    - `Impermeable()` - non-porous walls, i.e. no diffusive fluxes through
#      walls.
#    Lateral boundaries
#    - Laterally periodic
# 3) Domain - 2500m (horizontal) x 2500m (horizontal) x 2500m (vertical)
# 4) Resolution - 50m effective resolution
# 5) Total simulation time - 1000s
# 6) Mesh Aspect Ratio (Effective resolution) 1:1
# 7) Overrides defaults for
#    - CPU Initialisation
#    - Time integrator
#    - Sources
#    - Smagorinsky Coefficient

#md # !!! note
#md #     This experiment setup assumes that you have installed the
#md #     `ClimateMachine` according to the instructions on the landing page.
#md # We assume the users' familiarity with the conservative form of the #md # equations of motion for a compressible fluid (see the #md # [AtmosModel](@ref AtmosModel-docs) page). #md # #md # The following topics are covered in this example #md # - Package requirements #md # - Defining a `model` subtype for the set of conservation equations #md # - Defining the initial conditions #md # - Applying source terms #md # - Choosing a turbulence model #md # - Adding tracers to the model #md # - Choosing a time-integrator #md # - Choosing diagnostics (output) configurations #md # #md # The following topics are not covered in this example #md # - Defining new boundary conditions #md # - Defining new turbulence models #md # - Building new time-integrators #md # - Adding diagnostic variables (beyond a standard pre-defined list of #md # variables) # ## [Loading code](@id Loading-code-rtb) # Before setting up our experiment, we recognize that we need to import some # pre-defined functions from other packages. Julia allows us to use existing # modules (variable workspaces), or write our own to do so. Complete # documentation for the Julia module system can be found # [here](https://docs.julialang.org/en/v1/manual/modules/#). # We need to use the `ClimateMachine` module! This imports all functions # specific to atmospheric and ocean flow modeling. using ClimateMachine ClimateMachine.init() using ClimateMachine.Atmos using ClimateMachine.Orientations using ClimateMachine.ConfigTypes using ClimateMachine.Diagnostics using ClimateMachine.GenericCallbacks using ClimateMachine.ODESolvers using Thermodynamics.TemperatureProfiles using Thermodynamics using ClimateMachine.TurbulenceClosures using ClimateMachine.VariableTemplates # In ClimateMachine we use `StaticArrays` for our variable arrays. 
# We also use the `Test` package to help with unit tests and continuous # integration systems to design sensible tests for our experiment to ensure new # / modified blocks of code don't damage the fidelity of the physics. The test # defined within this experiment is not a unit test for a specific # subcomponent, but ensures time-integration of the defined problem conditions # within a reasonable tolerance. Immediately useful macros and functions from # this include `@test` and `@testset` which will allow us to define the testing # parameter sets. using StaticArrays using Test using CLIMAParameters using CLIMAParameters.Atmos.SubgridScale: C_smag using CLIMAParameters.Planet: R_d, cp_d, cv_d, MSLP, grav struct EarthParameterSet <: AbstractEarthParameterSet end const param_set = EarthParameterSet(); # ## [Initial Conditions](@id init-rtb) # This example demonstrates the use of functions defined # in the [`Thermodynamics`](@ref Thermodynamics) module to # generate the appropriate initial state for our problem. #md # !!! 
#md # !!! note
#md #     The following variables are assigned in the initial condition
#md #     - `state.ρ` = Scalar quantity for initial density profile
#md #     - `state.ρu`= 3-component vector for initial momentum profile
#md #     - `state.energy.ρe`= Scalar quantity for initial total-energy profile
#md #       humidity
#md #     - `state.tracers.ρχ` = Vector of four tracers (here, for demonstration
#md #       only; we can interpret these as dye injections for visualization
#md #       purposes)

## Initial condition: neutrally stratified background (uniform θ) with a
## circular potential-temperature perturbation centered at (xc, zc), plus a
## thin layer of passive tracer injected between z = 500 m and z = 550 m.
function init_risingbubble!(problem, bl, state, aux, localgeo, t)
    (x, y, z) = localgeo.coord

    ## Problem float-type
    FT = eltype(state)
    param_set = parameter_set(bl)

    ## Unpack constant parameters
    R_gas::FT = R_d(param_set)
    c_p::FT = cp_d(param_set)
    c_v::FT = cv_d(param_set)
    p0::FT = MSLP(param_set)
    _grav::FT = grav(param_set)
    γ::FT = c_p / c_v

    ## Define bubble center and background potential temperature
    xc::FT = 5000
    yc::FT = 1000
    zc::FT = 2000
    r = sqrt((x - xc)^2 + (z - zc)^2)
    rc::FT = 2000
    θamplitude::FT = 2

    ## This is configured in the reference hydrostatic state
    ref_state = reference_state(bl)
    θ_ref::FT = ref_state.virtual_temperature_profile.T_surface

    ## Add the thermal perturbation:
    Δθ::FT = 0
    if r <= rc
        ## Use an FT literal rather than 1.0 so the computation stays
        ## type-stable when the model runs in single precision (FT = Float32).
        Δθ = θamplitude * (FT(1) - r / rc)
    end

    ## Compute perturbed thermodynamic state:
    θ = θ_ref + Δθ                                 ## potential temperature
    π_exner = FT(1) - _grav / (c_p * θ) * z        ## exner pressure
    ρ = p0 / (R_gas * θ) * (π_exner)^(c_v / R_gas) ## density
    T = θ * π_exner
    e_int = internal_energy(param_set, T)
    ts = PhaseDry(param_set, e_int, ρ)
    ρu = SVector(FT(0), FT(0), FT(0))              ## momentum

    ## State (prognostic) variable assignment
    e_kin = FT(0)                                  ## kinetic energy
    e_pot = gravitational_potential(bl, aux)       ## potential energy
    ρe_tot = ρ * total_energy(e_kin, e_pot, ts)    ## total energy
    ρχ = FT(0)                                     ## tracer

    ## We inject tracers at the initial condition at some specified z coordinates
    if 500 < z <= 550
        ρχ += FT(0.05)
    end

    ## We want 4 tracers
    ntracers = 4

    ## Define 4 tracers, (arbitrary scaling for this demo problem)
    ρχ = SVector{ntracers, FT}(ρχ, ρχ / 2, ρχ / 3, ρχ / 4)

    ## Assign State Variables
    state.ρ = ρ
    state.ρu = ρu
    state.energy.ρe = ρe_tot
    state.tracers.ρχ = ρχ
end

# ## [Model Configuration](@id config-helper)
# We define a configuration function to assist in prescribing the physical
# model. The purpose of this is to populate the
# `ClimateMachine.AtmosLESConfiguration` with arguments
# appropriate to the problem being considered.
function config_risingbubble(
    ::Type{FT},
    N,
    resolution,
    xmax,
    ymax,
    zmax,
) where {FT}

    ## Since we want four tracers, we specify this and include the appropriate
    ## diffusivity scaling coefficients (normally these would be physically
    ## informed but for this demonstration we use integers corresponding to the
    ## tracer index identifier)
    ntracers = 4
    δ_χ = SVector{ntracers, FT}(1, 2, 3, 4)

    ## To assemble `AtmosModel` with no tracers, set `tracers = NoTracers()`.

    ## The model coefficient for the turbulence closure is defined via the
    ## [CLIMAParameters
    ## package](https://CliMA.github.io/CLIMAParameters.jl/latest/) A reference
    ## state for the linearisation step is also defined.
    T_surface = FT(300)
    T_min_ref = FT(0)
    T_profile = DryAdiabaticProfile{FT}(param_set, T_surface, T_min_ref)
    ref_state = HydrostaticState(T_profile)

    ## Here we assemble the `AtmosModel`.
    _C_smag = FT(C_smag(param_set))
    physics = AtmosPhysics{FT}(
        param_set; ## Parameter set corresponding to earth parameters
        ref_state = ref_state, ## Reference state
        turbulence = SmagorinskyLilly(_C_smag), ## Turbulence closure model
        moisture = DryModel(), ## Exclude moisture variables
        tracers = NTracers{ntracers, FT}(δ_χ), ## Tracer model with diffusivity coefficients
    )
    model = AtmosModel{FT}(
        AtmosLESConfigType, ## Flow in a box, requires the AtmosLESConfigType
        physics; ## Atmos physics
        init_state_prognostic = init_risingbubble!, ## Apply the initial condition
        source = (Gravity(),), ## Gravity is the only source term here
    )

    ## Finally, we pass a `Problem Name` string, the mesh information, and the
    ## model type to the [`AtmosLESConfiguration`] object.
    config = ClimateMachine.AtmosLESConfiguration(
        "DryRisingBubble", ## Problem title [String]
        N, ## Polynomial order [Int]
        resolution, ## (Δx, Δy, Δz) effective resolution [m]
        xmax, ## Domain maximum size [m]
        ymax, ## Domain maximum size [m]
        zmax, ## Domain maximum size [m]
        param_set, ## Parameter set.
        init_risingbubble!, ## Function specifying initial condition
        model = model, ## Model type
    )
    return config
end

#md # !!! note
#md #     `Keywords` are used to specify some arguments (see appropriate source
#md #     files).

# ## Diagnostics
# Here we define the diagnostic configuration specific to this problem.
function config_diagnostics(driver_config)
    interval = "10000steps"
    dgngrp = setup_atmos_default_diagnostics(
        AtmosLESConfigType(),
        interval,
        driver_config.name,
    )
    return ClimateMachine.DiagnosticsConfiguration([dgngrp])
end

## Build the driver/solver/diagnostics configurations and run the rising-bubble
## simulation to `timeend` with the requested ODE solver and Courant number.
function run_simulation(ode_solver_type, CFL::FT, timeend::FT) where {FT}
    ## We need to specify the polynomial order for the DG discretization,
    ## effective resolution, simulation end-time, the domain bounds, and the
    ## courant-number for the time-integrator. Note how the time-integration
    ## components `solver_config` are distinct from the spatial / model
    ## components in `driver_config`. `init_on_cpu` is a helper keyword argument
    ## that forces problem initialization on CPU (thereby allowing the use of
    ## random seeds, spline interpolants and other special functions at the
    ## initialization step.)
    N = 4
    Δh = FT(125)
    Δv = FT(125)
    resolution = (Δh, Δh, Δv)
    xmax = FT(10000)
    ymax = FT(500)
    zmax = FT(10000)
    t0 = FT(0)

    ## timeend = FT(100) ## For full simulation set `timeend = 1000`

    driver_config = config_risingbubble(FT, N, resolution, xmax, ymax, zmax)
    solver_config = ClimateMachine.SolverConfiguration(
        t0,
        timeend,
        driver_config,
        init_on_cpu = true,
        Courant_number = CFL,
        ode_solver_type = ode_solver_type,
    )
    dgn_config = config_diagnostics(driver_config)

    @show solver_config.ode_solver_type

    ## Invoke solver (calls `solve!` function for time-integrator), pass the driver,
    ## solver and diagnostic config information.
    result = ClimateMachine.invoke!(
        solver_config;
        diagnostics_config = dgn_config,
        user_callbacks = (),
        check_euclidean_distance = true,
    )
    return result
end


================================================
FILE: tutorials/Ocean/geostrophic_adjustment.jl
================================================

# # Geostrophic adjustment in the hydrostatic Boussinesq equations
#
# This example simulates a one-dimensional geostrophic adjustment problem
# using the `ClimateMachine.Ocean` subcomponent to solve the hydrostatic
# Boussinesq equations.
#
# First we `ClimateMachine.init()`.
using ClimateMachine

ClimateMachine.init()

# # Domain setup
#
# We formulate our problem in a Cartesian domain 100 km in ``x, y`` and 400 m
# deep, discretized with 25 fourth-order elements in ``x``, and 1
# fourth-order element in ``y, z``,

using ClimateMachine.CartesianDomains

domain = RectangularDomain(
    Ne = (25, 1, 1),
    Np = 4,
    x = (0, 1e6),
    y = (0, 1e6),
    z = (-400, 0),
    periodicity = (false, true, false),
)

# # Physical parameters
#
# We use a Coriolis parameter appropriate for mid-latitudes,

f = 1e-4 # s⁻¹, Coriolis parameter
nothing # hide

# and Earth's gravitational acceleration,

using CLIMAParameters: AbstractEarthParameterSet, Planet
struct EarthParameters <: AbstractEarthParameterSet end

g = Planet.grav(EarthParameters()) # m s⁻²

# # An unbalanced initial state
#
# We use a Gaussian, partially-balanced initial condition with parameters

U = 0.1             # geostrophic velocity (m s⁻¹)
L = domain.L.x / 40 # Gaussian width (m)
a = f * U * L / g   # amplitude of the geostrophic surface displacement (m)
x₀ = domain.L.x / 4 # Gaussian origin (m, recall that x ∈ [0, Lx])

# and functional form

Gaussian(x, L) = exp(-x^2 / (2 * L^2))

## Geostrophic ``y``-velocity: f V = g ∂_x η
vᵍ(x, y, z) = -U * (x - x₀) / L * Gaussian(x - x₀, L)

## Geostrophic surface displacement
ηᵍ(x, y, z) = a * Gaussian(x - x₀, L)

# We double the initial surface displacement so that the surface is half-balanced,
# half unbalanced,

ηⁱ(x, y, z) = 2 * ηᵍ(x, y, z)

# In summary,

using ClimateMachine.Ocean.OceanProblems: InitialConditions

initial_conditions = InitialConditions(v = vᵍ, η = ηⁱ)

@info """ Parameters for the Geostrophic adjustment problem are...

    Coriolis parameter: $f s⁻¹
    Gravitational acceleration: $g m s⁻²
    Geostrophic velocity: $U m s⁻¹
    Width of the initial geostrophic perturbation: $L m
    Amplitude of the initial surface perturbation: $a m
    Rossby number (U / f L): $(U / (f * L))

"""

# # Boundary conditions, Driver configuration, and Solver configuration
#
# Next, we configure the `HydrostaticBoussinesqModel` and build the `DriverConfiguration`.
# We configure our model in a domain which is bounded in the ``x`` direction.
# Both the boundary conditions in ``x`` and in ``z`` require boundary conditions,
# which we define:

using ClimateMachine.Ocean:
    Impenetrable, Penetrable, FreeSlip, Insulating, OceanBC

solid_surface_boundary_conditions = OceanBC(
    Impenetrable(FreeSlip()), # Velocity boundary conditions
    Insulating(), # Temperature boundary conditions
)

free_surface_boundary_conditions = OceanBC(
    Penetrable(FreeSlip()), # Velocity boundary conditions
    Insulating(), # Temperature boundary conditions
)

boundary_conditions =
    (solid_surface_boundary_conditions, free_surface_boundary_conditions)

# We refer to these boundary conditions by their indices in the `boundary_tags` tuple
# when specifying the boundary conditions for the `state`; in other words, "1" corresponds to
# `solid_surface_boundary_conditions`, while `2` corresponds to `free_surface_boundary_conditions`,

boundary_tags = (
    (1, 1), # (west, east) boundary conditions
    (0, 0), # (south, north) boundary conditions
    (1, 2), # (bottom, top) boundary conditions
)

# We're now ready to build the model.

using ClimateMachine.Ocean

model = Ocean.HydrostaticBoussinesqSuperModel(
    domain = domain,
    time_step = 2.0,
    initial_conditions = initial_conditions,
    parameters = EarthParameters(),
    turbulence_closure = (νʰ = 0, κʰ = 0, νᶻ = 0, κᶻ = 0),
    coriolis = (f₀ = f, β = 0),
    boundary_tags = boundary_tags,
    boundary_conditions = boundary_conditions,
);
nothing

# !!! info "Horizontally-periodic boundary conditions"
#     To set horizontally-periodic boundary conditions with
#     `(solid_surface_boundary_conditions, free_surface_boundary_conditions)`
#     in the vertical direction use `periodicity = (true, true, false)` in
#     the `domain` constructor and `boundary_tags = ((0, 0), (0, 0), (1, 2))`
#     in the constructor for `HydrostaticBoussinesqSuperModel`.

# # Animating the solution
#
# To animate the `ClimateMachine.Ocean` solution, we'll create a callback
# that draws a plot and stores it in an array. When the simulation is finished,
# we'll string together the plotted frames into an animation.

using Printf
using Plots
using ClimateMachine.GenericCallbacks: EveryXSimulationSteps
using ClimateMachine.CartesianFields: assemble
using ClimateMachine.Ocean: current_step, current_time

u, v, η, θ = model.fields

## Container to hold the plotted frames
movie_plots = []

plot_every = 200 # iterations

plot_maker = EveryXSimulationSteps(plot_every) do
    @info "Steps: $(current_step(model)), time: $(current_time(model))"

    assembled_u = assemble(u.elements)
    assembled_v = assemble(v.elements)
    assembled_η = assemble(η.elements)

    ## (A previously computed `umax`/`ulim` pair was unused and has been
    ## removed; the velocity axis limits below are set from the geostrophic
    ## velocity scale `U`.)
    u_plot = plot(
        assembled_u.x[:, 1, 1],
        [assembled_u.data[:, 1, 1] assembled_v.data[:, 1, 1]],
        xlim = domain.x,
        ylim = (-0.7U, 0.7U),
        label = ["u" "v"],
        linewidth = 2,
        xlabel = "x (m)",
        ylabel = "Velocities (m s⁻¹)",
    )

    η_plot = plot(
        assembled_η.x[:, 1, 1],
        assembled_η.data[:, 1, 1],
        xlim = domain.x,
        ylim = (-0.01a, 1.2a),
        linewidth = 2,
        label = nothing,
        xlabel = "x (m)",
        ylabel = "η (m)",
    )

    push!(movie_plots, (u = u_plot, η = η_plot, time = current_time(model)))

    return nothing
end

# # Running the simulation and animating the results
#
# Finally, we run the simulation,

hours = 3600.0
model.solver_configuration.timeend = 2hours

result = ClimateMachine.invoke!(
    model.solver_configuration;
    user_callbacks = [plot_maker],
)

# and animate the results,

animation = @animate for p in movie_plots
    title = @sprintf("Geostrophic adjustment at t = %.2f hours", p.time / hours)
    frame = plot(
        p.u,
        p.η,
        layout = (2, 1),
        size = (800, 600),
        title = [title ""],
    )
end

gif(animation, "geostrophic_adjustment.mp4", fps = 5) # hide


================================================
FILE: tutorials/Ocean/internal_wave.jl
================================================

# # Mode-1 internal wave reflection
#
# This example simulates the propagation of a mode-1 internal wave
# using the `ClimateMachine.Ocean` subcomponent to solve the hydrostatic
# Boussinesq equations.
#
# First we `ClimateMachine.init()`.

using ClimateMachine

ClimateMachine.init()

# # Domain setup
#
# We formulate a non-dimension problem in a Cartesian domain with oceanic anisotropy,

using ClimateMachine.CartesianDomains

domain = RectangularDomain(
    Ne = (32, 1, 4),
    Np = 4,
    x = (-128, 128),
    y = (-128, 128),
    z = (-1, 0),
    periodicity = (false, false, false),
)

# # Parameters
#
# We choose parameters appropriate for a hydrostatic internal wave,

# Non-dimensional internal wave parameters
f = 1  # Coriolis
N = 10 # Buoyancy frequency

# Note that the validity of the hydrostatic approximation requires
# small aspect ratio motions with ``k / m \\ll 1``.
# The hydrostatic dispersion relation for inertia-gravity waves then implies that

λ = 8      # horizontal wave-length
k = 2π / λ # horizontal wavenumber
m = π
ω² = f^2 + N^2 * k^2 / m^2

# and

ω = √(ω²)

# # Internal wave initial condition
#
# We impose modest gravitational acceleration to render time-stepping feasible,

using CLIMAParameters: AbstractEarthParameterSet, Planet

struct NonDimensionalParameters <: AbstractEarthParameterSet end

Planet.grav(::NonDimensionalParameters) = 256.0

# we'd like to use `θ` as a buoyancy variable, which requires
# setting the thermal expansion coefficient ``αᵀ`` to

g = Planet.grav(NonDimensionalParameters())
αᵀ = 1 / g

# We then use the "polarization relations" for vertically-standing, horizontally-
# propagating hydrostatic internal waves to initialize two wave packets.
# The hydrostatic polarization relations require
#
# ```math
# \begin{gather}
# (∂_t^2 + f^2) u = - ∂_x ∂_t p
# ∂_t v = - f u
# b = ∂_z p
# \end{gather}
# ```
#
# Thus given ``p = \cos (k x - ω t) \cos (m z)``, we find

δ = domain.L.x / 15

## Gaussian envelope of width δ. Bug fix: the denominator must be
## parenthesized. The previous form `exp(-x^2 / 2 * δ^2)` evaluates as
## `exp((-x^2 / 2) * δ^2)` under Julia's left-to-right evaluation of `*` and
## `/`, which multiplies by δ² instead of dividing by it (compare the correct
## `Gaussian(x, L) = exp(-x^2 / (2 * L^2))` in geostrophic_adjustment.jl).
a(x) = 1e-6 * exp(-x^2 / (2 * δ^2))

ũ(x, z, t) = +a(x) * ω * sin(k * x - ω * t) * cos(m * z)
ṽ(x, z, t) = -a(x) * f * cos(k * x - ω * t) * cos(m * z)
θ̃(x, z, t) = -a(x) * m / k * (ω^2 - f^2) * sin(k * x - ω * t) * sin(m * z)

uᵢ(x, y, z) = ũ(x, z, 0)
vᵢ(x, y, z) = ṽ(x, z, 0)
θᵢ(x, y, z) = θ̃(x, z, 0) + N^2 * z

using ClimateMachine.Ocean.OceanProblems: InitialConditions

initial_conditions = InitialConditions(u = uᵢ, v = vᵢ, θ = θᵢ)

# # Model configuration
#
# We choose a time-step that resolves the gravity wave phase speed,

time_step = 0.005 # close to Δx / c = 0.5 * 1/16, where Δx is nominal resolution

# and build a model with a smidgeon of viscosity and diffusion,

using ClimateMachine.Ocean: HydrostaticBoussinesqSuperModel

model = HydrostaticBoussinesqSuperModel(
    domain = domain,
    time_step = time_step,
    initial_conditions = initial_conditions,
    parameters = NonDimensionalParameters(),
    turbulence_closure = (νʰ = 1e-6, νᶻ = 1e-6, κʰ = 1e-6, κᶻ = 1e-6),
    coriolis = (f₀ = f, β = 0),
    buoyancy = (αᵀ = αᵀ,),
    boundary_tags = ((1, 1), (1, 1), (1, 2)),
);
nothing

# # Fetching data for an animation
#
# To animate the `ClimateMachine.Ocean` solution, we assemble and
# cache the horizontal velocity ``u`` at periodic intervals:

using ClimateMachine.Ocean: current_time
using ClimateMachine.CartesianFields: assemble
using ClimateMachine.GenericCallbacks: EveryXSimulationTime

fetched_states = []

fetch_every = 0.2 * 2π / ω # time

data_fetcher = EveryXSimulationTime(fetch_every) do
    push!(
        fetched_states,
        (
            u = assemble(model.fields.u.elements),
            θ = assemble(model.fields.θ.elements),
            η = assemble(model.fields.η.elements),
            time = current_time(model),
        ),
    )
    return nothing
end

# We also build a callback to log the progress of our simulation,

using Printf
using ClimateMachine.GenericCallbacks: EveryXSimulationSteps
using ClimateMachine.Ocean: current_time, current_step, Δt

print_every = 100 # iterations

wall_clock = [time_ns()]

tiny_progress_printer = EveryXSimulationSteps(print_every) do
    @info(@sprintf(
        "Steps: %d, time: %.2f, Δt: %.2f, max(|u|): %.2e, elapsed time: %.2f secs",
        current_step(model),
        current_time(model),
        Δt(model),
        maximum(abs, model.fields.u),
        1e-9 * (time_ns() - wall_clock[1])
    ))

    wall_clock[1] = time_ns()
end

# # Running the simulation and animating the results
#
# We're ready to launch.
model.solver_configuration.timeend = 6 * 2π / ω

## model.solver.dt = 0.05 # make this work

@info """ Simulating a hydrostatic Gaussian wave packet with parameters

    f (Coriolis parameter): $f
    N (buoyancy frequency): $N
    Internal wave frequency: $(abs(ω))
    Surface wave frequency: $(k * sqrt(g * domain.L.z))
    Surface wave group velocity: $(sqrt(g * domain.L.z))
    Internal wave group velocity: $(N^2 * k / (ω * m))
    Domain width: $(domain.L.x)
    Domain height: $(domain.L.z)

"""

result = ClimateMachine.invoke!(
    model.solver_configuration;
    user_callbacks = [tiny_progress_printer, data_fetcher],
)

# # Animating the result
#
# We first analyze the results to generate plotting limits and contour levels

ηmax = maximum([maximum(abs, state.η.data) for state in fetched_states])
umax = maximum([maximum(abs, state.u.data) for state in fetched_states])

ηlim = (-ηmax, ηmax)
ulim = (-umax, umax)
ulevels = range(ulim[1], ulim[2], length = 31)

# and then animate both fields in a loop,

using Plots

animation = @animate for (i, state) in enumerate(fetched_states)
    @info "Plotting frame $i of $(length(fetched_states))..."

    η_plot = plot(
        state.u.x[:, 1, 1],
        state.η.data[:, 1, 1],
        ylim = ηlim,
        label = nothing,
        title = @sprintf("η at t = %.2f", state.time),
    )

    u_plot = contourf(
        state.u.x[:, 1, 1],
        state.u.z[1, 1, :],
        clamp.(state.u.data[:, 1, :], ulim[1], ulim[2])';
        aspectratio = 64,
        linewidth = 0,
        xlim = domain.x,
        ylim = domain.z,
        xlabel = "x",
        ylabel = "z",
        color = :balance,
        colorbar = false,
        clim = ulim,
        levels = ulevels,
        title = @sprintf("u at t = %.2f", state.time),
    )

    plot(
        η_plot,
        u_plot,
        layout = Plots.grid(2, 1, heights = (0.3, 0.7)),
        link = :x,
        size = (600, 300),
    )
end

gif(animation, "internal_wave.mp4", fps = 5) # hide


================================================
FILE: tutorials/Ocean/shear_instability.jl
================================================

# # Shear instability of a free-surface flow
#
# This script simulates the instability of a sheared, free-surface
# flow using `ClimateMachine.Ocean.HydrostaticBoussinesqSuperModel`.

using Printf
using Plots

using ClimateMachine

ClimateMachine.init()
ClimateMachine.Settings.array_type = Array

using ClimateMachine.Ocean
using ClimateMachine.CartesianDomains
using ClimateMachine.CartesianFields
using ClimateMachine.GenericCallbacks: EveryXSimulationTime
using ClimateMachine.Ocean: current_step, Δt, current_time

using CLIMAParameters: AbstractEarthParameterSet, Planet

# We begin by specifying the domain and mesh,

domain = RectangularDomain(
    Ne = (24, 24, 1),
    Np = 4,
    x = (-3π, 3π),
    y = (-3π, 3π),
    z = (0, 1),
    periodicity = (true, false, false),
)

# Note that the default solid-wall boundary conditions are free-slip and
# insulating on tracers.
# Next, we specify model parameters and the sheared
# initial conditions

struct NonDimensionalParameters <: AbstractEarthParameterSet end
Planet.grav(::NonDimensionalParameters) = 1

initial_conditions = InitialConditions(
    u = (x, y, z) -> tanh(y) + 0.1 * cos(x / 3) + 0.01 * randn(),
    v = (x, y, z) -> 0.1 * sin(y / 3),
    θ = (x, y, z) -> x,
)

model = Ocean.HydrostaticBoussinesqSuperModel(
    domain = domain,
    time_step = 0.05,
    initial_conditions = initial_conditions,
    parameters = NonDimensionalParameters(),
    turbulence_closure = (νʰ = 1e-2, κʰ = 1e-2, νᶻ = 1e-2, κᶻ = 1e-2),
    rusanov_wave_speeds = (cʰ = 0.1, cᶻ = 1),
    boundary_tags = ((0, 0), (1, 1), (1, 2)),
    boundary_conditions = (
        OceanBC(Impenetrable(FreeSlip()), Insulating()),
        OceanBC(Penetrable(FreeSlip()), Insulating()),
    ),
);
nothing

# We prepare a callback that periodically fetches the horizontal velocity and
# tracer concentration for later animation,

u, v, η, θ = model.fields

fetched_states = []

start_time = time_ns()

data_fetcher = EveryXSimulationTime(1) do
    step = @sprintf("Step: %d", current_step(model))
    time = @sprintf("time: %.2f min", current_time(model) / 60)
    max_u = @sprintf("max|u|: %.6f", maximum(abs, u))

    elapsed = (time_ns() - start_time) * 1e-9
    wall_time = @sprintf("elapsed wall time: %.2f min", elapsed / 60)

    @info "$step, $time, $max_u, $wall_time"

    push!(
        fetched_states,
        (u = assemble(u), θ = assemble(θ), time = current_time(model)),
    )
end

# and then run the simulation.

model.solver_configuration.timeend = 100.0

result = ClimateMachine.invoke!(
    model.solver_configuration;
    user_callbacks = [data_fetcher],
)

# Finally, we make an animation of the evolving shear instability.

animation = @animate for (i, state) in enumerate(fetched_states)
    ## `local` avoids clobbering the module-level `u`, `θ` bound to
    ## `model.fields` above.
    local u
    local θ

    @info "Plotting frame $i of $(length(fetched_states))..."

    kwargs = (xlim = domain.x, ylim = domain.y, linewidth = 0, aspectratio = 1)

    x, y = state.u.x[:, 1, 1], state.u.y[1, :, 1]

    u = state.u.data[:, :, 1]
    θ = state.θ.data[:, :, 1]

    ulim = 1
    θlim = 8

    ulevels = range(-ulim, ulim, length = 31)
    θlevels = range(-θlim, θlim, length = 31)

    u_plot = contourf(
        x,
        y,
        clamp.(u, -ulim, ulim)';
        levels = ulevels,
        color = :balance,
        kwargs...,
    )

    θ_plot = contourf(
        x,
        y,
        clamp.(θ, -θlim, θlim)';
        levels = θlevels,
        color = :thermal,
        kwargs...,
    )

    u_title = @sprintf("u at t = %.2f", state.time)
    θ_title = @sprintf("θ at t = %.2f", state.time)

    plot(u_plot, θ_plot, title = [u_title θ_title], size = (600, 250))
end

gif(animation, "shear_instability.mp4", fps = 5)


================================================
FILE: tutorials/TutorialList.jl
================================================

# # Tutorials

# A suite of concrete examples are provided here as a guidance for constructing experiments.

# ## Balance Law

# An introduction on components within a balance law is provided.

# ## Atmos

# Showcase drivers for atmospheric modelling in GCM, single stack, and LES simulations are provided.

# - Dry Idealized GCM: The Held-Suarez configuration is used as a guidance to create a driver that runs a simple GCM simulation.
# - Single Element Stack: The Burgers Equations with a passive tracer is used as a guidance to run the simulation on a single element stack.
# - LES Experiment: The dry rising bubble case is used as a guidance in creating an LES driver.
# - Topography: Experiments of dry flow over prescribed topography (Agnesi mountain) are provided for:
#   * Linear Hydrostatic Mountain
#   * Linear Non-Hydrostatic Mountain

# ## Ocean

# A showcase for Ocean model is still under construction.

# ## Land

# Examples are provided in constructing balance law and solving for fundamental equations in land modelling.

# - Heat: A tutorial shows how to create a HeatModel to solve the heat equation and visualize the outputs.
# - Soil: Examples of solving fundamental equations in the soil model are provided.
#   * Hydraulic Functions: a tutorial to specify the hydraulic function in the Richards equation.
#   * Soil Heat Equations: a tutorial for solving the heat equation in the soil.
#   * Coupled Water and Heat: a tutorial for solving interactive heat and water in the soil model.

# ## Numerics (need to be moved to How-to-Guide)

# - System Solvers: Two numerical methods to solve the linear system Ax=b are provided.
#   * Conjugate Gradient
#   * Batched Generalized Minimal Residual
# - DG Methods
#   * Filters

# ## Diagnostics

# A diagnostic tool that can
# - generate statistics for MPIStateArrays
# - validate with reference values
# for debugging purposes.


================================================
FILE: tutorials/literate_markdown.jl
================================================

# # How to generate a literate tutorial file

# To create a tutorial using ClimateMachine, please use [Literate.jl](https://github.com/fredrikekre/Literate.jl), and consult the [Literate documentation](https://fredrikekre.github.io/Literate.jl/stable/) for questions.

# For now, all literate tutorials are held in the `tutorials` directory

# With Literate, all comments turn into markdown text and any Julia code is read and run *as if it is in the Julia REPL*.
# As a small caveat to this, you might need to suppress the output of certain commands.
# For example, if you define and run the following function

function f()
    return x = [i * i for i in 1:10]
end

x = f()

# The entire list will be output, while

f();

# does not (because of the `;`).

# To show plots, you may do something like the following:

using Plots
plot(x)

# Please consider writing the comments in your tutorial as if they are meant to be read as an *article explaining the topic the tutorial is meant to explain.*

# If there are any specific nuances to writing Literate documentation for ClimateMachine, please let us know!