Full Code of nicholasjclark/mvgam for AI

Repository: nicholasjclark/mvgam
Branch: master
Commit: c8617ef1080d
Files: 478
Total size: 10.3 MB

Directory structure:
gitextract_s9w_3_or/

├── .Rbuildignore
├── .claude/
│   ├── commands/
│   │   ├── bug-find.md
│   │   ├── draft-pr-body.md
│   │   ├── feature-execute.md
│   │   ├── feature-plan.md
│   │   ├── pr-checklist.md
│   │   ├── reflect.md
│   │   ├── review-changes.md
│   │   └── spec-driven-dev.md
│   └── settings.local.json
├── .github/
│   ├── .gitignore
│   ├── CODE_OF_CONDUCT.md
│   ├── CONTRIBUTING.md
│   ├── FUNDING.yml
│   └── workflows/
│       ├── R-CMD-check-rstan.yaml
│       ├── R-CMD-check.yaml
│       ├── memcheck.yaml
│       ├── pkgdown.yaml
│       └── readme.yaml
├── .gitignore
├── CLAUDE.md
├── CRAN-SUBMISSION
├── DESCRIPTION
├── LICENSE
├── LICENSE.md
├── NAMESPACE
├── NEWS.md
├── R/
│   ├── RcppExports.R
│   ├── add_MACor.R
│   ├── add_base_dgam_lines.R
│   ├── add_binomial.R
│   ├── add_corcar.R
│   ├── add_nmixture.R
│   ├── add_poisson_lines.R
│   ├── add_residuals.R
│   ├── add_stan_data.R
│   ├── add_trend_lines.R
│   ├── add_tweedie_lines.R
│   ├── all_neon_tick_data.R
│   ├── as.data.frame.mvgam.R
│   ├── backends.R
│   ├── compute_edf.R
│   ├── conditional_effects.R
│   ├── cpp_funs.R
│   ├── data_grids.R
│   ├── dynamic.R
│   ├── ensemble.R
│   ├── evaluate_mvgams.R
│   ├── families.R
│   ├── fevd.mvgam.R
│   ├── forecast.mvgam.R
│   ├── formula.mvgam.R
│   ├── get_linear_predictors.R
│   ├── get_monitor_pars.R
│   ├── get_mvgam_priors.R
│   ├── globals.R
│   ├── gp.R
│   ├── gratia_methods.R
│   ├── hindcast.mvgam.R
│   ├── how_to_cite.R
│   ├── index-mvgam.R
│   ├── interpret_mvgam.R
│   ├── irf.mvgam.R
│   ├── jsdgam.R
│   ├── lfo_cv.mvgam.R
│   ├── logLik.mvgam.R
│   ├── loo.mvgam.R
│   ├── lv_correlations.R
│   ├── marginaleffects.mvgam.R
│   ├── mcmc_plot.mvgam.R
│   ├── model.frame.mvgam.R
│   ├── monotonic.R
│   ├── mvgam-class.R
│   ├── mvgam-package.R
│   ├── mvgam.R
│   ├── mvgam_diagnostics.R
│   ├── mvgam_fevd-class.R
│   ├── mvgam_forecast-class.R
│   ├── mvgam_formulae.R
│   ├── mvgam_irf-class.R
│   ├── mvgam_residcor-class.R
│   ├── mvgam_setup.R
│   ├── mvgam_trend_types.R
│   ├── noncent_trend.R
│   ├── onAttach.R
│   ├── ordinate.jsdgam.R
│   ├── pairs.mvgam.R
│   ├── piecewise_trends.R
│   ├── plot.mvgam.R
│   ├── plot_mvgam_factors.R
│   ├── plot_mvgam_fc.R
│   ├── plot_mvgam_pterms.R
│   ├── plot_mvgam_randomeffects.R
│   ├── plot_mvgam_resids.R
│   ├── plot_mvgam_series.R
│   ├── plot_mvgam_smooth.R
│   ├── plot_mvgam_trend.R
│   ├── plot_mvgam_uncertainty.R
│   ├── portal_data.R
│   ├── posterior_epred.mvgam.R
│   ├── ppc.mvgam.R
│   ├── predict.mvgam.R
│   ├── print.mvgam.R
│   ├── residual_cor.R
│   ├── residuals.mvgam.R
│   ├── sanitise_modelfile.R
│   ├── score.mvgam_forecast.R
│   ├── series_to_mvgam.R
│   ├── shared_obs_params.R
│   ├── sim_mvgam.R
│   ├── stability.R
│   ├── stan_utils.R
│   ├── stationarise_VAR.R
│   ├── summary.mvgam.R
│   ├── sysdata.rda
│   ├── tidier_methods.R
│   ├── trends.R
│   ├── update.mvgam.R
│   ├── update_priors.R
│   ├── utils-pipe.R
│   └── validations.R
├── README.Rmd
├── README.md
├── build/
│   └── vignette.rds
├── build_vignettes_CRAN.R
├── cran-comments.md
├── data/
│   ├── all_neon_tick_data.rda
│   └── portal_data.rda
├── doc/
│   ├── data_in_mvgam.R
│   ├── data_in_mvgam.Rmd
│   ├── data_in_mvgam.html
│   ├── forecast_evaluation.R
│   ├── forecast_evaluation.Rmd
│   ├── forecast_evaluation.html
│   ├── mvgam_overview.R
│   ├── mvgam_overview.Rmd
│   ├── mvgam_overview.html
│   ├── nmixtures.R
│   ├── nmixtures.Rmd
│   ├── nmixtures.html
│   ├── shared_states.R
│   ├── shared_states.Rmd
│   ├── shared_states.html
│   ├── time_varying_effects.R
│   ├── time_varying_effects.Rmd
│   ├── time_varying_effects.html
│   ├── trend_formulas.R
│   ├── trend_formulas.Rmd
│   └── trend_formulas.html
├── docs/
│   ├── 404.html
│   ├── CODE_OF_CONDUCT.html
│   ├── CONTRIBUTING.html
│   ├── LICENSE-text.html
│   ├── LICENSE.html
│   ├── articles/
│   │   ├── data_in_mvgam.html
│   │   ├── forecast_evaluation.html
│   │   ├── index.html
│   │   ├── mvgam_overview.html
│   │   ├── nmixtures.html
│   │   ├── shared_states.html
│   │   ├── time_varying_effects.html
│   │   └── trend_formulas.html
│   ├── authors.html
│   ├── deps/
│   │   ├── bootstrap-5.2.2/
│   │   │   └── font.css
│   │   ├── bootstrap-5.3.1/
│   │   │   └── font.css
│   │   ├── data-deps.txt
│   │   └── jquery-3.6.0/
│   │       └── jquery-3.6.0.js
│   ├── index.html
│   ├── news/
│   │   └── index.html
│   ├── pkgdown.js
│   ├── pkgdown.yml
│   ├── reference/
│   │   ├── GP.html
│   │   ├── RW.html
│   │   ├── ZMVN.html
│   │   ├── add_residuals.mvgam.html
│   │   ├── add_tweedie_lines.html
│   │   ├── all_neon_tick_data.html
│   │   ├── augment.mvgam.html
│   │   ├── code.html
│   │   ├── conditional_effects.mvgam.html
│   │   ├── dynamic.html
│   │   ├── ensemble.mvgam_forecast.html
│   │   ├── evaluate_mvgams.html
│   │   ├── fevd.mvgam.html
│   │   ├── fitted.mvgam.html
│   │   ├── forecast.mvgam.html
│   │   ├── formula.mvgam.html
│   │   ├── get_monitor_pars.html
│   │   ├── get_mvgam_priors.html
│   │   ├── gratia_mvgam_enhancements.html
│   │   ├── hindcast.mvgam.html
│   │   ├── how_to_cite.mvgam.html
│   │   ├── index-mvgam.html
│   │   ├── index.html
│   │   ├── irf.mvgam.html
│   │   ├── jsdgam.html
│   │   ├── lfo_cv.mvgam.html
│   │   ├── logLik.mvgam.html
│   │   ├── loo.mvgam.html
│   │   ├── lv_correlations.html
│   │   ├── mcmc_plot.mvgam.html
│   │   ├── model.frame.mvgam.html
│   │   ├── monotonic.html
│   │   ├── mvgam-class.html
│   │   ├── mvgam-package.html
│   │   ├── mvgam.html
│   │   ├── mvgam_diagnostics.html
│   │   ├── mvgam_draws.html
│   │   ├── mvgam_families.html
│   │   ├── mvgam_fevd-class.html
│   │   ├── mvgam_forecast-class.html
│   │   ├── mvgam_formulae.html
│   │   ├── mvgam_irf-class.html
│   │   ├── mvgam_marginaleffects.html
│   │   ├── mvgam_trends.html
│   │   ├── pairs.mvgam.html
│   │   ├── pfilter_mvgam_fc.html
│   │   ├── pfilter_mvgam_init.html
│   │   ├── pfilter_mvgam_online.html
│   │   ├── pfilter_mvgam_smooth.html
│   │   ├── piecewise_trends.html
│   │   ├── pipe.html
│   │   ├── plot.mvgam.html
│   │   ├── plot.mvgam_fevd.html
│   │   ├── plot.mvgam_irf.html
│   │   ├── plot.mvgam_lfo.html
│   │   ├── plot_effects.mvgam.html
│   │   ├── plot_mvgam_factors.html
│   │   ├── plot_mvgam_forecasts.html
│   │   ├── plot_mvgam_pterms.html
│   │   ├── plot_mvgam_randomeffects.html
│   │   ├── plot_mvgam_resids.html
│   │   ├── plot_mvgam_series.html
│   │   ├── plot_mvgam_smooth.html
│   │   ├── plot_mvgam_trend.html
│   │   ├── plot_mvgam_uncertainty.html
│   │   ├── portal_data.html
│   │   ├── posterior_epred.mvgam.html
│   │   ├── posterior_linpred.mvgam.html
│   │   ├── posterior_predict.mvgam.html
│   │   ├── pp_check.mvgam.html
│   │   ├── ppc.mvgam.html
│   │   ├── predict.mvgam.html
│   │   ├── print.mvgam.html
│   │   ├── reexports.html
│   │   ├── residual_cor.jsdgam.html
│   │   ├── residuals.mvgam.html
│   │   ├── score.mvgam_forecast.html
│   │   ├── series_to_mvgam.html
│   │   ├── sim_mvgam.html
│   │   ├── stability.mvgam.html
│   │   ├── summary.mvgam.html
│   │   ├── ti.html
│   │   └── update.mvgam.html
│   ├── search.json
│   └── sitemap.xml
├── index.Rmd
├── index.md
├── inst/
│   ├── CITATION
│   └── doc/
│       ├── data_in_mvgam.R
│       ├── data_in_mvgam.Rmd
│       ├── data_in_mvgam.html
│       ├── forecast_evaluation.R
│       ├── forecast_evaluation.Rmd
│       ├── forecast_evaluation.html
│       ├── mvgam_overview.R
│       ├── mvgam_overview.Rmd
│       ├── mvgam_overview.html
│       ├── nmixtures.R
│       ├── nmixtures.Rmd
│       ├── nmixtures.html
│       ├── shared_states.R
│       ├── shared_states.Rmd
│       ├── shared_states.html
│       ├── time_varying_effects.R
│       ├── time_varying_effects.Rmd
│       ├── time_varying_effects.html
│       ├── trend_formulas.R
│       ├── trend_formulas.Rmd
│       └── trend_formulas.html
├── man/
│   ├── GP.Rd
│   ├── RW.Rd
│   ├── ZMVN.Rd
│   ├── add_residuals.mvgam.Rd
│   ├── all_neon_tick_data.Rd
│   ├── augment.mvgam.Rd
│   ├── code.Rd
│   ├── conditional_effects.mvgam.Rd
│   ├── dynamic.Rd
│   ├── ensemble.mvgam_forecast.Rd
│   ├── evaluate_mvgams.Rd
│   ├── fevd.mvgam.Rd
│   ├── fitted.mvgam.Rd
│   ├── forecast.mvgam.Rd
│   ├── formula.mvgam.Rd
│   ├── get_mvgam_priors.Rd
│   ├── gratia_mvgam_enhancements.Rd
│   ├── hindcast.mvgam.Rd
│   ├── how_to_cite.mvgam.Rd
│   ├── index-mvgam.Rd
│   ├── irf.mvgam.Rd
│   ├── jsdgam.Rd
│   ├── lfo_cv.mvgam.Rd
│   ├── logLik.mvgam.Rd
│   ├── loo.mvgam.Rd
│   ├── lv_correlations.Rd
│   ├── mcmc_plot.mvgam.Rd
│   ├── model.frame.mvgam.Rd
│   ├── monotonic.Rd
│   ├── mvgam-class.Rd
│   ├── mvgam-package.Rd
│   ├── mvgam.Rd
│   ├── mvgam_diagnostics.Rd
│   ├── mvgam_draws.Rd
│   ├── mvgam_families.Rd
│   ├── mvgam_fevd-class.Rd
│   ├── mvgam_forecast-class.Rd
│   ├── mvgam_formulae.Rd
│   ├── mvgam_irf-class.Rd
│   ├── mvgam_marginaleffects.Rd
│   ├── mvgam_residcor-class.Rd
│   ├── mvgam_trends.Rd
│   ├── mvgam_use_cases.Rd
│   ├── ordinate.jsdgam.Rd
│   ├── pairs.mvgam.Rd
│   ├── piecewise_trends.Rd
│   ├── pipe.Rd
│   ├── plot.mvgam.Rd
│   ├── plot.mvgam_fevd.Rd
│   ├── plot.mvgam_irf.Rd
│   ├── plot.mvgam_lfo.Rd
│   ├── plot.mvgam_residcor.Rd
│   ├── plot_mvgam_factors.Rd
│   ├── plot_mvgam_forecasts.Rd
│   ├── plot_mvgam_pterms.Rd
│   ├── plot_mvgam_randomeffects.Rd
│   ├── plot_mvgam_resids.Rd
│   ├── plot_mvgam_series.Rd
│   ├── plot_mvgam_smooth.Rd
│   ├── plot_mvgam_trend.Rd
│   ├── plot_mvgam_uncertainty.Rd
│   ├── portal_data.Rd
│   ├── posterior_epred.mvgam.Rd
│   ├── posterior_linpred.mvgam.Rd
│   ├── posterior_predict.mvgam.Rd
│   ├── pp_check.mvgam.Rd
│   ├── ppc.mvgam.Rd
│   ├── predict.mvgam.Rd
│   ├── print.mvgam.Rd
│   ├── print.mvgam_summary.Rd
│   ├── reexports.Rd
│   ├── residual_cor.jsdgam.Rd
│   ├── residuals.mvgam.Rd
│   ├── score.mvgam_forecast.Rd
│   ├── series_to_mvgam.Rd
│   ├── sim_mvgam.Rd
│   ├── stability.mvgam.Rd
│   ├── summary.mvgam.Rd
│   ├── summary.mvgam_fevd.Rd
│   ├── summary.mvgam_forecast.Rd
│   ├── summary.mvgam_irf.Rd
│   ├── tidy.mvgam.Rd
│   └── update.mvgam.Rd
├── memcheck.R
├── misc/
│   ├── BeamOptions.tex
│   ├── cache/
│   │   ├── __packages
│   │   ├── unnamed-chunk-1_d1ca7f1d2764d3ad7f68b1deac173f02.RData
│   │   ├── unnamed-chunk-1_d1ca7f1d2764d3ad7f68b1deac173f02.rdb
│   │   ├── unnamed-chunk-1_d1ca7f1d2764d3ad7f68b1deac173f02.rdx
│   │   ├── unnamed-chunk-2_ad6e810bc91f96416ef0c5c84cba99cc.RData
│   │   ├── unnamed-chunk-2_ad6e810bc91f96416ef0c5c84cba99cc.rdb
│   │   └── unnamed-chunk-2_ad6e810bc91f96416ef0c5c84cba99cc.rdx
│   ├── mvgam_cheatsheet-concordance.tex
│   ├── mvgam_cheatsheet.Rnw
│   └── mvgam_cheatsheet.tex
├── pkgdown/
│   ├── _pkgdown.yml
│   ├── extra.css
│   └── extra.scss
├── src/
│   ├── .gitignore
│   ├── Makevars
│   ├── Makevars.win
│   ├── RcppExports.cpp
│   ├── RcppExports.o
│   ├── trend_funs.cpp
│   └── trend_funs.o
├── tasks/
│   └── fixtures/
│       ├── debug_brms_intercept.rds
│       ├── fit1.rds
│       ├── fit10.rds
│       ├── fit11.rds
│       ├── fit12.rds
│       ├── fit13.rds
│       ├── fit2.rds
│       ├── fit3.rds
│       ├── fit4.rds
│       ├── fit5.rds
│       ├── fit6.rds
│       ├── fit7.rds
│       ├── fit8.rds
│       ├── fit9.rds
│       ├── val_brms_ar1_cor_re.rds
│       ├── val_brms_ar1_fx.rds
│       ├── val_brms_ar1_gp.rds
│       ├── val_brms_ar1_gp2_by.rds
│       ├── val_brms_ar1_gp2d.rds
│       ├── val_brms_ar1_int.rds
│       ├── val_brms_ar1_mo.rds
│       ├── val_brms_ar1_re.rds
│       ├── val_brms_ar1_re_smooth.rds
│       ├── val_brms_ar1_t2_noint.rds
│       ├── val_brms_beta_ar1.rds
│       ├── val_brms_binom_ar1.rds
│       ├── val_brms_cumulative_fx.rds
│       ├── val_brms_hurdle_negbinomial_ar1.rds
│       ├── val_brms_hurdle_poisson_ar1.rds
│       ├── val_brms_mv_gauss.rds
│       ├── val_brms_zero_inflated_poisson_ar1.rds
│       ├── val_mvgam_ar1_cor_re.rds
│       ├── val_mvgam_ar1_fx.rds
│       ├── val_mvgam_ar1_fx_trend.rds
│       ├── val_mvgam_ar1_gp.rds
│       ├── val_mvgam_ar1_gp2_by.rds
│       ├── val_mvgam_ar1_gp2_by_trend.rds
│       ├── val_mvgam_ar1_gp2d.rds
│       ├── val_mvgam_ar1_gp2d_trend.rds
│       ├── val_mvgam_ar1_gp_trend.rds
│       ├── val_mvgam_ar1_int.rds
│       ├── val_mvgam_ar1_mo.rds
│       ├── val_mvgam_ar1_mo_trend.rds
│       ├── val_mvgam_ar1_re.rds
│       ├── val_mvgam_ar1_re_smooth.rds
│       ├── val_mvgam_ar1_re_smooth_trend.rds
│       ├── val_mvgam_ar1_re_trend.rds
│       ├── val_mvgam_ar1_t2_noint.rds
│       ├── val_mvgam_beta_ar1.rds
│       ├── val_mvgam_binom_ar1.rds
│       ├── val_mvgam_cumulative_fx.rds
│       ├── val_mvgam_hurdle_negbinomial_ar1.rds
│       ├── val_mvgam_hurdle_poisson_ar1.rds
│       ├── val_mvgam_mv_gauss.rds
│       ├── val_mvgam_zero_inflated_poisson_ar1.rds
│       ├── validation_brms_ar1.rds
│       ├── validation_brms_re.rds
│       ├── validation_brms_simple.rds
│       ├── validation_mvgam_ar1.rds
│       └── validation_mvgam_simple.rds
├── tests/
│   ├── local/
│   │   ├── setup_tests_local.R
│   │   └── tests-models1.R
│   ├── mvgam_examples.R
│   ├── testthat/
│   │   ├── _snaps/
│   │   │   └── tidier_methods.md
│   │   ├── setup.R
│   │   ├── test-RW.R
│   │   ├── test-backends.R
│   │   ├── test-binomial.R
│   │   ├── test-dynamic.R
│   │   ├── test-example_processing.R
│   │   ├── test-families.R
│   │   ├── test-gp.R
│   │   ├── test-jsdgam.R
│   │   ├── test-marginaleffects.R
│   │   ├── test-monotonic.R
│   │   ├── test-mvgam-methods.R
│   │   ├── test-mvgam.R
│   │   ├── test-mvgam_priors.R
│   │   ├── test-nmixture.R
│   │   ├── test-offset.R
│   │   ├── test-piecewise.R
│   │   ├── test-sim_mvgam.R
│   │   ├── test-summary-structure.R
│   │   ├── test-tidier_methods.R
│   │   └── test-update.R
│   └── testthat.R
└── vignettes/
    ├── data_in_mvgam.Rmd
    ├── forecast_evaluation.Rmd
    ├── mvgam_overview.Rmd
    ├── nmixtures.Rmd
    ├── shared_states.Rmd
    ├── time_varying_effects.Rmd
    └── trend_formulas.Rmd

================================================
FILE CONTENTS
================================================

================================================
FILE: .Rbuildignore
================================================
^mvgam\.Rproj$
^\.Rproj\.user$
^\.git$
^\.github$
^LICENSE\.md$
^_pkgdown\.yml$
^docs$
^tasks$
^.claude$
^Claude\.md$
^index_files$
^doc$
^misc$
^pkgdown$
^README\.Rmd$
^README-.*\.png$
^README_cache$
^man/figures/README-.*\.png$
^index\.Rmd
^index\.md
^Meta
^base_gam.txt
^CRAN-SUBMISSION$
^cran-comments\.md$
^tests/mvgam_examples\.R$
^tests/testthat/Rplots\.pdf$
^tests/local$
^memcheck\.R$
^build_vignettes_CRAN\.R$
^src\.gcda$
^.*-requirements\.md$
^.*-design\.md$
^.*-implementation\.md$
claude.exe
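
A note on how these patterns behave: `R CMD build` treats each line as a Perl-compatible regular expression, matched case-insensitively against file and directory paths relative to the package root, and drops anything that matches (a matched directory is pruned with all of its contents). A minimal R sketch of that matching rule, illustrative only and not part of the package:

```r
# Each .Rbuildignore line is a Perl-compatible regex, matched
# case-insensitively against paths relative to the package root.
ignore <- c("^docs$", "^tasks$", "^misc$", "^.*-design\\.md$")
paths  <- c("docs", "R/mvgam.R", "misc", "feature-design.md")

is_excluded <- function(path) {
  any(vapply(
    ignore,
    function(pat) grepl(pat, path, perl = TRUE, ignore.case = TRUE),
    logical(1)
  ))
}

vapply(paths, is_excluded, logical(1))
#>              docs         R/mvgam.R              misc feature-design.md
#>              TRUE             FALSE              TRUE              TRUE
```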


================================================
FILE: .claude/commands/bug-find.md
================================================
You are a senior software engineer helping investigate and diagnose a bug. Your role is to systematically uncover the root cause through methodical investigation BEFORE proposing any fixes.

**CRITICAL RULES:**
1. Ask only ONE question per response. Never ask multiple questions.
2. Stay in INVESTIGATION MODE until root cause is identified and confirmed.
3. Document findings systematically as you progress.
4. Never jump to fixes without understanding the complete problem.

**PHASE 1: INITIAL TRIAGE**

1. **Symptom Documentation**
   Start by understanding what's visible:
   - "What exactly is happening that shouldn't be?"
   - "What error messages or unexpected behavior are you seeing?"
   - "When did this issue first appear?"

2. **Impact Assessment**
   - "How frequently does this occur?"
   - "Who/what is affected by this bug?"
   - "Is there a workaround currently being used?"
   - "What's the severity/urgency of fixing this?"

**PHASE 2: REPRODUCTION & PATTERN ANALYSIS**

3. **Reproduction Steps**
   Methodically establish how to trigger the bug:
   - "Can you walk me through the exact steps to reproduce this?"
   - "Does it happen every time with these steps, or intermittently?"
   - "Have you found any cases where it DOESN'T happen?"

4. **Environmental Factors**
   - "Which environment(s) show this issue (dev/staging/prod)?"
   - "Are there specific users, data sets, or conditions that trigger it?"
   - "Does it happen in all browsers/devices/platforms?"

5. **Timeline Investigation**
   - "What changed in the system around when this started?"
   - "Were there recent deployments, config changes, or data migrations?"
   - "Has this ever worked correctly? If so, when?"

**PHASE 3: TECHNICAL INVESTIGATION**

6. **Codebase Exploration**
   - "Would it be helpful if I looked at [specific area] of the code?"
   - "Can you show me any relevant logs or stack traces?"
   - "Are there any monitoring/metrics that might provide clues?"

7. **Hypothesis Formation**
   After gathering initial data:
   - Present findings: "Based on what we know: [summary of facts]"
   - Form hypothesis: "This suggests the issue might be in [area] because [reasoning]"
   - Test approach: "To verify this, we could [specific test/check]. Should we proceed?"

8. **Systematic Narrowing**
   Use binary search approach:
   - "Let's isolate whether this is a [frontend/backend] issue by [test]"
   - "Can we determine if this happens [before/after] [specific operation]?"
   - "What happens if we [remove/bypass] [suspected component]?"

**PHASE 4: ROOT CAUSE DOCUMENTATION**

9. **Findings Summary** (MANDATORY CHECKPOINT)
   Once you've identified the likely root cause:
   - State: "I believe I've identified the root cause. Let me document my findings."
   
   Create a Bug Investigation Report:
   - **Summary**: Brief description of the bug and its root cause
   - **Symptoms**: What users/systems experience
   - **Root Cause**: The actual problem in the code/system
   - **Evidence Trail**:
     - Steps that led to discovery
     - Key logs/errors that pointed to the issue
     - Code sections involved
   - **Why It Happens**: Technical explanation
   - **Scope of Impact**: What else might be affected
   - **Reproduction**: Minimal steps to trigger the issue
   
   Ask: "Does this analysis accurately capture the issue?"

**PHASE 5: FIX PLANNING**

10. **Solution Design**
    After root cause confirmation:
    - "Now that we understand the root cause, I'll design a fix."
    
    Create a Fix Plan including:
    - **Proposed Solution**: How to fix the root cause
    - **Alternative Approaches**: Other ways to solve it (with trade-offs)
    - **Testing Strategy**: How to verify the fix works
    - **Regression Prevention**: How to ensure this doesn't happen again
    - **Related Issues**: Other bugs this might fix or create
    
    Ask: "Would you like me to proceed with this fix approach?"

**INVESTIGATION PRINCIPLES:**
- **No Assumptions**: Verify everything, assume nothing
- **Evidence-Based**: Every conclusion must be backed by data
- **Systematic Approach**: Methodical elimination of possibilities
- **Document Everything**: Clear trail of investigation steps
- **Root Cause Focus**: Don't stop at symptoms
- **Consider Side Effects**: Think about what else uses the buggy code

**ANTI-PATTERNS TO AVOID:**
- Jumping to conclusions without evidence
- Fixing symptoms without understanding cause
- Making changes to "see what happens"
- Assuming the first hypothesis is correct
- Ignoring intermittent reproduction patterns

**Start with:**
"I'll help you investigate this bug systematically. Let's start by understanding what's happening. What exactly is the issue you're experiencing?"

**During Investigation:**
- Share discoveries as you make them
- Explain your reasoning for each investigation step
- Be transparent about dead ends
- Celebrate small victories (like successful reproduction)
- Keep a running theory but stay open to being wrong

**Remember:**
The goal is deep understanding, not quick fixes. A well-understood bug is already half-solved.

================================================
FILE: .claude/commands/draft-pr-body.md
================================================
# PR Body Generator Template

You are helping create a PR body for the posit-dev/positron repository. Follow these guidelines:

## Context
 
You MUST use your github tool to look up the corresponding issue #$ARGUMENTS that this PR is addressing. Ask questions to clarify any unknowns.

## Structure

1. **Opening Line**: Start with "Addresses #[issue_number]." (note the period)

2. **Description**: 
   - 2-4 sentences explaining what the PR does
   - Be direct and technical - assume readers understand the codebase
   - Mention if this PR is paired with other PRs in related repos
   - Include any important technical context

3. **Screenshots**: If UI changes, add placeholder: `[Screenshot: Description of what it shows]`

4. **Release Notes**:
   - Only fill in sections that apply (New Features OR Bug Fixes)
   - Use brief, user-facing language
   - Delete the "N/A" for sections you fill in
   - Keep the other section with "N/A"

5. **QA Notes**:
   - Always include specific, runnable code examples
   - Use triple backticks with language identifier (```python, ```r, etc.)
   - Describe expected behavior after running the code
   - Include any special setup steps if needed

## Style Guidelines
- Technical but concise
- No flowery language or unnecessary context
- Focus on what changed and how to verify it
- Use present tense for descriptions ("enables", "fixes", "adds")

## Example Pattern:
````
Addresses #[issue].

[What the PR does in 1-2 sentences]. [Any additional technical context or related PRs].

### Release Notes

#### New Features
- [User-facing description of new functionality]

#### Bug Fixes
- N/A

### QA Notes

[Brief instruction]. [Expected outcome].

```[language]
[Runnable code example]
```
````
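
For instance, a filled-in QA Notes code block for an R-facing change might look like the following (a hypothetical example; the snippet is illustrative and not tied to any particular PR):

```r
# Run in the console: expect a model summary and a residual plot,
# with no errors or warnings.
fit <- lm(mpg ~ wt, data = mtcars)
summary(fit)
plot(fit, which = 1)
```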

## When asking for PR info, start with:
"What issue number does this PR address, and what's the main problem it's solving?"

Then follow up with:
- "Are there any UI changes that need screenshots?"
- "Is this paired with PRs in other repos?"
- "What's the best way to test this change?"



================================================
FILE: .claude/commands/feature-execute.md
================================================
You are an expert software engineer tasked with implementing a change based on an existing implementation plan. You prioritize clarity, maintainability, correctness, and systematic execution.

## Core Principles

### Communication Style
- Be concise but thorough - provide essential details without overwhelming
- Use technical terminology appropriately for the audience
- Proactively highlight risks or concerns when they arise during implementation
- Clearly communicate progress against the implementation plan

### Implementation Philosophy
- **Plan Adherence**: Follow the implementation plan systematically
- **Simplicity First**: Choose the simplest solution that fully meets requirements
- **Future-Proof Thinking**: Consider how changes might evolve, but don't over-engineer
- **Boy Scout Rule**: Leave code better than you found it (minor improvements are okay)
- **Defensive Programming**: Anticipate edge cases and handle errors gracefully

### Collaboration Mindset
- You are a partner, not just an executor
- If the plan has gaps or issues, surface them immediately
- Suggest alternatives when implementation reveals better approaches
- Ask clarifying questions rather than making assumptions

## Implementation Protocol

### 1. Plan Review and Context Building
First, locate and review the implementation plan:
- Ask: "Can you share the implementation plan document for this task?"
- If no plan exists, ask: "Was this task planned using a specific workflow (Plan First, Test First, or Direct Implementation)?"
- Review the plan's structure:
  - Overview and Architecture Decision
  - Step-by-Step Implementation Guide
  - Testing Strategy
  - Deployment and Monitoring plans

### 2. Codebase Orientation
Before starting implementation:
- Scan the codebase to understand architectural patterns
- Verify prerequisites listed in the plan
- Identify existing conventions and patterns to follow
- Note any deviations from what the plan assumes

### 3. Implementation Execution

**For Test First (TDD) Workflow:**
- Start by writing the tests as specified in the plan
- Verify tests fail for the right reasons
- Implement code to make tests pass
- Refactor while keeping tests green

**For Plan First (Research) Workflow:**
- Follow the researched approach from the plan
- Implement each component as specified
- Validate architectural decisions during implementation

**For all workflows:**
- Follow the Step-by-Step Implementation Guide
- Complete one step fully before moving to the next
- Document any deviations from the plan and why

### 4. Progress Tracking
Provide structured updates:
- "Starting Step X of Y: [Description]"
- "Completed Step X. Key changes: [Summary]"
- "Encountered issue with Step X: [Description and proposed solution]"
- Use checkboxes to track completion:
  - [ ] Prerequisites verified
  - [ ] Step 1: [Description]
  - [ ] Step 2: [Description]
  - [ ] Tests written/updated
  - [ ] Documentation updated

### 5. Verification Against Plan
For each implementation step, verify:
- Does it match the plan's specifications?
- Are error handling approaches implemented as planned?
- Are integration points working as designed?
- Are tests covering the scenarios identified in the plan?

### 6. Quality Gates
Before marking complete, ensure:
- [ ] All steps from the implementation plan are complete
- [ ] Code follows existing patterns and style
- [ ] All edge cases from the plan are handled
- [ ] Tests match the Testing Strategy section
- [ ] Documentation updates from the plan are complete
- [ ] No TODO or FIXME comments without explanation
- [ ] Changes are focused and match the plan's scope

## Handling Deviations

### When the Plan Needs Adjustment
If implementation reveals issues with the plan:
1. Stop and document the issue clearly
2. Explain what you discovered during implementation
3. Propose 2-3 alternatives with trade-offs
4. Ask: "The implementation plan needs adjustment here because [reason]. Should I proceed with [proposed solution] or would you prefer a different approach?"

### When Blocked
- Reference the specific step in the plan where you're blocked
- Describe what you've tried based on the plan
- Show any error messages or unexpected behavior
- Ask for guidance on how to proceed

## Getting Started Message

"I'm ready to implement the changes based on the implementation plan. Please share the implementation plan document so I can review it and begin systematic implementation.

Once I have the plan, I'll:
1. Review it thoroughly and identify any prerequisites
2. Confirm my understanding of the approach
3. Begin step-by-step implementation with progress updates
4. Verify each step against the plan's specifications

If no formal plan exists, please let me know what workflow approach was used (Plan First, Test First, or Direct Implementation) and share any requirements or specifications you have."

## During Implementation

Remember to:
- Treat the implementation plan as the source of truth
- Communicate progress in terms of plan steps
- Validate that each step achieves its intended outcome
- Surface any discoveries that might benefit future planning
- Keep changes focused on what's specified in the plan

================================================
FILE: .claude/commands/feature-plan.md
================================================
You are a senior software engineer helping a peer work through a problem, feature implementation, or bug investigation. Your role is to understand the full context through systematic questioning BEFORE proposing solutions.

**CRITICAL RULES:**
1. Ask only ONE question per response. Never ask multiple questions.
2. Stay in DISCOVERY MODE until requirements are documented and confirmed.
3. After requirements, create an IMPLEMENTATION PLAN before any coding.
4. Never jump directly to implementation without an approved plan.
5. Ultrathink about how to solve the problem elegantly. 

**PHASE 1: DISCOVERY PROCESS**

1. **Initial Workflow Selection**
   After understanding the basic problem, ask: "Which workflow approach would be most appropriate for this task?
   - **Plan First (Research)**: For complex problems requiring deep analysis, architectural decisions, or when the solution path isn't immediately clear
   - **Test First (TDD)**: For changes that are easily verifiable with tests, when you have clear input/output expectations
   - **Direct Implementation**: For simple, well-defined tasks with minimal complexity"

2. **Information Gathering Phase**
   - One question per message - wait for answer before proceeding
   - For bugs/issues, investigate systematically:
     - Start with symptoms and error descriptions
     - Probe for patterns (when/where/how often it occurs)
     - Explore what changed recently
     - Investigate error messages/logs
     - Test hypotheses through questions
   - For features/architecture:
     - Current system structure
     - Integration points and dependencies
     - Performance requirements
     - Maintenance and scalability concerns
     - User requirements and constraints
   
   **Quality-focused probes to consider:**
   - "What's the underlying problem this solves?" (avoid XY problems)
   - "How will this be tested?"
   - "What happens when this fails?"
   - "Who else might need to modify this code?"
   - "What are the security implications?"
   - "How will we monitor this in production?"

3. **Codebase Exploration (if needed)**
   - When it would help to see actual code, ask: "Would it be helpful if I looked at [specific file/area] in your codebase?"
   - Only examine code if the user agrees
   - Look for: coupling issues, missing abstractions, test coverage gaps

4. **Requirements Documentation** (MANDATORY CHECKPOINT)
   - Once you have sufficient context, state: "I believe I have enough information to document the requirements."
   - Create a comprehensive summary including:
     - Problem Statement / Goal
     - Context and Background
     - Technical Constraints
     - Quality Requirements (performance, security, maintainability)
     - Success Metrics / Acceptance Criteria
     - Out of Scope items (if any)
     - Key Considerations
     - **Selected Workflow Approach** (Plan First, Test First, or Direct)
   - Present the summary and ask: "Does this accurately capture all the requirements?"

**PHASE 2: WORKFLOW-SPECIFIC PLANNING**

5. **Apply Selected Workflow**

   **If Plan First (Research) was selected:**
   - State: "I'll now research and create a detailed plan using extended thinking."
   - Ask to read relevant files without writing code yet
   - Use "think" or "think hard" to trigger extended analysis
   - Create a comprehensive technical plan with alternatives considered
   
   **If Test First (TDD) was selected:**
   - State: "I'll now create test specifications before implementation."
   - Document test cases with expected inputs/outputs
   - Plan the test structure and coverage
   - Note: Implementation will come after tests are written

   **For all workflows, create an Implementation Plan Document:**

**PHASE 3: IMPLEMENTATION PLANNING** (MANDATORY - No coding until plan approved)

6. **Create Implementation Plan Document**
   - After workflow-specific planning, state: "I'll now create a detailed implementation plan."
   - Create a comprehensive document that someone with NO CONTEXT could follow:
   
   **Implementation Plan Structure:**
   - **Overview**: Brief summary of what's being implemented and why
   - **Architecture Decision**: Chosen approach with justification
   - **Prerequisites**: Tools, dependencies, or setup required
   - **Step-by-Step Implementation Guide**:
     - Each step numbered and clearly described
     - Specific files to create/modify
     - Code structure and key components
     - Integration points
     - Error handling approach
   - **Testing Strategy**:
     - Unit tests to write
     - Integration tests needed
     - Manual testing steps
     - Edge cases to verify
   - **Migration/Deployment Plan**:
     - How to deploy this change
     - Rollback procedure
     - Any data migrations needed
   - **Monitoring & Verification**:
     - How to verify it's working in production
     - Metrics to track
     - Alerts to set up
   - **Documentation Updates**:
     - Code documentation needed
     - README updates
     - API documentation changes
   - **Risk Mitigation**:
     - Potential failure points
     - Contingency plans
   
   End with: "This plan is designed to be followed by someone with no prior context. Does this look complete and ready for implementation?"

**PHASE 4: IMPLEMENTATION** (Only after plan approved)

7. **Execute Implementation**
   - Only proceed after explicit approval of the implementation plan
   - Follow the plan systematically
   - For Test First: Write tests first, verify they fail, then implement
   - For Plan First: Implement according to the researched plan
   - Ask for clarification if any step becomes unclear during execution

**PRINCIPLES:**
- Prefer simple, testable solutions over clever ones
- Question premature optimization but respect legitimate performance needs
- Consider the next developer (including future you)
- Make failure cases explicit
- For debugging: Don't just fix symptoms - understand root causes to prevent recurrence

**Start with:**
"What problem are you trying to solve or what feature are you implementing?"

================================================
FILE: .claude/commands/pr-checklist.md
================================================
You are an expert software engineer with 15+ years of experience in large-scale collaborative projects. You have a keen eye for detail and a deep understanding of what makes code maintainable and reviewable. You're passionate about developer experience and believe that great PRs aren't just about working code—they're about empowering your teammates to understand, review, and build upon your work efficiently.
You approach code review preparation with the mindset of a mentor: thorough but not pedantic, helpful but not condescending. You understand that perfect is the enemy of good, and you help developers find the right balance between comprehensive checks and practical delivery. You've seen how small oversights can waste hours of reviewer time, and you're committed to helping developers submit PRs that respect their colleagues' time and cognitive load.
Your philosophy: "A great PR tells a story—it guides reviewers through the changes, anticipates their questions, and leaves the codebase better than you found it."

You are helping me prepare a pull request for the Positron project. You have the ability to run terminal commands and examine files directly. I need you to guide me through a comprehensive checklist to ensure my code is ready for review. **Important: Not all items will apply to every PR - use your judgment based on the changes to determine what's relevant.**

**Context:**
- My changes are on the current branch, which will be compared against `main`
- The Positron project has specific coding standards (tabs not spaces, change markers for modified files, specific copyright headers, etc.)
- I often forget small things like console.log statements or improper comments

**Your Role:**
1. Assess the scope and nature of the changes first
2. Apply only relevant checklist items based on the context
3. For non-code items, provide guidance or templates I can use
4. Flag what can be exceptional additions for this specific PR
5. **Execute commands and examine files directly rather than asking me to do it**

**Initial Information Gathering:**
1. Run `git branch --show-current` to get the branch name
2. Ask if there's a linked issue/ticket number
3. Run `git diff main...HEAD --name-only` to see all changed files
4. Run `git diff main...HEAD --stat` to see the scope of changes
5. Ask me to briefly describe what the PR does (feature, bugfix, refactor, etc.)
6. Based on the description and files changed, tell me which sections of the checklist you'll focus on
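
If it is ever more convenient to gather this context from an R session, a minimal sketch (assuming `git` is available on the PATH):

```r
# Collect PR context by shelling out to git.
branch  <- system2("git", c("branch", "--show-current"), stdout = TRUE)
changed <- system2("git", c("diff", "main...HEAD", "--name-only"), stdout = TRUE)
stats   <- system2("git", c("diff", "main...HEAD", "--stat"), stdout = TRUE)

cat("Branch:", branch, "\n")
cat("Files changed:", length(changed), "\n")
cat(tail(stats, 1), "\n")  # e.g. "12 files changed, 240 insertions(+), ..."
```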

## ESSENTIAL CHECKLIST

### 1. Code Cleanliness
**Actions to take:**
- Run `git diff main...HEAD | grep -E "(console\.log|TODO|FIXME|XXX|HACK)"` to find problematic patterns
- Search for commented-out code blocks in changed files
- Look for temporary variables or test data in the diffs
- Check for hardcoded values that should be constants

**Report:** List any issues found with file names and line numbers

### 2. Positron Code Style
**Actions to take:**
- Examine new/modified TypeScript files for:
  - Tab indentation (run `git diff main...HEAD | grep "^+  "` to find added lines indented with spaces)
  - Naming conventions in type definitions and functions
  - String quote usage patterns
  - Arrow function usage
  - Missing curly braces on conditionals/loops

**Report:** Show snippets of any style violations found

### 3. Change Management
**Actions to take:**
- For each modified file, check if it has a Posit copyright header
- Look for missing change markers in files without Posit headers
- Verify copyright years in new files (should be 2025)
- Check for problematic import patterns in Microsoft-copyrighted files

**Report:** List files missing change markers or with incorrect copyright headers

### 4. Comments & Documentation
**Actions to take:**
- Examine new functions for missing JSDoc comments
- Look for comments that explain "what" instead of "why"
- Search for outdated comments in modified sections
- Check if user-visible strings are hardcoded or externalized

**Report:** Show functions missing documentation and problematic comments

### 5. Error Handling
**Actions to take:**
- Search for try/catch blocks: `git diff main...HEAD | grep -A5 -B5 "try {"`
- Look for generic error types or missing error messages
- Check for catch blocks that don't log errors
- Examine error messages for clarity

**Report:** List any error handling issues with context
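
As a point of comparison, a catch block that neither swallows the error nor loses its context might look like this in R (an illustrative sketch, not code from the project):

```r
# Log the failure with context and return an explicit sentinel,
# rather than failing silently.
result <- tryCatch(
  readRDS("tasks/fixtures/fit1.rds"),
  error = function(e) {
    message("Could not read fixture 'fit1.rds': ", conditionMessage(e))
    NULL
  }
)
```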

### 6. Testing
**Actions to take:**
- Run the test suite and capture results
- Check if new files have corresponding test files
- Look for skipped tests: `grep -r "\.skip\|test\.todo" --include="*.test.ts" --include="*.spec.ts"`
- Verify test coverage for new functions

**Report:** Show test results and any missing test coverage
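
For an R package like this one, an analogous scan of the testthat suite for skipped tests might look like this sketch:

```r
# List skip() / skip_if*() / skip_on*() calls across the testthat suite.
test_files <- list.files("tests/testthat", pattern = "^test-.*\\.R$",
                         full.names = TRUE)
skips <- lapply(test_files, function(f) {
  grep("skip_if|skip_on|skip\\(", readLines(f), value = TRUE)
})
names(skips) <- basename(test_files)
Filter(length, skips)  # only files that contain skips
```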

### 7. User-Facing Elements (if applicable)
**Actions to take:**
- If package.json modified, check configuration contribution points
- Look for new output channel names
- Check for accessibility attributes in UI components
- Verify UI label capitalization

**Report:** List any naming or accessibility issues

### 8. Final Verification
**Actions to take:**
- Run the build process and capture output
- Execute `git diff main...HEAD` for a final review
- Check for merge conflicts: `git merge-tree $(git merge-base HEAD main) HEAD main`
- Look for unintended files: `git status --porcelain`

**Report:** Confirm build success and flag any issues

## GOING ABOVE AND BEYOND
**Based on the PR context, I'll suggest and help implement the most valuable improvements:**

### 9. Reviewer Experience Enhancements
**For complex PRs, I will:**
- Generate a self-review checklist based on the changes
- Identify complex sections that need explanation
- Create a suggested file review order based on dependencies
- For UI changes, remind you to record GIFs and suggest specific scenarios

**Output:** I'll draft the self-review comment and review guide for you

### 10. Performance & Architecture Documentation
**If I detect algorithmic changes or optimizations:**
- Look for benchmark tests or performance measurements
- Analyze algorithm complexity changes
- Check for architecture pattern changes

**If new patterns detected:**
- Draft an ADR summary template
- Generate a mermaid diagram for complex flows
- Document extension points found in the code

**Output:** I'll provide completed templates based on the code analysis

### 11. Risk Mitigation & Rollback Planning
**For high-risk changes, I will:**
- Analyze the impact radius of changes
- Identify critical paths modified
- Suggest feature flag implementation points
- Recommend specific metrics to track

**Output:** I'll draft a complete "Risk Assessment" section

### 12. Developer Experience
**If new APIs or complex features detected, I will:**
- Generate usage examples from the implementation
- Create debug helper suggestions
- Draft sample configurations
- Write comprehensive testing scenarios

**Output:** I'll provide ready-to-use code snippets and documentation

### 13. Advanced Code Quality
**I will analyze for:**
- Opportunities for branded types
- Places where smart defaults would help
- Missing type guards or predicates
- Generic type opportunities

**Output:** I'll show specific code improvements with examples

### 14. Observability
**For new features or critical path changes, I will:**
- Generate structured logging templates
- Suggest specific metrics based on the feature
- Create correlation ID implementation examples
- Provide error tracking code

**Output:** I'll give you ready-to-paste logging and metrics code

## PR DESCRIPTION GENERATION
After the walkthrough, I'll create a comprehensive PR description based on:
- The actual changes I've analyzed
- The issue description (if provided)
- Any risks or considerations I've identified
- The testing approach discovered

```markdown
## Summary
[Auto-generated based on changes and issue]

## Changes
[Organized by impact, pulled from actual diff]

## Testing
[Based on test files found and testing approach]

## Rollback Plan (if applicable)
[Generated based on risk analysis]

## Review Guide
[Created from file dependency analysis]

## Performance Impact (if applicable)
[Based on algorithmic analysis]

## Screenshots/GIFs (if UI changes)
[Placeholder with specific suggestions]

## Checklist
- [ ] Self-review completed
- [ ] Tests added/updated
- [ ] Documentation updated
- [ ] No console.log statements
- [ ] Change markers added where needed
[Additional context-specific items]
```

## EXECUTION FLOW
1. I'll start by analyzing your changes to understand scope
2. Run automated checks for common issues
3. Apply only relevant checklist items
4. Suggest 2-3 high-impact improvements specific to your PR
5. Generate all templates and documentation
6. Provide a final summary with action items

Let me begin by examining your current branch and changes. I'll start running the initial commands now...


================================================
FILE: .claude/commands/reflect.md
================================================
You are an expert in prompt engineering, specializing in optimizing AI code assistant instructions. Your task is to analyze and improve the instructions for Claude Code.
Follow these steps carefully:

1. Analysis Phase:
Review the chat history in your context window.

Then, examine the current Claude instructions, commands and config
<claude_instructions>
/CLAUDE.md
/.claude/commands/*
**/CLAUDE.md
.claude/settings.json
.claude/settings.local.json
</claude_instructions>

Analyze the chat history, instructions, commands and config to identify areas that could be improved. Look for:
- Inconsistencies in Claude's responses
- Misunderstandings of user requests
- Areas where Claude could provide more detailed or accurate information
- Opportunities to enhance Claude's ability to handle specific types of queries or tasks
- New commands or improvements to a commands name, function or response
- Permissions and MCPs we've approved locally that we should add to the config, especially if we've added new tools or require them for the command to work

2. Interaction Phase:
Present your findings and improvement ideas to the human. For each suggestion:
a) Explain the current issue you've identified
b) Propose a specific change or addition to the instructions
c) Describe how this change would improve Claude's performance

Wait for feedback from the human on each suggestion before proceeding. If the human approves a change, move it to the implementation phase. If not, refine your suggestion or move on to the next idea.

3. Implementation Phase:
For each approved change:
a) Clearly state the section of the instructions you're modifying
b) Present the new or modified text for that section
c) Explain how this change addresses the issue identified in the analysis phase

4. Output Format:
Present your final output in the following structure:

<analysis>
[List the issues identified and potential improvements]
</analysis>

<improvements>
[For each approved improvement:
1. Section being modified
2. New or modified instruction text
3. Explanation of how this addresses the identified issue]
</improvements>

<final_instructions>
[Present the complete, updated set of instructions for Claude, incorporating all approved changes]
</final_instructions>

Remember, your goal is to enhance Claude's performance and consistency while maintaining the core functionality and purpose of the AI assistant. Be thorough in your analysis, clear in your explanations, and precise in your implementations.

================================================
FILE: .claude/commands/review-changes.md
================================================
You are an expert software engineer with 15+ years of experience in large-scale collaborative projects. You have a keen eye for design patterns, code smells, and architectural decisions. You're passionate about writing clean, maintainable code and believe that the best code is not just functional—it's elegant, efficient, and easy to understand.

You approach code review with the mindset of a thoughtful colleague who wants to help create the best possible solution. You balance pragmatism with craftsmanship, knowing when to push for improvements and when to accept "good enough." You've debugged enough production issues to know which shortcuts come back to haunt you, and you share this wisdom constructively.

Your philosophy: "Every line of code is a liability. The best code is code you don't have to write, and the second best is code that's so clear it barely needs comments."

### Your Task
I'm about to submit a PR for the Positron project meant to address GitHub issue #$ARGUMENTS. Before I run through the submission checklist, I want you to review my changes with a critical eye and help me improve the code itself. You have the ability to examine files and run commands directly. You MUST use your github tool to look up the issue context before asking any remaining questions.

**Initial Analysis:**
1. Run `git diff main...HEAD` to see all changes
2. Run `git diff main...HEAD --stat` to understand the scope
3. Ask me to briefly explain the purpose of these changes
4. Identify the type of change (feature, bugfix, refactor, performance, etc.)

### Review Focus Areas

#### 1. Code Complexity & Simplification
**Look for:**
- Functions doing too many things (violating single responsibility)
- Deep nesting that could be flattened
- Complex conditionals that could be extracted or simplified
- Repeated patterns that could be abstracted
- Over-engineering for current requirements

**Actions:** Show me specific examples where code could be simpler, with refactored versions
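
For instance (illustrative R, not drawn from the codebase under review), deep nesting often flattens cleanly with guard clauses:

```r
# Before: the main result is buried three conditionals deep.
describe <- function(x) {
  if (!is.null(x)) {
    if (is.numeric(x)) {
      if (x > 0) "positive" else "non-positive"
    } else {
      "non-numeric"
    }
  } else {
    "missing"
  }
}

# After: guard clauses handle the exceptional cases up front.
describe <- function(x) {
  if (is.null(x)) return("missing")
  if (!is.numeric(x)) return("non-numeric")
  if (x > 0) "positive" else "non-positive"
}
```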

#### 2. Logic & Correctness
**Examine:**
- Edge cases not handled
- Potential null/undefined issues
- Race conditions in async code
- Off-by-one errors
- Incorrect assumptions about data

**Actions:** Point out potential bugs with specific scenarios that would trigger them

#### 3. Performance Considerations
**Analyze:**
- Unnecessary loops or iterations
- Operations that could be cached
- Inefficient data structures
- Blocking operations that could be async
- Memory leaks or retention issues

**Actions:** Suggest specific optimizations with explanations of the impact
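
As one example of the "operations that could be cached" point, a pure but expensive computation can be memoised in a few lines of R (an illustrative sketch; packages such as memoise offer a production-ready version):

```r
# Wrap a pure function so repeated calls with the same key hit a cache.
make_cached <- function(f) {
  cache <- new.env(parent = emptyenv())
  function(key) {
    k <- as.character(key)
    if (!exists(k, envir = cache, inherits = FALSE)) {
      assign(k, f(key), envir = cache)
    }
    get(k, envir = cache, inherits = FALSE)
  }
}

slow_square <- function(x) { Sys.sleep(0.5); x^2 }
fast_square <- make_cached(slow_square)
fast_square(4)  # first call: computes and stores
fast_square(4)  # second call: served from the cache
```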

#### 4. Design & Architecture
**Review:**
- Coupling between components
- Proper separation of concerns
- Consistency with existing patterns in the codebase
- Opportunities for better abstraction
- API design (if creating new interfaces)

**Actions:** Propose architectural improvements with pros/cons

#### 5. Maintainability
**Check for:**
- Magic numbers/strings that should be constants
- Complex logic that needs extraction
- Missing abstractions that would aid testing
- Brittle code that will break with minor changes
- Unclear naming that obscures intent

**Actions:** Provide specific refactoring suggestions

#### 6. Error Handling & Resilience
**Verify:**
- All error paths are handled appropriately
- Errors provide enough context for debugging
- Graceful degradation where appropriate
- No silent failures
- Proper cleanup in error cases

**Actions:** Show me where error handling could be improved

#### 7. Future-Proofing
**Consider:**
- How this code might need to evolve
- Whether the design allows for extension
- If we're painting ourselves into a corner
- Whether we're solving the right problem

**Actions:** Suggest design changes that would make future modifications easier

### Review Process

1. **First Pass - High Level:**
   - Does this change solve the stated problem effectively?
   - Is this the right approach, or is there a simpler way?
   - Are we modifying the right files/components?

2. **Second Pass - Implementation:**
   - Line-by-line review of logic
   - Look for code smells and anti-patterns
   - Check for consistency with codebase conventions

3. **Third Pass - Integration:**
   - How does this fit with existing code?
   - Are there hidden dependencies or side effects?
   - Will this cause problems elsewhere?

### Output Format

Organize your feedback by severity:

**🔴 Critical Issues** (Must fix before PR)
- Bugs, security issues, or major design flaws
- Include specific line numbers and explanations

**🟡 Important Improvements** (Should strongly consider)
- Performance issues, complexity problems, maintainability concerns
- Provide refactored code examples

**🟢 Suggestions** (Nice to have)
- Style improvements, minor optimizations, alternative approaches
- Quick wins that would make the code better

**💡 Learning Opportunities**
- Patterns or techniques that could level up my coding
- Links to relevant best practices or documentation

### Special Considerations for Positron

Remember that Positron extends VS Code, so:
- Check for conflicts with VS Code's architecture
- Ensure changes follow VS Code's extension patterns
- Verify compatibility with the broader ecosystem
- Consider impact on memory/performance in Electron environment

### Collaborative Approach

- Explain the "why" behind each suggestion
- Provide code examples for significant changes
- Acknowledge trade-offs when they exist
- Respect that I might have context you don't
- Focus on the most impactful improvements

Start by analyzing my changes and giving me a high-level assessment, then dive into specific issues ordered by importance.

### Final Deliverable

After completing the review, generate a comprehensive markdown document that summarizes all findings and provides actionable next steps:

**Review Summary Document Structure:**

```markdown
# Code Review Summary - [PR Title/Issue #]

## Overview
- **Change Type:** [Feature/Bugfix/Refactor/etc.]
- **Files Modified:** [count] files, [count] insertions, [count] deletions
- **Overall Assessment:** [Brief summary of change quality]

## Critical Action Items 🔴
- [ ] **[File:Line]** [Description of critical issue]
  - **Problem:** [What's wrong]
  - **Impact:** [Why it matters]
  - **Solution:** [Specific fix needed]

## Important Improvements 🟡
- [ ] **[File:Line]** [Description of improvement]
  - **Current:** [What exists now]
  - **Suggested:** [What should change]
  - **Benefit:** [Why this helps]

## Suggestions 🟢
- [ ] **[File:Line]** [Description of suggestion]
  - **Enhancement:** [Quick description]
  - **Effort:** [Low/Medium/High]

## Architecture Notes 🏗️
[High-level design observations and recommendations]

## Next Steps
1. **Immediate:** Address all 🔴 critical issues
2. **Before PR:** Consider implementing 🟡 important improvements
3. **Future:** Keep 🟢 suggestions for follow-up work

## Ready for PR Checklist
- [ ] All critical issues resolved
- [ ] Important improvements addressed or documented as tech debt
- [ ] Code follows project conventions
- [ ] Error handling is robust
- [ ] Performance considerations reviewed
```

Generate this markdown summary at the end of your review to provide a clear, actionable roadmap for improving the code before submission.


================================================
FILE: .claude/commands/spec-driven-dev.md
================================================
You are a software development agent focused on creating simple, beautiful software through thoughtful specification. Your philosophy: the best code is often the code not written, and the clearest solution emerges from deep understanding of the problem.

## Core Principles

1. **Simplicity First**: Always favor the simplest solution that fully addresses the need
2. **Question Before Building**: Challenge whether features are truly necessary
3. **Iterative Clarity**: Start minimal, expand only when justified
4. **User-Centric**: Focus on actual user needs, not imagined ones

## Initial Context Check

**Before starting any conversation, check for existing project documents:**

1. Look for files matching these patterns:
   - `*-requirements.md`
   - `*-design.md`
   - `*-implementation.md`

2. If documents exist, provide a brief summary:
   ```
   I found existing project documents:
   
   📋 **Requirements**: `<filename>-requirements.md`
   - Problem: [One sentence summary of the problem]
   - Solution: [One sentence summary of the minimal solution]
   - Status: [Approved/Draft/Needs Review]
   
   🎨 **Design**: `<filename>-design.md` 
   - Approach: [One sentence summary]
   - Components: [List main components]
   - Status: [Approved/Draft/Needs Review]
   
   📝 **Implementation**: `<filename>-implementation.md`
   - Increments: [Number of increments planned]
   - Current: [Which increment we're on]
   - Status: [In Progress/Planned/Complete]
   
   Would you like to continue from [next phase] or revisit any existing work?
   ```
   
3. **Assess current project state**:
    - If specification documents exist, provide status summary and offer to continue/revise
    - If project CLAUDE.md exists, incorporate its development practices and build commands
    - If in active development context, adapt workflow to complement existing work

4. **Integrate with established patterns**:
    - Use existing code style guidelines from project documentation
    - Respect established architectural patterns and service dependencies
    - Follow existing testing and build processes

5. If no documents exist, proceed with: "I don't see any existing project documents. Let's start by understanding the problem you're trying to solve."

## Development Process

### Phase 1: Problem Understanding & Requirements

Before documenting anything, engage in discovery through focused, single questions:
- What problem are we actually solving?
- Who experiences this problem and how often?
- What's the simplest possible solution?
- What can we NOT build and still succeed?

Build understanding iteratively—one question at a time—before creating documentation.

Then create a focused requirements document:

```markdown
# Requirements: [Feature Name]

## Problem Statement
[One clear sentence describing the core problem]

## Minimal Solution
[The simplest thing that could possibly work]

## Users & Use Cases
- Primary User: [Who] needs [what] because [why]
- Use Case: [Specific scenario with concrete example]

## Success Criteria
- [ ] [Observable, measurable outcome]
- [ ] [Another specific criterion]

## Non-Goals
[What we're explicitly NOT doing and why]

## Status
- Created: [Date]
- Status: [Draft/Approved]
- Next Step: Design specification
```

**Output**: Generate `<brief-description>-requirements.md`

### Phase 2: Design Specification

Only after requirements approval, design the simplest viable solution:

```markdown
# Design: [Feature Name]

## Approach
[2-3 sentences on the solution strategy]

## Components
[Only essential components, each with clear single responsibility]

### Component Name
- Purpose: [One sentence]
- Interface: [Minimal public API]
- Dependencies: [What it needs, kept minimal]

## Data Flow
[Simple diagram or description of how data moves]

## Error Handling
[Only handle likely errors, fail fast for unexpected ones]

## What We're Not Doing
[Complexity we're avoiding and why]

## Status
- Created: [Date]
- Status: [Draft/Approved]
- Next Step: Implementation planning
```

**Output**: Generate `<brief-description>-design.md`

### Phase 3: Implementation Roadmap

Break work into small, complete increments:

```markdown
# Implementation: [Feature Name]

## Increments
Each increment should be shippable and add value.

### Increment 1: [Core Functionality]
- [ ] Task: [Specific, small change]
  - Files: [What to modify]
  - Validates: [Which requirement]
  - Complete when: [Definition of done]

### Increment 2: [Enhancement]
[Only if truly needed after Increment 1 is live]

## Status
- Created: [Date]
- Current Increment: [1/2/etc]
- Overall Progress: [Not Started/In Progress/Complete]
```

**Output**: Generate `<brief-description>-implementation.md`

## Working Method

### Conversation Style
**Always ask one focused question at a time.** This helps users think clearly and provide specific answers without feeling overwhelmed. Build understanding iteratively through a natural conversation.

### During Problem Understanding:
1. **Ask "Why?" repeatedly**: Get to the root need
2. **Challenge scope**: "Do we really need this?"
3. **Seek the 80/20**: What 20% of effort delivers 80% of value?
4. **Consider alternatives**: Including non-technical solutions
5. **Define "good enough"**: Perfect is the enemy of done

### During Design:
1. **Start with the naive approach**: Why won't the simple solution work?
2. **Add complexity only when forced**: Document why it's necessary
3. **Design for deletion**: Make components easy to remove
4. **Embrace constraints**: They force creative simplicity
5. **Show your work**: Explain rejected alternatives

### During Planning:
1. **First make it work**: Function before form
2. **Then make it right**: Refactor with working tests
3. **Finally, only if needed, make it fast**: Measure first
4. **Each step deployable**: No long-running branches
5. **Learn and adjust**: Each increment informs the next

## Deliverables

### File Naming Convention
For each project, generate three markdown files with consistent naming:

1. **Requirements**: `<brief-description>-requirements.md`
2. **Design**: `<brief-description>-design.md`  
3. **Implementation**: `<brief-description>-implementation.md`

Where `<brief-description>` is a kebab-case identifier (e.g., `user-notifications`, `order-tracking`, `auth-refresh`).

### Phase Transitions

#### When starting fresh:
- After completing requirements discovery, say: "I'll now create the requirements document as `<brief-description>-requirements.md`"
- After requirements approval, say: "Great! I'll now create the design document as `<brief-description>-design.md`"
- After design approval, say: "Excellent! I'll create the implementation plan as `<brief-description>-implementation.md`"

#### When resuming existing work:
- If only requirements exist: "I've reviewed the requirements in `<filename>-requirements.md`. Shall we proceed with the design phase?"
- If requirements and design exist: "I've reviewed both requirements and design documents. Ready to create the implementation plan?"
- If all documents exist: "All project documents are in place. Would you like to review progress or continue with implementation?"

## Quality Markers

Good specifications have:
- **Brevity**: If it's longer than a page, it's probably too complex
- **Clarity**: A junior developer could implement it
- **Focus**: Solves one problem well
- **Flexibility**: Doesn't over-specify implementation details
- **Justification**: Every complexity has a reason

## Question Flow Examples

### Understanding the Problem
Instead of asking multiple questions at once, follow this pattern:

1. "What specific problem led you to need this feature?"
2. [After response] "Who is experiencing this problem?"
3. [After response] "How frequently does this occur?"
4. [After response] "What happens currently when they encounter this?"
5. [After response] "What would success look like for them?"

### Challenging Complexity
When complexity creeps in:

1. "What's the core need behind that requirement?"
2. [After response] "Could we achieve 80% of the value with something simpler?"
3. [After response] "What would break if we didn't include that?"
4. [After response] "Is that risk acceptable for the first version?"

### Design Decisions
When exploring solutions:

1. "What's the most straightforward way to solve this?"
2. [After response] "What makes you think that won't work?"
3. [After response] "Is that constraint real or assumed?"
4. [After response] "What if we tried [simpler alternative]?"

## Example Interactions

### Starting Fresh
**User**: "We need a notification system that..."

**Agent**: "Before we dive into specifications, let me understand the core need. What specific problem are notifications solving for your users?"

[Continue with single questions as before...]

### Resuming Existing Work
**User**: "Can you help me with the notification system?"

**Agent**: "I found existing project documents:

📋 **Requirements**: `delivery-notifications-requirements.md`
- Problem: Customers miss important delivery updates
- Solution: SMS alerts 30 minutes before delivery
- Status: Approved

Would you like to continue with the design phase, or should we revisit the requirements?"

**User**: "Let's continue with design."

**Agent**: "Great! Based on the approved requirements, I'll now create the design document as `delivery-notifications-design.md`. Let me start with the simplest approach..."

## Remember

- The best feature is often the one we don't build
- Simple solutions require deep understanding
- Every line of code is a liability
- When in doubt, do less but do it well
- Beautiful software is simple software that delights users

The goal isn't to create comprehensive documentation—it's to think deeply enough that the solution becomes obvious and simple.

================================================
FILE: .claude/settings.local.json
================================================
{
  "permissions": {
    "allow": [
      "WebFetch(domain:github.com)",
      "Bash(R CMD Rd2pdf:*)",
      "Bash(git pull:*)",
      "Bash(git stash:*)"
    ],
    "deny": []
  }
}


================================================
FILE: .github/.gitignore
================================================
*.html


================================================
FILE: .github/CODE_OF_CONDUCT.md
================================================
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
  community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of
  any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
  without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at codeofconduct@posit.co. 
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of
actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the
community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
<https://www.contributor-covenant.org/version/2/1/code_of_conduct.html>.

Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/inclusion).

For answers to common questions about this code of conduct, see the FAQ at
<https://www.contributor-covenant.org/faq>. Translations are available at <https://www.contributor-covenant.org/translations>.

[homepage]: https://www.contributor-covenant.org


================================================
FILE: .github/CONTRIBUTING.md
================================================
# Contributing to mvgam

This document outlines how to propose a change to mvgam.
For a detailed discussion on contributing to this and other open source R packages, please see the [development contributing guide](https://rstd.io/tidy-contrib) and our [code review principles](https://code-review.tidyverse.org/).

## Fixing typos

You can fix typos, spelling mistakes, or grammatical errors in the documentation directly using the GitHub web interface, as long as the changes are made in the _source_ file. 
This generally means you'll need to edit [roxygen2 comments](https://roxygen2.r-lib.org/articles/roxygen2.html) in an `.R`, not a `.Rd` file. 
You can find the `.R` file that generates the `.Rd` by reading the comment in the first line.

## Bigger changes

If you want to make a bigger change, it's a good idea to first file an issue and make sure someone from the team agrees that it’s needed. 
If you’ve found a bug, please file an issue that illustrates the bug with a minimal 
[reprex](https://www.tidyverse.org/help/#reprex) (this will also help you write a unit test, if needed).
See the tidyverse guide on [how to create a great issue](https://code-review.tidyverse.org/issues/) for more advice.

### Pull request process

*   Fork the package and clone onto your computer. If you haven't done this before, we recommend using `usethis::create_from_github("nicholasjclark/mvgam", fork = TRUE)`.

*   Install all development dependencies with `devtools::install_dev_deps()`, and then make sure the package passes R CMD check by running `devtools::check()`. 
    If R CMD check doesn't pass cleanly, it's a good idea to ask for help before continuing. 
*   Create a Git branch for your pull request (PR). We recommend using `usethis::pr_init("brief-description-of-change")`.

*   Make your changes, commit to git, and then create a PR by running `usethis::pr_push()`, and following the prompts in your browser.
    The title of your PR should briefly describe the change.
    The body of your PR should contain `Fixes #issue-number`.

*  For user-facing changes, add a bullet to the top of `NEWS.md` (i.e. just below the first header). Follow the style described in <https://style.tidyverse.org/news.html>.
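
Putting those steps together, the full sequence looks roughly like this (the branch name is a placeholder):

```r
usethis::create_from_github("nicholasjclark/mvgam", fork = TRUE)
devtools::install_dev_deps()
devtools::check()  # should pass cleanly before you branch
usethis::pr_init("brief-description-of-change")
# ... make changes, commit to git ...
usethis::pr_push()  # then follow the prompts in your browser
```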

### Code style

*   New code should follow the tidyverse [style guide](https://style.tidyverse.org) where possible. 
    You can use the [styler](https://CRAN.R-project.org/package=styler) package to apply these styles, but please don't restyle code that has nothing to do with your PR.  

*  We use [roxygen2](https://cran.r-project.org/package=roxygen2), with [Markdown syntax](https://cran.r-project.org/web/packages/roxygen2/vignettes/rd-formatting.html), for documentation.  

*  We use [testthat](https://cran.r-project.org/package=testthat) for unit tests. 
   Contributions with test cases included are easier to accept.  

## Code of Conduct

Please note that the mvgam project is released with a
[Contributor Code of Conduct](CODE_OF_CONDUCT.md). By contributing to this
project you agree to abide by its terms.

## Roadmap

The mvgam package is in a stable state of development, with continued active development planned by the primary authors.


================================================
FILE: .github/FUNDING.yml
================================================
github: nicholasjclark


================================================
FILE: .github/workflows/R-CMD-check-rstan.yaml
================================================
# Workflow derived from https://github.com/r-lib/actions/tree/master/examples
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

name: R-CMD-check-rstan

jobs:
  R-CMD-check:
    runs-on: ${{ matrix.config.os }}

    name: ${{ matrix.config.os }} (${{ matrix.config.r }})

    strategy:
      fail-fast: false
      matrix:
        config:
          - {os: macos-latest,   r: 'release'}
          - {os: windows-latest, r: 'release'}
          # use 4.0 or 4.1 to check with rtools40's older compiler
          - {os: windows-latest, r: 'oldrel-4'}
          - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'}
          - {os: ubuntu-latest, r: 'release'}

# Use a CRAN-like environment to emulate CRAN submission checks
    env:
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
      NOT_CRAN: false
      R_KEEP_PKG_SOURCE: yes

    steps:
      - uses: actions/checkout@v4
      - uses: n1hility/cancel-previous-runs@v2
        with:
          token: ${{ secrets.GITHUB_TOKEN }}

      - uses: r-lib/actions/setup-pandoc@v2
      - uses: r-lib/actions/setup-r@v2
        with:
          r-version: ${{ matrix.config.r }}
          rtools-version: ${{ matrix.config.rtools }}
          http-user-agent: ${{ matrix.config.http-user-agent }}
          use-public-rspm: true

      # Install some suggests packages
      - uses: r-lib/actions/setup-r-dependencies@v2
        with:
          dependencies: NA
          extra-packages: |
              BH
              RcppEigen
              knitr
              extraDistr
              lubridate
              wrswoR
              tweedie
              corpcor
              splines2
              ggrepel
              ggpp
              ggarrow
              scoringRules
              matrixStats
              xts
              collapse
              rmarkdown
              ggplot2
              rjags
              coda
              testthat
              usethis
              rcmdcheck

      - name: Ensure install works
        run: |
          install.packages('mvgam', repos = "http://cran.rstudio.com")
        shell: Rscript {0}

      - uses: r-lib/actions/check-r-package@v2
        with:
          build_args: 'c("--no-manual", "--no-build-vignettes")'
          args: 'c("--no-examples", "--no-manual", "--as-cran", "--ignore-vignettes")'


================================================
FILE: .github/workflows/R-CMD-check.yaml
================================================
# Workflow derived from https://github.com/r-lib/actions/tree/master/examples
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

name: R-CMD-check

jobs:
  R-CMD-check:
    runs-on: ${{ matrix.config.os }}

    name: ${{ matrix.config.os }} (${{ matrix.config.r }})

    strategy:
      fail-fast: false
      matrix:
        config:
          # Cmdstan install not working on win-latest release; check back later
          - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'}
          - {os: ubuntu-latest, r: 'release'}

    env:
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
      R_KEEP_PKG_SOURCE: yes

    steps:
      - uses: actions/checkout@v4
      - uses: n1hility/cancel-previous-runs@v2
        with:
          token: ${{ secrets.GITHUB_TOKEN }}

      - uses: r-lib/actions/setup-pandoc@v2
      - uses: r-lib/actions/setup-r@v2
        with:
          r-version: ${{ matrix.config.r }}
          rtools-version: ${{ matrix.config.rtools }}
          http-user-agent: ${{ matrix.config.http-user-agent }}
          use-public-rspm: true

      # As cmdstanr is not yet on CRAN, configure the action to only install the
      # 'Depends' packages by default and then manually specify the 'Suggests'
      # packages that are needed for R CMD CHECK
      - uses: r-lib/actions/setup-r-dependencies@v2
        with:
          dependencies: NA
          extra-packages: |
              knitr
              extraDistr
              lubridate
              wrswoR
              tweedie
              corpcor
              splines2
              scoringRules
              matrixStats
              xts
              collapse
              rmarkdown
              ggplot2
              rjags
              ggrepel
              ggpp
              ggarrow
              coda
              stan-dev/cmdstanr
              testthat
              usethis
              rcmdcheck
              devtools

      - name: Build Cmdstan
        run: |
          install.packages('mvgam', repos = "http://cran.rstudio.com")
          cmdstanr::check_cmdstan_toolchain(fix = TRUE)
          cmdstanr::install_cmdstan()
        shell: Rscript {0}

      - name: Install colorspace manually
        run: |
          install.packages("colorspace", repos = "https://cran.rstudio.com/")
        shell: Rscript {0}
        
      - uses: r-lib/actions/check-r-package@v2
        with:
          build_args: 'c("--no-manual", "--no-build-vignettes")'
          args: 'c("--no-manual", "--as-cran", "--ignore-vignettes")'

      - name: Run dontrun examples
        run: |
          devtools::run_examples(run_dontrun = TRUE, fresh = FALSE)
        shell: Rscript {0}


================================================
FILE: .github/workflows/memcheck.yaml
================================================
# Workflow derived from https://github.com/r-lib/actions/tree/master/examples
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

name: memcheck

jobs:
  memcheck:
    runs-on: ubuntu-latest
    env:
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
      NOT_CRAN: false

    steps:
      - uses: actions/checkout@v2
      - uses: n1hility/cancel-previous-runs@v2
        with:
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: apt install dependency
        run: |
          sudo apt-get update
          sudo apt-get -y install valgrind

      - uses: r-lib/actions/setup-r@v2
        with:
          use-public-rspm: true

      - uses: r-lib/actions/setup-r-dependencies@v2
        with:
          dependencies: NA
          extra-packages: |
              devtools
              testthat

      - name: Memory check
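        # Run the memcheck.R test driver under valgrind to surface memory
        # errors in the package's compiled C++ code (Linux only; slow)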
        run: |
          R -d valgrind -f memcheck.R

      - name: Configure GH
        run: |
          git config --global user.name "$GITHUB_ACTOR"
          git config --global user.email "$GITHUB_ACTOR@users.noreply.github.com"


      - name: Install Air latest
        shell: bash
        run: |
          curl -LsSf https://github.com/posit-dev/air/releases/latest/download/air-installer.sh | sh

      - name: Air version
        shell: bash
        run: |
          echo ""
          echo "Formatting R code with $(air --version)"
          echo ""

      - name: Format R code using Air
        shell: bash
        run: air format .

      - name: Commit any Air formatting changes
        shell: bash
        run: |
          find . -type f \( -name '*.r' -o -name '*.R' \) -exec git add -u {} +
          if ! git diff --cached --quiet; then
            echo "Staged modified R files"
            git commit -m '`air format` (GitHub Actions)'
            git push
          else
            echo "No changes found in any R files"
          fi


================================================
FILE: .github/workflows/pkgdown.yaml
================================================
# Workflow derived from https://github.com/r-lib/actions/tree/master/examples
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]
  release:
    types: [published]
  workflow_dispatch:

name: pkgdown

jobs:
  pkgdown:
    runs-on: ubuntu-latest
    # Only restrict concurrency for non-PR jobs
    concurrency:
      group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }}

    env:
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
      R_KEEP_PKG_SOURCE: yes

    steps:
      - uses: actions/checkout@v3

      - uses: r-lib/actions/setup-pandoc@v2
      - uses: r-lib/actions/setup-r@v2
        with:
          use-public-rspm: true

      - uses: r-lib/actions/setup-r-dependencies@v2
        with:
          dependencies: NA
          extra-packages: |
              local::.
              remotes
              knitr
              extraDistr
              lubridate
              gratia
              wrswoR
              tweedie
              ggrepel
              ggpp
              ggarrow
              corpcor
              splines2
              scoringRules
              matrixStats
              xts
              collapse
              rmarkdown
              stan-dev/cmdstanr
              usethis

      - name: Build Cmdstan and install development brms version
        run: |
          cmdstanr::check_cmdstan_toolchain(fix = TRUE)
          cmdstanr::install_cmdstan()
          remotes::install_github('paul-buerkner/brms')
          remotes::install_version("pkgdown", version = "2.0.9")
        shell: Rscript {0}

      - name: Build site
        run: pkgdown::build_site_github_pages(lazy = TRUE, run_dont_run = TRUE, new_process = FALSE, install = FALSE)
        shell: Rscript {0}

      - name: Deploy to GitHub pages 🚀
        if: github.event_name != 'pull_request'
        uses: JamesIves/github-pages-deploy-action@v4.4.1
        with:
          clean: false
          branch: gh-pages
          folder: docs


================================================
FILE: .github/workflows/readme.yaml
================================================
# Workflow derived from https://github.com/r-lib/actions/tree/master/examples
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
on:
  push:
    branches: master

name: render-rmarkdown

jobs:
  render-rmarkdown:
    runs-on: ubuntu-latest
    env:
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: r-lib/actions/setup-pandoc@v2

      - uses: r-lib/actions/setup-r@v2
        with:
          use-public-rspm: true

      - uses: r-lib/actions/setup-r-dependencies@v2
        with:
          extra-packages: |
            rmarkdown
            knitr
            gratia
            patchwork
            remotes
            splines2
            extraDistr
            stan-dev/cmdstanr

      - name: Install mvgam
        run: Rscript -e 'remotes::install_github("nicholasjclark/mvgam", upgrade_dependencies = FALSE)'

      - name: Build Cmdstan
        run: |
          cmdstanr::check_cmdstan_toolchain(fix = TRUE)
          cmdstanr::install_cmdstan()
        shell: Rscript {0}

      - name: Render README
        run: Rscript -e 'rmarkdown::render("README.Rmd", output_format = "md_document")'

      - name: Render pkgdown index
        run: Rscript -e 'rmarkdown::render("index.Rmd", output_format = "md_document")'

      - name: Commit results
        run: |
          git config --local user.name "$GITHUB_ACTOR"
          git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com"
          git commit README.md index.md man/figures/README*.png -m 'Re-build README.Rmd' || echo "No changes to commit"
          git push origin || echo "No changes to commit"


================================================
FILE: .gitignore
================================================
*.Rproj*
.Rhistory
.RData
.Ruserdata
.Rprofile
Meta
.Rproj.user
/Meta/
desktop.ini
cran-comments.md
src.gcda
claude.exe


================================================
FILE: CLAUDE.md
================================================
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Package Overview

mvgam is an R package for fitting Multivariate Dynamic Generalized Additive Models. The package enables Bayesian forecasting and analysis of multivariate time series data using flexible GAM frameworks. It can handle various data types (counts, proportions, continuous values) with complex temporal dynamics, missing data, and seasonality, building custom Stan models that provide robust Bayesian inference.

## Development Commands

### Testing
- `R CMD check` - Full package check (used in CI)
- `testthat::test_check("mvgam")` - Run all tests via testthat
- `devtools::test()` - Run tests interactively during development

### Building and Documentation
- `devtools::document()` - Generate documentation from roxygen2 comments
- `pkgdown::build_site()` - Build package website
- `devtools::build()` - Build package tarball
- `devtools::install()` - Install package locally for development

### Package Structure
- Uses standard R package structure with DESCRIPTION, NAMESPACE, and man/ directories
- Source code organized in the `R/` directory, with files grouped by function and method
- Vignettes in the `vignettes/` directory demonstrate key features
- Tests in `tests/testthat/`

## Architecture

### Key Design Patterns

**S3 Type System**: Uses S3 for structured objects
- Maintains compatibility with R's statistical modeling ecosystem
- Supports method inheritance and specialization

**Layered Architecture Pattern**: Uses clear separation of concerns across multiple layers:
- Interface Layer: User-facing functions (mvgam(), forecast(), plot()) provide clean APIs
- Model Specification Layer: Formula processing, trend model constructors (RW(), VAR(), GP()), family definitions
- Code Generation Layer: Translates R specifications into Stan/JAGS model code
- Computational Backend Layer: Interfaces with Stan/JAGS for MCMC sampling
- Post-processing Layer: Methods for analysis, diagnostics, and visualization
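
A hedged illustration of crossing these layers from user code (both functions are exported by the package; the `fit` object is hypothetical):

```r
# Interface -> code generation: inspect the Stan program built for a model
stancode(fit)

# Post-processing: scaffold a citable methods description of the model
how_to_cite(fit)
```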

**Modular Component System**: Modular design where different components can be mixed and matched:
- Trend Modules: Independent implementations of different temporal dynamics (Random Walk, AR, VAR, Gaussian Process, CAR)
- Family Modules: Separate observation model implementations for different distributions
- Backend Modules: Pluggable computational backends (Stan via rstan/cmdstanr, JAGS)
- Visualization Modules: Modular plotting system with specialized functions for different aspects

**Bayesian Workflow Integration Pattern**: Designed around the complete Bayesian modeling workflow:
- Model Building: Formula specification, prior setup, trend model selection
- Fitting: MCMC sampling with convergence monitoring
- Checking: Posterior predictive checks, residual analysis, diagnostic plots
- Inference: Parameter summarization, uncertainty quantification
- Prediction: Forecasting with proper uncertainty propagation
- Evaluation: Cross-validation, scoring rules, model comparison

## Key Files

### Core Model Functions
- `R/mvgam.R` - Main model fitting function that:
  - Validates and processes GAM formulas for observation and trend processes
  - Sets up Stan/JAGS model code generation
  - Runs MCMC sampling and returns fitted model objects

- Trend model constructors in `R/mvgam_trend_types.R` (`RW()`, `AR()`, `VAR()`, `GP()`, `CAR()`):
  - Define temporal dynamics specifications
  - Configure stationarity constraints and correlation structures
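
A hedged sketch of how these pieces combine in a model call; the data, formula, and settings below are illustrative (argument names assumed from the package documentation), not taken from this repository:

```r
library(mvgam)

# Simulate two series with latent AR(1) dynamics, then fit a matching model
sim <- sim_mvgam(n_series = 2, trend_model = AR(p = 1))
fit <- mvgam(
  y ~ s(season, bs = "cc", k = 8),  # observation-model smooth of season
  trend_model = AR(p = 1),          # latent AR(1) process for the trends
  family = poisson(),
  data = sim$data_train,
  chains = 2
)
```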

### Prediction & Forecasting
- `R/forecast.mvgam.R` - Generates in-sample and out-of-sample forecasts:
  - Respects temporal dynamics for proper time series forecasting
  - Supports multiple prediction types (response, trend, link)
  - Returns structured forecast objects with uncertainty quantification

- `R/predict.mvgam.R` - General prediction treating trends as random effects
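
A hedged usage sketch, continuing the hypothetical `fit` and `sim` objects from the example above:

```r
# Out-of-sample forecast that propagates the latent temporal dynamics
fc <- forecast(fit, newdata = sim$data_test)
plot(fc)                   # plot.mvgam_forecast
score(fc, score = "crps")  # proper scoring of the forecast distribution
```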
  
### Visualization Suite
- `R/plot.mvgam.R` - Main plotting method with multiple types:
  - Series plots, residual diagnostics, smooth functions, forecasts
  - Calls specialized functions: `plot_mvgam_fc()`, `plot_mvgam_series()`, `plot_mvgam_trend()`

### Model Analysis
- `R/summary.mvgam.R` - Parameter estimates and convergence diagnostics
- `R/ppc.mvgam.R` - Posterior predictive checks using bayesplot
- `R/residuals.mvgam.R` - Dunn-Smyth residuals for model checking
- `R/loo.mvgam.R` - Approximate leave-one-out cross-validation
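
Together these implement the model-checking stage of the workflow; a hedged sketch using the same hypothetical `fit`:

```r
summary(fit)              # parameter estimates and convergence diagnostics
pp_check(fit)             # posterior predictive check (via bayesplot)
resids <- residuals(fit)  # Dunn-Smyth residuals for model checking
loo(fit)                  # approximate leave-one-out cross-validation
```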

### Family Support
- Extensive distribution families in `R/families.R`:
  - Standard: gaussian, poisson, binomial, Gamma
  - Extended: negative binomial, beta, Student-t, Tweedie
  - Special: N-mixture models for imperfect detection
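
Selecting a family is a one-argument change to the hypothetical call above (illustrative only; each family has its own response-support requirements):

```r
mvgam(y ~ s(season), data = sim$data_train, family = nb())       # negative binomial
mvgam(y ~ s(season), data = sim$data_train, family = betar())    # Beta
mvgam(y ~ s(season), data = sim$data_train, family = tweedie())  # Tweedie
```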
  
### Testing and Quality
- `tests/testthat/` - Test suite
- `vignettes/` - Documentation and examples
- `.github/workflows/` - CI/CD with R CMD check, pkgdown site building, and valgrind memory checks

## Development Notes

### Testing Strategy
- Separate test files for each major component
- Prioritize internal mvgam objects (e.g. `mvgam:::mvgam_example1`) for testing

### File Management
- Specification documents (`*-requirements.md`, `*-design.md`, `*-implementation.md`) should be automatically added to `.Rbuildignore`
- Any temporary development files should be excluded from package builds
- When creating new specification files, always update `.Rbuildignore` to prevent inclusion in built package
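
One hedged way to do this from R (the file name is a placeholder; `usethis` is already in Suggests):

```r
# Adds the spec document to .Rbuildignore, escaping the name as a regex
usethis::use_build_ignore("my-feature-requirements.md")
```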

### Code Organization
- R source files should follow a consistent naming pattern
- Utility functions should be grouped by purpose (`utils-*.R`)
- Standalone imports should minimize external dependencies

### Documentation
- Roxygen2 comments for all exported functions
- tidyverse styling (https://style.tidyverse.org/) for all R and roxygen code
- Vignettes demonstrate in-depth use cases
- pkgdown site provides comprehensive documentation
- Examples demonstrate simpler use cases


================================================
FILE: CRAN-SUBMISSION
================================================
Version: 1.1.0
Date: 2024-04-18 23:09:30 UTC
SHA: 3d852f1f92b4d6d10ed64dc212fd6b0ebf933bca


================================================
FILE: DESCRIPTION
================================================
Package: mvgam
Title: Multivariate (Dynamic) Generalized Additive Models
Version: 1.1.595
Date: 2026-01-19
Authors@R: c(person("Nicholas J", "Clark", email = "nicholas.j.clark1214@gmail.com", 
                    role = c("aut", "cre"), comment = c(ORCID = "0000-0001-7131-3301")),
             person("KANK", "Karunarathna", role = c("ctb"),
                    comment = c("ARMA parameterisations and factor models", ORCID = "0000-0002-8995-5502")),
             person("Sarah", "Heaps", role = c("ctb"),
                    comment = c("VARMA parameterisations", ORCID = "0000-0002-5543-037X")),
             person("Scott", "Pease", role = c("ctb"),
                    comment = c("broom enhancements", ORCID = "0009-0006-8977-9285")),
             person("Matthijs", "Hollanders", role = c("ctb"),
                    comment = c("ggplot visualizations", ORCID = "0000-0003-0796-1018")))
Description: Fit Bayesian Dynamic Generalized Additive Models to multivariate observations. Users can build nonlinear State-Space models that can incorporate semiparametric effects in observation and process components, using a wide range of observation families. Estimation is performed using Markov Chain Monte Carlo with Hamiltonian Monte Carlo in the software 'Stan'. References: Clark & Wells (2023) <doi:10.1111/2041-210X.13974>.
URL: https://github.com/nicholasjclark/mvgam, https://nicholasjclark.github.io/mvgam/
BugReports: https://github.com/nicholasjclark/mvgam/issues
License: MIT + file LICENSE
Depends: 
    R (>= 3.6.0)
Imports: 
    brms (>= 2.21.0),
    methods,
    mgcv (>= 1.8-13),
    insight (>= 0.19.1),
    marginaleffects (>= 0.29.0),
    Rcpp (>= 0.12.0),
    rstan (>= 2.29.0),
    posterior (>= 1.0.0),
    loo (>= 2.3.1),
    rstantools (>= 2.1.1),
    bayesplot (>= 1.5.0),
    ggplot2 (>= 3.5.0),
    mvnfast,
    purrr,
    dplyr,
    magrittr,
    rlang,
    generics,
    tibble (>= 3.0.0),
    patchwork (>= 1.2.0)
Encoding: UTF-8
LazyData: true
Roxygen: list(markdown = TRUE)
RoxygenNote: 7.3.2
Suggests: 
    scoringRules,
    matrixStats,
    cmdstanr (>= 0.5.0),
    tweedie,
    splines2,
    extraDistr,
    corpcor,
    wrswoR,
    ggrepel,
    ggpp,
    ggarrow,
    xts,
    lubridate,
    knitr, 
    collapse,
    rmarkdown, 
    rjags,
    coda,
    runjags,
    usethis,
    testthat,
    colorspace
Enhances:
    gratia (>= 0.9.0),
    tidyr
Additional_repositories: https://mc-stan.org/r-packages/
LinkingTo: Rcpp, RcppArmadillo
VignetteBuilder: knitr


================================================
FILE: LICENSE
================================================
YEAR: 2021
COPYRIGHT HOLDER: Nicholas Clark


================================================
FILE: LICENSE.md
================================================
# MIT License

Copyright (c) 2021 Nicholas Clark

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: NAMESPACE
================================================
# Generated by roxygen2: do not edit by hand

S3method(Predict.matrix,mod.smooth)
S3method(Predict.matrix,moi.smooth)
S3method(add_residuals,mvgam)
S3method(as.array,mvgam)
S3method(as.data.frame,mvgam)
S3method(as.matrix,mvgam)
S3method(as_draws,mvgam)
S3method(as_draws_array,mvgam)
S3method(as_draws_df,mvgam)
S3method(as_draws_list,mvgam)
S3method(as_draws_matrix,mvgam)
S3method(as_draws_rvars,mvgam)
S3method(augment,mvgam)
S3method(coef,mvgam)
S3method(conditional_effects,mvgam)
S3method(ensemble,mvgam_forecast)
S3method(fevd,mvgam)
S3method(find_predictors,mvgam)
S3method(find_predictors,mvgam_prefit)
S3method(fitted,mvgam)
S3method(forecast,mvgam)
S3method(formula,mvgam)
S3method(formula,mvgam_prefit)
S3method(get_coef,mvgam)
S3method(get_data,mvgam)
S3method(get_data,mvgam_prefit)
S3method(get_predict,mvgam)
S3method(get_vcov,mvgam)
S3method(hindcast,mvgam)
S3method(how_to_cite,mvgam)
S3method(irf,mvgam)
S3method(lfo_cv,mvgam)
S3method(logLik,mvgam)
S3method(log_posterior,mvgam)
S3method(loo,mvgam)
S3method(loo_compare,mvgam)
S3method(mcmc_plot,mvgam)
S3method(model.frame,mvgam)
S3method(model.frame,mvgam_prefit)
S3method(neff_ratio,mvgam)
S3method(nuts_params,mvgam)
S3method(ordinate,jsdgam)
S3method(pairs,mvgam)
S3method(plot,mvgam)
S3method(plot,mvgam_conditional_effects)
S3method(plot,mvgam_fevd)
S3method(plot,mvgam_forecast)
S3method(plot,mvgam_irf)
S3method(plot,mvgam_lfo)
S3method(plot,mvgam_residcor)
S3method(posterior_epred,mvgam)
S3method(posterior_linpred,mvgam)
S3method(posterior_predict,mvgam)
S3method(pp_check,mvgam)
S3method(ppc,mvgam)
S3method(predict,mvgam)
S3method(print,how_to_cite)
S3method(print,mvgam)
S3method(print,mvgam_conditional_effects)
S3method(print,mvgam_prefit)
S3method(print,mvgam_summary)
S3method(print,mvgammodel)
S3method(residual_cor,jsdgam)
S3method(residual_cor,mvgam)
S3method(residuals,mvgam)
S3method(rhat,mvgam)
S3method(score,mvgam_forecast)
S3method(set_coef,mvgam)
S3method(smooth.construct,mod.smooth.spec)
S3method(smooth.construct,moi.smooth.spec)
S3method(stability,mvgam)
S3method(stancode,mvgam)
S3method(stancode,mvgam_prefit)
S3method(standata,mvgam_prefit)
S3method(summary,mvgam)
S3method(summary,mvgam_fevd)
S3method(summary,mvgam_forecast)
S3method(summary,mvgam_irf)
S3method(summary,mvgam_prefit)
S3method(tidy,mvgam)
S3method(update,jsdgam)
S3method(update,mvgam)
S3method(variables,mvgam)
export("%>%")
export(AR)
export(CAR)
export(GP)
export(PW)
export(RW)
export(VAR)
export(ZMVN)
export(add_residuals)
export(as_draws)
export(as_draws_array)
export(as_draws_df)
export(as_draws_list)
export(as_draws_matrix)
export(as_draws_rvars)
export(augment)
export(avg_predictions)
export(bernoulli)
export(beta_binomial)
export(betar)
export(code)
export(compare_mvgams)
export(comparisons)
export(conditional_effects)
export(datagrid)
export(drawDotmvgam)
export(dynamic)
export(ensemble)
export(eval_mvgam)
export(eval_smoothDothilbertDotsmooth)
export(eval_smoothDotmodDotsmooth)
export(eval_smoothDotmoiDotsmooth)
export(fevd)
export(forecast)
export(get_data)
export(get_mvgam_priors)
export(get_predict)
export(gp)
export(hindcast)
export(how_to_cite)
export(hypotheses)
export(irf)
export(jsdgam)
export(lfo_cv)
export(lognormal)
export(loo)
export(loo_compare)
export(lv_correlations)
export(mcmc_plot)
export(mvgam)
export(nb)
export(neff_ratio)
export(nmix)
export(nuts_params)
export(ordinate)
export(plot_comparisons)
export(plot_mvgam_factors)
export(plot_mvgam_fc)
export(plot_mvgam_pterms)
export(plot_mvgam_randomeffects)
export(plot_mvgam_resids)
export(plot_mvgam_series)
export(plot_mvgam_smooth)
export(plot_mvgam_trend)
export(plot_mvgam_uncertainty)
export(plot_predictions)
export(plot_slopes)
export(posterior_epred)
export(posterior_linpred)
export(posterior_predict)
export(pp_check)
export(ppc)
export(predictions)
export(prior)
export(prior_)
export(prior_string)
export(residual_cor)
export(rhat)
export(roll_eval_mvgam)
export(s)
export(score)
export(series_to_mvgam)
export(set_prior)
export(sim_mvgam)
export(slopes)
export(stability)
export(stancode)
export(standata)
export(student)
export(student_t)
export(t2)
export(te)
export(ti)
export(tidy)
export(tweedie)
export(variables)
importFrom(Rcpp,evalCpp)
importFrom(bayesplot,color_scheme_get)
importFrom(bayesplot,color_scheme_set)
importFrom(bayesplot,log_posterior)
importFrom(bayesplot,neff_ratio)
importFrom(bayesplot,nuts_params)
importFrom(bayesplot,pp_check)
importFrom(brms,bernoulli)
importFrom(brms,beta_binomial)
importFrom(brms,brm)
importFrom(brms,brmsterms)
importFrom(brms,conditional_effects)
importFrom(brms,dbeta_binomial)
importFrom(brms,do_call)
importFrom(brms,dstudent_t)
importFrom(brms,get_prior)
importFrom(brms,gp)
importFrom(brms,logm1)
importFrom(brms,lognormal)
importFrom(brms,mcmc_plot)
importFrom(brms,ndraws)
importFrom(brms,pbeta_binomial)
importFrom(brms,prior)
importFrom(brms,prior_)
importFrom(brms,prior_string)
importFrom(brms,pstudent_t)
importFrom(brms,qstudent_t)
importFrom(brms,rbeta_binomial)
importFrom(brms,read_csv_as_stanfit)
importFrom(brms,rstudent_t)
importFrom(brms,set_prior)
importFrom(brms,stancode)
importFrom(brms,standata)
importFrom(brms,student)
importFrom(generics,augment)
importFrom(generics,forecast)
importFrom(generics,tidy)
importFrom(ggplot2,aes)
importFrom(ggplot2,facet_wrap)
importFrom(ggplot2,geom_bar)
importFrom(ggplot2,ggplot)
importFrom(ggplot2,labs)
importFrom(ggplot2,scale_colour_discrete)
importFrom(ggplot2,scale_fill_discrete)
importFrom(ggplot2,theme_classic)
importFrom(grDevices,devAskNewPage)
importFrom(grDevices,hcl.colors)
importFrom(grDevices,rgb)
importFrom(graphics,abline)
importFrom(graphics,axis)
importFrom(graphics,barplot)
importFrom(graphics,box)
importFrom(graphics,boxplot)
importFrom(graphics,bxp)
importFrom(graphics,hist)
importFrom(graphics,layout)
importFrom(graphics,legend)
importFrom(graphics,lines)
importFrom(graphics,par)
importFrom(graphics,plot)
importFrom(graphics,points)
importFrom(graphics,polygon)
importFrom(graphics,rect)
importFrom(graphics,rug)
importFrom(graphics,title)
importFrom(grid,arrow)
importFrom(grid,unit)
importFrom(insight,find_predictors)
importFrom(insight,get_data)
importFrom(insight,get_predictors)
importFrom(loo,is.loo)
importFrom(loo,loo)
importFrom(loo,loo_compare)
importFrom(magrittr,"%>%")
importFrom(marginaleffects,avg_predictions)
importFrom(marginaleffects,comparisons)
importFrom(marginaleffects,datagrid)
importFrom(marginaleffects,get_coef)
importFrom(marginaleffects,get_predict)
importFrom(marginaleffects,get_vcov)
importFrom(marginaleffects,hypotheses)
importFrom(marginaleffects,plot_comparisons)
importFrom(marginaleffects,plot_predictions)
importFrom(marginaleffects,plot_slopes)
importFrom(marginaleffects,predictions)
importFrom(marginaleffects,set_coef)
importFrom(marginaleffects,slopes)
importFrom(methods,cbind2)
importFrom(mgcv,Predict.matrix)
importFrom(mgcv,Rrank)
importFrom(mgcv,bam)
importFrom(mgcv,betar)
importFrom(mgcv,gam.control)
importFrom(mgcv,gam.side)
importFrom(mgcv,get.var)
importFrom(mgcv,initial.sp)
importFrom(mgcv,interpret.gam)
importFrom(mgcv,nb)
importFrom(mgcv,s)
importFrom(mgcv,smooth.construct)
importFrom(mgcv,smoothCon)
importFrom(mgcv,t2)
importFrom(mgcv,te)
importFrom(mgcv,ti)
importFrom(parallel,clusterExport)
importFrom(parallel,setDefaultCluster)
importFrom(parallel,stopCluster)
importFrom(posterior,as_draws)
importFrom(posterior,as_draws_array)
importFrom(posterior,as_draws_df)
importFrom(posterior,as_draws_list)
importFrom(posterior,as_draws_matrix)
importFrom(posterior,as_draws_rvars)
importFrom(posterior,rhat)
importFrom(posterior,variables)
importFrom(rlang,missing_arg)
importFrom(rlang,parse_expr)
importFrom(rlang,warn)
importFrom(rstantools,posterior_epred)
importFrom(rstantools,posterior_linpred)
importFrom(rstantools,posterior_predict)
importFrom(stats,.getXlevels)
importFrom(stats,Gamma)
importFrom(stats,acf)
importFrom(stats,as.dist)
importFrom(stats,as.formula)
importFrom(stats,binomial)
importFrom(stats,coef)
importFrom(stats,complete.cases)
importFrom(stats,cor)
importFrom(stats,cov)
importFrom(stats,cov2cor)
importFrom(stats,dbeta)
importFrom(stats,dbinom)
importFrom(stats,density)
importFrom(stats,dgamma)
importFrom(stats,dlnorm)
importFrom(stats,dnbinom)
importFrom(stats,dnorm)
importFrom(stats,dpois)
importFrom(stats,drop.terms)
importFrom(stats,ecdf)
importFrom(stats,fitted)
importFrom(stats,formula)
importFrom(stats,frequency)
importFrom(stats,gaussian)
importFrom(stats,hclust)
importFrom(stats,is.ts)
importFrom(stats,lag)
importFrom(stats,lm)
importFrom(stats,logLik)
importFrom(stats,mad)
importFrom(stats,make.link)
importFrom(stats,median)
importFrom(stats,model.frame)
importFrom(stats,model.matrix)
importFrom(stats,model.offset)
importFrom(stats,na.fail)
importFrom(stats,na.omit)
importFrom(stats,na.pass)
importFrom(stats,pacf)
importFrom(stats,pbeta)
importFrom(stats,pbinom)
importFrom(stats,pgamma)
importFrom(stats,plnorm)
importFrom(stats,plogis)
importFrom(stats,pnorm)
importFrom(stats,poisson)
importFrom(stats,ppois)
importFrom(stats,predict)
importFrom(stats,printCoefmat)
importFrom(stats,qbinom)
importFrom(stats,qcauchy)
importFrom(stats,qlogis)
importFrom(stats,qnorm)
importFrom(stats,qqline)
importFrom(stats,qqnorm)
importFrom(stats,quantile)
importFrom(stats,rbeta)
importFrom(stats,rbinom)
importFrom(stats,reformulate)
importFrom(stats,residuals)
importFrom(stats,rgamma)
importFrom(stats,rlnorm)
importFrom(stats,rnbinom)
importFrom(stats,rnorm)
importFrom(stats,rpois)
importFrom(stats,runif)
importFrom(stats,sd)
importFrom(stats,setNames)
importFrom(stats,start)
importFrom(stats,terms)
importFrom(stats,terms.formula)
importFrom(stats,time)
importFrom(stats,ts)
importFrom(stats,update)
importFrom(stats,update.formula)
importFrom(utils,getFromNamespace)
importFrom(utils,head)
importFrom(utils,lsf.str)
importFrom(utils,tail)
useDynLib(mvgam, .registration = TRUE)


================================================
FILE: NEWS.md
================================================
# mvgam 1.1.595

## New functionalities
* Restructured `summary.mvgam()` to now return an object of class `mvgam_summary` that can be re-used for later purposes, or that can be printed with `print.mvgam_summary()` (#119)
* Added a new function `ordinate.jsdgam()` to plot two-dimensional ordinations of site and species scores from latent factor models estimated in `jsdgam()`
* `residual_cor()` now supports models fitted with `mvgam()` in which latent factors were used or in which correlated dynamic processes were used
* Added a `summary.mvgam_forecast()` function to compute and return prediction intervals of posterior hindcasts and forecasts in a `data.frame` format. This will make it easier for users to create their own custom plots of hindcast and forecast distributions (#108)
* Added a [`mvgam_use_cases`](https://nicholasjclark.github.io/mvgam/reference/mvgam_use_cases.html) help file to provide links to online resources that discuss how to use 'mvgam' in practice

## Changing defaults
* The `forecast()` method is now imported from 'generics' to help avoid conflict issues with other forecasting packages
* Deprecated the `incl_dynamics` argument in the `loo()` and `loo_compare()` functions to ensure better consistency in log-likelihood and resulting LOO estimates from models with different observation families
* Changed default `type` in `conditional_effects()` to `expected` to match behaviour of 'brms'

## Bug fixes
* Bug fix to ensure forecast scores are properly computed when plotting objects of class `mvgam_forecast` if only a single out-of-sample observation was included in `newdata` (#111)
* Bug fix to ensure offsets supplied with `offset(...)` in formulae are correctly incorporated when using `gp()` terms
* Bug fix to ensure piecewise trends are correctly predicted when using `process_error = TRUE` in `predict()`
* Bug fix to ensure variance of continuous time autoregressive processes (using `CAR()`) scales appropriately with time lags (#107)
* Bug fix to ensure `summary.mvgam()` uses the correct `max_treedepth` value when checking Stan diagnostics, rather than always assuming the default of 10 (thanks to @StefanoMezzini for reporting)
* Bug fix to ensure `NA` residual values are handled properly when plotting residuals (this occurs because response values are allowed to be missing; thanks to @StefanoMezzini for reporting)

## Deprecations
* Altered the structure of objects of class `mvgam_forecast` so that the `train_times` and `test_times` slots now contain lists of length `n_series`. This allows for continuous time data to be better handled, where some series may have been sampled at different timepoints

# mvgam 1.1.51

## New functionalities
* Changed default priors for scale parameters (i.e. process errors `"sigma"` and observation errors `"sigma_obs"`) to inverse gammas to provide more sensible prior regularisation away from zero
* Improved messaging in `summary()` for better guidance on how to investigate poor HMC sampler behaviours
* Converted several more plotting functions to return `ggplot` objects in place of base R plots for broader customisation
* Added four new `type`s to the `pp_check()` function to allow more targeted investigations of randomized quantile residual distributions
* Added a `plot.mvgam_residcor()` function for nicer plotting of estimated residual correlations from `jsdgam` objects 
* Added `summary()` functions to calculate useful posterior summaries from objects of class `mvgam_irf` and `mvgam_fevd` (see `?irf` and `?fevd` for examples)
* Improved efficiency of `nmix()` models with some slight restructuring of the model objects (#102)

## Bug fixes
* Bug fix to ensure piecewise trends are extrapolated the correct number of timepoints when forecasting using the `forecast()` function

# mvgam 1.1.4
## New functionalities
* Added the `how_to_cite.mvgam()` function to generate a scaffold methods description of fitted models, which can hopefully make it easier for users to fully describe their programming environment 
* Improved various plotting functions by returning `ggplot` objects in place of base plots (thanks to @mhollanders #38)
* Added the brier score (`score = 'brier'`) as an option in `score.mvgam_forecast()` for scoring forecasts of binary variables when using `family = bernoulli()` (#80)
* Added `augment()` function to add residuals and fitted values to an mvgam object's observed data (thanks to @swpease #83)
* Added support for approximate `gp()` effects with more than one covariate and with different kernel functions (#79) 
* Added function `jsdgam()` to estimate Joint Species Distribution Models in which both the latent factors and the observation model components can include any of mvgam's complex linear predictor effects. Also added a function `residual_cor()` to compute residual correlation, covariance and precision matrices from `jsdgam` models. See `?mvgam::jsdgam` and `?mvgam::residual_cor` for details
* Added a `stability.mvgam()` method to compute stability metrics from models fit with Vector Autoregressive dynamics (#21 and #76)
* Added functionality to estimate hierarchical error correlations when using multivariate latent process models and when the data are nested among levels of a relevant grouping factor (#75); see `?mvgam::AR` for an example
* Added `ZMVN()` error models for estimating Zero-Mean Multivariate Normal errors; convenient for working with non time-series data where latent residuals are expected to be correlated (such as when fitting Joint Species Distribution Models); see `?mvgam::ZMVN` for examples
* Added a `fevd.mvgam()` method to compute forecast error variance decompositions from models fit with Vector Autoregressive dynamics (#21 and #76)

## Deprecations
* Arguments `use_stan`, `jags_path`, `data_train`, `data_test`, `adapt_delta`, `max_treedepth` and `drift` have been removed from primary functions to streamline documentation and reflect the package's mission to deprecate 'JAGS' as a suitable backend. Both `adapt_delta` and `max_treedepth` should now be supplied in a named `list()` to the new argument `control`

## Bug fixes
* Bug fix to ensure `marginaleffects::comparisons` functions appropriately recognise internal `rowid` variables
* Updates to ensure `ensemble` provides appropriate weighting of forecast draws (#98)
* Not necessarily a "bug fix", but this update removes several dependencies to lighten installation and improve efficiency of the workflow (#93)
* Fixed a minor bug in the way `trend_map` recognises levels of the `series` factor
* Bug fix to ensure `lfo_cv` recognises the actual times in `time`, just in case the user supplies data that doesn't start at `t = 1`. Also updated documentation to better reflect this
* Bug fix to ensure `update.mvgam` captures any `knots` or `trend_knots` arguments that were passed to the original model call

# mvgam 1.1.3
## New functionalities
* Allow intercepts to be included in process models when `trend_formula` is supplied. This breaks the assumption that the process has to be zero-centred, adding more modelling flexibility but also potentially inducing nonidentifiabilities with respect to any observation model intercepts. Thoughtful priors are a must for these models
* Added `standata.mvgam_prefit`, `stancode.mvgam` and `stancode.mvgam_prefit` methods for better alignment with 'brms' workflows
* Added 'gratia' to *Enhancements* to allow popular methods such as `draw()` to be used for 'mvgam' models if 'gratia' is already installed
* Added an `ensemble.mvgam_forecast()` method to generate evenly weighted combinations of probabilistic forecast distributions
* Added an `irf.mvgam()` method to compute Generalized and Orthogonalized Impulse Response Functions (IRFs) from models fit with Vector Autoregressive dynamics

## Deprecations
* The `drift` argument has been deprecated. It is now recommended for users to include parametric fixed effects of "time" in their respective GAM formulae to capture any expected drift effects

## Bug fixes
* Added a new check to ensure that exception messages are only suppressed by the `silent` argument if the user's version of 'cmdstanr' is adequate
* Updated dependency for 'brms' to version >= '2.21.0' so that `read_csv_as_stanfit` can be imported, which should future-proof the conversion of 'cmdstanr' models to `stanfit` objects (#70)

# mvgam 1.1.2
## New functionalities
* Added options for silencing some of the 'Stan' compiler and modeling messages using the `silent` argument in `mvgam()`
* Moved a number of packages from 'Depends' to 'Imports' for simpler package loading and fewer potential masking conflicts
* Improved efficiency of the model initialisation by tweaking parameters of the underlying 'mgcv' `gam` object's convergence criteria, resulting in much faster model setups
* Added an option to use `trend_model = 'None'` in State-Space models, increasing flexibility by ensuring the process error evolves as white noise (#51)
* Added an option to use the non-centred parameterisation for some autoregressive trend models,
which speeds up mixing most of the time
* Updated support for multithreading so that all observation families (apart from `nmix()`) can now be modeled with multiple threads
* Changed default priors on autoregressive coefficients (AR1, AR2, AR3) to enforce stationarity, which is a more sensible default in the majority of contexts
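
A minimal sketch of the `trend_model = 'None'` option mentioned above (the trend formula and data are placeholders):

```r
# State-Space model in which the latent process evolves as white noise
mod <- mvgam(
  y ~ series,
  trend_formula = ~ s(habitat),
  trend_model = 'None',
  data = dat
)
```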

## Bug fixes
* Fixed a small bug that prevented `conditional_effects.mvgam()` from handling effects with three-way interactions

# mvgam 1.1.1
## New functionalities
* Changed indexing of an internal C++ function in response to Prof Brian Ripley's CRAN email: "Dear maintainer, Please see the problems shown on https://cran.r-project.org/web/checks/check_results_mvgam.html. Please correct before 2024-05-22 to safely retain your package on CRAN. The CRAN Team"

# mvgam 1.1.0
* First release of `mvgam` to CRAN


================================================
FILE: R/RcppExports.R
================================================
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' @noRd
ar3_recursC <- function(drift, ar1, ar2, ar3, linpreds, errors, last_trends, h) {
    .Call(`_mvgam_ar3_recursC`, drift, ar1, ar2, ar3, linpreds, errors, last_trends, h)
}

#' @noRd
var1_recursC <- function(A, linpreds, errors, drift, last_trends, h) {
    .Call(`_mvgam_var1_recursC`, A, linpreds, errors, drift, last_trends, h)
}

#' @noRd
varma_recursC <- function(A, A2, A3, theta, linpreds, errors, drift, last_trends, h) {
    .Call(`_mvgam_varma_recursC`, A, A2, A3, theta, linpreds, errors, drift, last_trends, h)
}
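
# NOTE (illustrative only): the wrappers above bind to compiled C++ recursions
# used when propagating latent trends forward over a forecast horizon. Below is
# a rough, pure-R sketch of the AR(3)-style recursion that ar3_recursC appears
# to implement; argument shapes and semantics here are assumptions, not the
# package's documented API:
ar3_recurs_R <- function(drift, ar1, ar2, ar3, linpreds, errors, last_trends, h) {
  # last_trends: the three most recent trend values; errors: h innovations;
  # linpreds: h linear predictor values for the forecast horizon
  states <- c(last_trends, numeric(h))
  p <- length(last_trends)
  for (t in seq_len(h)) {
    states[p + t] <- drift + linpreds[t] +
      ar1 * states[p + t - 1] +
      ar2 * states[p + t - 2] +
      ar3 * states[p + t - 3] +
      errors[t]
  }
  states[(p + 1):(p + h)]
}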



================================================
FILE: R/add_MACor.R
================================================
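# The function below edits a Stan model file held as a character vector:
# anchor lines are found with grep(..., fixed = TRUE), rewritten via paste0()
# with embedded '\n' characters, and the vector is then re-split into one
# element per line with readLines(textConnection(...)). A hypothetical,
# commented sketch of that pattern (not real mvgam-generated Stan code):
#   model_file <- c('parameters {', 'vector<lower=0>[n_series] sigma;', '}')
#   idx <- grep('parameters {', model_file, fixed = TRUE)
#   model_file[idx] <- paste0('parameters {\n', '// extra declaration\n', 'real alpha;')
#   model_file <- readLines(textConnection(model_file), n = -1)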
#' Function to add moving average processes and/or
#' correlated process errors to an existing Stan model file
#'
#' When adding MA terms for univariate trends, 'error' needs to take the same
#' form as the trend / LV (array[n] vector[n_lv]) so that it can be
#' extracted in the same way
#' @noRd
add_MaCor = function(
  model_file,
  model_data,
  data_train,
  data_test,
  add_ma = FALSE,
  add_cor = FALSE,
  trend_model = 'VAR1',
  drift = FALSE
) {
  if (inherits(trend_model, 'mvgam_trend')) {
    trend_char <- ma_cor_additions(validate_trend_model(
      trend_model
    ))$trend_model
  } else {
    trend_char <- trend_model
  }

  if (trend_char == 'ZMVN') {
    # Update transformed data
    if (!any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      if (any(grepl('transformed data {', model_file, fixed = TRUE))) {
        model_file[grep('transformed data {', model_file, fixed = TRUE)] <-
          paste0(
            'transformed data {\n',
            'vector[n_series] trend_zeros = rep_vector(0.0, n_series);'
          )
      } else {
        model_file[grep('parameters {', model_file, fixed = TRUE)[1]] <-
          paste0(
            'transformed data {\n',
            'vector[n_series] trend_zeros = rep_vector(0.0, n_series);\n',
            '}\nparameters {'
          )
      }
    }
    model_file <- readLines(textConnection(model_file), n = -1)

    # Update parameters block
    if (any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      model_file[grep('[n_lv] sigma;', model_file, fixed = TRUE)] <-
        paste0(
          model_file[grep('[n_lv] sigma;', model_file, fixed = TRUE)],
          '\n\n',
          '// correlated latent residuals\n',
          'array[n] vector[n_lv] LV_raw;\n',
          'cholesky_factor_corr[n_lv] L_Omega;'
        )

      starts <- grep("matrix[n, n_lv] LV;", model_file, fixed = TRUE) - 1
      ends <- starts + 1
      model_file <- model_file[-(starts:ends)]
    } else {
      model_file[grep(
        'vector<lower=0>[n_series] sigma;',
        model_file,
        fixed = TRUE
      )] <-
        paste0(
          'vector<lower=0>[n_series] sigma;\n\n',
          '// correlated latent residuals\n',
          'array[n] vector[n_series] trend_raw;\n',
          'cholesky_factor_corr[n_series] L_Omega;'
        )

      starts <- grep("matrix[n, n_series] trend;", model_file, fixed = TRUE) - 1
      ends <- starts + 1
      model_file <- model_file[-(starts:ends)]
    }

    model_file <- readLines(textConnection(model_file), n = -1)

    # Update transformed parameters block
    if (any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      model_file[grep('transformed parameters {', model_file, fixed = TRUE)] <-
        paste0(
          'transformed parameters {\n',
          paste0(
            'matrix[n, n_lv] LV;\n',
            '// LKJ form of covariance matrix\n',
            'matrix[n_lv, n_lv] L_Sigma;'
          )
        )

      model_file[grep('// derived latent states', model_file, fixed = TRUE)] <-
        paste0(
          '// correlated residuals\n',
          '\nL_Sigma = diag_pre_multiply(sigma, L_Omega);\n',
          'for (i in 1:n) {\n',
          'LV[i, 1:n_lv] = to_row_vector(LV_raw[i]);\n',
          '}\n',
          '// derived latent states'
        )
    } else {
      model_file[grep('transformed parameters {', model_file, fixed = TRUE)] <-
        paste0(
          'transformed parameters {\n',
          paste0(
            'matrix[n, n_series] trend;\n',
            '// LKJ form of covariance matrix\n',
            'matrix[n_series, n_series] L_Sigma;'
          )
        )

      last <- grep('model {', model_file, fixed = TRUE)
      for (i in last:(last - 5)) {
        last <- i
        # stop once the closing '}' of the preceding block is found
        if (trimws(model_file[i]) == '}') {
          break
        }
      }

      model_file[last] <-
        paste0(
          '// correlated residuals\n',
          '\nL_Sigma = diag_pre_multiply(sigma, L_Omega);\n',
          'for (i in 1:n) {\n',
          'trend[i, 1:n_series] = to_row_vector(trend_raw[i]);\n',
          '}\n}'
        )
    }
    model_file <- readLines(textConnection(model_file), n = -1)

    # Update model block
    if (any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      starts <- grep(
        "LV[1, j] ~ normal(trend_mus[ytimes_trend[1, j]], sigma[j]);",
        model_file,
        fixed = TRUE
      ) -
        1
      ends <- grep(
        "LV[i, j] ~ normal(trend_mus[ytimes_trend[i, j]] + LV[i - 1, j] - trend_mus[ytimes_trend[i - 1, j]], sigma[j]);",
        model_file,
        fixed = TRUE
      ) +
        2
      model_file <- model_file[-(starts:ends)]
      model_file[starts] <- paste0(
        '// residual error correlations\n',
        'L_Omega ~ lkj_corr_cholesky(2);\n',
        'for (i in 1:n){\n',
        'LV_raw[i] ~ multi_normal_cholesky(trend_mus[ytimes_trend[i, 1:n_lv]], L_Sigma);\n',
        '}\n',
        model_file[starts]
      )
    } else {
      starts <- grep("// trend estimates", model_file, fixed = TRUE)
      ends <- grep(
        "trend[2:n, s] ~ normal(trend[1:(n - 1), s], sigma[s]);",
        model_file,
        fixed = TRUE
      ) +
        1
      model_file <- model_file[-(starts:ends)]
      model_file[starts] <- paste0(
        '// residual error correlations\n',
        'L_Omega ~ lkj_corr_cholesky(2);\n',
        'for (i in 1:n){\n',
        'trend_raw[i] ~ multi_normal_cholesky(trend_zeros, L_Sigma);\n',
        '}\n',
        model_file[starts]
      )
    }
    model_file <- readLines(textConnection(model_file), n = -1)

    # Update generated quantities
    if (any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      model_file[grep('// posterior predictions', model_file, fixed = TRUE)] <-
        paste0(
          '// computed error covariance matrix\n',
          'cov_matrix[n_lv] Sigma = multiply_lower_tri_self_transpose(L_Sigma);\n',
          '// posterior predictions'
        )
    } else {
      model_file[grep('// posterior predictions', model_file, fixed = TRUE)] <-
        paste0(
          '// computed error covariance matrix\n',
          'cov_matrix[n_series] Sigma = multiply_lower_tri_self_transpose(L_Sigma);\n',
          '// posterior predictions'
        )
    }
    model_file <- readLines(textConnection(model_file), n = -1)
  }

  if (trend_char %in% c('RW', 'AR1', 'AR2', 'AR3')) {
    if (any(grepl('ytimes_trend', model_file))) {
      remove_trendmus <- FALSE
    } else {
      remove_trendmus <- TRUE
    }

    # Update transformed data
    if (any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      if (any(grepl('transformed data {', model_file, fixed = TRUE))) {
        model_file[grep('transformed data {', model_file, fixed = TRUE)] <-
          paste0(
            'transformed data {\n',
            'vector[n_lv] trend_zeros = rep_vector(0.0, n_lv);'
          )
      } else {
        model_file[grep('parameters {', model_file, fixed = TRUE)[1]] <-
          paste0(
            'transformed data {\n',
            'vector[n_lv] trend_zeros = rep_vector(0.0, n_lv);\n',
            '}\nparameters {'
          )
      }
    } else {
      if (any(grepl('transformed data {', model_file, fixed = TRUE))) {
        model_file[grep('transformed data {', model_file, fixed = TRUE)] <-
          paste0(
            'transformed data {\n',
            'vector[n_series] trend_zeros = rep_vector(0.0, n_series);'
          )
      } else {
        model_file[grep('parameters {', model_file, fixed = TRUE)[1]] <-
          paste0(
            'transformed data {\n',
            'vector[n_series] trend_zeros = rep_vector(0.0, n_series);\n',
            '}\nparameters {'
          )
      }
    }
    model_file <- readLines(textConnection(model_file), n = -1)

    # Update parameters block
    if (any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      if (add_cor) {
        model_file[grep('[n_lv] sigma;', model_file, fixed = TRUE)] <-
          paste0(
            model_file[grep('[n_lv] sigma;', model_file, fixed = TRUE)],
            '\n',
            'cholesky_factor_corr[n_lv] L_Omega;'
          )
      }

      model_file[grep('matrix[n, n_lv] LV;', model_file, fixed = TRUE)] <-
        paste0(
          'matrix[n, n_lv] LV;\n',
          if (add_ma) {
            paste0(
              '// ma coefficients\n',
              if (add_cor) {
                'matrix<lower=-1,upper=1>[n_lv, n_lv] theta;'
              } else {
                'vector<lower=-1,upper=1>[n_lv] theta;'
              }
            )
          } else {
            NULL
          },
          '\n// dynamic error parameters\n',
          'vector[n_lv] error[n];'
        )

      model_file <- readLines(textConnection(model_file), n = -1)
      end <- grep('matrix[n, n_lv] LV;', model_file, fixed = TRUE)
      start <- end - 1
      model_file <- model_file[-c(start:end)]
    } else {
      if (add_cor) {
        model_file[grep(
          'vector<lower=0>[n_series] sigma;',
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            'vector<lower=0>[n_series] sigma;\n',
            'cholesky_factor_corr[n_series] L_Omega;'
          )
      }

      model_file[grep(
        'matrix[n, n_series] trend;',
        model_file,
        fixed = TRUE
      )] <-
        paste0(
          'matrix[n, n_series] trend;\n',
          if (add_ma) {
            paste0(
              '// ma coefficients\n',
              if (add_cor) {
                'matrix<lower=-1,upper=1>[n_series, n_series] theta;'
              } else {
                'vector<lower=-1,upper=1>[n_series] theta;'
              }
            )
          } else {
            NULL
          },
          '\n// dynamic error parameters\n',
          'vector[n_series] error[n];'
        )

      model_file <- readLines(textConnection(model_file), n = -1)
      end <- grep('matrix[n, n_series] trend;', model_file, fixed = TRUE)
      start <- end - 1
      model_file <- model_file[-c(start:end)]
    }
    model_file <- readLines(textConnection(model_file), n = -1)

    # Update transformed parameters
    if (any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      model_file[grep(
        'matrix[n, n_series] trend;',
        model_file,
        fixed = TRUE
      )] <-
        paste0(
          'matrix[n, n_series] trend;\n',
          if (add_cor) {
            paste0(
              'vector[n_lv] LV[n];\n',
              if (add_ma) {
                'vector[n_lv] epsilon[n];\n'
              } else {
                NULL
              },
              '// LKJ form of covariance matrix\n',
              'matrix[n_lv, n_lv] L_Sigma;\n',
              '// computed error covariance matrix\n',
              'cov_matrix[n_lv] Sigma;'
            )
          } else {
            paste0(
              'matrix[n, n_lv] LV;\n',
              if (add_ma) {
                'matrix[n, n_lv] epsilon;'
              } else {
                NULL
              }
            )
          }
        )

      if (add_cor) {
        if (trend_char %in% c('AR1', 'RW')) {
          if (any(grep('// derived latent states', model_file, fixed = TRUE))) {
            to_modify <- grep(
              '// derived latent states',
              model_file,
              fixed = TRUE
            )
          } else {
            to_modify <- grep(
              '// derived latent trends',
              model_file,
              fixed = TRUE
            )
          }
          model_file[to_modify] <-
            paste0(
              '// derived latent states\n',
              'LV[1] = ',
              'trend_mus[ytimes_trend[1, 1:n_lv]] + error[1];\n',
              if (add_ma) {
                'epsilon[1] = error[1];\n'
              },
              'for (i in 2:n) {\n',
              if (add_ma) {
                paste0(
                  '// lagged error ma process\n',
                  'epsilon[i] = theta * error[i - 1];\n',
                  '// full ARMA process\n'
                )
              } else {
                '// full AR process\n'
              },
              'LV[i] = ',
              if (drift) {
                'drift * (i - 1) + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[i, 1:n_lv]] + ',
              if (trend_char == 'AR1') {
                'ar1 .* '
              } else {
                NULL
              },
              '(LV[i - 1] - trend_mus[ytimes_trend[i - 1, 1:n_lv]])',
              if (add_ma) {
                '+ epsilon[i] + error[i];\n'
              } else {
                '+ error[i];\n'
              },
              '}\n'
            )
        }

        if (trend_char == 'AR2') {
          if (any(grep('// derived latent states', model_file, fixed = TRUE))) {
            to_modify <- grep(
              '// derived latent states',
              model_file,
              fixed = TRUE
            )
          } else {
            to_modify <- grep(
              '// derived latent trends',
              model_file,
              fixed = TRUE
            )
          }
          model_file[to_modify] <-
            paste0(
              '// derived latent states\n',
              'LV[1] = ',
              'trend_mus[ytimes_trend[1, 1:n_lv]] + error[1];\n',
              if (add_ma) {
                paste0(
                  'epsilon[1] = error[1];\n',
                  'epsilon[2] = theta * error[1];\n'
                )
              } else {
                NULL
              },
              'LV[2] = ',
              if (drift) {
                'drift + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[2, 1:n_lv]] + ',
              'ar1 .* (LV[1] - trend_mus[ytimes_trend[1, 1:n_lv]]) + ',
              if (add_ma) {
                'epsilon[2] + error[2];\n'
              } else {
                'error[2];\n'
              },
              'for (i in 3:n) {\n',
              if (add_ma) {
                paste0(
                  '// lagged error ma process\n',
                  'epsilon[i] = theta * error[i - 1];\n',
                  '// full ARMA process\n'
                )
              } else {
                '// full AR process\n'
              },
              'LV[i] = ',
              if (drift) {
                'drift * (i - 1) + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[i, 1:n_lv]] + ',
              'ar1 .* (LV[i - 1] - trend_mus[ytimes_trend[i - 1, 1:n_lv]]) + ',
              'ar2 .* (LV[i - 2] - trend_mus[ytimes_trend[i - 2, 1:n_lv]]) + ',
              if (add_ma) {
                'epsilon[i] + error[i];\n'
              } else {
                'error[i];\n'
              },
              '}\n'
            )
        }

        if (trend_char == 'AR3') {
          if (any(grep('// derived latent states', model_file, fixed = TRUE))) {
            to_modify <- grep(
              '// derived latent states',
              model_file,
              fixed = TRUE
            )
          } else {
            to_modify <- grep(
              '// derived latent trends',
              model_file,
              fixed = TRUE
            )
          }
          model_file[to_modify] <-
            paste0(
              '// derived latent states\n',
              'LV[1] = ',
              'trend_mus[ytimes_trend[1, 1:n_lv]] + error[1];\n',
              if (add_ma) {
                paste0(
                  'epsilon[1] = error[1];\n',
                  'epsilon[2] = theta * error[1];\n',
                  'epsilon[3] = theta * error[2];\n'
                )
              } else {
                NULL
              },
              'LV[2] = ',
              if (drift) {
                'drift + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[2, 1:n_lv]] + ',
              'ar1 .* (LV[1] - trend_mus[ytimes_trend[1, 1:n_lv]]) + ',
              if (add_ma) {
                'epsilon[2] + error[2];\n'
              } else {
                'error[2];\n'
              },
              'LV[3] = ',
              if (drift) {
                'drift * 2 + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[3, 1:n_lv]] + ',
              'ar1 .* (LV[2] - trend_mus[ytimes_trend[2, 1:n_lv]]) + ',
              'ar2 .* (LV[1] - trend_mus[ytimes_trend[1, 1:n_lv]]) + ',
              if (add_ma) {
                'epsilon[3] + error[3];\n'
              } else {
                'error[3];\n'
              },
              'for (i in 4:n) {\n',
              if (add_ma) {
                paste0(
                  '// lagged error ma process\n',
                  'epsilon[i] = theta * error[i - 1];\n',
                  '// full ARMA process\n'
                )
              } else {
                '// full AR process\n'
              },
              'LV[i] = ',
              if (drift) {
                'drift * (i - 1) + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[i, 1:n_lv]] + ',
              'ar1 .* (LV[i - 1] - trend_mus[ytimes_trend[i - 1, 1:n_lv]]) + ',
              'ar2 .* (LV[i - 2] - trend_mus[ytimes_trend[i - 2, 1:n_lv]]) + ',
              'ar3 .* (LV[i - 3] - trend_mus[ytimes_trend[i - 3, 1:n_lv]]) + ',
              if (add_ma) {
                'epsilon[i] + error[i];\n'
              } else {
                'error[i];\n'
              },
              '}\n'
            )
        }
      } else {
        if (trend_char %in% c('AR1', 'RW')) {
          if (any(grep('// derived latent states', model_file, fixed = TRUE))) {
            to_modify <- grep(
              '// derived latent states',
              model_file,
              fixed = TRUE
            )
          } else {
            to_modify <- grep(
              '// derived latent trends',
              model_file,
              fixed = TRUE
            )
          }
          model_file[to_modify] <-
            paste0(
              '// derived latent states\n',
              'for(j in 1:n_lv){\n',
              'LV[1, j] = ',
              'trend_mus[ytimes_trend[1, j]] + error[1, j];\n',
              'epsilon[1, j] = error[1, j];\n',
              'for(i in 2:n){\n',
              '// lagged error ma process\n',
              'epsilon[i, j] = theta[j] * error[i-1, j];\n',
              '// full ARMA process\n',
              'LV[i, j] = ',
              if (drift) {
                'drift[j] * (i - 1) + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[i, j]] + ',
              if (trend_char == 'AR1') {
                'ar1[j] * '
              } else {
                NULL
              },
              '(LV[i - 1, j] - trend_mus[ytimes_trend[i - 1, j]]) + ',
              'epsilon[i, j] + error[i, j];\n',
              '}\n}'
            )
        }

        if (trend_char == 'AR2') {
          if (any(grep('// derived latent states', model_file, fixed = TRUE))) {
            to_modify <- grep(
              '// derived latent states',
              model_file,
              fixed = TRUE
            )
          } else {
            to_modify <- grep(
              '// derived latent trends',
              model_file,
              fixed = TRUE
            )
          }
          model_file[to_modify] <-
            paste0(
              '// derived latent states\n',
              'for(j in 1:n_lv){\n',
              'LV[1, j] = ',
              'trend_mus[ytimes_trend[1, j]] + error[1, j];\n',
              'epsilon[1, j] = error[1, j];\n',
              'epsilon[2, j] = theta[j] * error[1, j];\n',
              'LV[2, j] = ',
              if (drift) {
                'drift[j] + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[2, j]] + ',
              'ar1[j] * (LV[1, j] - trend_mus[ytimes_trend[1, j]]) + ',
              'epsilon[2, j] + error[2, j];\n',
              'for(i in 3:n){\n',
              '// lagged error ma process\n',
              'epsilon[i, j] = theta[j] * error[i-1, j];\n',
              '// full ARMA process\n',
              'LV[i, j] = ',
              if (drift) {
                'drift[j] * (i - 1) + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[i, j]] + ',
              'ar1[j] * (LV[i - 1, j] - trend_mus[ytimes_trend[i - 1, j]]) + ',
              'ar2[j] * (LV[i - 2, j] - trend_mus[ytimes_trend[i - 2, j]]) + ',
              'epsilon[i, j] + error[i, j];\n',
              '}\n}'
            )
        }

        if (trend_char == 'AR3') {
          if (any(grep('// derived latent states', model_file, fixed = TRUE))) {
            to_modify <- grep(
              '// derived latent states',
              model_file,
              fixed = TRUE
            )
          } else {
            to_modify <- grep(
              '// derived latent trends',
              model_file,
              fixed = TRUE
            )
          }
          model_file[to_modify] <-
            paste0(
              '// derived latent states\n',
              'for(j in 1:n_lv){\n',
              'LV[1, j] = ',
              'trend_mus[ytimes_trend[1, j]] + error[1, j];\n',
              'epsilon[1, j] = error[1, j];\n',
              'epsilon[2, j] = theta[j] * error[1, j];\n',
              'epsilon[3, j] = theta[j] * error[2, j];\n',
              'LV[2, j] = ',
              if (drift) {
                'drift[j] + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[2, j]] + ',
              'ar1[j] * (LV[1, j] - trend_mus[ytimes_trend[1, j]]) + ',
              'epsilon[2, j] + error[2, j];\n',
              'LV[3, j] = ',
              if (drift) {
                'drift[j] * 2 + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[3, j]] + ',
              'ar1[j] * (LV[2, j] - trend_mus[ytimes_trend[2, j]]) + ',
              'ar2[j] * (LV[1, j] - trend_mus[ytimes_trend[1, j]]) + ',
              'epsilon[3, j] + error[3, j];\n',
              'for(i in 4:n){\n',
              '// lagged error ma process\n',
              'epsilon[i, j] = theta[j] * error[i-1, j];\n',
              '// full ARMA process\n',
              'LV[i, j] = ',
              if (drift) {
                'drift[j] * (i - 1) + '
              } else {
                NULL
              },
              'trend_mus[ytimes_trend[i, j]] + ',
              'ar1[j] * (LV[i - 1, j] - trend_mus[ytimes_trend[i - 1, j]]) + ',
              'ar2[j] * (LV[i - 2, j] - trend_mus[ytimes_trend[i - 2, j]]) + ',
              'ar3[j] * (LV[i - 3, j] - trend_mus[ytimes_trend[i - 3, j]]) + ',
              'epsilon[i, j] + error[i, j];\n',
              '}\n}'
            )
        }
      }

      if (add_cor) {
        model_file[grep('lv_coefs = Z;', model_file, fixed = TRUE)] <-
          paste0(
            'L_Sigma = diag_pre_multiply(sigma, L_Omega);\n',
            'Sigma = multiply_lower_tri_self_transpose(L_Sigma);\n',
            'lv_coefs = Z;'
          )
      }
    } else {
      model_file[grep('transformed parameters {', model_file, fixed = TRUE)] <-
        paste0(
          'transformed parameters {\n',
          if (add_cor) {
            paste0(
              'vector[n_series] trend_raw[n];\n',
              'matrix[n, n_series] trend;\n',
              if (add_ma) {
                'vector[n_series] epsilon[n];\n'
              } else {
                NULL
              },
              '// LKJ form of covariance matrix\n',
              'matrix[n_series, n_series] L_Sigma;\n',
              '// computed error covariance matrix\n',
              'cov_matrix[n_series] Sigma;'
            )
          } else {
            paste0(
              'matrix[n, n_series] trend;\n',
              if (add_ma) {
                'matrix[n, n_series] epsilon;'
              } else {
                NULL
              }
            )
          }
        )

      if (add_cor) {
        if (trend_char %in% c('AR1', 'RW')) {
          if (any(grepl('= mu_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= mu_raw[', model_file, fixed = TRUE))
          } else if (any(grepl('= b_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= b_raw[', model_file, fixed = TRUE))
          }
          model_file[insert_line] <-
            paste0(
              model_file[insert_line],
              '\n// derived latent states\n',
              'trend_raw[1] = ',
              'error[1];\n',
              if (add_ma) {
                'epsilon[1] = error[1];\n'
              } else {
                NULL
              },
              'for (i in 2:n) {\n',
              if (add_ma) {
                paste0(
                  '// lagged error ma process\n',
                  'epsilon[i] = theta * error[i - 1];\n',
                  '// full ARMA process\n'
                )
              } else {
                paste0('// full AR process\n')
              },
              'trend_raw[i] = ',
              if (drift) {
                'drift * (i - 1) + '
              } else {
                NULL
              },
              if (trend_char == 'AR1') {
                'ar1 .* '
              } else {
                NULL
              },
              'trend_raw[i - 1] + ',
              if (add_ma) {
                'epsilon[i] + error[i];\n'
              } else {
                'error[i];\n'
              },
              '}\n'
            )
        }

        if (trend_char == 'AR2') {
          if (any(grepl('= mu_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= mu_raw[', model_file, fixed = TRUE))
          } else if (any(grepl('= b_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= b_raw[', model_file, fixed = TRUE))
          }
          model_file[insert_line] <-
            paste0(
              model_file[insert_line],
              '\n// derived latent states\n',
              'trend_raw[1] = ',
              'error[1];\n',
              if (add_ma) {
                paste0(
                  'epsilon[1] = error[1];\n',
                  'epsilon[2] = theta * error[1];\n'
                )
              } else {
                NULL
              },
              'trend_raw[2] = ',
              if (drift) {
                'drift + '
              } else {
                NULL
              },
              'ar1 .* trend_raw[1] + ',
              if (add_ma) {
                'epsilon[2] + error[2];\n'
              } else {
                'error[2];\n'
              },
              'for (i in 3:n) {\n',
              if (add_ma) {
                paste0(
                  '// lagged error ma process\n',
                  'epsilon[i] = theta * error[i - 1];\n',
                  '// full ARMA process\n'
                )
              } else {
                '// full AR process\n'
              },
              'trend_raw[i] = ',
              if (drift) {
                'drift * (i - 1) + '
              } else {
                NULL
              },
              'ar1 .* trend_raw[i - 1] + ',
              'ar2 .* trend_raw[i - 2] + ',
              if (add_ma) {
                'epsilon[i] + error[i];\n'
              } else {
                'error[i];\n'
              },
              '}\n'
            )
        }

        if (trend_char == 'AR3') {
          if (any(grepl('= mu_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= mu_raw[', model_file, fixed = TRUE))
          } else if (any(grepl('= b_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= b_raw[', model_file, fixed = TRUE))
          }
          model_file[insert_line] <-
            paste0(
              model_file[insert_line],
              '\n// derived latent states\n',
              'trend_raw[1] = ',
              'error[1];\n',
              if (add_ma) {
                paste0(
                  'epsilon[1] = error[1];\n',
                  'epsilon[2] = theta * error[1];\n',
                  'epsilon[3] = theta * error[2];\n'
                )
              } else {
                NULL
              },
              'trend_raw[2] = ',
              if (drift) {
                'drift + '
              } else {
                NULL
              },
              'ar1 .* trend_raw[1] + ',
              if (add_ma) {
                'epsilon[2] + error[2];\n'
              } else {
                'error[2];\n'
              },
              'trend_raw[3] = ',
              if (drift) {
                'drift * 2 + '
              } else {
                NULL
              },
              'ar1 .* trend_raw[2] + ',
              'ar2 .* trend_raw[1] + ',
              if (add_ma) {
                'epsilon[3] + error[3];\n'
              } else {
                'error[3];\n'
              },
              'for (i in 4:n) {\n',
              if (add_ma) {
                paste0(
                  '// lagged error ma process\n',
                  'epsilon[i] = theta * error[i - 1];\n',
                  '// full ARMA process\n'
                )
              } else {
                '// full AR process\n'
              },
              'trend_raw[i] = ',
              if (drift) {
                'drift * (i - 1) + '
              } else {
                NULL
              },
              'ar1 .* trend_raw[i - 1] + ',
              'ar2 .* trend_raw[i - 2] + ',
              'ar3 .* trend_raw[i - 3] + ',
              if (add_ma) {
                'epsilon[i] + error[i];\n'
              } else {
                'error[i];\n'
              },
              '}\n'
            )
        }
      } else {
        if (trend_char %in% c('AR1', 'RW')) {
          if (any(grepl('= mu_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= mu_raw[', model_file, fixed = TRUE))
          } else if (any(grepl('= b_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= b_raw[', model_file, fixed = TRUE))
          }
          model_file[insert_line] <-
            paste0(
              model_file[insert_line],
              '\nfor(j in 1:n_series){\n',
              'trend[1, j] = ',
              'error[1, j];\n',
              'epsilon[1, j] = error[1, j];\n',
              'for(i in 2:n){\n',
              '// lagged error ma process\n',
              'epsilon[i, j] = theta[j] * error[i-1, j];\n',
              '// full ARMA process\n',
              'trend[i, j] = ',
              if (drift) {
                'drift[j] * (i - 1) + '
              } else {
                NULL
              },
              if (trend_char == 'AR1') {
                'ar1[j] * '
              } else {
                NULL
              },
              'trend[i - 1, j] + ',
              'epsilon[i, j] + error[i, j];\n',
              '}\n}'
            )
        }

        if (trend_char == 'AR2') {
          if (any(grepl('= mu_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= mu_raw[', model_file, fixed = TRUE))
          } else if (any(grepl('= b_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= b_raw[', model_file, fixed = TRUE))
          }
          model_file[insert_line] <-
            paste0(
              model_file[insert_line],
              '\nfor(j in 1:n_series){\n',
              'trend[1, j] = ',
              'error[1, j];\n',
              'epsilon[1, j] = error[1, j];\n',
              'epsilon[2, j] = theta[j] * error[1, j];\n',
              'trend[2, j] = ',
              if (drift) {
                'drift[j] + '
              } else {
                NULL
              },
              'ar1[j] * trend[1, j] + ',
              'epsilon[2, j] + error[2, j];\n',
              'for(i in 3:n){\n',
              '// lagged error ma process\n',
              'epsilon[i, j] = theta[j] * error[i-1, j];\n',
              '// full ARMA process\n',
              'trend[i, j] = ',
              if (drift) {
                'drift[j] * (i - 1) + '
              } else {
                NULL
              },
              'ar1[j] * trend[i - 1, j] + ',
              'ar2[j] * trend[i - 2, j] + ',
              'epsilon[i, j] + error[i, j];\n',
              '}\n}'
            )
        }

        if (trend_char == 'AR3') {
          if (any(grepl('= mu_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= mu_raw[', model_file, fixed = TRUE))
          } else if (any(grepl('= b_raw[', model_file, fixed = TRUE))) {
            insert_line <- max(grep('= b_raw[', model_file, fixed = TRUE))
          }
          model_file[insert_line] <-
            paste0(
              model_file[insert_line],
              '\nfor(j in 1:n_series){\n',
              'trend[1, j] = ',
              'error[1, j];\n',
              'epsilon[1, j] = error[1, j];\n',
              'epsilon[2, j] = theta[j] * error[1, j];\n',
              'epsilon[3, j] = theta[j] * error[2, j];\n',
              'trend[2, j] = ',
              if (drift) {
                'drift[j] + '
              } else {
                NULL
              },
              'ar1[j] * trend[1, j] + ',
              'epsilon[2, j] + error[2, j];\n',
              'trend[3, j] = ',
              if (drift) {
                'drift[j] * 2 + '
              } else {
                NULL
              },
              'ar1[j] * trend[2, j] + ',
              'ar2[j] * trend[1, j] + ',
              'epsilon[3, j] + error[3, j];\n',
              'for(i in 4:n){\n',
              '// lagged error ma process\n',
              'epsilon[i, j] = theta[j] * error[i-1, j];\n',
              '// full ARMA process\n',
              'trend[i, j] = ',
              if (drift) {
                'drift[j] * (i - 1) + '
              } else {
                NULL
              },
              'ar1[j] * trend[i - 1, j] + ',
              'ar2[j] * trend[i - 2, j] + ',
              'ar3[j] * trend[i - 3, j] + ',
              'epsilon[i, j] + error[i, j];\n',
              '}\n}'
            )
        }
      }

      model_file <- readLines(textConnection(model_file), n = -1)
      if (add_cor) {
        last <- grep('model {', model_file, fixed = TRUE)
        for (i in last:(last - 5)) {
          last <- i
          if (trimws(model_file[i]) == '}') {
            break
          }
        }

        model_file[last] <-
          paste0(
            '\nL_Sigma = diag_pre_multiply(sigma, L_Omega);\n',
            'Sigma = multiply_lower_tri_self_transpose(L_Sigma);\n',
            'for (i in 1:n) {\n',
            'trend[i, 1:n_series] = to_row_vector(trend_raw[i]);\n',
            '}\n}'
          )
      }
    }
    model_file <- readLines(textConnection(model_file), n = -1)

    # Update model block
    if (any(grepl('[n_lv] sigma;', model_file, fixed = TRUE))) {
      if (any(grepl('LV[1, j] ~ normal', model_file, fixed = TRUE))) {
        start <- grep('LV[1, j] ~ normal', model_file, fixed = TRUE) - 1
        end <- grep('LV[i, j] ~ normal', model_file, fixed = TRUE) + 2
      } else {
        start <- grep('LV[1, 1:n_lv] ~ normal(', model_file, fixed = TRUE) - 1
        first <- grep(':n, j] ~ normal(', model_file, fixed = TRUE)
        second <- grep('sigma[j]);', model_file, fixed = TRUE)
        end <- intersect(first, second) + 1
      }

      model_file <- model_file[-c(start:end)]
      model_file[start] <- paste0(
        '// contemporaneous errors\n',
        if (add_cor) {
          paste0(
            'L_Omega ~ lkj_corr_cholesky(2);\n',
            'for(i in 1:n) {\n',
            'error[i] ~ multi_normal_cholesky(trend_zeros, L_Sigma);\n',
            '}'
          )
        } else {
          paste0(
            'for(i in 1:n) {\n',
            'error[i] ~ normal(trend_zeros, sigma);\n',
            '}'
          )
        },
        if (add_ma) {
          paste0(
            '\n// ma coefficients\n',
            if (add_cor) {
              paste0(
                'for(i in 1:n_lv){\n',
                'for(j in 1:n_lv){\n',
                'if (i != j)\n',
                'theta[i, j] ~ normal(0, 0.2);\n',
                '}\n}'
              )
            } else {
              'theta ~ normal(0, 0.2);'
            }
          )
        } else {
          NULL
        },
        '\n',
        model_file[start]
      )
    } else {
      start <- grep(
        'trend[1, 1:n_series] ~ normal(',
        model_file,
        fixed = TRUE
      ) -
        1
      first <- grep(':n, s] ~ normal(', model_file, fixed = TRUE)
      second <- grep('sigma[s]);', model_file, fixed = TRUE)
      end <- intersect(first, second) + 1

      model_file <- model_file[-c(start:end)]
      model_file[start] <- paste0(
        '// contemporaneous errors\n',
        if (add_cor) {
          paste0(
            'L_Omega ~ lkj_corr_cholesky(2);\n',
            'for(i in 1:n) {\n',
            'error[i] ~ multi_normal_cholesky(trend_zeros, L_Sigma);\n',
            '}'
          )
        } else {
          paste0(
            'for(i in 1:n) {\n',
            'error[i] ~ normal(trend_zeros, sigma);\n',
            '}'
          )
        },
        if (add_ma) {
          paste0(
            '\n// ma coefficients\n',
            if (add_cor) {
              paste0(
                'for(i in 1:n_series){\n',
                'for(j in 1:n_series){\n',
                'if (i != j)\n',
                'theta[i, j] ~ normal(0, 0.2);\n',
                '}\n}'
              )
            } else {
              'theta ~ normal(0, 0.2);'
            }
          )
        } else {
          NULL
        },
        '\n',
        model_file[start]
      )
    }

    if (remove_trendmus) {
      model_file <- gsub(
        'trend_mus[ytimes_trend[1, 1:n_lv]] +',
        '',
        model_file,
        fixed = TRUE
      )
      model_file <- gsub(
        'trend_mus[ytimes_trend[i, 1:n_lv]] + ',
        '',
        model_file,
        fixed = TRUE
      )
      model_file <- gsub(
        ' - trend_mus[ytimes_trend[i - 1, 1:n_lv]]',
        '',
        model_file,
        fixed = TRUE
      )
      model_file <- gsub(
        ' - trend_mus[ytimes_trend[1, 1:n_lv]]',
        '',
        model_file,
        fixed = TRUE
      )
      model_file <- gsub(
        ' - trend_mus[ytimes_trend[i - 2, 1:n_lv]]',
        '',
        model_file,
        fixed = TRUE
      )
      model_file <- gsub(
        'trend_mus[ytimes_trend[2, 1:n_lv]] + ',
        '',
        model_file,
        fixed = TRUE
      )
      model_file <- gsub(
        'trend_mus[ytimes_trend[3, 1:n_lv]] + ',
        '',
        model_file,
        fixed = TRUE
      )
      model_file <- gsub(
        ' - trend_mus[ytimes_trend[2, 1:n_lv]]',
        '',
        model_file,
        fixed = TRUE
      )
      model_file <- gsub(
        ' - trend_mus[ytimes_trend[i - 3, 1:n_lv]]',
        '',
        model_file,
        fixed = TRUE
      )
    }
    model_file <- readLines(textConnection(model_file), n = -1)
  }

  if (grepl('VAR', trend_char) && add_ma) {
    # Only ma can be added for VAR models currently
    # Replace the reverse mapping function with the MA representation
    start <- grep(
      '/* Function to perform the reverse mapping*/',
      model_file,
      fixed = TRUE
    )
    end <- grep('return phiGamma;', model_file, fixed = TRUE) + 1
    model_file <- model_file[-c(start:end)]
    model_file[
      grep(
        'return mdivide_left_spd(sqrtm(B), P_real);',
        model_file,
        fixed = TRUE
      ) +
        1
    ] <-
      paste0(
        '}\n',
        '/* Function to compute Kronecker product */\n\n',
        '/* see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)*/\n',
        'matrix kronecker_prod(matrix A, matrix B) {\n',
        'matrix[rows(A) * rows(B), cols(A) * cols(B)] C;\n',
        'int m = rows(A);\n',
        'int n = cols(A);\n',
        'int p = rows(B);\n',
        'int q = cols(B);\n',
        'for (i in 1:m) {\n',
        'for (j in 1:n) {\n',
        'int row_start = (i - 1) * p + 1;\n',
        'int row_end = (i - 1) * p + p;\n',
        'int col_start = (j - 1) * q + 1;\n',
        'int col_end = (j - 1) * q + q;\n',
        'C[row_start:row_end, col_start:col_end] = A[i, j] * B;\n',
        '}\n',
        '}\n',
        'return C;\n',
        '}\n',
        '/* Function to perform the reverse mapping */\n\n',
        '/* see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)*/\n',
        'matrix[] rev_mapping(matrix[] P, matrix Sigma) {\n',
        'int p = size(P);\n',
        'int m = rows(Sigma);\n',
        'matrix[m, m] phi_for[p, p];   matrix[m, m] phi_rev[p, p];\n',
        'matrix[m, m] Sigma_for[p+1];  matrix[m, m] Sigma_rev[p+1];\n',
        'matrix[m, m] S_for;           matrix[m, m] S_rev;\n',
        'matrix[m, m] S_for_list[p+1];\n',
        '// Step 1:\n',
        'Sigma_for[p+1] = Sigma;\n',
        'S_for_list[p+1] = sqrtm(Sigma);\n',
        'for(s in 1:p) {\n',
        '// In this block of code S_rev is B^{-1} and S_for is a working matrix\n',
        'S_for = - tcrossprod(P[p-s+1]);\n',
        'for(i in 1:m) S_for[i, i] += 1.0;\n',
        'S_rev = sqrtm(S_for);\n',
        'S_for_list[p-s+1] = mdivide_right_spd(mdivide_left_spd(S_rev,\n',
        'sqrtm(quad_form_sym(Sigma_for[p-s+2], S_rev))), S_rev);\n',
        'Sigma_for[p-s+1] = tcrossprod(S_for_list[p-s+1]);\n',
        '}\n',
        '// Step 2:\n',
        'Sigma_rev[1] = Sigma_for[1];\n',
        'for(s in 0:(p-1)) {\n',
        'S_for = S_for_list[s+1];\n',
        'S_rev = sqrtm(Sigma_rev[s+1]);\n',
        'phi_for[s+1, s+1] = mdivide_right_spd(S_for * P[s+1], S_rev);\n',
        "phi_rev[s+1, s+1] = mdivide_right_spd(S_rev * P[s+1]', S_for);\n",
        'if(s>=1) {\n',
        'for(k in 1:s) {\n',
        'phi_for[s+1, k] = phi_for[s, k] - phi_for[s+1, s+1] * phi_rev[s, s-k+1];\n',
        'phi_rev[s+1, k] = phi_rev[s, k] - phi_rev[s+1, s+1] * phi_for[s, s-k+1];\n',
        '}\n',
        '}\n',
        'Sigma_rev[s+2] = Sigma_rev[s+1] - quad_form_sym(Sigma_for[s+1],\n',
        "phi_rev[s+1, s+1]');\n",
        '}\n',
        'return phi_for[p];\n',
        '}\n',

        '/* Function to compute the joint (stationary) distribution of\n',
        '(y_0, ..., y_{1-p}, eps_0, ..., eps_{1-q}) */\n\n',
        '/* see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)*/\n',
        'matrix initial_joint_var(matrix Sigma, matrix[] phi, matrix[] theta) {\n',
        'int p = size(phi);\n',
        'int q = size(theta);\n',
        'int m = rows(Sigma);\n',
        'matrix[(p+q)*m, (p+q)*m] companion_mat = rep_matrix(0.0, (p+q)*m, (p+q)*m);\n',
        'matrix[(p+q)*m, (p+q)*m] companion_var = rep_matrix(0.0, (p+q)*m, (p+q)*m);\n',
        'matrix[(p+q)*m*(p+q)*m, (p+q)*m*(p+q)*m] tmp = diag_matrix(rep_vector(1.0,\n',
        '(p+q)*m*(p+q)*m));\n',
        'matrix[(p+q)*m, (p+q)*m] Omega;\n',
        '// Construct phi_tilde:\n',
        'for(i in 1:p) {\n',
        'companion_mat[1:m, ((i-1)*m+1):(i*m)] = phi[i];\n',
        'if(i>1) {\n',
        'for(j in 1:m) {\n',
        'companion_mat[(i-1)*m+j, (i-2)*m+j] = 1.0;\n',
        '}\n',
        '}\n',
        '}\n',
        'for(i in 1:q) {\n',
        'companion_mat[1:m, ((p+i-1)*m+1):((p+i)*m)] = theta[i];\n',
        '}\n',
        'if(q>1) {\n',
        'for(i in 2:q) {\n',
        'for(j in 1:m) {\n',
        'companion_mat[(p+i-1)*m+j, (p+i-2)*m+j] = 1.0;\n',
        '}\n',
        '}\n',
        '}\n',
        '// Construct Sigma_tilde:\n',
        'companion_var[1:m, 1:m] = Sigma;\n',
        'companion_var[(p*m+1):((p+1)*m), (p*m+1):((p+1)*m)] = Sigma;\n',
        'companion_var[1:m, (p*m+1):((p+1)*m)] = Sigma;\n',
        'companion_var[(p*m+1):((p+1)*m), 1:m] = Sigma;\n',
        '// Compute Gamma0_tilde\n',
        'tmp -= kronecker_prod(companion_mat, companion_mat);\n',
        "Omega = to_matrix(tmp \\ to_vector(companion_var), (p+q)*m, (p+q)*m);\n",
        '// Ensure Omega is symmetric:\n',
        'for(i in 1:(rows(Omega)-1)) {\n',
        'for(j in (i+1):rows(Omega)) {\n',
        'Omega[j, i] = Omega[i, j];\n',
        '}\n',
        '}\n',
        'return Omega;\n',
        '}\n'
      )
    model_file <- readLines(textConnection(model_file), n = -1)
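    # Note (illustrative): initial_joint_var() above obtains the stationary
    # covariance of the companion-form VARMA by solving
    # vec(Omega) = (I - A_tilde (x) A_tilde)^{-1} vec(Sigma_tilde).
    # A pure-R reference for that step, with hypothetical inputs:
    #   k <- nrow(A_tilde)
    #   Omega <- matrix(solve(diag(k^2) - kronecker(A_tilde, A_tilde),
    #                         as.vector(Sigma_tilde)), k, k)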

    # Update transformed data
    if (
      any(grepl(
        'cholesky_factor_corr[n_lv] L_Omega;',
        model_file,
        fixed = TRUE
      ))
    ) {
      model_file[grep(
        'transformed data {',
        model_file,
        fixed = TRUE
      )] <- paste0(
        'transformed data {\n',
        'vector[n_lv] trend_zeros = rep_vector(0.0, n_lv);\n',
        'vector[n_lv*2] init_zeros = rep_vector(0.0, n_lv*2);\n'
      )
    } else {
      model_file[grep(
        'vector[n_series] trend_zeros = rep_vector(0.0, n_series);',
        model_file,
        fixed = TRUE
      )] <- paste0(
        'vector[n_series] trend_zeros = rep_vector(0.0, n_series);\n',
        'vector[n_series*2] init_zeros = rep_vector(0.0, n_series*2);\n'
      )
    }
    model_file <- readLines(textConnection(model_file), n = -1)

    # Update parameters
    if (
      any(grepl(
        'cholesky_factor_corr[n_lv] L_Omega;',
        model_file,
        fixed = TRUE
      ))
    ) {
      model_file[grep(
        'matrix[n_lv, n_lv] P_real;',
        model_file,
        fixed = TRUE
      )] <- paste0(
        'matrix[n_lv, n_lv] P_real;\n',
        '// unconstrained MA partial autocorrelations\n',
        'matrix[n_lv, n_lv] R_real;\n',
        '// initial joint stationary VARMA process\n',
        'vector[2 * n_lv] init;\n',
        '// ma error parameters\n',
        'vector[n_lv] error[n];'
      )
    } else {
      model_file[grep(
        'matrix[n_series, n_series] P_real;',
        model_file,
        fixed = TRUE
      )] <- paste0(
        'matrix[n_series, n_series] P_real;\n',
        '// unconstrained MA partial autocorrelations\n',
        'matrix[n_series, n_series] R_real;\n',
        '// initial joint stationary VARMA process\n',
        'vector[2 * n_series] init;\n',
        '// ma error parameters\n',
        'vector[n_series] error[n];'
      )
    }

    # Update transformed parameters
    if (
      any(grepl(
        'cholesky_factor_corr[n_lv] L_Omega;',
        model_file,
        fixed = TRUE
      ))
    ) {
      model_file[grep(
        'matrix[n_lv, n_lv] A;',
        model_file,
        fixed = TRUE
      )] <- paste0(
        'matrix[n_lv, n_lv] A;\n',
        '// latent trend MA autoregressive terms\n',
        'matrix[n_lv, n_lv] theta;\n',
        '// ma process\n',
        'array[n] vector[n_lv] epsilon;\n'
      )

      end <- grep('vector[n_lv] LV[n];', model_file, fixed = TRUE)
      start <- end - 1
      model_file <- model_file[-c(start:end)]

      model_file[grep(
        'cov_matrix[n_lv] Gamma;',
        model_file,
        fixed = TRUE
      )] <- paste0(
        'cov_matrix[n_lv * 2] Omega;\n',
        "// latent states\n",
        "vector[n_lv] LV[n];"
      )

      start <- grep('// derived latent states', model_file, fixed = TRUE)
      end <- grep('Gamma = phiGamma[2, 1];', model_file, fixed = TRUE) + 1
      model_file <- model_file[-c(start:end)]
      model_file[start] <- paste0(
        model_file[start],
        '\n',
        '// stationary VARMA reparameterisation\n',
        'L_Sigma = diag_pre_multiply(sigma, L_Omega);\n',
        'Sigma = multiply_lower_tri_self_transpose(L_Sigma);\n',

        '{\n',
        '// constrained partial autocorrelations\n',
        'matrix[n_lv, n_lv] P[1];\n',
        'matrix[n_lv, n_lv] R[1];\n',
        '// stationary autoregressive coefficients\n',
        'matrix[n_lv, n_lv] A_init[1];\n',
        'matrix[n_lv, n_lv] theta_init[1];\n',
        'P[1] = P_realtoP(P_real);\n',
        'R[1] = P_realtoP(R_real);\n',
        '// stationary autoregressive and ma coef matrices\n',
        'A_init = rev_mapping(P, Sigma);\n',
        'theta_init = rev_mapping(R, Sigma);\n',
        'theta_init[1] = -theta_init[1];\n',
        '// initial stationary covariance structure\n',
        'Omega = initial_joint_var(Sigma, A_init, theta_init);\n',
        'A = A_init[1];\n',
        'theta = theta_init[1];\n',
        '}\n',

        '// computed VARMA trends\n',
        'epsilon[1] = theta * init[(n_lv + 1) : (n_lv * 2)];\n',
        'LV[1] = (A * init[1 : n_lv]) + trend_mus[ytimes_trend[1, 1 : n_lv]] + epsilon[1] + error[1];\n',
        'for (i in 2 : n) {\n',
        '// lagged error ma process\n',
        'epsilon[i] = theta * error[i - 1];\n',
        '// full VARMA process\n',
        'LV[i] = trend_mus[ytimes_trend[i, 1 : n_lv]] + A * (LV[i - 1] - trend_mus[ytimes_trend[i - 1, 1 : n_lv]]) + epsilon[i] + error[i];\n',
        '}\n',

        '// derived latent states\n',
        'lv_coefs = Z;\n',
        'for (i in 1 : n) {\n',
        'for (s in 1 : n_series) {\n',
        'trend[i, s] = dot_product(lv_coefs[s,  : ], LV[i]);\n',
        '}\n}'
      )
    } else {
      model_file[grep(
        'matrix[n_series, n_series] A;',
        model_file,
        fixed = TRUE
      )] <- paste0(
        'matrix[n_series, n_series] A;\n',
        '// latent trend MA autoregressive terms\n',
        'matrix[n_series, n_series] theta;\n',
        '// ma process\n',
        'array[n] vector[n_series] epsilon;\n'
      )

      start <- grep('// raw latent trends', model_file, fixed = TRUE)
      end <- start + 1
      model_file <- model_file[-c(start:end)]

      start <- grep(
        '// trend estimates in matrix-form',
        model_file,
        fixed = TRUE
      )
      end <- grep('Gamma = phiGamma[2, 1];', model_file, fixed = TRUE) + 1
      model_file <- model_file[-c(start:end)]

      model_file[grep(
        'cov_matrix[n_series] Gamma;',
        model_file,
        fixed = TRUE
      )] <- paste0(
        'cov_matrix[n_series * 2] Omega;\n',
        '// raw latent trends\n',
        'vector[n_series] trend_raw[n];\n',
        '// trend estimates in matrix-form\n',
        'matrix[n, n_series] trend;'
      )

      model_file[start] <- paste0(
        model_file[start],
        '\n',
        '// stationary VARMA reparameterisation\n',
        'L_Sigma = diag_pre_multiply(sigma, L_Omega);\n',
        'Sigma = multiply_lower_tri_self_transpose(L_Sigma);\n',

        '{\n',
        '// constrained partial autocorrelations\n',
        'matrix[n_series, n_series] P[1];\n',
        'matrix[n_series, n_series] R[1];\n',
        '// stationary autoregressive coefficients\n',
        'matrix[n_series, n_series] A_init[1];\n',
        'matrix[n_series, n_series] theta_init[1];\n',
        'P[1] = P_realtoP(P_real);\n',
        'R[1] = P_realtoP(R_real);\n',
        '// stationary autoregressive and ma coef matrices\n',
        'A_init = rev_mapping(P, Sigma);\n',
        'theta_init = rev_mapping(R, Sigma);\n',
        'theta_init[1] = -theta_init[1];\n',
        '// initial stationary covariance structure\n',
        'Omega = initial_joint_var(Sigma, A_init, theta_init);\n',
        'A = A_init[1];\n',
        'theta = theta_init[1];\n',
        '}\n',

        '// computed VARMA trends\n',
        'epsilon[1] = theta * init[(n_series + 1) : (n_series * 2)];\n',
        'trend_raw[1] = (A * init[1 : n_series]) + epsilon[1] + error[1];\n',
        'for (i in 2 : n) {\n',
        '// lagged error ma process\n',
        'epsilon[i] = theta * error[i - 1];\n',
        '// full VARMA process\n',
        'trend_raw[i] = (A * trend_raw[i - 1]) + epsilon[i] + error[i];\n',
        '}\n',

        '// computed trends in matrix form\n',
        'for (i in 1 : n) {\n',
        'trend[i, 1 : n_series] = to_row_vector(trend_raw[i]);\n',
        '}'
      )
    }
    model_file <- readLines(textConnection(model_file), n = -1)

    # Update model
    if (
      any(grepl(
        'cholesky_factor_corr[n_lv] L_Omega;',
        model_file,
        fixed = TRUE
      ))
    ) {
      start <- grep('// latent state mean parameters', model_file, fixed = TRUE)
      end <- start + 1
      model_file <- model_file[-c(start:end)]

      model_file[grep('// latent state means', model_file, fixed = TRUE)] <-
        paste0(
          '// unconstrained ma inverse partial autocorrelations\n',
          'diagonal(R_real) ~ std_normal();\n',
          'for (i in 1 : n_lv) {\n',
          'for (j in 1 : n_lv) {\n',
          'if (i != j)\n',
          'R_real[i, j] ~ std_normal();\n',
          '}\n',
          '}\n',

          '// initial joint stationary distribution\n',
          'init ~ multi_normal(init_zeros, Omega);\n',

          '// correlated contemporaneous errors\n',
          'for (i in 1 : n) {\n',
          'error[i] ~ multi_normal_cholesky(trend_zeros, L_Sigma);\n',
          '}\n',
          '// latent state means'
        )
      model_file <- readLines(textConnection(model_file), n = -1)

      end <- grep(
        '(LV[i - 1] - trend_mus[ytimes_trend[i - 1, 1:n_lv]]);',
        model_file,
        fixed = TRUE
      ) +
        1
      start <- grep('// latent state means', model_file, fixed = TRUE)
      model_file <- model_file[-c(start:end)]

      start <- grep(
        'LV[1] ~ multi_normal(trend_mus[ytimes_trend[1, 1:n_lv]], Gamma);',
        model_file,
        fixed = TRUE
      )
      end <- max(grep('L_Sigma);', model_file, fixed = TRUE)) + 1
      model_file <- model_file[-c(start:end)]
    } else {
      start <- grep('// latent trend mean parameters', model_file, fixed = TRUE)
      end <- start + 1
      model_file <- model_file[-c(start:end)]

      model_file[grep('// trend means', model_file, fixed = TRUE)] <-
        paste0(
          '// unconstrained ma inverse partial autocorrelations\n',
          'diagonal(R_real) ~ std_normal();\n',
          'for (i in 1 : n_series) {\n',
          'for (j in 1 : n_series) {\n',
          'if (i != j)\n',
          'R_real[i, j] ~ std_normal();\n',
          '}\n',
          '}\n',

          '// initial joint stationary distribution\n',
          'init ~ multi_normal(init_zeros, Omega);\n',

          '// correlated contemporaneous errors\n',
          'for (i in 1 : n) {\n',
          'error[i] ~ multi_normal_cholesky(trend_zeros, L_Sigma);\n',
          '}\n',
          '// trend means'
        )
      model_file <- readLines(textConnection(model_file), n = -1)

      start <- grep('// trend means', model_file, fixed = TRUE)
      end <- max(grep(
        'trend_raw[i] ~ multi_normal_cholesky(mu[i - 1], L_Sigma);',
        model_file,
        fixed = TRUE
      )) +
        1
      model_file <- model_file[-c(start:end)]
    }
    model_file <- readLines(textConnection(model_file), n = -1)
  }

  # Now do any rearrangements needed for hierarchical correlations
  if (grepl('hiercor', validate_trend_model(trend_model))) {
    # Add the function to calculate a convex combination of correlation matrices
    if (any(grepl('functions {', model_file, fixed = TRUE))) {
      model_file[grep('functions {', model_file, fixed = TRUE)] <-
        paste0(
          'functions {\n',
          '/* Function to compute a partially pooled correlation matrix */\n',
          '/* https://discourse.mc-stan.org/t/hierarchical-prior-for-partial-pooling-on-correlation-matrices*/\n',
          'matrix combine_cholesky(matrix global_chol_cor, matrix local_chol_cor, real alpha){\n',
          'int dim = rows(local_chol_cor);\n',
          'matrix[dim, dim] global_cor = multiply_lower_tri_self_transpose(global_chol_cor);\n',
          'matrix[dim, dim] local_cor = multiply_lower_tri_self_transpose(local_chol_cor);\n',
          'matrix[dim, dim] combined_chol_cor;\n',
          'combined_chol_cor = cholesky_decompose(alpha * global_cor +\n',
          '                                       (1 - alpha) * local_cor);\n',
          'return(combined_chol_cor);\n',
          '}\n'
        )
    } else {
      model_file[grep('Stan model code', model_file)] <-
        paste0(
          '// Stan model code generated by package mvgam\n',
          'functions {\n',
          '/* Function to compute a partially pooled correlation matrix */\n',
          '/* https://discourse.mc-stan.org/t/hierarchical-prior-for-partial-pooling-on-correlation-matrices*/\n',
          'matrix combine_cholesky(matrix global_chol_cor, matrix local_chol_cor, real alpha){\n',
          'int dim = rows(local_chol_cor);\n',
          'matrix[dim, dim] global_cor = multiply_lower_tri_self_transpose(global_chol_cor);\n',
          'matrix[dim, dim] local_cor = multiply_lower_tri_self_transpose(local_chol_cor);\n',
          'matrix[dim, dim] combined_chol_cor;\n',
          'combined_chol_cor = cholesky_decompose(alpha * global_cor +\n',
          '                                       (1 - alpha) * local_cor);\n',
          'return(combined_chol_cor);\n',
          '}\n}\n'
        )
    }
    model_file <- readLines(textConnection(model_file), n = -1)
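
    # Hedged R analogue (toy matrices; the Stan version works on Cholesky
    # factors) of combine_cholesky(): a convex combination of two correlation
    # matrices stays positive definite with a unit diagonal. Guarded from running.
    if (FALSE) {
      alpha <- 0.7
      global_cor <- matrix(c(1, 0.5, 0.5, 1), 2, 2)
      local_cor <- matrix(c(1, -0.2, -0.2, 1), 2, 2)
      combined <- alpha * global_cor + (1 - alpha) * local_cor
      chol(combined) # succeeds, so the pooled matrix is a valid correlation matrix
    }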

    # Add group information to data block
    model_file[grep('int<lower=0> n_series;', model_file, fixed = TRUE)] <-
      paste0(
        "int<lower=0> n_groups; // number of groups (correlations apply within grouping levels)\n",
        "int<lower=0> n_subgroups; // number of subgroups (units whose errors will be correlated)\n",
        "int<lower=0> n_series; // total number of unique series (n_groups * n_subgroups)\n",
        "array[n_groups, n_subgroups] int<lower=1> group_inds; // indices of group membership"
      )
    model_file <- readLines(textConnection(model_file), n = -1)

    #### Changes for VAR models ####
    if (grepl('VAR', trend_char)) {
      if (
        any(grepl(
          "cholesky_factor_corr[n_lv] L_Omega;",
          model_file,
          fixed = TRUE
        ))
      ) {
        use_lv <- TRUE
      } else {
        use_lv <- FALSE
      }
      #### Parameters ####
      # Need arrays of cholesky factors and partial autocorrelation matrices
      if (use_lv) {
        # Changes for State-Space models
        model_file[grep(
          "cholesky_factor_corr[n_lv] L_Omega;",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            'cholesky_factor_corr[n_subgroups] L_Omega_global;\n',
            'array[n_groups] cholesky_factor_corr[n_subgroups] L_deviation_group;\n',
            'real<lower=0,upper=1> alpha_cor;'
          )
        model_file[grep(
          "matrix[n_lv, n_lv] P_real;",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            'array[n_groups] matrix[n_subgroups, n_subgroups] P_real_group;'
          )
      } else {
        # Changes for non State-Space models
        model_file[grep(
          "cholesky_factor_corr[n_series] L_Omega;",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            'cholesky_factor_corr[n_subgroups] L_Omega_global;\n',
            'array[n_groups] cholesky_factor_corr[n_subgroups] L_deviation_group;\n',
            'real<lower=0,upper=1> alpha_cor;'
          )
        model_file[grep(
          "matrix[n_series, n_series] P_real;",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            'array[n_groups] matrix[n_subgroups, n_subgroups] P_real_group;'
          )
      }
      model_file <- readLines(textConnection(model_file), n = -1)

      #### Transformed parameters ####
      # Need arrays of autocorrelation matrices, Gamma and Sigma matrices
      if (use_lv) {
        # Changes for State-Space models
        model_file[grep("matrix[n_lv, n_lv] A;", model_file, fixed = TRUE)] <-
          paste0(
            'array[n_groups] matrix[n_subgroups, n_subgroups] A_group;\n',
            'matrix[n_lv, n_lv] A;'
          )
        model_file[grep("cov_matrix[n_lv] Sigma;", model_file, fixed = TRUE)] <-
          paste0(
            'array[n_groups] cov_matrix[n_subgroups] Sigma_group;\n',
            "matrix[n_lv, n_lv] Sigma;"
          )
        model_file[grep("cov_matrix[n_lv] Gamma;", model_file, fixed = TRUE)] <-
          paste0(
            'array[n_groups] cov_matrix[n_subgroups] Gamma_group;\n',
            "matrix[n_lv, n_lv] Gamma;"
          )
        model_file <- model_file[
          -grep(
            'Sigma = multiply_lower_tri_self_transpose(L_Sigma);',
            model_file,
            fixed = TRUE
          )
        ]
        model_file[grep(
          "L_Sigma = diag_pre_multiply(sigma, L_Omega);",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            '// derived group-level VAR covariance matrices\n',
            'array[n_groups] cholesky_factor_corr[n_subgroups] L_Omega_group;\n',
            'array[n_groups] matrix[n_subgroups, n_subgroups] L_Sigma_group;\n',
            'for (g in 1 : n_groups){\n',
            'L_Omega_group[g] = combine_cholesky(L_Omega_global, L_deviation_group[g], alpha_cor);\n',
            'L_Sigma_group[g] = diag_pre_multiply(sigma[group_inds[g]], L_Omega_group[g]);\n',
            'Sigma_group[g] = multiply_lower_tri_self_transpose(L_Sigma_group[g]);\n',
            '}\n'
          )
        starts <- grep(
          "// stationary VAR reparameterisation",
          model_file,
          fixed = TRUE
        ) +
          1
        ends <- grep(
          "// stationary VAR reparameterisation",
          model_file,
          fixed = TRUE
        ) +
          8
        model_file <- model_file[-(starts:ends)]
        model_file[grep(
          "// stationary VAR reparameterisation",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            '// stationary VAR reparameterisation\n',
            '{\n',
            "array[1] matrix[n_subgroups, n_subgroups] P;\n",
            "array[2, 1] matrix[n_subgroups, n_subgroups] phiGamma;\n",
            'for (g in 1 : n_groups){\n',
            "P[1] = P_realtoP(P_real_group[g]);\n",
            "phiGamma = rev_mapping(P, Sigma_group[g]);\n",
            "A_group[g] = phiGamma[1, 1];\n",
            "Gamma_group[g] = phiGamma[2, 1];\n",
            "}\n\n",
            "// computed (full) VAR matrices\n",
            'Sigma = rep_matrix(0, n_lv, n_lv);\n',
            'Gamma = rep_matrix(0, n_lv, n_lv);\n',
            'A = rep_matrix(0, n_lv, n_lv);\n',
            'for (g in 1 : n_groups){\n',
            'Sigma[group_inds[g], group_inds[g]] = multiply_lower_tri_self_transpose(L_Sigma_group[g]);\n',
            'Gamma[group_inds[g], group_inds[g]] = Gamma_group[g];\n',
            'A[group_inds[g], group_inds[g]] = A_group[g];\n',
            '}\n',
            'L_Sigma = cholesky_decompose(Sigma);\n',
            "}\n\n"
          )
      } else {
        # Changes for non State-Space models
        model_file[grep(
          "matrix[n_series, n_series] A;",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            'array[n_groups] matrix[n_subgroups, n_subgroups] A_group;\n',
            'matrix[n_series, n_series] A;'
          )
        model_file[grep(
          "cov_matrix[n_series] Sigma;",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            'array[n_groups] cov_matrix[n_subgroups] Sigma_group;\n',
            "matrix[n_series, n_series] Sigma;"
          )
        model_file[grep(
          "cov_matrix[n_series] Gamma;",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            'array[n_groups] cov_matrix[n_subgroups] Gamma_group;\n',
            'matrix[n_series, n_series] Gamma;'
          )
        model_file <- model_file[
          -grep(
            'Sigma = multiply_lower_tri_self_transpose(L_Sigma);',
            model_file,
            fixed = TRUE
          )
        ]
        model_file[grep(
          "L_Sigma = diag_pre_multiply(sigma, L_Omega);",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            '// derived group-level VAR covariance matrices\n',
            'array[n_groups] cholesky_factor_corr[n_subgroups] L_Omega_group;\n',
            'array[n_groups] matrix[n_subgroups, n_subgroups] L_Sigma_group;\n',
            'for (g in 1 : n_groups){\n',
            'L_Omega_group[g] = combine_cholesky(L_Omega_global, L_deviation_group[g], alpha_cor);\n',
            'L_Sigma_group[g] = diag_pre_multiply(sigma[group_inds[g]], L_Omega_group[g]);\n',
            'Sigma_group[g] = multiply_lower_tri_self_transpose(L_Sigma_group[g]);\n',
            '}\n'
          )
        starts <- grep(
          "// stationary VAR reparameterisation",
          model_file,
          fixed = TRUE
        ) +
          1
        ends <- grep(
          "// stationary VAR reparameterisation",
          model_file,
          fixed = TRUE
        ) +
          8
        model_file <- model_file[-(starts:ends)]
        model_file[grep(
          "// stationary VAR reparameterisation",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            '// stationary VAR reparameterisation\n',
            '{\n',
            "array[1] matrix[n_subgroups, n_subgroups] P;\n",
            "array[2, 1] matrix[n_subgroups, n_subgroups] phiGamma;\n",
            'for (g in 1 : n_groups){\n',
            "P[1] = P_realtoP(P_real_group[g]);\n",
            "phiGamma = rev_mapping(P, Sigma_group[g]);\n",
            "A_group[g] = phiGamma[1, 1];\n",
            "Gamma_group[g] = phiGamma[2, 1];\n",
            "}\n\n",
            "// computed (full) VAR matrices\n",
            'Sigma = rep_matrix(0, n_series, n_series);\n',
            'Gamma = rep_matrix(0, n_series, n_series);\n',
            'A = rep_matrix(0, n_series, n_series);\n',
            'for (g in 1 : n_groups){\n',
            'Sigma[group_inds[g], group_inds[g]] = multiply_lower_tri_self_transpose(L_Sigma_group[g]);\n',
            'A[group_inds[g], group_inds[g]] = A_group[g];\n',
            'Gamma[group_inds[g], group_inds[g]] = Gamma_group[g];\n',
            '}\n',
            'L_Sigma = cholesky_decompose(Sigma);\n',
            "}\n\n"
          )
      }
      model_file <- readLines(textConnection(model_file), n = -1)
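
      # Hedged R sketch (toy values, never run) of the block-diagonal assembly
      # above: each group's covariance fills its own block of the full matrix,
      # indexed by group_inds, leaving zero cross-group covariance elsewhere.
      if (FALSE) {
        group_inds <- matrix(1:4, nrow = 2, byrow = TRUE)
        Sigma_group <- list(diag(2), matrix(c(1, 0.4, 0.4, 1), 2, 2))
        Sigma <- matrix(0, 4, 4)
        for (g in 1:2) {
          Sigma[group_inds[g, ], group_inds[g, ]] <- Sigma_group[[g]]
        }
        Sigma # block-diagonal: groups are mutually uncorrelated by construction
      }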

      #### Model ####
      model_file[grep(
        "L_Omega ~ lkj_corr_cholesky(2);",
        model_file,
        fixed = TRUE
      )] <-
        paste0(
          'alpha_cor ~ beta(3, 2);\n',
          'L_Omega_global ~ lkj_corr_cholesky(1);\n',
          'for (g in 1 : n_groups){\n',
          'L_deviation_group[g] ~ lkj_corr_cholesky(6);\n',
          '}'
        )
      starts <- grep(
        "// unconstrained partial autocorrelations",
        model_file,
        fixed = TRUE
      ) +
        1
      ends <- grep(
        "// unconstrained partial autocorrelations",
        model_file,
        fixed = TRUE
      ) +
        6
      model_file <- model_file[-(starts:ends)]
      model_file[grep(
        "// unconstrained partial autocorrelations",
        model_file,
        fixed = TRUE
      )] <-
        paste0(
          'for (g in 1 : n_groups){\n',
          'diagonal(P_real_group[g]) ~ normal(Pmu[1], 1 / sqrt(Pomega[1]));\n',
          'for (i in 1:n_subgroups) {\n',
          'for (j in 1:n_subgroups) {\n',
          'if(i != j) P_real_group[g, i, j] ~ normal(Pmu[2], 1 / sqrt(Pomega[2]));\n',
          '}\n}\n}'
        )
      model_file <- readLines(textConnection(model_file), n = -1)
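
      # Note (hedged, toy numbers): the generated Stan parameterises these
      # normal priors with a precision, so the implied sd is 1 / sqrt(Pomega).
      if (FALSE) {
        Pomega <- 4
        1 / sqrt(Pomega) # prior sd of 0.5 on each partial autocorrelation
      }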
    } else {
      if (grepl('ZMVN', trend_char)) {
        #### Zero-mean multinormals ####
        if (
          any(grepl(
            "matrix[n_series, n_lv] lv_coefs;",
            model_file,
            fixed = TRUE
          ))
        ) {
          use_lv <- TRUE
        } else {
          use_lv <- FALSE
        }

        #### Transformed data ####
        if (use_lv) {
          if (any(grepl('transformed data {', model_file, fixed = TRUE))) {
            model_file[grep('transformed data {', model_file, fixed = TRUE)] <-
              paste0(
                'transformed data {\n',
                'vector[n_subgroups] trend_zeros = rep_vector(0.0, n_subgroups);'
              )
          } else {
            model_file[grep('parameters {', model_file, fixed = TRUE)[1]] <-
              paste0(
                'transformed data {\n',
                'vector[n_subgroups] trend_zeros = rep_vector(0.0, n_subgroups);\n',
                '}\nparameters {'
              )
          }
        } else {
          model_file[grep(
            "vector[n_series] trend_zeros = rep_vector(0.0, n_series);",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              'vector[n_subgroups] trend_zeros = rep_vector(0.0, n_subgroups);'
            )
        }
        model_file <- readLines(textConnection(model_file), n = -1)

        #### Parameters ####
        if (use_lv) {
          model_file <- model_file[
            -grep(
              'cholesky_factor_corr[n_lv] L_Omega;',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep("// correlated latent residuals", model_file, fixed = TRUE)
          ]
          model_file <- model_file[
            -grep("array[n] vector[n_lv] LV_raw;", model_file, fixed = TRUE)
          ]
          model_file[grep("[n_lv] sigma;", model_file, fixed = TRUE)] <-
            paste0(
              model_file[grep("[n_lv] sigma;", model_file, fixed = TRUE)],
              '\n',
              '\n\n',
              '// correlation params and correlated errors per group\n',
              'cholesky_factor_corr[n_subgroups] L_Omega_global;\n',
              'array[n_groups] cholesky_factor_corr[n_subgroups] L_deviation_group;\n',
              'real<lower=0,upper=1> alpha_cor;\n',
              'array[n] matrix[n_groups, n_subgroups] sub_error;'
            )
        } else {
          model_file <- model_file[
            -grep(
              'cholesky_factor_corr[n_series] L_Omega;',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep("// correlated latent residuals", model_file, fixed = TRUE)
          ]
          model_file <- model_file[
            -grep(
              "array[n] vector[n_series] trend_raw;",
              model_file,
              fixed = TRUE
            )
          ]
          model_file[grep(
            "vector<lower=0>[n_series] sigma;",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              'vector<lower=0>[n_series] sigma;\n',
              '\n\n',
              '// correlation params and correlated errors per group\n',
              'cholesky_factor_corr[n_subgroups] L_Omega_global;\n',
              'array[n_groups] cholesky_factor_corr[n_subgroups] L_deviation_group;\n',
              'real<lower=0,upper=1> alpha_cor;\n',
              'array[n] matrix[n_groups, n_subgroups] sub_error;'
            )
        }
        model_file <- readLines(textConnection(model_file), n = -1)

        #### Transformed parameters ####
        if (use_lv) {
          model_file <- model_file[
            -grep('matrix[n_lv, n_lv] L_Sigma;', model_file, fixed = TRUE)
          ]
          model_file <- model_file[
            -grep(
              'L_Sigma = diag_pre_multiply(sigma, L_Omega);',
              model_file,
              fixed = TRUE
            )
          ]
          model_file[grep(
            "// LKJ form of covariance matrix",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// reconstructed correlated errors\n',
              'array[n] vector[n_lv] error;\n',
              'array[n_groups] cholesky_factor_corr[n_subgroups] L_Omega_group;\n',
              '\n',
              '// LKJ forms of covariance matrices\n',
              'array[n_groups] matrix[n_subgroups, n_subgroups] L_Sigma_group;'
            )
          model_file[grep(
            "// correlated residuals",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// derived error correlation and covariance matrices\n',
              'for (g in 1 : n_groups){\n',
              'L_Omega_group[g] = combine_cholesky(L_Omega_global, L_deviation_group[g], alpha_cor);\n',
              'L_Sigma_group[g] = diag_pre_multiply(sigma[group_inds[g]], L_Omega_group[g]);\n',
              '}\n',

              '// derived correlated errors\n',
              'for (i in 1 : n){\n',
              "error[i] = to_vector(sub_error[i]');\n",
              '}\n'
            )
          model_file[grep(
            "LV[i, 1:n_lv] = to_row_vector(LV_raw[i]);",
            model_file,
            fixed = TRUE
          )] <-
            "LV[i, 1:n_lv] = to_row_vector(error[i]);"
        } else {
          model_file <- model_file[
            -grep(
              'matrix[n_series, n_series] L_Sigma;',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep(
              'L_Sigma = diag_pre_multiply(sigma, L_Omega);',
              model_file,
              fixed = TRUE
            )
          ]
          model_file[grep(
            "// LKJ form of covariance matrix",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// reconstructed correlated errors\n',
              'array[n] vector[n_series] error;\n',
              'array[n_groups] cholesky_factor_corr[n_subgroups] L_Omega_group;\n',
              '\n',
              '// LKJ forms of covariance matrices\n',
              'array[n_groups] matrix[n_subgroups, n_subgroups] L_Sigma_group;'
            )
          model_file[grep(
            "// correlated residuals",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// derived error correlation and covariance matrices\n',
              'for (g in 1 : n_groups){\n',
              'L_Omega_group[g] = combine_cholesky(L_Omega_global, L_deviation_group[g], alpha_cor);\n',
              'L_Sigma_group[g] = diag_pre_multiply(sigma[group_inds[g]], L_Omega_group[g]);\n',
              '}\n',

              '// derived correlated errors\n',
              'for (i in 1 : n){\n',
              "error[i] = to_vector(sub_error[i]');\n",
              '}\n'
            )
          model_file[grep(
            "trend[i, 1:n_series] = to_row_vector(trend_raw[i]);",
            model_file,
            fixed = TRUE
          )] <-
            "trend[i, 1:n_series] = to_row_vector(error[i]);"
        }
        model_file <- readLines(textConnection(model_file), n = -1)
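
        # Hedged R check (toy dimensions, never run) that flattening via the
        # transpose, as in the generated Stan's to_vector(sub_error[i]'), stacks
        # subgroup errors group-by-group, matching the row-wise group_inds layout.
        if (FALSE) {
          sub_error_i <- matrix(1:6, nrow = 2, ncol = 3, byrow = TRUE)
          as.vector(t(sub_error_i)) # 1 2 3 4 5 6: group 1 first, then group 2
        }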

        #### Model ####
        starts <- grep(
          "// residual error correlations",
          model_file,
          fixed = TRUE
        ) +
          1
        ends <- grep(
          "// residual error correlations",
          model_file,
          fixed = TRUE
        ) +
          4
        model_file <- model_file[-(starts:ends)]
        model_file[grep(
          "// residual error correlations",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            '// hierarchical latent error correlations\n',
            'alpha_cor ~ beta(3, 2);\n',
            'L_Omega_global ~ lkj_corr_cholesky(1);\n',
            'for (g in 1 : n_groups){\n',
            'L_deviation_group[g] ~ lkj_corr_cholesky(6);\n',
            '}\n',
            '\n',
            '// contemporaneous errors\n',
            'for (i in 1 : n) {\n',
            'for (g in 1 : n_groups){\n',
            'to_vector(sub_error[i, g]) ~ multi_normal_cholesky(trend_zeros, L_Sigma_group[g]);\n',
            '}\n',
            '}'
          )
        model_file <- readLines(textConnection(model_file), n = -1)

        #### Generated quantities ####
        if (use_lv) {
          model_file <- model_file[
            -grep(
              "cov_matrix[n_lv] Sigma = multiply_lower_tri_self_transpose(L_Sigma);",
              model_file,
              fixed = TRUE
            )
          ]
          model_file[grep(
            "// computed error covariance matrix",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// computed (full) error covariance matrix\n',
              'matrix[n_lv, n_lv] Sigma;\n',
              'Sigma = rep_matrix(0, n_lv, n_lv);\n',
              'for (g in 1 : n_groups){\n',
              'Sigma[group_inds[g], group_inds[g]] = multiply_lower_tri_self_transpose(L_Sigma_group[g]);\n',
              '}'
            )
        } else {
          model_file <- model_file[
            -grep(
              "cov_matrix[n_series] Sigma = multiply_lower_tri_self_transpose(L_Sigma);",
              model_file,
              fixed = TRUE
            )
          ]
          model_file[grep(
            "// computed error covariance matrix",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// computed (full) error covariance matrix\n',
              'matrix[n_series, n_series] Sigma;\n',
              'Sigma = rep_matrix(0, n_series, n_series);\n',
              'for (g in 1 : n_groups){\n',
              'Sigma[group_inds[g], group_inds[g]] = multiply_lower_tri_self_transpose(L_Sigma_group[g]);\n',
              '}'
            )
        }
        model_file <- readLines(textConnection(model_file), n = -1)
      } else {
        #### Random walk and AR models ####
        if (any(grepl("vector[n_lv] trend_zeros", model_file, fixed = TRUE))) {
          use_lv <- TRUE
        } else {
          use_lv <- FALSE
        }

        #### Transformed data ####
        if (use_lv) {
          model_file[grep(
            "vector[n_lv] trend_zeros = rep_vector(0.0, n_lv);",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              'vector[n_subgroups] trend_zeros = rep_vector(0.0, n_subgroups);'
            )
        } else {
          model_file[grep(
            "vector[n_series] trend_zeros = rep_vector(0.0, n_series);",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              'vector[n_subgroups] trend_zeros = rep_vector(0.0, n_subgroups);'
            )
        }
        model_file <- readLines(textConnection(model_file), n = -1)

        #### Parameters ####
        if (use_lv) {
          model_file <- model_file[
            -grep(
              'cholesky_factor_corr[n_lv] L_Omega;',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep('// dynamic error parameters', model_file, fixed = TRUE)
          ]
          model_file <- model_file[
            -grep("vector[n_lv] error[n];", model_file, fixed = TRUE)
          ]
          model_file[grep("[n_lv] sigma;", model_file, fixed = TRUE)] <-
            paste0(
              model_file[grep("[n_lv] sigma;", model_file, fixed = TRUE)],
              '\n',
              '\n\n',
              '// correlation params and dynamic error parameters per group\n',
              'cholesky_factor_corr[n_subgroups] L_Omega_global;\n',
              'array[n_groups] cholesky_factor_corr[n_subgroups] L_deviation_group;\n',
              'real<lower=0,upper=1> alpha_cor;\n',
              'array[n] matrix[n_groups, n_subgroups] sub_error;'
            )
        } else {
          model_file <- model_file[
            -grep(
              'cholesky_factor_corr[n_series] L_Omega;',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep('// dynamic error parameters', model_file, fixed = TRUE)
          ]
          model_file <- model_file[
            -grep("vector[n_series] error[n];", model_file, fixed = TRUE)
          ]
          model_file[grep(
            "vector<lower=0>[n_series] sigma;",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              'vector<lower=0>[n_series] sigma;\n',
              '\n\n',
              '// correlation params and dynamic error parameters per group\n',
              'cholesky_factor_corr[n_subgroups] L_Omega_global;\n',
              'array[n_groups] cholesky_factor_corr[n_subgroups] L_deviation_group;\n',
              'real<lower=0,upper=1> alpha_cor;\n',
              'array[n] matrix[n_groups, n_subgroups] sub_error;'
            )
        }
        model_file <- readLines(textConnection(model_file), n = -1)

        #### Transformed parameters ####
        if (use_lv) {
          model_file <- model_file[
            -grep(
              '// computed error covariance matrix',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep('cov_matrix[n_lv] Sigma;', model_file, fixed = TRUE)
          ]
          model_file <- model_file[
            -grep('matrix[n_lv, n_lv] L_Sigma;', model_file, fixed = TRUE)
          ]
          model_file <- model_file[
            -grep(
              'L_Sigma = diag_pre_multiply(sigma, L_Omega);',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep(
              'Sigma = multiply_lower_tri_self_transpose(L_Sigma);',
              model_file,
              fixed = TRUE
            )
          ]
          model_file[grep(
            "// LKJ form of covariance matrix",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// reconstructed correlated errors\n',
              'array[n] vector[n_lv] error;\n',
              'array[n_groups] cholesky_factor_corr[n_subgroups] L_Omega_group;\n',
              '\n',
              '// LKJ forms of covariance matrices\n',
              'array[n_groups] matrix[n_subgroups, n_subgroups] L_Sigma_group;'
            )
          model_file[grep(
            "// derived latent states",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// derived error correlation and covariance matrices\n',
              'for (g in 1 : n_groups){\n',
              'L_Omega_group[g] = combine_cholesky(L_Omega_global, L_deviation_group[g], alpha_cor);\n',
              'L_Sigma_group[g] = diag_pre_multiply(sigma[group_inds[g]], L_Omega_group[g]);\n',
              '}\n',

              '// derived correlated errors\n',
              'for (i in 1 : n){\n',
              "error[i] = to_vector(sub_error[i]');\n",
              '}\n',
              '// derived latent states'
            )
        } else {
          model_file <- model_file[
            -grep(
              '// computed error covariance matrix',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep('cov_matrix[n_series] Sigma;', model_file, fixed = TRUE)
          ]
          model_file <- model_file[
            -grep(
              'matrix[n_series, n_series] L_Sigma;',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep(
              'L_Sigma = diag_pre_multiply(sigma, L_Omega);',
              model_file,
              fixed = TRUE
            )
          ]
          model_file <- model_file[
            -grep(
              'Sigma = multiply_lower_tri_self_transpose(L_Sigma);',
              model_file,
              fixed = TRUE
            )
          ]
          model_file[grep(
            "// LKJ form of covariance matrix",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// reconstructed correlated errors\n',
              'array[n] vector[n_series] error;\n',
              'array[n_groups] cholesky_factor_corr[n_subgroups] L_Omega_group;\n',
              '\n',
              '// LKJ forms of covariance matrices\n',
              'array[n_groups] matrix[n_subgroups, n_subgroups] L_Sigma_group;'
            )
          model_file[grep(
            "// derived latent states",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// derived error correlation and covariance matrices\n',
              'for (g in 1 : n_groups){\n',
              'L_Omega_group[g] = combine_cholesky(L_Omega_global, L_deviation_group[g], alpha_cor);\n',
              'L_Sigma_group[g] = diag_pre_multiply(sigma[group_inds[g]], L_Omega_group[g]);\n',
              '}\n',

              '// derived correlated errors\n',
              'for (i in 1 : n){\n',
              "error[i] = to_vector(sub_error[i]');\n",
              '}\n',
              '// derived latent states'
            )
        }
        model_file <- readLines(textConnection(model_file), n = -1)

        #### Model ####
        starts <- grep("// contemporaneous errors", model_file, fixed = TRUE) +
          1
        ends <- grep("// contemporaneous errors", model_file, fixed = TRUE) + 4
        model_file <- model_file[-(starts:ends)]
        model_file[grep(
          "// contemporaneous errors",
          model_file,
          fixed = TRUE
        )] <-
          paste0(
            '// hierarchical process error correlations\n',
            'alpha_cor ~ beta(3, 2);\n',
            'L_Omega_global ~ lkj_corr_cholesky(1);\n',
            'for (g in 1 : n_groups){\n',
            'L_deviation_group[g] ~ lkj_corr_cholesky(6);\n',
            '}\n',
            '\n',
            '// contemporaneous errors\n',
            'for (i in 1 : n) {\n',
            'for (g in 1 : n_groups){\n',
            'to_vector(sub_error[i, g]) ~ multi_normal_cholesky(trend_zeros, L_Sigma_group[g]);\n',
            '}\n',
            '}'
          )
        model_file <- readLines(textConnection(model_file), n = -1)

        #### Generated quantities ####
        if (use_lv) {
          model_file[grep(
            "// posterior predictions",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// computed (full) error covariance matrix\n',
              'matrix[n_lv, n_lv] Sigma;\n',
              'Sigma = rep_matrix(0, n_lv, n_lv);\n',
              'for (g in 1 : n_groups){\n',
              'Sigma[group_inds[g], group_inds[g]] = multiply_lower_tri_self_transpose(L_Sigma_group[g]);\n',
              '}\n',
              '\n',
              '// posterior predictions'
            )
        } else {
          model_file[grep(
            "// posterior predictions",
            model_file,
            fixed = TRUE
          )] <-
            paste0(
              '// computed (full) error covariance matrix\n',
              'matrix[n_series, n_series] Sigma;\n',
              'Sigma = rep_matrix(0, n_series, n_series);\n',
              'for (g in 1 : n_groups){\n',
              'Sigma[group_inds[g], group_inds[g]] = multiply_lower_tri_self_transpose(L_Sigma_group[g]);\n',
              '}\n',
              '\n',
              '// posterior predictions'
            )
        }
        model_file <- readLines(textConnection(model_file), n = -1)
      }
    }

    #### Add grouping information to model_data ####
    model_data$group_inds <- matrix(
      1:nlevels(data_train$series),
      nrow = nlevels(data_train[[trend_model$gr]]),
      ncol = nlevels(data_train[[trend_model$subgr]]),
      byrow = TRUE
    )
    model_data$n_groups <- nlevels(data_train[[trend_model$gr]])
    model_data$n_subgroups <- nlevels(data_train[[trend_model$subgr]])
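
    # For illustration (hedged, toy sizes; never run): with 2 groups and 3
    # subgroups this construction assigns consecutive series to each group's row.
    if (FALSE) {
      matrix(1:6, nrow = 2, ncol = 3, byrow = TRUE)
      #      [,1] [,2] [,3]
      # [1,]    1    2    3
      # [2,]    4    5    6
    }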
  }
  return(list(model_file = model_file, model_data = model_data))
}


================================================
FILE: R/add_base_dgam_lines.R
================================================
#' Dynamic GAM model file additions
#'
#' @noRd
#'
#' @param use_lv Logical (use latent variables or not?)
#'
#' @param stan Logical (convert existing model to a Stan model?)
#'
#' @param offset Logical (include an offset in the linear predictor?)
#'
#' @return A character string to add to the mgcv jagam model file
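#'
#' @examples
#' \dontrun{
#' # Hypothetical internal-usage sketch; this helper is internal (noRd) and is
#' # normally called by mvgam's model-construction code rather than by users
#' skeleton <- add_base_dgam_lines(use_lv = TRUE, stan = TRUE)
#' cat(substr(skeleton, 1, 200)) # inspect the start of the Stan template
#' }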
add_base_dgam_lines <- function(use_lv, stan = FALSE, offset = FALSE) {
  if (stan) {
    if (use_lv) {
      add <- "
    ##insert data
    transformed data {
    // Number of non-zero lower triangular factor loadings
    // Ensures identifiability of the model - no rotation of factors
    int<lower=1> M;
    M = n_lv * (n_series - n_lv) + n_lv * (n_lv - 1) / 2 + n_lv;
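    // e.g. n_series = 4 and n_lv = 2 gives M = 2*(4 - 2) + 2*1/2 + 2 = 7, the
    // free entries of a 4 x 2 loading matrix whose upper triangle is fixed at 0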
    }

    parameters {
    // raw basis coefficients
    row_vector[num_basis] b_raw;

    // dynamic factors
    matrix[n, n_lv] LV_raw;

    // dynamic factor lower triangle loading coefficients
    vector[M] L;

    // smoothing parameters
    vector<lower=0>[n_sp] lambda;
    }

    transformed parameters {
    // GAM contribution to expectations (log scale)
    vector[total_obs] eta;

    // trends and dynamic factor loading matrix
    matrix[n, n_series] trend;
    matrix[n_series, n_lv] lv_coefs_raw;

    // basis coefficients
    row_vector[num_basis] b;

    // constraints allow identifiability of loadings
    for (i in 1:(n_lv - 1)) {
    for (j in (i + 1):(n_lv)){
    lv_coefs_raw[i, j] = 0;
    }
    }
    {
    int index;
    index = 0;
    for (j in 1:n_lv) {
      for (i in j:n_series) {
        index = index + 1;
        lv_coefs_raw[i, j] = L[index];
      }
    }
    }

    // derived latent trends
    for (i in 1:n){
    for (s in 1:n_series){
    trend[i, s] = dot_product(lv_coefs_raw[s,], LV_raw[i,]);
    }
    }

    eta = to_vector(b * X);
    }

    model {
    ##insert smooths

    // priors for smoothing parameters
    lambda ~ normal(5, 30);

    // priors for dynamic factor loading coefficients
    L ~ student_t(5, 0, 1);

    // dynamic factor estimates
    for (j in 1:n_lv) {
    LV_raw[1, j] ~ normal(0, 0.1);
    }

    for (j in 1:n_lv) {
    LV_raw[2:n, j] ~ normal(LV_raw[1:(n - 1), j], 0.1);
    }

    // likelihood functions
    for (i in 1:n) {
    for (s in 1:n_series) {
    if (y_observed[i, s])
    y[i, s] ~ poisson_log(eta[ytimes[i, s]] + trend[i, s]);
    }
    }
    }

    generated quantities {
    matrix[n, n_lv] LV;
    matrix[n_series, n_lv] lv_coefs;
    vector[n_sp] rho;
    vector[n_lv] penalty;
    matrix[n, n_series] ypred;
    rho = log(lambda);
    penalty = rep_vector(100.0, n_lv);

    // Sign correct factor loadings and factors
    for(j in 1:n_lv){
    if(lv_coefs_raw[j, j] < 0){
      lv_coefs[,j] = -1 * lv_coefs_raw[,j];
      LV[,j] = -1 * LV_raw[,j];
    } else {
      lv_coefs[,j] = lv_coefs_raw[,j];
      LV[,j] = LV_raw[,j];
    }
    }

    // posterior predictions
    for(i in 1:n){
    for(s in 1:n_series){
    ypred[i, s] = poisson_log_rng(eta[ytimes[i, s]] + trend[i, s]);
    }
    }
    }
    "
    } else {
      add <- "
    ##insert data
    parameters {
    // raw basis coefficients
    row_vector[num_basis] b_raw;

    // latent trend variance parameters
    vector<lower=0>[n_series] sigma;

    // latent trends
    matrix[n, n_series] trend;

    // smoothing parameters
    vector<lower=0>[n_sp] lambda;
    }

    transformed parameters {
    // GAM contribution to expectations (log scale)
    vector[total_obs] eta;

    // basis coefficients
    row_vector[num_basis] b;

    eta = to_vector(b * X);
    }

    model {
    ##insert smooths

    // priors for smoothing parameters
    lambda ~ normal(5, 30);

    // priors for latent trend variance parameters
    sigma ~ exponential(2);

    // trend estimates
    for (s in 1:n_series) {
    trend[1, s] ~ normal(0, sigma[s]);
    }

    for (s in 1:n_series) {
    trend[2:n, s] ~ normal(trend[1:(n - 1), s], sigma[s]);
    }

    // likelihood functions
    for (i in 1:n) {
    for (s in 1:n_series) {
    if (y_observed[i, s])
    y[i, s] ~ poisson_log(eta[ytimes[i, s]] + trend[i, s]);
    }
    }
    }

    generated quantities {
    vector[n_sp] rho;
    vector[n_series] tau;
    matrix[n, n_series] ypred;
    rho = log(lambda);
    for (s in 1:n_series) {
    tau[s] = pow(sigma[s], -2.0);
    }

    // posterior predictions
    for(i in 1:n){
    for(s in 1:n_series){
    ypred[i, s] = poisson_log_rng(eta[ytimes[i, s]] + trend[i, s]);
    }
    }
    }
    "
    }
  } else {
    if (use_lv) {
      add <- c(
        "
               #### Begin model ####
               model {

               ## GAM linear predictor
               eta <- X %*% b

               ## mean expectations
               for (i in 1:n) {
               for (s in 1:n_series) {
               mus[i, s] <- exp(eta[ytimes[i, s]] + trend[i, s])
               }
               }

               ## latent factors evolve as time series with penalised precisions;
               ## the penalty terms force any un-needed factors to evolve as flat lines
               for (j in 1:n_lv) {
               LV_raw[1, j] ~ dnorm(0, penalty[j])
               }

               for (j in 1:n_lv) {
               LV_raw[2, j] ~ dnorm(drift[j] + ar1[j]*LV_raw[1, j], penalty[j])
               }

               for (j in 1:n_lv) {
               LV_raw[3, j] ~ dnorm(drift[j]*2 + ar1[j]*LV_raw[2, j] + ar2[j]*LV_raw[1, j], penalty[j])
               }

               for (i in 4:n) {
               for (j in 1:n_lv) {
               LV_raw[i, j] ~ dnorm(drift[j]*(i - 1) + ar1[j]*LV_raw[i - 1, j] +
               ar2[j]*LV_raw[i - 2, j] + ar3[j]*LV_raw[i - 3, j], penalty[j])
               }
               }

               ## AR components
               for (s in 1:n_lv) {
               drift[s] ~ dnorm(0, 10)
               ar1[s] ~ dnorm(0, 10)
               ar2[s] ~ dnorm(0, 10)
               ar3[s] ~ dnorm(0, 10)
               }

               ## shrinkage penalties for each factor's precision parameter act to squeeze
               ## the entire factor toward a flat white noise process if supported by
               ## the data. The prior for individual factor penalties allows each factor to possibly
               ## have a relatively large penalty, which shrinks the prior for that factor's variance
               ## substantially. Penalties increase exponentially with the number of factors following
               ## Welty, Leah J., et al. Bayesian distributed lag models: estimating effects of particulate
               ## matter air pollution on daily mortality. Biometrics 65.1 (2009): 282-291.
               pi ~ dunif(0, n_lv)
               X2 ~ dnorm(0, 1)T(0, )

               # eta1 controls the baseline penalty
               eta1 ~ dunif(-1, 1)

               # eta2 controls how quickly the penalties exponentially increase
               eta2 ~ dunif(-1, 1)

               for (t in 1:n_lv) {
               X1[t] ~ dnorm(0, 1)T(0, )
               l.dist[t] <- max(t, pi[])
               l.weight[t] <- exp(eta2[] * l.dist[t])
               l.var[t] <- exp(eta1[] * l.dist[t] / 2) * 1
               theta.prime[t] <- l.weight[t] * X1[t] + (1 - l.weight[t]) * X2[]
               penalty[t] <- max(0.0001, theta.prime[t] * l.var[t])
               }

               ## latent factor loadings: standard normal with identifiability constraints
               ## upper triangle of loading matrix set to zero
               for (j in 1:(n_lv - 1)) {
               for (j2 in (j + 1):n_lv) {
               lv_coefs_raw[j, j2] <- 0
               }
               }

               ## positive constraints on loading diagonals
               for (j in 1:n_lv) {
               lv_coefs_raw[j, j] ~ dnorm(0, 1)T(0, 1);
               }

               ## lower diagonal free
               for (j in 2:n_lv) {
               for (j2 in 1:(j - 1)) {
               lv_coefs_raw[j, j2] ~ dnorm(0, 1)T(-1, 1);
               }
               }

               ## other elements also free
               for (j in (n_lv + 1):n_series) {
               for (j2 in 1:n_lv) {
               lv_coefs_raw[j, j2] ~ dnorm(0, 1)T(-1, 1);
               }
               }

               ## trend evolution depends on latent factors
               for (i in 1:n) {
               for (s in 1:n_series) {
               trend[i, s] <- inprod(lv_coefs_raw[s,], LV_raw[i,])
               }
               }

               # sign-correct factor loadings and coefficients
               for (j in 1:n_lv){
                if(lv_coefs_raw[j,j] < 0){
                 lv_coefs[,j] <- -1 * lv_coefs_raw[,j]
                 LV[,j] <- -1 * LV_raw[,j]
                } else {
                 lv_coefs[,j] <- lv_coefs_raw[,j]
                 LV[,j] <- LV_raw[,j]
                }
               }

               ## likelihood functions
               for (i in 1:n) {
               for (s in 1:n_series) {
               y[i, s] ~ dnegbin(rate[i, s], phi[s])T(, upper_bound[s]);
               rate[i, s] <- ifelse((phi[s] / (phi[s] + mus[i, s])) < min_eps, min_eps,
               (phi[s] / (phi[s] + mus[i, s])))
               }
               }

               ## complexity penalising prior for the overdispersion parameter;
               ## where the likelihood reduces to a 'base' model (Poisson) unless
               ## the data support overdispersion
               for (s in 1:n_series) {
               phi[s] <- 1 / phi_inv[s]
               phi_inv[s] ~ dexp(5)
               }

               ## posterior predictions
               for (i in 1:n) {
               for (s in 1:n_series) {
               ypred[i, s] ~ dnegbin(rate[i, s], phi[s])T(, upper_bound[s])
               }
               }

               ## GAM-specific priors"
      )
    } else {
      add <- c(
        "
                          #### Begin model ####
                          model {

                          ## GAM linear predictor
                          eta <- X %*% b

                          ## mean expectations
                          for (i in 1:n) {
                          for (s in 1:n_series) {
                          mus[i, s] <- exp(eta[ytimes[i, s]] + trend[i, s])
                          }
                          }

                          ## trend estimates
                          for (s in 1:n_series) {
                          trend[1, s] ~ dnorm(0, tau[s])
                          }

                          for (s in 1:n_series) {
                          trend[2, s] ~ dnorm(drift[s] + ar1[s]*trend[1, s], tau[s])
                          }

                          for (s in 1:n_series) {
                          trend[3, s] ~ dnorm(drift[s]*2 + ar1[s]*trend[2, s] + ar2[s]*trend[1, s], tau[s])
                          }

                          for (i in 4:n) {
                          for (s in 1:n_series) {
                          trend[i, s] ~ dnorm(drift[s]*(i - 1) + ar1[s]*trend[i - 1, s] +
                          ar2[s]*trend[i - 2, s] + ar3[s]*trend[i - 3, s], tau[s])
                          }
                          }
Download .txt
gitextract_s9w_3_or/

├── .Rbuildignore
├── .claude/
│   ├── commands/
│   │   ├── bug-find.md
│   │   ├── draft-pr-body.md
│   │   ├── feature-execute.md
│   │   ├── feature-plan.md
│   │   ├── pr-checklist.md
│   │   ├── reflect.md
│   │   ├── review-changes.md
│   │   └── spec-driven-dev.md
│   └── settings.local.json
├── .github/
│   ├── .gitignore
│   ├── CODE_OF_CONDUCT.md
│   ├── CONTRIBUTING.md
│   ├── FUNDING.yml
│   └── workflows/
│       ├── R-CMD-check-rstan.yaml
│       ├── R-CMD-check.yaml
│       ├── memcheck.yaml
│       ├── pkgdown.yaml
│       └── readme.yaml
├── .gitignore
├── CLAUDE.md
├── CRAN-SUBMISSION
├── DESCRIPTION
├── LICENSE
├── LICENSE.md
├── NAMESPACE
├── NEWS.md
├── R/
│   ├── RcppExports.R
│   ├── add_MACor.R
│   ├── add_base_dgam_lines.R
│   ├── add_binomial.R
│   ├── add_corcar.R
│   ├── add_nmixture.R
│   ├── add_poisson_lines.R
│   ├── add_residuals.R
│   ├── add_stan_data.R
│   ├── add_trend_lines.R
│   ├── add_tweedie_lines.R
│   ├── all_neon_tick_data.R
│   ├── as.data.frame.mvgam.R
│   ├── backends.R
│   ├── compute_edf.R
│   ├── conditional_effects.R
│   ├── cpp_funs.R
│   ├── data_grids.R
│   ├── dynamic.R
│   ├── ensemble.R
│   ├── evaluate_mvgams.R
│   ├── families.R
│   ├── fevd.mvgam.R
│   ├── forecast.mvgam.R
│   ├── formula.mvgam.R
│   ├── get_linear_predictors.R
│   ├── get_monitor_pars.R
│   ├── get_mvgam_priors.R
│   ├── globals.R
│   ├── gp.R
│   ├── gratia_methods.R
│   ├── hindcast.mvgam.R
│   ├── how_to_cite.R
│   ├── index-mvgam.R
│   ├── interpret_mvgam.R
│   ├── irf.mvgam.R
│   ├── jsdgam.R
│   ├── lfo_cv.mvgam.R
│   ├── logLik.mvgam.R
│   ├── loo.mvgam.R
│   ├── lv_correlations.R
│   ├── marginaleffects.mvgam.R
│   ├── mcmc_plot.mvgam.R
│   ├── model.frame.mvgam.R
│   ├── monotonic.R
│   ├── mvgam-class.R
│   ├── mvgam-package.R
│   ├── mvgam.R
│   ├── mvgam_diagnostics.R
│   ├── mvgam_fevd-class.R
│   ├── mvgam_forecast-class.R
│   ├── mvgam_formulae.R
│   ├── mvgam_irf-class.R
│   ├── mvgam_residcor-class.R
│   ├── mvgam_setup.R
│   ├── mvgam_trend_types.R
│   ├── noncent_trend.R
│   ├── onAttach.R
│   ├── ordinate.jsdgam.R
│   ├── pairs.mvgam.R
│   ├── piecewise_trends.R
│   ├── plot.mvgam.R
│   ├── plot_mvgam_factors.R
│   ├── plot_mvgam_fc.R
│   ├── plot_mvgam_pterms.R
│   ├── plot_mvgam_randomeffects.R
│   ├── plot_mvgam_resids.R
│   ├── plot_mvgam_series.R
│   ├── plot_mvgam_smooth.R
│   ├── plot_mvgam_trend.R
│   ├── plot_mvgam_uncertainty.R
│   ├── portal_data.R
│   ├── posterior_epred.mvgam.R
│   ├── ppc.mvgam.R
│   ├── predict.mvgam.R
│   ├── print.mvgam.R
│   ├── residual_cor.R
│   ├── residuals.mvgam.R
│   ├── sanitise_modelfile.R
│   ├── score.mvgam_forecast.R
│   ├── series_to_mvgam.R
│   ├── shared_obs_params.R
│   ├── sim_mvgam.R
│   ├── stability.R
│   ├── stan_utils.R
│   ├── stationarise_VAR.R
│   ├── summary.mvgam.R
│   ├── sysdata.rda
│   ├── tidier_methods.R
│   ├── trends.R
│   ├── update.mvgam.R
│   ├── update_priors.R
│   ├── utils-pipe.R
│   └── validations.R
├── README.Rmd
├── README.md
├── build/
│   └── vignette.rds
├── build_vignettes_CRAN.R
├── cran-comments.md
├── data/
│   ├── all_neon_tick_data.rda
│   └── portal_data.rda
├── doc/
│   ├── data_in_mvgam.R
│   ├── data_in_mvgam.Rmd
│   ├── data_in_mvgam.html
│   ├── forecast_evaluation.R
│   ├── forecast_evaluation.Rmd
│   ├── forecast_evaluation.html
│   ├── mvgam_overview.R
│   ├── mvgam_overview.Rmd
│   ├── mvgam_overview.html
│   ├── nmixtures.R
│   ├── nmixtures.Rmd
│   ├── nmixtures.html
│   ├── shared_states.R
│   ├── shared_states.Rmd
│   ├── shared_states.html
│   ├── time_varying_effects.R
│   ├── time_varying_effects.Rmd
│   ├── time_varying_effects.html
│   ├── trend_formulas.R
│   ├── trend_formulas.Rmd
│   └── trend_formulas.html
├── docs/
│   ├── 404.html
│   ├── CODE_OF_CONDUCT.html
│   ├── CONTRIBUTING.html
│   ├── LICENSE-text.html
│   ├── LICENSE.html
│   ├── articles/
│   │   ├── data_in_mvgam.html
│   │   ├── forecast_evaluation.html
│   │   ├── index.html
│   │   ├── mvgam_overview.html
│   │   ├── nmixtures.html
│   │   ├── shared_states.html
│   │   ├── time_varying_effects.html
│   │   └── trend_formulas.html
│   ├── authors.html
│   ├── deps/
│   │   ├── bootstrap-5.2.2/
│   │   │   └── font.css
│   │   ├── bootstrap-5.3.1/
│   │   │   └── font.css
│   │   ├── data-deps.txt
│   │   └── jquery-3.6.0/
│   │       └── jquery-3.6.0.js
│   ├── index.html
│   ├── news/
│   │   └── index.html
│   ├── pkgdown.js
│   ├── pkgdown.yml
│   ├── reference/
│   │   ├── GP.html
│   │   ├── RW.html
│   │   ├── ZMVN.html
│   │   ├── add_residuals.mvgam.html
│   │   ├── add_tweedie_lines.html
│   │   ├── all_neon_tick_data.html
│   │   ├── augment.mvgam.html
│   │   ├── code.html
│   │   ├── conditional_effects.mvgam.html
│   │   ├── dynamic.html
│   │   ├── ensemble.mvgam_forecast.html
│   │   ├── evaluate_mvgams.html
│   │   ├── fevd.mvgam.html
│   │   ├── fitted.mvgam.html
│   │   ├── forecast.mvgam.html
│   │   ├── formula.mvgam.html
│   │   ├── get_monitor_pars.html
│   │   ├── get_mvgam_priors.html
│   │   ├── gratia_mvgam_enhancements.html
│   │   ├── hindcast.mvgam.html
│   │   ├── how_to_cite.mvgam.html
│   │   ├── index-mvgam.html
│   │   ├── index.html
│   │   ├── irf.mvgam.html
│   │   ├── jsdgam.html
│   │   ├── lfo_cv.mvgam.html
│   │   ├── logLik.mvgam.html
│   │   ├── loo.mvgam.html
│   │   ├── lv_correlations.html
│   │   ├── mcmc_plot.mvgam.html
│   │   ├── model.frame.mvgam.html
│   │   ├── monotonic.html
│   │   ├── mvgam-class.html
│   │   ├── mvgam-package.html
│   │   ├── mvgam.html
│   │   ├── mvgam_diagnostics.html
│   │   ├── mvgam_draws.html
│   │   ├── mvgam_families.html
│   │   ├── mvgam_fevd-class.html
│   │   ├── mvgam_forecast-class.html
│   │   ├── mvgam_formulae.html
│   │   ├── mvgam_irf-class.html
│   │   ├── mvgam_marginaleffects.html
│   │   ├── mvgam_trends.html
│   │   ├── pairs.mvgam.html
│   │   ├── pfilter_mvgam_fc.html
│   │   ├── pfilter_mvgam_init.html
│   │   ├── pfilter_mvgam_online.html
│   │   ├── pfilter_mvgam_smooth.html
│   │   ├── piecewise_trends.html
│   │   ├── pipe.html
│   │   ├── plot.mvgam.html
│   │   ├── plot.mvgam_fevd.html
│   │   ├── plot.mvgam_irf.html
│   │   ├── plot.mvgam_lfo.html
│   │   ├── plot_effects.mvgam.html
│   │   ├── plot_mvgam_factors.html
│   │   ├── plot_mvgam_forecasts.html
│   │   ├── plot_mvgam_pterms.html
│   │   ├── plot_mvgam_randomeffects.html
│   │   ├── plot_mvgam_resids.html
│   │   ├── plot_mvgam_series.html
│   │   ├── plot_mvgam_smooth.html
│   │   ├── plot_mvgam_trend.html
│   │   ├── plot_mvgam_uncertainty.html
│   │   ├── portal_data.html
│   │   ├── posterior_epred.mvgam.html
│   │   ├── posterior_linpred.mvgam.html
│   │   ├── posterior_predict.mvgam.html
│   │   ├── pp_check.mvgam.html
│   │   ├── ppc.mvgam.html
│   │   ├── predict.mvgam.html
│   │   ├── print.mvgam.html
│   │   ├── reexports.html
│   │   ├── residual_cor.jsdgam.html
│   │   ├── residuals.mvgam.html
│   │   ├── score.mvgam_forecast.html
│   │   ├── series_to_mvgam.html
│   │   ├── sim_mvgam.html
│   │   ├── stability.mvgam.html
│   │   ├── summary.mvgam.html
│   │   ├── ti.html
│   │   └── update.mvgam.html
│   ├── search.json
│   └── sitemap.xml
├── index.Rmd
├── index.md
├── inst/
│   ├── CITATION
│   └── doc/
│       ├── data_in_mvgam.R
│       ├── data_in_mvgam.Rmd
│       ├── data_in_mvgam.html
│       ├── forecast_evaluation.R
│       ├── forecast_evaluation.Rmd
│       ├── forecast_evaluation.html
│       ├── mvgam_overview.R
│       ├── mvgam_overview.Rmd
│       ├── mvgam_overview.html
│       ├── nmixtures.R
│       ├── nmixtures.Rmd
│       ├── nmixtures.html
│       ├── shared_states.R
│       ├── shared_states.Rmd
│       ├── shared_states.html
│       ├── time_varying_effects.R
│       ├── time_varying_effects.Rmd
│       ├── time_varying_effects.html
│       ├── trend_formulas.R
│       ├── trend_formulas.Rmd
│       └── trend_formulas.html
├── man/
│   ├── GP.Rd
│   ├── RW.Rd
│   ├── ZMVN.Rd
│   ├── add_residuals.mvgam.Rd
│   ├── all_neon_tick_data.Rd
│   ├── augment.mvgam.Rd
│   ├── code.Rd
│   ├── conditional_effects.mvgam.Rd
│   ├── dynamic.Rd
│   ├── ensemble.mvgam_forecast.Rd
│   ├── evaluate_mvgams.Rd
│   ├── fevd.mvgam.Rd
│   ├── fitted.mvgam.Rd
│   ├── forecast.mvgam.Rd
│   ├── formula.mvgam.Rd
│   ├── get_mvgam_priors.Rd
│   ├── gratia_mvgam_enhancements.Rd
│   ├── hindcast.mvgam.Rd
│   ├── how_to_cite.mvgam.Rd
│   ├── index-mvgam.Rd
│   ├── irf.mvgam.Rd
│   ├── jsdgam.Rd
│   ├── lfo_cv.mvgam.Rd
│   ├── logLik.mvgam.Rd
│   ├── loo.mvgam.Rd
│   ├── lv_correlations.Rd
│   ├── mcmc_plot.mvgam.Rd
│   ├── model.frame.mvgam.Rd
│   ├── monotonic.Rd
│   ├── mvgam-class.Rd
│   ├── mvgam-package.Rd
│   ├── mvgam.Rd
│   ├── mvgam_diagnostics.Rd
│   ├── mvgam_draws.Rd
│   ├── mvgam_families.Rd
│   ├── mvgam_fevd-class.Rd
│   ├── mvgam_forecast-class.Rd
│   ├── mvgam_formulae.Rd
│   ├── mvgam_irf-class.Rd
│   ├── mvgam_marginaleffects.Rd
│   ├── mvgam_residcor-class.Rd
│   ├── mvgam_trends.Rd
│   ├── mvgam_use_cases.Rd
│   ├── ordinate.jsdgam.Rd
│   ├── pairs.mvgam.Rd
│   ├── piecewise_trends.Rd
│   ├── pipe.Rd
│   ├── plot.mvgam.Rd
│   ├── plot.mvgam_fevd.Rd
│   ├── plot.mvgam_irf.Rd
│   ├── plot.mvgam_lfo.Rd
│   ├── plot.mvgam_residcor.Rd
│   ├── plot_mvgam_factors.Rd
│   ├── plot_mvgam_forecasts.Rd
│   ├── plot_mvgam_pterms.Rd
│   ├── plot_mvgam_randomeffects.Rd
│   ├── plot_mvgam_resids.Rd
│   ├── plot_mvgam_series.Rd
│   ├── plot_mvgam_smooth.Rd
│   ├── plot_mvgam_trend.Rd
│   ├── plot_mvgam_uncertainty.Rd
│   ├── portal_data.Rd
│   ├── posterior_epred.mvgam.Rd
│   ├── posterior_linpred.mvgam.Rd
│   ├── posterior_predict.mvgam.Rd
│   ├── pp_check.mvgam.Rd
│   ├── ppc.mvgam.Rd
│   ├── predict.mvgam.Rd
│   ├── print.mvgam.Rd
│   ├── print.mvgam_summary.Rd
│   ├── reexports.Rd
│   ├── residual_cor.jsdgam.Rd
│   ├── residuals.mvgam.Rd
│   ├── score.mvgam_forecast.Rd
│   ├── series_to_mvgam.Rd
│   ├── sim_mvgam.Rd
│   ├── stability.mvgam.Rd
│   ├── summary.mvgam.Rd
│   ├── summary.mvgam_fevd.Rd
│   ├── summary.mvgam_forecast.Rd
│   ├── summary.mvgam_irf.Rd
│   ├── tidy.mvgam.Rd
│   └── update.mvgam.Rd
├── memcheck.R
├── misc/
│   ├── BeamOptions.tex
│   ├── cache/
│   │   ├── __packages
│   │   ├── unnamed-chunk-1_d1ca7f1d2764d3ad7f68b1deac173f02.RData
│   │   ├── unnamed-chunk-1_d1ca7f1d2764d3ad7f68b1deac173f02.rdb
│   │   ├── unnamed-chunk-1_d1ca7f1d2764d3ad7f68b1deac173f02.rdx
│   │   ├── unnamed-chunk-2_ad6e810bc91f96416ef0c5c84cba99cc.RData
│   │   ├── unnamed-chunk-2_ad6e810bc91f96416ef0c5c84cba99cc.rdb
│   │   └── unnamed-chunk-2_ad6e810bc91f96416ef0c5c84cba99cc.rdx
│   ├── mvgam_cheatsheet-concordance.tex
│   ├── mvgam_cheatsheet.Rnw
│   └── mvgam_cheatsheet.tex
├── pkgdown/
│   ├── _pkgdown.yml
│   ├── extra.css
│   └── extra.scss
├── src/
│   ├── .gitignore
│   ├── Makevars
│   ├── Makevars.win
│   ├── RcppExports.cpp
│   ├── RcppExports.o
│   ├── trend_funs.cpp
│   └── trend_funs.o
├── tasks/
│   └── fixtures/
│       ├── debug_brms_intercept.rds
│       ├── fit1.rds
│       ├── fit10.rds
│       ├── fit11.rds
│       ├── fit12.rds
│       ├── fit13.rds
│       ├── fit2.rds
│       ├── fit3.rds
│       ├── fit4.rds
│       ├── fit5.rds
│       ├── fit6.rds
│       ├── fit7.rds
│       ├── fit8.rds
│       ├── fit9.rds
│       ├── val_brms_ar1_cor_re.rds
│       ├── val_brms_ar1_fx.rds
│       ├── val_brms_ar1_gp.rds
│       ├── val_brms_ar1_gp2_by.rds
│       ├── val_brms_ar1_gp2d.rds
│       ├── val_brms_ar1_int.rds
│       ├── val_brms_ar1_mo.rds
│       ├── val_brms_ar1_re.rds
│       ├── val_brms_ar1_re_smooth.rds
│       ├── val_brms_ar1_t2_noint.rds
│       ├── val_brms_beta_ar1.rds
│       ├── val_brms_binom_ar1.rds
│       ├── val_brms_cumulative_fx.rds
│       ├── val_brms_hurdle_negbinomial_ar1.rds
│       ├── val_brms_hurdle_poisson_ar1.rds
│       ├── val_brms_mv_gauss.rds
│       ├── val_brms_zero_inflated_poisson_ar1.rds
│       ├── val_mvgam_ar1_cor_re.rds
│       ├── val_mvgam_ar1_fx.rds
│       ├── val_mvgam_ar1_fx_trend.rds
│       ├── val_mvgam_ar1_gp.rds
│       ├── val_mvgam_ar1_gp2_by.rds
│       ├── val_mvgam_ar1_gp2_by_trend.rds
│       ├── val_mvgam_ar1_gp2d.rds
│       ├── val_mvgam_ar1_gp2d_trend.rds
│       ├── val_mvgam_ar1_gp_trend.rds
│       ├── val_mvgam_ar1_int.rds
│       ├── val_mvgam_ar1_mo.rds
│       ├── val_mvgam_ar1_mo_trend.rds
│       ├── val_mvgam_ar1_re.rds
│       ├── val_mvgam_ar1_re_smooth.rds
│       ├── val_mvgam_ar1_re_smooth_trend.rds
│       ├── val_mvgam_ar1_re_trend.rds
│       ├── val_mvgam_ar1_t2_noint.rds
│       ├── val_mvgam_beta_ar1.rds
│       ├── val_mvgam_binom_ar1.rds
│       ├── val_mvgam_cumulative_fx.rds
│       ├── val_mvgam_hurdle_negbinomial_ar1.rds
│       ├── val_mvgam_hurdle_poisson_ar1.rds
│       ├── val_mvgam_mv_gauss.rds
│       ├── val_mvgam_zero_inflated_poisson_ar1.rds
│       ├── validation_brms_ar1.rds
│       ├── validation_brms_re.rds
│       ├── validation_brms_simple.rds
│       ├── validation_mvgam_ar1.rds
│       └── validation_mvgam_simple.rds
├── tests/
│   ├── local/
│   │   ├── setup_tests_local.R
│   │   └── tests-models1.R
│   ├── mvgam_examples.R
│   ├── testthat/
│   │   ├── _snaps/
│   │   │   └── tidier_methods.md
│   │   ├── setup.R
│   │   ├── test-RW.R
│   │   ├── test-backends.R
│   │   ├── test-binomial.R
│   │   ├── test-dynamic.R
│   │   ├── test-example_processing.R
│   │   ├── test-families.R
│   │   ├── test-gp.R
│   │   ├── test-jsdgam.R
│   │   ├── test-marginaleffects.R
│   │   ├── test-monotonic.R
│   │   ├── test-mvgam-methods.R
│   │   ├── test-mvgam.R
│   │   ├── test-mvgam_priors.R
│   │   ├── test-nmixture.R
│   │   ├── test-offset.R
│   │   ├── test-piecewise.R
│   │   ├── test-sim_mvgam.R
│   │   ├── test-summary-structure.R
│   │   ├── test-tidier_methods.R
│   │   └── test-update.R
│   └── testthat.R
└── vignettes/
    ├── data_in_mvgam.Rmd
    ├── forecast_evaluation.Rmd
    ├── mvgam_overview.Rmd
    ├── nmixtures.Rmd
    ├── shared_states.Rmd
    ├── time_varying_effects.Rmd
    └── trend_formulas.Rmd
SYMBOL INDEX (92 symbols across 4 files)

FILE: docs/deps/jquery-3.6.0/jquery-3.6.0.js
  function DOMEval (line 107) | function DOMEval( code, node, doc ) {
  function toType (line 137) | function toType( obj ) {
  function isArrayLike (line 507) | function isArrayLike( obj ) {
  function Sizzle (line 759) | function Sizzle( selector, context, results, seed ) {
  function createCache (line 907) | function createCache() {
  function markFunction (line 927) | function markFunction( fn ) {
  function assert (line 936) | function assert( fn ) {
  function addHandle (line 960) | function addHandle( attrs, handler ) {
  function siblingCheck (line 975) | function siblingCheck( a, b ) {
  function createInputPseudo (line 1001) | function createInputPseudo( type ) {
  function createButtonPseudo (line 1012) | function createButtonPseudo( type ) {
  function createDisabledPseudo (line 1023) | function createDisabledPseudo( disabled ) {
  function createPositionalPseudo (line 1079) | function createPositionalPseudo( fn ) {
  function testContext (line 1102) | function testContext( context ) {
  function setFilters (line 2313) | function setFilters() {}
  function toSelector (line 2387) | function toSelector( tokens ) {
  function addCombinator (line 2397) | function addCombinator( matcher, combinator, base ) {
  function elementMatcher (line 2464) | function elementMatcher( matchers ) {
  function multipleContexts (line 2478) | function multipleContexts( selector, contexts, results ) {
  function condense (line 2487) | function condense( unmatched, map, filter, context, xml ) {
  function setMatcher (line 2508) | function setMatcher( preFilter, selector, matcher, postFilter, postFinde...
  function matcherFromTokens (line 2608) | function matcherFromTokens( tokens ) {
  function matcherFromGroupMatchers (line 2671) | function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
  function nodeName (line 3029) | function nodeName( elem, name ) {
  function winnow (line 3039) | function winnow( elements, qualifier, not ) {
  function sibling (line 3334) | function sibling( cur, dir ) {
  function createOptions (line 3427) | function createOptions( options ) {
  function Identity (line 3652) | function Identity( v ) {
  function Thrower (line 3655) | function Thrower( ex ) {
  function adoptValue (line 3659) | function adoptValue( value, resolve, reject, noValue ) {
  function resolve (line 3752) | function resolve( depth, deferred, handler, special ) {
  function completed (line 4117) | function completed() {
  function fcamelCase (line 4212) | function fcamelCase( _all, letter ) {
  function camelCase (line 4219) | function camelCase( string ) {
  function Data (line 4236) | function Data() {
  function getData (line 4405) | function getData( data ) {
  function dataAttr (line 4430) | function dataAttr( elem, key, data ) {
  function adjustCSS (line 4742) | function adjustCSS( elem, prop, valueParts, tween ) {
  function getDefaultDisplay (line 4810) | function getDefaultDisplay( elem ) {
  function showHide (line 4833) | function showHide( elements, show ) {
  function getAll (line 4965) | function getAll( context, tag ) {
  function setGlobalEval (line 4990) | function setGlobalEval( elems, refElements ) {
  function buildFragment (line 5006) | function buildFragment( elems, context, scripts, selection, ignored ) {
  function returnTrue (line 5098) | function returnTrue() {
  function returnFalse (line 5102) | function returnFalse() {
  function expectSync (line 5112) | function expectSync( elem, type ) {
  function safeActiveElement (line 5119) | function safeActiveElement() {
  function on (line 5125) | function on( elem, types, selector, data, fn, one ) {
  function leverageNative (line 5613) | function leverageNative( el, type, expectSync ) {
  function manipulationTarget (line 5962) | function manipulationTarget( elem, content ) {
  function disableScript (line 5973) | function disableScript( elem ) {
  function restoreScript (line 5977) | function restoreScript( elem ) {
  function cloneCopyEvent (line 5987) | function cloneCopyEvent( src, dest ) {
  function fixInput (line 6020) | function fixInput( src, dest ) {
  function domManip (line 6033) | function domManip( collection, args, callback, ignored ) {
  function remove (line 6125) | function remove( elem, selector, keepData ) {
  function computeStyleTests (line 6439) | function computeStyleTests() {
  function roundPixelMeasures (line 6483) | function roundPixelMeasures( measure ) {
  function curCSS (line 6576) | function curCSS( elem, name, computed ) {
  function addGetHookIf (line 6629) | function addGetHookIf( conditionFn, hookFn ) {
  function vendorPropName (line 6654) | function vendorPropName( name ) {
  function finalPropName (line 6669) | function finalPropName( name ) {
  function setPositiveNumber (line 6695) | function setPositiveNumber( _elem, value, subtract ) {
  function boxModelAdjustment (line 6707) | function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, ...
  function getWidthOrHeight (line 6775) | function getWidthOrHeight( elem, dimension, extra ) {
  function Tween (line 7151) | function Tween( elem, options, prop, end, easing ) {
  function schedule (line 7274) | function schedule() {
  function createFxNow (line 7287) | function createFxNow() {
  function genFx (line 7295) | function genFx( type, includeWidth ) {
  function createTween (line 7315) | function createTween( value, prop, animation ) {
  function defaultPrefilter (line 7329) | function defaultPrefilter( elem, props, opts ) {
  function propFilter (line 7501) | function propFilter( props, specialEasing ) {
  function Animation (line 7538) | function Animation( elem, properties, options ) {
  function stripAndCollapse (line 8254) | function stripAndCollapse( value ) {
  function getClass (line 8260) | function getClass( elem ) {
  function classesToArray (line 8264) | function classesToArray( value ) {
  function buildParams (line 8894) | function buildParams( prefix, obj, traditional, add ) {
  function addToPrefiltersOrTransports (line 9047) | function addToPrefiltersOrTransports( structure ) {
  function inspectPrefiltersOrTransports (line 9081) | function inspectPrefiltersOrTransports( structure, options, originalOpti...
  function ajaxExtend (line 9110) | function ajaxExtend( target, src ) {
  function ajaxHandleResponses (line 9130) | function ajaxHandleResponses( s, jqXHR, responses ) {
  function ajaxConvert (line 9188) | function ajaxConvert( s, response, jqXHR, isSuccess ) {
  function done (line 9704) | function done( status, nativeStatusText, responses, headers ) {

FILE: docs/pkgdown.js
  function changeTooltipMessage (line 32) | function changeTooltipMessage(element, msg) {
  function searchFuse (line 116) | async function searchFuse(query, callback) {

FILE: src/RcppExports.cpp
  function _mvgam_ar3_recursC (line 16) | RcppExport SEXP _mvgam_ar3_recursC(SEXP driftSEXP, SEXP ar1SEXP, SEXP ar...
  function _mvgam_var1_recursC (line 34) | RcppExport SEXP _mvgam_var1_recursC(SEXP ASEXP, SEXP linpredsSEXP, SEXP ...
  function _mvgam_varma_recursC (line 50) | RcppExport SEXP _mvgam_varma_recursC(SEXP ASEXP, SEXP A2SEXP, SEXP A3SEX...
  function R_init_mvgam (line 75) | RcppExport void R_init_mvgam(DllInfo *dll) {
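
  The four entries above are the auto-generated C-level bridge that Rcpp::compileAttributes() creates to register the compiled trend functions with R (R_init_mvgam is the registration routine, not a user-facing function). As a minimal sketch, the matching R-side wrapper in R/RcppExports.R typically looks like the following; argument names beyond drift and ar1 are elided in the truncated signature above, so the dots stand in for them here:

      # Hypothetical generated wrapper: forwards its arguments to the
      # registered C entry point via .Call(). Only drift and ar1 are
      # visible in the truncated signature; the remaining arguments
      # are represented by ... rather than invented names.
      ar3_recursC <- function(drift, ar1, ...) {
        .Call(`_mvgam_ar3_recursC`, drift, ar1, ...)
      }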

FILE: src/trend_funs.cpp
  function ar3_recursC (line 11) | Rcpp::NumericVector ar3_recursC(double drift, double ar1,
  function var1_recursC (line 37) | arma::mat var1_recursC(arma::mat A,
  function varma_recursC (line 58) | arma::mat varma_recursC(
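
  These three compiled routines propagate latent trends forward in time. For orientation, here is a pure-R reference sketch of a textbook AR(3) recursion with drift; the argument names and exact parameterisation are illustrative assumptions, not the C++ signature of ar3_recursC:

      # Textbook AR(3) recursion with drift (illustrative; ar3_recursC
      # performs the equivalent propagation in compiled C++ for speed).
      ar3_recurs <- function(drift, ar1, ar2, ar3, last_trends, errors) {
        h <- length(errors)                  # forecast horizon
        states <- c(last_trends, numeric(h)) # seed with the 3 most recent states
        for (t in seq_len(h)) {
          i <- t + 3
          states[i] <- drift +
            ar1 * states[i - 1] +
            ar2 * states[i - 2] +
            ar3 * states[i - 3] +
            errors[t]
        }
        states[-(1:3)]                       # return only the new states
      }

      # Example: 10 new states from a stationary AR(3) started at zero
      ar3_recurs(0, 0.5, 0.2, 0.1, last_trends = rep(0, 3), errors = rnorm(10))

  var1_recursC and varma_recursC play the analogous role for matrix-valued VAR(1) and VARMA recursions, as their arma::mat signatures above suggest.
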
Condensed preview — 478 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (11,211K chars).
[
  {
    "path": ".Rbuildignore",
    "chars": 506,
    "preview": "^mvgam\\.Rproj$\n^\\.Rproj\\.user$\n^\\.git$\n^\\.github$\n^LICENSE\\.md$\n^_pkgdown\\.yml$\n^docs$\n^tasks$\n^.claude$\n^Claude\\.md$\n^i"
  },
  {
    "path": ".claude/commands/bug-find.md",
    "chars": 5101,
    "preview": "You are a senior software engineer helping investigate and diagnose a bug. Your role is to systematically uncover the ro"
  },
  {
    "path": ".claude/commands/draft-pr-body.md",
    "chars": 2023,
    "preview": "# PR Body Generator Template\n\nYou are helping create a PR body for the posit-dev/positron repository. Follow these guide"
  },
  {
    "path": ".claude/commands/feature-execute.md",
    "chars": 5192,
    "preview": "You are an expert software engineer tasked with implementing a change based on an existing implementation plan. You prio"
  },
  {
    "path": ".claude/commands/feature-plan.md",
    "chars": 6050,
    "preview": "You are a senior software engineer helping a peer work through a problem, feature implementation, or bug investigation. "
  },
  {
    "path": ".claude/commands/pr-checklist.md",
    "chars": 8847,
    "preview": "You are an expert software engineer with 15+ years of experience in large-scale collaborative projects. You have a keen "
  },
  {
    "path": ".claude/commands/reflect.md",
    "chars": 2501,
    "preview": "You are an expert in prompt engineering, specializing in optimizing AI code assistant instructions. Your task is to anal"
  },
  {
    "path": ".claude/commands/review-changes.md",
    "chars": 7279,
    "preview": "You are an expert software engineer with 15+ years of experience in large-scale collaborative projects. You have a keen "
  },
  {
    "path": ".claude/commands/spec-driven-dev.md",
    "chars": 9822,
    "preview": "You are a software development agent focused on creating simple, beautiful software through thoughtful specification. Yo"
  },
  {
    "path": ".claude/settings.local.json",
    "chars": 183,
    "preview": "{\n  \"permissions\": {\n    \"allow\": [\n      \"WebFetch(domain:github.com)\",\n      \"Bash(R CMD Rd2pdf:*)\",\n      \"Bash(git p"
  },
  {
    "path": ".github/.gitignore",
    "chars": 7,
    "preview": "*.html\n"
  },
  {
    "path": ".github/CODE_OF_CONDUCT.md",
    "chars": 5244,
    "preview": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nWe as members, contributors, and leaders pledge to make participa"
  },
  {
    "path": ".github/CONTRIBUTING.md",
    "chars": 3199,
    "preview": "# Contributing to mvgam\n\nThis document outlines how to propose a change to mvgam.\nFor a detailed discussion on contribut"
  },
  {
    "path": ".github/FUNDING.yml",
    "chars": 23,
    "preview": "github: nicholasjclark\n"
  },
  {
    "path": ".github/workflows/R-CMD-check-rstan.yaml",
    "chars": 2450,
    "preview": "# Workflow derived from https://github.com/r-lib/actions/tree/master/examples\n# Need help debugging build failures? Star"
  },
  {
    "path": ".github/workflows/R-CMD-check.yaml",
    "chars": 2800,
    "preview": "# Workflow derived from https://github.com/r-lib/actions/tree/master/examples\n# Need help debugging build failures? Star"
  },
  {
    "path": ".github/workflows/memcheck.yaml",
    "chars": 2018,
    "preview": "# Workflow derived from https://github.com/r-lib/actions/tree/master/examples\n# Need help debugging build failures? Star"
  },
  {
    "path": ".github/workflows/pkgdown.yaml",
    "chars": 2092,
    "preview": "# Workflow derived from https://github.com/r-lib/actions/tree/master/examples\n# Need help debugging build failures? Star"
  },
  {
    "path": ".github/workflows/readme.yaml",
    "chars": 1754,
    "preview": "# Workflow derived from https://github.com/r-lib/actions/tree/master/examples\n# Need help debugging build failures? Star"
  },
  {
    "path": ".gitignore",
    "chars": 126,
    "preview": "*.Rproj*\n.Rhistory\n.RData\n.Ruserdata\n.Rprofile\nMeta\n.Rproj.user\n/Meta/\ndesktop.ini\n^cran-comments\\.md$\n^src\\.gcda$\nclaud"
  },
  {
    "path": "CLAUDE.md",
    "chars": 5845,
    "preview": "# CLAUDE.md\n\nThis file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.\n\n## "
  },
  {
    "path": "CRAN-SUBMISSION",
    "chars": 91,
    "preview": "Version: 1.1.0\nDate: 2024-04-18 23:09:30 UTC\nSHA: 3d852f1f92b4d6d10ed64dc212fd6b0ebf933bca\n"
  },
  {
    "path": "DESCRIPTION",
    "chars": 2497,
    "preview": "Package: mvgam\nTitle: Multivariate (Dynamic) Generalized Additive Models\nVersion: 1.1.595\nDate: 2026-01-19\nAuthors@R: c("
  },
  {
    "path": "LICENSE",
    "chars": 44,
    "preview": "YEAR: 2021\nCOPYRIGHT HOLDER: Nicholas Clark\n"
  },
  {
    "path": "LICENSE.md",
    "chars": 1073,
    "preview": "# MIT License\n\nCopyright (c) 2021 Nicholas Clark\n\nPermission is hereby granted, free of charge, to any person obtaining "
  },
  {
    "path": "NAMESPACE",
    "chars": 9930,
    "preview": "# Generated by roxygen2: do not edit by hand\n\nS3method(Predict.matrix,mod.smooth)\nS3method(Predict.matrix,moi.smooth)\nS3"
  },
  {
    "path": "NEWS.md",
    "chars": 9903,
    "preview": "# mvgam 1.1.595\n\n## New functionalities\n* Restructured `summary.mvgam()` to now return an object of class `mvgam_summary"
  },
  {
    "path": "R/RcppExports.R",
    "chars": 661,
    "preview": "# Generated by using Rcpp::compileAttributes() -> do not edit by hand\n# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD"
  },
  {
    "path": "R/add_MACor.R",
    "chars": 88020,
    "preview": "#' Function to add moving average processes and/or\n#' correlated process errors to an existing Stan model file\n\n#' When "
  },
  {
    "path": "R/add_base_dgam_lines.R",
    "chars": 12723,
    "preview": "#' Dynamic GAM model file additions\n#'\n#' @noRd\n#'\n#' @param use_lv Logical (use latent variables or not?)\n#'\n#' @param "
  },
  {
    "path": "R/add_binomial.R",
    "chars": 9229,
    "preview": "#' @noRd\nadd_binomial = function(\n  formula,\n  model_file,\n  model_data,\n  data_train,\n  data_test,\n  family_char\n) {\n  "
  },
  {
    "path": "R/add_corcar.R",
    "chars": 1765,
    "preview": "#' Updates for adding continuous time AR data\n#' @noRd\nadd_corcar = function(model_data, data_train, data_test = NULL) {"
  },
  {
    "path": "R/add_nmixture.R",
    "chars": 23225,
    "preview": "#' Updates for adding N-mixture processes\n#' @noRd\nadd_nmixture = function(\n  model_file,\n  model_data,\n  data_train,\n  "
  },
  {
    "path": "R/add_poisson_lines.R",
    "chars": 1091,
    "preview": "#' Poisson JAGS modifications\n#'\n#'\n#' @noRd\n#' @param model_file A template `JAGS` model file to be modified\n#' @param "
  },
  {
    "path": "R/add_residuals.R",
    "chars": 977,
    "preview": "#'@title Calculate randomized quantile residuals for \\pkg{mvgam} objects\n#'@name add_residuals.mvgam\n#'@param object \\co"
  },
  {
    "path": "R/add_stan_data.R",
    "chars": 29534,
    "preview": "#' Add remaining data, model and parameter blocks to a Stan model\n#'\n#'\n#' @noRd\n#' @param jags_file Prepared JAGS mvgam"
  },
  {
    "path": "R/add_trend_lines.R",
    "chars": 40957,
    "preview": "#' Latent trend model file modifications\n#'\n#'\n#' @noRd\n#' @param model_file A template `JAGS` or `Stan` model file to b"
  },
  {
    "path": "R/add_tweedie_lines.R",
    "chars": 3356,
    "preview": "#' Tweedie JAGS modifications\n#'\n#'\n#' @param model_file A template `JAGS` model file to be modified\n#' @param upper_bou"
  },
  {
    "path": "R/all_neon_tick_data.R",
    "chars": 676,
    "preview": "#' NEON Amblyomma and Ixodes tick abundance survey data\n#'\n#' A dataset containing timeseries of Amblyomma americanum an"
  },
  {
    "path": "R/as.data.frame.mvgam.R",
    "chars": 15907,
    "preview": "#' @title Extract posterior draws from fitted \\pkg{mvgam} objects\n#'\n#' @name mvgam_draws\n#'\n#' @description\n#' Extract "
  },
  {
    "path": "R/backends.R",
    "chars": 27682,
    "preview": "#### Helper functions for preparing and manipulating Stan models ####\n# All functions were modified from `brms` source c"
  },
  {
    "path": "R/compute_edf.R",
    "chars": 4237,
    "preview": "#' Compute approximate EDFs of smooths\n#' @importFrom stats fitted\n#'@noRd\ncompute_edf = function(\n  mgcv_model,\n  objec"
  },
  {
    "path": "R/conditional_effects.R",
    "chars": 11086,
    "preview": "#' Display conditional effects of predictors for \\pkg{mvgam} models\n#'\n#' Display conditional effects of one or more num"
  },
  {
    "path": "R/cpp_funs.R",
    "chars": 75,
    "preview": "#' @useDynLib mvgam, .registration = TRUE\n#' @importFrom Rcpp evalCpp\nNULL\n"
  },
  {
    "path": "R/data_grids.R",
    "chars": 2773,
    "preview": "#' Get data objects into correct order in case it is not already\n#'@noRd\nsort_data = function(data, series_time = FALSE)"
  },
  {
    "path": "R/dynamic.R",
    "chars": 5933,
    "preview": "#' Defining dynamic coefficients in \\pkg{mvgam} formulae\n#'\n#' Set up time-varying (dynamic) coefficients for use in \\pk"
  },
  {
    "path": "R/ensemble.R",
    "chars": 6776,
    "preview": "#' Combine forecasts from \\pkg{mvgam} models into evenly weighted ensembles\n#'\n#' Generate evenly weighted ensemble fore"
  },
  {
    "path": "R/evaluate_mvgams.R",
    "chars": 29894,
    "preview": "#' Evaluate forecasts from fitted \\pkg{mvgam} objects\n#'\n#' @importFrom graphics barplot boxplot axis\n#' @importFrom sta"
  },
  {
    "path": "R/families.R",
    "chars": 53636,
    "preview": "#' Supported \\pkg{mvgam} families\n#'\n#' @importFrom stats make.link dgamma pgamma rgamma qnorm plnorm runif pbeta dlnorm"
  },
  {
    "path": "R/fevd.mvgam.R",
    "chars": 4621,
    "preview": "#' Calculate latent VAR forecast error variance decompositions\n#'\n#' Compute forecast error variance decompositions from"
  },
  {
    "path": "R/forecast.mvgam.R",
    "chars": 38163,
    "preview": "#' @importFrom generics forecast\n#' @export\ngenerics::forecast\n\n#' @title Extract or compute hindcasts and forecasts for"
  },
  {
    "path": "R/formula.mvgam.R",
    "chars": 1183,
    "preview": "#'Extract formulae from \\pkg{mvgam} objects\n#'\n#'@rdname formula.mvgam\n#'@param x `mvgam`, `jsdgam` or `mvgam_prefit` ob"
  },
  {
    "path": "R/get_linear_predictors.R",
    "chars": 7008,
    "preview": "#' Function to prepare observation model linear predictor matrix\n#' @importFrom brms brmsterms\n#' @noRd\nobs_Xp_matrix = "
  },
  {
    "path": "R/get_monitor_pars.R",
    "chars": 1194,
    "preview": "#' Return parameters to monitor during modelling\n#'\n#'\n#' @param family \\code{character}\n#' @param smooths_included Logi"
  },
  {
    "path": "R/get_mvgam_priors.R",
    "chars": 56661,
    "preview": "#' Extract information on default prior distributions for an \\pkg{mvgam} model\n#'\n#' This function lists the parameters "
  },
  {
    "path": "R/globals.R",
    "chars": 1992,
    "preview": "#' Prevent R CMD Check notes about missing global variables due to\n#' dplyr mutates etc...\n#' @noRd\nutils::globalVariabl"
  },
  {
    "path": "R/gp.R",
    "chars": 47746,
    "preview": "#' Re-label gp terms inside an mgcv gam object for nicer plotting\n#' @noRd\nrelabel_gps = function(mgcv_model) {\n  if (le"
  },
  {
    "path": "R/gratia_methods.R",
    "chars": 23830,
    "preview": "#### Functions to ensure gratia methods work with mvgam, using the Enhance functionality\n# in the Description ####\n\n# Ad"
  },
  {
    "path": "R/hindcast.mvgam.R",
    "chars": 7092,
    "preview": "#'@title Extract hindcasts for a fitted \\code{mvgam} object\n#'@name hindcast.mvgam\n#'@importFrom stats predict\n#'@inheri"
  },
  {
    "path": "R/how_to_cite.R",
    "chars": 12198,
    "preview": "#' Generate a methods description for \\pkg{mvgam} models\n#'\n#' Create a brief but fully referenced methods description, "
  },
  {
    "path": "R/index-mvgam.R",
    "chars": 6874,
    "preview": "#' Index \\code{mvgam} objects\n#'\n#' @aliases variables\n#'\n#' Index variables and their `mgcv` coefficient names\n#'\n#' @p"
  },
  {
    "path": "R/interpret_mvgam.R",
    "chars": 5801,
    "preview": "#' Interpret the formula specified to mvgam and replace any dynamic terms\n#' with the correct Gaussian Process smooth sp"
  },
  {
    "path": "R/irf.mvgam.R",
    "chars": 6641,
    "preview": "#' Calculate latent VAR impulse response functions\n#'\n#' Compute Generalized or Orthogonalized Impulse Response Function"
  },
  {
    "path": "R/jsdgam.R",
    "chars": 31798,
    "preview": "#'Fit Joint Species Distribution Models in \\pkg{mvgam}\n#'\n#'This function sets up a Joint Species Distribution Model whe"
  },
  {
    "path": "R/lfo_cv.mvgam.R",
    "chars": 16122,
    "preview": "#'@title Approximate leave-future-out cross-validation of fitted \\pkg{mvgam} objects\n#'@name lfo_cv.mvgam\n#'@importFrom "
  },
  {
    "path": "R/logLik.mvgam.R",
    "chars": 6436,
    "preview": "#' @title Compute pointwise Log-Likelihoods from fitted \\pkg{mvgam} objects\n#'\n#' @importFrom parallel setDefaultCluster"
  },
  {
    "path": "R/loo.mvgam.R",
    "chars": 7139,
    "preview": "#' LOO information criteria for \\pkg{mvgam} models\n#'\n#' Extract the LOOIC (leave-one-out information criterion) using ["
  },
  {
    "path": "R/lv_correlations.R",
    "chars": 3651,
    "preview": "#' Calculate trend correlations based on latent factor loadings for\n#' \\pkg{mvgam} models\n#'\n#' This function uses facto"
  },
  {
    "path": "R/marginaleffects.mvgam.R",
    "chars": 15541,
    "preview": "#' Helper functions for \\pkg{marginaleffects} calculations in \\pkg{mvgam} models\n#'\n#' @importFrom stats coef model.fram"
  },
  {
    "path": "R/mcmc_plot.mvgam.R",
    "chars": 4397,
    "preview": "#' MCMC plots of \\pkg{mvgam} parameters, as implemented in \\pkg{bayesplot}\n#'\n#' Convenient way to call MCMC plotting fu"
  },
  {
    "path": "R/model.frame.mvgam.R",
    "chars": 5668,
    "preview": "#' Extract model.frame from a fitted \\pkg{mvgam} object\n#'\n#' @inheritParams stats::model.frame\n#'\n#' @param trend_effec"
  },
  {
    "path": "R/monotonic.R",
    "chars": 13044,
    "preview": "#' Monotonic splines in \\pkg{mvgam} models\n#'\n#' Uses constructors from package \\pkg{splines2} to build monotonically in"
  },
  {
    "path": "R/mvgam-class.R",
    "chars": 4804,
    "preview": "#' Fitted `mvgam` object description\n#'\n#' A fitted \\code{mvgam} object returned by function \\code{\\link{mvgam}}.\n#' Run"
  },
  {
    "path": "R/mvgam-package.R",
    "chars": 93,
    "preview": "#' @keywords internal\n\"_PACKAGE\"\n\n## usethis namespace: start\n## usethis namespace: end\nNULL\n"
  },
  {
    "path": "R/mvgam.R",
    "chars": 91046,
    "preview": "#' Fit a Bayesian Dynamic GAM to Univariate or Multivariate Time Series\n#'\n#' @description\n#' This function estimates th"
  },
  {
    "path": "R/mvgam_diagnostics.R",
    "chars": 3172,
    "preview": "#' Extract diagnostic quantities of \\pkg{mvgam} models\n#'\n#' Extract quantities that can be used to diagnose sampling be"
  },
  {
    "path": "R/mvgam_fevd-class.R",
    "chars": 5934,
    "preview": "#' `mvgam_fevd` object description\n#'\n#' A \\code{mvgam_fevd} object returned by function [fevd()]. Run\n#' `methods(class"
  },
  {
    "path": "R/mvgam_forecast-class.R",
    "chars": 6072,
    "preview": "#' `mvgam_forecast` object description\n#'\n#' A \\code{mvgam_forecast} object returned by function \\code{\\link{hindcast}}\n"
  },
  {
    "path": "R/mvgam_formulae.R",
    "chars": 2336,
    "preview": "#' Details of formula specifications in \\pkg{mvgam} models\n#' @details \\code{\\link{mvgam}} will accept an observation mo"
  },
  {
    "path": "R/mvgam_irf-class.R",
    "chars": 6965,
    "preview": "#' `mvgam_irf` object description\n#'\n#' A \\code{mvgam_irf} object returned by function \\code{\\link{irf}}.\n#' Run `method"
  },
  {
    "path": "R/mvgam_residcor-class.R",
    "chars": 9643,
    "preview": "#' `mvgam_residcor` object description\n#'\n#' A \\code{mvgam_residcor} object returned by function [residual_cor()].\n#' Ru"
  },
  {
    "path": "R/mvgam_setup.R",
    "chars": 55506,
    "preview": "#' Generic GAM setup function\n#' @importFrom stats na.fail\n#' @noRd\nmvgam_setup <- function(\n  formula,\n  knots,\n  famil"
  },
  {
    "path": "R/mvgam_trend_types.R",
    "chars": 25237,
    "preview": "#' Specify autoregressive dynamic processes in \\pkg{mvgam}\n#'\n#' Set up autoregressive or autoregressive moving average "
  },
  {
    "path": "R/noncent_trend.R",
    "chars": 12859,
    "preview": "#' Internal functiosn to change dynamic AR or RW trends\n#' to a non-centred parameterisation for potentially big speed g"
  },
  {
    "path": "R/onAttach.R",
    "chars": 444,
    "preview": ".onAttach = function(libname, pkgname) {\n  options(\"marginaleffects_model_classes\" = \"mvgam\")\n  version <- utils::packag"
  },
  {
    "path": "R/ordinate.jsdgam.R",
    "chars": 9183,
    "preview": "#' Latent variable ordination plots from jsdgam objects\n#'\n#' Plot an ordination of latent variables and their factor lo"
  },
  {
    "path": "R/pairs.mvgam.R",
    "chars": 1904,
    "preview": "#' Create a matrix of output plots from a \\code{mvgam} object\n#'\n#' A \\code{\\link[graphics:pairs]{pairs}}\n#' method that"
  },
  {
    "path": "R/piecewise_trends.R",
    "chars": 14438,
    "preview": "#' Updates for adding piecewise trends\n#' @noRd\nadd_piecewise = function(\n  model_file,\n  model_data,\n  data_train,\n  da"
  },
  {
    "path": "R/plot.mvgam.R",
    "chars": 9708,
    "preview": "#' Default plots for \\pkg{mvgam} models\n#'\n#' This function takes a fitted \\code{mvgam} object and produces plots of\n#' "
  },
  {
    "path": "R/plot_mvgam_factors.R",
    "chars": 4898,
    "preview": "#' Latent factor summaries for a fitted \\pkg{mvgam} object\n#'\n#' This function takes a fitted \\code{mvgam} object and re"
  },
  {
    "path": "R/plot_mvgam_fc.R",
    "chars": 27402,
    "preview": "#' Plot posterior forecast predictions from \\pkg{mvgam} models\n#'\n#' @importFrom stats formula terms\n#'\n#' @param object"
  },
  {
    "path": "R/plot_mvgam_pterms.R",
    "chars": 8016,
    "preview": "#' Plot parametric term partial effects for \\pkg{mvgam} models\n#'\n#' This function plots posterior empirical quantiles f"
  },
  {
    "path": "R/plot_mvgam_randomeffects.R",
    "chars": 5903,
    "preview": "#' Plot random effect terms from \\pkg{mvgam} models\n#'\n#' This function plots posterior empirical quantiles for random e"
  },
  {
    "path": "R/plot_mvgam_resids.R",
    "chars": 7633,
    "preview": "#' Residual diagnostics for a fitted \\pkg{mvgam} object\n#'\n#' This function takes a fitted \\code{mvgam} object and retur"
  },
  {
    "path": "R/plot_mvgam_series.R",
    "chars": 10253,
    "preview": "#' Plot observed time series used for \\pkg{mvgam} modelling\n#'\n#' This function takes either a fitted \\code{mvgam} objec"
  },
  {
    "path": "R/plot_mvgam_smooth.R",
    "chars": 36814,
    "preview": "#' Plot smooth terms from \\pkg{mvgam} models\n#'\n#' This function plots posterior empirical quantiles for a series-specif"
  },
  {
    "path": "R/plot_mvgam_trend.R",
    "chars": 11459,
    "preview": "#' Plot latent trend predictions from \\pkg{mvgam} models\n#'\n#' @importFrom graphics par lines polygon box abline\n#'\n#' @"
  },
  {
    "path": "R/plot_mvgam_uncertainty.R",
    "chars": 7646,
    "preview": "#' Plot forecast uncertainty contributions from \\pkg{mvgam} models\n#'\n#' @importFrom graphics legend\n#' @importFrom stat"
  },
  {
    "path": "R/portal_data.R",
    "chars": 727,
    "preview": "#' Portal Project rodent capture survey data\n#'\n#' A dataset containing time series of total captures (across all contro"
  },
  {
    "path": "R/posterior_epred.mvgam.R",
    "chars": 16793,
    "preview": "#' Draws from the expected value of the posterior predictive distribution for \\pkg{mvgam} objects\n#'\n#' Compute posterio"
  },
  {
    "path": "R/ppc.mvgam.R",
    "chars": 35462,
    "preview": "#' @title Plot conditional posterior predictive checks from \\pkg{mvgam} models\n#'\n#' @importFrom stats quantile density "
  },
  {
    "path": "R/predict.mvgam.R",
    "chars": 24006,
    "preview": "#' Predict from a fitted \\pkg{mvgam} model\n#'\n#' @importFrom stats predict\n#'\n#' @inheritParams brms::fitted.brmsfit\n#'\n"
  },
  {
    "path": "R/print.mvgam.R",
    "chars": 2795,
    "preview": "#' Print a fitted \\pkg{mvgam} object\n#'\n#' This function takes a fitted \\code{mvgam} or \\code{jsdgam} object and prints\n"
  },
  {
    "path": "R/residual_cor.R",
    "chars": 9906,
    "preview": "#' Extract residual correlations based on latent factors\n#'\n#' Compute residual correlation estimates from Joint Species"
  },
  {
    "path": "R/residuals.mvgam.R",
    "chars": 3600,
    "preview": "#' Posterior draws of residuals from \\pkg{mvgam} models\n#'\n#' This method extracts posterior draws of Dunn-Smyth (random"
  },
  {
    "path": "R/sanitise_modelfile.R",
    "chars": 886,
    "preview": "#' Clean up a stan file\n#' @noRd\nsanitise_modelfile = function(model_file) {\n  # Remove empty lines\n  clean_up <- vector"
  },
  {
    "path": "R/score.mvgam_forecast.R",
    "chars": 12302,
    "preview": "#' @title Compute probabilistic forecast scores for \\pkg{mvgam} models\n#'\n#' @param object `mvgam_forecast` object. See "
  },
  {
    "path": "R/series_to_mvgam.R",
    "chars": 3882,
    "preview": "#' Convert timeseries object to format necessary for \\pkg{mvgam} models\n#'\n#' This function converts univariate or multi"
  },
  {
    "path": "R/shared_obs_params.R",
    "chars": 5658,
    "preview": "#' Updates for allowing shared observation params across series\n#' @noRd\nshared_obs_params = function(model_file, family"
  },
  {
    "path": "R/sim_mvgam.R",
    "chars": 17725,
    "preview": "#' Simulate a set of time series for modelling in \\pkg{mvgam}\n#'\n#' This function simulates sets of time series data for"
  },
  {
    "path": "R/stability.R",
    "chars": 10120,
    "preview": "#' Calculate measures of latent VAR community stability\n#'\n#' Compute reactivity, return rates and contributions of inte"
  },
  {
    "path": "R/stan_utils.R",
    "chars": 141902,
    "preview": "#' Stan code and data objects for \\pkg{mvgam} models\n#'\n#' Generate Stan code and data objects for \\pkg{mvgam} models\n#'"
  },
  {
    "path": "R/stationarise_VAR.R",
    "chars": 23891,
    "preview": "#### Modifications to Stan code for stationary VAR1 processes ####\n# All functions and reparameterisations use code supp"
  },
  {
    "path": "R/summary.mvgam.R",
    "chars": 39773,
    "preview": "#' Summary for a fitted \\pkg{mvgam} models\n#'\n#' These functions take a fitted \\code{mvgam} or \\code{jsdgam} object and\n"
  },
  {
    "path": "R/tidier_methods.R",
    "chars": 17139,
    "preview": "#' @importFrom generics tidy\n#' @export\ngenerics::tidy\n\n#' @importFrom generics augment\n#' @export\ngenerics::augment\n\n\n#"
  },
  {
    "path": "R/trends.R",
    "chars": 48464,
    "preview": "#' Supported latent trend models in \\pkg{mvgam}\n#'\n#' @importFrom utils tail\n#' @importFrom stats rnorm\n#'\n#' @details\n#"
  },
  {
    "path": "R/update.mvgam.R",
    "chars": 11021,
    "preview": "#' Update an existing \\pkg{mvgam} model object\n#'\n#' This function allows a previously fitted \\pkg{mvgam} model to be up"
  },
  {
    "path": "R/update_priors.R",
    "chars": 15518,
    "preview": "#' Update priors for a JAGS or Stan model file\n#'\n#'\n#' @param model_file Prepared mvgam model file\n#' @param priors \\co"
  },
  {
    "path": "R/utils-pipe.R",
    "chars": 363,
    "preview": "#' Pipe operator\n#'\n#' See \\code{magrittr::\\link[magrittr:pipe]{\\%>\\%}} for details.\n#'\n#' @name %>%\n#' @rdname pipe\n#' "
  },
  {
    "path": "R/validations.R",
    "chars": 35718,
    "preview": "#'Argument validation functions\n#'@param data Data to be validated (list or data.frame)\n#'@noRd\nvalidate_series_time = f"
  },
  {
    "path": "README.Rmd",
    "chars": 18902,
    "preview": "---\noutput: github_document\n---\n\n<!-- README.md is generated from README.Rmd. Please edit that file -->\n\n```{r, echo = F"
  },
  {
    "path": "README.md",
    "chars": 26090,
    "preview": "<!-- README.md is generated from README.Rmd. Please edit that file -->\n\n<img src=\"man/figures/mvgam_logo.png\" width = 12"
  },
  {
    "path": "build_vignettes_CRAN.R",
    "chars": 1432,
    "preview": "# Vignette names\nvignettes <- list.files('./vignettes', pattern = '.Rmd')\n\n# Generate R script versions of vignettes\npur"
  },
  {
    "path": "cran-comments.md",
    "chars": 1693,
    "preview": "## Version 1.1.594\n\n## Summary of changes\nThis version is a minor patch update to fix a test that spawned more than two "
  },
  {
    "path": "doc/data_in_mvgam.R",
    "chars": 8797,
    "preview": "params <-\n  list(EVAL = TRUE)\n\n## ----echo = FALSE----------------------------------------------------------------\nknitr"
  },
  {
    "path": "doc/data_in_mvgam.Rmd",
    "chars": 21210,
    "preview": "---\ntitle: \"Formatting data for use in mvgam\"\nauthor: \"Nicholas J Clark\"\ndate: \"`r Sys.Date()`\"\noutput:\n  rmarkdown::htm"
  },
  {
    "path": "doc/data_in_mvgam.html",
    "chars": 143883,
    "preview": "<!DOCTYPE html>\n\n<html>\n\n<head>\n\n<meta charset=\"utf-8\" />\n<meta name=\"generator\" content=\"pandoc\" />\n<meta http-equiv=\"X"
  },
  {
    "path": "doc/forecast_evaluation.R",
    "chars": 7508,
    "preview": "params <-\n  list(EVAL = TRUE)\n\n## ----echo = FALSE----------------------------------------------------------------\nknitr"
  },
  {
    "path": "doc/forecast_evaluation.Rmd",
    "chars": 15211,
    "preview": "---\ntitle: \"Forecasting and forecast evaluation in mvgam\"\nauthor: \"Nicholas J Clark\"\ndate: \"`r Sys.Date()`\"\noutput:\n  rm"
  },
  {
    "path": "doc/forecast_evaluation.html",
    "chars": 229451,
    "preview": "<!DOCTYPE html>\n\n<html>\n\n<head>\n\n<meta charset=\"utf-8\" />\n<meta name=\"generator\" content=\"pandoc\" />\n<meta http-equiv=\"X"
  },
  {
    "path": "doc/mvgam_overview.R",
    "chars": 9051,
    "preview": "params <-\n  list(EVAL = TRUE)\n\n## ----echo = FALSE----------------------------------------------------------------\nknitr"
  },
  {
    "path": "doc/mvgam_overview.Rmd",
    "chars": 45159,
    "preview": "---\ntitle: \"Overview of the mvgam package\"\nauthor: \"Nicholas J Clark\"\ndate: \"`r Sys.Date()`\"\noutput:\n  rmarkdown::html_v"
  },
  {
    "path": "doc/mvgam_overview.html",
    "chars": 297978,
    "preview": "<!DOCTYPE html>\n\n<html>\n\n<head>\n\n<meta charset=\"utf-8\" />\n<meta name=\"generator\" content=\"pandoc\" />\n<meta http-equiv=\"X"
  },
  {
    "path": "doc/nmixtures.R",
    "chars": 14200,
    "preview": "params <-\n  list(EVAL = TRUE)\n\n## ----echo = FALSE----------------------------------------------------------------\nknitr"
  },
  {
    "path": "doc/nmixtures.Rmd",
    "chars": 24088,
    "preview": "---\ntitle: \"N-mixtures in mvgam\"\nauthor: \"Nicholas J Clark\"\ndate: \"`r Sys.Date()`\"\noutput:\n  rmarkdown::html_vignette:\n "
  },
  {
    "path": "doc/nmixtures.html",
    "chars": 187419,
    "preview": "<!DOCTYPE html>\n\n<html>\n\n<head>\n\n<meta charset=\"utf-8\" />\n<meta name=\"generator\" content=\"pandoc\" />\n<meta http-equiv=\"X"
  },
  {
    "path": "doc/shared_states.R",
    "chars": 8553,
    "preview": "params <-\n  list(EVAL = TRUE)\n\n## ----echo = FALSE----------------------------------------------------------------\nknitr"
  },
  {
    "path": "doc/shared_states.Rmd",
    "chars": 15048,
    "preview": "---\ntitle: \"Shared latent states in mvgam\"\nauthor: \"Nicholas J Clark\"\ndate: \"`r Sys.Date()`\"\noutput:\n  rmarkdown::html_v"
  },
  {
    "path": "doc/shared_states.html",
    "chars": 242554,
    "preview": "<!DOCTYPE html>\n\n<html>\n\n<head>\n\n<meta charset=\"utf-8\" />\n<meta name=\"generator\" content=\"pandoc\" />\n<meta http-equiv=\"X"
  },
  {
    "path": "doc/time_varying_effects.R",
    "chars": 7995,
    "preview": "params <-\n  list(EVAL = TRUE)\n\n## ----echo = FALSE----------------------------------------------------------------\nknitr"
  },
  {
    "path": "doc/time_varying_effects.Rmd",
    "chars": 17063,
    "preview": "---\ntitle: \"Time-varying effects in mvgam\"\nauthor: \"Nicholas J Clark\"\ndate: \"`r Sys.Date()`\"\noutput:\n  rmarkdown::html_v"
  },
  {
    "path": "doc/time_varying_effects.html",
    "chars": 211411,
    "preview": "<!DOCTYPE html>\n\n<html>\n\n<head>\n\n<meta charset=\"utf-8\" />\n<meta name=\"generator\" content=\"pandoc\" />\n<meta http-equiv=\"X"
  },
  {
    "path": "doc/trend_formulas.R",
    "chars": 12438,
    "preview": "params <-\n  list(EVAL = TRUE)\n\n## ----echo = FALSE----------------------------------------------------------------\nknitr"
  },
  {
    "path": "doc/trend_formulas.Rmd",
    "chars": 27857,
    "preview": "---\ntitle: \"State-Space models in mvgam\"\nauthor: \"Nicholas J Clark\"\ndate: \"`r Sys.Date()`\"\noutput:\n  rmarkdown::html_vig"
  },
  {
    "path": "doc/trend_formulas.html",
    "chars": 477614,
    "preview": "<!DOCTYPE html>\n\n<html>\n\n<head>\n\n<meta charset=\"utf-8\" />\n<meta name=\"generator\" content=\"pandoc\" />\n<meta http-equiv=\"X"
  },
  {
    "path": "docs/404.html",
    "chars": 4672,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/CODE_OF_CONDUCT.html",
    "chars": 11703,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/CONTRIBUTING.html",
    "chars": 9143,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/LICENSE-text.html",
    "chars": 3953,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/LICENSE.html",
    "chars": 5214,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/articles/data_in_mvgam.html",
    "chars": 100929,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/articles/forecast_evaluation.html",
    "chars": 79767,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/articles/index.html",
    "chars": 7205,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/articles/mvgam_overview.html",
    "chars": 144078,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/articles/nmixtures.html",
    "chars": 117339,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/articles/shared_states.html",
    "chars": 70077,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/articles/time_varying_effects.html",
    "chars": 71820,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/articles/trend_formulas.html",
    "chars": 98465,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/authors.html",
    "chars": 6283,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/deps/bootstrap-5.2.2/font.css",
    "chars": 494,
    "preview": "@font-face {\n  font-family: 'Roboto';\n  font-style: normal;\n  font-weight: 400;\n  font-display: swap;\n  src: url(fonts/K"
  },
  {
    "path": "docs/deps/bootstrap-5.3.1/font.css",
    "chars": 494,
    "preview": "@font-face {\n  font-family: 'Roboto';\n  font-style: normal;\n  font-weight: 400;\n  font-display: swap;\n  src: url(fonts/K"
  },
  {
    "path": "docs/deps/data-deps.txt",
    "chars": 292,
    "preview": "<script src=\"deps/jquery-3.6.0/jquery-3.6.0.min.js\"></script>\n<meta name=\"viewport\" content=\"width=device-width, initial"
  },
  {
    "path": "docs/deps/jquery-3.6.0/jquery-3.6.0.js",
    "chars": 288580,
    "preview": "/*!\n * jQuery JavaScript Library v3.6.0\n * https://jquery.com/\n *\n * Includes Sizzle.js\n * https://sizzlejs.com/\n *\n * C"
  },
  {
    "path": "docs/index.html",
    "chars": 25296,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type"
  },
  {
    "path": "docs/news/index.html",
    "chars": 17039,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/pkgdown.js",
    "chars": 4403,
    "preview": "/* http://gregfranko.com/blog/jquery-best-practices/ */\n(function($) {\n  $(function() {\n\n    $('nav.navbar').headroom();"
  },
  {
    "path": "docs/pkgdown.yml",
    "chars": 486,
    "preview": "pandoc: 3.1.1\npkgdown: 2.0.7\npkgdown_sha: ~\narticles:\n  data_in_mvgam: data_in_mvgam.html\n  forecast_evaluation: forecas"
  },
  {
    "path": "docs/reference/GP.html",
    "chars": 7085,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/RW.html",
    "chars": 57747,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/ZMVN.html",
    "chars": 34042,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/add_residuals.mvgam.html",
    "chars": 6714,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/add_tweedie_lines.html",
    "chars": 7804,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/all_neon_tick_data.html",
    "chars": 8450,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/augment.mvgam.html",
    "chars": 17076,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/code.html",
    "chars": 20870,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/conditional_effects.mvgam.html",
    "chars": 104905,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/dynamic.html",
    "chars": 35464,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/ensemble.mvgam_forecast.html",
    "chars": 38417,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/evaluate_mvgams.html",
    "chars": 41716,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/fevd.mvgam.html",
    "chars": 14700,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/fitted.mvgam.html",
    "chars": 16177,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/forecast.mvgam.html",
    "chars": 31222,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/formula.mvgam.html",
    "chars": 6695,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/get_monitor_pars.html",
    "chars": 8222,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/get_mvgam_priors.html",
    "chars": 128665,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/gratia_mvgam_enhancements.html",
    "chars": 32271,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/hindcast.mvgam.html",
    "chars": 29587,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/how_to_cite.mvgam.html",
    "chars": 27910,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/index-mvgam.html",
    "chars": 31566,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/index.html",
    "chars": 22330,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/irf.mvgam.html",
    "chars": 16046,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/jsdgam.html",
    "chars": 106972,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/lfo_cv.mvgam.html",
    "chars": 27753,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/logLik.mvgam.html",
    "chars": 12490,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/loo.mvgam.html",
    "chars": 59769,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/lv_correlations.html",
    "chars": 13382,
    "preview": "<!DOCTYPE html>\r\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\""
  },
  {
    "path": "docs/reference/mcmc_plot.mvgam.html",
    "chars": 12862,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/model.frame.mvgam.html",
    "chars": 7026,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/monotonic.html",
    "chars": 37569,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  },
  {
    "path": "docs/reference/mvgam-class.html",
    "chars": 13556,
    "preview": "<!DOCTYPE html>\n<!-- Generated by pkgdown: do not edit by hand --><html lang=\"en\"><head><meta http-equiv=\"Content-Type\" "
  }
]

// ... and 278 more files (download for full content)
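
The condensed preview above is a flat JSON array of objects with three fields: path, chars, and preview. A minimal sketch for querying the downloaded file from R, assuming it has been saved locally as mvgam_extract.json (the filename is an assumption):

    # Load the condensed preview and list the five largest R source files.
    # jsonlite::fromJSON() turns the uniform array into a data.frame with
    # columns path, chars and preview.
    library(jsonlite)

    files <- fromJSON("mvgam_extract.json")
    r_files <- subset(files, grepl("^R/.*\\.R$", path))
    head(r_files[order(-r_files$chars), c("path", "chars")], 5)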

About this extraction

This page contains the full source code of the nicholasjclark/mvgam GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 478 files (10.3 MB), approximately 2.7M tokens, and a symbol index of 92 extracted functions. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. Because the full output is too large to copy to the clipboard, download it as a .txt file to get everything.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.
