Full Code of covid19datahub/COVID19 for AI

master 751c3d9a54c6 cached
673 files
4.4 MB
1.2M tokens
1 symbols
1 requests
Download .txt
Showing preview only (4,742K chars total). Download the full file or copy to clipboard to get everything.
Repository: covid19datahub/COVID19
Branch: master
Commit: 751c3d9a54c6
Files: 673
Total size: 4.4 MB

Directory structure:
gitextract_37nyhq5j/

├── .github/
│   ├── .gitignore
│   └── workflows/
│       └── pkgdown.yaml
├── .gitignore
├── .travis.yml
├── DESCRIPTION
├── LICENSE.md
├── NAMESPACE
├── NEWS.md
├── R/
│   ├── covid19.R
│   ├── ds_admin.ch.R
│   ├── ds_arcgis.de.R
│   ├── ds_arcgis.se.R
│   ├── ds_canada.ca.R
│   ├── ds_cdc.gov.R
│   ├── ds_covid19india.org.R
│   ├── ds_covidtracking.com.R
│   ├── ds_geohive.ie.R
│   ├── ds_github.ceedsdemm.covidprodataset.R
│   ├── ds_github.covid19euzh.covid19eudata.R
│   ├── ds_github.cssegisanddata.covid19.R
│   ├── ds_github.cssegisanddata.covid19unified.R
│   ├── ds_github.dsfsi.covid19za.R
│   ├── ds_github.dssgpt.covid19ptdata.R
│   ├── ds_github.eguidotti.covid19br.R
│   ├── ds_github.italia.covid19opendatavaccini.R
│   ├── ds_github.jmcastagnetto.covid19perudata.R
│   ├── ds_github.lisphilar.covid19sir.R
│   ├── ds_github.m3it.covid19data.R
│   ├── ds_github.minciencia.datoscovid19.R
│   ├── ds_github.mpiktas.covid19lt.R
│   ├── ds_github.nytimes.covid19data.R
│   ├── ds_github.openzh.covid19.R
│   ├── ds_github.oxcgrt.covidpolicytracker.R
│   ├── ds_github.ozanerturk.covid19turkeyapi.R
│   ├── ds_github.pcmdpc.covid19.R
│   ├── ds_github.robertkochinstitut.covid19impfungenindeutschland.R
│   ├── ds_github.swsoyee.2019ncovjapan.R
│   ├── ds_github.wcota.covid19br.R
│   ├── ds_github.wcota.covid19br.vac.R
│   ├── ds_go.th.R
│   ├── ds_gob.ar.R
│   ├── ds_gob.pe.R
│   ├── ds_gouv.fr.R
│   ├── ds_gov.co.R
│   ├── ds_gov.lv.R
│   ├── ds_gov.si.R
│   ├── ds_gov.tw.R
│   ├── ds_gov.uk.R
│   ├── ds_gv.at.R
│   ├── ds_healthdata.gov.R
│   ├── ds_humdata.af.R
│   ├── ds_humdata.ht.R
│   ├── ds_impfdashboard.de.R
│   ├── ds_isciii.es.R
│   ├── ds_koronavirus.hr.R
│   ├── ds_mzcr.cz.R
│   ├── ds_ourworldindata.org.R
│   ├── ds_rivm.nl.R
│   ├── ds_sciensano.be.R
│   ├── ds_ssi.dk.R
│   ├── ds_toyokeizai.net.R
│   ├── ds_who.int.R
│   ├── ds_wikipedia.dp.R
│   ├── iso_ABW.R
│   ├── iso_AFG.R
│   ├── iso_AGO.R
│   ├── iso_AIA.R
│   ├── iso_ALB.R
│   ├── iso_AND.R
│   ├── iso_ARE.R
│   ├── iso_ARG.R
│   ├── iso_ARM.R
│   ├── iso_ASM.R
│   ├── iso_ATG.R
│   ├── iso_AUS.R
│   ├── iso_AUT.R
│   ├── iso_AZE.R
│   ├── iso_BDI.R
│   ├── iso_BEL.R
│   ├── iso_BEN.R
│   ├── iso_BES.R
│   ├── iso_BFA.R
│   ├── iso_BGD.R
│   ├── iso_BGR.R
│   ├── iso_BHR.R
│   ├── iso_BHS.R
│   ├── iso_BIH.R
│   ├── iso_BLR.R
│   ├── iso_BLZ.R
│   ├── iso_BMU.R
│   ├── iso_BOL.R
│   ├── iso_BRA.R
│   ├── iso_BRB.R
│   ├── iso_BRN.R
│   ├── iso_BTN.R
│   ├── iso_BWA.R
│   ├── iso_CAC.R
│   ├── iso_CAF.R
│   ├── iso_CAN.R
│   ├── iso_CHE.R
│   ├── iso_CHL.R
│   ├── iso_CHN.R
│   ├── iso_CIV.R
│   ├── iso_CMR.R
│   ├── iso_COD.R
│   ├── iso_COG.R
│   ├── iso_COK.R
│   ├── iso_COL.R
│   ├── iso_COM.R
│   ├── iso_CPV.R
│   ├── iso_CRI.R
│   ├── iso_CUB.R
│   ├── iso_CUW.R
│   ├── iso_CYM.R
│   ├── iso_CYP.R
│   ├── iso_CZE.R
│   ├── iso_DEU.R
│   ├── iso_DJI.R
│   ├── iso_DMA.R
│   ├── iso_DNK.R
│   ├── iso_DOM.R
│   ├── iso_DPC.R
│   ├── iso_DZA.R
│   ├── iso_ECU.R
│   ├── iso_EGY.R
│   ├── iso_ERI.R
│   ├── iso_ESP.R
│   ├── iso_EST.R
│   ├── iso_ETH.R
│   ├── iso_FIN.R
│   ├── iso_FJI.R
│   ├── iso_FLK.R
│   ├── iso_FRA.R
│   ├── iso_FRO.R
│   ├── iso_FSM.R
│   ├── iso_GAB.R
│   ├── iso_GBR.R
│   ├── iso_GEO.R
│   ├── iso_GGY.R
│   ├── iso_GHA.R
│   ├── iso_GIB.R
│   ├── iso_GIN.R
│   ├── iso_GLP.R
│   ├── iso_GMB.R
│   ├── iso_GNB.R
│   ├── iso_GNQ.R
│   ├── iso_GPC.R
│   ├── iso_GRC.R
│   ├── iso_GRD.R
│   ├── iso_GRL.R
│   ├── iso_GTM.R
│   ├── iso_GUF.R
│   ├── iso_GUM.R
│   ├── iso_GUY.R
│   ├── iso_HKG.R
│   ├── iso_HND.R
│   ├── iso_HRV.R
│   ├── iso_HTI.R
│   ├── iso_HUN.R
│   ├── iso_IDN.R
│   ├── iso_IMN.R
│   ├── iso_IND.R
│   ├── iso_IRL.R
│   ├── iso_IRN.R
│   ├── iso_IRQ.R
│   ├── iso_ISL.R
│   ├── iso_ISR.R
│   ├── iso_ITA.R
│   ├── iso_JAM.R
│   ├── iso_JEY.R
│   ├── iso_JOR.R
│   ├── iso_JPN.R
│   ├── iso_KAZ.R
│   ├── iso_KEN.R
│   ├── iso_KGZ.R
│   ├── iso_KHM.R
│   ├── iso_KIR.R
│   ├── iso_KNA.R
│   ├── iso_KOR.R
│   ├── iso_KWT.R
│   ├── iso_LAO.R
│   ├── iso_LBN.R
│   ├── iso_LBR.R
│   ├── iso_LBY.R
│   ├── iso_LCA.R
│   ├── iso_LIE.R
│   ├── iso_LKA.R
│   ├── iso_LSO.R
│   ├── iso_LTU.R
│   ├── iso_LUX.R
│   ├── iso_LVA.R
│   ├── iso_MAC.R
│   ├── iso_MAR.R
│   ├── iso_MCO.R
│   ├── iso_MDA.R
│   ├── iso_MDG.R
│   ├── iso_MDV.R
│   ├── iso_MEX.R
│   ├── iso_MHL.R
│   ├── iso_MKD.R
│   ├── iso_MLI.R
│   ├── iso_MLT.R
│   ├── iso_MMR.R
│   ├── iso_MNE.R
│   ├── iso_MNG.R
│   ├── iso_MNP.R
│   ├── iso_MOZ.R
│   ├── iso_MRT.R
│   ├── iso_MSR.R
│   ├── iso_MTQ.R
│   ├── iso_MUS.R
│   ├── iso_MWI.R
│   ├── iso_MYS.R
│   ├── iso_MYT.R
│   ├── iso_NAM.R
│   ├── iso_NCL.R
│   ├── iso_NER.R
│   ├── iso_NGA.R
│   ├── iso_NIC.R
│   ├── iso_NIU.R
│   ├── iso_NLD.R
│   ├── iso_NOR.R
│   ├── iso_NPL.R
│   ├── iso_NRU.R
│   ├── iso_NZL.R
│   ├── iso_OMN.R
│   ├── iso_PAK.R
│   ├── iso_PAN.R
│   ├── iso_PCN.R
│   ├── iso_PER.R
│   ├── iso_PHL.R
│   ├── iso_PLW.R
│   ├── iso_PNG.R
│   ├── iso_POL.R
│   ├── iso_PRI.R
│   ├── iso_PRT.R
│   ├── iso_PRY.R
│   ├── iso_PSE.R
│   ├── iso_PYF.R
│   ├── iso_QAT.R
│   ├── iso_REU.R
│   ├── iso_RKS.R
│   ├── iso_ROU.R
│   ├── iso_RUS.R
│   ├── iso_RWA.R
│   ├── iso_SAU.R
│   ├── iso_SDN.R
│   ├── iso_SEN.R
│   ├── iso_SGP.R
│   ├── iso_SHN.R
│   ├── iso_SLB.R
│   ├── iso_SLE.R
│   ├── iso_SLV.R
│   ├── iso_SMR.R
│   ├── iso_SOM.R
│   ├── iso_SRB.R
│   ├── iso_SSD.R
│   ├── iso_STP.R
│   ├── iso_SUR.R
│   ├── iso_SVK.R
│   ├── iso_SVN.R
│   ├── iso_SWE.R
│   ├── iso_SWZ.R
│   ├── iso_SXM.R
│   ├── iso_SYC.R
│   ├── iso_SYR.R
│   ├── iso_TCA.R
│   ├── iso_TCD.R
│   ├── iso_TGO.R
│   ├── iso_THA.R
│   ├── iso_TJK.R
│   ├── iso_TKL.R
│   ├── iso_TKM.R
│   ├── iso_TLS.R
│   ├── iso_TON.R
│   ├── iso_TTO.R
│   ├── iso_TUN.R
│   ├── iso_TUR.R
│   ├── iso_TUV.R
│   ├── iso_TWN.R
│   ├── iso_TZA.R
│   ├── iso_UGA.R
│   ├── iso_UKR.R
│   ├── iso_URY.R
│   ├── iso_USA.R
│   ├── iso_UZB.R
│   ├── iso_VAT.R
│   ├── iso_VCT.R
│   ├── iso_VEN.R
│   ├── iso_VGB.R
│   ├── iso_VIR.R
│   ├── iso_VNM.R
│   ├── iso_VUT.R
│   ├── iso_WLF.R
│   ├── iso_WSM.R
│   ├── iso_YEM.R
│   ├── iso_ZAF.R
│   ├── iso_ZMB.R
│   └── iso_ZWE.R
├── README.md
├── index.md
├── inst/
│   ├── CITATION
│   ├── extdata/
│   │   ├── db/
│   │   │   ├── AFG.csv
│   │   │   ├── ARG.csv
│   │   │   ├── AUS.csv
│   │   │   ├── AUT.csv
│   │   │   ├── BEL.csv
│   │   │   ├── BRA.csv
│   │   │   ├── CAN.csv
│   │   │   ├── CHE.csv
│   │   │   ├── CHL.csv
│   │   │   ├── CHN.csv
│   │   │   ├── COL.csv
│   │   │   ├── CZE.csv
│   │   │   ├── DEU.csv
│   │   │   ├── DNK.csv
│   │   │   ├── ESP.csv
│   │   │   ├── FRA.csv
│   │   │   ├── GBR.csv
│   │   │   ├── HRV.csv
│   │   │   ├── HTI.csv
│   │   │   ├── IND.csv
│   │   │   ├── IRL.csv
│   │   │   ├── ISO.csv
│   │   │   ├── ITA.csv
│   │   │   ├── JPN.csv
│   │   │   ├── LTU.csv
│   │   │   ├── LVA.csv
│   │   │   ├── MEX.csv
│   │   │   ├── NLD.csv
│   │   │   ├── PAK.csv
│   │   │   ├── PER.csv
│   │   │   ├── POL.csv
│   │   │   ├── PRT.csv
│   │   │   ├── RUS.csv
│   │   │   ├── SWE.csv
│   │   │   ├── THA.csv
│   │   │   ├── TWN.csv
│   │   │   ├── UKR.csv
│   │   │   ├── USA.csv
│   │   │   └── ZAF.csv
│   │   ├── db.R
│   │   ├── ds/
│   │   │   ├── CAN.csv
│   │   │   └── DPC.csv
│   │   └── src.csv
│   ├── joss/
│   │   ├── README.md
│   │   ├── apa.csl
│   │   ├── paper.Rmd
│   │   ├── paper.bib
│   │   └── paper.md
│   └── templates/
│       ├── ds_.R
│       └── iso_.R
├── man/
│   ├── ABW.Rd
│   ├── AFG.Rd
│   ├── AGO.Rd
│   ├── AIA.Rd
│   ├── ALB.Rd
│   ├── AND.Rd
│   ├── ARE.Rd
│   ├── ARG.Rd
│   ├── ARM.Rd
│   ├── ASM.Rd
│   ├── ATG.Rd
│   ├── AUS.Rd
│   ├── AUT.Rd
│   ├── AZE.Rd
│   ├── BDI.Rd
│   ├── BEL.Rd
│   ├── BEN.Rd
│   ├── BES.Rd
│   ├── BFA.Rd
│   ├── BGD.Rd
│   ├── BGR.Rd
│   ├── BHR.Rd
│   ├── BHS.Rd
│   ├── BIH.Rd
│   ├── BLR.Rd
│   ├── BLZ.Rd
│   ├── BMU.Rd
│   ├── BOL.Rd
│   ├── BRA.Rd
│   ├── BRB.Rd
│   ├── BRN.Rd
│   ├── BTN.Rd
│   ├── BWA.Rd
│   ├── CAC.Rd
│   ├── CAF.Rd
│   ├── CAN.Rd
│   ├── CHE.Rd
│   ├── CHL.Rd
│   ├── CHN.Rd
│   ├── CIV.Rd
│   ├── CMR.Rd
│   ├── COD.Rd
│   ├── COG.Rd
│   ├── COK.Rd
│   ├── COL.Rd
│   ├── COM.Rd
│   ├── CPV.Rd
│   ├── CRI.Rd
│   ├── CUB.Rd
│   ├── CUW.Rd
│   ├── CYM.Rd
│   ├── CYP.Rd
│   ├── CZE.Rd
│   ├── DEU.Rd
│   ├── DJI.Rd
│   ├── DMA.Rd
│   ├── DNK.Rd
│   ├── DOM.Rd
│   ├── DPC.Rd
│   ├── DZA.Rd
│   ├── ECU.Rd
│   ├── EGY.Rd
│   ├── ERI.Rd
│   ├── ESP.Rd
│   ├── EST.Rd
│   ├── ETH.Rd
│   ├── FIN.Rd
│   ├── FJI.Rd
│   ├── FLK.Rd
│   ├── FRA.Rd
│   ├── FRO.Rd
│   ├── FSM.Rd
│   ├── GAB.Rd
│   ├── GBR.Rd
│   ├── GEO.Rd
│   ├── GGY.Rd
│   ├── GHA.Rd
│   ├── GIB.Rd
│   ├── GIN.Rd
│   ├── GLP.Rd
│   ├── GMB.Rd
│   ├── GNB.Rd
│   ├── GNQ.Rd
│   ├── GPC.Rd
│   ├── GRC.Rd
│   ├── GRD.Rd
│   ├── GRL.Rd
│   ├── GTM.Rd
│   ├── GUF.Rd
│   ├── GUM.Rd
│   ├── GUY.Rd
│   ├── HKG.Rd
│   ├── HND.Rd
│   ├── HRV.Rd
│   ├── HTI.Rd
│   ├── HUN.Rd
│   ├── IDN.Rd
│   ├── IMN.Rd
│   ├── IND.Rd
│   ├── IRL.Rd
│   ├── IRN.Rd
│   ├── IRQ.Rd
│   ├── ISL.Rd
│   ├── ISR.Rd
│   ├── ITA.Rd
│   ├── JAM.Rd
│   ├── JEY.Rd
│   ├── JOR.Rd
│   ├── JPN.Rd
│   ├── KAZ.Rd
│   ├── KEN.Rd
│   ├── KGZ.Rd
│   ├── KHM.Rd
│   ├── KIR.Rd
│   ├── KNA.Rd
│   ├── KOR.Rd
│   ├── KWT.Rd
│   ├── LAO.Rd
│   ├── LBN.Rd
│   ├── LBR.Rd
│   ├── LBY.Rd
│   ├── LCA.Rd
│   ├── LIE.Rd
│   ├── LKA.Rd
│   ├── LSO.Rd
│   ├── LTU.Rd
│   ├── LUX.Rd
│   ├── LVA.Rd
│   ├── MAC.Rd
│   ├── MAR.Rd
│   ├── MCO.Rd
│   ├── MDA.Rd
│   ├── MDG.Rd
│   ├── MDV.Rd
│   ├── MEX.Rd
│   ├── MHL.Rd
│   ├── MKD.Rd
│   ├── MLI.Rd
│   ├── MLT.Rd
│   ├── MMR.Rd
│   ├── MNE.Rd
│   ├── MNG.Rd
│   ├── MNP.Rd
│   ├── MOZ.Rd
│   ├── MRT.Rd
│   ├── MSR.Rd
│   ├── MTQ.Rd
│   ├── MUS.Rd
│   ├── MWI.Rd
│   ├── MYS.Rd
│   ├── MYT.Rd
│   ├── NAM.Rd
│   ├── NCL.Rd
│   ├── NER.Rd
│   ├── NGA.Rd
│   ├── NIC.Rd
│   ├── NIU.Rd
│   ├── NLD.Rd
│   ├── NOR.Rd
│   ├── NPL.Rd
│   ├── NRU.Rd
│   ├── NZL.Rd
│   ├── OMN.Rd
│   ├── PAK.Rd
│   ├── PAN.Rd
│   ├── PCN.Rd
│   ├── PER.Rd
│   ├── PHL.Rd
│   ├── PLW.Rd
│   ├── PNG.Rd
│   ├── POL.Rd
│   ├── PRI.Rd
│   ├── PRT.Rd
│   ├── PRY.Rd
│   ├── PSE.Rd
│   ├── PYF.Rd
│   ├── QAT.Rd
│   ├── REU.Rd
│   ├── RKS.Rd
│   ├── ROU.Rd
│   ├── RUS.Rd
│   ├── RWA.Rd
│   ├── SAU.Rd
│   ├── SDN.Rd
│   ├── SEN.Rd
│   ├── SGP.Rd
│   ├── SHN.Rd
│   ├── SLB.Rd
│   ├── SLE.Rd
│   ├── SLV.Rd
│   ├── SMR.Rd
│   ├── SOM.Rd
│   ├── SRB.Rd
│   ├── SSD.Rd
│   ├── STP.Rd
│   ├── SUR.Rd
│   ├── SVK.Rd
│   ├── SVN.Rd
│   ├── SWE.Rd
│   ├── SWZ.Rd
│   ├── SXM.Rd
│   ├── SYC.Rd
│   ├── SYR.Rd
│   ├── TCA.Rd
│   ├── TCD.Rd
│   ├── TGO.Rd
│   ├── THA.Rd
│   ├── TJK.Rd
│   ├── TKL.Rd
│   ├── TKM.Rd
│   ├── TLS.Rd
│   ├── TON.Rd
│   ├── TTO.Rd
│   ├── TUN.Rd
│   ├── TUR.Rd
│   ├── TUV.Rd
│   ├── TWN.Rd
│   ├── TZA.Rd
│   ├── UGA.Rd
│   ├── UKR.Rd
│   ├── URY.Rd
│   ├── USA.Rd
│   ├── UZB.Rd
│   ├── VAT.Rd
│   ├── VCT.Rd
│   ├── VEN.Rd
│   ├── VGB.Rd
│   ├── VIR.Rd
│   ├── VNM.Rd
│   ├── VUT.Rd
│   ├── WLF.Rd
│   ├── WSM.Rd
│   ├── YEM.Rd
│   ├── ZAF.Rd
│   ├── ZMB.Rd
│   ├── ZWE.Rd
│   ├── add_iso.Rd
│   ├── admin.ch.Rd
│   ├── arcgis.de.Rd
│   ├── arcgis.se.Rd
│   ├── canada.ca.Rd
│   ├── cdc.gov.Rd
│   ├── covid19.Rd
│   ├── covid19india.org.Rd
│   ├── covidtracking.com.Rd
│   ├── cumsum.Rd
│   ├── decreasing.Rd
│   ├── docstring.Rd
│   ├── drop_decreasing.Rd
│   ├── ds_check_format.Rd
│   ├── ds_docstring.Rd
│   ├── extdata.Rd
│   ├── geohive.ie.Rd
│   ├── github.ceedsdemm.covidprodataset.Rd
│   ├── github.covid19datahub.covid19br.Rd
│   ├── github.covid19euzh.covid19eudata.Rd
│   ├── github.cssegisanddata.covid19.Rd
│   ├── github.cssegisanddata.covid19unified.Rd
│   ├── github.dsfsi.covid19za.Rd
│   ├── github.dssgpt.covid19ptdata.Rd
│   ├── github.italia.covid19opendatavaccini.Rd
│   ├── github.jmcastagnetto.covid19perudata.Rd
│   ├── github.lisphilar.covid19sir.Rd
│   ├── github.m3it.covid19data.Rd
│   ├── github.minciencia.datoscovid19.Rd
│   ├── github.mpiktas.covid19lt.Rd
│   ├── github.nytimes.covid19data.Rd
│   ├── github.openzh.covid19.Rd
│   ├── github.oxcgrt.covidpolicytracker.Rd
│   ├── github.ozanerturk.covid19turkeyapi.Rd
│   ├── github.pcmdpc.covid19.Rd
│   ├── github.robertkochinstitut.covid19impfungenindeutschland.Rd
│   ├── github.swsoyee.2019ncovjapan.Rd
│   ├── github.wcota.covid19br.Rd
│   ├── github.wcota.covid19br.vac.Rd
│   ├── go.th.Rd
│   ├── gob.ar.Rd
│   ├── gob.pe.Rd
│   ├── gouv.fr.Rd
│   ├── gov.co.Rd
│   ├── gov.lv.Rd
│   ├── gov.si.Rd
│   ├── gov.tw.Rd
│   ├── gov.uk.Rd
│   ├── gv.at.Rd
│   ├── healthdata.gov.Rd
│   ├── humdata.af.Rd
│   ├── humdata.ht.Rd
│   ├── id.Rd
│   ├── impfdashboard.de.Rd
│   ├── isciii.es.Rd
│   ├── iso_docstring.Rd
│   ├── isoweek2date.Rd
│   ├── koronavirus.hr.Rd
│   ├── map_data.Rd
│   ├── map_values.Rd
│   ├── mzcr.cz.Rd
│   ├── naming.Rd
│   ├── ourworldindata.org.Rd
│   ├── read.csv.Rd
│   ├── read.excel.Rd
│   ├── read.xsv.Rd
│   ├── read.zip.Rd
│   ├── repo.Rd
│   ├── rivm.nl.Rd
│   ├── sciensano.be.Rd
│   ├── ssi.dk.Rd
│   ├── toyokeizai.net.Rd
│   ├── who.int.Rd
│   ├── wikipedia.dp.Rd
│   └── write.csv.Rd
├── pkgdown/
│   ├── _pkgdown.yml
│   ├── extra.css
│   └── extra.js
├── styleguide.md
├── test/
│   └── compare_data.R
└── vignettes/
    ├── contributors.Rmd
    ├── data.Rmd
    ├── docs.Rmd
    ├── python.Rmd
    └── r.Rmd

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/.gitignore
================================================
*.html


================================================
FILE: .github/workflows/pkgdown.yaml
================================================
# Build and deploy the pkgdown website.
on:
  
  # rebuild on every push
  push:
  
  schedule:
    
    # * is a special character in YAML so you have to quote this string
    # rebuild every 6 hours to keep the published data/docs fresh
    - cron:  '0 */6 * * *'

name: pkgdown

jobs:
  
  pkgdown:
    
    runs-on: ubuntu-latest
    
    env:
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
    
    steps:
    
      # git identity used when the site is pushed back to the repository
      - name: Credentials
        run: |
            git config --global user.email "emanuele.guidotti@unine.ch"
            git config --global user.name "Emanuele Guidotti"
      
      - uses: actions/checkout@v2
      
      - uses: r-lib/actions/setup-r@v2

      - uses: r-lib/actions/setup-pandoc@v2
      
      # system libraries needed to compile the R package dependencies
      - name: Install libcurl
        run: sudo apt-get install libcurl4-openssl-dev
      
      - name: Install FriBidi
        run: sudo apt-get install -y libfribidi-dev
      
      - name: Install HarfBuzz
        run: sudo apt-get install -y libharfbuzz-dev

      - name: Install additional libs
        run: sudo apt-get install -y libfontconfig1-dev libtiff-dev

      # NOTE(review): pkgdown is pinned to 1.6.1 — presumably newer versions
      # break the site build; confirm before upgrading
      - name: Install dependencies
        run: |
          install.packages("DT")
          install.packages("remotes")
          remotes::install_version("pkgdown", version = "1.6.1")
          remotes::install_deps(dependencies = TRUE)
        shell: Rscript {0}

      - name: Install package
        run: R CMD INSTALL .

      # builds the site and deploys it to a branch (pkgdown default: gh-pages)
      - name: Deploy package
        run: pkgdown::deploy_to_branch(new_process = FALSE, clean = TRUE)
        shell: Rscript {0}


================================================
FILE: .gitignore
================================================
# History files
.Rhistory
.Rapp.history

# Session Data files
.RData

# User-specific files
.Ruserdata

# Example code in package build process
*-Ex.R

# Output files from R CMD build
/*.tar.gz

# Output files from R CMD check
/*.Rcheck/

# RStudio files
.Rproj.user/

# produced vignettes
vignettes/*.html
vignettes/*.pdf

# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3
.httr-oauth

# knitr and R markdown default cache directories
*_cache/
/cache/

# Temporary files created by R markdown
*.utf8.md
*.knit.md
*.Rproj
.Rbuildignore
LICENSE

# R Environment Variables
.Renviron

# Additional files
/doc
/Meta
/docs
inst/doc
*.log
/inst/extdata/apple.csv
/inst/extdata/google.csv
.DS_Store


================================================
FILE: .travis.yml
================================================
language: r
sudo: required
cache: packages

r:
  - devel
  - release

os:
  - linux
  
env:
  - _R_CHECK_TESTS_NLINES_=0
  
script:
  - |
    R CMD build .
    travis_wait 180 R CMD check --no-build-vignettes --no-manual COVID19*tar.gz
  

================================================
FILE: DESCRIPTION
================================================
Package: COVID19
Type: Package
Title: COVID-19 Data Hub
Version: 3.0.0
Authors@R: c(
    person(given = "Emanuele", family = "Guidotti", email = "emanuele.guidotti@unine.ch", role = c("aut", "cre"), comment = c(ORCID = "0000-0002-8961-6623")),
    person(given = "David", family = "Ardia", role = c("ctb"), comment = c(ORCID = "0000-0003-2823-782X"))
  )
Description: Unified datasets for a better understanding of COVID-19.
License: GPL-3
URL: https://covid19datahub.io
BugReports: https://github.com/covid19datahub/COVID19/issues
Encoding: UTF-8
LazyData: true
Depends: R (>= 3.5.0), utils, dplyr (>= 1.0.0), tidyr (>= 1.0.0)
Imports: remotes, curl, digest, readxl, readr, httr, xml2, jsonlite, rvest, data.table, R.utils, MMWRweek
Roxygen: list(markdown = TRUE)
RoxygenNote: 7.2.1
Suggests: knitr, rmarkdown
VignetteBuilder: knitr


================================================
FILE: LICENSE.md
================================================
# Terms of use

> We have invested a lot of time and effort in creating [COVID-19 Data Hub](https://covid19datahub.io/), please: 

- cite [Guidotti and Ardia (2020)](https://doi.org/10.21105/joss.02376) when using [COVID-19 Data Hub](https://covid19datahub.io).
- place the URL https://covid19datahub.io in a footnote to help others find [COVID-19 Data Hub](https://covid19datahub.io/).
- you assume full risk for the use of [COVID-19 Data Hub](https://covid19datahub.io/). We try our best to guarantee the data quality and consistency and the continuous filling of the Data Hub. However, it is free software and comes with ABSOLUTELY NO WARRANTY.

The output data files are published under the [CC BY license](https://creativecommons.org/licenses/by/4.0/). All other code and assets are published under the [GPL-3 license](https://www.r-project.org/Licenses/GPL-3).

## Cite as

> *Guidotti, E., Ardia, D., (2020), "COVID-19 Data Hub", Journal of Open Source Software 5(51):2376, doi: [10.21105/joss.02376](https://doi.org/10.21105/joss.02376).*

A BibTeX entry for LaTeX users is:

```latex
@Article{guidotti2020,
    title = {COVID-19 Data Hub},
    year = {2020},
    doi = {10.21105/joss.02376},
    author = {Emanuele Guidotti and David Ardia},
    journal = {Journal of Open Source Software},
    volume = {5},
    number = {51},
    pages = {2376}
}
```

The implementation details and the latest version of the data are described in:

> *Guidotti, E., (2022), "A worldwide epidemiological database for COVID-19 at fine-grained spatial resolution", Sci Data 9(1):112, doi: [10.1038/s41597-022-01245-1](https://doi.org/10.1038/s41597-022-01245-1)*

A BibTeX entry for LaTeX users is:

```latex
@Article{guidotti2022,
    title = {A worldwide epidemiological database for COVID-19 at fine-grained spatial resolution},
    year = {2022},
    doi = {10.1038/s41597-022-01245-1},
    author = {Emanuele Guidotti},
    journal = {Scientific Data},
    volume = {9},
    number = {1},
    pages = {112}
}
```

## Comments

<div class="utterances"></div>
<script src="https://utteranc.es/client.js"
        repo="covid19datahub/COVID19"
        issue-term="pathname"
        theme="github-light"
        crossorigin="anonymous"
        async>
</script>


================================================
FILE: NAMESPACE
================================================
# Generated by roxygen2: do not edit by hand

export(add_iso)
export(covid19)
export(cumsum)
export(decreasing)
export(docstring)
export(drop_decreasing)
export(ds_check_format)
export(ds_docstring)
export(extdata)
export(id)
export(iso_docstring)
export(isoweek2date)
export(map_data)
export(map_values)
export(naming)
export(read.csv)
export(read.excel)
export(read.xsv)
export(read.zip)
export(repo)
export(write.csv)


================================================
FILE: NEWS.md
================================================
# COVID19 v3.0.0

## Data Update

- added the data on the number of people who have received at least one vaccine dose
- added the number of people who are fully vaccinated according to the vaccination protocol
- added the data on population for all the administrative areas that we provide (more than 12,000)
- added the latest policy measures by Oxford Covid-19 Government Response Tracker
- added google and apple mobility identifiers for 3000+ administrative areas
- added a new set of identifiers to enable geospatial analysis by linking to NUTS codes for Europe or to the GADM database worldwide
- to compute coordinates, the centroid is replaced by [`st_point_on_surface`](https://r-spatial.github.io/sf/reference/geos_unary.html), which guarantees to return a point on the surface of the administrative area

## Breaking changes

- the `id` for level 1 has been replaced with an 8-character alphanumeric hash code for consistency with levels 2 and 3 
- the column `key_google_mobility` now matches the Google `place_id`
- the columns `key`, `key_alpha_2`, and `key_numeric` are replaced by `key_local`; containing the administrative area identifier used by the local authorities regardless of its type, e.g., numeric, 2-alpha code, etc. Codes such as FIPS now include leading zeros
- the column `currency` is renamed to `iso_currency`
- vintage data are now shipped in SQLite databases instead of ZIP folders
- vintage data sources are now reported in PDF files, instead of `src.csv`

## Website and documentation

- new website with interactive visualization of the data and data sources
- improved documentation

## New URLs

A new set of [endpoints](/articles/data.html) is available to download the data in several ways. The following files will continue to be updated for backward compatibility, but it is strongly recommended to switch to the new URLs:

| Old URL | New URL | Description | Format | Downloads |
|------------------------------------------------|----------------------------------------------|---------------|--------|-----------|
| https://storage.covid19datahub.io/rawdata-1.csv  | https://storage.covid19datahub.io/level/1.csv  | Worldwide country-level data | CSV | ![](https://storage.covid19datahub.io/downloads/rawdata-1.csv.svg) | 
| https://storage.covid19datahub.io/rawdata-1.zip  | https://storage.covid19datahub.io/level/1.csv.zip  | Worldwide country-level data | ZIP | ![](https://storage.covid19datahub.io/downloads/rawdata-1.zip.svg) | 
| https://storage.covid19datahub.io/rawdata-2.csv  | https://storage.covid19datahub.io/level/2.csv  | Worldwide state-level data | CSV | ![](https://storage.covid19datahub.io/downloads/rawdata-2.csv.svg) | 
| https://storage.covid19datahub.io/rawdata-2.zip  | https://storage.covid19datahub.io/level/2.csv.zip  | Worldwide state-level data | ZIP | ![](https://storage.covid19datahub.io/downloads/rawdata-2.zip.svg) | 
| https://storage.covid19datahub.io/rawdata-3.csv  | https://storage.covid19datahub.io/level/3.csv  | Worldwide city-level data | CSV | ![](https://storage.covid19datahub.io/downloads/rawdata-3.csv.svg) | 
| https://storage.covid19datahub.io/rawdata-3.zip  | https://storage.covid19datahub.io/level/3.csv.zip  | Worldwide city-level data | ZIP | ![](https://storage.covid19datahub.io/downloads/rawdata-3.zip.svg) | 

## Deprecated files

The following files have been deprecated and are no longer maintained.

| URL | Description | Format | Downloads |
|-----------------------------------------------|-------------------------------------------|--------|-----------|
| ~~https://storage.covid19datahub.io/data-1.csv~~  | Pre-processed worldwide country-level data | CSV    | ![](https://storage.covid19datahub.io/downloads/data-1.csv.svg) |
| ~~https://storage.covid19datahub.io/data-2.csv~~  | Pre-processed worldwide state-level data | CSV    | ![](https://storage.covid19datahub.io/downloads/data-2.csv.svg) |
| ~~https://storage.covid19datahub.io/data-3.csv~~  | Pre-processed worldwide city-level data | CSV    | ![](https://storage.covid19datahub.io/downloads/data-3.csv.svg) |
| ~~https://storage.covid19datahub.io/data-1.zip~~  | Pre-processed worldwide country-level data | ZIP    | ![](https://storage.covid19datahub.io/downloads/data-1.zip.svg) |
| ~~https://storage.covid19datahub.io/data-2.zip~~  | Pre-processed worldwide state-level data | ZIP    | ![](https://storage.covid19datahub.io/downloads/data-2.zip.svg) |
| ~~https://storage.covid19datahub.io/data-3.zip~~  | Pre-processed worldwide city-level data | ZIP    | ![](https://storage.covid19datahub.io/downloads/data-3.zip.svg) |
| ~~https://storage.covid19datahub.io/data.log~~    | Log file                  | CSV    | ![](https://storage.covid19datahub.io/downloads/data.log.svg) |
| ~~https://storage.covid19datahub.io/rawdata.log~~ | Log file                  | CSV    | ![](https://storage.covid19datahub.io/downloads/rawdata.log.svg) |
| ~~https://storage.covid19datahub.io/src.csv~~ | Data sources | CSV | ![](https://storage.covid19datahub.io/downloads/src.csv.svg) |



================================================
FILE: R/covid19.R
================================================
#' COVID-19 Data Hub
#'
#' Builds the unified COVID-19 dataset: stacks the metadata CSV files shipped
#' in \code{inst/extdata/db/}, calls the per-country \code{iso_XXX} download
#' functions, merges the Oxford Covid-19 Government Response Tracker policy
#' measures, and runs a series of consistency checks on the result.
#'
#' @param country vector of 3-letter ISO codes for countries. If \code{NULL},
#'   all countries listed in the ISO metadata file are processed.
#' @param level integer. Granularity level. 1: country-level data. 2: state-level data. 3: city-level data.
#'
#' @return A data.frame ordered by \code{id} and \code{date}, or \code{NULL}
#'   if no data source returned any rows at the requested level.
#'
#' @source \url{https://covid19datahub.io}
#'
#' @references 
#' Guidotti, E., Ardia, D., (2020), "COVID-19 Data Hub", Journal of Open Source Software 5(51):2376, \doi{10.21105/joss.02376}.
#'
#' @keywords internal
#' 
#' @export
#' 
covid19 <- function(country = NULL, level = 1){
  
  # reject invalid granularity levels early
  if(!(level %in% 1:3))
    stop("Valid options for 'level' are:
         1: admin area level 1
         2: admin area level 2
         3: admin area level 3")
  
  # country-level (ISO) metadata; integer columns are converted to character
  # so they bind cleanly with the per-country CSV files loaded below
  iso <- extdata("db", "ISO.csv")
  iso <- mutate_if(iso, is.integer, as.character)
  
  # load the metadata CSV of every country and stack them into one table,
  # tagging each row with its 3-letter ISO code
  db <- bind_rows(lapply(iso$iso_alpha_3, function(i){
    extdata(sprintf("db/%s.csv", i)) %>%
      as.data.frame() %>%
      mutate_if(is.integer, as.character) %>%
      mutate(iso_alpha_3 = i)
  })) 

  # drop rows with missing id
  db <- db[!is.na(db$id),]
  
  # attach country-level identifiers to each sub-national row, then append
  # the country-level rows themselves
  cols <- c("iso_alpha_3", "iso_alpha_2", "iso_numeric", "iso_currency", "administrative_area_level_1")
  db <- left_join(db, iso[,cols], by = "iso_alpha_3")
  db <- bind_rows(db, iso)

  # sanity check: ids must be unique across all CSV files
  if(length(idx <- which(duplicated(db$id))))
    stop(sprintf("Duplicated ids in CSV files: %s", paste(db$id[idx], collapse = ", ")))
  
  # sanity check: Google mobility keys must be exactly 27 characters
  if(length(idx <- which(!is.na(db$key_google_mobility) & nchar(db$key_google_mobility)!=27)))
    stop(sprintf("Google mobility key not of 27 characters: %s", paste(db$id[idx], collapse = ", ")))
  
  # download the epidemiological data: each country corresponds to an
  # iso_XXX function in the package namespace
  x <- data.frame()
  if(is.null(country)) country <- iso$id_covid19datahub.io
  for(fun in country) if(exists(fun, envir = asNamespace("COVID19"), mode = "function", inherits = FALSE)) {
    
    # call the country function; errors are trapped and reported below
    y <- try(do.call(fun, args = list(level = level)))
    
    # NULL means the country has no data at this level
    if(is.null(y))
      next
    
    # warn and skip countries whose download failed
    if("try-error" %in% class(y)){
      warning(sprintf("%s: try-error", fun))
      next
    }
    
    # keep only id, date and the epidemiological variables
    # (vars() is a package helper — presumably it returns the column names
    # of the named variable group; confirm against its definition)
    y <- y[,intersect(colnames(y), c('id', 'date', vars('cases')))]
    
    # tag rows with the country code
    y$iso_alpha_3 <- fun

    # at country level the id comes from the ISO metadata    
    if(level==1)
      y$id <- iso$id[which(iso$id_covid19datahub.io==fun)]
    
    # validate the format; note: currently only warns, does not skip
    if(!ds_check_format(y, level = level)){
      warning(sprintf("%s: check failed", fun))
      # next
    }
    
    # accumulate
    x <- bind_rows(x, y)
    
  }
  
  # drop rows with missing id
  x <- x[!is.na(x$id),]
  
  # nothing to return if no data source produced rows
  if(!nrow(x))
    return(NULL)
  
  # policy measures from the Oxford Covid-19 Government Response Tracker
  o <- github.oxcgrt.covidpolicytracker(level = level)
  
  # map each id to its OxCGRT identifier via a named lookup vector
  map <- db$id_github.oxcgrt.covidpolitytracker
  names(map) <- db$id
  x$id_oxcgrt <- map[x$id]
  
  # fallback to country when id is missing
  idx <- which(is.na(x$id_oxcgrt))
  x$id_oxcgrt[idx] <- x$iso_alpha_3[idx]
  
  # merge policy measures
  x <- left_join(x, o, by = c('date','id_oxcgrt'))
  
  # fill missing columns and subset to the cases + measures variables
  key <- c('id', 'date', vars('cases'), vars('measures'))
  x[,key[!(key %in% colnames(x))]] <- NA
  x <- x[,key]
  
  # merge the administrative metadata
  x <- left_join(x, db[,intersect(colnames(db), c("id", vars("admin")))], by = "id")
  
  # fill missing columns and subset to the full variable set
  cn <- vars()
  x[,cn[!(cn %in% colnames(x))]] <- NA
  x <- x[,cn]
  
  # coerce each column group to its declared type
  x <- x %>% 
    dplyr::mutate_at('date', as.Date) %>%
    dplyr::mutate_at(vars('integer'), as.integer) %>%
    dplyr::mutate_at(vars('numeric'), as.numeric) %>%
    dplyr::mutate_at(vars('character'), as.character)
  
  # order by id and date
  x <- arrange(x, id, date)
  
  # consistency check: no missing dates
  if(length(which(idx <- is.na(x$date))))
    stop(sprintf("column 'date' contains NA values: %s", paste0(unique(x$id[idx]), collapse = ", ")))
  
  # consistency check: one row per (id, date)
  if(length(idx <- which(duplicated(x[,c("id", "date")]))))
    stop(sprintf("multiple dates per id: %s", paste0(unique(x$id[idx]), collapse = ", ")))
  
  # consistency check: dates within [2020-01-01, today]
  if(any(x$date<"2020-01-01" | x$date>Sys.Date()))
    stop("Some dates are out of range")
  
  # consistency check: area names uniquely identify a row per date
  idx <- which(duplicated(x[,c('date','administrative_area_level_1','administrative_area_level_2','administrative_area_level_3')]))
  if(length(idx))
    stop(sprintf("the tuple ('date','administrative_area_level_1','administrative_area_level_2','administrative_area_level_3') is not unique: %s", paste(unique(x$id[idx]), collapse = ", ")))
  
  # return
  x
  
}

#' Generate link to the file at the GitHub repository
#' 
#' @param x name of the iso_ or ds_ function, or name of the .csv file
#' @param csv logical. If \code{TRUE}, link to the CSV file under
#'   \code{inst/extdata/db/}; otherwise link to the R source file.
#' 
#' @keywords internal
#' 
#' @export
repo <- function(x, csv = FALSE){
  # every link points to the master branch of the public repository
  base_url <- "https://github.com/covid19datahub/COVID19/blob/master"
  # metadata CSV files live under inst/extdata/db/<x>.csv
  if(csv)
    return(sprintf("%s/inst/extdata/db/%s.csv", base_url, x))
  # 3-letter uppercase codes are country (iso_) files; anything else is a
  # data source (ds_) file
  kind <- ifelse(grepl("^[A-Z]{3}$", x), "iso", "ds")
  sprintf("%s/R/%s_%s.R", base_url, kind, x)
}

#' Naming convention
#' 
#' Maps the column names of a ds_ function's return value to the
#' human-readable labels used in the documentation, in a fixed order.
#' 
#' @param x the return of a ds_ function
#' 
#' @keywords internal
#' 
#' @export
naming <- function(x){
  # the numeric prefix enforces the display order; it is stripped after sorting
  labels <- c(
    "confirmed"               = "0 confirmed cases",
    "deaths"                  = "1 deaths",
    "recovered"               = "2 recovered",
    "tests"                   = "3 tests",
    "vaccines"                = "4 total vaccine doses administered",
    "people_vaccinated"       = "5 people with at least one vaccine dose",
    "people_fully_vaccinated" = "6 people fully vaccinated",
    "hosp"                    = "7 hospitalizations",
    "icu"                     = "8 intensive care",
    "vent"                    = "9 patients requiring ventilation"
  )
  # map_values is a package helper; unmatched column names become NA and
  # are dropped before sorting
  matched <- na.omit(map_values(colnames(x), force = TRUE, labels))
  gsub("^..", "", sort(matched))
}

#' Generate docstring to use in the ds_ files
#' 
#' Calls the data source function at levels 1-3, collects the variables it
#' returns at each supported level, and prints a ready-to-paste roxygen
#' header to the console.
#' 
#' @param ds the name of the ds_ R function
#' @param name the name of the data provider
#' @param desc the name(s) of the countries supported by the data provider, e.g., "United States".
#' @param url the link to the data provider
#' @param ... arguments passed to the ds_ function
#' 
#' @keywords internal
#' 
#' @export
ds_docstring <- function(ds, name, desc, url, ...){
  # variables returned by the data source at each administrative level
  per_level <- lapply(1:3, function(lvl){
    out <- do.call(ds, args = c(list(level = lvl), list(...)))
    if(is.null(out)) return(NULL)
    naming(out)
  })
  # levels for which the data source actually returns data
  supported <- which(!sapply(per_level, is.null))
  # one @section block per supported level
  blocks <- character(0)
  for(lvl in supported){
    bullets <- paste("#' -", per_level[[lvl]], collapse = "\n")
    blocks <- c(blocks, sprintf("#' @section Level %s:\n%s\n", lvl, bullets))
  }
  body <- paste(blocks, collapse = "#'\n")
  # document the level parameter plus any extra formal arguments of ds
  doc_params <- sprintf("#' @param level %s\n", paste(supported, collapse = ", "))
  other_args <- setdiff(names(formals(ds)), "level")
  if(length(other_args)){
    placeholders <- sapply(other_args, function(p) sprintf("#' @param %s <INSERT DESCRIPTION HERE>\n", p))
    doc_params <- paste(c(doc_params, placeholders), collapse = "")
  }
  cat(sprintf(
    "#' %s\n#'\n#' Data source for: %s\n#'\n%s#'\n%s#'\n#' @source %s\n#'\n#' @keywords internal\n#'", 
    name, desc, doc_params, body, url))
}

#' Generate docstring to use in the iso_ files
#' 
#' Prints a roxygen bullet linking the data source file and listing the
#' variables it provides.
#' 
#' @param ds the name of the ds_ R function
#' @param ... arguments passed to the ds_ function
#' 
#' @keywords internal
#' 
#' @export
iso_docstring <- function(ds, ...){
  data <- do.call(ds, args = list(...))
  bullets <- naming(data)
  # the first line of the ds_ file is its roxygen title (the provider name)
  title <- gsub("#' ", "", readLines(sprintf("R/ds_%s.R", ds))[1], fixed = TRUE)
  cat(sprintf('#\' - \\href{`r repo("%s")`}{%s}:\n#\' %s.\n#\'\n', ds, title, paste(bullets, collapse = ",\n#' ")))
}

#' Generate docstring to use in the iso_ files to list the population data source
#' 
#' @param iso the ISO code of the country
#' @param level 1, 2, 3
#' 
#' @keywords internal
#' 
#' @export
docstring <- function(iso, level){
  # sub-national metadata lives in <iso>.csv; country-level in the global ISO.csv
  if(level!=1){
    meta <- extdata(sprintf("db/%s.csv", iso))
    url  <- repo(iso, csv = TRUE)
  }
  else{
    meta <- extdata("db/ISO.csv") %>% dplyr::filter(id_covid19datahub.io==iso)
    url  <- repo("ISO", csv = TRUE)
  }
  sources <- na.omit(unique(meta$population_data_source[meta$administrative_area_level==level]))
  if(!length(sources)) return(NULL)
  links <- sprintf("\\href{%s}{%s}", url, sources)  
  sprintf(" - %s: population.", paste(links, collapse = ", "))
}

# package-wide in-memory cache shared by all memoised calls
cachedata <- new.env(hash = TRUE)

# Call `fun` with the given arguments, memoising the result in `cachedata`.
# A `cache` argument in `...` (default TRUE) toggles the memoisation; note
# that it is also forwarded to `fun` as part of the arguments.
cachecall <- function(fun, ...){
  
  args <- list(...)
  use_cache <- ifelse(is.null(args$cache), TRUE, args$cache)
  # key combines the deparsed function source with the name/value of each argument
  key <- make.names(sprintf("%s_%s",paste(deparse(fun), collapse = ''),paste(names(args),args,sep = ".",collapse = "..")))
  
  # cache hit: return the stored value without calling `fun`
  if(use_cache && exists(key, envir = cachedata))
    return(get(key, envir = cachedata))
  
  x <- do.call(fun, args = args)
  
  if(use_cache)
    assign(key, x, envir = cachedata)
  
  return(x)
  
}

#' Convert identifiers
#'
#' Map the ids of administrative areas used by the data provider to the 
#' identifiers used in the Data Hub.
#' 
#' @param x vector of identifiers used by the data source to identify administrative areas.
#' @param iso the 3 letter ISO code of the country.
#' @param ds the name of the data source function.
#' @param level the level of the administrative areas.
#' 
#' @return converted vector of identifiers to use in the Data Hub.
#' 
#' @keywords internal
#' 
#' @export
id <- function(x, iso, ds, level){
  
  # lookup table for this country, restricted to the requested admin level
  db <- extdata("db",sprintf("%s.csv",iso))
  db <- db[which(db$administrative_area_level==level),]
  
  # data source ids -> Data Hub ids
  lookup        <- db$id
  names(lookup) <- db[[sprintf("id_%s",ds)]]
  
  x <- map_values(x, lookup)
  # anything not mapped to a known Data Hub id is reported and dropped
  unknown <- which(!(x %in% lookup))
  if(length(unknown)){
    warning(sprintf("missing id: %s", paste0(unique(x[unknown]), collapse = ", ")))
    x[unknown] <- NA
  }
  
  return(x)
  
}

# Column catalogue of the Data Hub dataset.
# Returns the variable names for a given group ("cum", "spot", "measures",
# "admin", "cases"), for a given storage type (e.g. "numeric", "integer",
# "character"), or the full ordered column list when `type` is NULL.
vars <- function(type = NULL){
  
  # cumulative epidemiological counts (names encode the storage type)
  cum <- c(
    'numeric' = 'confirmed',
    'numeric' = 'deaths',
    'numeric' = 'recovered',
    'numeric' = 'tests',
    'numeric' = 'vaccines',
    'numeric' = 'people_vaccinated',
    'numeric' = 'people_fully_vaccinated'
  )
  
  # point-in-time (non-cumulative) counts
  spot <- c(
    'numeric' = 'hosp',
    'numeric' = 'icu',
    'numeric' = 'vent'
  )
  
  # policy measures and indices
  measures <- c(
    'integer' = 'school_closing',
    'integer' = 'workplace_closing',
    'integer' = 'cancel_events',
    'integer' = 'gatherings_restrictions',
    'integer' = 'transport_closing',
    'integer' = 'stay_home_restrictions',
    'integer' = 'internal_movement_restrictions',
    'integer' = 'international_movement_restrictions',
    'integer' = 'information_campaigns',
    'integer' = 'testing_policy',
    'integer' = 'contact_tracing',
    'integer' = 'facial_coverings',
    'integer' = 'vaccination_policy',
    'integer' = 'elderly_people_protection',
    'numeric' = 'government_response_index',
    'numeric' = 'stringency_index',
    'numeric' = 'containment_health_index',
    'numeric' = 'economic_support_index'
  )
  
  # administrative metadata and external keys
  admin <- c(
    'character' = 'iso_alpha_3',
    'character' = 'iso_alpha_2',
    'integer'   = 'iso_numeric',
    'character' = 'iso_currency',
    'integer'   = 'administrative_area_level',
    'character' = 'administrative_area_level_1',
    'character' = 'administrative_area_level_2',
    'character' = 'administrative_area_level_3',
    'numeric'   = 'latitude',
    'numeric'   = 'longitude',
    'integer'   = 'population',
    'character' = 'key_local',
    'character' = 'key_google_mobility',
    'character' = 'key_apple_mobility',
    'character' = 'key_jhu_csse',
    'character' = 'key_nuts',
    'character' = 'key_gadm'
  )
  
  # no type requested: the full, ordered, de-duplicated column list
  if(is.null(type))
    return(unname(unique(c('id', 'date', cum, spot, 'population', measures, admin))))
  
  # dispatch on the group name; any other value is treated as a storage type
  out <- switch(type,
    "cum"      = cum,
    "spot"     = spot,
    "measures" = measures,
    "admin"    = admin,
    "cases"    = c(cum, spot),
    {
      all <- c(cum, spot, measures, admin)
      all[which(names(all)==type)]
    }
  )
  
  return(unname(out))
  
}

#' Cumulative Sums
#' 
#' Returns a numeric object whose elements are the cumulative sums of the elements of the argument.
#' Masks \code{base::cumsum} so that missing values do not poison the rest of the cumulated series.
#' 
#' @param x a numeric object.
#' @param na.rm logical. Should missing values be removed? Default \code{TRUE}.
#' 
#' @details 
#' If \code{na.rm=TRUE}, then \code{NA} are treated as \code{0} when computing the cumulative sum,
#' and the positions that were \code{NA} in the input are set back to \code{NA} in the output.
#' 
#' @keywords internal
#' 
#' @export
cumsum <- function(x, na.rm = TRUE){
  
  if(!na.rm)
    return(base::cumsum(x))
  
  # treat NA as 0 so the running total keeps accumulating past the gap...
  miss <- is.na(x)
  x[miss] <- 0
  
  x <- base::cumsum(x)
  # ...but restore NA at the originally missing positions
  x[miss] <- NA
  
  return(x)
  
}

#' External Data
#' 
#' Read files in the inst/extdata/ folder.
#' 
#' @param ... path to file
#' 
#' @return \code{data.frame}, or \code{NULL} when the file does not exist.
#' 
#' @keywords internal
#' 
#' @export
extdata <- function(...){
  
  # resolve the path inside the installed package ("" when not found)
  path <- system.file("extdata", ..., package = "COVID19")
  if(!file.exists(path))
    return(NULL)
  
  # blank fields are missing values; keep strings as character, not factor
  utils::read.csv(path, na.strings = "", stringsAsFactors = FALSE, encoding = "UTF-8")
  
}

#' Add XXX.csv file
#' 
#' Add new country in the inst/extdata/db/ folder.
#' Writes (or appends to) the file XXX.csv in the current working directory
#' and returns the resulting \code{data.frame}.
#' 
#' @param x \code{data.frame} generated by a data source function.
#' @param iso ISO code (3 letters).
#' @param ds name of the data source function generating \code{x}.
#' @param map named vector mapping the columns of \code{x} to the columns of the XXX.csv file.
#' @param append logical. Append the data to the XXX.csv file if it already exists? Default \code{TRUE}.
#' @param level integer. Granularity level. 1: country-level data. 2: state-level data. 3: city-level data.
#' 
#' @return \code{data.frame}
#' 
#' @keywords internal
#' 
#' @export
add_iso <- function(x, iso, ds, level, map = c("id"), append = TRUE){
  
  # country-level (level 1) entries are kept in the global ISO.csv, not here
  if(!level %in% 2:3)
    stop("level must be 2 or 3")
  
  # standard columns of the XXX.csv files; id_<ds> holds the provider's own id
  id_ds <- sprintf("id_%s", ds)
  key   <- c('id',id_ds,'administrative_area_level','administrative_area_level_2','administrative_area_level_3','latitude','longitude','population')
  map   <- c(map, key[!(key %in% map)])
  
  # subset/rename the columns of x and drop duplicated rows
  x <- map_data(x, map)
  x <- x[!duplicated(x),,drop=FALSE]
  if(!("id" %in% colnames(x)))
    stop("specify the 'id' column using the 'map' argument, eg. map = c('column' = 'id')")
  
  # keep the provider id in id_<ds>, and hash (iso, id) into the Data Hub id
  x[[id_ds]] <- x$id 
  x$id <- sapply(x$id, FUN = function(x) digest::digest(c(iso, x), algo = 'crc32'))
  
  # create any standard column the data source did not provide
  x[,key[!(key %in% colnames(x))]] <- NA
  x$administrative_area_level      <- level
  
  file <- sprintf("%s.csv", iso)
  
  if(append){
    
    # prefer a local working copy; fall back to the installed package data
    if(file.exists(file))
      y <- read.csv(file, cache = FALSE)
    else
      y <- extdata("db", file)
    
    x <- dplyr::bind_rows(y, x)
    
  }
  
  # column order: id first, then all id_* columns, standard columns, key_* columns
  cn  <- colnames(x)
  key <- unique(c("id", cn[grepl("^id\\_", cn)], key, cn[grepl("^key(\\_|$)", cn)]))
  x   <- x[,key]
  
  # side effect: writes <iso>.csv in the current working directory
  write.csv(x, file)
  cat(sprintf("File saved: %s", file))
  
  return(x)
  
}

#' Map values
#' 
#' Map values of a vector. Matching against the names of \code{map} is
#' case-insensitive. Values with no match are left unchanged, unless
#' \code{force=TRUE}, in which case they are replaced with \code{NA}.
#' 
#' @param x vector.
#' @param map named vector mapping names to values.
#' @param force logical. Replace unmatched values with \code{NA}? Default \code{FALSE} (keep them as-is).
#' 
#' @return \code{vector}.
#' 
#' @keywords internal
#' 
#' @export
map_values <- function(x, map, force = FALSE){
  
  # case-insensitive matching: compare lowercased values against lowercased names
  value <- tolower(x)
  from  <- tolower(names(map))
  to    <- map
  
  # unmatched entries default to NA when force=TRUE, to the original value otherwise
  if(force)
    y <- rep(NA, length(x))
  else 
    y <- x
  
  # seq_along is safe for an empty map (1:length(map) would yield c(1, 0))
  for(i in seq_along(map)){
    idx <- which(value==from[i])
    if(length(idx)>0)
      y[idx] <- to[i]
  }
  
  return(y)
  
}

#' Map Data
#' 
#' Subset a \code{data.frame} and change column names.
#' 
#' @param x \code{data.frame}
#' @param map named vector. Map columns of \code{x} and subset.
#' 
#' @return \code{data.frame}
#' 
#' @keywords internal
#' 
#' @export
map_data <- function(x, map){
  
  # the names of `map` are the source columns; when the whole vector is
  # unnamed, the values double as the source column names
  from <- names(map)
  if(is.null(from))
    from <- map
  
  # entries with an empty name keep the column under its original name
  blank <- which(from=="")
  if(length(blank))
    from[blank] <- unname(map)[blank]
  
  # keep only the mapped columns that actually exist, then rename
  x <- x[,intersect(from, colnames(x)), drop = FALSE]
  colnames(x) <- map_values(colnames(x), map)
  
  return(x)
  
}

#' Decreasing counts
#'
#' Check which elements in a numeric vector are decreasing with respect to the previous elements
#'
#' @param x numeric vector
#' @param k vector of lags (e.g., \code{k=1:7} returns \code{TRUE} if an element is decreasing with respect to any of the previous 7 elements)
#' @param strict logical indicating whether only strictly decreasing counts should be considered
#'
#' @return \code{logical} indicating decreasing elements
#' 
#' @keywords internal
#' 
#' @export
decreasing <- function(x, k = 1, strict = TRUE){
  # choose the comparison once: strictly below vs. below-or-equal
  cmp <- if(strict) `<` else `<=`
  # one column per lag: TRUE where x drops w.r.t. the value `lag` steps back;
  # the first `lag` positions have no predecessor and are padded with FALSE
  drops <- sapply(k, function(lag) c(rep(FALSE, lag), cmp(diff(x, lag = lag), 0)))
  # an element is decreasing if it drops w.r.t. ANY of the requested lags
  apply(drops, 1, any)
}

#' Drop decreasing counts
#' 
#' @param x \code{data.frame} containing the column \code{date}
#' @param by vector to group by
#' @param cols vector of columns to clean
#' @param k vector of lags (e.g., \code{k=1:7} removes values that are decreasing with respect to any of the previous 7 values)
#' @param strict logical indicating whether only strictly decreasing counts should be dropped
#' 
#' @return \code{x} where decreasing counts are replaced with \code{NA} 
#' 
#' @keywords internal
#' 
#' @export
drop_decreasing <- function(x, by, cols, k, strict){
  # blank out any count that drops with respect to one of the previous k values
  blank <- function(v) replace(v, decreasing(v, k = k, strict = strict), NA)
  x %>%
    group_by_at(by) %>%
    arrange(date) %>%
    mutate(across(all_of(cols), blank))
}

#' Data Output
#' 
#' Write csv in UTF-8. Thin wrapper around \code{utils::write.csv} with the
#' defaults used throughout the package: no row names, blank \code{NA}, UTF-8.
#' 
#' @param x the object to be written, preferably a matrix or data frame. If not, it is attempted to coerce x to a data frame.
#' @param file either a character string naming a file or a connection open for writing. "" indicates output to the console.
#' @param row.names either a logical value indicating whether the row names of x are to be written along with x, or a character vector of row names to be written.
#' @param na the string to use for missing values in the data.
#' @param fileEncoding character string: if non-empty declares the encoding to be used on a file (not a connection) so the character data can be re-encoded as they are written. 
#' @param ... arguments passed to \code{\link[utils:write.table]{write.csv}}
#' 
#' @return return value of \code{\link[utils:write.table]{write.csv}}
#' 
#' @keywords internal
#' 
#' @export
write.csv <- function(x, file, row.names = FALSE, na = "", fileEncoding = "UTF-8", ...){
  # delegate to utils, forwarding the package defaults explicitly
  utils::write.csv(x, file = file,
                   row.names = row.names,
                   na = na,
                   fileEncoding = fileEncoding,
                   ...)
}

#' Data Input (csv)
#' 
#' Reads a file in table format and creates a data frame from it, with cases corresponding to lines and variables to fields in the file.
#' 
#' @param file the name of the file which the data are to be read from. Each row of the table appears as one line of the file. If it does not contain an absolute path, the file name is relative to the current working directory, getwd(). Tilde-expansion is performed where supported. This can be a compressed file.
#' @param cache logical. Memory caching? Default \code{FALSE}.
#' @param na.strings a character vector of strings which are to be interpreted as \code{NA} values. Blank fields are also considered to be missing values in logical, integer, numeric and complex fields. Note that the test happens after white space is stripped from the input, so \code{na.strings} values may need their own white space stripped in advance.
#' @param stringsAsFactors logical: should character vectors be converted to factors?
#' @param encoding encoding to be assumed for input strings. It is used to mark character strings as known to be in Latin-1 or UTF-8: it is not used to re-encode the input, but allows R to handle encoded strings in their native encoding. 
#' @param ... arguments passed to \code{\link[utils:write.table]{read.csv}}
#' 
#' @return return value of \code{\link[utils:write.table]{read.csv}}
#' 
#' @keywords internal
#' 
#' @export
read.csv <- function(file, cache = FALSE, na.strings = "", stringsAsFactors = FALSE, encoding = "UTF-8", ...){
  
  # route through the memoising wrapper when caching is requested
  if(cache)
    return(cachecall(utils::read.csv, file = file, na.strings = na.strings, stringsAsFactors = stringsAsFactors, encoding = encoding, ...))
  
  utils::read.csv(file = file, na.strings = na.strings, stringsAsFactors = stringsAsFactors, encoding = encoding, ...)
  
}

#' Data Input (excel)
#' 
#' Read xls and xlsx files.
#' 
#' @param path Path to the xls/xlsx file.
#' @param cache logical. Memory caching? Default \code{FALSE}.
#' @param sheet Sheet to read. Either a string (the name of a sheet), or an integer (the position of the sheet). Ignored if the sheet is specified via range. If neither argument specifies the sheet, defaults to all sheets.
#' @param ... arguments passed to \code{\link[readxl]{read_excel}}
#' 
#' @return list of \code{data.frame}
#' 
#' @examples 
#' \dontrun{
#' 
#' url <- "https://epistat.sciensano.be/Data/COVID19BE.xlsx"
#' x   <- read.excel(url, cache = TRUE)  
#' 
#' }
#' 
#' @keywords internal
#' 
#' @export
read.excel <- function(path, cache = FALSE, sheet = NA, ...) {
  
  # readxl cannot read from a url: download to a temporary file first
  from_url <- function(path, sheet, ...) {
    
    tmp <- tempfile()
    utils::download.file(path, destfile = tmp, mode = "wb", quiet = TRUE)
    
    # no sheet given: read every sheet, returned as a named list
    if(all(is.na(sheet))) {
      
      sheet_names <- readxl::excel_sheets(path = tmp)
      
      x <- lapply(sheet_names, function(s) readxl::read_excel(path = tmp, sheet = s))
      names(x) <- sheet_names
      
    }
    # several sheets requested: named list keyed by the requested sheets
    else if (length(sheet) > 1){

      x <- lapply(sheet, function(s) readxl::read_excel(path = tmp, sheet = s))
      names(x) <- sheet

    }
    # single sheet: return the data frame directly
    else {
      
      x <- readxl::read_excel(path = tmp, sheet = sheet, ...)
      
    }
    
    return(x)
    
  }
  
  # readxl::read_excel supports only local files (not http, https, ftp)
  is_url <- grepl(x = path, pattern = "^(http:\\/\\/)|(https:\\/\\/)|(ftp:\\/\\/)")
  reader <- if(is_url) from_url else readxl::read_excel
  
  if(cache)
    return(cachecall(reader, path = path, sheet = sheet, ...))
  
  reader(path = path, sheet = sheet, ...)
  
}

#' Data Input (csv)
#' 
#' Reads specific columns of a csv file by using the xsv command line utility
#' https://github.com/BurntSushi/xsv
#' 
#' @param file the path to the csv file
#' @param select character vector of column names to extract
#' @param ... additional arguments passed to \code{\link[data.table]{fread}}
#' 
#' @return data.table
#' 
#' @keywords internal
#' 
#' @export
read.xsv <- function(file, select, ...){
  # let the external xsv binary do the column selection, then parse its output
  cmd <- sprintf("xsv select %s %s", paste0(select, collapse = ","), file)
  data.table::fread(cmd = cmd, showProgress = FALSE, ...)
}

#' Data Input (zip)
#' 
#' Reads files from a zip folder. The archive is downloaded to a temporary
#' file, the requested entries are read, and all temporary files are removed.
#' 
#' @param zip path (url) to the zip folder.
#' @param files vector of filenames to read inside the zip folder.
#' @param cache logical. Memory caching? Default \code{FALSE}.
#' @param fread logical indicating whether \code{\link[data.table]{fread}} should be used
#' @param xsv logical indicating whether \code{\link{read.xsv}} should be used. If \code{TRUE}, the argument \code{select} must be specified
#' @param method the download method passed to \code{\link[utils]{download.file}}
#' @param ... arguments passed to \code{\link{read.csv}} or \code{\link{read.excel}}.
#' 
#' @return list of data frames
#' 
#' @keywords internal
#' 
#' @export
read.zip <- function(zip, files, cache = FALSE, fread = FALSE, xsv = FALSE, method = "auto", ...){
  
  # inner worker; fread, xsv and method are captured from the enclosing call
  read.zip <- function(zip, files, ...){
    
    # download the archive to a temporary file
    temp <- tempfile()
    utils::download.file(zip, temp, method = method, quiet = TRUE)
    
    # fread/xsv need real files on disk: extract the requested entries first
    if(fread | xsv){
      exdir <- tempdir()
      unzip(temp, exdir = exdir, files = files, unzip = "unzip", junkpaths = TRUE)
    }
    
    # pick a reader per file: Excel entries via readxl, otherwise xsv/fread/read.csv
    x <- lapply(files, function(file){
      if(grepl("\\.xlsx?$", file))
        readxl::read_excel(unz(temp, file), ...)
      else if(xsv)
        read.xsv(sprintf("%s/%s", exdir, file), ...)
      else if(fread)
        data.table::fread(sprintf("%s/%s", exdir, file), showProgress = FALSE, ...)
      else
        read.csv(unz(temp, file), cache = FALSE, ...)  
    })
    
    # clean up the archive and any extracted files
    unlink(temp)
    if(fread | xsv) for(file in files) 
      unlink(sprintf("%s/%s", exdir, file))
    
    return(x)
    
  }
  
  # NOTE(review): the cache key is built from zip/files (and the worker's source)
  # only; fread, xsv and method are captured by the closure and not part of the
  # key -- confirm callers never request the same zip with different readers.
  if(cache)
    x <- cachecall(read.zip, zip = zip, files = files, ...)
  else 
    x <- read.zip(zip = zip, files = files, ...)
  
  return(x)
  
}

#' Check Data Source Format
#' 
#' Checks if the output of a data source function is correctly formatted. 
#' The function checks the FORMAT, NOT the DATA.
#' Before submission, the data should be double checked by comparing with external data sources (e.g. Google search).
#' 
#' @param x output of a data source function.
#' @param level integer. Granularity level. 1: country-level data. 2: state-level data. 3: city-level data.
#' @param ci numeric in (0,1). Minimum share of observations that must satisfy each consistency check for it to pass. Default \code{0.8}.
#' 
#' @return logical. 
#' 
#' @keywords internal
#' 
#' @export
ds_check_format <- function(x, level, ci = 0.8) {
  
  # soft check: passes when the share of TRUE values exceeds `ci` (or the
  # condition is all-NA); warns with `message` and returns FALSE otherwise
  check <- function(c, message) {
    c <- mean(c, na.rm = TRUE) > ci
    if(is.na(c)) 
      c <- TRUE
    if(!c) 
      warning(message)
    return(c)
  }
  
  # fallback
  if(!any(vars("cases") %in% colnames(x))){
    warning("no valid column detected. Please rename the columns according to the documentation available at https://covid19datahub.io/articles/docs.html")
    return(FALSE)
  }
  
  # id missing 
  if(!("id" %in% colnames(x))){
    if(level>1){
      warning("column 'id' missing. Please add the id for each location (required for level > 1)")
      return(FALSE)
    }
    else{ 
      # level 1 has a single location: a dummy id is enough
      x$id <- "id"
    }
  }
  
  # subset: drop rows without id and columns that are entirely NA
  x      <- x[!is.na(x$id),]
  x      <- x[, apply(x, 2, function(x) any(!is.na(x))), drop=FALSE]
  cols   <- colnames(x)
  status <- TRUE
  
  # date missing 
  if(!("date" %in% cols)){
    warning("column 'date' missing. Please add the date for each observation")
    return(FALSE)
  }
  
  # NA dates
  if(any(is.na(x$date))){
    warning("column date contains NA values")
    return(FALSE)
  }
  
  # check date column is date
  status <- status & check(inherits(x$date, c("Date")),
                           "column date of wrong type")
  
  # check duplicated dates (at most one observation per id and date)
  if(length(idx <- which(duplicated(x[,c("id", "date")])))){
    warning(sprintf("multiple dates per id: %s", paste0(unique(x$id[idx]), collapse = ", ")))
    return(FALSE)
  }
  
  # check data types
  for(col in intersect(cols, c('tests','confirmed','recovered','deaths','hosp','vent','icu'))){
    if(!is.numeric(x[[col]])){
      warning(sprintf("%s not of class numeric", col))
      return(FALSE)
    }
  }
  
  # pairwise sanity checks: each only runs when both columns are present,
  # and tolerates up to (1-ci) of violations via the soft `check` above
  
  # deaths <= confirmed
  if("confirmed" %in% cols & "deaths" %in% cols)
    status <- status & check(ci < mean(x$deaths <= x$confirmed, na.rm = TRUE), "deaths > confirmed")

  # confirmed <= tests
  if("confirmed" %in% cols & "tests" %in% cols)
    status <- status & check(ci < mean(x$confirmed <= x$tests, na.rm = TRUE), "confirmed > tests")
  
  # recovered <= confirmed
  if("recovered" %in% cols & "confirmed" %in% cols)
    status <- status & check(ci < mean(x$recovered <= x$confirmed, na.rm = TRUE), "recovered > confirmed")
  
  # people_vaccinated <= vaccines 
  if("vaccines" %in% cols & "people_vaccinated" %in% cols)
    status <- status & check(ci < mean(x$people_vaccinated <= x$vaccines, na.rm = TRUE), "people_vaccinated > vaccines")
  
  # people_fully_vaccinated <= vaccines
  if("vaccines" %in% cols & "people_fully_vaccinated" %in% cols)
    status <- status & check(ci < mean(x$people_fully_vaccinated <= x$vaccines, na.rm = TRUE), "people_fully_vaccinated > vaccines")
  
  # people_fully_vaccinated <= people_vaccinated
  if("people_vaccinated" %in% cols & "people_fully_vaccinated" %in% cols)
    status <- status & check(ci < mean(x$people_fully_vaccinated <= x$people_vaccinated, na.rm = TRUE), "people_fully_vaccinated > people_vaccinated")
  
  # icu <= hosp
  if("icu" %in% cols & "hosp" %in% cols)
    status <- status & check(ci < mean(x$icu <= x$hosp, na.rm = TRUE), "icu > hosp")
  
  # vent <= confirmed
  if("vent" %in% cols & "confirmed" %in% cols)
    status <- status & check(ci < mean(x$vent <= x$confirmed, na.rm = TRUE), "vent > confirmed")
  
  # vent <= icu
  if("vent" %in% cols & "icu" %in% cols)
    status <- status & check(ci < mean(x$vent <= x$icu, na.rm = TRUE), "vent > icu")
  
  # check cumulative/non-cumulative
  # (absent columns are replaced with a constant 0 so the summaries below
  # always evaluate; a constant series trivially passes the non-negative
  # difference checks)
  y <- x %>%
    
    dplyr::mutate(
      deaths    = if("deaths" %in% cols) deaths else 0,
      confirmed = if("confirmed" %in% cols) confirmed else 0,
      tests     = if("tests" %in% cols) tests else 0,
      vaccines  = if("vaccines" %in% cols) vaccines else 0,
      people_vaccinated       = if("people_vaccinated" %in% cols) people_vaccinated else 0,
      people_fully_vaccinated = if("people_fully_vaccinated" %in% cols) people_fully_vaccinated else 0,
      recovered = if("recovered" %in% cols) recovered else 0,
      hosp      = if("hosp" %in% cols) hosp else 0,
      vent      = if("vent" %in% cols) vent else 0,
      icu       = if("icu" %in% cols) icu else 0 ) %>%
    
    dplyr::group_by_at('id') %>%
    dplyr::arrange_at('date') %>%
    
    # detect negative derivation
    dplyr::summarise(
      d_confirmed_tests  = ci < mean(diff(confirmed) <= diff(tests), na.rm = T),
      d_deaths_nonneg    = ci < mean(diff(deaths)    >= 0, na.rm = T),
      d_confirmed_nonneg = ci < mean(diff(confirmed) >= 0, na.rm = T),
      d_tests_nonneg     = ci < mean(diff(tests)     >= 0, na.rm = T),
      d_vaccines_nonneg  = ci < mean(diff(vaccines)     >= 0, na.rm = T),
      d_people_vaccinated_nonneg       = ci < mean(diff(people_vaccinated)     >= 0, na.rm = T),
      d_people_fully_vaccinated_nonneg = ci < mean(diff(people_fully_vaccinated)     >= 0, na.rm = T),
      d_recovered_nonneg = ci < mean(diff(recovered) >= 0, na.rm = T),
      d_hosp_anyneg      = all(diff(hosp)==0, na.rm = T) | any(diff(hosp) < 0, na.rm = T),
      d_vent_anyneg      = all(diff(vent)==0, na.rm = T) | any(diff(vent) < 0, na.rm = T),
      d_icu_anyneg       = all(diff(icu)==0, na.rm = T)  | any(diff(icu)  < 0, na.rm = T) )
  
  # daily confirmed <= daily tests
  # status <- status & check(y$d_confirmed_tests, "confirmed > tests")
  
  # deaths not descending
  status <- status & check(y$d_deaths_nonneg,
                           "are you sure 'deaths' are cumulative counts?")
  # confirmed not descending
  status <- status & check(y$d_confirmed_nonneg,
                           "are you sure 'confirmed' are cumulative counts?")
  # tests not descending
  status <- status & check(y$d_tests_nonneg,
                           "are you sure 'tests' are cumulative counts?")
  
  # vaccines not descending
  status <- status & check(y$d_vaccines_nonneg,
                           "are you sure 'vaccines' are cumulative counts?")
  
  # people_vaccinated not descending
  status <- status & check(y$d_people_vaccinated_nonneg,
                           "are you sure 'people_vaccinated' are cumulative counts?")
  
  # people_fully_vaccinated not descending
  status <- status & check(y$d_people_fully_vaccinated_nonneg,
                           "are you sure 'people_fully_vaccinated' are cumulative counts?")
  
  # recovered not descending
  status <- status & check(y$d_recovered_nonneg,
                           "are you sure 'recovered' are cumulative counts?")
  
  # hosp not cumulative (any descending)
  status <- status & check(y$d_hosp_anyneg,
                           "are you sure 'hosp' are NOT cumulative counts?")
  # vent not cumulative (any descending)
  status <- status & check(y$d_vent_anyneg,
                           "are you sure 'vent' are NOT cumulative counts?")
  # icu not cumulative (any descending)
  status <- status & check(y$d_icu_anyneg,
                           "are you sure 'icu' are NOT cumulative counts?")
  
  # return
  return(status)
}

#' ISO week to date
#' 
#' Converts ISO week (e.g., 202009) to date (e.g. 2020-02-24)
#' 
#' @param isoweeks vector of isoweeks in the format YYYYWW (4-digit year followed by 2-digit week number).
#' @param day integer between 1 (Mondays) and 7 (Sundays). 
#' 
#' @return Date. 
#' 
#' @keywords internal
#' 
#' @export
isoweek2date <- function(isoweeks, day){
  
  # split YYYYWW into the 4-digit year and the 2-digit week number
  year <- as.integer(isoweeks / 100)
  week <- isoweeks - year * 100
  
  # NOTE(review): MMWRweek2Date returns the start of the MMWR week and `day` is
  # added as an offset -- confirm the 1=Monday ... 7=Sunday convention above
  # against the MMWRweek package documentation.
  return(MMWRweek::MMWRweek2Date(year, week) + day)
  
}


================================================
FILE: R/ds_admin.ch.R
================================================
#' Federal Office of Public Health
#'
#' Data source for: Switzerland and Liechtenstein
#'
#' @param level 1, or 2 (only for Switzerland)
#' @param state one of CH (Switzerland) or FL (Liechtenstein)
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @source https://www.covid19.admin.ch/en/overview
#'
#' @keywords internal
#'
admin.ch <- function(level, state = NULL) {
    # NOTE(review): state==... errors when state is NULL (the default), so
    # callers must always pass state explicitly -- confirm with the iso_ files.
    if(state=="FL" & level!=1) return(NULL)
    if(state=="CH" & !level %in% 1:2) return(NULL)
    
    # metadata: the API context lists the urls of the individual csv files
    meta <- jsonlite::fromJSON("https://www.covid19.admin.ch/api/data/context")
    csv <- meta$sources$individual$csv
    
    # total vaccine doses 
    x <- read.csv(csv$vaccDosesAdministered, na.strings = "NA")
    vaccines <- map_data(x, c(
        "date"      = "date",
        "geoRegion" = "code",
        "sumTotal"  = "vaccines"
    ))
    
    # people vaccinated
    x <- read.csv(csv$vaccPersonsV2, na.strings = "NA")
    vaccinated <- map_data(x, c(
        "date"      = "date",
        "geoRegion" = "code",
        "sumTotal"  = "total",
        "type"      = "type",
        "age_group" = "age"
    )) 
    # filter by total population and pivot
    vaccinated <- vaccinated %>%
        filter(age=="total_population") %>%
        pivot_wider(id_cols = c("code", "date"), names_from = "type", values_from = "total") %>%
        rename(people_vaccinated = COVID19AtLeastOneDosePersons)

    # people fully vaccinated: taken from the Data Hub's own historical storage
    urls <- c("CH" = "https://storage.covid19datahub.io/country/CHE.csv",
              "FL" = "https://storage.covid19datahub.io/country/LIE.csv")
    labels <- c("CH" = "CH", "FL" = "LIE")
    
    # map the stored rows back to the provider codes used in this function:
    # country-level rows (key_local is NA) take the state code, cantons keep key_local
    past_data <- read.csv(urls[state])
    fully_vaccinated <- past_data %>%
      mutate(
        code = ifelse(is.na(key_local) & administrative_area_level == 1, state, key_local)
      ) %>%
      select(date, code, people_fully_vaccinated)
              
    
    # confirmed
    x <- read.csv(csv$daily$cases, na.strings = "NA")
    confirmed <- map_data(x, c(
        "datum"     = "date",
        "geoRegion" = "code",
        "sumTotal"  = "confirmed"
    ))
    
    # confirmed weekly (isoweek converted to the Sunday closing the week)
    x <- read.csv(csv$weekly$default$cases, na.strings = "NA")
    confirmed_w <- map_data(x, c(
      "datum"     = "date",
      "geoRegion" = "code",
      "sumTotal"  = "confirmed"
    ))
    confirmed_w$date <- as.character(isoweek2date(confirmed_w$date, 7))

    # deaths
    x <- read.csv(csv$daily$death, na.strings = "NA")
    deaths <- map_data(x, c(
        "datum"     = "date",
        "geoRegion" = "code",
        "sumTotal"  = "deaths"
    ))
    
    # deaths weekly
    x <- read.csv(csv$weekly$default$death, na.strings = "NA")
    deaths_w <- map_data(x, c(
      "datum"     = "date",
      "geoRegion" = "code",
      "sumTotal"  = "deaths"
    ))
    deaths_w$date <- as.character(isoweek2date(deaths_w$date, 7))
    
    # tests weekly (only a weekly series is read for tests)
    x <- read.csv(csv$weekly$default$test, na.strings = "NA")
    tests_w <- map_data(x, c(
      "datum"     = "date",
      "geoRegion" = "code",
      "sumTotal"  = "tests"
    ))
    tests_w$date <- as.character(isoweek2date(tests_w$date, 7))
    
    # hosp: keep only the "nfp" capacity variant
    x <- read.csv(csv$daily$hospCapacity, na.strings = "NA")
    x <- x[x$type_variant=="nfp",]
    hosp <- map_data(x, c(
        "date"      = "date",
        "geoRegion" = "code",
        "Total_Covid19Patients"  = "hosp",
        "ICU_Covid19Patients"    = "icu"
    ))
    
    # drop the hosp column for Liechtenstein (only icu is kept)
    # NOTE(review): presumably the hosp figures at this endpoint do not cover FL -- confirm
    if (state == "FL") {
      hosp$hosp <- NULL
    }
    
    # merge; the weekly confirmed/deaths series only extend the daily ones
    # beyond their last available date
    by <- c("code", "date")
    x <- vaccines %>%
        full_join(vaccinated, by = by) %>%
        full_join(fully_vaccinated, by = by) %>%
        full_join(hosp, by = by) %>%
        full_join(tests_w, by = by) %>%
        full_join(bind_rows(
          confirmed, 
          confirmed_w[confirmed_w$date > max(confirmed$date),]
        ), by = by) %>%
        full_join(bind_rows(
          deaths, 
          deaths_w[deaths_w$date > max(deaths$date),]
        ), by = by)

    # clean code
    x <- x[!is.na(x$code),]
    
    # filter by state
    if(level==1){
        x <- x[x$code==state,]
    }
    # select only Swiss cantons
    else{
        x <- x[x$code %in% c(
          "AG", "AI", "AR", "BE", "BL", "BS", "FR", "GE",
          "GL", "GR", "JU", "LU", "NE", "NW", "OW", "SG",
          "SH", "SO", "SZ", "TG", "TI", "UR", "VD", "VS",
          "ZG", "ZH"
        ),]
    }
    
    # convert date
    x$date <- as.Date(x$date)
    
    return(x)
}


================================================
FILE: R/ds_arcgis.de.R
================================================
#' Robert Koch Institute
#'
#' Data source for: Germany
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - recovered
#'
#' @source https://www.arcgis.com/home/item.html?id=f10774f1c63e40168479a1feb6c7ca74
#'
#' @keywords internal
#'
arcgis.de <- function(level){
  if(!level %in% 1:3) return(NULL)
  
  # download the full line-list csv
  url <- "https://www.arcgis.com/sharing/rest/content/items/f10774f1c63e40168479a1feb6c7ca74/data"
  x <- data.table::fread(url, showProgress = FALSE)
  
  # format: rename the German column headers
  x <- map_data(x, c(
    'Meldedatum'      = 'date',
    'Bundesland'      = 'state',
    'Landkreis'       = 'district',
    'IdBundesland'    = 'id_state',
    'IdLandkreis'     = 'id_district',
    'AnzahlFall'      = 'confirmed',
    'AnzahlTodesfall' = 'deaths',
    'AnzahlGenesen'   = 'recovered',
    'NeuerFall'       = 'confirmed_status',
    'NeuerTodesfall'  = 'deaths_status',
    'NeuGenesen'      = 'recovered_status'
  ))
  
  # convert date type
  x <- x[!is.na(x$date),]
  x$date <- as.Date(x$date, format = "%Y/%m/%d")
  
  # group key: country (no key), state, or state+district
  if(level == 1) 
    by <- NULL
  if(level == 2) 
    by <- c('id_state')
  if(level == 3) 
    by <- c('id_state','id_district')
  
  # The German data is slightly odd. It is a CSV file which is updated daily,
  # the 'status' flags is the status of the entry on the day the file was
  # updated. The flags mean:
  #                                     
  #    1 - cdr was only added today (as in the day the file was created)
  #    0 - cdr was added to yesterdays file, or before, but NOT today
  #   -1 - cd will be removed tomorrow but is still present today, exclude it
  #   -9 - not applicable (e.g. recovered_status=-9 on a death)
  #
  # So, basically just exclude anything with a negative value, but only for that
  # specific column type.
  # 
  # (Abbreviated case/death/recovery as cdr)
  x <- x %>%
    # drop negative counts
    mutate(
      confirmed = replace(confirmed, confirmed < 0, 0),
      deaths    = replace(deaths, deaths < 0, 0),
      recovered = replace(recovered, recovered < 0, 0)) %>%
    # group by date and admin area
    group_by_at(c('date', by)) %>%
    # compute total counts
    summarise(
      confirmed = sum(confirmed),
      deaths    = sum(deaths),
      recovered = sum(recovered)) %>%
    # group by admin area
    group_by_at(by) %>%
    # sort by date
    arrange(date) %>%
    # cumulate the daily increments into running totals
    # (this is the package's NA-preserving cumsum, which masks base::cumsum)
    mutate(
      confirmed = cumsum(confirmed),
      deaths    = cumsum(deaths),
      recovered = cumsum(recovered))
  
  return(x) 
}


================================================
FILE: R/ds_arcgis.se.R
================================================
#' Public Health Agency of Sweden
#'
#' Data source for: Sweden
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#'
#' @section Level 2:
#' - confirmed cases
#'
#' @source https://www.arcgis.com/home/item.html?id=b5e7488e117749c19881cce45db13f7e
#'
#' @keywords internal
#'
arcgis.se <- function(level){
  if(!level %in% 1:2) return(NULL)
  
  # Excel workbook published by the Public Health Agency of Sweden
  url <- "https://www.arcgis.com/sharing/rest/content/items/b5e7488e117749c19881cce45db13f7e/data"
  
  # level
  if(level==1){
    
    # daily national cases (sheet 1)
    x.cases <- read.excel(url, sheet = 1)
    
    # format cases
    x.cases <- map_data(x.cases, c(
      'Statistikdatum'    = 'date',
      'Totalt_antal_fall' = 'confirmed'
    ))
    
    # convert date
    x.cases$date <- as.Date(x.cases$date)
    
    # daily national deaths (sheet 2). Column types are forced; warnings
    # come from non-date entries (e.g. a "date unknown" row) which become
    # NA and are dropped below.
    x.deaths <- suppressWarnings(read.excel(url, sheet = 2, col_types = c("date", "numeric")))
    
    # format deaths
    x.deaths <- map_data(x.deaths, c(
      "Datum_avliden" = "date",
      "Antal_avlidna" = "deaths"
    ))
    
    # clean date
    x.deaths$date <- as.Date(x.deaths$date)
    x.deaths <- x.deaths[!is.na(x.deaths$date),]
    
    # merge daily cases and deaths. The join is "full", so a date present
    # in only one sheet yields NA in the other column: replace those NA
    # with 0 before cumulating, otherwise cumsum() propagates NA to every
    # subsequent date of the series.
    x <- full_join(x.cases, x.deaths, by = "date")  %>% 
      # sort by date
      arrange(date) %>%
      # cumulate (NA daily counts contribute 0)
      mutate(
        confirmed = cumsum(coalesce(confirmed, 0)),
        deaths = cumsum(coalesce(deaths, 0)))
      
  }
  if(level==2){
    
    # daily confirmed cases by region (sheet 1, one column per region)
    x <- read.excel(url, sheet = 1)
    
    # drop national data (second column is the country total)
    x <- x[,-2]
    
    # format date
    colnames(x)[1] <- "date"
    x$date <- as.Date(x$date)
    
    # sort by date and cumulate each region column
    x <- x[order(x$date),]
    x[,-1] <- cumsum(x[,-1])
    
    # pivot from wide (one column per region) to long format
    x <- tidyr::pivot_longer(x, -1, names_to = "state", values_to = "confirmed")
    
  }
    
  return(x)
}


================================================
FILE: R/ds_canada.ca.R
================================================
#' Public Health Agency of Canada
#'
#' Data source for: Canada
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://www.canada.ca/en/public-health/services/diseases/coronavirus-disease-covid-19.html
#'
#' @keywords internal
#'
canada.ca <- function(level){
  if(!level %in% 1:2) return(NULL)
  
  # download cases and deaths
  # see https://health-infobase.canada.ca/covid-19/
  url <- "https://health-infobase.canada.ca/src/data/covidLive/covid19-download.csv"
  x1 <- read.csv(url)
  
  # format
  x1 <- map_data(x1, c(
    "date",
    "pruid"      = "id",
    "prname"     = "name",
    "numdeaths"  = "deaths",
    "totalcases" = "confirmed"
  ))
  
  # download total vaccine doses
  # see https://health-infobase.canada.ca/covid-19/vaccine-administration/
  url <- "https://health-infobase.canada.ca/src/data/covidLive/vaccination-administration.csv"
  x2 <- read.csv(url)
  
  # format
  x2 <- map_data(x2, c(
    "pruid" = "id",
    "report_date" = "date",
    "numtotal_all_administered" = "vaccines"
  ))
  
  # sanitize: coerce to integer; non-numeric entries become NA
  # (coercion warnings are suppressed)
  x2$vaccines <- suppressWarnings(as.integer(x2$vaccines))
  
  # download people vaccinated (weekly coverage data)
  # see https://health-infobase.canada.ca/covid-19/vaccination-coverage/
  url <- "https://health-infobase.canada.ca/src/data/covidLive/vaccination-coverage-map.csv"
  x3 <- read.csv(url, fileEncoding = "UTF-8-BOM")
  
  # format
  x3 <- map_data(x3, c(
    "pruid" = "id",
    "week_end" = "date",
    "numtotal_atleast1dose" = "people_vaccinated",
    "numtotal_fully" = "people_fully_vaccinated"
  ))

  # download tests
  url <- "https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-labIndicators.csv"
  x4 <- read.csv(url, fileEncoding = "UTF-8-BOM")
  
  # format
  x4 <- map_data(x4, c(
    "pruid" = "id",
    "date" = "date",
    "numtests_total" = "tests"
  ))
  
  # merge all indicators by province id and date
  by <- c("id", "date")
  x <- x1 %>%
    full_join(x2, by = by) %>%
    full_join(x3, by = by) %>%
    full_join(x4, by = by)

  # remove non-geographic entity
  # NOTE(review): id 99 presumably codes repatriated travellers or
  # similar -- verify against the source's pruid code list
  x <- x[which(x$id!=99),] 
  
  # fill with daily series before June 2022. The archived rows from
  # extdata are placed first by bind_rows, so within each (id, date)
  # group x[1] is the archived value and x[2] the live value: ifelse
  # keeps the live value when present and falls back to the archive
  # (when a group has a single row, x[2] indexes out of range and is NA)
  x <- bind_rows(extdata("ds/CAN.csv"), x) %>%
    # for each id and date
    group_by(id, date) %>%
    # take last non-NA element
    summarise_all(function(x) ifelse(is.na(x[2]), x[1], x[2]))
  
  # filter by level (id=1 -> Canada)
  if(level==1)
    x <- x[x$id==1,]   
  if(level==2)
    x <- x[x$id!=1,]  

  # convert date
  x$date <- as.Date(x$date)
  
  return(x)
}


================================================
FILE: R/ds_cdc.gov.R
================================================
#' Centers for Disease Control and Prevention
#'
#' Data source for: United States
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 2:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 3:
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://data.cdc.gov/browse?q=COVID-19%20Vaccination&sortBy=relevance
#'
#' @keywords internal
#'
cdc.gov <- function(level){
  if(!level %in% 1:3) return(NULL)
  
  if(level==1 | level==2){
   
    # vaccines by state/territory
    # see https://data.cdc.gov/d/rh2h-3yt2
    url <- "https://data.cdc.gov/api/views/rh2h-3yt2/rows.csv?accessType=DOWNLOAD"
    x <- read.csv(url)
    
    # format
    x <- map_data(x, c(
      "date_type" = "type",
      "Date" = "date",
      "Location" = "state",
      "Administered_Cumulative" = "vaccines",
      "Admin_Dose_1_Cumulative" = "people_vaccinated",
      "Series_Complete_Cumulative" = "people_fully_vaccinated"
    ))
    
    # select data by date of vaccine administration
    # (the file also contains rows keyed by date of report)
    x <- filter(x, type=="Admin")
    
    # filter by level: national total vs. states/territories.
    # NOTE(review): the excluded codes besides "US" presumably are
    # freely associated states (Palau, Micronesia, Marshall Islands,
    # ...) -- confirm against the CDC location code list
    if(level==1)
      x <- filter(x, state=="US")
    if(level==2)
      x <- filter(x, !is.na(state) & !state %in% c("US", "PW", "FM", "RP", "MH"))
    
    # convert date
    x$date <- as.Date(x$date, format = "%m/%d/%Y")
    
  }
  
  if(level==3){
    
    # vaccines by county
    # see https://data.cdc.gov/d/8xkx-amqh
    url <- "https://data.cdc.gov/api/views/8xkx-amqh/rows.csv?accessType=DOWNLOAD"
    x <- data.table::fread(url, showProgress = FALSE)
    
    # format
    x <- map_data(x, c(
      "Date" = "date",
      "FIPS" = "fips",
      "Series_Complete_Yes" = "people_fully_vaccinated",
      "Administered_Dose1_Recip" = "people_vaccinated"
    ))
    
    # clean
    x <- x %>%
      # drop unassigned and Guam
      filter(fips!="UNK" & fips!="66010") %>%
      # fips to integers
      mutate(fips = as.integer(fips)) %>%
      # drop duplicated rows
      distinct() %>%
      # standardize fips to the merged geographies used elsewhere in
      # the package (2997/2998 are package-specific combined codes)
      mutate(        
        # map "Yakutat" and "Hoonah-Angoon" to "Yakutat plus Hoonah-Angoon"
        fips = replace(fips, fips %in% c(2105, 2282), 2998),
        # map "Bristol Bay" and "Lake and Peninsula" to "Bristol Bay plus Lake and Peninsula"
        fips = replace(fips, fips %in% c(2164, 2060), 2997),
        # map New York boroughs to New York City
        fips = replace(fips, fips %in% c(36081, 36005, 36085, 36047), 36061)) %>%
      # for each date and fips
      group_by(date, fips) %>%
      # sum counts of the merged geographies
      summarise(
        people_vaccinated = sum(people_vaccinated),
        people_fully_vaccinated = sum(people_fully_vaccinated))
    
    # convert date
    x$date <- as.Date(x$date, format = "%m/%d/%Y")
    
  }
  
  return(x)
}


================================================
FILE: R/ds_covid19india.org.R
================================================
#' COVID-19 India API
#'
#' Data source for: India
#'
#' @param level 2
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://data.covid19india.org
#'
#' @keywords internal
#'
covid19india.org <- function(level){
  if(level!=2) return(NULL)

  # mapping between state names and 2-letter state codes
  codes <- read.csv("https://data.covid19india.org/csv/latest/state_wise.csv")
  codes <- map_data(codes, c(
    "State" = "state",
    "State_code" = "code"
  ))
  
  # epidemiological time series by state
  cases <- read.csv("https://data.covid19india.org/csv/latest/states.csv")
  cases <- map_data(cases, c(
    "Date" = "date",
    "State" = "state",
    "Confirmed" = "confirmed",
    "Recovered" = "recovered",
    "Deceased" = "deaths",
    "Tested" = "tests"
  ))
  
  # parse dates (ISO format)
  cases$date <- as.Date(cases$date)
  
  # vaccination time series by state
  vacc <- read.csv("http://data.covid19india.org/csv/latest/vaccine_doses_statewise_v2.csv")
  vacc <- map_data(vacc, c(
    "Vaccinated.As.of" = "date",
    "State" = "state",
    "First.Dose.Administered" = "people_vaccinated",
    "Second.Dose.Administered" = "people_fully_vaccinated",
    "Total.Doses.Administered" = "vaccines"
  ))
  
  # parse dates (day/month/year format)
  vacc$date <- as.Date(vacc$date, format = "%d/%m/%Y")

  # join cases with vaccines by date and state, then attach state codes
  x <- cases %>%
    full_join(vacc, by = c("date", "state")) %>%
    left_join(codes, by = "state")
  
  # drop the country total ("TT") and unassigned ("UN") entries
  x <- filter(x, !is.na(code) & !code %in% c("TT", "UN"))
  
  return(x)
}


================================================
FILE: R/ds_covidtracking.com.R
================================================
#' The COVID Tracking Project
#'
#' Data source for: United States
#'
#' @param level 2
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @source https://covidtracking.com/data/api
#'
#' @keywords internal
#'
covidtracking.com <- function(level){
  if(level!=2) return(NULL)
  
  # full historical data for all U.S. states
  dat <- read.csv("https://covidtracking.com/data/download/all-states-history.csv")
  
  # rename columns to the package's standard names
  dat <- map_data(dat, c(
    'date',
    'state',
    'fips',
    'positive'              = 'confirmed',
    'death'                 = 'deaths',
    'totalTestResults'      = 'tests',
    'recovered'             = 'recovered',
    'hospitalizedCurrently' = 'hosp',
    'inIcuCurrently'        = 'icu',
    'onVentilatorCurrently' = 'vent'
  ))

  # parse the date column (ISO format)
  dat$date <- as.Date(as.character(dat$date), format = "%Y-%m-%d")
  
  return(dat) 
}


================================================
FILE: R/ds_geohive.ie.R
================================================
#' Health Protection Surveillance Centre (HPSC) and Health Service Executive (HSE)
#'
#' Data source for: Ireland
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#'
#' @source https://covid-19.geohive.ie/search
#'
#' @keywords internal
#'
geohive.ie <- function(level) {
  if(!level %in% 1:2) return(NULL)

  if(level==1) {
    
    # national cumulative cases and deaths
    url.cases <- "https://opendata.arcgis.com/datasets/d8eb52d56273413b84b0187a4e9117be_0.csv"
    x.cases <- read.csv(url.cases)
    
    # format cases
    x.cases <- map_data(x.cases, c(
      "Date"                     = "date",
      "TotalConfirmedCovidCases" = "confirmed",
      "TotalCovidDeaths"         = "deaths"
    ))
    
    # convert date
    x.cases$date <- as.Date(x.cases$date, "%Y/%m/%d")
    
    # intensive care occupancy
    url.icu <- "https://opendata.arcgis.com/datasets/c8208a0a8ff04a45b2922ae69e9b2206_0.csv"
    x.icu <- read.csv(url.icu)
    
    # format icu
    # NOTE(review): 'ncovidconf' is taken as confirmed COVID patients
    # currently in ICU -- verify against the dataset's field description
    x.icu <- map_data(x.icu, c(
      "extract"    = "date",
      "ncovidconf" = "icu"
    ))
    
    # convert date
    x.icu$date <- as.Date(x.icu$date, "%Y/%m/%d")
    
    # hospital occupancy
    url.hosp  <- "https://opendata.arcgis.com/datasets/fe9bb23592ec4142a4f4c2c9bd32f749_0.csv"
    x.hosp  <- read.csv(url.hosp)
    
    # format hospitalizations
    x.hosp <- map_data(x.hosp, c(
      "Date"                            = "date",
      "SUM_number_of_confirmed_covid_1" = "hosp"
    ))
    
    # convert date
    x.hosp$date <- as.Date(x.hosp$date, "%Y/%m/%d")
    
    # laboratory tests
    url.tests <- "https://opendata.arcgis.com/datasets/f6d6332820ca466999dbd852f6ad4d5a_0.csv"
    x.tests <- read.csv(url.tests, fileEncoding = "UTF-8-BOM")
    
    # format tests
    x.tests <- map_data(x.tests, c(
      "Date_HPSC" = "date",
      "TotalLabs" = "tests"
    ))
    
    # convert date
    x.tests$date <- as.Date(x.tests$date, "%Y/%m/%d")
    
    # vaccinations
    url.vacc <- "https://opendata.arcgis.com/datasets/a0e3a1c53ad8422faf00604ee08955db_0.csv"
    x.vacc <- read.csv(url.vacc, fileEncoding = "UTF-8-BOM")
    
    # format vaccines
    x.vacc <- map_data(x.vacc, c(
      "VaccinationDate" = "date",
      "Dose1Cum" = "first",
      "Dose2Cum" = "second",
      "SingleDoseCum" = "oneshot",
      "PartiallyVacc" = "people_vaccinated",
      "FullyVacc" = "people_fully_vaccinated"
    )) 
    
    # total doses = first + second + single-shot doses
    x.vacc <- x.vacc %>%
      mutate(vaccines = first + second + oneshot)
    
    # convert date
    x.vacc$date <- as.Date(x.vacc$date, format = "%Y/%m/%d")
    
    # merge all indicators by date
    by <- "date"
    x <- x.cases %>% 
      full_join(x.icu, by = by) %>%
      full_join(x.hosp, by = by) %>%
      full_join(x.vacc, by = by) %>%
      full_join(x.tests, by = by)
    
    # fix duplicates: fromLast = TRUE keeps the most recent occurrence
    # of each date
    x <- x[!duplicated(x[,"date"], fromLast = TRUE),]
    
  }
  
  if(level==2) {
    
    # cumulative cases by county
    url <- "https://opendata.arcgis.com/datasets/d9be85b30d7748b5b7c09450b8aede63_0.csv"
    x <- read.csv(url)
    
    # format cases
    x <- map_data(x, c(
      "ORIGID"                  = "county_id",
      "CountyName"              = "county",
      "PopulationCensus16"      = "population",
      "Lat"                     = "latitude",
      "Long"                    = "longitude",
      "TimeStamp"               = "date",
      "ConfirmedCovidCases"     = "confirmed",
      "ConfirmedCovidDeaths"    = "deaths", 
      "ConfirmedCovidRecovered" = "recovered"
    ))
    
    # convert date
    x$date <- as.Date(x$date, "%Y/%m/%d")
    
    # fix duplicates: keep the most recent row per (date, county)
    x <- x[!duplicated(x[,c("date", "county_id")], fromLast = TRUE),]
    
  }
  
  return(x)
}


================================================
FILE: R/ds_github.ceedsdemm.covidprodataset.R
================================================
#' Centre of Excellence in Economics and Data Science, University of Milan
#'
#' Data source for: Italy
#'
#' @param level 3
#'
#' @section Level 3:
#' - deaths
#'
#' @source https://github.com/CEEDS-DEMM/COVID-Pro-Dataset
#'
#' @keywords internal
#'
github.ceedsdemm.covidprodataset <- function(level){
  if(level!=3) return(NULL)
                      
  # cumulative deaths by Italian province
  x <- read.csv("https://raw.githubusercontent.com/CEEDS-DEMM/COVID-Pro-Dataset/master/deathsItaProv.csv")
  
  # rename columns to the package's standard names
  x <- map_data(x, c(
    'Date'       = 'date',
    'id_prov'    = 'city_code',
    'Tot_deaths' = 'deaths'
  ))
  
  # parse dates (ISO format)
  x$date <- as.Date(x$date, format = "%Y-%m-%d")

  return(x)
}


================================================
FILE: R/ds_github.covid19euzh.covid19eudata.R
================================================
#' COVID-19 European Chinese Channel
#'
#' Data source for: Poland
#'
#' @param level 2
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - tests
#'
#' @source https://github.com/covid19-eu-zh/covid19-eu-data
#'
#' @keywords internal
#'
github.covid19euzh.covid19eudata <- function(level){
  if(level!=2) return(NULL)
  
  # download Polish regional data
  url <- "https://raw.githubusercontent.com/covid19-eu-zh/covid19-eu-data/master/dataset/covid-19-pl.csv"
  x <- read.csv(url)

  # normalize voivodeship names to their ASCII spelling. Applied in
  # order: the catch-all ".*[^a-z]skie$" must run after the more
  # specific patterns ending in "skie".
  fixes <- c(
    "warmi.*sko-mazurskie" = "warminsko-mazurskie",
    "ma.*opolskie"         = "malopolskie",
    "dolno.*skie"          = "dolnoslaskie",
    ".*dzkie$"             = "lodzkie",
    ".*tokrzyskie$"        = "swietokrzyskie",
    ".*[^a-z]skie$"        = "slaskie"
  )
  for(pattern in names(fixes))
    x$nuts_2 <- gsub(pattern, fixes[[pattern]], x$nuts_2)
  
  # rename columns to the package's standard names
  x <- map_data(x, c(
    "datetime" = "date",
    "nuts_2" = "nuts",
    "cases" = "confirmed",
    "deaths" = "deaths",
    "tests" = "tests"
  ))
  
  # drop rows with missing or malformed region identifiers
  x <- x[which(!is.na(x$nuts) & !startsWith(x$nuts, "https://")),]
  
  # parse dates (ISO format)
  x$date <- as.Date(x$date, format = "%Y-%m-%d")

  return(x)
}


================================================
FILE: R/ds_github.cssegisanddata.covid19.R
================================================
#' Johns Hopkins Center for Systems Science and Engineering
#'
#' Data source for: Worldwide
#'
#' @param level 1, 2, or 3 (U.S only)
#' @param file one of "global" for worldwide data or "US" for U.S. data
#' @param country filter by name of country
#' @param state filter by name of state
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' 
#' @section Level 3:
#' - confirmed cases (U.S. only)
#' - deaths (U.S. only)
#'
#' @source https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
#'
#' @keywords internal
#' 
github.cssegisanddata.covid19 <- function(level = 1, file = "global", country = NULL, state = NULL){
  if(file=="US" & !level %in% 1:3) return(NULL)
  if(file=="global" & !level %in% 1:2) return(NULL)
  
  # base URL of the JHU CSSE repository
  repo <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/"

  # one wide time-series file per metric; the vector names become the
  # value-column names after pivoting
  if(file=="global")
    urls <- c(
      "confirmed" = "time_series_covid19_confirmed_global.csv",
      "deaths"    = "time_series_covid19_deaths_global.csv",
      "recovered" = "time_series_covid19_recovered_global.csv"
    )

  if(file=="US")
    urls <- c(
      "confirmed" = "time_series_covid19_confirmed_US.csv",
      "deaths"    = "time_series_covid19_deaths_US.csv"
    )

  for(i in 1:length(urls)){

    # download this metric's file
    url <- sprintf("%s/csse_covid_19_time_series/%s", repo, urls[i])
    xx  <- read.csv(url, cache = level!=3)

    # skip this metric if the download failed. Use inherits() rather
    # than comparing class(): class() may return a vector of length > 1,
    # which is invalid in an if() condition
    if(inherits(xx, "try-error"))
      next

    # treat 0 and -1 cumulative counts as missing
    xx <- xx %>%
      mutate(across(where(is.numeric), ~ na_if(., 0))) %>%
      mutate(across(where(is.numeric), ~ na_if(., -1)))
    
    # normalize column names mangled on import
    colnames(xx) <- gsub(pattern = "\\_$", replacement = "", x = colnames(xx))
    colnames(xx) <- gsub(pattern = "\\_", replacement = ".", x = colnames(xx))
    colnames(xx) <- gsub(pattern = "^.\\_\\_", replacement = "", x = colnames(xx))
    colnames(xx) <- gsub(pattern = "^_", replacement = "", x = colnames(xx))

    if(file=="US") {
      
      colnames(xx) <- map_values(colnames(xx), c(
        'UID'            = 'id',
        'FIPS'           = 'fips',
        'iso3'           = 'country',
        'Province.State' = 'state',
        'Admin2'         = 'city',
        'Lat'            = 'lat',
        'Long'           = 'lng',
        'Population'     = 'pop'))
      
      # keep rows with complete identifiers, or non-US rows (territories)
      xx <- xx[which( (!is.na(xx$city) & !is.na(xx$fips) & !is.na(xx$id)) | xx$country!="USA" ),]
      if(level==3){
        # drop counts not attributed to any county
        xx <- xx[-which(xx$city=="Unassigned"),]
        xx <- xx[!grepl("^Out of ", xx$city),]
      }
        
    }
    if(file=="global"){
      
      colnames(xx) <- map_values(colnames(xx), c(
        'Country.Region' = 'country',
        'Province.State' = 'state',
        'Lat'            = 'lat',
        'Long'           = 'lng'))

      # promote the Grand Princess cruise ship to a country-level entry
      idx <- which(xx$state=="Grand Princess")
      xx$country[idx] <- "Grand Princess"
      xx$state[idx]   <- NA
      
      # drop non-geographic state entries
      idx <- which(xx$state %in% c("Recovered","Diamond Princess"))
      if(length(idx))
        xx  <- xx[-idx,]
      
      # drop non-geographic country entries
      idx <- which(xx$country %in% c("Summer Olympics 2020"))
      if(length(idx))
        xx  <- xx[-idx,]
      
      # level 1: country rows only; level 2: state rows only
      if(level==1){
        xx <- xx[is.na(xx$state),]
        xx$id <- xx$country
      }
      if(level==2){
        xx <- xx[!is.na(xx$state),]
        xx$id <- paste(xx$country, xx$state, sep = ", ")
      }
        
    }
    
    # optional filters by country/state name
    if(!is.null(country))
      xx <- xx[which(xx$country==country),]
    if(!is.null(state))
      xx <- xx[which(xx$state==state),]

    # pivot the wide date columns (read.csv prefixes them with "X")
    # into (id, date, <metric>) long format
    xx <- xx %>%
      pivot_longer(cols = starts_with("X", ignore.case = FALSE), values_to = names(urls[i]), names_to = "date") %>%
      select(c("id", "date", names(urls[i])))
    
    # date
    xx$date <- as.Date(xx$date, format = "X%m.%d.%y")

    # merge this metric with the ones already downloaded
    if(i==1)
      x <- xx
    else
      x <- full_join(x, xx, by = c('id', 'date'))
    
  }
  
  # remove constant cumulative counts (no change from the previous day
  # usually means "not updated", not a true zero increment).
  # all_of() makes the external character vector an explicit selection
  # (bare external vectors in across() are deprecated tidyselect usage)
  cols <- intersect(colnames(x), c("confirmed", "deaths", "recovered"))
  clean <- function(x) replace(x, c(NA, diff(x))==0, NA)
  x <- x %>% 
    group_by(id) %>%
    arrange(date) %>%
    mutate(across(all_of(cols), clean))
  
  return(x)
}


================================================
FILE: R/ds_github.cssegisanddata.covid19unified.R
================================================
#' Johns Hopkins Center for Systems Science and Engineering
#'
#' Data source for: Worldwide
#'
#' @param level 1, 2, 3
#' @param iso the 2-letter (level=1) or 3-letter (level>1) ISO code of the country
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#' 
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @source https://github.com/CSSEGISandData/COVID-19_Unified-Dataset
#'
#' @keywords internal
#'
github.cssegisanddata.covid19unified <- function(level, iso){
  if(!level %in% 1:3) return(NULL)
  
  # JHU ids: the country ISO code at level 1, otherwise the ids stored
  # in the package's internal lookup table for this country and level
  ids <- iso
  if(level!=1){
    db <- extdata(sprintf("db/%s.csv", iso))
    ids <- db$id_github.cssegisanddata.covid19unified[db$administrative_area_level==level]
  }
  
  # download the unified dataset (single RDS file for all countries)
  url <- "https://github.com/CSSEGISandData/COVID-19_Unified-Dataset/blob/master/COVID-19.rds?raw=true"
  file <- tempfile()
  download.file(url, file, mode = "wb", quiet = TRUE)
  x <- readRDS(file)
  unlink(file)
  
  # keep only the requested areas, totals across age/sex, positive counts
  x <- x[which((x$ID %in% ids) & x$Age=="Total" & x$Sex=="Total" & x$Cases>0),]
  
  # the dataset aggregates several upstream sources: keep only the
  # single source with the most rows (best data coverage)
  s <- names(which.max(table(x$Source)))
  x <- x[x$Source==s,]
  
  # pivot from long (one row per Type) to wide (one column per Type)
  x <- tidyr::pivot_wider(x, id_cols = c("ID", "Date"), names_from = "Type", values_from = "Cases")
  
  # rename columns to the package's standard names
  x <- map_data(x, c(
    "ID"               = "id",
    "Date"             = "date",
    "Tests"            = "tests",
    "Recovered"        = "recovered",
    "Confirmed"        = "confirmed",
    "Deaths"           = "deaths",
    "Hospitalized_Now" = "hosp",
    "ICU_Now"          = "icu",
    "Ventilator_Now"   = "vent"
  ))
  
  # date
  x$date <- as.Date(x$date)

  return(x)
}


================================================
FILE: R/ds_github.dsfsi.covid19za.R
================================================
#' Data Science for Social Impact research group, University of Pretoria
#'
#' Data source for: South Africa
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#'
#' @source https://github.com/dsfsi/covid19za
#'
#' @keywords internal
#'
github.dsfsi.covid19za <- function(level){
  if(!level %in% 1:2) return(NULL)
  
  # download cumulative time series (cases, deaths, tests, recoveries)
  baseurl  <- "https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_provincial_cumulative_timeline_"
  x.cases  <- read.csv(paste0(baseurl, "confirmed.csv"))
  x.deaths <- read.csv(paste0(baseurl, "deaths.csv"))
  x.tests  <- read.csv(paste0(baseurl, "testing.csv"))
  x.recov  <- read.csv(paste0(baseurl, "recoveries.csv"))
  
  # columns to keep: national total (level 1) or the 9 provinces (level 2)
  if(level==1)
    cols <- "total"
  if(level==2)
    cols <- c("EC","FS","GP","KZN","LP","MP","NC","NW","WC")
  
  # reshape one metric from wide (one column per area) to long format
  to_long <- function(df, metric){
    df %>%
      select(c("date", cols)) %>%
      pivot_longer(cols = all_of(cols), names_to = "state", values_to = metric)
  }
  
  # merge all metrics by date and state
  by <- c("date", "state")
  x <- to_long(x.cases, "confirmed") %>%
    full_join(to_long(x.deaths, "deaths"), by = by) %>%
    full_join(to_long(x.recov, "recovered"), by = by) %>%
    full_join(to_long(x.tests, "tests"), by = by)
  
  # convert date
  x$date <- as.Date(x$date, format = "%d-%m-%Y")
  
  return(x)
}


================================================
FILE: R/ds_github.dssgpt.covid19ptdata.R
================================================
#' Data Science for Social Good Portugal
#'
#' Data source for: Portugal
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - hospitalizations
#' - intensive care
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#'
#' @source https://github.com/dssg-pt/covid19pt-data
#'
#' @keywords internal
#'
github.dssgpt.covid19ptdata <- function(level) {
  if(!level %in% 1:2) return(NULL)
  
  # download the single national/regional data file
  url <- "https://raw.githubusercontent.com/dssg-pt/covid19pt-data/master/data.csv"
  x   <- read.csv(url)
  
  # rename columns: national indicators plus per-region confirmed and
  # deaths columns suffixed with the NUTS region code
  x <- map_data(x, c(
    "data"                    = "date",
    # country
    "confirmados"             = "confirmed",
    "confirmados_novos"       = "confirmed_daily",
    "recuperados"             = "recovered",
    "obitos"                  = "deaths",
    "internados"              = "hosp",
    "internados_uci"          = "icu",
    # confirmed by region
    "confirmados_arsnorte"    = "confirmed_PT11",
    "confirmados_arsalgarve"  = "confirmed_PT15",
    "confirmados_arscentro"   = "confirmed_PT16",
    "confirmados_arslvt"      = "confirmed_PT17",
    "confirmados_arsalentejo" = "confirmed_PT18",
    "confirmados_acores"      = "confirmed_PT20",
    "confirmados_madeira"     = "confirmed_PT30",
    # deaths by region
    "obitos_arsnorte"         = "deaths_PT11",
    "obitos_arsalgarve"       = "deaths_PT15",
    "obitos_arscentro"        = "deaths_PT16",
    "obitos_arslvt"           = "deaths_PT17",
    "obitos_arsalentejo"      = "deaths_PT18",
    "obitos_acores"           = "deaths_PT20",
    "obitos_madeira"          = "deaths_PT30"
  ))
  
  if(level == 1) {
    
    # select national data
    x <- x[,c("date","confirmed","recovered","deaths","hosp","icu")]
    
  }
  
  if(level == 2) {
    
    # per-region column names, built from the NUTS codes above
    regions           <- c("PT11","PT15","PT16","PT17","PT18","PT20","PT30")
    regions.confirmed <- sapply(regions, function(r) paste("confirmed", r, sep = "_"))
    regions.deaths    <- sapply(regions, function(r) paste("deaths", r, sep = "_"))
    
    # cases: wide (one column per region) to long format
    x.cases <- x[,c("date", regions.confirmed)] %>%
      pivot_longer(!date, names_to = "region", names_prefix = "confirmed_", values_to = "confirmed")
    
    # deaths: wide to long format
    x.deaths <- x[,c("date", regions.deaths)] %>%
      pivot_longer(!date, names_to = "region", names_prefix = "deaths_", values_to = "deaths")
    
    # merge cases and deaths by date and region
    x <- full_join(x.cases, x.deaths, by = c("date", "region"))
    
  }
  
  # date (day-month-year format)
  x$date <- as.Date(x$date, "%d-%m-%Y")
  
  return(x)
}


================================================
FILE: R/ds_github.eguidotti.covid19br.R
================================================
#' Emanuele Guidotti
#'
#' Data source for: Brazil
#'
#' @param level 3
#'
#' @section Level 3:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://github.com/covid19datahub/covid19br
#'
#' @keywords internal
#'
github.covid19datahub.covid19br <- function(level){
  if(level!=3) return(NULL)
  
  # municipality-level vaccination data (gzipped CSV)
  url <- "https://raw.githubusercontent.com/covid19datahub/covid19br/main/data.csv.gz"
  dat <- data.table::fread(url, encoding = "UTF-8", showProgress = FALSE)
  
  # rename columns to the package's standard names
  dat <- map_data(dat, c(
    "Date" = "date",
    "IBGE" = "ibge",
    "TotalVaccinations" = "vaccines",
    "PeopleVaccinated" = "people_vaccinated",
    "PeopleFullyVaccinated" = "people_fully_vaccinated"
  ))
  
  # drop rows without a valid IBGE municipality code
  dat <- dat[which(dat$ibge!="999999"),]
  
  # parse dates
  dat$date <- as.Date(dat$date)
  
  return(dat)
}


================================================
FILE: R/ds_github.italia.covid19opendatavaccini.R
================================================
#' Commissario straordinario per l'emergenza Covid-19, Presidenza del Consiglio dei Ministri
#'
#' Data source for: Italy
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 2:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://github.com/italia/covid19-opendata-vaccini
#'
#' @keywords internal
#'
github.italia.covid19opendatavaccini <- function(level){
  # national (1) and regional (2) data only
  if(!level %in% 1:2) return(NULL)
  
  # download
  # the upstream repository splits the data into one file per year/campaign;
  # all of them are fetched and stacked to cover the full time span
  urls <- c("https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/refs/heads/master/dati/somministrazioni-vaccini-latest-2020.csv",
            "https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/refs/heads/master/dati/somministrazioni-vaccini-latest-2021.csv",
            "https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/refs/heads/master/dati/somministrazioni-vaccini-latest-2022.csv",
            "https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/refs/heads/master/dati/somministrazioni-vaccini-latest-2023.csv",
            "https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/refs/heads/master/dati/somministrazioni-vaccini-latest-campagna-2023-2024.csv",
            "https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/refs/heads/master/dati/somministrazioni-vaccini-latest-campagna-2024-2025.csv"
  )
  
  x <- dplyr::bind_rows(lapply(urls, read.csv))
  
  # format
  # d1/d2 are first/second doses, db1..db3 booster doses, d a dose of
  # unspecified order; dpi is mapped to "oneshot" and treated below as a
  # cycle-completing dose — TODO confirm against the upstream data dictionary
  x <- map_data(x, c(
    "data" = "date",
    "forn" = "type",
    "N2" = "state",
    "d1" = "first",
    "d2" = "second",
    "dpi" = "oneshot",
    "db1" = "extra_1",
    "db2" = "extra_2",
    "db3" = "extra_3",
    "d" = "unsp_dose"
  ))
  
  # people vaccinated and total doses
  x <- x %>%
    dplyr::mutate(
          # total doses administered: sum every dose column, missing as zero
          vaccines = coalesce(first, 0) + 
            coalesce(second, 0) + 
            coalesce(oneshot, 0) + 
            coalesce(extra_1, 0) + 
            coalesce(extra_2, 0) + 
            coalesce(extra_3, 0) + 
            coalesce(unsp_dose, 0),
          # NOTE(review): unlike `vaccines`, these two sums do not coalesce
          # NAs, so a missing dose column propagates NA — confirm intended
          people_vaccinated = first + oneshot,
          # first doses of the single-shot Janssen vaccine complete the cycle
          people_fully_vaccinated = second + oneshot + first*(type=="Janssen"))
  
  if(level==1){
    
    # vaccines
    x <- x %>%
      # for each date
      dplyr::group_by(date) %>%
      # compute total counts (daily administrations)
      dplyr::summarise(
        vaccines = sum(vaccines),
        people_vaccinated = sum(people_vaccinated),
        people_fully_vaccinated = sum(people_fully_vaccinated)) %>%
      # sort by date
      dplyr::arrange(date) %>%
      # cumulate daily counts into running totals
      dplyr::mutate(
        vaccines = cumsum(vaccines),
        people_vaccinated = cumsum(people_vaccinated),
        people_fully_vaccinated = cumsum(people_fully_vaccinated))  
  
  }
  
  if(level==2){
    
    # vaccines
    x <- x %>%
      # for each date and region
      dplyr::group_by(date, state) %>%
      # compute total counts (daily administrations per region)
      dplyr::summarise(
        vaccines = sum(vaccines),
        people_vaccinated = sum(people_vaccinated),
        people_fully_vaccinated = sum(people_fully_vaccinated)) %>%
      # group by region so the cumulation below runs within each region
      dplyr::group_by(state) %>%
      # sort by date
      dplyr::arrange(date) %>%
      # cumulate daily counts into running totals
      dplyr::mutate(
        vaccines = cumsum(vaccines),
        people_vaccinated = cumsum(people_vaccinated),
        people_fully_vaccinated = cumsum(people_fully_vaccinated))  
    
  }

  # format date
  x$date <- as.Date(x$date)

  return(x)
}


================================================
FILE: R/ds_github.jmcastagnetto.covid19perudata.R
================================================
#' Jesus M. Castagnetto
#'
#' Data source for: Peru
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#'
#' @source https://github.com/jmcastagnetto/covid-19-peru-data
#'
#' @keywords internal
#'
github.jmcastagnetto.covid19perudata <- function(level) {
    # national (1) and regional (2) data only
    if(!level %in% 1:2) return(NULL)
    
    # repository hosting the Peru datasets
    base <- "https://raw.githubusercontent.com/jmcastagnetto/covid-19-peru-data"
    
    # epidemiological data (national and regional rows mixed in one file)
    cases <- read.csv(paste0(base, "/main/datos/covid-19-peru-data-augmented.csv"),
                      na.strings = c("NA",""))
    
    # harmonise column names
    cases <- map_data(cases, c(
        "date"            = "date",
        "region"          = "region",
        "iso_3166_2_code" = "id",
        "ubigeo"          = "ubigeo",
        "confirmed"       = "confirmed",
        "deaths"          = "deaths",
        "recovered"       = "recovered",
        "total_tests"     = "tests"
    ))
    
    # parse dates
    cases$date <- as.Date(cases$date)
    
    if(level==1){
        
        # hospitalised patients and mechanical ventilation, national level
        hosp <- read.csv(paste0(base, "/main/datos/covid-19-peru-detalle-hospitalizados.csv"),
                         na.strings = c("NA",""))
        hosp <- map_data(hosp, c(
            "fecha"                = "date",
            "hospitalizados"       = "hosp",
            "ventilacion_mecanica" = "vent"
        ))
        hosp$date <- as.Date(hosp$date)
        
        # ICU beds, national level
        icu <- read.csv(paste0(base, "/main/datos/covid-19-peru-camas-uci.csv"),
                        na.strings = c("NA",""))
        icu <- map_data(icu, c(
            "fecha"  = "date",
            "estado" = "status",
            "total"  = "icu"
        ))
        
        # keep only beds currently in use ("en uso")
        icu <- filter(icu, status=="en uso")
        icu$date <- as.Date(icu$date)
        
        # national rows carry no ISO 3166-2 region identifier;
        # merge them with the hospital and ICU series by date
        x <- cases[is.na(cases$id),] %>%
            full_join(hosp, by = "date") %>%
            full_join(icu, by = "date")
        
    }
    
    if(level==2){
        
        x <- cases %>%
            # regional rows only
            filter(!is.na(id)) %>%
            # for each date and region
            dplyr::group_by(date, id) %>%
            # compute total counts
            dplyr::summarise(
                confirmed = sum(confirmed),
                deaths    = sum(deaths),
                recovered = sum(recovered),
                tests     = sum(tests))
        
    }
    
    return(x)
}


================================================
FILE: R/ds_github.lisphilar.covid19sir.R
================================================
#' Hirokazu Takaya
#'
#' Data source for: Japan
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#'
#' @source https://github.com/lisphilar/covid19-sir
#'
#' @keywords internal
#'
github.lisphilar.covid19sir <- function(level) {
  # national (1) and prefecture (2) data only
  if(!level %in% 1:2) return(NULL)
  
  # repository folder holding the Japan datasets
  repo <- "https://raw.githubusercontent.com/lisphilar/covid19-sir/main/data/japan/"
  
  if(level==1){  
    
    # country totals; upstream records zeros that are read here as missing
    x <- read.csv(paste0(repo, "covid_jpn_total.csv"), na.strings = c("0", "NA"))
    
    # keep rows labelled "Domestic" only
    x <- x[x$Location=="Domestic",]
    
    # harmonise column names
    x <- map_data(x, c(
      "Date" = "date",
      "Positive" = "confirmed",
      "Tested" = "tests",
      "Discharged" = "recovered",
      "Hosp_require" = "hosp",
      "Hosp_severe" = "icu",
      "Fatal" = "deaths",
      "Vaccinated_1st" = "people_vaccinated",
      "Vaccinated_2nd" = "people_fully_vaccinated",
      "Vaccinated_3rd" = "vaccines_3",
      "Vaccinated_4th" = "vaccines_4",
      "Vaccinated_5th" = "vaccines_5"
    ))
    
    # total doses = first doses plus all later doses (later doses may be NA)
    later_doses <- tidyr::replace_na(x$people_fully_vaccinated, 0) + 
      tidyr::replace_na(x$vaccines_3, 0) + 
      tidyr::replace_na(x$vaccines_4, 0) + 
      tidyr::replace_na(x$vaccines_5, 0)
    x$vaccines <- x$people_vaccinated + later_doses
    
  }
  
  if(level==2){
    
    # per-prefecture counts
    x <- read.csv(paste0(repo, "covid_jpn_prefecture.csv"), na.strings = c("0", "NA"))
    
    # harmonise column names
    x <- map_data(x, c(
      "Date" = "date",
      "Prefecture" = "prefecture",
      "Positive" = "confirmed",
      "Tested" = "tests",
      "Discharged" = "recovered",
      "Hosp_require" = "hosp",
      "Hosp_severe" = "icu",
      "Fatal" = "deaths"
    ))
    
  }
  
  # parse dates
  x$date <- as.Date(x$date)
  
  return(x)
}


================================================
FILE: R/ds_github.m3it.covid19data.R
================================================
#' Matt Bolton
#'
#' Data source for: Australia
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @source https://github.com/M3IT/COVID-19_Data
#'
#' @keywords internal
#'
github.m3it.covid19data <- function(level) {
  # national (1) and state (2) data only
  if(!level %in% 1:2) return(NULL)

  # the repository ships a file already shaped for the COVID-19 Data Hub;
  # zeros and empty cells are read as missing values
  x <- read.csv(
    "https://raw.githubusercontent.com/M3IT/COVID-19_Data/master/Data/COVID19_Data_Hub.csv",
    na.strings = c("0", "NA", ""))

  # parse dates
  x$date <- as.Date(x$date)

  # keep the rows matching the requested administrative level
  x <- x[x$administrative_area_level == level,]

  return(x)
}


================================================
FILE: R/ds_github.minciencia.datoscovid19.R
================================================
#' Ministerio de Ciencia, Tecnología, Conocimiento, e Innovación
#'
#' Data source for: Chile
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - intensive care
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://github.com/MinCiencia/Datos-COVID19
#'
#' @keywords internal
#'
github.minciencia.datoscovid19 <- function(level) {
  # national (1), regional (2), and municipal (3) data only
  if(!level %in% 1:3) return(NULL)
  
  if(level==1 | level==2){
    
    # confirmed, deaths, and recovered at regional and national level
    # (producto3; national totals appear as region "Total")
    url.cases <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto3/TotalesPorRegion_std.csv"
    x.cases <- read.csv(url.cases)
    # format
    x.cases <- map_data(x.cases, c(
      "Region"    = "region",
      "Fecha"     = "date",
      "Categoria" = "type",
      "Total"     = "n"
    ))
    # pivot: keep only the three categories of interest and spread them into
    # one column each, one row per (region, date)
    x.cases <- x.cases %>%
      mutate(type = map_values(type, force = TRUE, map = c(
        "Casos acumulados" = "confirmed",
        "Fallecidos totales" = "deaths",
        "Casos confirmados recuperados" = "recovered"))) %>%
      filter(!is.na(type)) %>%
      pivot_wider(id_cols = c("region", "date"), names_from = "type", values_from = "n")
    
    
    # vaccination data at national and regional level (producto76)
    url.vacc  <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto76/vacunacion_std.csv"
    x.vacc  <- read.csv(url.vacc)
    # format
    x.vacc <- map_data(x.vacc, c(
      "Region"   = "region",
      "Fecha"    = "date",
      "Dosis"    = "type",
      "Cantidad" = "n"
    ))
    # compute people vaccinated; "Unica" (single-shot) doses count towards
    # both people_vaccinated and people_fully_vaccinated
    x.vacc <- x.vacc %>%
      group_by(region, date) %>%
      summarise(
        vaccines = sum(n),
        people_vaccinated = sum(n[type %in% c("Primera", "Unica")]),
        people_fully_vaccinated = sum(n[type %in% c("Segunda", "Unica")]))
    
    if(level==1){
      
      # hospitalization data at national level (producto24)
      url.hosp  <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto24/CamasHospital_Diario_std.csv"
      x.hosp  <- read.csv(url.hosp)
      # format      
      x.hosp <- map_data(x.hosp, c(
        "fecha" = "date",
        "Tipo.de.cama" = "type",
        "Casos.confirmados" = "n"
      ))
      # compute total hospitalizations and intensive care
      # (icu counts the UTI/UCI bed types only)
      x.hosp <- x.hosp %>%
        group_by(date) %>%
        summarise(
          hosp = sum(n),
          icu = sum(n[type %in% c("UTI", "UCI")]))
      
      # this file contains data on patients requiring ventilation at national level
      url.vent  <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto30/PacientesVMI_std.csv"
      x.vent  <- read.csv(url.vent)
      # format
      x.vent <- map_data(x.vent, c(
        "Fecha" = "date",
        "Casos" = "type",
        "Casos.confirmados" = "n"
      ))
      # extract patients requiring ventilation (VMI = invasive mechanical
      # ventilation)
      x.vent <- x.vent %>%
        group_by(date) %>%
        summarise(vent = n[type=="Pacientes VMI"])
      
      # this file contains the total tests at national level
      url.tests <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto17/PCREstablecimiento_std.csv"
      x.tests <- read.csv(url.tests)
      # format
      x.tests <- map_data(x.tests, c(
        "fecha" = "date",
        "Establecimiento" = "type",
        "Numero.de.PCR" = "n"
      ))
      # extract the total number of tests performed 
      x.tests <- x.tests %>%
        group_by(date) %>%
        summarise(tests = n[type=="Total realizados"])
      
      # extract national cases and vaccination data
      x.cases <- filter(x.cases, region=="Total")
      x.vacc <- filter(x.vacc, region=="Total")

      # merge all national series by date
      by <- "date"
      x <- x.cases %>%
        full_join(x.vacc, by = by) %>%
        full_join(x.hosp, by = by) %>%
        full_join(x.vent, by = by) %>%
        full_join(x.tests, by = by)
      
    }
    
    if(level==2){
      
      # data on realized tests at regional level (daily counts, producto7)
      url.tests <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto7/PCR_std.csv"
      x.tests <- read.csv(url.tests)
      # format
      x.tests <- map_data(x.tests, c(
        "fecha" = "date",
        "Region" = "region",
        "numero" = "n"
      ))
      # cumulate daily counts into running totals per region
      x.tests <- x.tests %>%
        group_by(region) %>%
        arrange(date) %>%
        mutate(tests = cumsum(n))
      
      # intensive care at regional level (producto8)
      url.icu  <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto8/UCI_std.csv"
      x.icu <- read.csv(url.icu)
      # format
      x.icu <- map_data(x.icu, c(
        "fecha" = "date",
        "Region" = "region",
        "numero" = "icu"
      ))  
      
      # extract regional cases and vaccination data (drop the "Total" rows)
      x.cases <- filter(x.cases, region!="Total")
      x.vacc <- filter(x.vacc, region!="Total")
      
      # merge all regional series by date and region
      by <- c("date", "region")
      x <- x.cases %>%
        full_join(x.vacc, by = by) %>%
        full_join(x.icu, by = by) %>%
        full_join(x.tests, by = by)
      
    }
    
  }
  
  if(level == 3) {
    
    # url: municipal-level products (comuna = municipality)
    url.pos    <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto55/Positividad_por_comuna.csv"
    url.cases  <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto1/Covid-19_std.csv"
    url.deaths <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto38/CasosFallecidosPorComuna_std.csv"
    url.vacc.0 <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto80/vacunacion_comuna_UnicaDosis_std.csv"
    url.vacc.1 <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto80/vacunacion_comuna_1eraDosis_std.csv"
    url.vacc.2 <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto80/vacunacion_comuna_2daDosis_std.csv"
    url.vacc.3 <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto80/vacunacion_comuna_Refuerzo_std.csv"
    
    # download
    x.pos    <- read.csv(url.pos)
    x.cases  <- read.csv(url.cases)
    x.deaths <- read.csv(url.deaths)
    x.vacc.0 <- read.csv(url.vacc.0)
    x.vacc.1 <- read.csv(url.vacc.1)
    x.vacc.2 <- read.csv(url.vacc.2)
    x.vacc.3 <- read.csv(url.vacc.3)
    
    # format positivity rate (share of tests that are positive)
    x.pos <- map_data(x.pos, c(
      "Codigo.comuna" = "municipality",
      "fecha" = "date",
      "positividad" = "positivity"
    ))
    
    # format cases
    x.cases <- map_data(x.cases, c(
      "Codigo.comuna" = "municipality",
      "Fecha" = "date",
      "Casos.confirmados" = "confirmed"
    ))
    
    # format deaths
    x.deaths <- map_data(x.deaths, c(
      "Codigo.comuna" = "municipality",
      "Fecha" = "date",
      "Casos.fallecidos" = "deaths"
    ))
    
    # format one shot vaccine dose
    x.vacc.0 <- map_data(x.vacc.0, c(
      "Fecha" = "date",
      "Codigo.comuna" = "municipality",
      "Unica.Dosis" = "oneshot"
    ))
    
    # format first vaccine dose
    x.vacc.1 <- map_data(x.vacc.1, c(
      "Fecha" = "date",
      "Codigo.comuna" = "municipality",
      "Primera.Dosis" = "first"
    ))
    
    # format second vaccine dose
    x.vacc.2 <- map_data(x.vacc.2, c(
      "Fecha" = "date",
      "Codigo.comuna" = "municipality",
      "Segunda.Dosis" = "second"
    ))
    
    # format extra vaccine dose
    x.vacc.3 <- map_data(x.vacc.3, c(
      "Fecha" = "date",
      "Codigo.comuna" = "municipality",
      "Dosis.Refuerzo" = "extra"
    ))
    
    # drop non-geographical entities (rows without a municipality code)
    x.pos    <- filter(x.pos,    !is.na(municipality))
    x.cases  <- filter(x.cases,  !is.na(municipality))
    x.deaths <- filter(x.deaths, !is.na(municipality))
    x.vacc.0 <- filter(x.vacc.0, !is.na(municipality))
    x.vacc.1 <- filter(x.vacc.1, !is.na(municipality))
    x.vacc.2 <- filter(x.vacc.2, !is.na(municipality))
    x.vacc.3 <- filter(x.vacc.3, !is.na(municipality))
    
    # merge all municipal series by date and municipality
    by <- c("date", "municipality")
    x <- x.cases %>%
      full_join(x.pos,    by = by) %>%
      full_join(x.deaths, by = by) %>%
      full_join(x.vacc.0, by = by) %>%
      full_join(x.vacc.1, by = by) %>%
      full_join(x.vacc.2, by = by) %>%
      full_join(x.vacc.3, by = by) 
    
    # vaccines and tests
    # tests are not published per municipality; they are estimated below as
    # new confirmed cases divided by the positivity rate, then cumulated
    x <- x %>%
      mutate(
        # total positive cases = confirmed cases
        pos_tot = confirmed,
        # drop negative and unfeasible values of positivity rate
        # (threshold 1e-4 also discards zero/near-zero rates that would
        # blow up the division below)
        positivity = replace(positivity, positivity < 1e-4, NA)) %>%
      # for each municipality
      group_by(municipality) %>%
      # order by date
      arrange(date) %>%
      # replace missing positives with the previous value
      fill(pos_tot) %>%
      mutate(
        # compute vaccines and people vaccinated (daily doses cumulated;
        # one-shot doses count as both first dose and full vaccination)
        vaccines = cumsum(first + second + oneshot + extra),
        people_vaccinated = cumsum(first + oneshot),
        people_fully_vaccinated = cumsum(second + oneshot),
        # compute new positive (clamped at zero in case totals decrease)
        pos_new = pmax(0, c(NA, diff(pos_tot))),
        # compute new tests
        tests_new = pos_new/positivity,
        # cumulate tests
        tests = as.integer(cumsum(tests_new)),
        # drop tests when confirmed is missing or it is greater than tests
        tests = replace(tests, is.na(confirmed) | confirmed>tests, NA))
    
  }
  
  # convert date
  x$date <- as.Date(x$date)
  
  return(x)
}


================================================
FILE: R/ds_github.mpiktas.covid19lt.R
================================================
#' Vaidotas Zemlys-Balevicius
#'
#' Data source for: Lithuania
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#' 
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://github.com/mpiktas/covid19lt
#'
#' @keywords internal
#'
github.mpiktas.covid19lt <- function(level) {
    # country (1), county (2), and municipality (3) data only
    if(!level %in% 1:3) return(NULL)
    
    # data repository
    base <- "https://raw.githubusercontent.com/mpiktas/covid19lt/master/data/"
    
    if(level == 1){
        
        # country-level time series
        x <- read.csv(paste0(base, "lt-covid19-country.csv"))
        
        # harmonise column names
        x <- map_data(x, c(
            "day"          = "date",
            "confirmed"    = "confirmed",
            "tests"        = "tests",
            "deaths_1"     = "deaths",
            "recovered"    = "recovered",
            "icu"          = "icu",
            "ventilated"   = "vent",
            "hospitalized" = "hosp",
            "vaccinated_1" = "dose_1",
            "vaccinated_2" = "dose_2",
            "vaccinated_3" = "dose_3",
            "fully_protected" = "people_fully_vaccinated"
        ))
        
    } else {
        
        # sub-national time series: one file per administrative level
        x <- read.csv(paste0(base, sprintf("lt-covid19-level%s.csv", level)))
        
        # harmonise column names
        x <- map_data(x, c(
            "administrative_level_2" = "admin2",
            "administrative_level_3" = "admin3",
            "population"             = "population",
            "day"                    = "date",
            "confirmed"              = "confirmed",
            "tests"                  = "tests",
            "deaths_1"               = "deaths",
            "recovered"              = "recovered",
            "vaccinated_1"           = "dose_1",
            "vaccinated_2"           = "dose_2",
            "vaccinated_3"           = "dose_3",
            "fully_protected"        = "people_fully_vaccinated"
        ))
        
    }
    
    # first doses measure people with at least one dose
    x$people_vaccinated <- x$dose_1
    
    # total doses administered
    # NOTE(review): the sum propagates NA if any dose column is missing —
    # presumably the source reports the three columns together; verify upstream
    x$vaccines <- x$dose_1 + x$dose_2 + x$dose_3
    
    # parse dates
    x$date <- as.Date(x$date)
    
    # return
    return(x)
}


================================================
FILE: R/ds_github.nytimes.covid19data.R
================================================
#' The New York Times
#'
#' Data source for: United States
#'
#' @param level 1, 2, 3
#' @param fips filter by FIPS code
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#'
#' @source https://github.com/nytimes/covid-19-data
#'
#' @keywords internal
#'
github.nytimes.covid19data <- function(level, fips = NULL){
  # national (1), state (2), and county (3) data only
  if(!level %in% 1:3) return(NULL)
  
  # repository root
  repo <- "https://raw.githubusercontent.com/nytimes/covid-19-data/master/" 
  
  # county data are split into one file per year, from 2020 up to now
  years <- 2020:as.integer(format(Sys.Date(), "%Y"))
  files <- switch(as.character(level),
    "1" = "us.csv",
    "2" = "us-states.csv",
    "3" = sprintf("us-counties-%s.csv", years))
  
  # download and stack
  x <- do.call(rbind, lapply(paste0(repo, files), read.csv))
  
  # harmonise column names
  x <- map_data(x, c(
    'date',
    'state',
    'fips',
    'county' = 'city',
    'cases'  = 'confirmed',
    'deaths' = 'deaths'
  ))

  # county level: drop unallocated rows and patch the FIPS codes that the
  # NYT leaves empty for its geographic exceptions
  if(level==3){
    x <- x[x$city!="Unknown",]
    x$fips[x$city=="New York City"] <- 36061
    x$fips[x$city=="Kansas City"]   <- 29901
    x$fips[x$city=="Joplin"]        <- 29592
  }
  
  # parse dates
  x$date <- as.Date(x$date, format = "%Y-%m-%d")
  
  # optionally keep only FIPS codes starting with the given prefix
  # NOTE(review): fips is compared as a number rendered to character, so a
  # leading-zero prefix like "06" would never match — confirm callers pass
  # prefixes without leading zeros
  if(!is.null(fips))
    x <- x[which(startsWith(as.character(x$fips), fips)),]
  
  return(x) 
}


================================================
FILE: R/ds_github.openzh.covid19.R
================================================
#' Specialist Unit for Open Government Data Canton of Zurich
#'
#' Data source for: Switzerland and Liechtenstein
#'
#' @param level 1 (only for Liechtenstein), or 2 (only for Switzerland)
#' @param state one of CH (Switzerland) or FL (Liechtenstein)
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @source https://github.com/openZH/covid_19
#'
#' @keywords internal
#'
github.openzh.covid19 <- function(level, state){
  # Liechtenstein is a country (level 1); Swiss cantons are level 2
  if(state=="FL" & level!=1) return(NULL)
  if(state=="CH" & level!=2) return(NULL)
  
  # single csv covering all Swiss cantons plus Liechtenstein
  x <- read.csv("https://raw.githubusercontent.com/openZH/covid_19/master/COVID19_Fallzahlen_CH_total_v2.csv")

  # harmonise column names
  x <- map_data(x, c(
    'date',
    'abbreviation_canton_and_fl' = 'code',
    'ncumul_conf'                = 'confirmed',
    'ncumul_tested'              = 'tests',
    'ncumul_deceased'            = 'deaths',
    'ncumul_released'            = 'recovered',
    'current_hosp'               = 'hosp',
    'current_icu'                = 'icu',
    'current_vent'               = 'vent'  
  ))

  # Liechtenstein rows are coded "FL"; everything else is a Swiss canton
  keep <- if(state=="FL") x$code=="FL" else x$code!="FL"
  x <- x[which(keep),]

  # parse dates
  x$date <- as.Date(x$date)

  return(x)
}


================================================
FILE: R/ds_github.oxcgrt.covidpolicytracker.R
================================================
#' Oxford Covid-19 Government Response Tracker
#'
#' Data source for: Worldwide
#' Documentation: https://covid19datahub.io/articles/docs.html#policy-measures
#'
#' @param level 1, 2, 3
#'
#' @source https://github.com/OxCGRT/covid-policy-tracker
#'
#' @keywords internal
#'
github.oxcgrt.covidpolicytracker <- function(level){
  # country (1), state (2), and municipal (3) levels supported
  if(!level %in% 1:3) return(NULL)
  
  # download the legacy OxCGRT snapshot (frozen as of July 2022)
  url <- "https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker-legacy/main/legacy_data_202207/OxCGRT_latest.csv"
  x   <- read.csv(url, cache = TRUE)

  # issue a warning if new sub-national data are available beyond the
  # countries already handled (AUS, BRA, CAN, CHN, GBR, IND, USA)
  codes <- unique(x$CountryCode[!is.na(x$RegionCode)])
  new <- setdiff(codes, c("AUS", "BRA", "CAN", "CHN", "GBR", "IND", "USA"))
  if(length(new)>0) warning(sprintf("OxCGRT: New sub-national level are available: %s", new))
  
  # C8, H2, H3, H7 have no binary flag for geographic scope
  # -> they do not vary within the country 
  # -> set flag=1 (general policy)
  x$C8_Flag <- 1
  x$H2_Flag <- 1
  x$H3_Flag <- 1
  x$H7_Flag <- 1
  
  # formatting: map OxCGRT indicator columns to Data Hub names; each policy
  # comes with a companion *_flag column marking general (1) vs targeted (0)
  x <- map_data(x, c(
    "Date"                                    = "date",
    "CountryCode"                             = "iso_alpha_3",
    "RegionCode"                              = "region_code",
    "C1_School.closing"                       = "school_closing",
    "C1_Flag"                                 = "school_closing_flag",
    "C2_Workplace.closing"                    = "workplace_closing",
    "C2_Flag"                                 = "workplace_closing_flag",
    "C3_Cancel.public.events"                 = "cancel_events",
    "C3_Flag"                                 = "cancel_events_flag",
    "C4_Restrictions.on.gatherings"           = "gatherings_restrictions",
    "C4_Flag"                                 = "gatherings_restrictions_flag",
    "C5_Close.public.transport"               = "transport_closing",
    "C5_Flag"                                 = "transport_closing_flag",
    "C6_Stay.at.home.requirements"            = "stay_home_restrictions",
    "C6_Flag"                                 = "stay_home_restrictions_flag",
    "C7_Restrictions.on.internal.movement"    = "internal_movement_restrictions",
    "C7_Flag"                                 = "internal_movement_restrictions_flag",
    "C8_International.travel.controls"        = "international_movement_restrictions",
    "C8_Flag"                                 = "international_movement_restrictions_flag",
    "H1_Public.information.campaigns"         = "information_campaigns",
    "H1_Flag"                                 = "information_campaigns_flag",
    "H2_Testing.policy"                       = "testing_policy",
    "H2_Flag"                                 = "testing_policy_flag",
    "H3_Contact.tracing"                      = "contact_tracing",
    "H3_Flag"                                 = "contact_tracing_flag",
    "H6_Facial.Coverings"                     = "facial_coverings", 
    "H6_Flag"                                 = "facial_coverings_flag",
    "H7_Vaccination.policy"                   = "vaccination_policy",
    "H7_Flag"                                 = "vaccination_policy_flag",
    "H8_Protection.of.elderly.people"         = "elderly_people_protection",
    "H8_Flag"                                 = "elderly_people_protection_flag",
    "GovernmentResponseIndex"                 = "government_response_index",
    "StringencyIndex"                         = "stringency_index",
    "ContainmentHealthIndex"                  = "containment_health_index",
    "EconomicSupportIndex"                    = "economic_support_index"))
  
  # define flags, policy, and index columns  
  cn <- colnames(x)
  flags <- cn[endsWith(cn, "_flag")]
  value <- gsub("\\_flag", "", flags)
  index <- cn[endsWith(cn, "_index")]
  
  # set negative values for policies with missing flag or flag equal to zero:
  # sign(flag - 0.5) is +1 when flag==1 (general policy) and -1 when flag==0
  # (targeted policy), so targeted policies are encoded as negative values
  x[,flags][is.na(x[,flags])] <- 0
  x[,value] <- x[,value] * sign(x[,flags]-0.5)
  
  # country level
  if(level == 1) {
    
    # nothing to do

  }
  
  # sub-national level
  if(level==2) {

    # do not alter sub-national index
    # set negative values for country-level index
    # (rows without a region code are country aggregates)
    idx <- is.na(x$region_code)
    x[idx, index] <- -x[idx, index]
    
  }
  
  # municipality level
  if(level==3){

    # OxCGRT has no data at the municipality level
    # set negative values for all index
    x[, index] <- -x[, index]
    
  }
  
  # date: stored upstream as an integer in YYYYMMDD form
  x$date <- as.Date(as.character(x$date), format = "%Y%m%d")
  
  # create identifier with country code for countries, and region code for regions
  x$id_oxcgrt <- ifelse(is.na(x$region_code), x$iso_alpha_3, x$region_code)
  
  return(x)
}


================================================
FILE: R/ds_github.ozanerturk.covid19turkeyapi.R
================================================
#' Ozan Erturk
#'
#' Data source for: Turkey
#'
#' @param level 1
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - intensive care
#' - patients requiring ventilation
#'
#' @source https://github.com/ozanerturk/covid19-turkey-api
#'
#' @keywords internal
#'
github.ozanerturk.covid19turkeyapi <- function(level){
  # national data only
  if(level!=1) return(NULL)

  # national time series; empty cells and "-" mark missing values
  x <- read.csv(
    "https://raw.githubusercontent.com/ozanerturk/covid19-turkey-api/master/dataset/timeline.csv",
    na.strings = c("", "-"))
  
  # harmonise column names
  x <- map_data(x, c(
    'date'               = 'date',
    'totalTests'         = 'tests',
    'totalPatients'      = 'confirmed',
    'totalDeaths'        = 'deaths',
    'totalRecovered'     = 'recovered',
    'totalIntensiveCare' = 'icu',
    'totalIntubated'     = 'vent'
  ))
  
  # drop rows without a date, then parse day/month/year dates
  has_date <- !is.na(x$date)
  x <- x[has_date,]
  x$date <- as.Date(x$date, format = "%d/%m/%Y")
  
  return(x)
}


================================================
FILE: R/ds_github.pcmdpc.covid19.R
================================================
#' Ministero della Salute
#'
#' Data source for: Italy
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#'
#' @section Level 3:
#' - confirmed cases
#'
#' @source https://github.com/pcm-dpc/COVID-19
#' 
#' @keywords internal
#'
github.pcmdpc.covid19 <- function(level){
  # Data for Italy at national (1), regional (2), or provincial (3) level.
  # Returns NULL for unsupported levels.
  if(!level %in% 1:3) return(NULL)
                             
  # source repository and per-level file paths
  repo <- "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/"
  urls <- c(
    "dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv",
    "dati-regioni/dpc-covid19-ita-regioni.csv",
    "dati-province/dpc-covid19-ita-province.csv"
  )

  # download
  # repo already ends with "/", so concatenate directly: the former
  # sprintf("%s/%s", ...) produced a double slash in the URL
  url <- paste0(repo, urls[level])
  x   <- read.csv(url)

  # date: the timestamp format in the "data" column changed over time,
  # so try the space-separated format first and fall back to ISO "T"
  d <- as.Date(x$data, format = "%Y-%m-%d %H:%M:%S")
  if(all(is.na(d)))
    d <- as.Date(x$data, format = "%Y-%m-%dT%H:%M:%S")
  x$date <- d

  # drop placeholder rows located at coordinates (0, 0);
  # scalar && is the correct operator for a single-condition if()
  if(!is.null(x$lat) && !is.null(x$long))
    x <- x[which(x$lat!=0 | x$long!=0),]
  
  # formatting
  x <- map_data(x, c(
    'date',
    'denominazione_regione'   = 'state', 
    'codice_regione'          = 'state_code',
    'sigla_provincia'         = 'city',
    'codice_provincia'        = 'city_code',
    'tamponi'                 = 'tests', 
    'totale_casi'             = 'confirmed', 
    'deceduti'                = 'deaths',        
    'dimessi_guariti'         = 'recovered',     
    'totale_ospedalizzati'    = 'hosp',
    'terapia_intensiva'       = 'icu' 
  ))
  
  return(x)
}


================================================
FILE: R/ds_github.robertkochinstitut.covid19impfungenindeutschland.R
================================================
#' Robert Koch Institut
#'
#' Data source for: Germany
#'
#' @param level 2, 3
#'
#' @section Level 2:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 3:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://github.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland
#'
#' @keywords internal
#'
github.robertkochinstitut.covid19impfungenindeutschland <- function(level){
  if(!level %in% 2:3) return(NULL)
  
  if(level==2){
    
    # state-level vaccination records
    url <- "https://raw.githubusercontent.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland/master/Aktuell_Deutschland_Bundeslaender_COVID-19-Impfungen.csv"
    x <- read.csv(url)
    
    # rename columns to the package schema
    x <- map_data(x, c(
      "Impfdatum" = "date",
      "BundeslandId_Impfort" = "id",
      "Impfstoff" = "type",
      "Impfserie" = "dose",
      "Anzahl" = "n"
    ))
    
    # daily totals per state, then running totals over time
    x <- x %>%
      # drop the undefined region (id 17)
      filter(id != 17) %>%
      # the Janssen vaccine requires a single dose
      mutate(single_shot = (type == "Janssen")) %>%
      # per date and state: total doses, first doses, and completions
      # (dose 1 of a one-shot vaccine, or dose 2 of a two-shot vaccine)
      group_by(date, id) %>%
      summarise(
        vaccines = sum(n),
        people_vaccinated = sum(n[dose == 1]),
        people_fully_vaccinated = sum(n[(dose == 1 & single_shot) | (dose == 2 & !single_shot)])) %>%
      # cumulate within each state by date
      group_by(id) %>%
      arrange(date) %>%
      mutate(
        vaccines = cumsum(vaccines),
        people_vaccinated = cumsum(people_vaccinated),
        people_fully_vaccinated = cumsum(people_fully_vaccinated))
    
  } else {
    
    # district-level vaccination records
    url <- "https://raw.githubusercontent.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland/master/Aktuell_Deutschland_Landkreise_COVID-19-Impfungen.csv"
    x <- read.csv(url)
    
    # rename columns to the package schema
    x <- map_data(x, c(
      "Impfdatum" = "date",
      "LandkreisId_Impfort" = "id",
      "Impfschutz" = "type",
      "Anzahl" = "n"
    ))
    
    # daily totals per district, then running totals over time
    x <- x %>%
      # valid districts carry 5-digit ids; drop the undefined area
      # (17000) and the Berlin aggregate (11000)
      filter(grepl("^\\d{5}$", id) & !id %in% c("11000", "17000")) %>%
      mutate(id = as.integer(id)) %>%
      # per district and date: total doses and people by protection level
      group_by(id, date) %>%
      summarise(
        vaccines = sum(n),
        people_vaccinated = sum(n[type==1]),
        people_fully_vaccinated = sum(n[type==2])) %>%
      # cumulate within each district by date
      group_by(id) %>%
      arrange(date) %>%
      mutate(
        vaccines = cumsum(vaccines),
        people_vaccinated = cumsum(people_vaccinated),
        people_fully_vaccinated = cumsum(people_fully_vaccinated))
    
  }
  
  # parse dates
  x$date <- as.Date(x$date)
  
  return(x) 
}


================================================
FILE: R/ds_github.swsoyee.2019ncovjapan.R
================================================
#' Su Wei
#'
#' Data source for: Japan and Cruise Ships
#'
#' @param level 1, 2
#' @param id filter by name
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#' 
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @source https://github.com/swsoyee/2019-ncov-japan/blob/master/README.en.md
#'
#' @keywords internal
#'
github.swsoyee.2019ncovjapan <- function(level, id = NULL) {
  if(!level %in% 1:2) return(NULL)
  
  # fetch the pre-aggregated dataset
  url <- "https://raw.githubusercontent.com/swsoyee/2019-ncov-japan/master/50_Data/covid19_jp.csv"
  x <- read.csv(url)
  
  # expose severe cases as intensive care, when the column is present
  if("severe" %in% colnames(x))
    x$icu <- x$severe
  
  # patch a known-bad test count for the Costa Atlantica cruise ship
  costa <- which(x$date=="2020-05-10" & x$administrative_area_level_2=="Costa Atlantica")
  x$tests[costa] <- 623
  
  # keep only rows at the requested administrative level
  x <- x[x$administrative_area_level == level, ]
  
  # optional filter by entity name; without it, drop cruise ships and
  # other non-geographical entities from the country-level data
  if(is.null(id)){
    if(level == 1)
      x <- x[is.na(x$administrative_area_level_2), ]
  } else {
    x <- x[which(x$administrative_area_level_2 == id), ]
  }
    
  # parse dates
  x$date <- as.Date(x$date)
  
  return(x)
}


================================================
FILE: R/ds_github.wcota.covid19br.R
================================================
#' Wesley Cota
#'
#' Data source for: Brazil
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#'
#' @source https://github.com/wcota/covid19br
#'
#' @keywords internal
#' 
github.wcota.covid19br <- function(level){
  if(!level %in% 1:3) return(NULL)
  
  if(level %in% 1:2){
    
    # state-level time series (country total included as state "TOTAL")
    url <- "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv"
    x <- read.csv(url)
    
    # rename columns to the package schema
    x <- map_data(x, c(
      "date" = "date",
      "state" = "state",
      "deaths" = "deaths",
      "totalCases" = "confirmed",
      "recovered" = "recovered",
      "tests" = "tests",
      "vaccinated" = "first",
      "vaccinated_second" = "second",
      "vaccinated_single" = "oneshot",
      "vaccinated_third" = "extra"
    ))
    
    # derive vaccination totals from the per-dose columns
    x <- x %>%
      dplyr::mutate(
        vaccines = first + replace_na(second, 0) + replace_na(oneshot, 0) + replace_na(extra, 0),
        people_vaccinated = first + replace_na(oneshot, 0),
        people_fully_vaccinated = second + replace_na(oneshot, 0))

    # level 1 keeps the country total; level 2 keeps the states
    total <- which(x$state=="TOTAL")
    if(level==1)
      x <- x[total,]
    else
      x <- x[-total,]
    
  }
  else {

    # one city-level file per year, plus an unsuffixed legacy file
    urls <- sprintf(
      "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-cities-time%s.csv.gz",
      c('', paste0('_', 2020:2030))
    )
    
    # keep only the files that actually exist
    urls <- urls[!sapply(urls, httr::http_error)]
    
    # download and format each yearly file
    x <- lapply(urls, function(u){

      # fetch to a temporary file and read it
      gz <- tempfile()
      download.file(u, destfile = gz, mode = "wb", quiet = TRUE)
      y <- read.csv(gz)
      unlink(gz)
      
      # rename columns to the package schema
      y <- map_data(y, c(
        "date" = "date",
        "ibgeID" = "code",
        "state" = "state",
        "deaths" = "deaths",
        "totalCases" = "confirmed"
      ))
      
      # municipalities carry 7-digit IBGE codes
      y[nchar(y$code)==7,]
      
    })
    
    # stack the yearly chunks
    x <- do.call(rbind, x)
    
  }
  
  # parse dates
  x$date <- as.Date(x$date)
  
  return(x)
}


================================================
FILE: R/ds_github.wcota.covid19br.vac.R
================================================
#' Wesley Cota
#'
#' Data source for: Brazil
#'
#' @param level 3
#'
#' @section Level 3:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://github.com/wcota/covid19br-vac
#'
#' @keywords internal
#' 
github.wcota.covid19br.vac <- function(level){
  if(level != 3) return(NULL)
  
  # index file listing the available states
  url <- "https://raw.githubusercontent.com/wcota/covid19br-vac/main/states.json"
  master <- jsonlite::fromJSON(url)
  
  # one processed file per state
  state.urls <- sprintf(
    "https://github.com/wcota/covid19br-vac/blob/main/processed_%s.csv.gz?raw=true",
    names(master$vaccination)
  )
  
  # download and format each state file
  x <- lapply(state.urls, function(u){
    
    # fetch to a temporary file and read it
    gz <- tempfile()
    download.file(u, destfile = gz, mode = "wb", quiet = TRUE)
    y <- read.csv(gz)
    unlink(gz)
    
    # rename columns to the package schema
    y <- map_data(y, c(
      "date" = "date",
      "ibgeID" = "code",
      "dose" = "dose",
      "count" = "n"
    ))
    
    # municipalities carry 7-digit IBGE codes
    y[nchar(y$code)==7,]
  
  })
  
  # stack the state chunks
  x <- do.call(rbind, x)
  
  # daily totals per municipality; dose 0 counts toward both metrics
  # (presumably marking single-shot vaccines — TODO confirm upstream)
  x <- x %>%
    dplyr::group_by(code, date) %>%
    dplyr::summarise(
      vaccines = sum(n),
      people_vaccinated = sum(n[dose == 0 | dose == 1]),
      people_fully_vaccinated = sum(n[dose == 0 | dose == 2]))

  # parse dates and drop rows where parsing failed
  x$date <- as.Date(x$date)
  x <- x[!is.na(x$date),]
  
  # running totals over time, within each municipality
  x <- x %>%
    dplyr::group_by(code) %>%
    dplyr::arrange(date) %>%
    dplyr::mutate(vaccines = cumsum(vaccines),
                  people_vaccinated = cumsum(people_vaccinated),
                  people_fully_vaccinated = cumsum(people_fully_vaccinated))
  
  return(x)
}


================================================
FILE: R/ds_go.th.R
================================================
#' Department of Disease Control, Thailand Ministry of Public Health
#'
#' Data source for: Thailand
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#'
#' @source https://data.go.th/en/dataset/covid-19-daily
#'
#' @keywords internal
#'
go.th <- function(level){
  if(!level %in% 1:2) return(NULL)  
  
  # the API exposes waves 1-2 and the wave-3+ timeline as separate endpoints
  if(level == 1){
    url.old <- "https://covid19.ddc.moph.go.th/api/Cases/round-1to2-all"
    url.new <- "https://covid19.ddc.moph.go.th/api/Cases/timeline-cases-all" 
  } else {
    url.old <- "https://covid19.ddc.moph.go.th/api/Cases/round-1to2-by-provinces"
    url.new <- "https://covid19.ddc.moph.go.th/api/Cases/timeline-cases-by-provinces"
  }
  
  # download both periods
  x.old <- jsonlite::fromJSON(url.old, flatten=TRUE)
  x.new <- jsonlite::fromJSON(url.new, flatten=TRUE)
  
  # stack the periods and rename columns to the package schema
  x <- dplyr::bind_rows(x.old, x.new) %>%
    map_data(c(
      "txn_date" = "date",
      "province" = "province",
      "total_case" = "confirmed",
      "total_death" = "deaths"
    ))
  
  # drop rows with unspecified province
  if(level==2){
    x <- filter(x, province!="ไม่ระบุ")
  }
  
  # parse dates
  x$date <- as.Date(x$date)
  
  # keep a single row per date (and province, when present)
  keys <- intersect(colnames(x), c("date","province"))
  x <- x[!duplicated(x[,keys]),]
  
  return(x)
}


================================================
FILE: R/ds_gob.ar.R
================================================
#' Argentine Ministry of Health
#'
#' Data source for: Argentina
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' 
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' 
#' @source https://datos.gob.ar/dataset?q=covid&tags=COVID-19&sort=metadata_modified+desc
#'
#' @keywords internal
#'
gob.ar <- function(level){
  # Cases, deaths, tests, and vaccinations for Argentina at national (1),
  # province (2), or department (3) level. Returns NULL for other levels.
  if(!level %in% 1:3) return(NULL)
  
  # grouping key by level: none (national), province code, or department code
  if(level==1)
    by <- NULL
  if(level==2)
    by <- "prov"
  if(level==3)
    by <- "dep"
  
  # download cases 
  # see https://datos.gob.ar/dataset/salud-covid-19-casos-registrados-republica-argentina
  # read.zip() is a package-internal helper; with xsv = TRUE it presumably
  # streams only the selected columns from the large CSV — TODO confirm
  url.cases <- "https://sisa.msal.gov.ar/datos/descargas/covid-19/files/Covid19Casos.zip"
  x.cases <- read.zip(url.cases, files = "Covid19Casos.csv", method = "wget", xsv = TRUE, 
                      select = c("clasificacion_resumen", "fecha_apertura", "fecha_fallecimiento",
                                 "residencia_provincia_id", "residencia_departamento_id"))
  
  # format cases (read.zip returns a list, one element per file)
  x.cases <- map_data(x.cases[[1]], c(
    "clasificacion_resumen" = "type",
    "fecha_apertura" = "date_confirmed",
    "fecha_fallecimiento" = "date_deaths",
    "residencia_provincia_id" = "prov",
    "residencia_departamento_id" = "dep"
  ))
  
  # keep confirmed cases only; zero-pad the province code to 2 digits
  # ("%.02d" behaves like "%02d") and build the department code as
  # 2-digit province + 3-digit department (presumably INDEC coding — verify)
  x.cases <- x.cases[x.cases$type=="Confirmado",]
  x.cases$prov <- sprintf("%.02d", x.cases$prov)
  x.cases$dep <- paste0(x.cases$prov, sprintf("%.03d", x.cases$dep))
  
  # compute confirmed: count cases per date (and area), then cumulate by date
  x.confirmed <- x.cases %>%
    rename(date = date_confirmed) %>%
    group_by_at(c("date", by)) %>%
    summarize(confirmed = n()) %>%
    group_by_at(by) %>%
    arrange(date) %>%
    mutate(confirmed = cumsum(confirmed))
  
  # compute deaths: count deaths per date (and area), then cumulate by date
  x.deaths <- x.cases %>%
    rename(date = date_deaths) %>%
    filter(!is.na(date)) %>%
    group_by_at(c("date", by)) %>%
    summarize(deaths = n()) %>%
    group_by_at(by) %>%
    arrange(date) %>%
    mutate(deaths = cumsum(deaths))
  
  # download tests
  # see https://datos.gob.ar/dataset/salud-covid-19-determinaciones-registradas-republica-argentina
  url.tests <- "https://sisa.msal.gov.ar/datos/descargas/covid-19/files/Covid19Determinaciones.zip"
  x.tests <- read.zip(url.tests, files = "Covid19Determinaciones.csv", fread = TRUE)
  
  # format tests
  x.tests <- map_data(x.tests[[1]], c(
    "fecha" = "date",
    "codigo_indec_provincia" = "prov",
    "codigo_indec_departamento" = "dep",
    "positivos" = "confirmed",
    "total" = "tests"
  ))
  
  # sanitize tests: same 2-digit province / 5-digit department coding as above
  x.tests$prov <- sprintf("%.02d", x.tests$prov)
  x.tests$dep <- paste0(x.tests$prov, sprintf("%.03d", x.tests$dep))
  
  # compute tests: daily totals per area, then cumulate by date
  x.tests <- x.tests %>%
    group_by_at(c("date", by)) %>%
    summarise(tests = sum(tests),
              confirmed = sum(confirmed)) %>%
    group_by_at(by) %>%
    arrange(date) %>%
    mutate(tests = cumsum(tests),
           confirmed = cumsum(confirmed))
  
  # download vaccines
  # see https://datos.gob.ar/dataset/salud-vacunas-contra-covid-19-dosis-aplicadas-republica-argentina---registro-desagregado
  url.vacc <- "https://sisa.msal.gov.ar/datos/descargas/covid-19/files/datos_nomivac_covid19.zip"
  x.vacc <- read.zip(url.vacc, files = "datos_nomivac_covid19.csv", method = "wget", xsv = TRUE, 
                     select = c("fecha_aplicacion", "jurisdiccion_residencia_id", "depto_residencia_id", "orden_dosis"))
  
  # format vaccines
  x.vacc <- map_data(x.vacc[[1]], c(
    "fecha_aplicacion" = "date",
    "jurisdiccion_residencia_id" = "prov",
    "depto_residencia_id" = "dep",
    "orden_dosis" = "dose"
  ))
  
  # sanitize vaccines: same 2-digit province / 5-digit department coding as above
  x.vacc$prov <- sprintf("%.02d", x.vacc$prov)
  x.vacc$dep <- paste0(x.vacc$prov, sprintf("%.03d", x.vacc$dep))
  
  # compute vaccines: one row per administered dose, so n() counts doses;
  # dose 1 marks first doses, dose 2 marks completed schedules
  x.vacc <- x.vacc %>%
    group_by_at(c("date", by)) %>%
    summarize(vaccines = n(),
              people_vaccinated = sum(dose==1),
              people_fully_vaccinated = sum(dose==2)) %>%
    group_by_at(by) %>%
    arrange(date) %>%
    mutate(vaccines = cumsum(vaccines),
           people_vaccinated = cumsum(people_vaccinated),
           people_fully_vaccinated = cumsum(people_fully_vaccinated))
  
  # merge the per-metric frames on date (and area)
  x <- x.deaths %>%
    full_join(x.tests, by = c("date", by)) %>%
    full_join(x.vacc, by = c("date", by))
  
  # confirmed tests are reported by testing location, confirmed cases by residence.
  # we need confirmed tests to be compatible with the number of tests at level 3.
  # for levels 1 and 2, it doesn't make much difference and we can use confirmed cases that have a longer history.
  # if level!=3 use confirmed cases instead of confirmed tests.
  if(level!=3){
    x <- x %>%
      select(-confirmed) %>%
      full_join(x.confirmed, by = c("date", by))
  }
  
  # convert date and drop invalid or pre-pandemic dates
  x <- x %>%
    mutate(date = as.Date(date)) %>%
    filter(!is.na(date) & date>="2020-01-01")

  # fill missing values originated by the merge
  x <- x %>%
    # for each group
    group_by_at(by) %>%
    # sort by date
    arrange(date) %>%
    # carry each cumulative series forward over dates the source did not report
    fill(confirmed, deaths, tests, vaccines, people_vaccinated, people_fully_vaccinated) %>%
    # ungroup
    ungroup() %>%
    # the per-metric max dates come from the pre-merge frames: values filled
    # forward past a source's last reporting date are reverted to NA rather
    # than presented as up-to-date counts
    mutate(confirmed = replace(confirmed, date>max(x.confirmed$date), NA),
           deaths = replace(deaths, date>max(x.deaths$date), NA),
           tests = replace(tests, date>max(x.tests$date), NA),
           vaccines = replace(vaccines, date>max(x.vacc$date), NA),
           people_vaccinated = replace(people_vaccinated, date>max(x.vacc$date), NA),
           people_fully_vaccinated = replace(people_fully_vaccinated, date>max(x.vacc$date), NA))
  
  # drop unassigned provinces (codes 99 and 00)
  if(level==2){
    x <- x %>% 
      filter(prov!="99" & prov!="00") %>%
      mutate(prov = as.integer(prov))
  }
  
  # drop unassigned departments (province 99/00 or department 999/000)
  if(level==3){
    x <- x %>% 
      filter(!startsWith(dep, "99") & !endsWith(dep, "999") & !startsWith(dep, "00") & !endsWith(dep, "000")) %>%
      mutate(dep = as.integer(dep))
  }
  
  return(x)
}


================================================
FILE: R/ds_gob.pe.R
================================================
#' Ministerio de Salud
#'
#' Data source for: Peru
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 2:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' 
#' @section Level 3:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' 
#' @source https://www.datosabiertos.gob.pe/dataset/vacunaci%C3%B3n-contra-covid-19-ministerio-de-salud-minsa
#'
#' @keywords internal
#'
gob.pe <- function(level){
  # Vaccination data for Peru at national (1), department (2), or
  # district (3) level. Returns NULL for unsupported levels.
  if(!level %in% 1:3) return(NULL)
  
  # download vaccines (archive containing one large CSV)
  zip <- tempfile()
  url <- "https://cloud.minsa.gob.pe/s/To2QtqoNjKqobfw/download"
  download.file(url, zip, mode = "wb", quiet = TRUE)
  
  # unzip, read, and delete
  # NOTE: requires the external 7-Zip binary (7za) on the PATH
  x <- data.table::fread(cmd = sprintf("7za x -so %s", zip), showProgress = FALSE)
  unlink(zip)
  
  # format
  x <- map_data(x, c(
    "FECHA_VACUNACION" = "date",
    "DOSIS" = "dose",
    "FABRICANTE" = "type",
    "DEPARTAMENTO" = "department",
    "PROVINCIA" = "province",
    "DISTRITO" = "district"
  ))
  
  # grouping columns by level.
  # switch on a character key: with a numeric EXPR, switch() matches by
  # position and silently ignores the names, which breaks if the
  # alternatives are ever reordered
  by <- switch(as.character(level),
    "1" = c("date"),
    "2" = c("date", "department"),
    "3" = c("date", "department", "district"))
    
  # vaccines
  x <- x %>%
    # for each date and area (one row per administered dose)
    group_by_at(by) %>%
    # compute people vaccinated and total doses
    summarise(
      vaccines = n(),
      people_vaccinated = sum(dose==1),
      people_fully_vaccinated = sum(dose==2)) %>%
    # group by area (empty selection at level 1 leaves the frame ungrouped)
    group_by_at(by[-1]) %>%
    # sort by date
    arrange(date) %>%
    # cumulate
    mutate(
      vaccines = cumsum(vaccines),
      people_vaccinated = cumsum(people_vaccinated),
      people_fully_vaccinated = cumsum(people_fully_vaccinated))
    
  # dates are stored as yyyymmdd integers
  x$date <- as.Date(as.character(x$date), format = "%Y%m%d")
  
  return(x)
}


================================================
FILE: R/ds_gouv.fr.R
================================================
#' Santé Publique France
#'
#' Data source for: France and overseas territories
#'
#' @param level 1, 2, 3
#' @param reg filter by region code
#' @param dep filter by department code
#'
#' @section Level 1:
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' 
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @source https://www.data.gouv.fr/fr/pages/donnees-coronavirus/
#'
#' @keywords internal
#'
gouv.fr <- function(level = 1, reg = NULL, dep = NULL){
  # Data for France at national (1), regional (2), or departmental (3)
  # level, optionally filtered by region (reg) or department (dep) code.
  # Returns NULL for unsupported levels.
  if(!level %in% 1:3) return(NULL)
  
  # Download a vaccine-doses file and compute the cumulative total of
  # administered doses. The same layout is used at all three levels:
  # rows with vaccin==0 hold the totals over all vaccine types, and the
  # n_cum_dose* columns hold cumulative counts by dose number.
  read.doses <- function(url){
    x.doses <- read.csv(url, sep = ";")
    x.doses %>%
      # filter by total vaccines
      filter(vaccin==0) %>%
      # for each row
      rowwise() %>%
      # compute the sum of all doses
      mutate(vaccines = sum(c_across(starts_with("n_cum_dose")))) %>%
      # rename date
      mutate(date = jour)
  }
  
  if(level==1){
    
    # download cases
    # see https://www.data.gouv.fr/fr/datasets/synthese-des-indicateurs-de-suivi-de-lepidemie-covid-19/#_
    url.cases <- "https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617"
    x.cases <- read.csv(url.cases)
    
    # format cases
    x.cases <- map_data(x.cases, c(
      "date" = "date",
      "hosp" = "hosp",
      "rea" = "icu",
      "dc_tot" = "deaths"
    ))
    
    # download tests
    # see https://www.data.gouv.fr/fr/datasets/donnees-de-laboratoires-pour-le-depistage-a-compter-du-18-05-2022-si-dep/
    url.tests <- "https://www.data.gouv.fr/fr/datasets/r/d349accb-56ef-4b53-b218-46c2a7f902e0"
    x.tests <- read.csv(url.tests, sep = ";", dec = ",")
    
    # format tests
    x.tests <- map_data(x.tests, c(
      "jour" = "date",
      "T"    = "tests"
    )) 
    
    # daily test totals, then cumulate by date
    x.tests <- x.tests %>%
      group_by(date) %>%
      summarise(tests = as.integer(sum(tests))) %>%
      arrange(date) %>%
      mutate(tests = cumsum(tests))
    
    # download people vaccinated
    # see https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19-1/
    url.vacc <- "https://www.data.gouv.fr/fr/datasets/r/efe23314-67c4-45d3-89a2-3faef82fae90"
    x.vacc  <- read.csv(url.vacc, sep = ";")
    
    # format people vaccinated
    x.vacc <- map_data(x.vacc, c(
      "jour" = "date",
      "n_cum_dose1" = "people_vaccinated",
      "n_cum_complet" = "people_fully_vaccinated"
    ))
    
    # download and compute vaccine doses
    # see https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19-1/
    x.doses <- read.doses("https://www.data.gouv.fr/fr/datasets/r/b273cf3b-e9de-437c-af55-eda5979e92fc")
    
    # merge
    by <- "date"
    x <- x.cases %>%
      full_join(x.tests, by = by) %>%
      full_join(x.doses, by = by) %>%
      full_join(x.vacc, by = by)
    
  }
  
  if(level==2){
    
    # download cases
    # see https://www.data.gouv.fr/fr/datasets/synthese-des-indicateurs-de-suivi-de-lepidemie-covid-19/#_
    url.cases <- "https://www.data.gouv.fr/fr/datasets/r/5c4e1452-3850-4b59-b11c-3dd51d7fb8b5"
    x.cases <- read.csv(url.cases)
    
    # format cases
    x.cases <- map_data(x.cases, c(
      "date" = "date",
      "reg"  = "reg",
      "hosp" = "hosp",
      "rea" = "icu",
      "dchosp" = "deaths",
      "pos" = "confirmed"
    ))
    
    # cases: the file is departmental, so aggregate up to regions
    x.cases <- x.cases %>%
      # for each date and region
      dplyr::group_by(reg, date) %>%
      # compute total counts
      dplyr::summarise(
        hosp = sum(hosp),
        icu = sum(icu),
        deaths = sum(deaths),
        confirmed = sum(confirmed)) %>%
      # group by region
      dplyr::group_by(reg) %>%
      # sort by date
      dplyr::arrange(date) %>%
      # cumulate confirmed cases (deaths are already cumulative)
      dplyr::mutate(confirmed = cumsum(confirmed))
    
    # download tests
    # see https://www.data.gouv.fr/fr/datasets/donnees-de-laboratoires-pour-le-depistage-a-compter-du-18-05-2022-si-dep/
    url.tests <- "https://www.data.gouv.fr/fr/datasets/r/8b382611-4b86-41ff-9e58-9ee638a6d564"
    x.tests <- read.csv(url.tests, sep = ";", dec = ",")
    
    # format tests
    x.tests <- map_data(x.tests, c(
      "reg"  = "reg",
      "jour" = "date",
      "T"    = "tests"
    )) 
    
    # daily test totals per region, then cumulate by date
    x.tests <- x.tests %>% 
      group_by(reg, date) %>%
      summarise(tests = as.integer(sum(tests))) %>%
      group_by(reg) %>%
      arrange(date) %>%
      mutate(tests = cumsum(tests))
    
    # download people vaccinated
    # see https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19-1/
    url.vacc <- "https://www.data.gouv.fr/fr/datasets/r/735b0df8-51b4-4dd2-8a2d-8e46d77d60d8"
    x.vacc  <- read.csv(url.vacc, sep = ";")
    
    # format people vaccinated
    x.vacc <- map_data(x.vacc, c(
      "jour" = "date",
      "reg" = "reg",
      "n_cum_dose1" = "people_vaccinated",
      "n_cum_complet" = "people_fully_vaccinated"
    ))
    
    # download and compute vaccine doses
    # see https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19-1/
    x.doses <- read.doses("https://www.data.gouv.fr/fr/datasets/r/900da9b0-8987-4ba7-b117-7aea0e53f530")
    
    # merge
    by <- c("date", "reg")
    x <- x.cases %>%
      full_join(x.tests, by = by) %>%
      full_join(x.doses, by = by) %>%
      full_join(x.vacc, by = by)
    
  }
  
  if(level==3){
    
    # download cases
    # see https://www.data.gouv.fr/fr/datasets/synthese-des-indicateurs-de-suivi-de-lepidemie-covid-19/#_
    url.cases <- "https://www.data.gouv.fr/fr/datasets/r/5c4e1452-3850-4b59-b11c-3dd51d7fb8b5"
    x.cases <- read.csv(url.cases)
    
    # format cases
    x.cases <- map_data(x.cases, c(
      "date" = "date",
      "dep"  = "dep",
      "hosp" = "hosp",
      "rea" = "icu",
      "dchosp" = "deaths",
      "pos" = "confirmed"
    )) 
    
    # cumulate confirmed cases per department (deaths are already cumulative)
    x.cases <- x.cases %>%
      dplyr::group_by(dep) %>%
      dplyr::arrange(date) %>%
      dplyr::mutate(confirmed = cumsum(confirmed))
    
    # download tests
    # see https://www.data.gouv.fr/fr/datasets/donnees-de-laboratoires-pour-le-depistage-a-compter-du-18-05-2022-si-dep/
    url.tests <- "https://www.data.gouv.fr/fr/datasets/r/674bddab-6d61-4e59-b0bd-0be535490db0"
    x.tests <- read.csv(url.tests, sep = ";", dec = ",")
    
    # format tests
    x.tests <- map_data(x.tests, c(
      "dep"  = "dep",
      "jour" = "date",
      "T"    = "tests"
    )) 
    
    # daily test totals per department, then cumulate by date
    x.tests <- x.tests %>% 
      group_by(dep, date) %>%
      summarise(tests = as.integer(sum(tests))) %>%
      group_by(dep) %>%
      arrange(date) %>%
      mutate(tests = cumsum(tests))
    
    # download people vaccinated
    # see https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19-1/
    url.vacc <- "https://www.data.gouv.fr/fr/datasets/r/4f39ec91-80d7-4602-befb-4b522804c0af"
    x.vacc <- read.csv(url.vacc, sep = ";")
    
    # format people vaccinated
    x.vacc <- map_data(x.vacc, c(
      "jour" = "date",
      "dep" = "dep",
      "n_cum_dose1" = "people_vaccinated",
      "n_cum_complet" = "people_fully_vaccinated"
    ))
    
    # download and compute vaccine doses
    # see https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19-1/
    x.doses <- read.doses("https://www.data.gouv.fr/fr/datasets/r/535f8686-d75d-43d9-94b3-da8cdf850634")
    
    # merge
    by <- c("date", "dep")
    x <- x.cases %>%
      full_join(x.tests, by = by) %>%
      full_join(x.doses, by = by) %>%
      full_join(x.vacc, by = by)
    
  }
  
  # convert to date
  x$date <- as.Date(x$date)
  
  # optional filters by region or department code
  if(!is.null(reg))
    x <- x[which(x$reg==reg),]
  if(!is.null(dep))
    x <- x[which(x$dep==dep),]
  
  return(x)
}


================================================
FILE: R/ds_gov.co.R
================================================
#' Instituto Nacional de Salud
#'
#' Data source for: Colombia
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' 
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - recovered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://www.datos.gov.co/browse?category=Salud+y+Protecci%C3%B3n+Social&q=covid&sortBy=relevance
#'
#' @keywords internal
#'
gov.co <- function(level){
  # Colombia. Levels: 1 = country, 2 = department, 3 = municipality.
  if(!level %in% 1:3) return(NULL)
  
  # confirmed, recovered, deaths
  # the number of rows in this file matches the number of confirmed cases reported by other sources (e.g. JHU)
  # this file can be aggregated to compute confirmed, recovered, and deaths at all levels
  # see https://www.datos.gov.co/Salud-y-Protecci-n-Social/Casos-positivos-de-COVID-19-en-Colombia/gt2j-8ykr
  url <- 'https://www.datos.gov.co/resource/gt2j-8ykr.csv?$limit=9999999999'
  x.cases <- read.csv(url, encoding = "UTF-8")
  # format: one row per confirmed case, with event dates and location codes
  x.cases <- map_data(x.cases, c(
    'fecha_reporte_web'     = 'date_confirmed',
    'fecha_muerte'          = 'date_deaths',
    'fecha_recuperado'      = 'date_recovered',
    'departamento'          = 'state_code',
    'ciudad_municipio'      = 'city_code',
    'departamento_nom'      = 'state',
    'ciudad_municipio_nom'  = 'city'
  ))
  # compute cumulative cases, deaths, and recovered
  x.cases <- x.cases %>%
    # pivot: one row per case and per available event (confirmed/deaths/recovered)
    pivot_longer(cols = starts_with("date"), names_to = "type", values_to = "date", values_drop_na = TRUE, names_prefix = "date_") %>%
    # convert date
    mutate(date = as.Date(date, format = "%d/%m/%Y")) %>%
    # keep only valid dates (unparsable or empty dates become NA above)
    filter(!is.na(date)) %>%
    # group by city, date, and type of metrics
    group_by(state_code, city_code, date, type) %>%
    # compute the counts for each metric (daily increments, cumulated below)
    summarise(n = n())
  
  if(level==1 | level==2){
    
    # pcr tests
    # see https://www.datos.gov.co/Salud-y-Protecci-n-Social/Pruebas-PCR-procesadas-de-COVID-19-en-Colombia-Dep/8835-5baf
    url <- 'https://www.datos.gov.co/resource/8835-5baf.csv?$limit=9999999999'
    x.pcr <- read.csv(url, encoding="UTF-8")
    # format and sanitize: one column per department in the source; drop the
    # district-city columns and the derived/unknown aggregate columns, then
    # make the table long with one row per date and department
    x.pcr <- x.pcr %>%
      rename(date = fecha) %>%
      mutate(date = as.Date(date, format = "%Y-%m-%d")) %>%
      filter(!is.na(date)) %>%
      select(-c("cartagena", "barranquilla", "santa_marta",
                "indeterminadas", "procedencia_desconocida",
                "positivas_acumuladas", "negativas_acumuladas", 
                "positividad_acumulada")) %>%
      pivot_longer(cols = -1, names_to = 'state', values_to = 'n', values_drop_na = TRUE)
    
    # antigen tests
    # see https://www.datos.gov.co/Salud-y-Protecci-n-Social/Ant-geno-procesadas-de-COVID-19-en-Colombia-Depart/ci85-cyhe
    url <- 'https://www.datos.gov.co/resource/ci85-cyhe.csv?$limit=9999999999'
    x.ant <- read.csv(url, encoding="UTF-8")
    # format and sanitize (make names compatible with the PCR dataset)
    x.ant <- x.ant %>%
      rename(
        date = mes,
        narino = nari_o, 
        norte_de_santander = norte_santander,
        bogota = bogota_dc) %>%
      mutate(date = as.Date(date, format = "%Y-%m-%d")) %>%
      filter(!is.na(date)) %>%
      pivot_longer(cols = -1, names_to = 'state', values_to = 'n', values_drop_na = TRUE)
    
    if(level==1){
      
      # confirmed, recovered, deaths: national daily totals, then cumulated
      x.cases <- x.cases %>%
        group_by(date, type) %>%
        summarise(n = sum(n)) %>%
        group_by(type) %>%
        arrange(date) %>%
        mutate(n = cumsum(n)) %>%
        pivot_wider(id_cols = "date", names_from = "type", values_from = "n")
      
      # pcr tests: the "acumuladas" column is the national cumulative series
      x.pcr <- x.pcr %>%
        filter(state=="acumuladas") %>%
        rename(pcr = n)
      
      # antigen tests: national total as the sum over departments
      x.ant <- x.ant %>%
        group_by(date) %>%
        summarise(antigen = sum(n))
      
      # merge: tests = antigen + pcr (NA on dates where either series is missing)
      by <- "date"
      x <- x.cases %>%
        full_join(x.pcr, by = by) %>%
        full_join(x.ant, by = by) %>%
        mutate(tests = antigen + pcr)
      
    }
    
    if(level==2){
      
      # confirmed, recovered, deaths by department, cumulated over time.
      # NOTE(review): 13001, 8001, 47001 are the codes of the district cities
      # (Cartagena, Barranquilla, Santa Marta), which the PCR dataset above
      # also excludes; this assumes those codes appear in state_code (not
      # city_code) in the source data — verify against the dataset
      x.cases <- x.cases %>%
        filter(!state_code %in% c(13001, 8001, 47001)) %>%
        group_by(date, state_code, type) %>%
        summarise(n = sum(n)) %>%
        group_by(type, state_code) %>%
        arrange(date) %>%
        mutate(n = cumsum(n)) %>%
        pivot_wider(id_cols = c("date", "state_code"), names_from = "type", values_from = "n")

      # pcr tests: keep the per-department rows only
      x.pcr <- x.pcr %>%
        filter(state!="acumuladas") %>%
        rename(pcr = n)
      
      # antigen tests
      x.ant <- x.ant %>%
        rename(antigen = n)
      
      # map code to state as used in the PCR and antigen datasets      
      db <- extdata("db/COL.csv")
      idx <- which(db$administrative_area_level==2)
      map <- db$id_gov.co[idx]
      names(map) <- as.integer(db$key_local[idx])
      x.cases$state <- map_values(x.cases$state_code, map = map)
      
      # merge 
      by <- c("date", "state")
      x <- x.cases %>%
        full_join(x.pcr, by = by) %>%
        full_join(x.ant, by = by) %>%
        mutate(tests = antigen + pcr)
      
    }
    
  }
  
  if(level==3){
    
    # this file does not sum up to the vaccines for level 1 (from e.g. Our World in Data)
    # use only for level 3 as it is: do not aggregate to upper levels
    # see https://www.datos.gov.co/Salud-y-Protecci-n-Social/Coberturas-de-Vacunaci-n-contra-COVID-19/8cgj-t5ds
    url <- 'https://www.datos.gov.co/resource/8cgj-t5ds.csv?$limit=9999999999'
    x.vacc <- read.csv(url, encoding="UTF-8")
    # format
    x.vacc <- map_data(x.vacc, c(
      "fecha_de_corte"     = "date",
      "municipio_divipola" = "city_code",
      "grupo_edad"         = "age",
      "n_mero_acumulado_de_1_dosis" = "people_vaccinated",
      "n_mero_acumulado_de_esquema" = "people_fully_vaccinated"
    ))
    # sanitize: keep all-ages rows with a valid municipality code and date
    x.vacc <- x.vacc %>%
      mutate(date = as.Date(date, format = "%Y-%m-%d")) %>%
      filter(age=="Todas" & city_code!="No definido" & !is.na(date)) %>%
      mutate(city_code = as.integer(city_code))
      
    # confirmed, recovered, deaths by municipality, cumulated over time
    x.cases <- x.cases %>%
      group_by(type, city_code) %>%
      arrange(date) %>%
      mutate(n = cumsum(n)) %>%
      pivot_wider(id_cols = c("date", "city_code"), names_from = "type", values_from = "n")
    
    # merge
    by <- c("date", "city_code")
    x <- x.cases %>%
      full_join(x.vacc, by = by)
    
  }
  
  return(x)
}


================================================
FILE: R/ds_gov.lv.R
================================================
#' Center for Disease Prevention and Control
#'
#' Data source for: Latvia
#'
#' @param level 1, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - tests
#'
#' @section Level 3:
#' - confirmed cases
#'
#' @source https://data.gov.lv/dati/eng/dataset?q=covid&res_format=CSV&sort=score+desc%2C+metadata_modified+desc
#'
#' @keywords internal
#'
gov.lv <- function(level){
  # only national (1) and administrative-territory (3) data are available
  if(!level %in% c(1, 3)) return(NULL)
  
  if(level==1){
    
    # national daily tests, cases, and deaths
    # see https://data.gov.lv/dati/eng/dataset/covid-19
    url <- 'https://data.gov.lv/dati/dataset/f01ada0a-2e77-4a82-8ba2-09cf0cf90db3/resource/d499d2f0-b1ea-4ba2-9600-2c701b03bd4a/download/covid_19_izmeklejumi_rezultati.csv'
    x <- read.csv(url, sep = ";")
    
    # map columns to standard names
    x <- map_data(x, c(
      'Datums' = "date",
      'TestuSkaits' = 'tests',
      'ApstiprinataCOVID19InfekcijaSkaits' = 'confirmed',
      'MirusoPersonuSkaits' = 'deaths'
    ))
    
    # sort chronologically and cumulate the daily counts
    # (dates are zero-padded "YYYY.MM.DD." strings, so lexicographic order
    # coincides with chronological order)
    x <- arrange(x, date)
    x <- mutate(x, across(c("confirmed", "deaths", "tests"), cumsum))
    
    # parse the date
    x$date <- as.Date(x$date, format="%Y.%m.%d.")
    
  }
  
  if(level==3){
    
    # cumulative cases by administrative territory
    # see https://data.gov.lv/dati/eng/dataset/covid-19-pa-adm-terit
    url <- 'https://data.gov.lv/dati/dataset/e150cc9a-27c1-4920-a6c2-d20d10469873/resource/492931dd-0012-46d7-b415-76fe0ec7c216/download/covid_19_pa_adm_terit.csv'
    x <- read.csv(url, sep = ";", fileEncoding = "UTF-8-BOM", encoding = "Latin1")
    
    # map columns to standard names
    x <- map_data(x, c(
      'Datums' = 'date',
      'AdministrativiTeritorialasVienibasNosaukums' = 'region',
      'ATVK' = 'atvk',
      'ApstiprinataCOVID19infekcija' = 'confirmed'
    ))
    
    # drop country-level rows: they carry 'Nav' instead of a territory code
    x <- subset(x, atvk != 'Nav')
    
    # counts reported as a range starting with "no 1" are floored to 1
    x$confirmed[grepl("^no 1", x$confirmed)] <- 1
    
    # convert types
    x$date <- as.Date(x$date, format="%Y.%m.%d.")
    x$confirmed <- as.integer(x$confirmed)
    x$atvk <- as.integer(x$atvk)
    
  }
  
  return(x)
}


================================================
FILE: R/ds_gov.si.R
================================================
#' Ministry of Health and National Institute for Public health
#'
#' Data source for: Slovenia
#'
#' @param level 1
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - tests
#' - hospitalizations
#' - intensive care
#'
#' @source https://www.gov.si/en/topics/coronavirus-disease-covid-19/actual-data/
#'
#' @keywords internal
#'
gov.si <- function(level){
  # national data only
  if(level!=1) return(NULL)
  
  # official all-data workbook, first sheet
  url <- 'https://www.gov.si/assets/vlada/Koronavirus-podatki/EN_Covid-19-all-data-v2.xlsx'
  x <- read.excel(url, sheet = 1)
  
  # map columns to standard names
  x <- map_data(x, c(
    'Date'                                         = 'date',
    'Tested (all)'                                 = 'tests',
    'Positive (all)'                               = 'confirmed',
    'All hospitalized on certain day'              = 'hosp',
    'All persons in intensive care on certain day' = 'icu',
    'Deaths (all)'                                 = 'deaths'
  ))
  
  # dates arrive as Excel serial numbers: 1899-12-30 is Excel's day zero
  serial <- suppressWarnings(as.numeric(x$date))
  x$date <- as.Date(serial, origin = "1899-12-30")
  # drop rows whose date could not be parsed
  x <- x[!is.na(x$date),]
  
  return(x)
}


================================================
FILE: R/ds_gov.tw.R
================================================
#' Ministry of Health and Welfare of Taiwan
#'
#' Data source for: Taiwan
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - tests
#'
#' @section Level 2:
#' - confirmed cases
#'
#' @source https://data.gov.tw
#'
#' @keywords internal
#'
gov.tw <- function(level) {
  # levels: 1 = country, 2 = county
  if(!level %in% 1:2) return(NULL)

  # daily confirmed cases by county, town, gender, origin, and age group
  url <- "https://od.cdc.gov.tw/eic/Day_Confirmation_Age_County_Gender_19CoV.csv"
  x   <- read.csv(url, encoding = "UTF-8")
  
  # drop the leading label column and assign English column names
  x <- x[,-1]
  names(x) <- c("date", "county", "city", "gender", "imported", "age_group", "confirmed")
  
  # parse dates of the form YYYY/MM/DD
  x$date <- as.Date(x$date, "%Y/%m/%d")

  if(level == 1) {
    
    # specimens tested nationwide
    # see https://data.gov.tw/dataset/120451
    url.tests <- "https://od.cdc.gov.tw/eic/covid19/covid19_tw_specimen.csv"
    x.tests   <- read.csv(url.tests, encoding = "UTF-8")
    names(x.tests) <- c("date", "notification", "home quarantine", "monitoring", "total")
    
    # sort chronologically, cumulate the daily totals, and keep date + tests
    x.tests$date  <- as.Date(x.tests$date, "%Y/%m/%d")
    x.tests       <- x.tests[order(x.tests$date),]
    x.tests$tests <- cumsum(x.tests$total)
    x.tests       <- x.tests[!is.na(x.tests$tests), c("date", "tests")]
    
    # national confirmed cases: daily totals, cumulated over time
    x <- x %>% 
      count(date, wt = confirmed, name = "confirmed") %>%
      arrange(date) %>%
      mutate(confirmed = cumsum(confirmed))
      
    # join cases and tests on date
    x <- full_join(x, x.tests, by = "date")
    
  }
  
  if(level == 2) {
    
    # discard rows whose county is unassigned ("空值" means empty value)
    x <- subset(x, county!="空值")

    # confirmed cases by date and county, cumulated within each county
    x <- x %>% 
      count(date, county, wt = confirmed, name = "confirmed") %>%
      group_by(county) %>%
      arrange(date) %>%
      mutate(confirmed = cumsum(confirmed))
    
  }
    
  # guard against reporting dates in the future
  x <- x[x$date<=Sys.Date(),]
  
  return(x)
}


================================================
FILE: R/ds_gov.uk.R
================================================
#' UK Health Security Agency
#'
#' Data source for: United Kingdom
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - patients requiring ventilation
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - patients requiring ventilation
#' 
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - patients requiring ventilation
#'
#' @source https://coronavirus.data.gov.uk
#'
#' @keywords internal
#'
gov.uk <- function(level){
  # United Kingdom. Levels: 1 = UK ("overview"), 2 = nations, 3 = lower tier
  # local authorities ("ltla"). Queries the coronavirus.data.gov.uk v1 API.
  
  # Extracts paginated data by requesting all of the pages
  # and combining the results.
  #
  # @param filters    API filters. See the API documentations for 
  #                   additional information.
  #                   
  # @param structure  Structure parameter. See the API documentations 
  #                   for additional information.
  #                   
  # @return list      Comprehensive list of dictionaries containing all 
  #                   the data for the given ``filter`` and ``structure`.`
  get_paginated_data <- function (filters, structure) {
    
    endpoint     <- "https://api.coronavirus.data.gov.uk/v1/data"
    results      <- list()
    current_page <- 1
    
    repeat {
      
      # retry loop: up to 4 attempts per page with a jittered backoff
      i <- 1
      repeat{
        
        httr::GET(
          url   = endpoint,
          query = list(
            filters   = paste(filters, collapse = ";"),
            structure = jsonlite::toJSON(structure, auto_unbox = TRUE),
            page      = current_page
          ),
          httr::timeout(30)
        ) -> response
        
        # Handle errors: retry on HTTP >= 400, give up after the 4th failure
        if ( response$status_code >= 400 ) {
          i <- i+1
          if(i<5)
            Sys.sleep(i+runif(1))
          else
            stop(httr::http_status(response))
        } else break
        
      }
      
      # Handle errors: 204 (no content) marks the end of the data
      if ( response$status_code == 204 ) {
        break
      }
      
      # Convert response from binary to JSON:
      json_text <- httr::content(response, "text")
      dt        <- jsonlite::fromJSON(json_text)
      results   <- rbind(results, dt$data)
      
      # stop when the API reports no further page
      if ( is.null( dt$pagination$`next` ) ){
        break
      }
      
      current_page <- current_page + 1
      
    }
    
    return(results)
    
  }
  
  # map the level to the API area type (positional switch on 1/2/3)
  area_type <- switch (level, "overview", "nation", "ltla")
  
  # download
  x <- NULL
  for(a in area_type){
    
    # create filters
    filters <- c(
      sprintf("areaType=%s", a)
    )
    
    # vaccination metrics: by vaccination date for local authorities,
    # by publish date for the UK and the nations
    dose1 <- "cumPeopleVaccinatedFirstDoseByVaccinationDate"
    dose2 <- "cumPeopleVaccinatedSecondDoseByVaccinationDate"
    if(a %in% c("overview", "nation")){
      dose1 <- "cumPeopleVaccinatedFirstDoseByPublishDate"
      dose2 <- "cumPeopleVaccinatedSecondDoseByPublishDate"
    }
    
    # tests metrics: by specimen date, except by publish date for the UK
    tests <- "cumVirusTestsBySpecimenDate"
    if(a == "overview"){
      tests <- "cumVirusTestsByPublishDate"
    }
    
    # create structure: maps our column names to the API metric names
    structure <- list(
      "date"       = "date",
      "type"       = "areaType",
      "name"       = "areaName",
      "code"       = "areaCode",
      "confirmed"  = "cumCasesBySpecimenDate",
      "deaths"     = "cumDeaths28DaysByDeathDate",
      "tests"      =  tests,
      "vent"       = "covidOccupiedMVBeds",
      "hosp"       = "hospitalCases",
      "vaccines"   = "cumVaccinesGivenByPublishDate",
      "people_vaccinated" = dose1,
      "people_fully_vaccinated" = dose2
    )
    
    # download data
    x <- dplyr::bind_rows(x, get_paginated_data(filters, structure))
    
  }
  
  # clean: keep a single observation per area code and date
  x <- x[!duplicated(x[,c("date","code")]),]
  
  # date
  x$date <- as.Date(x$date)
  
  return(x) 
}


================================================
FILE: R/ds_gv.at.R
================================================
#' Federal Ministry of Social Affairs, Health, Care and Consumer Protection, Austria
#'
#' Data source for: Austria
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - recovered
#'
#' @source https://www.data.gv.at/covid-19/
#'
#' @keywords internal
#'
gv.at <- function(level){
  # Austria. Levels: 1 = country, 2 = state (Bundesland), 3 = district (Bezirk).
  if(!level %in% 1:3) return(NULL)
  
  if(level==1 | level==2){

    # hospital and ICU occupancy, tests
    # see https://www.data.gv.at/katalog/dataset/846448a5-a26e-4297-ac08-ad7040af20f1
    url.hosp <- "https://covid19-dashboard.ages.at/data/Hospitalisierung.csv"    
    
    # cases, recovered, deaths timeline
    # see https://www.data.gv.at/katalog/dataset/ef8e980b-9644-45d8-b0e9-c6aaf0eff0c0
    url.cases <- "https://covid19-dashboard.ages.at/data/CovidFaelle_Timeline.csv"
    
    # vaccine doses by state, vaccine type, and dose number
    # see https://www.data.gv.at/katalog/dataset/85d040af-e09a-4401-8d67-8cee3e41fcaa
    url.vacc <- "https://info.gesundheitsministerium.gv.at/data/COVID19_vaccination_doses_timeline_v202206.csv"
    
    # import hosp
    x.hosp <- read.csv(url.hosp, sep = ";")
    x.hosp <- map_data(x.hosp, c(
      "Meldedatum"   = "date",
      "Bundesland"   = "state",
      "BundeslandID" = "state_id",
      "TestGesamt"   = "tests",
      "NormalBettenBelCovid19"   = "hosp",
      "IntensivBettenBelCovid19" = "icu"
    )) 
    
    # import cases
    x.cases <- read.csv(url.cases, sep = ";")
    x.cases <- map_data(x.cases, c(
      "Time"             = "date",
      "Bundesland"       = "state",
      "BundeslandID"     = "state_id",
      "AnzEinwohner"     = "population",
      "AnzahlFaelleSum"  = "confirmed",
      "AnzahlGeheiltSum" = "recovered",
      "AnzahlTotSum"     = 'deaths'
    ))
    
    # import vaccines (source column name indicates cumulative dose counts)
    x.vacc <- read.csv(url.vacc, sep = ";", fileEncoding = "UTF-8-BOM")
    x.vacc <- map_data(x.vacc, c(
      "date" = "date",
      "state_id" = "state_id",
      "vaccine" = "type",
      "dose_number" = "dose",
      "doses_administered_cumulative" = "n"
    ))
    
    # format date
    x.hosp$date <- as.Date(x.hosp$date, format = "%d.%m.%Y")
    x.cases$date <- as.Date(x.cases$date, format = "%d.%m.%Y")
    x.vacc$date <- as.Date(x.vacc$date, format = "%Y-%m-%d")
    
    # first, second, and total doses by state.
    # Janssen is a single-shot vaccine, so its first dose also completes the
    # schedule; pmin caps people_fully_vaccinated at people_vaccinated
    x.vacc <- x.vacc %>%
      filter(state_id != 0) %>%
      group_by(date, state_id) %>%
      summarise(
        vaccines = sum(n),
        people_vaccinated = sum(n[dose == "1"]),
        people_fully_vaccinated = sum(n[dose == "2" | (dose == "1" & type == "Janssen")])) %>%
      mutate(people_fully_vaccinated = pmin(people_fully_vaccinated, people_vaccinated))
    
    if(level==1){
      
      # national level data (state_id 10 is the whole of Austria)
      x.cases <- x.cases[which(x.cases$state_id==10),]
      x.hosp  <- x.hosp[which(x.hosp$state_id==10),]
      x.vacc  <- x.vacc[which(x.vacc$state_id==10),]
      
      # merge
      x <- x.cases %>%
        full_join(x.hosp, by = "date") %>%
        full_join(x.vacc, by = "date")
      
    }
    
    if(level == 2){
      
      # drop national level data (state_id 10).
      # use which(!= 10) instead of -which(== 10): if no row matched,
      # -which() would yield a zero-length index and silently drop ALL rows
      x.cases <- x.cases[which(x.cases$state_id!=10),]
      x.hosp  <- x.hosp[which(x.hosp$state_id!=10),]
      x.vacc  <- x.vacc[which(x.vacc$state_id!=10),]
      
      # merge
      x <- x.cases %>%
        full_join(x.hosp, by = c("date","state_id")) %>%
        full_join(x.vacc, by = c("date","state_id"))
      
    }
    
  }
  
  if(level == 3){
    
    # cases, recovered, deaths by district
    # see https://www.data.gv.at/katalog/dataset/4b71eb3d-7d55-4967-b80d-91a3f220b60c
    url <- "https://covid19-dashboard.ages.at/data/CovidFaelle_Timeline_GKZ.csv"
    
    # download
    x <- read.csv(url, sep = ";")
    
    # format
    x <- map_data(x, c(
      "Time"             = "date",
      "Bezirk"           = "city",
      "GKZ"              = "city_id",
      "AnzEinwohner"     = "population",
      "AnzahlFaelleSum"  = "confirmed",
      "AnzahlGeheiltSum" = "recovered",
      "AnzahlTotSum"     = 'deaths'
    ))
    
    # convert date
    x$date <- as.Date(x$date, format = "%d.%m.%Y")
    
  }
  
  return(x)
}


================================================
FILE: R/ds_healthdata.gov.R
================================================
#' U.S. Department of Health & Human Services
#'
#' Data source for: United States
#'
#' @param level 2
#'
#' @section Level 2:
#' - tests
#' - hospitalizations
#' - intensive care
#'
#' @source https://healthdata.gov/browse?tags=hhs+covid-19
#'
#' @keywords internal
#'
healthdata.gov <- function(level){
  # state-level data only
  if(level!=2) return(NULL)
  
  # hospital utilization by state
  # see https://healthdata.gov/d/g62h-syeh
  url.hosp <- "https://healthdata.gov/api/views/g62h-syeh/rows.csv?accessType=DOWNLOAD"
  x.hosp <- read.csv(url.hosp)
  
  # map columns to standard names
  x.hosp <- map_data(x.hosp, c(
    "state" = "state",
    "date" = "date",
    "total_adult_patients_hospitalized_confirmed_and_suspected_covid" = "hosp",
    "staffed_icu_adult_patients_confirmed_and_suspected_covid" = "icu"
  ))
  
  # diagnostic test results by state and outcome
  # see https://healthdata.gov/d/j8mb-icvb
  url.tests <- "https://healthdata.gov/api/views/j8mb-icvb/rows.csv?accessType=DOWNLOAD"
  x.tests <- read.csv(url.tests)
  
  # map columns to standard names
  x.tests <- map_data(x.tests, c(
    "state" = "state",
    "date" = "date",
    "total_results_reported" = "tests"
  ))
  
  # sum over outcomes to obtain the total number of tests
  x.tests <- x.tests %>% 
    group_by(date, state) %>%
    summarise(tests = sum(tests))
  
  # join tests and hospitalizations
  x <- full_join(x.tests, x.hosp, by = c("state", "date"))
  
  # drop Marshall Islands
  x <- subset(x, state != "MH")
  
  # parse ISO dates
  x$date <- as.Date(x$date)
  
  return(x)
}


================================================
FILE: R/ds_humdata.af.R
================================================
#' Afghanistan Ministry of Health
#'
#' Data source for: Afghanistan
#'
#' @param level 2
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#'
#' @source https://data.humdata.org/dataset/afghanistan-covid-19-statistics-per-province
#'
#' @keywords internal
#'
humdata.af <- function(level){
  # province-level data only
  if(level!=2) return(NULL)
  
  # download the spreadsheet published on HDX
  url <- "https://docs.google.com/spreadsheets/d/1F-AMEDtqK78EA6LYME2oOsWQsgJi4CT3V_G4Uo-47Rg/export?format=csv"
  x <- read.csv(url)
  
  # drop the first row (sub-header) and map columns to standard names
  x <- map_data(x[-1,], c(
    'Province'   = 'state',
    'Date'       = 'date',
    'Cases'      = 'confirmed',
    'Deaths'     = 'deaths',
    'Recoveries' = 'recovered',
    'Tests'      = 'tests'
  ))
  
  # lowercase the province names and strip a trailing "province" suffix
  state <- gsub("[^a-z]+province.*$", "", tolower(x$state))
  # harmonize spelling variants via a lookup table
  fix <- c(
    "dykundi"   = "daykundi",
    "hirat"     = "herat",
    "jawzjan"   = "jowzjan",
    "nimroz"    = "nimruz",
    "nooristan" = "nuristan",
    "paktya"    = "paktia",
    "panjshir"  = "panjsher")
  idx <- state %in% names(fix)
  state[idx] <- fix[state[idx]]
  # normalize the various spellings of Sar-e Pol (e.g. "sar-e-pul")
  state[grepl("^sar[^a-z].*p[ou]l$", state)] <- "sar-e pol"
  x$state <- state
  
  # strip non-numeric characters (separators, footnotes) and convert
  for(col in c("confirmed", "deaths", "recovered", "tests"))
    x[[col]] <- as.integer(gsub("[^0-9]", "", x[[col]]))
  
  # convert to date
  x$date <- as.Date(x$date, format = "%Y-%m-%d")
  
  # remove decreasing cumulative counts
  # the data are not clean and these issues are most likely manual entry mistakes
  x <- drop_decreasing(x, by = "state", cols = c("confirmed", "deaths", "recovered", "tests"), k = 1:7, strict = FALSE)
  
  return(x)
}


================================================
FILE: R/ds_humdata.ht.R
================================================
#' Ministry of Public Health and Population of Haiti
#'
#' Data source for: Haiti
#'
#' @param level 2
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#'
#' @source https://data.humdata.org/dataset/haiti-covid-19-subnational-cases
#'
#' @keywords internal
#'
humdata.ht <- function(level){
  # department-level data only
  if(level!=2) return(NULL)

  # download from the HXL proxy
  url <- "https://proxy.hxlstandard.org/data/738954/download/haiti-covid-19-subnational-data.csv"
  x   <- read.csv(url)
  
  # drop the first row (HXL tag row) and map columns to standard names
  x <- map_data(x[-1,], c(
    'Département'       = 'state',
    'Date'              = 'date',
    'Cumulative.cases'  = 'confirmed',
    'Cumulative.Deaths' = 'deaths'
  ))
  
  # lowercase department names and fix spelling variants
  x$state <- tolower(x$state)
  x$state[x$state=="grand anse"] <- "grandanse"
  x$state[x$state=="quest"] <- "ouest"
  
  # strip thousand separators and convert to integers
  x$confirmed <- as.integer(gsub(",", "", x$confirmed))
  x$deaths    <- as.integer(gsub(",", "", x$deaths))
  
  # convert to date (day-month-year)
  x$date <- as.Date(x$date, format = "%d-%m-%Y")
  
  # keep a single observation per department and day
  x <- distinct(x, state, date, .keep_all = TRUE)
  
  return(x)
}


================================================
FILE: R/ds_impfdashboard.de.R
================================================
#' Robert Koch Institute and the Federal Ministry of Health
#'
#' Data source for: Germany
#'
#' @param level 1
#'
#' @section Level 1:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://impfdashboard.de/en/data
#'
#' @keywords internal
#'
impfdashboard.de <- function(level){
  # national data only
  if(level!=1) return(NULL)
  
  # tab-separated vaccination time series
  url <- "https://impfdashboard.de/static/data/germany_vaccinations_timeseries_v2.tsv"
  x <- read.delim(url)
  
  # map columns to standard names
  x <- map_data(x, c(
    "date" = "date",
    "dosen_kumulativ" = "vaccines",
    "personen_erst_kumulativ" = "people_vaccinated",
    "personen_voll_kumulativ" = "people_fully_vaccinated"
  ))
  
  # parse ISO dates
  x$date <- as.Date(x$date)
  
  return(x) 
}


================================================
FILE: R/ds_isciii.es.R
================================================
#' Centro Nacional de Epidemiología
#'
#' Data source for: Spain
#'
#' @param level 2, 3
#'
#' @section Level 2:
#' - confirmed cases
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - hospitalizations
#' - intensive care
#'
#' @source https://cnecovid.isciii.es/covid19/#documentaci%C3%B3n-y-datos
#'
#' @keywords internal
#'
isciii.es <- function(level) {
  # levels: 2 = autonomous community, 3 = province
  if(!level %in% 2:3) return(NULL)
  
  if(level==2){
    
    # daily cases by autonomous community
    url <- 'https://cnecovid.isciii.es/covid19/resources/casos_diag_ccaadecl.csv'
    x   <- read.csv(url)
    
    # map columns to standard names
    x <- map_data(x, c(
      "fecha"     = "date",
      "ccaa_iso"  = "state",
      "num_casos" = "confirmed")) 
    
    # sort chronologically and cumulate the daily counts within each community
    x <- x %>%
      arrange(date) %>%
      group_by(state) %>%
      mutate(confirmed = cumsum(confirmed))
    
    # parse ISO dates
    x$date <- as.Date(x$date)
    
  }
  
  if(level==3) {
    
    # daily cases, hospital/ICU admissions and deaths by province, sex, and age
    url <- 'https://cnecovid.isciii.es/covid19/resources/casos_hosp_uci_def_sexo_edad_provres.csv'
    x <- read.csv(url)
    
    # map columns to standard names
    x <- map_data(x, c(
      "fecha"          = "date",
      "provincia_iso"  = "district",
      "num_casos"      = "confirmed",
      "num_hosp"       = "hosp",
      "num_uci"        = "icu",
      "num_def"        = "deaths")) 
    
    x <- x %>%
      # remove the unassigned province ("NC")
      filter(district!="NC") %>%
      # sum over sex and age groups
      group_by(date, district) %>%
      summarise(across(c(confirmed, hosp, icu, deaths), sum)) %>%
      # cumulate cases and deaths within each province over time.
      # NOTE(review): hosp and icu are left as daily values here, matching
      # the original implementation — confirm against downstream use
      group_by(district) %>%
      arrange(date) %>%
      mutate(across(c(confirmed, deaths), cumsum))
    
    # parse ISO dates
    x$date <- as.Date(x$date)
    
  }
  
  return(x)
}


================================================
FILE: R/ds_koronavirus.hr.R
================================================
#' Croatian Institute of Public Health
#'
#' Data source for: Croatia
#'
#' @param level 1, 2
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#'
#' @source https://www.koronavirus.hr/otvoreni-strojno-citljivi-podaci/526
#'
#' @keywords internal
#'
koronavirus.hr <- function(level) {
  # Croatia. Levels: 1 = country, 2 = county (zupanija).
  if(!level %in% 1:2) return(NULL)
  
  # download the per-day, per-county JSON feed
  url <- 'https://www.koronavirus.hr/json/?action=po_danima_zupanijama'
  x   <- jsonlite::fromJSON(url, flatten=TRUE)
  
  # make longer along regions (zupanija):
  # each row carries a date (Datum) and a nested frame of county records
  # (PodaciDetaljno); prepend the date to each nested frame and stack them
  x <- bind_rows(apply(x, 1, function(row) {
    cbind(date = row$Datum, row$PodaciDetaljno)
  }))

  # format
  x <- map_data(x, c(
    "date"           = "date",
    "broj_zarazenih" = "confirmed",
    "broj_umrlih"    = "deaths",
    "broj_aktivni"   = "active",
    "Zupanija"       = "region"
  ))
  
  # recovered: derived as confirmed - deaths - active
  x <- x %>%
    mutate(recovered = confirmed - deaths - active)
  
  if(level==1){

    # compute total counts    
    x <- x %>%
      group_by(date) %>%
      summarise(across(c("confirmed", "deaths", "recovered"), sum))
    
  }
  
  if(level==2){

    # sanitize regions (strip stray whitespace around county names)
    x$region <- trimws(x$region)
    
  }
  
  # date (source timestamps look like "YYYY-MM-DD HH:MM")
  x$date <- as.Date(x$date, "%Y-%m-%d %H:%M")
  
  return(x)
}


================================================
FILE: R/ds_mzcr.cz.R
================================================
#' Ministry of Health of the Czech Republic
#'
#' Data source for: Czech Republic
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#' 
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#' - recovered
#' - tests
#'
#' @source https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19
#'
#' @keywords internal
#'
mzcr.cz <- function(level){
  if(!level %in% 1:3) return(NULL)
  
  # vaccines
  url.vacc <- "https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/ockovani.csv"
  x.vacc <- read.csv(url.vacc, fileEncoding = "UTF-8-BOM", encoding = "Latin1")
  
  # format
  x.vacc <- map_data(x.vacc, c(
    "datum" = "date",
    "vakcina" = "type",
    "kraj_nuts_kod" = "nuts",
    "prvnich_davek" = "first",
    "druhych_davek" = "second"
  ))
  
  # compute total doses and people vaccinated  
  x.vacc <- x.vacc %>%
    mutate(
      is_oneshot = type=="COVID-19 Vaccine Janssen",
      vaccines = first + second,
      people_vaccinated = first,
      people_fully_vaccinated = first*is_oneshot + second*(!is_oneshot))
  
  if(level==1){
    
    # confirmed, recovered, deaths, tests
    url.cases <- "https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/nakazeni-vyleceni-umrti-testy.csv"
    x.cases <- read.csv(url.cases, fileEncoding = "UTF-8-BOM")
    
    # format
    x.cases <- map_data(x.cases, c(
      "datum" = "date",
      "kumulativni_pocet_nakazenych" = "confirmed",
      "kumulativni_pocet_vylecenych" = "recovered",
      "kumulativni_pocet_umrti" = "deaths",
      "kumulativni_pocet_testu" = "pcr",
      "kumulativni_pocet_ag_testu" = "antigen"
    ))
    
    # compute total tests
    x.cases$tests <- x.cases$pcr + x.cases$antigen
    
    # hospitalizations
    url.hosp <- "https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/hospitalizace.csv"
    x.hosp <- read.csv(url.hosp, fileEncoding = "UTF-8-BOM")
    x.hosp <- map_data(x.hosp, c(
      "datum" = "date",
      "pocet_hosp" = "hosp",
      "jip" = "icu",
      "upv" = "vent"
    ))
    
    # compute cumulative vaccination data
    x.vacc <- x.vacc %>%
      # for each date
      group_by(date) %>%
      # compute total counts
      summarise(
        vaccines = sum(vaccines),
        people_vaccinated = sum(people_vaccinated),
        people_fully_vaccinated = sum(people_fully_vaccinated)) %>%
      # sort by date
      arrange(date) %>%
      # cumulate
      mutate(
        vaccines = cumsum(vaccines),
        people_vaccinated = cumsum(people_vaccinated),
        people_fully_vaccinated = cumsum(people_fully_vaccinated))
    
    # merge
    by <- "date"
    x <- x.cases %>%
      full_join(x.hosp, by = by) %>%
      full_join(x.vacc, by = by)
    
  }
  
  if(level==2 | level==3){
    
    # confirmed, recovered, deaths
    url.cases <- "https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/kraj-okres-nakazeni-vyleceni-umrti.csv"
    x.cases <- read.csv(url.cases, fileEncoding = "UTF-8-BOM")
    
    # format
    x.cases <- map_data(x.cases, c(
      "datum" = "date",
      "kraj_nuts_kod" = "nuts",
      "okres_lau_kod" = "lau",
      "kumulativni_pocet_nakazenych" = "confirmed",
      "kumulativni_pocet_vylecenych" = "recovered",
      "kumulativni_pocet_umrti" = "deaths"
    ))
    
    # tests
    url.tests <- "https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/kraj-okres-testy.csv"
    x.tests <- read.csv(url.tests, fileEncoding = "UTF-8-BOM")  
    
    # format
    x.tests <- map_data(x.tests, c(
      "datum" = "date",
      "kraj_nuts_kod" = "nuts",
      "okres_lau_kod" = "lau",
      "kumulativni_pocet_testu_okres" = "tests_lau",
      "kumulativni_pocet_testu_kraj" = "tests_nuts"
    ))
    
    if(level==2){
      
      # compute cases by state
      x.cases <- x.cases %>%
        filter(!is.na(nuts)) %>%
        group_by(nuts, date) %>%
        summarise(
          confirmed = sum(confirmed),
          recovered = sum(recovered),
          deaths    = sum(deaths))
      
      # compute tests by state
      x.tests <- x.tests %>%
        filter(!is.na(nuts)) %>%
        group_by(nuts, date) %>%
        summarise(tests = median(tests_nuts))
      
      # compute vaccines by state
      x.vacc <- x.vacc %>%
        # for each date and state
        group_by(date, nuts) %>%
        # compute total counts
        summarise(
          vaccines = sum(vaccines),
          people_vaccinated = sum(people_vaccinated),
          people_fully_vaccinated = sum(people_fully_vaccinated)) %>%
        # group by state
        group_by(nuts) %>%
        # sort by date
        arrange(date) %>%
        # cumulate
        mutate(
          vaccines = cumsum(vaccines),
          people_vaccinated = cumsum(people_vaccinated),
          people_fully_vaccinated = cumsum(people_fully_vaccinated))
      
      # merge
      by <- c("date", "nuts")
      x <- x.cases %>%
        full_join(x.tests, by = by) %>%
        full_join(x.vacc, by = by)
      
    }
    
    if(level==3){
      
      # filter cases by lau
      x.cases <- x.cases %>%
        filter(!is.na(lau)) 
      
      # filter tests by lau
      x.tests <- x.tests %>%
        filter(!is.na(lau)) %>%
        mutate(tests = tests_lau)
      
      # merge
      x <- full_join(x.cases, x.tests, by = c("date", "lau")) 
      
    }
    
  }
  
  # convert date
  x$date <- as.Date(x$date)
  
  return(x)
}


================================================
FILE: R/ds_ourworldindata.org.R
================================================
#' Our World in Data
#'
#' Data source for: Worldwide
#'
#' @param level 1, 2
#' @param id filter by ISO code if level=1 or by name of state if level=2
#'
#' @section Level 1:
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#'
#' @section Level 2:
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://covid.ourworldindata.org
#'
#' @keywords internal
#'
ourworldindata.org <- function(level = 1, id = NULL){
  # only worldwide country data (1) and US state vaccinations (2) are supported
  if(!level %in% 1:2) return(NULL)
  
  if(level == 1){
    
    # fetch the worldwide OWID dataset
    src <- "https://covid.ourworldindata.org/data/owid-covid-data.csv"
    dat <- read.csv(src, cache = TRUE)
    
    # keep rows that carry a valid ISO code
    dat <- dat[!is.na(dat$iso_code),]
    
    # restrict to the requested ISO codes, if any
    if(!is.null(id))
      dat <- dat[dat$iso_code %in% id,]
    
    # rename the columns of interest to the internal vocabulary
    dat <- map_data(dat, c(
      'date',
      'iso_code'                = 'iso_alpha_3',
      'location'                = 'country',
      'total_tests'             = 'tests',
      'total_vaccinations'      = 'vaccines',
      'people_vaccinated'       = 'people_vaccinated',
      'people_fully_vaccinated' = 'people_fully_vaccinated',
      'icu_patients'            = 'icu',
      'hosp_patients'           = 'hosp'
    ))
    
  }
  else{
    
    # fetch the OWID vaccination data for US states
    src <- "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv"
    dat <- read.csv(src)
    
    # rename the columns of interest to the internal vocabulary
    dat <- map_data(dat, c(
      'date',
      'location'                = 'state',
      'total_vaccinations'      = 'vaccines',
      'people_vaccinated'       = 'people_vaccinated',
      'people_fully_vaccinated' = 'people_fully_vaccinated'
    ))
    
    if(!is.null(id)){
      # restrict to the requested states
      dat <- dat[which(dat$state %in% id),]
    }
    else{
      # drop entities that should not appear at level 2
      dat <- dat[which(!dat$state %in% c(
        "Bureau of Prisons", "Dept of Defense", "Federated States of Micronesia", 
        "Indian Health Svc", "Long Term Care", "Marshall Islands", "Republic of Palau", 
        "United States", "Veterans Health")),]
    }
    
  }
  
  # use Date objects
  dat$date <- as.Date(dat$date)
  
  return(dat)
}


================================================
FILE: R/ds_rivm.nl.R
================================================
#' National Institute for Public Health and the Environment
#'
#' Data source for: Netherlands
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#'
#' @section Level 3:
#' - confirmed cases
#' - deaths
#'
#' @source https://data.rivm.nl/covid-19/
#'
#' @keywords internal
#'
rivm.nl <- function(level) {
  # supported levels: 1 = country, 2 = province, 3 = municipality
  if(!level %in% 1:3) return(NULL)

  # download the daily figures reported per municipality
  url <- "https://data.rivm.nl/covid-19/COVID-19_aantallen_gemeente_per_dag.csv"
  x   <- read.csv(url, sep = ";")
  
  # rename the columns of interest to the internal vocabulary
  x <- map_data(x, c(
    "Date_of_publication" = "date",
    "Municipality_code"   = "municipality_code",
    "Municipality_name"   = "municipality",
    "Province"            = "province",
    "Total_reported"      = "confirmed",
    "Deceased"            = "deaths")) 
  
  # clean up dates and area names
  x$date <- as.Date(x$date)
  x$province <- trimws(x$province)
  x$municipality <- trimws(x$municipality)
  
  # grouping keys for each administrative level
  keys <- list(
    c("date"),
    c("date", "province"),
    c("date", "province", "municipality"))
  by <- keys[[level]]
  
  # drop rows with a missing area identifier
  for(col in by[-1])
    x <- x[!is.na(x[[col]]),]
  
  # aggregate daily counts, then cumulate them over time within each area
  x <- x %>% 
    # total counts for each date and area
    dplyr::group_by_at(by) %>%
    dplyr::summarise(
      confirmed = sum(confirmed),
      deaths    = sum(deaths)) %>%
    # within each area (no grouping at level 1), sort by date and cumulate
    dplyr::group_by_at(by[-1]) %>%
    dplyr::arrange(date) %>%
    dplyr::mutate(
      confirmed = cumsum(confirmed),
      deaths    = cumsum(deaths))
  
  return(x)
}


================================================
FILE: R/ds_sciensano.be.R
================================================
#' Sciensano, the Belgian Institute for Health
#'
#' Data source for: Belgium
#'
#' @param level 1, 2, 3
#'
#' @section Level 1:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#' 
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @section Level 3:
#' - confirmed cases
#' - tests
#' - total vaccine doses administered
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#' - intensive care
#' - patients requiring ventilation
#'
#' @source https://epistat.wiv-isp.be/covid/
#'
#' @keywords internal
#'
sciensano.be <- function(level){
  # supported levels: 1 = national, 2 = region, 3 = province
  if(!level %in% 1:3) return(NULL)
  
  # download: a single workbook holds all the epidemiological sheets
  url <- "https://epistat.sciensano.be/Data/COVID19BE.xlsx"
  x   <- read.excel(url, sheet = c(
    "CASES_AGESEX", "CASES_MUNI_CUM", "HOSP", "MORT", "TESTS",         
    "VACC", "VACC_MUNI_CUM_1", "VACC_MUNI_CUM_2", "VACC_MUNI_CUM_3"       
  ))  
  
  # convert date in all the excel sheets;
  # rows without a parseable DATE are dropped and each sheet is sorted by date
  x <- lapply(x, function(x){
    if("DATE" %in% colnames(x)){
      x$date <- as.Date(x$DATE)
      x$DATE <- NULL  
      x <- x %>% 
        filter(!is.na(date)) %>% 
        arrange(date)
    }
    return(x)
  })
  
  if(level==1){
    
    # confirmed: daily cases summed across age/sex groups, then cumulated
    confirmed <- x$CASES_AGESEX %>% 
      group_by(date) %>% 
      summarise(confirmed = sum(CASES)) %>%
      arrange(date) %>%
      mutate(confirmed = cumsum(confirmed))
    
    # hosp: TOTAL_IN* figures are used as reported (no cumulation applied,
    # unlike cases/deaths/tests below)
    hosp <- x$HOSP %>%
      group_by(date) %>%
      summarise(
        hosp = sum(TOTAL_IN),
        icu  = sum(TOTAL_IN_ICU),
        vent = sum(TOTAL_IN_RESP))
    
    # deaths: daily counts cumulated over time
    deaths <- x$MORT %>%
      group_by(date) %>%
      summarise(deaths = sum(DEATHS)) %>%
      arrange(date) %>%
      mutate(deaths = cumsum(deaths))
    
    # tests: daily counts cumulated over time
    tests <- x$TESTS %>%
      group_by(date) %>%
      summarise(tests = sum(TESTS_ALL)) %>%
      arrange(date) %>%
      mutate(tests = cumsum(tests))
    
    # Sciensano uses the following codes for vaccines. 
    # - For vaccines requiring 2 doses: A for first dose, B for second dose; 
    # - C for vaccine requiring only 1 dose 
    # - E for extra dose of vaccine administered since the 9th of September 2021
    # We use A+C to compute people_vaccinated and B+C to compute people_fully_vaccinated.
    # Note that sum(COUNT) (total doses) includes every dose code, E included.
    # See https://epistat.sciensano.be/COVID19BE_codebook.pdf
    vaccines <- x$VACC %>%
      group_by(date) %>%
      summarise(
        vaccines = sum(COUNT),
        people_vaccinated = sum(COUNT[DOSE %in% c("A", "C")]),
        people_fully_vaccinated = sum(COUNT[DOSE %in% c("B", "C")])) %>%
      arrange(date) %>%
      mutate(
        vaccines = cumsum(vaccines),
        people_vaccinated = cumsum(people_vaccinated),
        people_fully_vaccinated = cumsum(people_fully_vaccinated))
    
    # merge all the metrics by date
    by <- "date"
    x <- confirmed %>%
      full_join(hosp, by = by) %>%
      full_join(deaths, by = by) %>%
      full_join(tests, by = by) %>%
      full_join(vaccines, by = by)
    
  }
  if(level==2){
    
    # confirmed: daily cases per region, cumulated within each region
    confirmed <- x$CASES_AGESEX %>%
      filter(!is.na(REGION)) %>%
      group_by(date, REGION) %>% 
      summarise(confirmed = sum(CASES)) %>%
      group_by(REGION) %>% 
      arrange(date) %>%
      mutate(confirmed = cumsum(confirmed))
    
    # hosp: TOTAL_IN* figures are used as reported (no cumulation applied)
    hosp <- x$HOSP %>%
      filter(!is.na(REGION)) %>%
      group_by(date, REGION) %>%
      summarise(
        hosp = sum(TOTAL_IN),
        icu  = sum(TOTAL_IN_ICU),
        vent = sum(TOTAL_IN_RESP))
    
    # deaths: daily counts per region, cumulated within each region
    deaths <- x$MORT %>%
      filter(!is.na(REGION)) %>%
      group_by(date, REGION) %>%
      summarise(deaths = sum(DEATHS)) %>%
      group_by(REGION) %>% 
      arrange(date) %>%
      mutate(deaths = cumsum(deaths))
    
    # tests: daily counts per region, cumulated within each region
    tests <- x$TESTS %>%
      filter(!is.na(REGION)) %>%
      group_by(date, REGION) %>%
      summarise(tests = sum(TESTS_ALL)) %>%
      group_by(REGION) %>% 
      arrange(date) %>%
      mutate(tests = cumsum(tests))
    
    # Sciensano uses the following codes for vaccines. 
    # - For vaccines requiring 2 doses: A for first dose, B for second dose; 
    # - C for vaccine requiring only 1 dose 
    # - E for extra dose of vaccine administered since the 9th of September 2021
    # We use A+C to compute people_vaccinated and B+C to compute people_fully_vaccinated.
    # See https://epistat.sciensano.be/COVID19BE_codebook.pdf
    vaccines <- x$VACC %>%
      filter(!is.na(REGION)) %>%
      group_by(date, REGION) %>%
      summarise(
        vaccines = sum(COUNT),
        people_vaccinated = sum(COUNT[DOSE %in% c("A", "C")]),
        people_fully_vaccinated = sum(COUNT[DOSE %in% c("B", "C")])) %>%
      group_by(REGION) %>% 
      arrange(date) %>%
      mutate(
        vaccines = cumsum(vaccines),
        people_vaccinated = cumsum(people_vaccinated),
        people_fully_vaccinated = cumsum(people_fully_vaccinated))
    
    # merge all the metrics by date and region
    by <- c("date", "REGION")
    x <- confirmed %>%
      full_join(hosp, by = by) %>%
      full_join(deaths, by = by) %>%
      full_join(tests, by = by) %>%
      full_join(vaccines, by = by)
    
  }
  
  if(level==3){
    
    # confirmed: daily cases per province, cumulated within each province
    # (deaths are not available by province; they are omitted at this level)
    confirmed <- x$CASES_AGESEX %>%
      filter(!is.na(REGION) & !is.na(PROVINCE)) %>%
      group_by(date, REGION, PROVINCE) %>% 
      summarise(confirmed = sum(CASES)) %>%
      group_by(REGION, PROVINCE) %>% 
      arrange(date) %>%
      mutate(confirmed = cumsum(confirmed))
    
    # hosp: TOTAL_IN* figures are used as reported (no cumulation applied)
    hosp <- x$HOSP %>%
      filter(!is.na(REGION) & !is.na(PROVINCE)) %>%
      group_by(date, REGION, PROVINCE) %>%
      summarise(
        hosp = sum(TOTAL_IN),
        icu  = sum(TOTAL_IN_ICU),
        vent = sum(TOTAL_IN_RESP))
    
    # tests: daily counts per province, cumulated within each province
    tests <- x$TESTS %>%
      filter(!is.na(REGION) & !is.na(PROVINCE)) %>%
      group_by(date, REGION, PROVINCE) %>%
      summarise(tests = sum(TESTS_ALL)) %>%
      group_by(REGION, PROVINCE) %>% 
      arrange(date) %>%
      mutate(tests = cumsum(tests))
    
    # Sciensano uses the following codes for vaccines. 
    # - For vaccines requiring 2 doses: A for first dose, B for second dose; 
    # - C for vaccine requiring only 1 dose 
    # - E for extra dose of vaccine administered since the 9th of September 2021
    # We use A+C to compute people_vaccinated and B+C to compute people_fully_vaccinated.
    # See https://epistat.sciensano.be/COVID19BE_codebook.pdf
    # Notes on the municipal vaccination sheets:
    # - counts below 10 are reported as the string "<10"; those rows are dropped
    # - CUMUL values are kept as-is (sheet names suggest they are already cumulative)
    # - the NIS5 municipality code is mapped to REGION/PROVINCE via CASES_MUNI_CUM
    vaccines <- bind_rows(x$VACC_MUNI_CUM_1, x$VACC_MUNI_CUM_2, x$VACC_MUNI_CUM_3) %>%
      filter(CUMUL!="<10") %>%
      mutate(
        NIS5 = as.character(NIS5),
        CUMUL = as.integer(CUMUL),
        # convert epidemiological week to date. 
        # the vaccines are reported at the end of the week on Sundays,
        # hence the +7 shift from the MMWR week start.
        YEAR = as.integer(paste0("20", substr(YEAR_WEEK, 0, 2))),
        WEEK = as.integer(substr(YEAR_WEEK, 4, 6)),
        date = MMWRweek::MMWRweek2Date(YEAR, WEEK)+7) %>%
      left_join(x$CASES_MUNI_CUM, by = "NIS5") %>%
      filter(!is.na(REGION) & !is.na(PROVINCE)) %>%
      group_by(date, REGION, PROVINCE) %>%
      summarise(
        vaccines = sum(CUMUL),
        people_vaccinated = sum(CUMUL[DOSE %in% c("A", "C")]),
        people_fully_vaccinated = sum(CUMUL[DOSE %in% c("B", "C")]))
    
    # merge all the metrics by date, region, and province
    by <- c("date", "REGION", "PROVINCE")
    x <- confirmed %>%
      full_join(hosp, by = by) %>%
      full_join(tests, by = by) %>%
      full_join(vaccines, by = by)
    
  }
  
  return(x)
}


================================================
FILE: R/ds_ssi.dk.R
================================================
#' Statens Serum Institut
#'
#' Data source for: Denmark
#'
#' @param level 2, 3
#'
#' @section Level 2:
#' - confirmed cases
#' - deaths
#' - tests
#' - people with at least one vaccine dose
#' - people fully vaccinated
#' - hospitalizations
#'
#' @section Level 3:
#' - confirmed cases
#' - tests
#' - people with at least one vaccine dose
#' - people fully vaccinated
#'
#' @source https://covid19.ssi.dk/overvagningsdata
#'
#' @keywords internal
#'
ssi.dk <- function(level){
  if(!level %in% 2:3) return(NULL)
  
  # function to scrape the url of the latest data file
  get_url <- function(webpage, baseurl){
    html <- httr::GET(webpage)
    data <- httr::content(html)
    data <- as.character(data)
    pattern <- paste0(baseurl, "-\\d{8}[-\\_a-z0-9]*")
    m <- regexpr(pattern, data, ignore.case = TRUE)
    regmatches(data, m)
  }
  
  # download cases
  webpage <- "https://covid19.ssi.dk/overvagningsdata/download-fil-med-overvaagningdata"
  baseurl <- "https://files.ssi.dk/covid19/overvagning/dashboard/overvaagningsdata-dashboard-covid19"
  zip.cases <- tempfile()
  url <- get_url(webpage = webpage, baseurl = baseurl)
  download.file(url, zip.cases, quiet = TRUE, mode = "wb")  

  # download vaccines  
  webpage <- "https://covid19.ssi.dk/overvagningsdata/download-fil-med-vaccinationsdata"
  baseurl <- "https://files.ssi.dk/covid19/vaccinationsdata/zipfil/vaccinationsdata-dashboard-covid19"
  zip.vacc <- tempfile()
  url <- get_url(webpage = webpage, baseurl = baseurl)
  download.file(url, zip.vacc, quiet = TRUE, mode = "wb")  
  
  # temp dir to unzip
  dir <- tempdir()
  
  if(level==2){
    
    # read cases and deaths
    file <- "Regionalt_DB/03_bekraeftede_tilfaelde_doede_indlagte_pr_dag_pr_koen.csv"
    file <- unzip(zip.cases, files = file, exdir = dir)
    x.cases <- read.csv(file, sep = ";", fileEncoding = "Latin1", encoding = "ANSI")

    # format cases
    x.cases <- map_data(x.cases, c(
      "Region" = "region",
      "Prøvetagningsdato" = "date",
      "Døde" = "deaths",
      "Bekræftede.tilfælde" = "confirmed",
      "Bekræftede.tilfælde.i.alt" = "confirmed"
    ))
    
    # cases
    x.cases <- x.cases %>%
      # for each region and date
      group_by(region, date) %>%
      # compute total counts
      summarise(
        deaths = sum(deaths),
        confirmed = sum(confirmed)) %>%
      # group by region
      group_by(region) %>%
      # sort by date
      arrange(date) %>%
      # cumulate
      mutate(
        deaths = cumsum(deaths),
        confirmed = cumsum(confirmed))
    
    # read hosp
    file <- "Regionalt_DB/15_indlagte_pr_region_pr_dag.csv"
    file <- unzip(zip.cases, files = file, exdir = dir)
    x.hosp <- read.csv(file, sep = ";", fileEncoding = "Latin1", encoding = "ANSI")
    
    # format hosp
    x.hosp <- map_data(x.hosp, c(
      "Region" = "region",
      "Dato" = "date",
      "Indlagte" = "hosp"
    ))
    # drop duplicated entry: Syddanmark 2021-10-26
    x.hosp <- x.hosp[!duplicated(x.hosp),]
     
    # read tests
    file <- "Regionalt_DB/16_pcr_og_antigen_test_pr_region.csv"
    file <- unzip(zip.cases, files = file, exdir = dir)
    x.tests <- read.csv(file, sep = ";", fileEncoding = "Latin1", encoding = "ANSI")
    
    # format tests
    x.tests <- map_data(x.tests, c(
      "Uge" = "epiweek",
      "Region" = "region",
      "Metode" = "type",
      "Prøver" = "tests"
    ))
  
    # tests
    x.tests <- x.tests %>%
      # drop missing region and keep only PCR tests
      # only PCR tests are counted as confirmed cases
      # see https://covid19.ssi.dk/overvagningsdata/konfirmatorisk-pcr-test
      filter(!is.na(region) & type=="PCR") %>%
      # convert epiweek to date
      mutate(
        YEAR = as.integer(substr(epiweek, 0, 4)),
        WEEK = as.integer(substr(epiweek, 7, 9)),
        date = as.character(MMWRweek::MMWRweek2Date(YEAR, WEEK)+7)) %>%
      # for each date and region
      group_by(date, region) %>%
      # compute total counts
      summarize(tests = sum(tests)) %>%
      # group by region
      group_by(region) %>%
      # sort by date
      arrange(date) %>%
      # cumulate
      mutate(tests = cumsum(tests))
      
    # read people vaccinated
    file <- "Vaccine_DB/Vaccine_dato_region.csv"
    file <- unzip(zip.vacc, files = file, exdir = dir)
    x.vacc <- read.csv(file, sep = ";", fileEncoding = "Latin1", encoding = "ANSI")
    
    # format people vaccinated
    x.vacc <- map_data(x.vacc, c(
      "Dato" = "date",
      "Region" = "region",
      "Samlet.antal.1..stik" = "people_vaccinated",
      "Samlet.antal.2..stik" = "people_fully_vaccinated"
    ))
    
    # merge
    by <- c("region", "date")
    x <- x.cases %>%
      full_join(x.hosp, by = by) %>%
      full_join(x.tests, by = by) %>%
      full_join(x.vacc, by = by)

  }
  
  if(level==3){
    
    # read cases
    file <- "Kommunalt_DB/07_bekraeftede_tilfaelde_pr_dag_pr_kommune.csv"
    file <- unzip(zip.cases, files = file, exdir = dir)
    x.cases <- read.csv(file, sep = ";", fileEncoding = "Latin1", encoding = "ANSI")
    
    # format cases
    x.cases <- map_data(x.cases, c(
      "Kommune" = "code",
      "Kommunenavn" = "name",
      "Dato" = "date",
      "Bekræftede.tilfælde" = "confirmed",
      "Bekræftede.tilfælde.i.alt" = "confirmed"
    )) 
    
    # cases
    x.cases <- x.cases %>%
      filter(!is.na(code)) %>%
      group_by(code) %>%
      arrange(date) %>%
      mutate(confirmed = cumsum(confirmed))
    
    # read tests
    file <- "Kommunalt_DB/12_pcr_og_antigen_test_pr_kommune.csv"
    file <- unzip(zip.cases, files = file, exdir = dir)
    x.tests <- read.csv(file, sep = ";", fileEncoding = "Latin1", encoding = "ANSI")
    
    # format tests
    x.tests <- map_data(x.tests, c(
      "Uge" = "epiweek",
      "Kommune.kode" = "code",
      "Kommunenavn" = "name",
      "Metode" = "type",
      "Prøver" = "tests"
    ))
    
    # map
    idx <- which(!duplicated(x.tests$code))
    map <- x.tests$code[idx]
    names(map) <- x.tests$name[idx]
    
    # tests
    x.tests <- x.tests %>%
      # drop missing municipality and keep only PCR tests
      # only PCR tests are counted as confirmed cases
      # see https://covid19.ssi.dk/overvagningsdata/konfirmatorisk-pcr-test
      filter(!is.na(code) & !is.na(name) & type=="PCR") %>%
      # convert epiweek to date
      mutate(
        YEAR = as.integer(substr(epiweek, 0, 4)),
        WEEK = as.integer(substr(epiweek, 7, 9)),
        date = as.character(MMWRweek::MMWRweek2Date(YEAR, WEEK)+7)) %>%
      # for each date and municipality
      group_by(date, code) %>%
      # compute total counts
      summarize(tests = sum(tests)) %>%
      # group by municipality
      group_by(code) %>%
      # sort by date
      arrange(date) %>%
      # cumulate
      mutate(tests = cumsum(tests))
    
    # read people vaccinated
    file <- "Vaccine_DB/Vaccine_dato_kommune.csv"
    file <- unzip(zip.vacc, files = file, exdir = dir)
    x.vacc <- read.csv(file, sep = ";", fileEncoding = "Latin1", encoding = "ANSI")
    
    # format people vaccinated
    x.vacc <- map_data(x.vacc, c(
      "Dato" = "da
Download .txt
gitextract_37nyhq5j/

├── .github/
│   ├── .gitignore
│   └── workflows/
│       └── pkgdown.yaml
├── .gitignore
├── .travis.yml
├── DESCRIPTION
├── LICENSE.md
├── NAMESPACE
├── NEWS.md
├── R/
│   ├── covid19.R
│   ├── ds_admin.ch.R
│   ├── ds_arcgis.de.R
│   ├── ds_arcgis.se.R
│   ├── ds_canada.ca.R
│   ├── ds_cdc.gov.R
│   ├── ds_covid19india.org.R
│   ├── ds_covidtracking.com.R
│   ├── ds_geohive.ie.R
│   ├── ds_github.ceedsdemm.covidprodataset.R
│   ├── ds_github.covid19euzh.covid19eudata.R
│   ├── ds_github.cssegisanddata.covid19.R
│   ├── ds_github.cssegisanddata.covid19unified.R
│   ├── ds_github.dsfsi.covid19za.R
│   ├── ds_github.dssgpt.covid19ptdata.R
│   ├── ds_github.eguidotti.covid19br.R
│   ├── ds_github.italia.covid19opendatavaccini.R
│   ├── ds_github.jmcastagnetto.covid19perudata.R
│   ├── ds_github.lisphilar.covid19sir.R
│   ├── ds_github.m3it.covid19data.R
│   ├── ds_github.minciencia.datoscovid19.R
│   ├── ds_github.mpiktas.covid19lt.R
│   ├── ds_github.nytimes.covid19data.R
│   ├── ds_github.openzh.covid19.R
│   ├── ds_github.oxcgrt.covidpolicytracker.R
│   ├── ds_github.ozanerturk.covid19turkeyapi.R
│   ├── ds_github.pcmdpc.covid19.R
│   ├── ds_github.robertkochinstitut.covid19impfungenindeutschland.R
│   ├── ds_github.swsoyee.2019ncovjapan.R
│   ├── ds_github.wcota.covid19br.R
│   ├── ds_github.wcota.covid19br.vac.R
│   ├── ds_go.th.R
│   ├── ds_gob.ar.R
│   ├── ds_gob.pe.R
│   ├── ds_gouv.fr.R
│   ├── ds_gov.co.R
│   ├── ds_gov.lv.R
│   ├── ds_gov.si.R
│   ├── ds_gov.tw.R
│   ├── ds_gov.uk.R
│   ├── ds_gv.at.R
│   ├── ds_healthdata.gov.R
│   ├── ds_humdata.af.R
│   ├── ds_humdata.ht.R
│   ├── ds_impfdashboard.de.R
│   ├── ds_isciii.es.R
│   ├── ds_koronavirus.hr.R
│   ├── ds_mzcr.cz.R
│   ├── ds_ourworldindata.org.R
│   ├── ds_rivm.nl.R
│   ├── ds_sciensano.be.R
│   ├── ds_ssi.dk.R
│   ├── ds_toyokeizai.net.R
│   ├── ds_who.int.R
│   ├── ds_wikipedia.dp.R
│   ├── iso_ABW.R
│   ├── iso_AFG.R
│   ├── iso_AGO.R
│   ├── iso_AIA.R
│   ├── iso_ALB.R
│   ├── iso_AND.R
│   ├── iso_ARE.R
│   ├── iso_ARG.R
│   ├── iso_ARM.R
│   ├── iso_ASM.R
│   ├── iso_ATG.R
│   ├── iso_AUS.R
│   ├── iso_AUT.R
│   ├── iso_AZE.R
│   ├── iso_BDI.R
│   ├── iso_BEL.R
│   ├── iso_BEN.R
│   ├── iso_BES.R
│   ├── iso_BFA.R
│   ├── iso_BGD.R
│   ├── iso_BGR.R
│   ├── iso_BHR.R
│   ├── iso_BHS.R
│   ├── iso_BIH.R
│   ├── iso_BLR.R
│   ├── iso_BLZ.R
│   ├── iso_BMU.R
│   ├── iso_BOL.R
│   ├── iso_BRA.R
│   ├── iso_BRB.R
│   ├── iso_BRN.R
│   ├── iso_BTN.R
│   ├── iso_BWA.R
│   ├── iso_CAC.R
│   ├── iso_CAF.R
│   ├── iso_CAN.R
│   ├── iso_CHE.R
│   ├── iso_CHL.R
│   ├── iso_CHN.R
│   ├── iso_CIV.R
│   ├── iso_CMR.R
│   ├── iso_COD.R
│   ├── iso_COG.R
│   ├── iso_COK.R
│   ├── iso_COL.R
│   ├── iso_COM.R
│   ├── iso_CPV.R
│   ├── iso_CRI.R
│   ├── iso_CUB.R
│   ├── iso_CUW.R
│   ├── iso_CYM.R
│   ├── iso_CYP.R
│   ├── iso_CZE.R
│   ├── iso_DEU.R
│   ├── iso_DJI.R
│   ├── iso_DMA.R
│   ├── iso_DNK.R
│   ├── iso_DOM.R
│   ├── iso_DPC.R
│   ├── iso_DZA.R
│   ├── iso_ECU.R
│   ├── iso_EGY.R
│   ├── iso_ERI.R
│   ├── iso_ESP.R
│   ├── iso_EST.R
│   ├── iso_ETH.R
│   ├── iso_FIN.R
│   ├── iso_FJI.R
│   ├── iso_FLK.R
│   ├── iso_FRA.R
│   ├── iso_FRO.R
│   ├── iso_FSM.R
│   ├── iso_GAB.R
│   ├── iso_GBR.R
│   ├── iso_GEO.R
│   ├── iso_GGY.R
│   ├── iso_GHA.R
│   ├── iso_GIB.R
│   ├── iso_GIN.R
│   ├── iso_GLP.R
│   ├── iso_GMB.R
│   ├── iso_GNB.R
│   ├── iso_GNQ.R
│   ├── iso_GPC.R
│   ├── iso_GRC.R
│   ├── iso_GRD.R
│   ├── iso_GRL.R
│   ├── iso_GTM.R
│   ├── iso_GUF.R
│   ├── iso_GUM.R
│   ├── iso_GUY.R
│   ├── iso_HKG.R
│   ├── iso_HND.R
│   ├── iso_HRV.R
│   ├── iso_HTI.R
│   ├── iso_HUN.R
│   ├── iso_IDN.R
│   ├── iso_IMN.R
│   ├── iso_IND.R
│   ├── iso_IRL.R
│   ├── iso_IRN.R
│   ├── iso_IRQ.R
│   ├── iso_ISL.R
│   ├── iso_ISR.R
│   ├── iso_ITA.R
│   ├── iso_JAM.R
│   ├── iso_JEY.R
│   ├── iso_JOR.R
│   ├── iso_JPN.R
│   ├── iso_KAZ.R
│   ├── iso_KEN.R
│   ├── iso_KGZ.R
│   ├── iso_KHM.R
│   ├── iso_KIR.R
│   ├── iso_KNA.R
│   ├── iso_KOR.R
│   ├── iso_KWT.R
│   ├── iso_LAO.R
│   ├── iso_LBN.R
│   ├── iso_LBR.R
│   ├── iso_LBY.R
│   ├── iso_LCA.R
│   ├── iso_LIE.R
│   ├── iso_LKA.R
│   ├── iso_LSO.R
│   ├── iso_LTU.R
│   ├── iso_LUX.R
│   ├── iso_LVA.R
│   ├── iso_MAC.R
│   ├── iso_MAR.R
│   ├── iso_MCO.R
│   ├── iso_MDA.R
│   ├── iso_MDG.R
│   ├── iso_MDV.R
│   ├── iso_MEX.R
│   ├── iso_MHL.R
│   ├── iso_MKD.R
│   ├── iso_MLI.R
│   ├── iso_MLT.R
│   ├── iso_MMR.R
│   ├── iso_MNE.R
│   ├── iso_MNG.R
│   ├── iso_MNP.R
│   ├── iso_MOZ.R
│   ├── iso_MRT.R
│   ├── iso_MSR.R
│   ├── iso_MTQ.R
│   ├── iso_MUS.R
│   ├── iso_MWI.R
│   ├── iso_MYS.R
│   ├── iso_MYT.R
│   ├── iso_NAM.R
│   ├── iso_NCL.R
│   ├── iso_NER.R
│   ├── iso_NGA.R
│   ├── iso_NIC.R
│   ├── iso_NIU.R
│   ├── iso_NLD.R
│   ├── iso_NOR.R
│   ├── iso_NPL.R
│   ├── iso_NRU.R
│   ├── iso_NZL.R
│   ├── iso_OMN.R
│   ├── iso_PAK.R
│   ├── iso_PAN.R
│   ├── iso_PCN.R
│   ├── iso_PER.R
│   ├── iso_PHL.R
│   ├── iso_PLW.R
│   ├── iso_PNG.R
│   ├── iso_POL.R
│   ├── iso_PRI.R
│   ├── iso_PRT.R
│   ├── iso_PRY.R
│   ├── iso_PSE.R
│   ├── iso_PYF.R
│   ├── iso_QAT.R
│   ├── iso_REU.R
│   ├── iso_RKS.R
│   ├── iso_ROU.R
│   ├── iso_RUS.R
│   ├── iso_RWA.R
│   ├── iso_SAU.R
│   ├── iso_SDN.R
│   ├── iso_SEN.R
│   ├── iso_SGP.R
│   ├── iso_SHN.R
│   ├── iso_SLB.R
│   ├── iso_SLE.R
│   ├── iso_SLV.R
│   ├── iso_SMR.R
│   ├── iso_SOM.R
│   ├── iso_SRB.R
│   ├── iso_SSD.R
│   ├── iso_STP.R
│   ├── iso_SUR.R
│   ├── iso_SVK.R
│   ├── iso_SVN.R
│   ├── iso_SWE.R
│   ├── iso_SWZ.R
│   ├── iso_SXM.R
│   ├── iso_SYC.R
│   ├── iso_SYR.R
│   ├── iso_TCA.R
│   ├── iso_TCD.R
│   ├── iso_TGO.R
│   ├── iso_THA.R
│   ├── iso_TJK.R
│   ├── iso_TKL.R
│   ├── iso_TKM.R
│   ├── iso_TLS.R
│   ├── iso_TON.R
│   ├── iso_TTO.R
│   ├── iso_TUN.R
│   ├── iso_TUR.R
│   ├── iso_TUV.R
│   ├── iso_TWN.R
│   ├── iso_TZA.R
│   ├── iso_UGA.R
│   ├── iso_UKR.R
│   ├── iso_URY.R
│   ├── iso_USA.R
│   ├── iso_UZB.R
│   ├── iso_VAT.R
│   ├── iso_VCT.R
│   ├── iso_VEN.R
│   ├── iso_VGB.R
│   ├── iso_VIR.R
│   ├── iso_VNM.R
│   ├── iso_VUT.R
│   ├── iso_WLF.R
│   ├── iso_WSM.R
│   ├── iso_YEM.R
│   ├── iso_ZAF.R
│   ├── iso_ZMB.R
│   └── iso_ZWE.R
├── README.md
├── index.md
├── inst/
│   ├── CITATION
│   ├── extdata/
│   │   ├── db/
│   │   │   ├── AFG.csv
│   │   │   ├── ARG.csv
│   │   │   ├── AUS.csv
│   │   │   ├── AUT.csv
│   │   │   ├── BEL.csv
│   │   │   ├── BRA.csv
│   │   │   ├── CAN.csv
│   │   │   ├── CHE.csv
│   │   │   ├── CHL.csv
│   │   │   ├── CHN.csv
│   │   │   ├── COL.csv
│   │   │   ├── CZE.csv
│   │   │   ├── DEU.csv
│   │   │   ├── DNK.csv
│   │   │   ├── ESP.csv
│   │   │   ├── FRA.csv
│   │   │   ├── GBR.csv
│   │   │   ├── HRV.csv
│   │   │   ├── HTI.csv
│   │   │   ├── IND.csv
│   │   │   ├── IRL.csv
│   │   │   ├── ISO.csv
│   │   │   ├── ITA.csv
│   │   │   ├── JPN.csv
│   │   │   ├── LTU.csv
│   │   │   ├── LVA.csv
│   │   │   ├── MEX.csv
│   │   │   ├── NLD.csv
│   │   │   ├── PAK.csv
│   │   │   ├── PER.csv
│   │   │   ├── POL.csv
│   │   │   ├── PRT.csv
│   │   │   ├── RUS.csv
│   │   │   ├── SWE.csv
│   │   │   ├── THA.csv
│   │   │   ├── TWN.csv
│   │   │   ├── UKR.csv
│   │   │   ├── USA.csv
│   │   │   └── ZAF.csv
│   │   ├── db.R
│   │   ├── ds/
│   │   │   ├── CAN.csv
│   │   │   └── DPC.csv
│   │   └── src.csv
│   ├── joss/
│   │   ├── README.md
│   │   ├── apa.csl
│   │   ├── paper.Rmd
│   │   ├── paper.bib
│   │   └── paper.md
│   └── templates/
│       ├── ds_.R
│       └── iso_.R
├── man/
│   ├── ABW.Rd
│   ├── AFG.Rd
│   ├── AGO.Rd
│   ├── AIA.Rd
│   ├── ALB.Rd
│   ├── AND.Rd
│   ├── ARE.Rd
│   ├── ARG.Rd
│   ├── ARM.Rd
│   ├── ASM.Rd
│   ├── ATG.Rd
│   ├── AUS.Rd
│   ├── AUT.Rd
│   ├── AZE.Rd
│   ├── BDI.Rd
│   ├── BEL.Rd
│   ├── BEN.Rd
│   ├── BES.Rd
│   ├── BFA.Rd
│   ├── BGD.Rd
│   ├── BGR.Rd
│   ├── BHR.Rd
│   ├── BHS.Rd
│   ├── BIH.Rd
│   ├── BLR.Rd
│   ├── BLZ.Rd
│   ├── BMU.Rd
│   ├── BOL.Rd
│   ├── BRA.Rd
│   ├── BRB.Rd
│   ├── BRN.Rd
│   ├── BTN.Rd
│   ├── BWA.Rd
│   ├── CAC.Rd
│   ├── CAF.Rd
│   ├── CAN.Rd
│   ├── CHE.Rd
│   ├── CHL.Rd
│   ├── CHN.Rd
│   ├── CIV.Rd
│   ├── CMR.Rd
│   ├── COD.Rd
│   ├── COG.Rd
│   ├── COK.Rd
│   ├── COL.Rd
│   ├── COM.Rd
│   ├── CPV.Rd
│   ├── CRI.Rd
│   ├── CUB.Rd
│   ├── CUW.Rd
│   ├── CYM.Rd
│   ├── CYP.Rd
│   ├── CZE.Rd
│   ├── DEU.Rd
│   ├── DJI.Rd
│   ├── DMA.Rd
│   ├── DNK.Rd
│   ├── DOM.Rd
│   ├── DPC.Rd
│   ├── DZA.Rd
│   ├── ECU.Rd
│   ├── EGY.Rd
│   ├── ERI.Rd
│   ├── ESP.Rd
│   ├── EST.Rd
│   ├── ETH.Rd
│   ├── FIN.Rd
│   ├── FJI.Rd
│   ├── FLK.Rd
│   ├── FRA.Rd
│   ├── FRO.Rd
│   ├── FSM.Rd
│   ├── GAB.Rd
│   ├── GBR.Rd
│   ├── GEO.Rd
│   ├── GGY.Rd
│   ├── GHA.Rd
│   ├── GIB.Rd
│   ├── GIN.Rd
│   ├── GLP.Rd
│   ├── GMB.Rd
│   ├── GNB.Rd
│   ├── GNQ.Rd
│   ├── GPC.Rd
│   ├── GRC.Rd
│   ├── GRD.Rd
│   ├── GRL.Rd
│   ├── GTM.Rd
│   ├── GUF.Rd
│   ├── GUM.Rd
│   ├── GUY.Rd
│   ├── HKG.Rd
│   ├── HND.Rd
│   ├── HRV.Rd
│   ├── HTI.Rd
│   ├── HUN.Rd
│   ├── IDN.Rd
│   ├── IMN.Rd
│   ├── IND.Rd
│   ├── IRL.Rd
│   ├── IRN.Rd
│   ├── IRQ.Rd
│   ├── ISL.Rd
│   ├── ISR.Rd
│   ├── ITA.Rd
│   ├── JAM.Rd
│   ├── JEY.Rd
│   ├── JOR.Rd
│   ├── JPN.Rd
│   ├── KAZ.Rd
│   ├── KEN.Rd
│   ├── KGZ.Rd
│   ├── KHM.Rd
│   ├── KIR.Rd
│   ├── KNA.Rd
│   ├── KOR.Rd
│   ├── KWT.Rd
│   ├── LAO.Rd
│   ├── LBN.Rd
│   ├── LBR.Rd
│   ├── LBY.Rd
│   ├── LCA.Rd
│   ├── LIE.Rd
│   ├── LKA.Rd
│   ├── LSO.Rd
│   ├── LTU.Rd
│   ├── LUX.Rd
│   ├── LVA.Rd
│   ├── MAC.Rd
│   ├── MAR.Rd
│   ├── MCO.Rd
│   ├── MDA.Rd
│   ├── MDG.Rd
│   ├── MDV.Rd
│   ├── MEX.Rd
│   ├── MHL.Rd
│   ├── MKD.Rd
│   ├── MLI.Rd
│   ├── MLT.Rd
│   ├── MMR.Rd
│   ├── MNE.Rd
│   ├── MNG.Rd
│   ├── MNP.Rd
│   ├── MOZ.Rd
│   ├── MRT.Rd
│   ├── MSR.Rd
│   ├── MTQ.Rd
│   ├── MUS.Rd
│   ├── MWI.Rd
│   ├── MYS.Rd
│   ├── MYT.Rd
│   ├── NAM.Rd
│   ├── NCL.Rd
│   ├── NER.Rd
│   ├── NGA.Rd
│   ├── NIC.Rd
│   ├── NIU.Rd
│   ├── NLD.Rd
│   ├── NOR.Rd
│   ├── NPL.Rd
│   ├── NRU.Rd
│   ├── NZL.Rd
│   ├── OMN.Rd
│   ├── PAK.Rd
│   ├── PAN.Rd
│   ├── PCN.Rd
│   ├── PER.Rd
│   ├── PHL.Rd
│   ├── PLW.Rd
│   ├── PNG.Rd
│   ├── POL.Rd
│   ├── PRI.Rd
│   ├── PRT.Rd
│   ├── PRY.Rd
│   ├── PSE.Rd
│   ├── PYF.Rd
│   ├── QAT.Rd
│   ├── REU.Rd
│   ├── RKS.Rd
│   ├── ROU.Rd
│   ├── RUS.Rd
│   ├── RWA.Rd
│   ├── SAU.Rd
│   ├── SDN.Rd
│   ├── SEN.Rd
│   ├── SGP.Rd
│   ├── SHN.Rd
│   ├── SLB.Rd
│   ├── SLE.Rd
│   ├── SLV.Rd
│   ├── SMR.Rd
│   ├── SOM.Rd
│   ├── SRB.Rd
│   ├── SSD.Rd
│   ├── STP.Rd
│   ├── SUR.Rd
│   ├── SVK.Rd
│   ├── SVN.Rd
│   ├── SWE.Rd
│   ├── SWZ.Rd
│   ├── SXM.Rd
│   ├── SYC.Rd
│   ├── SYR.Rd
│   ├── TCA.Rd
│   ├── TCD.Rd
│   ├── TGO.Rd
│   ├── THA.Rd
│   ├── TJK.Rd
│   ├── TKL.Rd
│   ├── TKM.Rd
│   ├── TLS.Rd
│   ├── TON.Rd
│   ├── TTO.Rd
│   ├── TUN.Rd
│   ├── TUR.Rd
│   ├── TUV.Rd
│   ├── TWN.Rd
│   ├── TZA.Rd
│   ├── UGA.Rd
│   ├── UKR.Rd
│   ├── URY.Rd
│   ├── USA.Rd
│   ├── UZB.Rd
│   ├── VAT.Rd
│   ├── VCT.Rd
│   ├── VEN.Rd
│   ├── VGB.Rd
│   ├── VIR.Rd
│   ├── VNM.Rd
│   ├── VUT.Rd
│   ├── WLF.Rd
│   ├── WSM.Rd
│   ├── YEM.Rd
│   ├── ZAF.Rd
│   ├── ZMB.Rd
│   ├── ZWE.Rd
│   ├── add_iso.Rd
│   ├── admin.ch.Rd
│   ├── arcgis.de.Rd
│   ├── arcgis.se.Rd
│   ├── canada.ca.Rd
│   ├── cdc.gov.Rd
│   ├── covid19.Rd
│   ├── covid19india.org.Rd
│   ├── covidtracking.com.Rd
│   ├── cumsum.Rd
│   ├── decreasing.Rd
│   ├── docstring.Rd
│   ├── drop_decreasing.Rd
│   ├── ds_check_format.Rd
│   ├── ds_docstring.Rd
│   ├── extdata.Rd
│   ├── geohive.ie.Rd
│   ├── github.ceedsdemm.covidprodataset.Rd
│   ├── github.covid19datahub.covid19br.Rd
│   ├── github.covid19euzh.covid19eudata.Rd
│   ├── github.cssegisanddata.covid19.Rd
│   ├── github.cssegisanddata.covid19unified.Rd
│   ├── github.dsfsi.covid19za.Rd
│   ├── github.dssgpt.covid19ptdata.Rd
│   ├── github.italia.covid19opendatavaccini.Rd
│   ├── github.jmcastagnetto.covid19perudata.Rd
│   ├── github.lisphilar.covid19sir.Rd
│   ├── github.m3it.covid19data.Rd
│   ├── github.minciencia.datoscovid19.Rd
│   ├── github.mpiktas.covid19lt.Rd
│   ├── github.nytimes.covid19data.Rd
│   ├── github.openzh.covid19.Rd
│   ├── github.oxcgrt.covidpolicytracker.Rd
│   ├── github.ozanerturk.covid19turkeyapi.Rd
│   ├── github.pcmdpc.covid19.Rd
│   ├── github.robertkochinstitut.covid19impfungenindeutschland.Rd
│   ├── github.swsoyee.2019ncovjapan.Rd
│   ├── github.wcota.covid19br.Rd
│   ├── github.wcota.covid19br.vac.Rd
│   ├── go.th.Rd
│   ├── gob.ar.Rd
│   ├── gob.pe.Rd
│   ├── gouv.fr.Rd
│   ├── gov.co.Rd
│   ├── gov.lv.Rd
│   ├── gov.si.Rd
│   ├── gov.tw.Rd
│   ├── gov.uk.Rd
│   ├── gv.at.Rd
│   ├── healthdata.gov.Rd
│   ├── humdata.af.Rd
│   ├── humdata.ht.Rd
│   ├── id.Rd
│   ├── impfdashboard.de.Rd
│   ├── isciii.es.Rd
│   ├── iso_docstring.Rd
│   ├── isoweek2date.Rd
│   ├── koronavirus.hr.Rd
│   ├── map_data.Rd
│   ├── map_values.Rd
│   ├── mzcr.cz.Rd
│   ├── naming.Rd
│   ├── ourworldindata.org.Rd
│   ├── read.csv.Rd
│   ├── read.excel.Rd
│   ├── read.xsv.Rd
│   ├── read.zip.Rd
│   ├── repo.Rd
│   ├── rivm.nl.Rd
│   ├── sciensano.be.Rd
│   ├── ssi.dk.Rd
│   ├── toyokeizai.net.Rd
│   ├── who.int.Rd
│   ├── wikipedia.dp.Rd
│   └── write.csv.Rd
├── pkgdown/
│   ├── _pkgdown.yml
│   ├── extra.css
│   └── extra.js
├── styleguide.md
├── test/
│   └── compare_data.R
└── vignettes/
    ├── contributors.Rmd
    ├── data.Rmd
    ├── docs.Rmd
    ├── python.Rmd
    └── r.Rmd
Download .txt
SYMBOL INDEX (1 symbols across 1 files)

FILE: pkgdown/extra.js
  function downloadTable (line 1) | function downloadTable(el, slug) {
Condensed preview — 673 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (5,044K chars).
[
  {
    "path": ".github/.gitignore",
    "chars": 7,
    "preview": "*.html\n"
  },
  {
    "path": ".github/workflows/pkgdown.yaml",
    "chars": 1427,
    "preview": "on:\n  \n  push:\n  \n  schedule:\n    \n    # * is a special character in YAML so you have to quote this string\n    - cron:  "
  },
  {
    "path": ".gitignore",
    "chars": 711,
    "preview": "# History files\n.Rhistory\n.Rapp.history\n\n# Session Data files\n.RData\n\n# User-specific files\n.Ruserdata\n\n# Example code i"
  },
  {
    "path": ".travis.yml",
    "chars": 238,
    "preview": "language: r\nsudo: required\ncache: packages\n\nr:\n  - devel\n  - release\n\nos:\n  - linux\n  \nenv:\n  - _R_CHECK_TESTS_NLINES_=0"
  },
  {
    "path": "DESCRIPTION",
    "chars": 834,
    "preview": "Package: COVID19\nType: Package\nTitle: COVID-19 Data Hub\nVersion: 3.0.0\nAuthors@R: c(\n    person(given = \"Emanuele\", fami"
  },
  {
    "path": "LICENSE.md",
    "chars": 2253,
    "preview": "# Terms of use\n\n> We have invested a lot of time and effort in creating [COVID-19 Data Hub](https://covid19datahub.io/),"
  },
  {
    "path": "NAMESPACE",
    "chars": 421,
    "preview": "# Generated by roxygen2: do not edit by hand\n\nexport(add_iso)\nexport(covid19)\nexport(cumsum)\nexport(decreasing)\nexport(d"
  },
  {
    "path": "NEWS.md",
    "chars": 5039,
    "preview": "# COVID19 v3.0.0\n\n## Data Update\n\n- added the data on the number of people who have received at least one vaccine dose\n-"
  },
  {
    "path": "R/covid19.R",
    "chars": 32098,
    "preview": "#' COVID-19 Data Hub\n#'\n#' @param country vector of 3-letter ISO codes for countries.\n#' @param level integer. Granulari"
  },
  {
    "path": "R/ds_admin.ch.R",
    "chars": 4849,
    "preview": "#' Federal Office of Public Health\n#'\n#' Data source for: Switzerland and Liechtenstein\n#'\n#' @param level 1, or 2 (only"
  },
  {
    "path": "R/ds_arcgis.de.R",
    "chars": 2696,
    "preview": "#' Robert Koch Institute\n#'\n#' Data source for: Germany\n#'\n#' @param level 1, 2, 3\n#'\n#' @section Level 1:\n#' - confirme"
  },
  {
    "path": "R/ds_arcgis.se.R",
    "chars": 1801,
    "preview": "#' Public Health Agency of Sweden\n#'\n#' Data source for: Sweden\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1:\n#' - con"
  },
  {
    "path": "R/ds_canada.ca.R",
    "chars": 2777,
    "preview": "#' Public Health Agency of Canada\n#'\n#' Data source for: Canada\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1:\n#' - con"
  },
  {
    "path": "R/ds_cdc.gov.R",
    "chars": 2940,
    "preview": "#' Centers for Disease Control and Prevention\n#'\n#' Data source for: United States\n#'\n#' @param level 1, 2, 3\n#'\n#' @sec"
  },
  {
    "path": "R/ds_covid19india.org.R",
    "chars": 1753,
    "preview": "#' COVID-19 India API\n#'\n#' Data source for: India\n#'\n#' @param level 2\n#'\n#' @section Level 2:\n#' - confirmed cases\n#' "
  },
  {
    "path": "R/ds_covidtracking.com.R",
    "chars": 952,
    "preview": "#' The COVID Tracking Project\n#'\n#' Data source for: United States\n#'\n#' @param level 2\n#'\n#' @section Level 2:\n#' - con"
  },
  {
    "path": "R/ds_geohive.ie.R",
    "chars": 3897,
    "preview": "#' Health Protection Surveillance Centre (HPSC) and Health Service Executive (HSE)\n#'\n#' Data source for: Ireland\n#'\n#' "
  },
  {
    "path": "R/ds_github.ceedsdemm.covidprodataset.R",
    "chars": 723,
    "preview": "#' Centre of Excellence in Economics and Data Science, University of Milan\n#'\n#' Data source for: Italy\n#'\n#' @param lev"
  },
  {
    "path": "R/ds_github.covid19euzh.covid19eudata.R",
    "chars": 1195,
    "preview": "#' COVID-19 European Chinese Channel\n#'\n#' Data source for: Poland\n#'\n#' @param level 2\n#'\n#' @section Level 2:\n#' - con"
  },
  {
    "path": "R/ds_github.cssegisanddata.covid19.R",
    "chars": 4252,
    "preview": "#' Johns Hopkins Center for Systems Science and Engineering\n#'\n#' Data source for: Worldwide\n#'\n#' @param level 1, 2, or"
  },
  {
    "path": "R/ds_github.cssegisanddata.covid19unified.R",
    "chars": 2027,
    "preview": "#' Johns Hopkins Center for Systems Science and Engineering\n#'\n#' Data source for: Worldwide\n#'\n#' @param level 1, 2, 3\n"
  },
  {
    "path": "R/ds_github.dsfsi.covid19za.R",
    "chars": 1786,
    "preview": "#' Data Science for Social Impact research group, University of Pretoria\n#'\n#' Data source for: South Africa\n#'\n#' @para"
  },
  {
    "path": "R/ds_github.dssgpt.covid19ptdata.R",
    "chars": 2565,
    "preview": "#' Data Science for Social Good Portugal\n#'\n#' Data source for: Portugal\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1:"
  },
  {
    "path": "R/ds_github.eguidotti.covid19br.R",
    "chars": 895,
    "preview": "#' Emanuele Guidotti\n#'\n#' Data source for: Brazil\n#'\n#' @param level 3\n#'\n#' @section Level 3:\n#' - total vaccine doses"
  },
  {
    "path": "R/ds_github.italia.covid19opendatavaccini.R",
    "chars": 3564,
    "preview": "#' Commissario straordinario per l'emergenza Covid-19, Presidenza del Consiglio dei Ministri\n#'\n#' Data source for: Ital"
  },
  {
    "path": "R/ds_github.jmcastagnetto.covid19perudata.R",
    "chars": 2911,
    "preview": "#' Jesus M. Castagnetto\n#'\n#' Data source for: Peru\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1:\n#' - confirmed cases"
  },
  {
    "path": "R/ds_github.lisphilar.covid19sir.R",
    "chars": 2020,
    "preview": "#' Hirokazu Takaya\n#'\n#' Data source for: Japan\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1:\n#' - confirmed cases\n#' "
  },
  {
    "path": "R/ds_github.m3it.covid19data.R",
    "chars": 1066,
    "preview": "#' Matt Bolton\n#'\n#' Data source for: Australia\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1:\n#' - confirmed cases\n#' "
  },
  {
    "path": "R/ds_github.minciencia.datoscovid19.R",
    "chars": 10082,
    "preview": "#' Ministerio de Ciencia, Tecnología, Conocimiento, e Innovación\n#'\n#' Data source for: Chile\n#'\n#' @param level 1, 2, 3"
  },
  {
    "path": "R/ds_github.mpiktas.covid19lt.R",
    "chars": 2986,
    "preview": "#' Vaidotas Zemlys-Balevicius\n#'\n#' Data source for: Lithuania\n#'\n#' @param level 1, 2, 3\n#'\n#' @section Level 1:\n#' - c"
  },
  {
    "path": "R/ds_github.nytimes.covid19data.R",
    "chars": 1393,
    "preview": "#' The New York Times\n#'\n#' Data source for: United States\n#'\n#' @param level 1, 2, 3\n#' @param fips filter by FIPS code"
  },
  {
    "path": "R/ds_github.openzh.covid19.R",
    "chars": 1519,
    "preview": "#' Specialist Unit for Open Government Data Canton of Zurich\n#'\n#' Data source for: Switzerland and Liechtenstein\n#'\n#' "
  },
  {
    "path": "R/ds_github.oxcgrt.covidpolicytracker.R",
    "chars": 4715,
    "preview": "#' Oxford Covid-19 Government Response Tracker\n#'\n#' Data source for: Worldwide\n#' Documentation: https://covid19datahub"
  },
  {
    "path": "R/ds_github.ozanerturk.covid19turkeyapi.R",
    "chars": 956,
    "preview": "#' Ozan Erturk\n#'\n#' Data source for: Turkey\n#'\n#' @param level 1\n#'\n#' @section Level 1:\n#' - confirmed cases\n#' - deat"
  },
  {
    "path": "R/ds_github.pcmdpc.covid19.R",
    "chars": 1673,
    "preview": "#' Ministero della Salute\n#'\n#' Data source for: Italy\n#'\n#' @param level 1, 2, 3\n#'\n#' @section Level 1:\n#' - confirmed"
  },
  {
    "path": "R/ds_github.robertkochinstitut.covid19impfungenindeutschland.R",
    "chars": 3029,
    "preview": "#' Robert Koch Institut\n#'\n#' Data source for: Germany\n#'\n#' @param level 2, 3\n#'\n#' @section Level 2:\n#' - total vaccin"
  },
  {
    "path": "R/ds_github.swsoyee.2019ncovjapan.R",
    "chars": 1358,
    "preview": "#' Su Wei\n#'\n#' Data source for: Japan and Cruise Ships\n#'\n#' @param level 1, 2\n#' @param id filter by name\n#'\n#' @secti"
  },
  {
    "path": "R/ds_github.wcota.covid19br.R",
    "chars": 2533,
    "preview": "#' Wesley Cota\n#'\n#' Data source for: Brazil\n#'\n#' @param level 1, 2, 3\n#'\n#' @section Level 1:\n#' - confirmed cases\n#' "
  },
  {
    "path": "R/ds_github.wcota.covid19br.vac.R",
    "chars": 1793,
    "preview": "#' Wesley Cota\n#'\n#' Data source for: Brazil\n#'\n#' @param level 3\n#'\n#' @section Level 3:\n#' - total vaccine doses admin"
  },
  {
    "path": "R/ds_go.th.R",
    "chars": 1344,
    "preview": "#' Department of Disease Control, Thailand Ministry of Public Health\n#'\n#' Data source for: Thailand\n#'\n#' @param level "
  },
  {
    "path": "R/ds_gob.ar.R",
    "chars": 6496,
    "preview": "#' Argentine Ministry of Health\n#'\n#' Data source for: Argentina\n#'\n#' @param level 1, 2, 3\n#'\n#' @section Level 1:\n#' -"
  },
  {
    "path": "R/ds_gob.pe.R",
    "chars": 1970,
    "preview": "#' Ministerio de Salud\n#'\n#' Data source for: Peru\n#'\n#' @param level 1, 2, 3\n#'\n#' @section Level 1:\n#' - total vaccine"
  },
  {
    "path": "R/ds_gouv.fr.R",
    "chars": 9032,
    "preview": "#' Santé Publique France\n#'\n#' Data source for: France and overseas territories\n#'\n#' @param level 1, 2, 3\n#' @param reg"
  },
  {
    "path": "R/ds_gov.co.R",
    "chars": 6629,
    "preview": "#' Instituto Nacional de Salud\n#'\n#' Data source for: Colombia\n#'\n#' @param level 1, 2, 3\n#'\n#' @section Level 1:\n#' - c"
  },
  {
    "path": "R/ds_gov.lv.R",
    "chars": 2069,
    "preview": "#' Center for Disease Prevention and Control\n#'\n#' Data source for: Latvia\n#'\n#' @param level 1, 3\n#'\n#' @section Level "
  },
  {
    "path": "R/ds_gov.si.R",
    "chars": 1094,
    "preview": "#' Ministry of Health and National Institute for Public health\n#'\n#' Data source for: Slovenia\n#'\n#' @param level 1\n#'\n#"
  },
  {
    "path": "R/ds_gov.tw.R",
    "chars": 2123,
    "preview": "#' Ministry of Health and Welfare of Taiwan\n#'\n#' Data source for: Taiwan\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1"
  },
  {
    "path": "R/ds_gov.uk.R",
    "chars": 4083,
    "preview": "#' UK Health Security Agency\n#'\n#' Data source for: United Kingdom\n#'\n#' @param level 1, 2, 3\n#'\n#' @section Level 1:\n#'"
  },
  {
    "path": "R/ds_gv.at.R",
    "chars": 4447,
    "preview": "#' Federal Ministry of Social Affairs, Health, Care and Consumer Protection, Austria\n#'\n#' Data source for: Austria\n#'\n#"
  },
  {
    "path": "R/ds_healthdata.gov.R",
    "chars": 1375,
    "preview": "#' U.S. Department of Health & Human Services\n#'\n#' Data source for: United States\n#'\n#' @param level 2\n#'\n#' @section L"
  },
  {
    "path": "R/ds_humdata.af.R",
    "chars": 1962,
    "preview": "#' Afghanistan Ministry of Health\n#'\n#' Data source for: Afghanistan\n#'\n#' @param level 2\n#'\n#' @section Level 2:\n#' - c"
  },
  {
    "path": "R/ds_humdata.ht.R",
    "chars": 1183,
    "preview": "#' Ministry of Public Health and Population of Haiti\n#'\n#' Data source for: Haiti\n#'\n#' @param level 2\n#'\n#' @section Le"
  },
  {
    "path": "R/ds_impfdashboard.de.R",
    "chars": 797,
    "preview": "#' Robert Koch Institute and the Federal Ministry of Health\n#'\n#' Data source for: Germany\n#'\n#' @param level 1\n#'\n#' @s"
  },
  {
    "path": "R/ds_isciii.es.R",
    "chars": 1905,
    "preview": "#' Centro Nacional de Epidemiología\n#'\n#' Data source for: Spain\n#'\n#' @param level 2, 3\n#'\n#' @section Level 2:\n#' - co"
  },
  {
    "path": "R/ds_koronavirus.hr.R",
    "chars": 1307,
    "preview": "#' Croatian Institute of Public Health\n#'\n#' Data source for: Croatia\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1:\n#'"
  },
  {
    "path": "R/ds_mzcr.cz.R",
    "chars": 5751,
    "preview": "#' Ministry of Health of the Czech Republic\n#'\n#' Data source for: Czech Republic\n#'\n#' @param level 1, 2, 3\n#'\n#' @sect"
  },
  {
    "path": "R/ds_ourworldindata.org.R",
    "chars": 2321,
    "preview": "#' Our World in Data\n#'\n#' Data source for: Worldwide\n#'\n#' @param level 1, 2\n#' @param id filter by ISO code if level=1"
  },
  {
    "path": "R/ds_rivm.nl.R",
    "chars": 1673,
    "preview": "#' National Institute for Public Health and the Environment\n#'\n#' Data source for: Netherlands\n#'\n#' @param level 1, 2, "
  },
  {
    "path": "R/ds_sciensano.be.R",
    "chars": 7760,
    "preview": "#' Sciensano, the Belgian Institute for Health\n#'\n#' Data source for: Belgium\n#'\n#' @param level 1, 2, 3\n#'\n#' @section "
  },
  {
    "path": "R/ds_ssi.dk.R",
    "chars": 7566,
    "preview": "#' Statens Serum Institut\n#'\n#' Data source for: Denmark\n#'\n#' @param level 2, 3\n#'\n#' @section Level 2:\n#' - confirmed "
  },
  {
    "path": "R/ds_toyokeizai.net.R",
    "chars": 2579,
    "preview": "#' Toyo Keizai\n#'\n#' Data source for: Japan\n#'\n#' @param level 1, 2\n#'\n#' @section Level 1:\n#' - confirmed cases\n#' - de"
  },
  {
    "path": "R/ds_who.int.R",
    "chars": 1251,
    "preview": "#' World Health Organization\n#'\n#' Data source for: Worldwide\n#'\n#' @param level 1\n#' @param id filter by 2-letter ISO c"
  },
  {
    "path": "R/ds_wikipedia.dp.R",
    "chars": 549,
    "preview": "#' Wikipedia\n#'\n#' Data source for: Diamond Princess (Cruise Ship)\n#'\n#' @param level 1\n#'\n#' @section Level 1:\n#' - con"
  },
  {
    "path": "R/iso_ABW.R",
    "chars": 1126,
    "preview": "#' Aruba\n#'\n#' @source \\url{`r repo(\"ABW\")`}\n#' \nABW <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_AFG.R",
    "chars": 1504,
    "preview": "#' Afghanistan\n#'\n#' @source \\url{`r repo(\"AFG\")`}\n#' \nAFG <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #'"
  },
  {
    "path": "R/iso_AGO.R",
    "chars": 842,
    "preview": "#' Angola\n#'\n#' @source \\url{`r repo(\"AGO\")`}\n#' \nAGO <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_AIA.R",
    "chars": 855,
    "preview": "#' Anguilla\n#'\n#' @source \\url{`r repo(\"AIA\")`}\n#' \nAIA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_ALB.R",
    "chars": 844,
    "preview": "#' Albania\n#'\n#' @source \\url{`r repo(\"ALB\")`}\n#' \nALB <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_AND.R",
    "chars": 844,
    "preview": "#' Andorra\n#'\n#' @source \\url{`r repo(\"AND\")`}\n#' \nAND <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_ARE.R",
    "chars": 870,
    "preview": "#' United Arab Emirates\n#'\n#' @source \\url{`r repo(\"ARE\")`}\n#' \nARE <- function(level){\n  x <- NULL\n  \n  #' @concept Lev"
  },
  {
    "path": "R/iso_ARG.R",
    "chars": 1810,
    "preview": "#' Argentina\n#'\n#' @source \\url{`r repo(\"ARG\")`}\n#' \nARG <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_ARM.R",
    "chars": 844,
    "preview": "#' Armenia\n#'\n#' @source \\url{`r repo(\"ARM\")`}\n#' \nARM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_ASM.R",
    "chars": 792,
    "preview": "#' American Samoa\n#'\n#' @source \\url{`r repo(\"ASM\")`}\n#' \nASM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n "
  },
  {
    "path": "R/iso_ATG.R",
    "chars": 868,
    "preview": "#' Antigua and Barbuda\n#'\n#' @source \\url{`r repo(\"ATG\")`}\n#' \nATG <- function(level){\n  x <- NULL\n  \n  #' @concept Leve"
  },
  {
    "path": "R/iso_AUS.R",
    "chars": 1288,
    "preview": "#' Australia\n#'\n#' @source \\url{`r repo(\"AUS\")`}\n#' \nAUS <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_AUT.R",
    "chars": 1661,
    "preview": "#' Austria\n#'\n#' @source \\url{`r repo(\"AUT\")`}\n#' \nAUT <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_AZE.R",
    "chars": 850,
    "preview": "#' Azerbaijan\n#'\n#' @source \\url{`r repo(\"AZE\")`}\n#' \nAZE <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_BDI.R",
    "chars": 844,
    "preview": "#' Burundi\n#'\n#' @source \\url{`r repo(\"BDI\")`}\n#' \nBDI <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_BEL.R",
    "chars": 2141,
    "preview": "#' Belgium\n#'\n#' @source \\url{`r repo(\"BEL\")`}\n#' \nBEL <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_BEN.R",
    "chars": 840,
    "preview": "#' Benin\n#'\n#' @source \\url{`r repo(\"BEN\")`}\n#' \nBEN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_BES.R",
    "chars": 903,
    "preview": "#' Bonaire, Sint Eustatius and Saba\n#'\n#' @source \\url{`r repo(\"BES\")`}\n#' \nBES <- function(level){\n  x <- NULL\n  \n  #' "
  },
  {
    "path": "R/iso_BFA.R",
    "chars": 854,
    "preview": "#' Burkina Faso\n#'\n#' @source \\url{`r repo(\"BFA\")`}\n#' \nBFA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #"
  },
  {
    "path": "R/iso_BGD.R",
    "chars": 850,
    "preview": "#' Bangladesh\n#'\n#' @source \\url{`r repo(\"BGD\")`}\n#' \nBGD <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_BGR.R",
    "chars": 846,
    "preview": "#' Bulgaria\n#'\n#' @source \\url{`r repo(\"BGR\")`}\n#' \nBGR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_BHR.R",
    "chars": 844,
    "preview": "#' Bahrain\n#'\n#' @source \\url{`r repo(\"BHR\")`}\n#' \nBHR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_BHS.R",
    "chars": 844,
    "preview": "#' Bahamas\n#'\n#' @source \\url{`r repo(\"BHS\")`}\n#' \nBHS <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_BIH.R",
    "chars": 874,
    "preview": "#' Bosnia and Herzegovina\n#'\n#' @source \\url{`r repo(\"BIH\")`}\n#' \nBIH <- function(level){\n  x <- NULL\n  \n  #' @concept L"
  },
  {
    "path": "R/iso_BLR.R",
    "chars": 844,
    "preview": "#' Belarus\n#'\n#' @source \\url{`r repo(\"BLR\")`}\n#' \nBLR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_BLZ.R",
    "chars": 842,
    "preview": "#' Belize\n#'\n#' @source \\url{`r repo(\"BLZ\")`}\n#' \nBLZ <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_BMU.R",
    "chars": 869,
    "preview": "#' Bermuda\n#'\n#' @source \\url{`r repo(\"BMU\")`}\n#' \nBMU <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_BOL.R",
    "chars": 844,
    "preview": "#' Bolivia\n#'\n#' @source \\url{`r repo(\"BOL\")`}\n#' \nBOL <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_BRA.R",
    "chars": 1712,
    "preview": "#' Brazil \n#'\n#' @source \\url{`r repo(\"BRA\")`}\n#' \nBRA <- function(level, ...){\n  x <- NULL\n  \n  #' @concept Level 1\n  #"
  },
  {
    "path": "R/iso_BRB.R",
    "chars": 846,
    "preview": "#' Barbados\n#'\n#' @source \\url{`r repo(\"BRB\")`}\n#' \nBRB <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_BRN.R",
    "chars": 842,
    "preview": "#' Brunei\n#'\n#' @source \\url{`r repo(\"BRN\")`}\n#' \nBRN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_BTN.R",
    "chars": 842,
    "preview": "#' Bhutan\n#'\n#' @source \\url{`r repo(\"BTN\")`}\n#' \nBTN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_BWA.R",
    "chars": 846,
    "preview": "#' Botswana\n#'\n#' @source \\url{`r repo(\"BWA\")`}\n#' \nBWA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_CAC.R",
    "chars": 568,
    "preview": "#' Cruise Ship Costa Atlantica\n#'\n#' @source \\url{`r repo(\"CAC\")`}\n#' \nCAC <- function(level){\n  x <- NULL\n  \n  #' @conc"
  },
  {
    "path": "R/iso_CAF.R",
    "chars": 878,
    "preview": "#' Central African Republic\n#'\n#' @source \\url{`r repo(\"CAF\")`}\n#' \nCAF <- function(level){\n  x <- NULL\n  \n  #' @concept"
  },
  {
    "path": "R/iso_CAN.R",
    "chars": 1355,
    "preview": "#' Canada\n#'\n#' @source \\url{`r repo(\"CAN\")`}\n#' \nCAN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_CHE.R",
    "chars": 1930,
    "preview": "#' Switzerland\n#'\n#' @source \\url{`r repo(\"CHE\")`}\n#' \nCHE <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #'"
  },
  {
    "path": "R/iso_CHL.R",
    "chars": 1931,
    "preview": "#' Chile\n#'\n#' @source \\url{`r repo(\"CHL\")`}\n#' \nCHL <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_CHN.R",
    "chars": 739,
    "preview": "#' China\n#'\n#' @source \\url{`r repo(\"CHN\")`}\n#' \nCHN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_CIV.R",
    "chars": 856,
    "preview": "#' Cote d'Ivoire\n#'\n#' @source \\url{`r repo(\"CIV\")`}\n#' \nCIV <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  "
  },
  {
    "path": "R/iso_CMR.R",
    "chars": 846,
    "preview": "#' Cameroon\n#'\n#' @source \\url{`r repo(\"CMR\")`}\n#' \nCMR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_COD.R",
    "chars": 862,
    "preview": "#' Congo (Kinshasa)\n#'\n#' @source \\url{`r repo(\"COD\")`}\n#' \nCOD <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1"
  },
  {
    "path": "R/iso_COG.R",
    "chars": 868,
    "preview": "#' Congo (Brazzaville)\n#'\n#' @source \\url{`r repo(\"COG\")`}\n#' \nCOG <- function(level){\n  x <- NULL\n  \n  #' @concept Leve"
  },
  {
    "path": "R/iso_COK.R",
    "chars": 863,
    "preview": "#' Cook Islands\n#'\n#' @source \\url{`r repo(\"COK\")`}\n#' \nCOK <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #"
  },
  {
    "path": "R/iso_COL.R",
    "chars": 1626,
    "preview": "#' Colombia\n#'\n#' @source \\url{`r repo(\"COL\")`}\n#' \nCOL <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_COM.R",
    "chars": 860,
    "preview": "#' Comoros\n#'\n#' @source \\url{`r repo(\"COM\")`}\n#' \nCOM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_CPV.R",
    "chars": 850,
    "preview": "#' Cabo Verde\n#'\n#' @source \\url{`r repo(\"CPV\")`}\n#' \nCPV <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_CRI.R",
    "chars": 850,
    "preview": "#' Costa Rica\n#'\n#' @source \\url{`r repo(\"CRI\")`}\n#' \nCRI <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_CUB.R",
    "chars": 838,
    "preview": "#' Cuba\n#'\n#' @source \\url{`r repo(\"CUB\")`}\n#' \nCUB <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @secti"
  },
  {
    "path": "R/iso_CUW.R",
    "chars": 869,
    "preview": "#' Curaçao\n#'\n#' @source \\url{`r repo(\"CUW\")`}\n#' \nCUW <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_CYM.R",
    "chars": 883,
    "preview": "#' Cayman Islands\n#'\n#' @source \\url{`r repo(\"CYM\")`}\n#' \nCYM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n "
  },
  {
    "path": "R/iso_CYP.R",
    "chars": 842,
    "preview": "#' Cyprus\n#'\n#' @source \\url{`r repo(\"CYP\")`}\n#' \nCYP <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_CZE.R",
    "chars": 1558,
    "preview": "#' Czech Republic\n#'\n#' @source \\url{`r repo(\"CZE\")`}\n#' \nCZE <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n "
  },
  {
    "path": "R/iso_DEU.R",
    "chars": 2807,
    "preview": "#' Germany\n#'\n#' @source \\url{`r repo(\"DEU\")`}\n#' \nDEU <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_DJI.R",
    "chars": 846,
    "preview": "#' Djibouti\n#'\n#' @source \\url{`r repo(\"DJI\")`}\n#' \nDJI <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_DMA.R",
    "chars": 846,
    "preview": "#' Dominica\n#'\n#' @source \\url{`r repo(\"DMA\")`}\n#' \nDMA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_DNK.R",
    "chars": 1728,
    "preview": "#' Denmark\n#'\n#' @source \\url{`r repo(\"DNK\")`}\n#' \nDNK <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_DOM.R",
    "chars": 866,
    "preview": "#' Dominican Republic\n#'\n#' @source \\url{`r repo(\"DOM\")`}\n#' \nDOM <- function(level){\n  x <- NULL\n  \n  #' @concept Level"
  },
  {
    "path": "R/iso_DPC.R",
    "chars": 948,
    "preview": "#' Cruise Ship Diamond Princess\n#'\n#' @source \\url{`r repo(\"DPC\")`}\n#' \nDPC <- function(level){\n  x <- NULL\n  \n  #' @con"
  },
  {
    "path": "R/iso_DZA.R",
    "chars": 860,
    "preview": "#' Algeria\n#'\n#' @source \\url{`r repo(\"DZA\")`}\n#' \nDZA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_ECU.R",
    "chars": 844,
    "preview": "#' Ecuador\n#'\n#' @source \\url{`r repo(\"ECU\")`}\n#' \nECU <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_EGY.R",
    "chars": 840,
    "preview": "#' Egypt\n#'\n#' @source \\url{`r repo(\"EGY\")`}\n#' \nEGY <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_ERI.R",
    "chars": 844,
    "preview": "#' Eritrea\n#'\n#' @source \\url{`r repo(\"ERI\")`}\n#' \nERI <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_ESP.R",
    "chars": 1604,
    "preview": "#' Spain\n#'\n#' @source \\url{`r repo(\"ESP\")`}\n#' \nESP <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_EST.R",
    "chars": 844,
    "preview": "#' Estonia\n#'\n#' @source \\url{`r repo(\"EST\")`}\n#' \nEST <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_ETH.R",
    "chars": 846,
    "preview": "#' Ethiopia\n#'\n#' @source \\url{`r repo(\"ETH\")`}\n#' \nETH <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_FIN.R",
    "chars": 844,
    "preview": "#' Finland\n#'\n#' @source \\url{`r repo(\"FIN\")`}\n#' \nFIN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_FJI.R",
    "chars": 838,
    "preview": "#' Fiji\n#'\n#' @source \\url{`r repo(\"FJI\")`}\n#' \nFJI <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @secti"
  },
  {
    "path": "R/iso_FLK.R",
    "chars": 882,
    "preview": "#' Falkland Islands\n#'\n#' @source \\url{`r repo(\"FLK\")`}\n#' \nFLK <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1"
  },
  {
    "path": "R/iso_FRA.R",
    "chars": 1910,
    "preview": "#' France\n#'\n#' @source \\url{`r repo(\"FRA\")`}\n#' \nFRA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_FRO.R",
    "chars": 863,
    "preview": "#' Faroe Islands\n#'\n#' @source \\url{`r repo(\"FRO\")`}\n#' \nFRO <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  "
  },
  {
    "path": "R/iso_FSM.R",
    "chars": 850,
    "preview": "#' Micronesia\n#'\n#' @source \\url{`r repo(\"FSM\")`}\n#' \nFSM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_GAB.R",
    "chars": 840,
    "preview": "#' Gabon\n#'\n#' @source \\url{`r repo(\"GAB\")`}\n#' \nGAB <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_GBR.R",
    "chars": 1993,
    "preview": "#' United Kingdom\n#'\n#' @source \\url{`r repo(\"GBR\")`}\n#' \nGBR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n "
  },
  {
    "path": "R/iso_GEO.R",
    "chars": 844,
    "preview": "#' Georgia\n#'\n#' @source \\url{`r repo(\"GEO\")`}\n#' \nGEO <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_GGY.R",
    "chars": 846,
    "preview": "#' Guernsey\n#'\n#' @source \\url{`r repo(\"GGY\")`}\n#' \nGGY <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_GHA.R",
    "chars": 840,
    "preview": "#' Ghana\n#'\n#' @source \\url{`r repo(\"GHA\")`}\n#' \nGHA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_GIB.R",
    "chars": 857,
    "preview": "#' Gibraltar\n#'\n#' @source \\url{`r repo(\"GIB\")`}\n#' \nGIB <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_GIN.R",
    "chars": 842,
    "preview": "#' Guinea\n#'\n#' @source \\url{`r repo(\"GIN\")`}\n#' \nGIN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_GLP.R",
    "chars": 575,
    "preview": "#' Guadeloupe\n#'\n#' @source \\url{`r repo(\"GLP\")`}\n#' \nGLP <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_GMB.R",
    "chars": 842,
    "preview": "#' Gambia\n#'\n#' @source \\url{`r repo(\"GMB\")`}\n#' \nGMB <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_GNB.R",
    "chars": 856,
    "preview": "#' Guinea-Bissau\n#'\n#' @source \\url{`r repo(\"GNB\")`}\n#' \nGNB <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  "
  },
  {
    "path": "R/iso_GNQ.R",
    "chars": 864,
    "preview": "#' Equatorial Guinea\n#'\n#' @source \\url{`r repo(\"GNQ\")`}\n#' \nGNQ <- function(level){\n  x <- NULL\n  \n  #' @concept Level "
  },
  {
    "path": "R/iso_GPC.R",
    "chars": 583,
    "preview": "#' Cruise Ship Grand Princess\n#'\n#' @source \\url{`r repo(\"GPC\")`}\n#' \nGPC <- function(level){\n  x <- NULL\n  \n  #' @conce"
  },
  {
    "path": "R/iso_GRC.R",
    "chars": 842,
    "preview": "#' Greece\n#'\n#' @source \\url{`r repo(\"GRC\")`}\n#' \nGRC <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_GRD.R",
    "chars": 844,
    "preview": "#' Grenada\n#'\n#' @source \\url{`r repo(\"GRD\")`}\n#' \nGRD <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_GRL.R",
    "chars": 857,
    "preview": "#' Greenland\n#'\n#' @source \\url{`r repo(\"GRL\")`}\n#' \nGRL <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_GTM.R",
    "chars": 848,
    "preview": "#' Guatemala\n#'\n#' @source \\url{`r repo(\"GTM\")`}\n#' \nGTM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_GUF.R",
    "chars": 578,
    "preview": "#' French Guiana\n#'\n#' @source \\url{`r repo(\"GUF\")`}\n#' \nGUF <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  "
  },
  {
    "path": "R/iso_GUM.R",
    "chars": 782,
    "preview": "#' Guam\n#'\n#' @source \\url{`r repo(\"GUM\")`}\n#' \nGUM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @secti"
  },
  {
    "path": "R/iso_GUY.R",
    "chars": 842,
    "preview": "#' Guyana\n#'\n#' @source \\url{`r repo(\"GUY\")`}\n#' \nGUY <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_HKG.R",
    "chars": 876,
    "preview": "#' Hong Kong\n#'\n#' @source \\url{`r repo(\"HKG\")`}\n#' \nHKG <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_HND.R",
    "chars": 846,
    "preview": "#' Honduras\n#'\n#' @source \\url{`r repo(\"HND\")`}\n#' \nHND <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_HRV.R",
    "chars": 1187,
    "preview": "#' Croatia\n#'\n#' @source \\url{`r repo(\"HRV\")`}\n#' \nHRV <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_HTI.R",
    "chars": 1223,
    "preview": "#' Haiti\n#'\n#' @source \\url{`r repo(\"HTI\")`}\n#' \nHTI <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_HUN.R",
    "chars": 844,
    "preview": "#' Hungary\n#'\n#' @source \\url{`r repo(\"HUN\")`}\n#' \nHUN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_IDN.R",
    "chars": 848,
    "preview": "#' Indonesia\n#'\n#' @source \\url{`r repo(\"IDN\")`}\n#' \nIDN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_IMN.R",
    "chars": 861,
    "preview": "#' Isle of Man\n#'\n#' @source \\url{`r repo(\"IMN\")`}\n#' \nIMN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #'"
  },
  {
    "path": "R/iso_IND.R",
    "chars": 1361,
    "preview": "#' India\n#'\n#' @source \\url{`r repo(\"IND\")`}\n#' \nIND <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_IRL.R",
    "chars": 1372,
    "preview": "#' Ireland\n#'\n#' @source \\url{`r repo(\"IRL\")`}\n#' \nIRL <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_IRN.R",
    "chars": 838,
    "preview": "#' Iran\n#'\n#' @source \\url{`r repo(\"IRN\")`}\n#' \nIRN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @secti"
  },
  {
    "path": "R/iso_IRQ.R",
    "chars": 838,
    "preview": "#' Iraq\n#'\n#' @source \\url{`r repo(\"IRQ\")`}\n#' \nIRQ <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @secti"
  },
  {
    "path": "R/iso_ISL.R",
    "chars": 844,
    "preview": "#' Iceland\n#'\n#' @source \\url{`r repo(\"ISL\")`}\n#' \nISL <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_ISR.R",
    "chars": 842,
    "preview": "#' Israel\n#'\n#' @source \\url{`r repo(\"ISR\")`}\n#' \nISR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_ITA.R",
    "chars": 2259,
    "preview": "#' Italy\n#'\n#' @source \\url{`r repo(\"ITA\")`}\n#' \nITA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_JAM.R",
    "chars": 844,
    "preview": "#' Jamaica\n#'\n#' @source \\url{`r repo(\"JAM\")`}\n#' \nJAM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_JEY.R",
    "chars": 842,
    "preview": "#' Jersey\n#'\n#' @source \\url{`r repo(\"JEY\")`}\n#' \nJEY <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_JOR.R",
    "chars": 842,
    "preview": "#' Jordan\n#'\n#' @source \\url{`r repo(\"JOR\")`}\n#' \nJOR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_JPN.R",
    "chars": 1098,
    "preview": "#' Japan\n#'\n#' @source \\url{`r repo(\"JPN\")`}\n#' \nJPN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_KAZ.R",
    "chars": 850,
    "preview": "#' Kazakhstan\n#'\n#' @source \\url{`r repo(\"KAZ\")`}\n#' \nKAZ <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_KEN.R",
    "chars": 840,
    "preview": "#' Kenya\n#'\n#' @source \\url{`r repo(\"KEN\")`}\n#' \nKEN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_KGZ.R",
    "chars": 850,
    "preview": "#' Kyrgyzstan\n#'\n#' @source \\url{`r repo(\"KGZ\")`}\n#' \nKGZ <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_KHM.R",
    "chars": 846,
    "preview": "#' Cambodia\n#'\n#' @source \\url{`r repo(\"KHM\")`}\n#' \nKHM <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_KIR.R",
    "chars": 846,
    "preview": "#' Kiribati\n#'\n#' @source \\url{`r repo(\"KIR\")`}\n#' \nKIR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_KNA.R",
    "chars": 872,
    "preview": "#' Saint Kitts and Nevis\n#'\n#' @source \\url{`r repo(\"KNA\")`}\n#' \nKNA <- function(level){\n  x <- NULL\n  \n  #' @concept Le"
  },
  {
    "path": "R/iso_KOR.R",
    "chars": 854,
    "preview": "#' Korea, South\n#'\n#' @source \\url{`r repo(\"KOR\")`}\n#' \nKOR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #"
  },
  {
    "path": "R/iso_KWT.R",
    "chars": 842,
    "preview": "#' Kuwait\n#'\n#' @source \\url{`r repo(\"KWT\")`}\n#' \nKWT <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_LAO.R",
    "chars": 838,
    "preview": "#' Laos\n#'\n#' @source \\url{`r repo(\"LAO\")`}\n#' \nLAO <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @secti"
  },
  {
    "path": "R/iso_LBN.R",
    "chars": 844,
    "preview": "#' Lebanon\n#'\n#' @source \\url{`r repo(\"LBN\")`}\n#' \nLBN <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_LBR.R",
    "chars": 844,
    "preview": "#' Liberia\n#'\n#' @source \\url{`r repo(\"LBR\")`}\n#' \nLBR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_LBY.R",
    "chars": 840,
    "preview": "#' Libya\n#'\n#' @source \\url{`r repo(\"LBY\")`}\n#' \nLBY <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_LCA.R",
    "chars": 852,
    "preview": "#' Saint Lucia\n#'\n#' @source \\url{`r repo(\"LCA\")`}\n#' \nLCA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #'"
  },
  {
    "path": "R/iso_LIE.R",
    "chars": 945,
    "preview": "#' Liechtenstein\n#'\n#' @source \\url{`r repo(\"LIE\")`}\n#' \nLIE <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  "
  },
  {
    "path": "R/iso_LKA.R",
    "chars": 848,
    "preview": "#' Sri Lanka\n#'\n#' @source \\url{`r repo(\"LKA\")`}\n#' \nLKA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_LSO.R",
    "chars": 844,
    "preview": "#' Lesotho\n#'\n#' @source \\url{`r repo(\"LSO\")`}\n#' \nLSO <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_LTU.R",
    "chars": 1771,
    "preview": "#' Lithuania\n#'\n#' @source \\url{`r repo(\"LTU\")`}\n#' \nLTU <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @"
  },
  {
    "path": "R/iso_LUX.R",
    "chars": 850,
    "preview": "#' Luxembourg\n#'\n#' @source \\url{`r repo(\"LUX\")`}\n#' \nLUX <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_LVA.R",
    "chars": 1441,
    "preview": "#' Latvia\n#'\n#' @source \\url{`r repo(\"LVA\")`}\n#' \nLVA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_MAC.R",
    "chars": 840,
    "preview": "#' Macao\n#'\n#' @source \\url{`r repo(\"MAC\")`}\n#' \nMAC <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sect"
  },
  {
    "path": "R/iso_MAR.R",
    "chars": 844,
    "preview": "#' Morocco\n#'\n#' @source \\url{`r repo(\"MAR\")`}\n#' \nMAR <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_MCO.R",
    "chars": 842,
    "preview": "#' Monaco\n#'\n#' @source \\url{`r repo(\"MCO\")`}\n#' \nMCO <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_MDA.R",
    "chars": 844,
    "preview": "#' Moldova\n#'\n#' @source \\url{`r repo(\"MDA\")`}\n#' \nMDA <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @se"
  },
  {
    "path": "R/iso_MDG.R",
    "chars": 850,
    "preview": "#' Madagascar\n#'\n#' @source \\url{`r repo(\"MDG\")`}\n#' \nMDG <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' "
  },
  {
    "path": "R/iso_MDV.R",
    "chars": 846,
    "preview": "#' Maldives\n#'\n#' @source \\url{`r repo(\"MDV\")`}\n#' \nMDV <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @s"
  },
  {
    "path": "R/iso_MEX.R",
    "chars": 1338,
    "preview": "#' Mexico\n#'\n#' @source \\url{`r repo(\"MEX\")`}\n#' \nMEX <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n  #' @sec"
  },
  {
    "path": "R/iso_MHL.R",
    "chars": 862,
    "preview": "#' Marshall Islands\n#'\n#' @source \\url{`r repo(\"MHL\")`}\n#' \nMHL <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1"
  },
  {
    "path": "R/iso_MKD.R",
    "chars": 860,
    "preview": "#' North Macedonia\n#'\n#' @source \\url{`r repo(\"MKD\")`}\n#' \nMKD <- function(level){\n  x <- NULL\n  \n  #' @concept Level 1\n"
  }
]

// ... and 473 more files (download for full content)

About this extraction

This page contains the full source code of the covid19datahub/COVID19 GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 673 files (4.4 MB), approximately 1.2M tokens, and a symbol index with 1 extracted symbol (covering functions, classes, methods, constants, and types). Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!