Repository: louislam/uptime-kuma Branch: master Commit: cdfca8466484 Files: 718 Total size: 5.9 MB Directory structure: gitextract_wdewrnrd/ ├── .dockerignore ├── .editorconfig ├── .eslintrc.js ├── .github/ │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE/ │ │ ├── ask_for_help.yml │ │ ├── bug_report.yml │ │ ├── config.yml │ │ ├── feature_request.yml │ │ └── security_issue.yml │ ├── PULL_REQUEST_TEMPLATE.md │ ├── REVIEW_GUIDELINES.md │ ├── config/ │ │ └── exclude.txt │ ├── copilot-instructions.md │ ├── dependabot.yml │ └── workflows/ │ ├── auto-test.yml │ ├── autofix.yml │ ├── build-docker-base.yml │ ├── build-docker-push.yml │ ├── close-incorrect-issue.yml │ ├── codeql-analysis.yml │ ├── conflict-labeler.yml │ ├── mark-as-draft-on-requesting-changes.yml │ ├── new-contributor-pr.yml │ ├── npm-update.yml │ ├── pr-description-check.yml │ ├── pr-title.yml │ ├── prevent-file-change.yml │ ├── release-beta.yml │ ├── release-final.yml │ ├── release-nightly.yml │ ├── stale-bot.yml │ └── validate.yml ├── .gitignore ├── .npmrc ├── .prettierignore ├── .prettierrc.js ├── .stylelintrc ├── CNAME ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── compose.yaml ├── config/ │ ├── playwright.config.js │ └── vite.config.js ├── db/ │ ├── knex_init_db.js │ ├── knex_migrations/ │ │ ├── 2023-08-16-0000-create-uptime.js │ │ ├── 2023-08-18-0301-heartbeat.js │ │ ├── 2023-09-29-0000-heartbeat-retires.js │ │ ├── 2023-10-08-0000-mqtt-query.js │ │ ├── 2023-10-11-1915-push-token-to-32.js │ │ ├── 2023-10-16-0000-create-remote-browsers.js │ │ ├── 2023-12-20-0000-alter-status-page.js │ │ ├── 2023-12-21-0000-stat-ping-min-max.js │ │ ├── 2023-12-22-0000-hourly-uptime.js │ │ ├── 2024-01-22-0000-stats-extras.js │ │ ├── 2024-04-26-0000-snmp-monitor.js │ │ ├── 2024-08-24-000-add-cache-bust.js │ │ ├── 2024-08-24-0000-conditions.js │ │ ├── 2024-10-1315-rabbitmq-monitor.js │ │ ├── 2024-10-31-0000-fix-snmp-monitor.js │ │ ├── 2024-11-27-1927-fix-info-json-data-type.js │ │ ├── 
2025-01-01-0000-add-smtp.js │ │ ├── 2025-02-15-2312-add-wstest.js │ │ ├── 2025-02-17-2142-generalize-analytics.js │ │ ├── 2025-03-04-0000-ping-advanced-options.js │ │ ├── 2025-03-25-0127-fix-5721.js │ │ ├── 2025-05-09-0000-add-custom-url.js │ │ ├── 2025-06-03-0000-add-ip-family.js │ │ ├── 2025-06-11-0000-add-manual-monitor.js │ │ ├── 2025-06-13-0000-maintenance-add-last-start.js │ │ ├── 2025-06-15-0001-manual-monitor-fix.js │ │ ├── 2025-06-24-0000-add-audience-to-oauth.js │ │ ├── 2025-07-17-0000-mqtt-websocket-path.js │ │ ├── 2025-09-02-0000-add-domain-expiry.js │ │ ├── 2025-10-14-0000-add-ip-family-fix.js │ │ ├── 2025-10-15-0000-stat-table-fix.js │ │ ├── 2025-10-15-0001-add-monitor-response-config.js │ │ ├── 2025-10-15-0002-add-response-to-heartbeat.js │ │ ├── 2025-10-24-0000-show-only-last-heartbeat.js │ │ ├── 2025-12-09-0000-add-system-service-monitor.js │ │ ├── 2025-12-17-0000-add-globalping-monitor.js │ │ ├── 2025-12-22-0121-optimize-important-indexes.js │ │ ├── 2025-12-29-0000-remove-line-notify.js │ │ ├── 2025-12-31-2143-add-snmp-v3-username.js │ │ ├── 2026-01-02-0551-dns-last-result-to-text.js │ │ ├── 2026-01-02-0713-gamedig-v4-to-v5.js │ │ ├── 2026-01-05-0000-add-rss-title.js │ │ ├── 2026-01-05-0000-add-tls-monitor.js │ │ ├── 2026-01-06-0000-fix-domain-expiry-column-type.js │ │ ├── 2026-01-10-0000-convert-float-precision.js │ │ ├── 2026-01-15-0000-add-json-query-retry-only-status-code.js │ │ ├── 2026-01-16-0000-add-screenshot-delay.js │ │ ├── 2026-02-07-0000-disable-domain-expiry-unsupported-tlds.js │ │ └── README.md │ ├── old_migrations/ │ │ ├── README.md │ │ ├── patch-2fa-invalidate-used-token.sql │ │ ├── patch-2fa.sql │ │ ├── patch-add-certificate-expiry-status-page.sql │ │ ├── patch-add-clickable-status-page-link.sql │ │ ├── patch-add-description-monitor.sql │ │ ├── patch-add-docker-columns.sql │ │ ├── patch-add-gamedig-given-port.sql │ │ ├── patch-add-gamedig-monitor.sql │ │ ├── patch-add-google-analytics-status-page-tag.sql │ │ ├── 
patch-add-invert-keyword.sql │ │ ├── patch-add-other-auth.sql │ │ ├── patch-add-parent-monitor.sql │ │ ├── patch-add-radius-monitor.sql │ │ ├── patch-add-retry-interval-monitor.sql │ │ ├── patch-add-sqlserver-monitor.sql │ │ ├── patch-add-timeout-monitor.sql │ │ ├── patch-added-json-query.sql │ │ ├── patch-added-kafka-producer.sql │ │ ├── patch-added-mqtt-monitor.sql │ │ ├── patch-api-key-table.sql │ │ ├── patch-fix-kafka-producer-booleans.sql │ │ ├── patch-group-table.sql │ │ ├── patch-grpc-monitor.sql │ │ ├── patch-http-body-encoding.sql │ │ ├── patch-http-monitor-method-body-and-headers.sql │ │ ├── patch-improve-performance.sql │ │ ├── patch-incident-table.sql │ │ ├── patch-maintenance-cron.sql │ │ ├── patch-maintenance-table2.sql │ │ ├── patch-monitor-add-resend-interval.sql │ │ ├── patch-monitor-basic-auth.sql │ │ ├── patch-monitor-expiry-notification.sql │ │ ├── patch-monitor-oauth-cc.sql │ │ ├── patch-monitor-push_token.sql │ │ ├── patch-monitor-tls-info-add-fk.sql │ │ ├── patch-monitor-tls.sql │ │ ├── patch-notification-config.sql │ │ ├── patch-notification_sent_history.sql │ │ ├── patch-ping-packet-size.sql │ │ ├── patch-proxy.sql │ │ ├── patch-setting-value-type.sql │ │ ├── patch-status-page-footer-css.sql │ │ ├── patch-status-page.sql │ │ ├── patch-timeout.sql │ │ ├── patch1.sql │ │ ├── patch10.sql │ │ ├── patch2.sql │ │ ├── patch3.sql │ │ ├── patch4.sql │ │ ├── patch5.sql │ │ ├── patch6.sql │ │ ├── patch7.sql │ │ ├── patch8.sql │ │ └── patch9.sql │ └── patch-monitor-tls-info-add-fk.sql ├── docker/ │ ├── builder-go.dockerfile │ ├── debian-base.dockerfile │ ├── docker-compose-dev.yml │ ├── dockerfile │ └── etc/ │ ├── nscd.conf │ └── sudoers ├── ecosystem.config.js ├── extra/ │ ├── beta/ │ │ └── update-version.mjs │ ├── build-healthcheck.js │ ├── check-knex-filenames.mjs │ ├── check-lang-json.js │ ├── check-package-json.mjs │ ├── checkout-pr.mjs │ ├── close-incorrect-issue.js │ ├── compile-install-script.ps1 │ ├── deploy-demo-server.js │ ├── 
download-apprise.mjs │ ├── download-dist.js │ ├── generate-changelog.mjs │ ├── healthcheck.go │ ├── healthcheck.js │ ├── kuma-pr/ │ │ ├── index.mjs │ │ ├── package.json │ │ └── pr-lib.mjs │ ├── mark-as-nightly.js │ ├── push-examples/ │ │ ├── .gitignore │ │ ├── bash-curl/ │ │ │ └── index.sh │ │ ├── csharp/ │ │ │ └── index.cs │ │ ├── docker/ │ │ │ └── index.sh │ │ ├── go/ │ │ │ └── index.go │ │ ├── java/ │ │ │ └── index.java │ │ ├── javascript-fetch/ │ │ │ ├── index.js │ │ │ └── package.json │ │ ├── php/ │ │ │ └── index.php │ │ ├── powershell/ │ │ │ └── index.ps1 │ │ ├── python/ │ │ │ └── index.py │ │ └── typescript-fetch/ │ │ ├── README.md │ │ ├── index.ts │ │ └── package.json │ ├── rdap-dns.json │ ├── rebase-pr.js │ ├── release/ │ │ ├── beta.mjs │ │ ├── final.mjs │ │ ├── lib.mjs │ │ ├── nightly.mjs │ │ ├── upload-artifacts-beta.mjs │ │ └── upload-artifacts.mjs │ ├── remove-2fa.js │ ├── remove-empty-lang-keys.js │ ├── remove-playwright-test-data.js │ ├── reset-migrate-aggregate-table-state.js │ ├── reset-password.js │ ├── simple-dns-server.js │ ├── simple-mqtt-server.js │ ├── update-version.mjs │ └── uptime-kuma-push/ │ ├── .gitignore │ ├── Dockerfile │ ├── build.js │ ├── package.json │ └── uptime-kuma-push.go ├── index.html ├── package.json ├── public/ │ ├── manifest.json │ └── serviceWorker.js ├── server/ │ ├── 2fa.js │ ├── analytics/ │ │ ├── analytics.js │ │ ├── google-analytics.js │ │ ├── matomo-analytics.js │ │ ├── plausible-analytics.js │ │ └── umami-analytics.js │ ├── auth.js │ ├── check-version.js │ ├── client.js │ ├── config.js │ ├── database.js │ ├── docker.js │ ├── embedded-mariadb.js │ ├── image-data-uri.js │ ├── jobs/ │ │ ├── clear-old-data.js │ │ └── incremental-vacuum.js │ ├── jobs.js │ ├── model/ │ │ ├── api_key.js │ │ ├── docker_host.js │ │ ├── domain_expiry.js │ │ ├── group.js │ │ ├── heartbeat.js │ │ ├── incident.js │ │ ├── maintenance.js │ │ ├── monitor.js │ │ ├── proxy.js │ │ ├── remote_browser.js │ │ ├── status_page.js │ │ ├── tag.js │ │ └── 
user.js │ ├── modules/ │ │ ├── apicache/ │ │ │ ├── apicache.js │ │ │ ├── index.js │ │ │ └── memory-cache.js │ │ ├── axios-ntlm/ │ │ │ ├── LICENSE │ │ │ └── lib/ │ │ │ ├── flags.js │ │ │ ├── hash.js │ │ │ ├── ntlm.js │ │ │ └── ntlmClient.js │ │ └── dayjs/ │ │ └── plugin/ │ │ ├── timezone.d.ts │ │ └── timezone.js │ ├── monitor-conditions/ │ │ ├── evaluator.js │ │ ├── expression.js │ │ ├── operators.js │ │ └── variables.js │ ├── monitor-types/ │ │ ├── dns.js │ │ ├── gamedig.js │ │ ├── globalping.js │ │ ├── group.js │ │ ├── grpc.js │ │ ├── manual.js │ │ ├── mongodb.js │ │ ├── monitor-type.js │ │ ├── mqtt.js │ │ ├── mssql.js │ │ ├── mysql.js │ │ ├── oracledb.js │ │ ├── postgres.js │ │ ├── rabbitmq.js │ │ ├── real-browser-monitor-type.js │ │ ├── redis.js │ │ ├── sip-options.js │ │ ├── smtp.js │ │ ├── snmp.js │ │ ├── system-service.js │ │ ├── tailscale-ping.js │ │ ├── tcp.js │ │ └── websocket-upgrade.js │ ├── notification-providers/ │ │ ├── 360messenger.js │ │ ├── 46elks.js │ │ ├── HaloPSA.js │ │ ├── Webpush.js │ │ ├── alerta.js │ │ ├── alertnow.js │ │ ├── aliyun-sms.js │ │ ├── apprise.js │ │ ├── bale.js │ │ ├── bark.js │ │ ├── bitrix24.js │ │ ├── brevo.js │ │ ├── call-me-bot.js │ │ ├── cellsynt.js │ │ ├── clicksendsms.js │ │ ├── dingding.js │ │ ├── discord.js │ │ ├── evolution.js │ │ ├── feishu.js │ │ ├── flashduty.js │ │ ├── fluxer.js │ │ ├── freemobile.js │ │ ├── goalert.js │ │ ├── google-chat.js │ │ ├── google-sheets.js │ │ ├── gorush.js │ │ ├── gotify.js │ │ ├── grafana-oncall.js │ │ ├── gtx-messaging.js │ │ ├── heii-oncall.js │ │ ├── home-assistant.js │ │ ├── jira-service-management.js │ │ ├── keep.js │ │ ├── kook.js │ │ ├── line.js │ │ ├── lunasea.js │ │ ├── matrix.js │ │ ├── mattermost.js │ │ ├── max.js │ │ ├── nextcloudtalk.js │ │ ├── nostr.js │ │ ├── notifery.js │ │ ├── notification-provider.js │ │ ├── ntfy.js │ │ ├── octopush.js │ │ ├── onebot.js │ │ ├── onechat.js │ │ ├── onesender.js │ │ ├── opsgenie.js │ │ ├── pagerduty.js │ │ ├── pagertree.js │ │ ├── 
promosms.js │ │ ├── pumble.js │ │ ├── pushbullet.js │ │ ├── pushdeer.js │ │ ├── pushover.js │ │ ├── pushplus.js │ │ ├── pushy.js │ │ ├── resend.js │ │ ├── rocket-chat.js │ │ ├── send-grid.js │ │ ├── serverchan.js │ │ ├── serwersms.js │ │ ├── sevenio.js │ │ ├── signal.js │ │ ├── signl4.js │ │ ├── slack.js │ │ ├── sms-planet.js │ │ ├── smsc.js │ │ ├── smseagle.js │ │ ├── smsir.js │ │ ├── smsmanager.js │ │ ├── smspartner.js │ │ ├── smtp.js │ │ ├── splunk.js │ │ ├── spugpush.js │ │ ├── squadcast.js │ │ ├── stackfield.js │ │ ├── teams.js │ │ ├── techulus-push.js │ │ ├── telegram.js │ │ ├── teltonika.js │ │ ├── threema.js │ │ ├── twilio.js │ │ ├── waha.js │ │ ├── webhook.js │ │ ├── wecom.js │ │ ├── whapi.js │ │ ├── wpush.js │ │ ├── yzj.js │ │ └── zoho-cliq.js │ ├── notification.js │ ├── password-hash.js │ ├── prometheus.js │ ├── proxy.js │ ├── radius-client.js │ ├── rate-limiter.js │ ├── remote-browser.js │ ├── routers/ │ │ ├── api-router.js │ │ └── status-page-router.js │ ├── server.js │ ├── settings.js │ ├── setup-database.js │ ├── socket-handlers/ │ │ ├── api-key-socket-handler.js │ │ ├── chart-socket-handler.js │ │ ├── cloudflared-socket-handler.js │ │ ├── database-socket-handler.js │ │ ├── docker-socket-handler.js │ │ ├── general-socket-handler.js │ │ ├── maintenance-socket-handler.js │ │ ├── proxy-socket-handler.js │ │ ├── remote-browser-socket-handler.js │ │ └── status-page-socket-handler.js │ ├── translatable-error.js │ ├── uptime-calculator.js │ ├── uptime-kuma-server.js │ ├── util-server.js │ └── utils/ │ ├── array-with-key.js │ ├── knex/ │ │ └── lib/ │ │ └── dialects/ │ │ └── mysql2/ │ │ └── schema/ │ │ └── mysql2-columncompiler.js │ ├── limit-queue.js │ └── simple-migration-server.js ├── src/ │ ├── App.vue │ ├── assets/ │ │ ├── app.scss │ │ ├── localization.scss │ │ ├── multiselect.scss │ │ ├── vars.scss │ │ └── vue-datepicker.scss │ ├── components/ │ │ ├── APIKeyDialog.vue │ │ ├── ActionInput.vue │ │ ├── ActionSelect.vue │ │ ├── BadgeLinkGeneratorDialog.vue 
│ │ ├── CertificateInfo.vue │ │ ├── CertificateInfoRow.vue │ │ ├── Confirm.vue │ │ ├── CopyableInput.vue │ │ ├── CountUp.vue │ │ ├── CreateGroupDialog.vue │ │ ├── Datetime.vue │ │ ├── DockerHostDialog.vue │ │ ├── EditMonitorCondition.vue │ │ ├── EditMonitorConditionGroup.vue │ │ ├── EditMonitorConditions.vue │ │ ├── GroupSortDropdown.vue │ │ ├── HeartbeatBar.vue │ │ ├── HiddenInput.vue │ │ ├── IncidentEditForm.vue │ │ ├── IncidentHistory.vue │ │ ├── IncidentManageModal.vue │ │ ├── Login.vue │ │ ├── MaintenanceTime.vue │ │ ├── MonitorList.vue │ │ ├── MonitorListFilter.vue │ │ ├── MonitorListFilterDropdown.vue │ │ ├── MonitorListItem.vue │ │ ├── MonitorSettingDialog.vue │ │ ├── NotificationDialog.vue │ │ ├── PingChart.vue │ │ ├── ProxyDialog.vue │ │ ├── PublicGroupList.vue │ │ ├── RemoteBrowserDialog.vue │ │ ├── ScreenshotDialog.vue │ │ ├── Status.vue │ │ ├── Tag.vue │ │ ├── TagEditDialog.vue │ │ ├── TagsManager.vue │ │ ├── TemplatedInput.vue │ │ ├── TemplatedTextarea.vue │ │ ├── ToggleSection.vue │ │ ├── Tooltip.vue │ │ ├── TwoFADialog.vue │ │ ├── Uptime.vue │ │ ├── notifications/ │ │ │ ├── 360messenger.vue │ │ │ ├── 46elks.vue │ │ │ ├── AlertNow.vue │ │ │ ├── Alerta.vue │ │ │ ├── AliyunSms.vue │ │ │ ├── Apprise.vue │ │ │ ├── Bale.vue │ │ │ ├── Bark.vue │ │ │ ├── Bitrix24.vue │ │ │ ├── Brevo.vue │ │ │ ├── CallMeBot.vue │ │ │ ├── Cellsynt.vue │ │ │ ├── ClickSendSMS.vue │ │ │ ├── DingDing.vue │ │ │ ├── Discord.vue │ │ │ ├── Evolution.vue │ │ │ ├── Feishu.vue │ │ │ ├── FlashDuty.vue │ │ │ ├── Fluxer.vue │ │ │ ├── FreeMobile.vue │ │ │ ├── GoAlert.vue │ │ │ ├── GoogleChat.vue │ │ │ ├── GoogleSheets.vue │ │ │ ├── Gorush.vue │ │ │ ├── Gotify.vue │ │ │ ├── GrafanaOncall.vue │ │ │ ├── GtxMessaging.vue │ │ │ ├── HaloPSA.vue │ │ │ ├── HeiiOnCall.vue │ │ │ ├── HomeAssistant.vue │ │ │ ├── JiraServiceManagement.vue │ │ │ ├── Keep.vue │ │ │ ├── Kook.vue │ │ │ ├── Line.vue │ │ │ ├── LunaSea.vue │ │ │ ├── Matrix.vue │ │ │ ├── Mattermost.vue │ │ │ ├── Max.vue │ │ │ ├── 
NextcloudTalk.vue │ │ │ ├── Nostr.vue │ │ │ ├── Notifery.vue │ │ │ ├── Ntfy.vue │ │ │ ├── Octopush.vue │ │ │ ├── OneBot.vue │ │ │ ├── OneChat.vue │ │ │ ├── Onesender.vue │ │ │ ├── Opsgenie.vue │ │ │ ├── PagerDuty.vue │ │ │ ├── PagerTree.vue │ │ │ ├── PromoSMS.vue │ │ │ ├── Pumble.vue │ │ │ ├── PushDeer.vue │ │ │ ├── PushPlus.vue │ │ │ ├── Pushbullet.vue │ │ │ ├── Pushover.vue │ │ │ ├── Pushy.vue │ │ │ ├── Resend.vue │ │ │ ├── RocketChat.vue │ │ │ ├── SIGNL4.vue │ │ │ ├── SMSC.vue │ │ │ ├── SMSEagle.vue │ │ │ ├── SMSIR.vue │ │ │ ├── SMSManager.vue │ │ │ ├── SMSPartner.vue │ │ │ ├── SMSPlanet.vue │ │ │ ├── SMTP.vue │ │ │ ├── SendGrid.vue │ │ │ ├── ServerChan.vue │ │ │ ├── SerwerSMS.vue │ │ │ ├── SevenIO.vue │ │ │ ├── Signal.vue │ │ │ ├── Slack.vue │ │ │ ├── Splunk.vue │ │ │ ├── SpugPush.vue │ │ │ ├── Squadcast.vue │ │ │ ├── Stackfield.vue │ │ │ ├── Teams.vue │ │ │ ├── TechulusPush.vue │ │ │ ├── Telegram.vue │ │ │ ├── Teltonika.vue │ │ │ ├── Threema.vue │ │ │ ├── Twilio.vue │ │ │ ├── WAHA.vue │ │ │ ├── WPush.vue │ │ │ ├── WeCom.vue │ │ │ ├── Webhook.vue │ │ │ ├── Webpush.vue │ │ │ ├── Whapi.vue │ │ │ ├── YZJ.vue │ │ │ ├── ZohoCliq.vue │ │ │ └── index.js │ │ └── settings/ │ │ ├── APIKeys.vue │ │ ├── About.vue │ │ ├── Appearance.vue │ │ ├── Docker.vue │ │ ├── General.vue │ │ ├── MonitorHistory.vue │ │ ├── Notifications.vue │ │ ├── Proxies.vue │ │ ├── RemoteBrowsers.vue │ │ ├── ReverseProxy.vue │ │ ├── Security.vue │ │ └── Tags.vue │ ├── i18n.js │ ├── icon.js │ ├── lang/ │ │ ├── README.md │ │ ├── ab.json │ │ ├── af.json │ │ ├── ang.json │ │ ├── ar-SY.json │ │ ├── ar.json │ │ ├── bar.json │ │ ├── be.json │ │ ├── bg-BG.json │ │ ├── bn.json │ │ ├── ca.json │ │ ├── ca@valencia.json │ │ ├── ckb.json │ │ ├── cs-CZ.json │ │ ├── da-DK.json │ │ ├── de-CH.json │ │ ├── de-DE.json │ │ ├── el-GR.json │ │ ├── en.json │ │ ├── en_GB.json │ │ ├── enm.json │ │ ├── es-ES.json │ │ ├── et-EE.json │ │ ├── eu.json │ │ ├── fa.json │ │ ├── fi.json │ │ ├── fr-FR.json │ │ ├── ga.json │ │ ├── 
gl.json │ │ ├── he-IL.json │ │ ├── he.json │ │ ├── hi.json │ │ ├── hr-HR.json │ │ ├── hu.json │ │ ├── id-ID.json │ │ ├── it-IT.json │ │ ├── ja.json │ │ ├── ka.json │ │ ├── ko-KR.json │ │ ├── lt.json │ │ ├── lv.json │ │ ├── lzh.json │ │ ├── mk.json │ │ ├── ml.json │ │ ├── ms.json │ │ ├── my.json │ │ ├── nb-NO.json │ │ ├── ne.json │ │ ├── nl-NL.json │ │ ├── pa.json │ │ ├── pa_PK.json │ │ ├── pl.json │ │ ├── pt-BR.json │ │ ├── pt-PT.json │ │ ├── pt.json │ │ ├── ro.json │ │ ├── ru-RU.json │ │ ├── sk.json │ │ ├── sl-SI.json │ │ ├── sq.json │ │ ├── sr-latn.json │ │ ├── sr.json │ │ ├── sv-SE.json │ │ ├── te.json │ │ ├── th-TH.json │ │ ├── tr-TR.json │ │ ├── ug.json │ │ ├── uk-UA.json │ │ ├── ur.json │ │ ├── uz.json │ │ ├── vi-VN.json │ │ ├── vls.json │ │ ├── xh.json │ │ ├── yue.json │ │ ├── zh-CN.json │ │ ├── zh-HK.json │ │ ├── zh-TW.json │ │ └── zu.json │ ├── layouts/ │ │ ├── EmptyLayout.vue │ │ └── Layout.vue │ ├── main.js │ ├── mixins/ │ │ ├── datetime.js │ │ ├── lang.js │ │ ├── mobile.js │ │ ├── public.js │ │ ├── socket.js │ │ └── theme.js │ ├── modules/ │ │ └── dayjs/ │ │ ├── constant.js │ │ └── plugin/ │ │ └── timezone/ │ │ ├── index.d.ts │ │ └── index.js │ ├── pages/ │ │ ├── AddStatusPage.vue │ │ ├── Dashboard.vue │ │ ├── DashboardHome.vue │ │ ├── Details.vue │ │ ├── EditMaintenance.vue │ │ ├── EditMonitor.vue │ │ ├── Entry.vue │ │ ├── List.vue │ │ ├── ManageMaintenance.vue │ │ ├── ManageStatusPage.vue │ │ ├── NotFound.vue │ │ ├── Settings.vue │ │ ├── Setup.vue │ │ ├── SetupDatabase.vue │ │ └── StatusPage.vue │ ├── router.js │ ├── util-frontend.js │ ├── util.js │ └── util.ts ├── test/ │ ├── backend-test/ │ │ ├── README.md │ │ ├── check-translations.test.js │ │ ├── monitor-conditions/ │ │ │ ├── test-evaluator.js │ │ │ ├── test-expressions.js │ │ │ └── test-operators.js │ │ ├── monitors/ │ │ │ ├── test-gamedig.js │ │ │ ├── test-grpc.js │ │ │ ├── test-mqtt.js │ │ │ ├── test-mssql.js │ │ │ ├── test-mysql.js │ │ │ ├── test-oracledb.js │ │ │ ├── test-postgres.js │ │ │ 
├── test-rabbitmq.js │ │ │ ├── test-tcp.js │ │ │ └── test-websocket.js │ │ ├── notification-providers/ │ │ │ ├── mock-webhook.js │ │ │ ├── test-notification-provider.js │ │ │ └── test-ntlm.js │ │ ├── test-cert-hostname-match.js │ │ ├── test-domain.js │ │ ├── test-globalping.js │ │ ├── test-migration.js │ │ ├── test-monitor-response.js │ │ ├── test-ping-chart.js │ │ ├── test-snmp.js │ │ ├── test-status-page.js │ │ ├── test-system-service.js │ │ ├── test-uptime-calculator.js │ │ ├── test-util-server.js │ │ └── test-util.js │ ├── e2e/ │ │ ├── specs/ │ │ │ ├── domain-expiry-notification.spec.js │ │ │ ├── example.spec.js │ │ │ ├── fridendly-name.spec.js │ │ │ ├── incident-history.spec.js │ │ │ ├── monitor-form.spec.js │ │ │ ├── setup-process.once.js │ │ │ └── status-page.spec.js │ │ └── util-test.js │ ├── manual-test-grpc/ │ │ ├── echo.proto │ │ └── simple-grpc-server.js │ ├── manual-test-radius/ │ │ └── compose.yaml │ ├── manual-test-radius-tls/ │ │ ├── certs/ │ │ │ ├── redis.crt │ │ │ └── redis.key │ │ └── compose.yaml │ ├── mock-testdb.js │ ├── prepare-test-server.js │ ├── test-backend.mjs │ └── test-radius.dockerfile ├── tsconfig-backend.json └── tsconfig.json ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ /.idea /node_modules /data* /out /test /kubernetes /.do **/.dockerignore /private **/.git **/.gitignore **/docker-compose* **/[Dd]ockerfile* LICENSE README.md .editorconfig .vscode .eslint* .stylelint* /.github yarn.lock app.json CODE_OF_CONDUCT.md CONTRIBUTING.md CNAME install.sh SECURITY.md tsconfig.json .env /tmp /ecosystem.config.js /extra/healthcheck.exe /extra/healthcheck /extra/exe-builder /extra/uptime-kuma-push # Comment the following line if you want to rebuild the healthcheck binary /extra/healthcheck-armv7 ### .gitignore content (commented rules are duplicated) 
#node_modules .DS_Store #dist dist-ssr *.local #.idea #/data #!/data/.gitkeep #.vscode ### End of .gitignore content ================================================ FILE: .editorconfig ================================================ root = true [*] indent_style = space indent_size = 4 end_of_line = lf charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true [*.md] trim_trailing_whitespace = false [*.yaml] indent_size = 2 [*.yml] indent_size = 2 [*.vue] trim_trailing_whitespace = false [*.go] indent_style = tab ================================================ FILE: .eslintrc.js ================================================ module.exports = { ignorePatterns: ["test/*.js", "server/modules/*", "src/util.js"], root: true, env: { browser: true, commonjs: true, es2020: true, node: true, }, extends: [ "eslint:recommended", "plugin:vue/vue3-recommended", "plugin:vue-scoped-css/vue3-recommended", "plugin:jsdoc/recommended-error", "prettier", // Disables ESLint formatting rules that conflict with Prettier ], parser: "vue-eslint-parser", parserOptions: { parser: "@typescript-eslint/parser", sourceType: "module", requireConfigFile: false, }, plugins: ["jsdoc", "@typescript-eslint"], rules: { yoda: "error", eqeqeq: ["warn", "smart"], camelcase: [ "warn", { properties: "never", ignoreImports: true, }, ], "no-unused-vars": [ "warn", { args: "none", }, ], "vue/max-attributes-per-line": "off", "vue/singleline-html-element-content-newline": "off", "vue/html-self-closing": "off", "vue/require-component-is": "off", // not allow is="style" https://github.com/vuejs/eslint-plugin-vue/issues/462#issuecomment-430234675 "vue/attribute-hyphenation": "off", // This change noNL to "no-n-l" unexpectedly "vue/multi-word-component-names": "off", "vue-scoped-css/no-unused-selector": "warn", curly: "error", "no-var": "error", "no-throw-literal": "error", "no-constant-condition": [ "error", { checkLoops: false, }, ], //"no-console": "warn", "no-extra-boolean-cast": "off", 
"no-unneeded-ternary": "error", //"prefer-template": "error", "no-empty": [ "error", { allowEmptyCatch: true, }, ], "no-control-regex": "off", "one-var": ["error", "never"], "max-statements-per-line": ["error", { max: 1 }], "jsdoc/check-tag-names": [ "error", { definedTags: ["link"], }, ], "jsdoc/no-undefined-types": "off", "jsdoc/no-defaults": ["error", { noOptionalParamNames: true }], "jsdoc/require-throws": "warn", "jsdoc/require-jsdoc": [ "error", { require: { FunctionDeclaration: true, MethodDefinition: true, }, }, ], "jsdoc/no-blank-block-descriptions": "error", "jsdoc/require-returns-description": "warn", "jsdoc/require-returns-check": ["error", { reportMissingReturnForUndefinedTypes: false }], "jsdoc/require-returns": [ "warn", { forceRequireReturn: true, forceReturnsWithAsync: true, }, ], "jsdoc/require-param-type": "warn", "jsdoc/require-param-description": "warn", }, overrides: [ // Override for TypeScript { files: ["**/*.ts"], extends: ["plugin:@typescript-eslint/recommended"], rules: { "jsdoc/require-returns-type": "off", "jsdoc/require-param-type": "off", "@typescript-eslint/no-explicit-any": "off", "prefer-const": "off", }, }, ], }; ================================================ FILE: .github/FUNDING.yml ================================================ # These are supported funding model platforms github: louislam # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] #patreon: # Replace with a single Patreon username open_collective: uptime-kuma # Replace with a single Open Collective username #ko_fi: # Replace with a single Ko-fi username #tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel #community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry #liberapay: # Replace with a single Liberapay username #issuehunt: # Replace with a single IssueHunt username #otechie: # Replace with a single Otechie username #custom: # Replace with up to 4 custom sponsorship 
URLs e.g., ['link1', 'link2'] ================================================ FILE: .github/ISSUE_TEMPLATE/ask_for_help.yml ================================================ --- name: ❓ Ask for help description: | Submit any question related to Uptime Kuma #title: "[Help]" labels: ["help"] body: - type: markdown attributes: value: | 🚫 **We kindly ask you to refrain from pinging maintainers unless absolutely necessary. Pings are reserved for critical/urgent issues that require immediate attention.** - type: checkboxes id: no-duplicate-question attributes: label: ⚠️ Please verify that your question has not already been reported description: | To avoid duplicate reports, please search for any existing issues before submitting a new one. You can find the list of existing issues **[HERE](https://github.com/louislam/uptime-kuma/issues?q=is%3Aissue%20sort%3Acreated-desc%20)**. options: - label: | I have searched the [existing issues](https://github.com/louislam/uptime-kuma/issues?q=is%3Aissue%20sort%3Acreated-desc%20) and found no similar reports. required: true - type: checkboxes id: security-policy attributes: label: 🛡️ Security Policy description: | Please review and acknowledge the Security Policy before reporting any security-related issues or bugs. You can find the full Security Policy **[HERE](https://github.com/louislam/uptime-kuma/security/policy)**. options: - label: | I have read and agree to Uptime Kuma's [Security Policy](https://github.com/louislam/uptime-kuma/security/policy). required: true - type: textarea id: steps-to-reproduce validations: required: true attributes: label: 📝 Describe your problem description: | Please walk us through it step by step. Include all important details and add screenshots where appropriate. placeholder: | Describe what are you asking for ... - type: textarea id: error-msg attributes: label: 📝 Error Message(s) or Log description: | Please copy and paste any relevant log output. 
This will be automatically formatted into code, so no need for backticks. render: bash session validations: required: false - type: input id: uptime-kuma-version attributes: label: 🐻 Uptime-Kuma Version description: | What version of Uptime-Kuma are you running? Please do not provide Docker tags like `latest` or `1`. placeholder: | e.g., 1.23.16 or 2.0.0-beta.2 validations: required: true - type: input id: operating-system attributes: label: 💻 Operating System and Arch description: | Which OS is your server/device running on? (For Replit, please do not report this bug) placeholder: | e.g., Ubuntu Server 24.04.2 LTS (GNU/Linux 6.8.0-55-generic x86_64) validations: required: true - type: input id: browser-vendor attributes: label: 🌐 Browser description: | Which browser are you running on? (For Replit, please do not report this bug) placeholder: | e.g., Google Chrome 134.0.6998.183 (Official Build) (64-bit) validations: required: true - type: textarea id: deployment-info attributes: label: 🖥️ Deployment Environment description: | Provide details about the deployment environment, including runtime components, databases, and storage configurations. This will help assess the infrastructure and identify any potential compatibility requirements. 
**Remove any fields that do not apply to your setup.** value: | - **Runtime Environment**: - Docker: Version `X.X.X` (Build `Y.Y.Y`) - Docker Compose: Version `X.X.X` - Portainer (BE/CE): Version `X.X.X` (LTS: Yes/No) - MariaDB: Version `X.X.X` (LTS: Yes/No) - Node.js: Version `X.X.X` (LTS: Yes/No) - Kubernetes (K3S/K8S): Version `X.X.X` (LTS: Yes/No, via `[method/tool]`) - **Database**: - SQLite: Embedded - MariaDB: Embedded/External - **Database Storage**: - **Filesystem**: - Linux: ext4/XFS/Btrfs/ZFS/F2FS - macOS: APFS/ HFS+ - Windows: NTFS/ReFS - **Storage Medium**: HDD/eMMC/SSD/NVMe - **Uptime Kuma Setup**: - Number of monitors: `X` validations: required: true ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.yml ================================================ --- name: 🐛 Bug Report description: | Submit a bug report to help us improve #title: "[Bug]" labels: ["bug"] body: - type: markdown attributes: value: | 🚫 **We kindly ask you to refrain from pinging maintainers unless absolutely necessary. Pings are reserved for critical/urgent issues that require immediate attention.** - type: textarea id: related-issues validations: required: true attributes: label: 📑 I have found these related issues/pull requests description: | Please search for related **[ISSUES](https://github.com/louislam/uptime-kuma/issues?q=is%3Aissue%20sort%3Acreated-desc)** and **[PULL REQUESTS](https://github.com/louislam/uptime-kuma/pulls?q=is%3Apr+sort%3Acreated-desc+)**. Explain the differences between them or clarify if you were unable to find any related issues/pull requests. placeholder: | Example: This relates to issue #1, which also affects the ... system. It should not be merged because ... - type: checkboxes id: security-policy attributes: label: 🛡️ Security Policy description: | Please review and acknowledge the Security Policy before reporting any security-related issues or bugs. 
You can find the full Security Policy **[HERE](https://github.com/louislam/uptime-kuma/security/policy)**. options: - label: | I have read and agree to Uptime Kuma's [Security Policy](https://github.com/louislam/uptime-kuma/security/policy). required: true - type: textarea id: description validations: required: false attributes: label: 📝 Description description: | You could also upload screenshots - type: textarea id: steps-to-reproduce validations: required: true attributes: label: 👟 Reproduction steps description: | How do you trigger this bug? Please walk us through it step by step. Include all important details and add screenshots where appropriate placeholder: | ... - type: textarea id: expected-behavior validations: required: true attributes: label: 👀 Expected behavior description: | What did you think would happen? placeholder: | ... - type: textarea id: actual-behavior validations: required: true attributes: label: 😓 Actual Behavior description: | What actually happen? placeholder: | ... - type: input id: uptime-kuma-version attributes: label: 🐻 Uptime-Kuma Version description: | What version of Uptime-Kuma are you running? Please do not provide Docker tags like `latest` or `1`. placeholder: | e.g., 1.23.16 or 2.0.0-beta.2 validations: required: true - type: input id: operating-system attributes: label: 💻 Operating System and Arch description: | Which OS is your server/device running on? (For Replit, please do not report this bug) placeholder: | e.g., Ubuntu Server 24.04.2 LTS (GNU/Linux 6.8.0-55-generic x86_64) validations: required: true - type: input id: browser-vendor attributes: label: 🌐 Browser description: | Which browser are you running on? 
placeholder: | e.g., Google Chrome 134.0.6998.183 (Official Build) (64-bit) validations: required: true - type: textarea id: deployment-info attributes: label: 🖥️ Deployment Environment description: | Provide details about the deployment environment, including runtime components, databases, and storage configurations. This will help assess the infrastructure and identify any potential compatibility requirements. **Remove any fields that do not apply to your setup.** value: | - **Runtime Environment**: - Docker: Version `X.X.X` (Build `Y.Y.Y`) - Docker Compose: Version `X.X.X` - Portainer (BE/CE): Version `X.X.X` (LTS: Yes/No) - MariaDB: Version `X.X.X` (LTS: Yes/No) - Node.js: Version `X.X.X` (LTS: Yes/No) - Kubernetes (K3S/K8S): Version `X.X.X` (LTS: Yes/No, via `[method/tool]`) - **Database**: - SQLite: Embedded - MariaDB: Embedded/External - **Database Storage**: - **Filesystem**: - Linux: ext4/XFS/Btrfs/ZFS/F2FS - macOS: APFS/ HFS+ - Windows: NTFS/ReFS - **Storage Medium**: HDD/eMMC/SSD/NVMe - **Uptime Kuma Setup**: - Number of monitors: `X` validations: required: true - type: textarea id: logs attributes: label: 📝 Relevant log output description: | Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. render: bash session validations: required: false ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ --- blank_issues_enabled: false ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.yml ================================================ --- name: 🚀 Feature Request description: | Submit a proposal for a new feature # title: "[Feature]" labels: ["feature-request"] body: - type: markdown attributes: value: | ### 🚫 Please Avoid Unnecessary Pinging of Maintainers We kindly ask you to refrain from pinging maintainers unless absolutely necessary. 
Pings are for critical/urgent pull requests that require immediate attention. - type: textarea id: related-issues validations: required: true attributes: label: 📑 I have found these related issues/pull requests description: | Please search for related **[ISSUES](https://github.com/louislam/uptime-kuma/issues?q=is%3Aissue%20sort%3Acreated-desc)** and **[PULL REQUESTS](https://github.com/louislam/uptime-kuma/pulls?q=is%3Apr+sort%3Acreated-desc+)**. Explain the differences between them or clarify if you were unable to find any related issues/pull requests. placeholder: | Example: This relates to issue #1, which also affects the ... system. It should not be merged because ... - type: textarea id: feature-description validations: required: true attributes: label: 🔖 Feature description description: | A clear and concise description of what the feature request is. placeholder: | You should add ... - type: textarea id: solution validations: required: true attributes: label: ✔️ Solution description: | A clear and concise description of what you want to happen. placeholder: | In my use-case, ... - type: textarea id: alternatives validations: required: false attributes: label: ❓ Alternatives description: | A clear and concise description of any alternative solutions or features you've considered. placeholder: | I have considered ... - type: textarea id: additional-context validations: required: false attributes: label: 📝 Additional Context description: | Add any other context or screenshots about the feature request here. placeholder: | ... ================================================ FILE: .github/ISSUE_TEMPLATE/security_issue.yml ================================================ --- name: 🛡️ Security Issue description: | Notify Louis Lam about a security concern. Please do NOT include any sensitive details in this issue. 
# title: "Security Issue" labels: ["security"] assignees: [louislam] body: - type: markdown attributes: value: | ## ❗ IMPORTANT: DO NOT SHARE VULNERABILITY DETAILS HERE ## Please do not open issues for upstream dependency scan results. Automated security tools often report false-positive issues that are not exploitable in the context of Uptime Kuma. Reviewing these without concrete impact does not scale for us. If you can demonstrate that an upstream issue is actually exploitable in Uptime Kuma (e.g. with a PoC or reproducible steps), we’re happy to take a look. ### ⚠️ Report a Security Vulnerability **If you have discovered a security vulnerability, please report it securely using the GitHub Security Advisory.** **Note**: This issue is only for notifying the maintainers of the repository, as the GitHub Security Advisory does not automatically send notifications. - **Confidentiality**: The information you provide in the GitHub Security Advisory will initially remain confidential. However, once the vulnerability is addressed, the advisory will be publicly disclosed on GitHub. - **Access and Visibility**: Until the advisory is published, it will only be visible to the maintainers of the repository and invited collaborators. - **Credit**: You will be automatically credited as a contributor for identifying and reporting the vulnerability. Your contribution will be reflected in the MITRE Credit System. - **Important Reminder**: **Do not include any sensitive or detailed vulnerability information in this issue.** This issue is only for sharing the advisory URL to notify the maintainers of the repository, not for discussing the vulnerability itself. **Thank you for helping us keep Uptime Kuma secure!** ## **Step 1: Submit a GitHub Security Advisory** Right-click the link below and select `Open link in new tab` to access the page. This will keep the security issue open, allowing you to easily return and paste the Advisory URL here later. 
➡️ [Create a New Security Advisory](https://github.com/louislam/uptime-kuma/security/advisories/new) ## **Step 2: Share the Advisory URL** Once you've created your advisory, please share the URL below. This will notify Louis Lam and enable them to take the appropriate action. - type: textarea id: github-advisory-url validations: required: true attributes: label: GitHub Advisory URL for @louislam placeholder: | Please paste the GitHub Advisory URL here. Only the URL is required. Example: https://github.com/louislam/uptime-kuma/security/advisories/GHSA-8h5r-7t6l-q3kz ================================================ FILE: .github/PULL_REQUEST_TEMPLATE.md ================================================ # Summary In this pull request, the following changes are made: - Foobar was changed to FooFoo, because ... - Relates to #issue-number - Resolves #issue-number
Please follow this checklist to avoid unnecessary back and forth (click to expand) - [ ] ⚠️ If there are breaking changes (a fix or feature that alters existing functionality in a way that could cause issues), I have called them out - [ ] 🧠 I have disclosed any use of LLMs/AI in this contribution and reviewed all generated content. I understand that I am responsible for and able to explain every line of code I submit. - [ ] 🔍 Any UI changes adhere to the visual style of this project. - [ ] 🛠️ I have self-reviewed and self-tested my code to ensure it works as expected. - [ ] 📝 I have commented my code, especially in hard-to-understand areas (e.g., using JSDoc for methods). - [ ] 🤖 I added or updated automated tests where appropriate. - [ ] 📄 Documentation updates are included (if applicable). - [ ] 🧰 Dependency updates are listed and explained. - [ ] ⚠️ CI passes and is green.
## Screenshots for Visual Changes - **UI Modifications**: Highlight any changes made to the user interface. - **Before & After**: Include screenshots or comparisons (if applicable). | Event | Before | After | | ------------------ | --------------------- | -------------------- | | `UP` | ![Before](image-link) | ![After](image-link) | | `DOWN` | ![Before](image-link) | ![After](image-link) | | Certificate-expiry | ![Before](image-link) | ![After](image-link) | | Testing | ![Before](image-link) | ![After](image-link) | ================================================ FILE: .github/REVIEW_GUIDELINES.md ================================================ # Uptime Kuma Review Guidelines > [!NOTE] > These review guidelines are a work in progress, and are frequently > updated and improved, so please check back frequently for the latest version. ## Preparing for a PR Review ### Read the PR description carefully Make sure you understand what the PR is trying to solve or implement. This could be a bug fix, a new feature, or a refactor. ### Check the linked issues If the PR has a linked issue, read it to better understand the context and the reason for the change. ### Check the test coverage Make sure relevant tests have been added or modified. If the PR adds new functionality, there should be tests covering the change. ## General Review ### Code formatting and style Check if the code adheres to the style guidelines of the project. Make sure there are no unused imports, variables, `console.log` for debugging in the PR. - [Project Style](../CONTRIBUTING.md#project-styles) - [Coding Style](../CONTRIBUTING.md#coding-styles) ### Readability and maintainability Is the code easy to understand for other developers? Make sure complex parts are explained with comments about **_why_** something is done, and use clear names to show **_how_**. Are variables and functions well-named, and is there a consistent naming style? 
Also, check if the code is maintainable: - Is it unnecessarily complex? Could it be simplified? - Does it follow the **[Single Responsibility Principle (SRP)]**? [Single Responsibility Principle (SRP)]: https://www.geeksforgeeks.org/single-responsibility-in-solid-design-principle/ ### Documentation Is the PR well documented? Check if the descriptions of functions, parameters, and return values are present. Are there any changes needed to the README or other documentation, for example, if new features or configurations are introduced? ## Functional Review ### Testing Ensure that the new code is properly tested. This includes unit tests, integration tests, and if necessary, end-to-end tests. ### Test results Did all tests pass in the CI pipeline (e.g., GitHub Actions, Travis, CircleCI)? ### Testing in different environments If the changes depend on certain environments or configurations, verify that the code has been tested in various environments (e.g., local development, staging, production). - [How to test Pull Requests](https://github.com/louislam/uptime-kuma/wiki/Test-Pull-Requests) ### Edge cases and regressions - Are there test cases for possible edge cases? - Could this change introduce regressions in other parts of the system? ## Security ### Security issues Check for potential security problems, such as SQL injection, XSS attacks, or unsafe API calls. Are there passwords, tokens, or other sensitive data left in the code by mistake? ### Authentication and authorization Is access to sensitive data or functionality properly secured? Check that the correct authorization and authentication mechanisms are in place. ### Security Best Practices - Ensure that the code is free from common vulnerabilities like **SQL injection**, **XSS attacks**, and **insecure API calls**. - Check for proper encryption of sensitive data, and ensure that **passwords** or **API tokens** are not hardcoded in the code. 
## Performance ### Performance impact Check if the changes negatively impact performance. This can include factors like load times, memory usage, or other performance aspects. ### Use of external libraries - Have the right libraries been chosen? - Are there unnecessary dependencies that might reduce performance or increase code complexity? - Are these dependencies actively maintained and free of known vulnerabilities? ### Performance Best Practices - **Measure performance** using tools like Lighthouse or profiling libraries. - **Avoid unnecessary dependencies** that may bloat the codebase. - Ensure that the **code does not degrade the user experience** (e.g., by increasing load times or memory consumption). ## Compliance and Integration ### Alignment with the project Are the changes consistent with the project goals and requirements? Ensure the PR aligns with the architecture and design principles of the project. ### Integration If the PR depends on other PRs or changes, verify that they integrate well with the rest of the project. Ensure the code does not cause conflicts with other active PRs. ### Backward compatibility Does the change break compatibility with older versions of the software or dependencies? If so, is there a migration plan in place? ## Logging and Error Handling ### Proper error handling - Are errors properly caught and handled instead of being silently ignored? - Are exceptions used appropriately? ### Logging - Is sufficient logging included for debugging and monitoring? - Is there excessive logging that could affect performance? ## Accessibility (for UI-related changes) If the PR affects the user interface, ensure that it meets accessibility standards: - Can users navigate using only the keyboard? - Are screen readers supported? - Is there proper color contrast for readability? - Are there **WCAG** (Web Content Accessibility Guidelines) compliance issues? - Use tools like **Axe** or **Lighthouse** to evaluate accessibility. 
## Providing Feedback ### Constructive feedback Provide clear, constructive feedback on what is good and what can be improved. If improvements are needed, be specific about what should change. ### Clarity and collaboration Ensure your feedback is friendly and open, so the team member who submitted the PR feels supported and motivated to make improvements.
For Maintainers only (click to expand)

## Go/No-Go Decision ### Go If the code has no issues and meets the project requirements, approve it (and possibly merge it). ### No-Go If there are significant issues, such as missing tests, security vulnerabilities, or performance problems, request the necessary changes before the PR can be approved. Some examples of **significant issues** include: - Missing tests for new functionality. - Identified **security vulnerabilities**. - Code changes that break **backward compatibility** without a proper migration plan. - Code that causes **major performance regressions** (e.g., high CPU/memory usage). ## After the Review ### Reordering and merging Once the necessary changes have been made and the PR is approved, the code can be merged into the main branch (e.g., `main` or `master`). ### Testing after merging Ensure that the build passes after merging the PR, and re-test the functionality in the production environment if necessary. ## Follow-up ### Communication with team members If the PR has long-term technical or functional implications, communicate the changes to the team. ### Monitoring Continue monitoring the production environment for any unexpected issues that may arise after the code has been merged.

--- This process ensures that PRs are systematically and thoroughly reviewed, improving overall code quality. ================================================ FILE: .github/config/exclude.txt ================================================ # This is a .gitignore style file for 'GrantBirki/json-yaml-validate' Action workflow ================================================ FILE: .github/copilot-instructions.md ================================================ # Copilot Instructions for Uptime Kuma ## Copilot's Goals/Tasks - Check spelling - Do not show "Pull Request Overview" - You do not have to reply if there are no issues ## Repository Overview **Uptime Kuma** is a self-hosted monitoring tool for HTTP(s), TCP, DNS, Docker, etc. Built with Vue 3 (frontend) and Node.js/Express (backend), using Socket.IO for real-time communication. - **Languages**: JavaScript, Vue 3, TypeScript (limited), HTML, CSS/SCSS - **Backend**: Node.js >= 20.4, Express.js, Socket.IO, SQLite - **Frontend**: Vue 3, Vite, Bootstrap 5, Chart.js - **Package Manager**: npm with `legacy-peer-deps=true` (.npmrc) ## Build & Validation Commands ### Prerequisites - Node.js >= 20.4.0, npm >= 9.3, Git ### Essential Command Sequence 1. **Install Dependencies**: ```bash npm ci # Use npm ci NOT npm install (~60-90 seconds) ``` 2. **Linting** (required before committing): ```bash npm run lint # Both linters (~15-30 seconds) npm run lint:prod # For production (zero warnings) ``` 3. **Build Frontend**: ```bash npm run build # Takes ~90-120 seconds, builds to dist/ ``` 4. 
**Run Tests**: ```bash npm run test-backend # Backend tests (~50-60 seconds) npm test # All tests ``` ### Development Workflow ```bash npm run dev # Starts frontend (port 3000) and backend (port 3001) ``` ## Project Architecture ### Directory Structure ``` / ├── server/ Backend source code │ ├── model/ Database models (auto-mapped to tables) │ ├── monitor-types/ Monitor type implementations │ ├── notification-providers/ Notification integrations │ ├── routers/ Express routers │ ├── socket-handlers/ Socket.IO event handlers │ ├── server.js Server entry point │ └── uptime-kuma-server.js Main server logic ├── src/ Frontend source code (Vue 3 SPA) │ ├── components/ Vue components │ ├── pages/ Page components │ ├── lang/ i18n translations │ ├── router.js Vue Router configuration │ └── main.js Frontend entry point ├── db/ Database related │ ├── knex_migrations/ Knex migration files │ └── kuma.db SQLite database (gitignored) ├── test/ Test files │ ├── backend-test/ Backend unit tests │ └── e2e/ Playwright E2E tests ├── config/ Build configuration │ ├── vite.config.js Vite build config │ └── playwright.config.js Playwright test config ├── dist/ Frontend build output (gitignored) ├── data/ App data directory (gitignored) ├── public/ Static frontend assets (dev only) ├── docker/ Docker build files └── extra/ Utility scripts ``` ### Key Configuration Files - **package.json**: Scripts, dependencies, Node.js version requirement - **.eslintrc.js**: ESLint rules (4 spaces, double quotes, unix line endings, JSDoc required) - **.stylelintrc**: Stylelint rules (4 spaces indentation) - **.editorconfig**: Editor settings (4 spaces, LF, UTF-8) - **tsconfig-backend.json**: TypeScript config for backend (only src/util.ts) - **.npmrc**: `legacy-peer-deps=true` (required for dependency resolution) - **.gitignore**: Excludes node_modules, dist, data, tmp, private ### Code Style (strictly enforced by linters) - 4 spaces indentation, double quotes, Unix line endings (LF), semicolons required 
- **Naming**: JavaScript/TypeScript (camelCase), SQLite (snake_case), CSS/SCSS (kebab-case) - JSDoc required for all functions/methods ## CI/CD Workflows **auto-test.yml** (runs on PR/push to master/1.23.X): - Linting, building, backend tests on multiple OS/Node versions (15 min timeout) - E2E Playwright tests **validate.yml**: Validates JSON/YAML files, language files, knex migrations **PR Requirements**: All linters pass, tests pass, code follows style guidelines ## Common Issues 1. **npm install vs npm ci**: Always use `npm ci` for reproducible builds 2. **TypeScript errors**: `npm run tsc` shows 1400+ errors - ignore them, they don't affect builds 3. **Stylelint warnings**: Deprecation warnings are expected, ignore them 4. **Test failures**: Always run `npm run build` before running tests 5. **Port conflicts**: Dev server uses ports 3000 and 3001 6. **First run**: Server shows "db-config.json not found" - this is expected, starts setup wizard ## Translations - Managed via Weblate. Add keys to `src/lang/en.json` only - Don't include other languages in PRs - Use `$t("key")` in Vue templates ## Database - Primary: SQLite (also supports MariaDB/MySQL) - Migrations in `db/knex_migrations/` using Knex.js - Filename format validated by CI: `node ./extra/check-knex-filenames.mjs` ## Testing - **Backend**: Node.js test runner, fast unit tests - **E2E**: Playwright (requires `npx playwright install` first time) - Test data in `data/playwright-test` ## Adding New Features ### New Notification Provider Files to modify: 1. `server/notification-providers/PROVIDER_NAME.js` (backend logic) 2. `server/notification.js` (register provider) 3. `src/components/notifications/PROVIDER_NAME.vue` (frontend UI) 4. `src/components/notifications/index.js` (register frontend) 5. `src/components/NotificationDialog.vue` (add to list) 6. `src/lang/en.json` (add translation keys) ### New Monitor Type Files to modify: 1. `server/monitor-types/MONITORING_TYPE.js` (backend logic) 2. 
`server/uptime-kuma-server.js` (register monitor type) 3. `src/pages/EditMonitor.vue` (frontend UI) 4. `src/lang/en.json` (add translation keys) ## Important Notes 1. **Trust these instructions** - based on testing. Search only if incomplete/incorrect 2. **Dependencies**: 5 known vulnerabilities (3 moderate, 2 high) - acknowledged, don't fix without discussion 3. **Git Branches**: `master` (v2 development), `1.23.X` (v1 maintenance) 4. **Node Version**: >= 20.4.0 required 5. **Socket.IO**: Most backend logic in `server/socket-handlers/`, not REST 6. **Never commit**: `data/`, `dist/`, `tmp/`, `private/`, `node_modules/` ================================================ FILE: .github/dependabot.yml ================================================ # Dependabot configuration for Uptime Kuma # See: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file version: 2 updates: # Enable version updates for GitHub Actions - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" day: "monday" # Group all GitHub Actions updates into a single PR groups: github-actions: patterns: - "*" open-pull-requests-limit: 5 commit-message: prefix: "chore" include: "scope" cooldown: default-days: 7 ================================================ FILE: .github/workflows/auto-test.yml ================================================ name: Auto Test concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-server cancel-in-progress: true on: push: branches: [master, 1.23.X, 3.0.0] pull_request: permissions: {} jobs: auto-test: runs-on: ${{ matrix.os }} permissions: contents: read strategy: fail-fast: false matrix: os: [macos-latest, ubuntu-22.04, windows-latest, ubuntu-22.04-arm] # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ node: [20, 24] # Also test non-LTS, but only on Ubuntu. 
include: - os: ubuntu-22.04 node: 25 steps: - run: git config --global core.autocrlf false # Mainly for Windows - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Cache/Restore node_modules uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: node-modules-cache with: path: node_modules key: node-modules-${{ runner.os }}-node${{ matrix.node }}-${{ hashFiles('**/package-lock.json') }} - name: Use Node.js ${{ matrix.node }} uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: ${{ matrix.node }} - run: npm clean-install --no-fund - name: Rebuild native modules for ARM64 if: matrix.os == 'ubuntu-22.04-arm' run: npm rebuild @louislam/sqlite3 - run: npm run build - run: npm run test-backend env: HEADLESS_TEST: 1 JUST_FOR_TEST: ${{ secrets.JUST_FOR_TEST }} # As a lot of dev dependencies are not supported on ARMv7, we have to test it separately and just test if `npm ci --production` works armv7-simple-test: runs-on: ubuntu-latest permissions: contents: read strategy: fail-fast: false matrix: node: [20, 22] # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Set up QEMU uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 with: platforms: linux/arm/v7 - name: Set up Docker Buildx uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Test on ARMv7 using Docker with QEMU run: | docker run --rm --platform linux/arm/v7 \ -v $PWD:/workspace \ -w /workspace \ arm32v7/node:${{ matrix.node }} \ npm clean-install --no-fund --production check-linters: runs-on: ubuntu-latest permissions: contents: read steps: - run: git config --global core.autocrlf false # Mainly for Windows - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Cache/Restore node_modules uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: node-modules-cache with: path: node_modules key: node-modules-${{ runner.os }}-node${{ matrix.node }}-${{ hashFiles('**/package-lock.json') }} - name: Use Node.js 20 uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 20 - run: npm clean-install --no-fund - run: npm run lint:prod e2e-test: runs-on: ubuntu-22.04-arm permissions: contents: read env: PLAYWRIGHT_VERSION: ~1.39.0 steps: - run: git config --global core.autocrlf false # Mainly for Windows - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Cache/Restore node_modules uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: node-modules-cache with: path: node_modules key: node-modules-${{ runner.os }}-node${{ matrix.node }}-${{ hashFiles('**/package-lock.json') }} - name: Setup Node.js uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 22 - run: npm clean-install --no-fund - name: Rebuild native modules for ARM64 run: npm rebuild @louislam/sqlite3 - name: Install Playwright ${{ env.PLAYWRIGHT_VERSION }} run: npx playwright@${{ env.PLAYWRIGHT_VERSION }} install - run: npm run build - run: npm run test-e2e ================================================ FILE: .github/workflows/autofix.yml ================================================ name: autofix.ci on: push: branches: ["master", "1.23.X"] pull_request: permissions: {} jobs: autofix: runs-on: ubuntu-latest permissions: contents: read steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Cache/Restore node_modules uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: node-modules-cache 
with: path: node_modules key: node-modules-${{ runner.os }}-${{ hashFiles('**/package-lock.json') }} - name: Setup Node.js uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 20 - name: Install dependencies run: npm ci - name: Auto-fix JavaScript/Vue linting issues run: npm run lint-fix:js continue-on-error: true - name: Auto-fix CSS/SCSS linting issues run: npm run lint-fix:style continue-on-error: true - name: Auto-format code with Prettier run: npm run fmt continue-on-error: true - name: Compile TypeScript run: npm run tsc continue-on-error: true - uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27 ================================================ FILE: .github/workflows/build-docker-base.yml ================================================ name: Build Docker Base Images on: workflow_dispatch: # Allow manual trigger permissions: {} jobs: build-docker-base: runs-on: ubuntu-latest timeout-minutes: 120 permissions: contents: read packages: write steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Set up QEMU uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Login to Docker Hub uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io username: ${{ secrets.GHCR_USERNAME }} password: ${{ secrets.GHCR_TOKEN }} - name: Use Node.js 20 uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 20 - name: Build and push base2-slim image run: npm run build-docker-base-slim - name: Build and push base2 image run: 
npm run build-docker-base ================================================ FILE: .github/workflows/build-docker-push.yml ================================================ name: Build Docker Push Image on: schedule: # Runs at 2:00 AM UTC on the 1st of every month - cron: "0 2 1 * *" workflow_dispatch: # Allow manual trigger permissions: {} jobs: build-docker-push: # Only run on the original repository, not on forks if: github.repository == 'louislam/uptime-kuma' runs-on: ubuntu-latest timeout-minutes: 120 permissions: contents: read steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Set up QEMU uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Login to Docker Hub uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Use Node.js 20 uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 20 - name: Set up Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - name: Install cross-env run: npm install -g cross-env - name: Build and push Docker image working-directory: extra/uptime-kuma-push run: npm run build-docker ================================================ FILE: .github/workflows/close-incorrect-issue.yml ================================================ name: Close Incorrect Issue on: issues: types: [opened] permissions: {} jobs: close-incorrect-issue: runs-on: ${{ matrix.os }} permissions: issues: write strategy: matrix: os: [ubuntu-latest] node-version: [20] steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Use Node.js ${{ matrix.node-version }} uses: 
actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: ${{ matrix.node-version }} - run: npm ci - name: Close incorrect issue run: node extra/close-incorrect-issue.js ${{ secrets.GITHUB_TOKEN }} ${{ github.event.issue.number }} "$ISSUE_USER_LOGIN" env: ISSUE_USER_LOGIN: ${{ github.event.issue.user.login }} ================================================ FILE: .github/workflows/codeql-analysis.yml ================================================ name: "CodeQL" on: push: branches: ["master", "1.23.X"] pull_request: branches: ["master", "1.23.X"] schedule: - cron: "16 22 * * 0" jobs: analyze: # Only run scheduled analysis on the original repository, not on forks if: github.event_name != 'schedule' || github.repository == 'louislam/uptime-kuma' name: Analyze runs-on: ubuntu-latest timeout-minutes: 360 permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: ["go", "javascript-typescript"] steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 with: languages: ${{ matrix.language }} - name: Autobuild uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 with: category: "/language:${{matrix.language}}" zizmor: # Only run scheduled analysis on the original repository, not on forks if: github.event_name != 'schedule' || github.repository == 'louislam/uptime-kuma' runs-on: ubuntu-latest permissions: security-events: write contents: read actions: read steps: - name: Checkout repository uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Run zizmor uses: zizmorcore/zizmor-action@e639db99335bc9038abc0e066dfcd72e23d26fb4 # v0.3.0 ================================================ FILE: .github/workflows/conflict-labeler.yml ================================================ name: Merge Conflict Labeler # pull_request_target is safe here because: # 1. Only uses a pinned trusted action (by SHA) # 2. Has minimal permissions (contents: read, pull-requests: write) # 3. Doesn't checkout or execute any untrusted code from PRs # 4. 
Only adds/removes labels based on merge conflict status on: # zizmor: ignore[dangerous-triggers] push: branches: - master pull_request_target: branches: - master types: [synchronize] jobs: label: name: Labeling runs-on: ubuntu-latest if: ${{ github.repository == 'louislam/uptime-kuma' }} permissions: contents: read pull-requests: write steps: - name: Apply label uses: eps1lon/actions-label-merge-conflict@1df065ebe6e3310545d4f4c4e862e43bdca146f0 # v3.0.3 with: dirtyLabel: "needs:resolve-merge-conflict" repoToken: "${{ secrets.GITHUB_TOKEN }}" ================================================ FILE: .github/workflows/mark-as-draft-on-requesting-changes.yml ================================================ name: Mark PR as draft when changes are requested # pull_request_target is safe here because: # 1. Does not use any external actions; only uses the GitHub CLI via run commands # 2. Has minimal permissions # 3. Doesn't checkout or execute any untrusted code from PRs # 4. Only adds/removes labels or changes the draft status on: # zizmor: ignore[dangerous-triggers] pull_request_target: types: - review_submitted - labeled - ready_for_review permissions: {} jobs: mark-draft: runs-on: ubuntu-latest permissions: pull-requests: write if: | ( github.event.action == 'review_submitted' && github.event.review.state == 'changes_requested' ) || ( github.event.action == 'labeled' && github.event.label.name == 'pr:please address review comments' ) steps: - name: Add label on requested changes if: github.event.review.state == 'changes_requested' env: GH_TOKEN: ${{ github.token }} run: | gh issue edit "${{ github.event.pull_request.number }}" \ --repo "${{ github.repository }}" \ --add-label "pr:please address review comments" - name: Mark PR as draft env: GH_TOKEN: ${{ github.token }} run: | gh pr ready "${{ github.event.pull_request.number }}" \ --repo "${{ github.repository }}" \ --undo || true # || true to ignore the case where the pr is already a draft ready-for-review: runs-on: 
ubuntu-latest permissions: pull-requests: write if: github.event.action == 'ready_for_review' steps: - name: Update labels for review env: GH_TOKEN: ${{ github.token }} run: | gh issue edit "${{ github.event.pull_request.number }}" \ --repo "${{ github.repository }}" \ --remove-label "pr:please address review comments" || true gh issue edit "${{ github.event.pull_request.number }}" \ --repo "${{ github.repository }}" \ --add-label "pr:needs review" ================================================ FILE: .github/workflows/new-contributor-pr.yml ================================================ name: New contributor message on: # Safety # This workflow uses pull_request_target so it can run with write permissions on first-time contributor PRs. # It is safe because it does not check out or execute any code from the pull request and # only uses the pinned, trusted plbstl/first-contribution action pull_request_target: # zizmor: ignore[dangerous-triggers] types: [opened, closed] branches: - master permissions: pull-requests: write jobs: build: if: github.repository == 'louislam/uptime-kuma' name: Hello new contributor runs-on: ubuntu-latest timeout-minutes: 60 steps: - uses: plbstl/first-contribution@4b2b042fffa26792504a18e49aa9543a87bec077 # v4.1.0 with: pr-reactions: rocket pr-opened-msg: > Hello and thanks for lending a paw to Uptime Kuma! 🐻👋 As this is your first contribution, please be sure to check out our [Pull Request guidelines](https://github.com/louislam/uptime-kuma/blob/master/CONTRIBUTING.md#can-i-create-a-pull-request-for-uptime-kuma). In particular: - Mark your PR as Draft while you’re still making changes - Mark it as Ready for review once it’s fully ready If you have any design or process questions, feel free to ask them right here in this pull request - unclear documentation is a bug too. pr-merged-msg: > @{fc-author} congrats on your first contribution to Uptime Kuma! 
🐻 We hope you enjoy contributing to our project and look forward to seeing more of your work in the future! If you want to see your contribution in action, please see our [nightly builds here](https://hub.docker.com/layers/louislam/uptime-kuma/nightly2). ================================================ FILE: .github/workflows/npm-update.yml ================================================ name: NPM Update on: workflow_dispatch: schedule: - cron: "0 0 * * *" # Run daily at midnight UTC permissions: contents: write pull-requests: write jobs: npm-update: # Only run on the original repository, not on forks if: github.repository == 'louislam/uptime-kuma' runs-on: ubuntu-latest permissions: contents: write pull-requests: write steps: - name: Checkout master branch uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: master token: ${{ secrets.GITHUB_TOKEN }} - name: Setup Node.js uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 20 - name: Generate lockfile from scratch run: | rm -f package-lock.json npm install --package-lock-only - name: Check if there are changes id: check_changes run: | if git diff --quiet package-lock.json; then echo "has_changes=false" >> $GITHUB_OUTPUT else echo "has_changes=true" >> $GITHUB_OUTPUT fi - name: Configure git if: steps.check_changes.outputs.has_changes == 'true' run: | git config --global user.name "github-actions[bot]" git config --global user.email "github-actions[bot]@users.noreply.github.com" - name: Commit changes if: steps.check_changes.outputs.has_changes == 'true' run: | git add package-lock.json git commit -m "chore: Update dependencies" - name: Force push to npm-update branch if: steps.check_changes.outputs.has_changes == 'true' run: | git push -f origin HEAD:npm-update - name: Check if PR exists if: steps.check_changes.outputs.has_changes == 'true' id: check_pr env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | PR_EXISTS=$(gh pr list --base master 
--head npm-update --json number --jq 'length') if [ "$PR_EXISTS" -eq "0" ]; then echo "pr_exists=false" >> $GITHUB_OUTPUT else echo "pr_exists=true" >> $GITHUB_OUTPUT fi - name: Create Pull Request if: steps.check_changes.outputs.has_changes == 'true' && steps.check_pr.outputs.pr_exists == 'false' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh pr create \ --base master \ --head npm-update \ --title "chore: Update dependencies" \ --body "" ================================================ FILE: .github/workflows/pr-description-check.yml ================================================ name: "PR description template check" on: # zizmor: ignore[dangerous-triggers] pull_request_target: types: [opened, reopened] permissions: pull-requests: write issues: write contents: read jobs: check-pr-description: name: Check PR description and close if missing template phrase runs-on: ubuntu-latest permissions: pull-requests: write issues: write steps: - name: Check PR description uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const pr = context.payload.pull_request; const body = (pr && pr.body) ? pr.body : ""; const requiredPhrase = "avoid unnecessary back and forth"; const exclude = ["UptimeKumaBot", "Copilot", "copilot-swe-agent"]; const excludeLower = exclude.map((e) => e.toLowerCase()); const author = pr?.user?.login || ""; // If author is in exclude list, skip if (author && excludeLower.includes(author.toLowerCase())) { core.info(`PR #${pr.number} opened by excluded user '${author}', skipping template check.`); return; } if (!body || !body.toLowerCase().includes(requiredPhrase.toLowerCase())) { const owner = context.repo.owner; const repo = context.repo.repo; const number = pr.number; const commentBody = `Hello! 
This pull request does not follow the repository's PR template and is being closed automatically.`; // Post comment await github.rest.issues.createComment({ owner, repo, issue_number: number, body: commentBody }); // Close await github.rest.pulls.update({ owner, repo, pull_number: number, state: "closed" }); core.info(`Closed PR #${number} because required phrase was not present.`); } else { core.info("PR description contains required phrase; no action taken."); } ================================================ FILE: .github/workflows/pr-title.yml ================================================ name: "PR Metadata" # if someone opens a PR, edits it, or reopens it we want to validate the title # This is separate from the rest of the CI as the title may change without code changes on: # SECURITY: pull_request_target is used here to allow validation of PRs from forks. # This is safe because: # 1. No code from the PR is checked out # 2. Permissions are restricted to pull-requests: read # 3. Only a trusted third-party action is used to validate the PR title # 4. 
No user-controlled code is executed pull_request_target: # zizmor: ignore[dangerous-triggers] types: - opened - edited - reopened - synchronize permissions: pull-requests: read jobs: pr-title: name: Validate PR title follows https://conventionalcommits.org runs-on: ubuntu-latest permissions: pull-requests: read steps: - uses: amannn/action-semantic-pull-request@48f256284bd46cdaab1048c3721360e808335d50 # v6.1.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .github/workflows/prevent-file-change.yml ================================================ name: prevent-file-change on: pull_request: permissions: {} jobs: check-file-changes: runs-on: ubuntu-latest permissions: pull-requests: read steps: - name: Prevent file change uses: xalvarez/prevent-file-change-action@004d9f17c2e4a7afa037cda5f38dc55a5e9c9c06 # v1.9.1 with: githubToken: ${{ secrets.GITHUB_TOKEN }} # Regex, /src/lang/*.json is not allowed to be changed, except for /src/lang/en.json pattern: '^(?!src/lang/en\.json$)src/lang/.*\.json$' trustedAuthors: UptimeKumaBot ================================================ FILE: .github/workflows/release-beta.yml ================================================ name: Beta Release on: workflow_dispatch: inputs: version: description: "Beta version number (e.g., 2.1.0-beta.2)" required: true type: string previous_version: description: "Previous version tag for changelog (e.g., 2.1.0-beta.1)" required: true type: string dry_run: description: "Dry Run (The docker image will not be pushed to registries. 
PR will still be created.)" required: false type: boolean default: false permissions: contents: write pull-requests: write jobs: beta-release: runs-on: ubuntu-latest timeout-minutes: 120 steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: master persist-credentials: true fetch-depth: 0 # Fetch all history for changelog generation - name: Set up Node.js uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 24 - name: Create release branch env: VERSION: ${{ inputs.version }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@github.com/${{ github.repository }}.git" # Delete remote branch if it exists git push origin --delete "release-${VERSION}" || true # Delete local branch if it exists git branch -D "release-${VERSION}" || true # For testing purpose # git checkout beta-workflow git checkout -b "release-${VERSION}" - name: Install dependencies run: npm clean-install --no-fund - name: Set up Docker Buildx uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Set up QEMU uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - name: Login to Docker Hub uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io username: ${{ secrets.GHCR_USERNAME }} password: ${{ secrets.GHCR_TOKEN }} - name: Run release-beta env: RELEASE_BETA_VERSION: ${{ inputs.version }} RELEASE_PREVIOUS_VERSION: ${{ inputs.previous_version }} DRY_RUN: ${{ inputs.dry_run }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_RUN_ID: ${{ 
github.run_id }} run: npm run release-beta - name: Upload dist.tar.gz as artifact uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: dist-${{ inputs.version }} path: ./tmp/dist.tar.gz retention-days: 90 ================================================ FILE: .github/workflows/release-final.yml ================================================ name: Final Release on: workflow_dispatch: inputs: version: description: "Release version number (e.g., 2.1.0)" required: true type: string previous_version: description: "Previous version tag for changelog (e.g., 2.1.0-beta.3)" required: true type: string dry_run: description: "Dry Run (The docker image will not be pushed to registries. PR will still be created.)" required: false type: boolean default: false permissions: contents: write pull-requests: write jobs: release: runs-on: ubuntu-latest timeout-minutes: 120 steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: ref: master persist-credentials: true fetch-depth: 0 # Fetch all history for changelog generation - name: Set up Node.js uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0 with: node-version: 24 - name: Create release branch env: VERSION: ${{ inputs.version }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@github.com/${{ github.repository }}.git" # Delete remote branch if it exists git push origin --delete "release-${VERSION}" || true # Delete local branch if it exists git branch -D "release-${VERSION}" || true # For testing purpose # git checkout beta-workflow git checkout -b "release-${VERSION}" - name: Install dependencies run: npm clean-install --no-fund - name: Set up Docker Buildx uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - name: Set up QEMU 
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - name: Login to Docker Hub uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 with: registry: ghcr.io username: ${{ secrets.GHCR_USERNAME }} password: ${{ secrets.GHCR_TOKEN }} - name: Run release-final env: RELEASE_VERSION: ${{ inputs.version }} RELEASE_PREVIOUS_VERSION: ${{ inputs.previous_version }} DRY_RUN: ${{ inputs.dry_run }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_RUN_ID: ${{ github.run_id }} run: npm run release-final - name: Upload dist.tar.gz as artifact uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: dist-${{ inputs.version }} path: ./tmp/dist.tar.gz retention-days: 90 ================================================ FILE: .github/workflows/release-nightly.yml ================================================ name: Nightly Release on: schedule: # Runs at 2:00 AM UTC every day - cron: "0 2 * * *" workflow_dispatch: # Allow manual trigger permissions: {} jobs: release-nightly: # Only run on the original repository, not on forks if: github.repository == 'louislam/uptime-kuma' runs-on: ubuntu-latest timeout-minutes: 120 permissions: contents: read packages: write steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: { persist-credentials: false } - name: Set up QEMU uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Login to Docker Hub uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to 
GitHub Container Registry uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io username: ${{ secrets.GHCR_USERNAME }} password: ${{ secrets.GHCR_TOKEN }} - name: Use Node.js 20 uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 20 - name: Cache/Restore node_modules uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: node-modules-cache with: path: node_modules key: node-modules-${{ runner.os }}-node20-${{ hashFiles('**/package-lock.json') }} - name: Install dependencies run: npm clean-install --no-fund - name: Run release-nightly run: npm run release-nightly ================================================ FILE: .github/workflows/stale-bot.yml ================================================ name: "Automatically close stale issues" on: workflow_dispatch: schedule: - cron: "0 */6 * * *" #Run every 6 hours permissions: {} jobs: stale: # Only run on the original repository, not on forks if: github.repository == 'louislam/uptime-kuma' runs-on: ubuntu-latest permissions: actions: write issues: write steps: - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1 with: stale-issue-message: |- We are clearing up our old `help`-issues and your issue has been open for 60 days with no activity. If no comment is made and the stale label is not removed, this issue will be closed in 7 days. days-before-stale: 60 days-before-close: 7 days-before-pr-stale: -1 days-before-pr-close: -1 exempt-issue-labels: "News,discussion,bug,doc,feature-request" exempt-issue-assignees: "louislam" operations-per-run: 200 - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1 with: stale-issue-message: |- This issue was marked as `cannot-reproduce` by a maintainer. If an issue is non-reproducible, we cannot fix it, as we do not know what the underlying issue is. If you have any ideas how we can reproduce this issue, we would love to hear them. 
We don't have a good way to deal with truly unreproducible issues and are going to close this issue in a month. If you think there might be other differences in our environment or in how we tried to reproduce this, we would appreciate any ideas.
================================================ node_modules .DS_Store dist dist-ssr *.local .idea /data !/data/.gitkeep /data* .vscode /private /out /tmp .env /extra/healthcheck.exe /extra/healthcheck /extra/healthcheck-armv7 extra/exe-builder/bin extra/exe-builder/obj .vs .vscode ================================================ FILE: .npmrc ================================================ legacy-peer-deps=true ================================================ FILE: .prettierignore ================================================ # language files src/lang/*.json ================================================ FILE: .prettierrc.js ================================================ /** * Prettier Configuration for Uptime Kuma * * Usage: * npm run fmt - Format all files (auto-runs in CI via autofix workflow) * npm run fmt -- --check - Check formatting without making changes * * TIP: This formatter is automatically run in CI, so no need to worry about it */ module.exports = { // Core formatting options - matching original ESLint rules semi: true, singleQuote: false, trailingComma: "es5", printWidth: 120, tabWidth: 4, useTabs: false, endOfLine: "lf", arrowParens: "always", bracketSpacing: true, bracketSameLine: false, // Vue-specific settings vueIndentScriptAndStyle: false, singleAttributePerLine: false, htmlWhitespaceSensitivity: "ignore", // More forgiving with whitespace in HTML // Override settings for specific file types overrides: [ { files: "*.vue", options: { parser: "vue", }, }, { files: ["*.json"], options: { tabWidth: 4, trailingComma: "none", }, }, { files: ["*.yml", "*.yaml"], options: { tabWidth: 2, trailingComma: "none", }, }, { files: ["src/icon.js"], options: { trailingComma: "all", }, }, { files: ["*.md"], options: { printWidth: 100, proseWrap: "preserve", tabWidth: 2, }, }, ], }; ================================================ FILE: .stylelintrc ================================================ { "extends": [ "stylelint-config-standard", 
"stylelint-config-prettier" ], "customSyntax": "postcss-html", "rules": { "no-descending-specificity": null, "declaration-empty-line-before": null, "alpha-value-notation": "number", "color-function-notation": "legacy", "shorthand-property-no-redundant-values": null, "color-hex-length": null, "declaration-block-no-redundant-longhand-properties": null, "at-rule-no-unknown": null } } ================================================ FILE: CNAME ================================================ git.kuma.pet ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
## Our Standards Examples of behavior that contributes to a positive environment for our community include: - Demonstrating empathy and kindness toward other people - Being respectful of differing opinions, viewpoints, and experiences - Giving and gracefully accepting constructive feedback - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience - Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: - The use of sexualized language or imagery, and sexual attention or advances of any kind - Trolling, insulting or derogatory comments, and personal or political attacks - Public or private harassment - Publishing others' private information, such as a physical or email address, without their explicit permission - Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. 
Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations ================================================ FILE: CONTRIBUTING.md ================================================ # Project Info First of all, I want to thank everyone who has submitted issues or shared pull requests for Uptime Kuma. I never thought the GitHub community would be so nice! Because of this, I also never thought that other people would actually read and edit my code. Parts of the code are not very well-structured or commented, sorry about that. Before you start, please read our [Code of Conduct](CODE_OF_CONDUCT.md) to understand our community standards. The project was created with `vite` and is written in `vue3`. Our backend lives in the `server`-directory and mostly communicates via websockets. Both frontend and backend share the same `package.json`. 
For production, the frontend is built into the `dist`-directory and the server (`express.js`) exposes the `dist` directory as the root of the endpoint. For development, we run vite in development mode on another port. ## Directories - `config` (dev config files) - `data` (App data) - `db` (Base database and migration scripts) - `dist` (Frontend build) - `docker` (Dockerfiles) - `extra` (Extra useful scripts) - `public` (Frontend resources for dev only) - `server` (Server source code) - `src` (Frontend source code) - `test` (unit test) ## Can I Create a Pull Request for Uptime Kuma? Whether or not you can create a pull request depends on the nature of your contribution. We value both your time and our maintainers' time, so we want to make sure it's spent efficiently. If you're unsure about any process or step, you're probably not the only one with that question—please feel free to ask. We're happy to help! Different types of pull requests (PRs) may have different guidelines, so be sure to review the appropriate one for your contribution. -
Security Fixes (click to expand)

Submitting security fixes is something that may put the community at risk. Please read through our [security policy](SECURITY.md) and submit vulnerabilities via an [advisory] + [issue] instead. We encourage you to submit how to fix a vulnerability if you know how to, this is not required. Following the security policy allows us to properly test, fix bugs. This review allows us to notice, if there are any changes necessary to unrelated parts like the documentation. [**PLEASE SEE OUR SECURITY POLICY.**](SECURITY.md) [advisory]: https://github.com/louislam/uptime-kuma/security/advisories/new [issue]: https://github.com/louislam/uptime-kuma/issues/new?template=security_issue.yml

-
Small, Non-Breaking Bug Fixes (click to expand)

If you come across a bug and think you can solve it, we appreciate your work.

-
Translations / Internationalisation (i18n) (click to expand)

Please add **all** strings that are translatable to `src/lang/en.json`. If translation keys are omitted, they cannot be translated. **Do not include any other languages in your initial pull request** (even if it is your mother tongue) to avoid merge conflicts between Weblate and `master`. Once your PR is merged into `master`, the strings can be translated by awesome people donating their language skills. We use Weblate to localise this project into many languages. If you want to help translate Uptime Kuma into your language, please see [these instructions on how to translate using Weblate](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md). There are some cases where a change cannot be done directly in Weblate and requires a PR: - A text may not yet be localisable. In this case, **adding a new language key** via `{{ $t("Translation key") }}` or [``](https://vue-i18n.intlify.dev/guide/advanced/component.html) might be necessary. - Language keys need to be **added to `en.json`** to appear in Weblate. If this has not been done, a PR is appreciated. - **Adding a new language** requires creating a new file. See [these instructions](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md). Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.

-
New Notification Providers (click to expand)

To set up a new notification provider these files need to be modified/created: - `server/notification-providers/PROVIDER_NAME.js` is where the heart of the notification provider lives. - Both `monitorJSON` and `heartbeatJSON` can be `null` for some events. If both are `null`, this is a general testing message, but if just `heartbeatJSON` is `null` this is a certificate expiry. - Please wrap the axios call into a ```js try { let result = await axios.post(...); if (result.status === ...) ... } catch (error) { this.throwGeneralAxiosError(error); } ``` - `server/notification.js` is where the backend of the notification provider needs to be registered. _If you have an idea how we can skip this step, we would love to hear about it ^^_ - `src/components/NotificationDialog.vue` you need to decide if the provider is a regional or a global one and add it with a name to the respective list - `src/components/notifications/PROVIDER_NAME.vue` is where the frontend of each provider lives. Please make sure that you have: - used `HiddenInput` for secret credentials - included all the necessary helptexts/placeholder/.. to make sure the notification provider is simple to setup for new users. - include all translations (`{{ $t("Translation key") }}`, [`i18n-t keypath="Translation key">`](https://vue-i18n.intlify.dev/guide/advanced/component.html)) in `src/lang/en.json` to enable our translators to translate this - `src/components/notifications/index.js` is where the frontend of the provider needs to be registered. _If you have an idea how we can skip this step, we would love to hear about it ^^_ Offering notifications is close to the core of what we are as an uptime monitor. Therefore, making sure that they work is also really important. Because testing notification providers is quite time intensive, we mostly offload this onto the person contributing a notification provider. 
To make sure you have tested the notification provider, please include screenshots of the following events in the pull-request description: - `UP`/`DOWN` - Certificate Expiry via - Domain Expiry via and a larger time set - Testing (the test button on the notification provider setup page)
Using the following way to format this is encouraged: ```md | Event | Before | After | | ------------------ | --------------------- | -------------------- | | `UP` | ![Before](image-link) | ![After](image-link) | | `DOWN` | ![Before](image-link) | ![After](image-link) | | Certificate-expiry | ![Before](image-link) | ![After](image-link) | | Domain-expiry | ![Before](image-link) | ![After](image-link) | | Testing | ![Before](image-link) | ![After](image-link) | ``` Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.

-
New Monitoring Types (click to expand)

To set up a new monitoring type these files need to be modified/created: - `server/monitor-types/MONITORING_TYPE.js` is the core of each monitor. The `async check(...)`-function should: - in the happy-path: set `heartbeat.msg` to a successful message and set `heartbeat.status = UP` - in the unhappy-path: throw an `Error` for each fault that is detected with an actionable error message. - NEVER set `heartbeat.status = DOWN` unless you want to explicitly ignore retries. - `server/uptime-kuma-server.js` is where the monitoring backend needs to be registered. _If you have an idea how we can skip this step, we would love to hear about it ^^_ - `src/pages/EditMonitor.vue` is the shared frontend users interact with. Please make sure that you have: - used `HiddenInput` for secret credentials - included all the necessary helptexts/placeholder/.. to make sure the monitoring type is simple to set up for new users. - include all translations (`{{ $t("Translation key") }}`, [``](https://vue-i18n.intlify.dev/guide/advanced/component.html)) in `src/lang/en.json` to enable our translators to translate this Because maintainer time is precious, junior maintainers may merge uncontroversial PRs in this area.

-
New Features / Major Changes / Breaking Bugfixes (click to expand)

be sure to **create an empty draft pull request or open an issue, so we can have a discussion first**. This is especially important for large pull requests or when you don't know if it will be merged or not. When adding new features, please also add tests to ensure your changes work as expected and to prevent future regressions. Because of the large impact of this work, only senior maintainers may merge PRs in this area.

-
As a First-Time Contributor (click to expand)

Contributing is easy and fun. We will guide you through the process: 1. **Fork** the [Uptime-Kuma repository](https://github.com/louislam/uptime-kuma/) and **clone** it to your local machine. 2. **Create a new branch** for your changes (e.g., `signal-notification-provider`). 3. **Make your changes** and **commit** them with a clear message. 4. **Push** your changes to your forked repository. 5. **Open a pull request** to the `master` branch of the Uptime Kuma repository. - For large changes, please open a **draft pull request** first to discuss the changes with the maintainers. 6. **Provide a clear and concise description** of the changes you've made and link any related issues. 7. **Complete the PR checklist** and make sure all CI checks pass. 8. **Request a review** when your pull request is ready. ## When Can You Change the PR Status to "Ready for Review"? A PR should remain in **draft status** until all tasks are completed. Only change the status to **Ready for Review** when: - You have implemented all planned changes. - Your code is fully tested and ready for review. - You have updated or created the necessary tests. - You have verified that CI/CD checks pass successfully. A volunteer maintainer will review your PR as soon as possible. You can help us by reviewing other PRs or taking a look at open issues. ## The following rules are essential for making your PR mergeable - Merging multiple issues by a huge PR is more difficult to review and causes conflicts with other PRs. Please - (if possible) **create one PR for one issue** or - (if not possible) **explain which issues a PR addresses and why this PR should not be broken apart** - Make sure your **PR passes our continuous integration**. PRs will not be merged unless all CI-Checks are green. - **Breaking changes** (unless for a good reason and discussed beforehand) will not get merged / not get merged quickly. Such changes require a major version release. - **Test your code** before submitting a PR. 
Buggy PRs will not be merged. - Make sure the **UI/UX is close to Uptime Kuma**. - **Think about the maintainability**: Don't add functionality that is completely **out of scope**. Keep in mind that we need to be able to maintain the functionality. - Don't modify or delete existing logic without a valid reason. - Don't convert existing code into other programming languages for no reason. ### Continuous Integration All pull requests must pass our continuous integration checks. These checks include: - **Linting**: We use ESLint and Stylelint for code quality checks. You can run the linter locally with `npm run lint`. - **Formatting**: We use Prettier for code formatting. You can format your code with `npm run fmt` (or CI will do this for you) - **Testing**: We use Playwright for end-to-end tests and have a suite of backend tests. You can run the tests locally with `npm test`. I ([@louislam](https://github.com/louislam)) have the final say. If your pull request does not meet my expectations, I will reject it, no matter how much time you spent on it. We will assign your pull request to a [milestone](https://github.com/louislam/uptime-kuma/milestones), if we plan to review and merge it. Please don't rush or ask for an ETA. We have to understand the pull request, make sure it has no breaking changes and stick to the vision of this project, especially for large pull requests. ## I'd Like to Work on an Issue. How Do I Do That? We have found that assigning people to issues is unnecessary management overhead. Instead, a short comment stating that you want to work on an issue is appreciated, as it saves time for other developers. If you encounter any problems during development, feel free to leave a comment describing what you are stuck on. We are here to help. ## Project Style I personally do not like something that requires a lot of configuration before you can finally start the app. The goal is to make the Uptime Kuma installation as easy as installing a mobile app. 
- Easy to install for non-Docker users - no native build dependency is needed (for `x86_64`/`armv7`/`arm64`) - no extra configuration and - no extra effort required to get it running - Single container for Docker users - no complex docker-compose file - mapping the volume and exposing the port should be the only requirements - Settings should be configurable in the frontend. Environment variables are discouraged, unless it is related to startup such as `DATA_DIR` - Easy to use - The web UI styling should be consistent and nice ## Coding Styles - 4 spaces indentation - Follow `.editorconfig` - Follow ESLint - Methods and functions should be documented with JSDoc ## Name Conventions - Javascript/Typescript: camelCaseType - SQLite: snake_case (Underscore) - CSS/SCSS: kebab-case (Dash) ## Tools - [`Node.js`](https://nodejs.org/) >= 20.4.0 - [`npm`](https://www.npmjs.com/) >= 9.3 - [`git`](https://git-scm.com/) - IDE that supports [`ESLint`](https://eslint.org/) and EditorConfig (I am using [`IntelliJ IDEA`](https://www.jetbrains.com/idea/)) - A SQLite GUI tool (f.ex. [`SQLite Expert Personal`](https://www.sqliteexpert.com/download.html) or [`DBeaver Community`](https://dbeaver.io/download/)) ## Git Branches - `master`: 2.X.X development. If you want to add a new feature, your pull request should base on this. - `1.23.X`: 1.23.X development. If you want to fix a bug for v1 and v2, your pull request should base on this. - All other branches are unused, outdated or for dev. ## Install Dependencies for Development ```bash npm ci ``` ## Dev Server We can start the frontend dev server and the backend dev server in one command. Port `3000` and port `3001` will be used. ```bash npm run dev ``` But sometimes you may want to restart the server without restarting the frontend. In that case, you can run these commands in two terminals: ```bash npm run start-frontend-dev npm run start-server-dev ``` ## Backend Server It binds to `0.0.0.0:3001` by default. 
The backend is an `express.js` server with `socket.io` integrated. It uses `socket.io` to communicate with clients, and most server logic is encapsulated in the `socket.io` handlers. `express.js` is also used to serve: - as an entry point for redirecting to a status page or the dashboard - the frontend built files (`index.html`, `*.js`, `*.css`, etc.) - internal APIs of the status page ### Structure in `/server/` - `jobs/` (Jobs that are running in another process) - `model/` (Object model, auto-mapping to the database table name) - `modules/` (Modified 3rd-party modules) - `monitor-types/` (Monitor Types) - `notification-providers/` (individual notification logic) - `routers/` (Express Routers) - `socket-handler/` (Socket.io Handlers) - `server.js` (Server entry point) - `uptime-kuma-server.js` (UptimeKumaServer class, main logic should be here, but some still in `server.js`) ## Frontend Dev Server It binds to `0.0.0.0:3000` by default. The frontend dev server is used for development only. For production, it is not used. It will be compiled to the `dist` directory instead via `npm run build`. You can use Vue.js devtools Chrome extension for debugging. ### Frontend Details Uptime Kuma Frontend is a single page application (SPA). Most paths are handled by Vue Router. The router is in `src/router.js` Most data in the frontend is stored at the root level, even though the router can navigate to different pages. The data and socket logic are in `src/mixins/socket.js`. ## Database Migration See: ## Unit Test To run unit tests, use the following command: ```bash npm run build npm test ``` ## Dependencies Both frontend and backend share the same `package.json`. However, the frontend dependencies are eventually not used in the production environment, because it is usually also baked into `dist` files. 
So: - Frontend dependencies = "devDependencies" - Examples: - `vue`, `chart.js` - Backend dependencies = "dependencies" - Examples: `socket.io`, `sqlite3` - Development dependencies = "devDependencies" - Examples: `eslint`, `sass` ### Update Dependencies Since previously updating Vite 2.5.10 to 2.6.0 broke the application completely, from now on, it should update the patch release version only. Patch release = the third digit ([Semantic Versioning](https://semver.org/)) If for security / bug / other reasons, a library must be updated, breaking changes need to be checked by the person proposing the change. ## Spelling & Grammar Feel free to correct the spelling and grammar in the documentation or code. English is not the native language of the maintainers. ## Wiki Since there is no way to make a pull request to the wiki, I have set up another repo to do that. ## Maintainer ### What is a maintainer and what are their roles? This project has multiple maintainers who specialise in different areas. Currently, there are 3 maintainers: | Person | Role | Main Area | | ----------------- | ----------------- | ---------------- | | `@louislam` | senior maintainer | major features | | `@chakflying` | junior maintainer | fixing bugs | | `@commanderstorm` | junior maintainer | issue-management | ### Procedures We have a few procedures we follow. These are documented here: -

Set up a Docker Builder (click to expand)

- amd64, armv7 using local. - arm64 using a remote arm64 CPU, as the emulator is too slow and can no longer pass the `npm ci` command. 1. Add the public key to the remote server. 2. Add the remote context. The remote machine must be arm64 and have Docker CE installed. ```bash docker context create oracle-arm64-jp --docker "host=ssh://root@100.107.174.88" ``` 3. Create a new builder. ```bash docker buildx create --name kuma-builder --platform linux/amd64,linux/arm/v7 docker buildx use kuma-builder docker buildx inspect --bootstrap ``` 4. Append the remote context to the builder. ```bash docker buildx create --append --name kuma-builder --platform linux/arm64 oracle-arm64-jp ``` 5. Verify the builder and check if the builder is using `kuma-builder`. `docker buildx inspect kuma-builder docker buildx ls`

-
Release (click to expand)

1. Draft a release note 2. Make sure the repo is cleared 3. If the healthcheck is updated, remember to re-compile it: `npm run build-docker-builder-go` 4. `npm run release-final` with env vars: `VERSION` and `GITHUB_TOKEN` 5. Wait until the `Press any key to continue` 6. `git push` 7. Publish the release note as `1.X.X` 8. Press any key to continue 9. Deploy to the demo server: `npm run deploy-demo-server` These items need to be checked: - [ ] Check all tags are fine on - [ ] Try the Docker image with tag 1.X.X (Clean install / amd64 / arm64 / armv7) - [ ] Try clean installation with Node.js

-
Release Beta (click to expand)

1. Draft a release note, check `This is a pre-release` 2. Make sure the repo is cleared 3. `npm run release-beta` with env vars: `VERSION` and `GITHUB_TOKEN` 4. Wait until the `Press any key to continue` 5. Publish the release note as `1.X.X-beta.X` 6. Press any key to continue

-
Release Wiki (click to expand)

**Setup Repo** ```bash git clone https://github.com/louislam/uptime-kuma-wiki.git cd uptime-kuma-wiki git remote add production https://github.com/louislam/uptime-kuma.wiki.git ``` **Push to Production Wiki** ```bash git pull git push production master ```

-
Change the base of a pull request such as master to 1.23.X (click to expand)

```bash git rebase --onto ```

================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2021 Louis Lam Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================
Uptime Kuma Logo
# Uptime Kuma Uptime Kuma is an easy-to-use self-hosted monitoring tool. [![GitHub Sponsors](https://img.shields.io/github/sponsors/louislam?label=GitHub%20Sponsors)](https://github.com/sponsors/louislam) Translation status Uptime Kuma Dashboard Screenshot ## 🥔 Live Demo Try it! Demo Server (Location: Frankfurt - Germany): It is a temporary live demo, all data will be deleted after 10 minutes. Sponsored by [Uptime Kuma Sponsors](https://github.com/louislam/uptime-kuma#%EF%B8%8F-sponsors). ## ⭐ Features - Monitoring uptime for HTTP(s) / TCP / HTTP(s) Keyword / HTTP(s) Json Query / Websocket / Ping / DNS Record / Push / Steam Game Server / Docker Containers - Fancy, Reactive, Fast UI/UX - Notifications via Telegram, Discord, Gotify, Slack, Pushover, Email (SMTP), and [90+ notification services, click here for the full list](https://github.com/louislam/uptime-kuma/tree/master/src/components/notifications) - 20-second intervals - [Multi Languages](https://github.com/louislam/uptime-kuma/tree/master/src/lang) - Multiple status pages - Map status pages to specific domains - Ping chart - Certificate info - Proxy support - 2FA support ## 🔧 How to Install ### 🐳 Docker Compose ```bash mkdir uptime-kuma cd uptime-kuma curl -o compose.yaml https://raw.githubusercontent.com/louislam/uptime-kuma/master/compose.yaml docker compose up -d ``` Uptime Kuma is now running on all network interfaces (e.g. http://localhost:3001 or http://your-ip:3001). > [!WARNING] > File Systems like **NFS** (Network File System) are **NOT** supported. Please map to a local directory or volume. ### 🐳 Docker Command ```bash docker run -d --restart=always -p 3001:3001 -v uptime-kuma:/app/data --name uptime-kuma louislam/uptime-kuma:2 ``` Uptime Kuma is now running on all network interfaces (e.g. http://localhost:3001 or http://your-ip:3001). If you want to limit exposure to localhost only: ```bash docker run ... -p 127.0.0.1:3001:3001 ... 
``` ### 💪🏻 Non-Docker Requirements: - Platform - ✅ Major Linux distros such as Debian, Ubuntu, Fedora and ArchLinux etc. - ✅ Windows 10 (x64), Windows Server 2012 R2 (x64) or higher - ❌ FreeBSD / OpenBSD / NetBSD - ❌ Replit / Heroku - [Node.js](https://nodejs.org/en/download/) >= 20.4 - [Git](https://git-scm.com/downloads) - [pm2](https://pm2.keymetrics.io/) - For running Uptime Kuma in the background ```bash git clone https://github.com/louislam/uptime-kuma.git cd uptime-kuma npm run setup # Option 1. Try it node server/server.js # (Recommended) Option 2. Run in the background using PM2 # Install PM2 if you don't have it: npm install pm2 -g && pm2 install pm2-logrotate # Start Server pm2 start server/server.js --name uptime-kuma ``` Uptime Kuma is now running on all network interfaces (e.g. http://localhost:3001 or http://your-ip:3001). More useful PM2 Commands ```bash # If you want to see the current console output pm2 monit # If you want to add it to startup pm2 startup && pm2 save ``` ### Advanced Installation If you need more options or need to browse via a reverse proxy, please read: ## 🆙 How to Update Please read: ## 🆕 What's Next? I will assign requests/issues to the next milestone. ## ❤️ Sponsors Thank you so much! (GitHub Sponsors will be updated manually. OpenCollective sponsors will be updated automatically, the list will be cached by GitHub though. It may need some time to be updated) Uptime Kuma Sponsors ## 🖼 More Screenshots Light Mode: Uptime Kuma Light Mode Screenshot of how the Dashboard looks Status Page: Uptime Kuma Status Page Screenshot Settings Page: Uptime Kuma Settings Page Screenshot Telegram Notification Sample: Uptime Kuma Telegram Notification Sample Screenshot ## Motivation - I was looking for a self-hosted monitoring tool like "Uptime Robot", but it is hard to find a suitable one. One of the closest ones is statping. Unfortunately, it is not stable and no longer maintained. - Wanted to build a fancy UI. - Learn Vue 3 and vite.js. 
- Show the power of Bootstrap 5. - Try to use WebSocket with SPA instead of a REST API. - Deploy my first Docker image to Docker Hub. If you love this project, please consider giving it a ⭐. ## 🗣️ Discussion / Ask for Help ⚠️ For any general or technical questions, please don't send me an email, as I am unable to provide support in that manner. I will not respond if you ask questions there. I recommend using Google, GitHub Issues, or Uptime Kuma's subreddit for finding answers to your question. If you cannot find the information you need, feel free to ask: - [GitHub Issues](https://github.com/louislam/uptime-kuma/issues) - [Subreddit (r/UptimeKuma)](https://www.reddit.com/r/UptimeKuma/) My Reddit account: [u/louislamlam](https://reddit.com/u/louislamlam) You can mention me if you ask a question on the subreddit. ## Contributions ### Create Pull Requests Pull requests are awesome. To keep reviews fast and effective, please make sure you’ve [read our pull request guidelines](https://github.com/louislam/uptime-kuma/blob/master/CONTRIBUTING.md#can-i-create-a-pull-request-for-uptime-kuma). ### Test Pull Requests There are a lot of pull requests right now, but I don't have time to test them all. If you want to help, you can check this: ### Test Beta Version Check out the latest beta release here: ### Bug Reports / Feature Requests If you want to report a bug or request a new feature, feel free to open a [new issue](https://github.com/louislam/uptime-kuma/issues). ### Translations If you want to translate Uptime Kuma into your language, please visit [Weblate Readme](https://github.com/louislam/uptime-kuma/blob/master/src/lang/README.md). ### Spelling & Grammar Feel free to correct the grammar in the documentation or code. My mother language is not English and my grammar is not that great. 
================================================ FILE: SECURITY.md ================================================ # Security Policy > [!CAUTION] > Unfortunately, AI slop reports keep wasting my time. It will be closed and you will get banned immediately if you try to do that. ## Reporting a Vulnerability 1. Please report security issues to . 2. Please also create an empty security issue to alert me, as GitHub Advisories do not send a notification, I probably will miss it without this. - Do not report any upstream dependency issues / scan result by any tools. It will be closed immediately without explanations. Unless you have PoC to prove that the upstream issue affected Uptime Kuma. - Do not use the public issue tracker or discuss it in public as it will cause more damage. - Do not report any SSRF issues. ## Do you accept other 3rd-party bug bounty platforms? At this moment, I DO NOT accept other bug bounty platforms, because I am not familiar with these platforms and someone has tried to send a phishing link to me by doing this already. To minimize my own risk, please report through GitHub Advisories only. I will ignore all 3rd-party bug bounty platforms emails. ## Supported Versions ### Uptime Kuma Versions You should use or upgrade to the latest version of Uptime Kuma. All versions are upgradable to the latest version. 
### Upgradable Docker Tags | Tag | Supported | | --------------- | ------------------------------------------------------------------------------------- | | 2 | :white_check_mark: | | 2-slim | :white_check_mark: | | next | :white_check_mark: | | next-slim | :white_check_mark: | | 2-rootless | :white_check_mark: | | 2-slim-rootless | :white_check_mark: | | 1 | [⚠️ Deprecated](https://github.com/louislam/uptime-kuma/wiki/Migration-From-v1-To-v2) | | 1-debian | [⚠️ Deprecated](https://github.com/louislam/uptime-kuma/wiki/Migration-From-v1-To-v2) | | latest | [⚠️ Deprecated](https://github.com/louislam/uptime-kuma/wiki/Migration-From-v1-To-v2) | | debian | [⚠️ Deprecated](https://github.com/louislam/uptime-kuma/wiki/Migration-From-v1-To-v2) | | All other tags | ❌ | ================================================ FILE: compose.yaml ================================================ services: uptime-kuma: image: louislam/uptime-kuma:2 restart: unless-stopped volumes: - ./data:/app/data ports: # : - "3001:3001" ================================================ FILE: config/playwright.config.js ================================================ import { defineConfig, devices } from "@playwright/test"; const port = 30001; export const url = `http://localhost:${port}`; export default defineConfig({ // Look for test files in the "tests" directory, relative to this configuration file. testDir: "../test/e2e/specs", outputDir: "../private/playwright-test-results", fullyParallel: false, locale: "en-US", // Fail the build on CI if you accidentally left test.only in the source code. forbidOnly: !!process.env.CI, // Retry on CI only. retries: process.env.CI ? 2 : 0, // Opt out of parallel tests on CI. workers: 1, // Reporter to use reporter: [ [ "html", { outputFolder: "../private/playwright-report", open: "never", }, ], ], use: { // Base URL to use in actions like `await page.goto('/')`. baseURL: url, // Collect trace when retrying the failed test. 
trace: "on-first-retry",
    },

    // Configure projects for major browsers.
    projects: [
        {
            // One-time setup project; the "specs" project below declares a
            // dependency on it, so it runs first to prepare the test environment.
            name: "run-once setup",
            testMatch: /setup-process\.once\.js/,
            use: { ...devices["Desktop Chrome"] },
        },
        {
            // The actual end-to-end specs.
            name: "specs",
            use: { ...devices["Desktop Chrome"] },
            dependencies: ["run-once setup"],
        },
        /* { name: "firefox", use: { browserName: "firefox" } },*/
    ],

    // Run your local dev server before starting the tests.
    webServer: {
        // Wipe any previous Playwright test data, then boot the Uptime Kuma
        // server in development mode on the dedicated test port with an
        // isolated data directory.
        command: `node extra/remove-playwright-test-data.js && cross-env NODE_ENV=development node server/server.js --port=${port} --data-dir=./data/playwright-test`,
        url,
        reuseExistingServer: false,
        cwd: "../",
    },
});
================================================ FILE: config/vite.config.js ================================================
import vue from "@vitejs/plugin-vue";
import { defineConfig } from "vite";
import visualizer from "rollup-plugin-visualizer";
import viteCompression from "vite-plugin-compression";

const postCssScss = require("postcss-scss");
const postcssRTLCSS = require("postcss-rtlcss");

// Only pre-compress text-like build artifacts; other asset types are skipped.
const viteCompressionFilter = /\.(js|mjs|json|css|html|svg)$/i;

// https://vitejs.dev/config/
export default defineConfig({
    server: {
        port: 3000,
    },
    define: {
        // Build-time constant so the frontend can report its own version.
        FRONTEND_VERSION: JSON.stringify(process.env.npm_package_version),
        // Replace `process.env` references with an empty object in browser code.
        "process.env": {},
    },
    plugins: [
        vue(),
        // Emits a bundle-size report for manual inspection.
        visualizer({
            filename: "tmp/dist-stats.html",
        }),
        // Pre-compress the build output twice: once as gzip, once as brotli.
        viteCompression({
            algorithm: "gzip",
            filter: viteCompressionFilter,
        }),
        viteCompression({
            algorithm: "brotliCompress",
            filter: viteCompressionFilter,
        }),
    ],
    css: {
        postcss: {
            parser: postCssScss,
            map: false,
            // postcss-rtlcss adds right-to-left CSS support.
            plugins: [postcssRTLCSS],
        },
    },
    build: {
        commonjsOptions: {
            include: [/.js$/],
        },
        rollupOptions: {
            output: {
                // Empty body: returns undefined for every module, leaving
                // chunk splitting to Rollup's defaults.
                manualChunks(id, { getModuleInfo, getModuleIds }) {},
            },
        },
    },
});
================================================ FILE: db/knex_init_db.js ================================================
const { R } = require("redbean-node");
const { log } = require("../src/util");

/**
 * ⚠️⚠️⚠️⚠️⚠️⚠️ DO NOT ADD ANYTHING HERE!
* IF YOU NEED TO ADD FIELDS, ADD IT TO ./db/knex_migrations * See ./db/knex_migrations/README.md for more information * @returns {Promise} */ async function createTables() { log.info("mariadb", "Creating basic tables for MariaDB"); const knex = R.knex; // TODO: Should check later if it is really the final patch sql file. // docker_host await knex.schema.createTable("docker_host", (table) => { table.increments("id"); table.integer("user_id").unsigned().notNullable(); table.string("docker_daemon", 255); table.string("docker_type", 255); table.string("name", 255); }); // group await knex.schema.createTable("group", (table) => { table.increments("id"); table.string("name", 255).notNullable(); table.datetime("created_date").notNullable().defaultTo(knex.fn.now()); table.boolean("public").notNullable().defaultTo(false); table.boolean("active").notNullable().defaultTo(true); table.integer("weight").notNullable().defaultTo(1000); table.integer("status_page_id").unsigned(); }); // proxy await knex.schema.createTable("proxy", (table) => { table.increments("id"); table.integer("user_id").unsigned().notNullable(); table.string("protocol", 10).notNullable(); table.string("host", 255).notNullable(); table.smallint("port").notNullable(); // TODO: Maybe a issue with MariaDB, need migration to int table.boolean("auth").notNullable(); table.string("username", 255).nullable(); table.string("password", 255).nullable(); table.boolean("active").notNullable().defaultTo(true); table.boolean("default").notNullable().defaultTo(false); table.datetime("created_date").notNullable().defaultTo(knex.fn.now()); table.index("user_id", "proxy_user_id"); }); // user await knex.schema.createTable("user", (table) => { table.increments("id"); table.string("username", 255).notNullable().unique().collate("utf8_general_ci"); table.string("password", 255); table.boolean("active").notNullable().defaultTo(true); table.string("timezone", 150); table.string("twofa_secret", 64); 
table.boolean("twofa_status").notNullable().defaultTo(false); table.string("twofa_last_token", 6); }); // monitor await knex.schema.createTable("monitor", (table) => { table.increments("id"); table.string("name", 150); table.boolean("active").notNullable().defaultTo(true); table.integer("user_id").unsigned().references("id").inTable("user").onDelete("SET NULL").onUpdate("CASCADE"); table.integer("interval").notNullable().defaultTo(20); table.text("url"); table.string("type", 20); table.integer("weight").defaultTo(2000); table.string("hostname", 255); table.integer("port"); table.datetime("created_date").notNullable().defaultTo(knex.fn.now()); table.string("keyword", 255); table.integer("maxretries").notNullable().defaultTo(0); table.boolean("ignore_tls").notNullable().defaultTo(false); table.boolean("upside_down").notNullable().defaultTo(false); table.integer("maxredirects").notNullable().defaultTo(10); table.text("accepted_statuscodes_json").notNullable().defaultTo('["200-299"]'); table.string("dns_resolve_type", 5); table.string("dns_resolve_server", 255); table.string("dns_last_result", 255); table.integer("retry_interval").notNullable().defaultTo(0); table.string("push_token", 20).defaultTo(null); table.text("method").notNullable().defaultTo("GET"); table.text("body").defaultTo(null); table.text("headers").defaultTo(null); table.text("basic_auth_user").defaultTo(null); table.text("basic_auth_pass").defaultTo(null); table.integer("docker_host").unsigned().references("id").inTable("docker_host"); table.string("docker_container", 255); table.integer("proxy_id").unsigned().references("id").inTable("proxy"); table.boolean("expiry_notification").defaultTo(true); table.text("mqtt_topic"); table.string("mqtt_success_message", 255); table.string("mqtt_username", 255); table.string("mqtt_password", 255); table.string("database_connection_string", 2000); table.text("database_query"); table.string("auth_method", 250); table.text("auth_domain"); 
table.text("auth_workstation"); table.string("grpc_url", 255).defaultTo(null); table.text("grpc_protobuf").defaultTo(null); table.text("grpc_body").defaultTo(null); table.text("grpc_metadata").defaultTo(null); table.text("grpc_method").defaultTo(null); table.text("grpc_service_name").defaultTo(null); table.boolean("grpc_enable_tls").notNullable().defaultTo(false); table.string("radius_username", 255); table.string("radius_password", 255); table.string("radius_calling_station_id", 50); table.string("radius_called_station_id", 50); table.string("radius_secret", 255); table.integer("resend_interval").notNullable().defaultTo(0); table.integer("packet_size").notNullable().defaultTo(56); table.string("game", 255); }); // heartbeat await knex.schema.createTable("heartbeat", (table) => { table.increments("id"); table.boolean("important").notNullable().defaultTo(false); table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table.smallint("status").notNullable(); table.text("msg"); table.datetime("time").notNullable(); table.integer("ping"); table.integer("duration").notNullable().defaultTo(0); table.integer("down_count").notNullable().defaultTo(0); table.index("important"); table.index(["monitor_id", "time"], "monitor_time_index"); table.index("monitor_id"); table.index(["monitor_id", "important", "time"], "monitor_important_time_index"); }); // incident await knex.schema.createTable("incident", (table) => { table.increments("id"); table.string("title", 255).notNullable(); table.text("content", 255).notNullable(); table.string("style", 30).notNullable().defaultTo("warning"); table.datetime("created_date").notNullable().defaultTo(knex.fn.now()); table.datetime("last_updated_date"); table.boolean("pin").notNullable().defaultTo(true); table.boolean("active").notNullable().defaultTo(true); table.integer("status_page_id").unsigned(); }); // maintenance await knex.schema.createTable("maintenance", 
(table) => { table.increments("id"); table.string("title", 150).notNullable(); table.text("description").notNullable(); table.integer("user_id").unsigned().references("id").inTable("user").onDelete("SET NULL").onUpdate("CASCADE"); table.boolean("active").notNullable().defaultTo(true); table.string("strategy", 50).notNullable().defaultTo("single"); table.datetime("start_date"); table.datetime("end_date"); table.time("start_time"); table.time("end_time"); table.string("weekdays", 250).defaultTo("[]"); table.text("days_of_month").defaultTo("[]"); table.integer("interval_day"); table.index("active"); table.index(["strategy", "active"], "manual_active"); table.index("user_id", "maintenance_user_id"); }); // status_page await knex.schema.createTable("status_page", (table) => { table.increments("id"); table.string("slug", 255).notNullable().unique().collate("utf8_general_ci"); table.string("title", 255).notNullable(); table.text("description"); table.string("icon", 255).notNullable(); table.string("theme", 30).notNullable(); table.boolean("published").notNullable().defaultTo(true); table.boolean("search_engine_index").notNullable().defaultTo(true); table.boolean("show_tags").notNullable().defaultTo(false); table.string("password"); table.datetime("created_date").notNullable().defaultTo(knex.fn.now()); table.datetime("modified_date").notNullable().defaultTo(knex.fn.now()); table.text("footer_text"); table.text("custom_css"); table.boolean("show_powered_by").notNullable().defaultTo(true); table.string("google_analytics_tag_id"); }); // maintenance_status_page await knex.schema.createTable("maintenance_status_page", (table) => { table.increments("id"); table .integer("status_page_id") .unsigned() .notNullable() .references("id") .inTable("status_page") .onDelete("CASCADE") .onUpdate("CASCADE"); table .integer("maintenance_id") .unsigned() .notNullable() .references("id") .inTable("maintenance") .onDelete("CASCADE") .onUpdate("CASCADE"); }); // maintenance_timeslot await 
knex.schema.createTable("maintenance_timeslot", (table) => { table.increments("id"); table .integer("maintenance_id") .unsigned() .notNullable() .references("id") .inTable("maintenance") .onDelete("CASCADE") .onUpdate("CASCADE"); table.datetime("start_date").notNullable(); table.datetime("end_date"); table.boolean("generated_next").defaultTo(false); table.index("maintenance_id"); table.index(["maintenance_id", "start_date", "end_date"], "active_timeslot_index"); table.index("generated_next", "generated_next_index"); }); // monitor_group await knex.schema.createTable("monitor_group", (table) => { table.increments("id"); table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table .integer("group_id") .unsigned() .notNullable() .references("id") .inTable("group") .onDelete("CASCADE") .onUpdate("CASCADE"); table.integer("weight").notNullable().defaultTo(1000); table.boolean("send_url").notNullable().defaultTo(false); table.index(["monitor_id", "group_id"], "fk"); }); // monitor_maintenance await knex.schema.createTable("monitor_maintenance", (table) => { table.increments("id"); table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table .integer("maintenance_id") .unsigned() .notNullable() .references("id") .inTable("maintenance") .onDelete("CASCADE") .onUpdate("CASCADE"); table.index("maintenance_id", "maintenance_id_index2"); table.index("monitor_id", "monitor_id_index"); }); // notification await knex.schema.createTable("notification", (table) => { table.increments("id"); table.string("name", 255); table.boolean("active").notNullable().defaultTo(true); table.integer("user_id").unsigned(); table.boolean("is_default").notNullable().defaultTo(false); table.text("config", "longtext"); }); // monitor_notification await knex.schema.createTable("monitor_notification", (table) => { 
table.increments("id").unsigned(); // TODO: no auto increment???? table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table .integer("notification_id") .unsigned() .notNullable() .references("id") .inTable("notification") .onDelete("CASCADE") .onUpdate("CASCADE"); table.index(["monitor_id", "notification_id"], "monitor_notification_index"); }); // tag await knex.schema.createTable("tag", (table) => { table.increments("id"); table.string("name", 255).notNullable(); table.string("color", 255).notNullable(); table.datetime("created_date").notNullable().defaultTo(knex.fn.now()); }); // monitor_tag await knex.schema.createTable("monitor_tag", (table) => { table.increments("id"); table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table .integer("tag_id") .unsigned() .notNullable() .references("id") .inTable("tag") .onDelete("CASCADE") .onUpdate("CASCADE"); table.text("value"); }); // monitor_tls_info await knex.schema.createTable("monitor_tls_info", (table) => { table.increments("id"); table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table.text("info_json"); }); // notification_sent_history await knex.schema.createTable("notification_sent_history", (table) => { table.increments("id"); table.string("type", 50).notNullable(); table.integer("monitor_id").unsigned().notNullable(); table.integer("days").notNullable(); table.unique(["type", "monitor_id", "days"]); table.index(["type", "monitor_id", "days"], "good_index"); }); // setting await knex.schema.createTable("setting", (table) => { table.increments("id"); table.string("key", 200).notNullable().unique().collate("utf8_general_ci"); table.text("value"); table.string("type", 20); }); // status_page_cname await knex.schema.createTable("status_page_cname", (table) => { 
table.increments("id"); table .integer("status_page_id") .unsigned() .references("id") .inTable("status_page") .onDelete("CASCADE") .onUpdate("CASCADE"); table.string("domain").notNullable().unique().collate("utf8_general_ci"); }); /********************* * Converted Patch here *********************/ // 2023-06-30-1348-http-body-encoding.js // ALTER TABLE monitor ADD http_body_encoding VARCHAR(25); // UPDATE monitor SET http_body_encoding = 'json' WHERE (type = 'http' or type = 'keyword') AND http_body_encoding IS NULL; await knex.schema.table("monitor", function (table) { table.string("http_body_encoding", 25); }); await knex("monitor") .where(function () { this.where("type", "http").orWhere("type", "keyword"); }) .whereNull("http_body_encoding") .update({ http_body_encoding: "json", }); // 2023-06-30-1354-add-description-monitor.js // ALTER TABLE monitor ADD description TEXT default null; await knex.schema.table("monitor", function (table) { table.text("description").defaultTo(null); }); // 2023-06-30-1357-api-key-table.js /* CREATE TABLE [api_key] ( [id] INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, [key] VARCHAR(255) NOT NULL, [name] VARCHAR(255) NOT NULL, [user_id] INTEGER NOT NULL, [created_date] DATETIME DEFAULT (DATETIME('now')) NOT NULL, [active] BOOLEAN DEFAULT 1 NOT NULL, [expires] DATETIME DEFAULT NULL, CONSTRAINT FK_user FOREIGN KEY ([user_id]) REFERENCES [user]([id]) ON DELETE CASCADE ON UPDATE CASCADE ); */ await knex.schema.createTable("api_key", function (table) { table.increments("id").primary(); table.string("key", 255).notNullable(); table.string("name", 255).notNullable(); table .integer("user_id") .unsigned() .notNullable() .references("id") .inTable("user") .onDelete("CASCADE") .onUpdate("CASCADE"); table.dateTime("created_date").defaultTo(knex.fn.now()).notNullable(); table.boolean("active").defaultTo(1).notNullable(); table.dateTime("expires").defaultTo(null); }); // 2023-06-30-1400-monitor-tls.js /* ALTER TABLE monitor ADD tls_ca TEXT default 
null; ALTER TABLE monitor ADD tls_cert TEXT default null; ALTER TABLE monitor ADD tls_key TEXT default null; */ await knex.schema.table("monitor", function (table) { table.text("tls_ca").defaultTo(null); table.text("tls_cert").defaultTo(null); table.text("tls_key").defaultTo(null); }); // 2023-06-30-1401-maintenance-cron.js /* -- 999 characters. https://stackoverflow.com/questions/46134830/maximum-length-for-cron-job DROP TABLE maintenance_timeslot; ALTER TABLE maintenance ADD cron TEXT; ALTER TABLE maintenance ADD timezone VARCHAR(255); ALTER TABLE maintenance ADD duration INTEGER; */ await knex.schema.dropTableIfExists("maintenance_timeslot").table("maintenance", function (table) { table.text("cron"); table.string("timezone", 255); table.integer("duration"); }); // 2023-06-30-1413-add-parent-monitor.js. /* ALTER TABLE monitor ADD parent INTEGER REFERENCES [monitor] ([id]) ON DELETE SET NULL ON UPDATE CASCADE; */ await knex.schema.table("monitor", function (table) { table.integer("parent").unsigned().references("id").inTable("monitor").onDelete("SET NULL").onUpdate("CASCADE"); }); /* patch-add-invert-keyword.sql ALTER TABLE monitor ADD invert_keyword BOOLEAN default 0 not null; */ await knex.schema.table("monitor", function (table) { table.boolean("invert_keyword").defaultTo(0).notNullable(); }); /* patch-added-json-query.sql ALTER TABLE monitor ADD json_path TEXT; ALTER TABLE monitor ADD expected_value VARCHAR(255); */ await knex.schema.table("monitor", function (table) { table.text("json_path"); table.string("expected_value", 255); }); /* patch-added-kafka-producer.sql ALTER TABLE monitor ADD kafka_producer_topic VARCHAR(255); ALTER TABLE monitor ADD kafka_producer_brokers TEXT; ALTER TABLE monitor ADD kafka_producer_ssl INTEGER; ALTER TABLE monitor ADD kafka_producer_allow_auto_topic_creation VARCHAR(255); ALTER TABLE monitor ADD kafka_producer_sasl_options TEXT; ALTER TABLE monitor ADD kafka_producer_message TEXT; */ await knex.schema.table("monitor", function 
(table) { table.string("kafka_producer_topic", 255); table.text("kafka_producer_brokers"); // patch-fix-kafka-producer-booleans.sql table.boolean("kafka_producer_ssl").defaultTo(0).notNullable(); table.boolean("kafka_producer_allow_auto_topic_creation").defaultTo(0).notNullable(); table.text("kafka_producer_sasl_options"); table.text("kafka_producer_message"); }); /* patch-add-certificate-expiry-status-page.sql ALTER TABLE status_page ADD show_certificate_expiry BOOLEAN default 0 NOT NULL; */ await knex.schema.table("status_page", function (table) { table.boolean("show_certificate_expiry").defaultTo(0).notNullable(); }); /* patch-monitor-oauth-cc.sql ALTER TABLE monitor ADD oauth_client_id TEXT default null; ALTER TABLE monitor ADD oauth_client_secret TEXT default null; ALTER TABLE monitor ADD oauth_token_url TEXT default null; ALTER TABLE monitor ADD oauth_scopes TEXT default null; ALTER TABLE monitor ADD oauth_auth_method TEXT default null; */ await knex.schema.table("monitor", function (table) { table.text("oauth_client_id").defaultTo(null); table.text("oauth_client_secret").defaultTo(null); table.text("oauth_token_url").defaultTo(null); table.text("oauth_scopes").defaultTo(null); table.text("oauth_auth_method").defaultTo(null); }); /* patch-add-timeout-monitor.sql ALTER TABLE monitor ADD timeout DOUBLE default 0 not null; */ await knex.schema.table("monitor", function (table) { table.double("timeout").defaultTo(0).notNullable(); }); /* patch-add-gamedig-given-port.sql ALTER TABLE monitor ADD gamedig_given_port_only BOOLEAN default 1 not null; */ await knex.schema.table("monitor", function (table) { table.boolean("gamedig_given_port_only").defaultTo(1).notNullable(); }); log.info("mariadb", "Created basic tables for MariaDB"); } module.exports = { createTables, }; ================================================ FILE: db/knex_migrations/2023-08-16-0000-create-uptime.js ================================================ exports.up = function (knex) { return 
knex.schema .createTable("stat_minutely", function (table) { table.increments("id"); table.comment("This table contains the minutely aggregate statistics for each monitor"); table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table.integer("timestamp").notNullable().comment("Unix timestamp rounded down to the nearest minute"); table.float("ping").notNullable().comment("Average ping in milliseconds"); table.smallint("up").notNullable(); table.smallint("down").notNullable(); table.unique(["monitor_id", "timestamp"]); }) .createTable("stat_daily", function (table) { table.increments("id"); table.comment("This table contains the daily aggregate statistics for each monitor"); table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table.integer("timestamp").notNullable().comment("Unix timestamp rounded down to the nearest day"); table.float("ping").notNullable().comment("Average ping in milliseconds"); table.smallint("up").notNullable(); table.smallint("down").notNullable(); table.unique(["monitor_id", "timestamp"]); }); }; exports.down = function (knex) { return knex.schema.dropTable("stat_minutely").dropTable("stat_daily"); }; ================================================ FILE: db/knex_migrations/2023-08-18-0301-heartbeat.js ================================================ exports.up = function (knex) { // Add new column heartbeat.end_time return knex.schema.alterTable("heartbeat", function (table) { table.datetime("end_time").nullable().defaultTo(null); }); }; exports.down = function (knex) { // Rename heartbeat.start_time to heartbeat.time return knex.schema.alterTable("heartbeat", function (table) { table.dropColumn("end_time"); }); }; ================================================ FILE: db/knex_migrations/2023-09-29-0000-heartbeat-retires.js ================================================ 
exports.up = function (knex) { // Add new column heartbeat.retries return knex.schema.alterTable("heartbeat", function (table) { table.integer("retries").notNullable().defaultTo(0); }); }; exports.down = function (knex) { return knex.schema.alterTable("heartbeat", function (table) { table.dropColumn("retries"); }); }; ================================================ FILE: db/knex_migrations/2023-10-08-0000-mqtt-query.js ================================================ exports.up = function (knex) { // Add new column monitor.mqtt_check_type return knex.schema.alterTable("monitor", function (table) { table.string("mqtt_check_type", 255).notNullable().defaultTo("keyword"); }); }; exports.down = function (knex) { // Drop column monitor.mqtt_check_type return knex.schema.alterTable("monitor", function (table) { table.dropColumn("mqtt_check_type"); }); }; ================================================ FILE: db/knex_migrations/2023-10-11-1915-push-token-to-32.js ================================================ exports.up = function (knex) { // update monitor.push_token to 32 length return knex.schema.alterTable("monitor", function (table) { table.string("push_token", 32).alter(); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.string("push_token", 20).alter(); }); }; ================================================ FILE: db/knex_migrations/2023-10-16-0000-create-remote-browsers.js ================================================ exports.up = function (knex) { return knex.schema .createTable("remote_browser", function (table) { table.increments("id"); table.string("name", 255).notNullable(); table.string("url", 255).notNullable(); table.integer("user_id").unsigned(); }) .alterTable("monitor", function (table) { // Add new column monitor.remote_browser table .integer("remote_browser") .nullable() .defaultTo(null) .unsigned() .index() .references("id") .inTable("remote_browser"); }); }; exports.down = function 
(knex) { return knex.schema.dropTable("remote_browser").alterTable("monitor", function (table) { table.dropColumn("remote_browser"); }); }; ================================================ FILE: db/knex_migrations/2023-12-20-0000-alter-status-page.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("status_page", function (table) { table.integer("auto_refresh_interval").defaultTo(300).unsigned(); }); }; exports.down = function (knex) { return knex.schema.alterTable("status_page", function (table) { table.dropColumn("auto_refresh_interval"); }); }; ================================================ FILE: db/knex_migrations/2023-12-21-0000-stat-ping-min-max.js ================================================ exports.up = function (knex) { return knex.schema .alterTable("stat_daily", function (table) { table .float("ping_min") .notNullable() .defaultTo(0) .comment("Minimum ping during this period in milliseconds"); table .float("ping_max") .notNullable() .defaultTo(0) .comment("Maximum ping during this period in milliseconds"); }) .alterTable("stat_minutely", function (table) { table .float("ping_min") .notNullable() .defaultTo(0) .comment("Minimum ping during this period in milliseconds"); table .float("ping_max") .notNullable() .defaultTo(0) .comment("Maximum ping during this period in milliseconds"); }); }; exports.down = function (knex) { return knex.schema .alterTable("stat_daily", function (table) { table.dropColumn("ping_min"); table.dropColumn("ping_max"); }) .alterTable("stat_minutely", function (table) { table.dropColumn("ping_min"); table.dropColumn("ping_max"); }); }; ================================================ FILE: db/knex_migrations/2023-12-22-0000-hourly-uptime.js ================================================ exports.up = function (knex) { return knex.schema.createTable("stat_hourly", function (table) { table.increments("id"); table.comment("This table contains the hourly aggregate 
statistics for each monitor"); table .integer("monitor_id") .unsigned() .notNullable() .references("id") .inTable("monitor") .onDelete("CASCADE") .onUpdate("CASCADE"); table.integer("timestamp").notNullable().comment("Unix timestamp rounded down to the nearest hour"); table.float("ping").notNullable().comment("Average ping in milliseconds"); table.float("ping_min").notNullable().defaultTo(0).comment("Minimum ping during this period in milliseconds"); table.float("ping_max").notNullable().defaultTo(0).comment("Maximum ping during this period in milliseconds"); table.smallint("up").notNullable(); table.smallint("down").notNullable(); table.unique(["monitor_id", "timestamp"]); }); }; exports.down = function (knex) { return knex.schema.dropTable("stat_hourly"); }; ================================================ FILE: db/knex_migrations/2024-01-22-0000-stats-extras.js ================================================ exports.up = function (knex) { return knex.schema .alterTable("stat_daily", function (table) { table.text("extras").defaultTo(null).comment("Extra statistics during this time period"); }) .alterTable("stat_minutely", function (table) { table.text("extras").defaultTo(null).comment("Extra statistics during this time period"); }) .alterTable("stat_hourly", function (table) { table.text("extras").defaultTo(null).comment("Extra statistics during this time period"); }); }; exports.down = function (knex) { return knex.schema .alterTable("stat_daily", function (table) { table.dropColumn("extras"); }) .alterTable("stat_minutely", function (table) { table.dropColumn("extras"); }) .alterTable("stat_hourly", function (table) { table.dropColumn("extras"); }); }; ================================================ FILE: db/knex_migrations/2024-04-26-0000-snmp-monitor.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.string("snmp_oid").defaultTo(null); 
table.enum("snmp_version", ["1", "2c", "3"]).defaultTo("2c"); table.string("json_path_operator").defaultTo(null); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("snmp_oid"); table.dropColumn("snmp_version"); table.dropColumn("json_path_operator"); }); }; ================================================ FILE: db/knex_migrations/2024-08-24-000-add-cache-bust.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.boolean("cache_bust").notNullable().defaultTo(false); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("cache_bust"); }); }; ================================================ FILE: db/knex_migrations/2024-08-24-0000-conditions.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.text("conditions").notNullable().defaultTo("[]"); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("conditions"); }); }; ================================================ FILE: db/knex_migrations/2024-10-1315-rabbitmq-monitor.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.text("rabbitmq_nodes"); table.string("rabbitmq_username"); table.string("rabbitmq_password"); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("rabbitmq_nodes"); table.dropColumn("rabbitmq_username"); table.dropColumn("rabbitmq_password"); }); }; ================================================ FILE: db/knex_migrations/2024-10-31-0000-fix-snmp-monitor.js ================================================ exports.up = function (knex) { return 
knex("monitor").whereNull("json_path_operator").update("json_path_operator", "=="); }; exports.down = function (knex) { // changing the json_path_operator back to null for all "==" is not possible anymore // we have lost the context which fields have been set explicitely in >= v2.0 and which would need to be reverted }; ================================================ FILE: db/knex_migrations/2024-11-27-1927-fix-info-json-data-type.js ================================================ // Update info_json column to LONGTEXT mainly for MariaDB exports.up = function (knex) { return knex.schema.alterTable("monitor_tls_info", function (table) { table.text("info_json", "longtext").alter(); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor_tls_info", function (table) { table.text("info_json", "text").alter(); }); }; ================================================ FILE: db/knex_migrations/2025-01-01-0000-add-smtp.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.string("smtp_security").defaultTo(null); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("smtp_security"); }); }; ================================================ FILE: db/knex_migrations/2025-02-15-2312-add-wstest.js ================================================ // Add websocket ignore headers and websocket subprotocol exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.boolean("ws_ignore_sec_websocket_accept_header").notNullable().defaultTo(false); table.string("ws_subprotocol", 255).notNullable().defaultTo(""); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("ws_ignore_sec_websocket_accept_header"); table.dropColumn("ws_subprotocol"); }); }; ================================================ FILE: 
db/knex_migrations/2025-02-17-2142-generalize-analytics.js ================================================ // Udpate status_page table to generalize analytics fields exports.up = function (knex) { return knex.schema .alterTable("status_page", function (table) { table.renameColumn("google_analytics_tag_id", "analytics_id"); table.string("analytics_script_url"); table.enu("analytics_type", ["google", "umami", "plausible", "matomo"]).defaultTo(null); }) .then(() => { // After a succesful migration, add google as default for previous pages knex("status_page").whereNotNull("analytics_id").update({ analytics_type: "google", }); }); }; exports.down = function (knex) { return knex.schema.alterTable("status_page", function (table) { table.renameColumn("analytics_id", "google_analytics_tag_id"); table.dropColumn("analytics_script_url"); table.dropColumn("analytics_type"); }); }; ================================================ FILE: db/knex_migrations/2025-03-04-0000-ping-advanced-options.js ================================================ /* SQL: ALTER TABLE monitor ADD ping_count INTEGER default 1 not null; ALTER TABLE monitor ADD ping_numeric BOOLEAN default true not null; ALTER TABLE monitor ADD ping_per_request_timeout INTEGER default 2 not null; */ exports.up = function (knex) { // Add new columns to table monitor return knex.schema.alterTable("monitor", function (table) { table.integer("ping_count").defaultTo(1).notNullable(); table.boolean("ping_numeric").defaultTo(true).notNullable(); table.integer("ping_per_request_timeout").defaultTo(2).notNullable(); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("ping_count"); table.dropColumn("ping_numeric"); table.dropColumn("ping_per_request_timeout"); }); }; ================================================ FILE: db/knex_migrations/2025-03-25-0127-fix-5721.js ================================================ // Fix #5721: Change proxy port column type to 
integer to support larger port numbers exports.up = function (knex) { return knex.schema.alterTable("proxy", function (table) { table.integer("port").alter(); }); }; exports.down = function (knex) { return knex.schema.alterTable("proxy", function (table) { table.smallint("port").alter(); }); }; ================================================ FILE: db/knex_migrations/2025-05-09-0000-add-custom-url.js ================================================ // Add column custom_url to monitor_group table exports.up = function (knex) { return knex.schema.alterTable("monitor_group", function (table) { table.text("custom_url", "text"); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor_group", function (table) { table.dropColumn("custom_url"); }); }; ================================================ FILE: db/knex_migrations/2025-06-03-0000-add-ip-family.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.boolean("ip_family").defaultTo(null); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("ip_family"); }); }; ================================================ FILE: db/knex_migrations/2025-06-11-0000-add-manual-monitor.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.string("manual_status").defaultTo(null); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("manual_status"); }); }; ================================================ FILE: db/knex_migrations/2025-06-13-0000-maintenance-add-last-start.js ================================================ // Add column last_start_date to maintenance table exports.up = async function (knex) { await knex.schema.alterTable("maintenance", function (table) { 
table.datetime("last_start_date"); }); // Perform migration for recurring-interval strategy const recurringMaintenances = await knex("maintenance") .where({ strategy: "recurring-interval", cron: "* * * * *", }) .select("id", "start_time"); // eslint-disable-next-line camelcase const maintenanceUpdates = recurringMaintenances.map(async ({ start_time, id }) => { // eslint-disable-next-line camelcase const [hourStr, minuteStr] = start_time.split(":"); const hour = parseInt(hourStr, 10); const minute = parseInt(minuteStr, 10); const cron = `${minute} ${hour} * * *`; await knex("maintenance").where({ id }).update({ cron }); }); await Promise.all(maintenanceUpdates); }; exports.down = function (knex) { return knex.schema.alterTable("maintenance", function (table) { table.dropColumn("last_start_date"); }); }; ================================================ FILE: db/knex_migrations/2025-06-15-0001-manual-monitor-fix.js ================================================ // Fix: Change manual_status column type to smallint exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.smallint("manual_status").alter(); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.string("manual_status").alter(); }); }; ================================================ FILE: db/knex_migrations/2025-06-24-0000-add-audience-to-oauth.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.string("oauth_audience").nullable().defaultTo(null); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.string("oauth_audience").alter(); }); }; ================================================ FILE: db/knex_migrations/2025-07-17-0000-mqtt-websocket-path.js ================================================ exports.up = function (knex) { // Add new column 
monitor.mqtt_websocket_path return knex.schema.alterTable("monitor", function (table) { table.string("mqtt_websocket_path", 255).nullable(); }); }; exports.down = function (knex) { // Drop column monitor.mqtt_websocket_path return knex.schema.alterTable("monitor", function (table) { table.dropColumn("mqtt_websocket_path"); }); }; ================================================ FILE: db/knex_migrations/2025-09-02-0000-add-domain-expiry.js ================================================ exports.up = function (knex) { return knex.schema .alterTable("monitor", function (table) { table.boolean("domain_expiry_notification").defaultTo(1); }) .createTable("domain_expiry", (table) => { table.increments("id"); table.datetime("last_check"); // Use VARCHAR(255) for MySQL/MariaDB compatibility with unique constraint // Maximum domain name length is 253 characters (255 octets on the wire) table.string("domain", 255).unique().notNullable(); table.datetime("expiry"); table.integer("last_expiry_notification_sent").defaultTo(null); }); }; exports.down = function (knex) { return knex.schema .alterTable("monitor", function (table) { table.boolean("domain_expiry_notification").alter(); }) .dropTable("domain_expiry"); }; ================================================ FILE: db/knex_migrations/2025-10-14-0000-add-ip-family-fix.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { // Fix ip_family, change to varchar instead of boolean // possible values are "ipv4" and "ipv6" table.string("ip_family", 4).defaultTo(null).alter(); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { // Rollback to boolean table.boolean("ip_family").defaultTo(null).alter(); }); }; ================================================ FILE: db/knex_migrations/2025-10-15-0000-stat-table-fix.js ================================================ // Fix for #4315. 
Logically, setting it to 0 ping may not be correct, but it is better than throwing errors exports.up = function (knex) { return knex.schema .alterTable("stat_daily", function (table) { table.integer("ping").defaultTo(0).alter(); }) .alterTable("stat_hourly", function (table) { table.integer("ping").defaultTo(0).alter(); }) .alterTable("stat_minutely", function (table) { table.integer("ping").defaultTo(0).alter(); }); }; exports.down = function (knex) { return knex.schema .alterTable("stat_daily", function (table) { table.integer("ping").alter(); }) .alterTable("stat_hourly", function (table) { table.integer("ping").alter(); }) .alterTable("stat_minutely", function (table) { table.integer("ping").alter(); }); }; ================================================ FILE: db/knex_migrations/2025-10-15-0001-add-monitor-response-config.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.boolean("save_response").notNullable().defaultTo(false); table.boolean("save_error_response").notNullable().defaultTo(true); table.integer("response_max_length").notNullable().defaultTo(1024); // Default 1KB }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("save_response"); table.dropColumn("save_error_response"); table.dropColumn("response_max_length"); }); }; ================================================ FILE: db/knex_migrations/2025-10-15-0002-add-response-to-heartbeat.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("heartbeat", function (table) { table.text("response").nullable().defaultTo(null); }); }; exports.down = function (knex) { return knex.schema.alterTable("heartbeat", function (table) { table.dropColumn("response"); }); }; ================================================ FILE: db/knex_migrations/2025-10-24-0000-show-only-last-heartbeat.js 
================================================

// Status-page toggle: when enabled, only the latest heartbeat is shown.
exports.up = function (knex) {
    // Add new column status_page.show_only_last_heartbeat
    return knex.schema.alterTable("status_page", function (table) {
        table.boolean("show_only_last_heartbeat").notNullable().defaultTo(false);
    });
};

exports.down = function (knex) {
    // Drop column status_page.show_only_last_heartbeat
    return knex.schema.alterTable("status_page", function (table) {
        table.dropColumn("show_only_last_heartbeat");
    });
};

================================================
FILE: db/knex_migrations/2025-12-09-0000-add-system-service-monitor.js
================================================

/**
 * Add monitor.system_service_name for the system-service monitor type.
 * @param {import("knex").Knex} knex The Knex.js instance for database interaction.
 * @returns {Promise} */
exports.up = async (knex) => {
    await knex.schema.alterTable("monitor", (table) => {
        table.string("system_service_name");
    });
};

/**
 * Drop monitor.system_service_name.
 * @param {import("knex").Knex} knex The Knex.js instance for database interaction.
 * @returns {Promise} */
exports.down = async (knex) => {
    await knex.schema.alterTable("monitor", (table) => {
        table.dropColumn("system_service_name");
    });
};

================================================
FILE: db/knex_migrations/2025-12-17-0000-add-globalping-monitor.js
================================================

// Columns used by the Globalping monitor type.
exports.up = function (knex) {
    // Add new columns
    return knex.schema.alterTable("monitor", function (table) {
        table.string("subtype", 10).nullable();
        table.string("location", 255).nullable();
        table.string("protocol", 20).nullable();
    });
};

exports.down = function (knex) {
    // Drop columns
    return knex.schema.alterTable("monitor", function (table) {
        table.dropColumn("subtype");
        table.dropColumn("location");
        table.dropColumn("protocol");
    });
};

================================================
FILE: db/knex_migrations/2025-12-22-0121-optimize-important-indexes.js
================================================

exports.up = async function (knex) {
    const isSQLite = knex.client.dialect ===
"sqlite3"; if (isSQLite) { // For SQLite: Use partial indexes with WHERE important = 1 // Drop existing indexes using IF EXISTS await knex.raw("DROP INDEX IF EXISTS monitor_important_time_index"); await knex.raw("DROP INDEX IF EXISTS heartbeat_important_index"); // Create partial indexes with predicate await knex.schema.alterTable("heartbeat", function (table) { table.index(["monitor_id", "time"], "monitor_important_time_index", { predicate: knex.whereRaw("important = 1"), }); table.index(["important"], "heartbeat_important_index", { predicate: knex.whereRaw("important = 1"), }); }); } // For MariaDB/MySQL: No changes (partial indexes not supported) }; exports.down = async function (knex) { const isSQLite = knex.client.dialect === "sqlite3"; if (isSQLite) { // Restore original indexes await knex.raw("DROP INDEX IF EXISTS monitor_important_time_index"); await knex.raw("DROP INDEX IF EXISTS heartbeat_important_index"); await knex.schema.alterTable("heartbeat", function (table) { table.index(["monitor_id", "important", "time"], "monitor_important_time_index"); table.index(["important"]); }); } // For MariaDB/MySQL: No changes }; ================================================ FILE: db/knex_migrations/2025-12-29-0000-remove-line-notify.js ================================================ exports.up = async function (knex) { const notifications = await knex("notification").select("id", "config"); const lineNotifyIDs = []; for (const { id, config } of notifications) { try { const parsedConfig = JSON.parse(config || "{}"); const type = typeof parsedConfig.type === "string" ? parsedConfig.type.toLowerCase() : ""; if (type === "linenotify" || type === "line-notify") { lineNotifyIDs.push(id); } } catch (error) { // Ignore invalid JSON blobs here; they are handled elsewhere in the app. 
} } if (lineNotifyIDs.length === 0) { return; } await knex.transaction(async (trx) => { await trx("monitor_notification").whereIn("notification_id", lineNotifyIDs).del(); await trx("notification").whereIn("id", lineNotifyIDs).del(); }); }; exports.down = async function () { // Removal of LINE Notify configs is not reversible. }; ================================================ FILE: db/knex_migrations/2025-12-31-2143-add-snmp-v3-username.js ================================================ exports.up = async function (knex) { await knex.schema.alterTable("monitor", (table) => { table.string("snmp_v3_username", 255); }); }; exports.down = async function (knex) { await knex.schema.alterTable("monitor", (table) => { table.dropColumn("snmp_v3_username"); }); }; ================================================ FILE: db/knex_migrations/2026-01-02-0551-dns-last-result-to-text.js ================================================ // Change dns_last_result column from VARCHAR(255) to TEXT to handle longer DNS TXT records exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.text("dns_last_result").alter(); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.string("dns_last_result", 255).alter(); }); }; ================================================ FILE: db/knex_migrations/2026-01-02-0713-gamedig-v4-to-v5.js ================================================ // Migration to update monitor.game from GameDig v4 to v5 game IDs // Reference: https://github.com/gamedig/node-gamedig/blob/master/MIGRATE_IDS.md // Lookup table mapping v4 game IDs to v5 game IDs const gameDig4to5IdMap = { americasarmypg: "aapg", "7d2d": "sdtd", as: "actionsource", ageofchivalry: "aoc", arkse: "ase", arcasimracing: "asr08", arma: "aaa", arma2oa: "a2oa", armacwa: "acwa", armar: "armaresistance", armare: "armareforger", armagetron: "armagetronadvanced", bat1944: "battalion1944", bf1942: "battlefield1942", 
bfv: "battlefieldvietnam", bf2: "battlefield2", bf2142: "battlefield2142", bfbc2: "bbc2", bf3: "battlefield3", bf4: "battlefield4", bfh: "battlefieldhardline", bd: "basedefense", bs: "bladesymphony", buildandshoot: "bas", cod4: "cod4mw", callofjuarez: "coj", chivalry: "cmw", commandos3: "c3db", cacrenegade: "cacr", contactjack: "contractjack", cs15: "counterstrike15", cs16: "counterstrike16", cs2: "counterstrike2", crossracing: "crce", darkesthour: "dhe4445", daysofwar: "dow", deadlydozenpt: "ddpt", dh2005: "deerhunter2005", dinodday: "ddd", dirttrackracing2: "dtr2", dmc: "deathmatchclassic", dnl: "dal", drakan: "dootf", dys: "dystopia", em: "empiresmod", empyrion: "egs", f12002: "formulaone2002", flashpointresistance: "ofr", fivem: "gta5f", forrest: "theforrest", graw: "tcgraw", graw2: "tcgraw2", giantscitizenkabuto: "gck", ges: "goldeneyesource", gore: "gus", hldm: "hld", hldms: "hlds", hlopfor: "hlof", hl2dm: "hl2d", hidden: "thehidden", had2: "hiddendangerous2", igi2: "i2cs", il2: "il2sturmovik", insurgencymic: "imic", isle: "theisle", jamesbondnightfire: "jb007n", jc2mp: "jc2m", jc3mp: "jc3m", kingpin: "kloc", kisspc: "kpctnc", kspdmp: "kspd", kzmod: "kreedzclimbing", left4dead: "l4d", left4dead2: "l4d2", m2mp: "m2m", mohsh: "mohaas", mohbt: "mohaab", mohab: "moha", moh2010: "moh", mohwf: "mohw", minecraftbe: "mbe", mtavc: "gtavcmta", mtasa: "gtasamta", ns: "naturalselection", ns2: "naturalselection2", nwn: "neverwinternights", nwn2: "neverwinternights2", nolf: "tonolf", nolf2: "nolf2asihw", pvkii: "pvak2", ps: "postscriptum", primalcarnage: "pce", pc: "projectcars", pc2: "projectcars2", prbf2: "prb2", przomboid: "projectzomboid", quake1: "quake", quake3: "q3a", ragdollkungfu: "rdkf", r6: "rainbowsix", r6roguespear: "rs2rs", r6ravenshield: "rs3rs", redorchestraost: "roo4145", redm: "rdr2r", riseofnations: "ron", rs2: "rs2v", samp: "gtasam", saomp: "gtasao", savage2: "s2ats", ss: "serioussam", ss2: "serioussam2", ship: "theship", sinep: "sinepisodes", 
sonsoftheforest: "sotf", swbf: "swb", swbf2: "swb2", swjk: "swjkja", swjk2: "swjk2jo", takeonhelicopters: "toh", tf2: "teamfortress2", terraria: "terrariatshock", tribes1: "t1s", ut: "unrealtournament", ut2003: "unrealtournament2003", ut2004: "unrealtournament2004", ut3: "unrealtournament3", v8supercar: "v8sc", vcmp: "vcm", vs: "vampireslayer", wheeloftime: "wot", wolfenstein2009: "wolfenstein", wolfensteinet: "wet", wurm: "wurmunlimited", }; /** * Migrate game IDs from v4 to v5 * @param {import("knex").Knex} knex - Knex instance * @returns {Promise} */ exports.up = async function (knex) { await knex.transaction(async (trx) => { // Get all monitors that use the gamedig type const monitors = await trx("monitor").select("id", "game").where("type", "gamedig").whereNotNull("game"); // Update each monitor with the new game ID if it needs migration for (const monitor of monitors) { const oldGameId = monitor.game; const newGameId = gameDig4to5IdMap[oldGameId]; if (newGameId) { await trx("monitor").where("id", monitor.id).update({ game: newGameId }); } } }); }; /** * Revert game IDs from v5 back to v4 * @param {import("knex").Knex} knex - Knex instance * @returns {Promise} */ exports.down = async function (knex) { // Create reverse mapping from the same LUT const gameDig5to4IdMap = Object.fromEntries(Object.entries(gameDig4to5IdMap).map(([v4, v5]) => [v5, v4])); await knex.transaction(async (trx) => { // Get all monitors that use the gamedig type const monitors = await trx("monitor").select("id", "game").where("type", "gamedig").whereNotNull("game"); // Revert each monitor back to the old game ID if it was migrated for (const monitor of monitors) { const newGameId = monitor.game; const oldGameId = gameDig5to4IdMap[newGameId]; if (oldGameId) { await trx("monitor").where("id", monitor.id).update({ game: oldGameId }); } } }); }; ================================================ FILE: db/knex_migrations/2026-01-05-0000-add-rss-title.js 
================================================

// Adds an optional custom RSS feed title per status page.
exports.up = async function (knex) {
    await knex.schema.alterTable("status_page", function (table) {
        table.string("rss_title", 255);
    });
};

exports.down = function (knex) {
    return knex.schema.alterTable("status_page", function (table) {
        table.dropColumn("rss_title");
    });
};

================================================
FILE: db/knex_migrations/2026-01-05-0000-add-tls-monitor.js
================================================

// Adds the expected TLS alert value used by the TLS monitor type.
exports.up = function (knex) {
    return knex.schema.alterTable("monitor", function (table) {
        table.string("expected_tls_alert", 50).defaultTo(null);
    });
};

exports.down = function (knex) {
    return knex.schema.alterTable("monitor", function (table) {
        table.dropColumn("expected_tls_alert");
    });
};

================================================
FILE: db/knex_migrations/2026-01-06-0000-fix-domain-expiry-column-type.js
================================================

// Ensure domain column is VARCHAR(255) across all database types.
// This migration ensures MySQL, SQLite, and MariaDB have consistent column type,
// even if a user installed 2.1.0-beta.0 or 2.1.0-beta.1 which had TEXT type for this column.
// Maximum domain name length is 253 characters (255 octets on the wire).
// Note: The unique constraint is already present from the original migration.
exports.up = function (knex) { return knex.schema.alterTable("domain_expiry", function (table) { table.string("domain", 255).notNullable().alter(); }); }; exports.down = function (knex) { // No rollback needed - keeping VARCHAR(255) is the correct state }; ================================================ FILE: db/knex_migrations/2026-01-10-0000-convert-float-precision.js ================================================ exports.up = function (knex) { return knex.schema .alterTable("heartbeat", function (table) { table.bigInteger("ping").alter(); }) .alterTable("stat_minutely", function (table) { table.float("ping", 20, 2).notNullable().alter(); table.float("ping_min", 20, 2).notNullable().defaultTo(0).alter(); table.float("ping_max", 20, 2).notNullable().defaultTo(0).alter(); }) .alterTable("stat_daily", function (table) { table.float("ping", 20, 2).notNullable().alter(); table.float("ping_min", 20, 2).notNullable().defaultTo(0).alter(); table.float("ping_max", 20, 2).notNullable().defaultTo(0).alter(); }) .alterTable("stat_hourly", function (table) { table.float("ping", 20, 2).notNullable().alter(); table.float("ping_min", 20, 2).notNullable().defaultTo(0).alter(); table.float("ping_max", 20, 2).notNullable().defaultTo(0).alter(); }); }; exports.down = function (knex) { return knex.schema .alterTable("heartbeat", function (table) { table.integer("ping").alter(); }) .alterTable("stat_minutely", function (table) { table.float("ping").notNullable().alter(); table.float("ping_min").notNullable().defaultTo(0).alter(); table.float("ping_max").notNullable().defaultTo(0).alter(); }) .alterTable("stat_daily", function (table) { table.float("ping").notNullable().alter(); table.float("ping_min").notNullable().defaultTo(0).alter(); table.float("ping_max").notNullable().defaultTo(0).alter(); }) .alterTable("stat_hourly", function (table) { table.float("ping").notNullable().alter(); table.float("ping_min").notNullable().defaultTo(0).alter(); 
table.float("ping_max").notNullable().defaultTo(0).alter(); }); }; ================================================ FILE: db/knex_migrations/2026-01-15-0000-add-json-query-retry-only-status-code.js ================================================ exports.up = function (knex) { // Add new column to table monitor for json-query retry behavior return knex.schema.alterTable("monitor", function (table) { table.boolean("retry_only_on_status_code_failure").defaultTo(false).notNullable(); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("retry_only_on_status_code_failure"); }); }; ================================================ FILE: db/knex_migrations/2026-01-16-0000-add-screenshot-delay.js ================================================ exports.up = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.integer("screenshot_delay").notNullable().unsigned().defaultTo(0); }); }; exports.down = function (knex) { return knex.schema.alterTable("monitor", function (table) { table.dropColumn("screenshot_delay"); }); }; ================================================ FILE: db/knex_migrations/2026-02-07-0000-disable-domain-expiry-unsupported-tlds.js ================================================ const { parse: parseTld } = require("tldts"); /* * TODO: * This migration file is scary, because the json file is dynamically updated. * Problem 1: Migration files should ideally be stateless. * Problem 2: This migration only runs once, what happens if rdp-dns.json is updated after this migration has run? * Have to investigate later. 
*/ const rdapDnsData = require("../../extra/rdap-dns.json"); const TYPES_WITH_DOMAIN_EXPIRY_SUPPORT_VIA_FIELD = { http: "url", keyword: "url", "json-query": "url", "real-browser": "url", "websocket-upgrade": "url", port: "hostname", ping: "hostname", "grpc-keyword": "grpc_url", dns: "hostname", smtp: "hostname", snmp: "hostname", gamedig: "hostname", steam: "hostname", mqtt: "hostname", radius: "hostname", "tailscale-ping": "hostname", "sip-options": "hostname", }; /** * Build set of root TLDs that have RDAP support * @returns {Set} Set of supported root TLDs */ function getSupportedTlds() { const supported = new Set(); const services = rdapDnsData["services"] ?? []; for (const [tlds] of services) { for (const tld of tlds) { supported.add(tld); } } return supported; } /** * Check if a target URL/hostname has RDAP support * @param {string} target URL or hostname * @param {Set} supportedTlds Set of supported root TLDs * @returns {boolean} Whether the target's TLD has RDAP support */ function hasRdapSupport(target, supportedTlds) { if (!target || typeof target !== "string") { return false; } const tld = parseTld(target); if (!tld.publicSuffix || !tld.isIcann) { return false; } const rootTld = tld.publicSuffix.split(".").pop(); return supportedTlds.has(rootTld); } exports.up = async function (knex) { const supportedTlds = getSupportedTlds(); const monitors = await knex("monitor") .where("domain_expiry_notification", 1) .select("id", "type", "url", "hostname", "grpc_url"); const idsToDisable = []; for (const monitor of monitors) { const targetField = TYPES_WITH_DOMAIN_EXPIRY_SUPPORT_VIA_FIELD[monitor.type]; if (!targetField || !hasRdapSupport(monitor[targetField], supportedTlds)) { idsToDisable.push(monitor.id); } } if (idsToDisable.length > 0) { await knex("monitor").whereIn("id", idsToDisable).update("domain_expiry_notification", 0); } await knex.schema.alterTable("monitor", function (table) { table.boolean("domain_expiry_notification").defaultTo(0).alter(); }); }; 
exports.down = async function (knex) {
    await knex.schema.alterTable("monitor", function (table) {
        table.boolean("domain_expiry_notification").defaultTo(1).alter();
    });
};

================================================
FILE: db/knex_migrations/README.md
================================================

# Info

https://knexjs.org/guide/migrations.html#knexfile-in-other-languages

## Basic rules

- All tables must have a primary key named `id`
- Filename format: `YYYY-MM-DD-HHMM-patch-name.js`
- Avoid native SQL syntax, use knex methods, because Uptime Kuma supports SQLite and MariaDB.

## Template

```js
exports.up = function (knex) {};
exports.down = function (knex) {};

// exports.config = { transaction: false };
```

## Example

Filename: 2023-06-30-1348-create-user-and-product.js

```js
exports.up = function (knex) {
    return knex.schema
        .createTable("user", function (table) {
            table.increments("id");
            table.string("first_name", 255).notNullable();
            table.string("last_name", 255).notNullable();
        })
        .createTable("product", function (table) {
            table.increments("id");
            table.decimal("price").notNullable();
            table.string("name", 1000).notNullable();
        })
        .then(() => {
            // FIX: the example previously inserted into "products" (a table
            // that is never created) and dropped the insert promise.
            // Insert into "product" and return the promise so the migration
            // waits for it.
            return knex("product").insert([
                { price: 10, name: "Apple" },
                { price: 20, name: "Orange" },
            ]);
        });
};

exports.down = function (knex) {
    return knex.schema.dropTable("product").dropTable("user");
};
```

https://knexjs.org/guide/migrations.html#transactions-in-migrations

================================================
FILE: db/old_migrations/README.md
================================================

# Don't create a new migration file here

Please go to ./db/knex_migrations/README.md

================================================
FILE: db/old_migrations/patch-2fa-invalidate-used-token.sql
================================================

-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION; ALTER TABLE user ADD twofa_last_token VARCHAR(6); COMMIT; ================================================ FILE: db/old_migrations/patch-2fa.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE user ADD twofa_secret VARCHAR(64); ALTER TABLE user ADD twofa_status BOOLEAN default 0 NOT NULL; COMMIT; ================================================ FILE: db/old_migrations/patch-add-certificate-expiry-status-page.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE status_page ADD show_certificate_expiry BOOLEAN default 0 NOT NULL; COMMIT; ================================================ FILE: db/old_migrations/patch-add-clickable-status-page-link.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor_group ADD send_url BOOLEAN DEFAULT 0 NOT NULL; COMMIT; ================================================ FILE: db/old_migrations/patch-add-description-monitor.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD description TEXT default null; COMMIT; ================================================ FILE: db/old_migrations/patch-add-docker-columns.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; CREATE TABLE docker_host ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, user_id INT NOT NULL, docker_daemon VARCHAR(255), docker_type VARCHAR(255), name VARCHAR(255) ); ALTER TABLE monitor ADD docker_host INTEGER REFERENCES docker_host(id); ALTER TABLE monitor ADD docker_container VARCHAR(255); COMMIT; ================================================ FILE: db/old_migrations/patch-add-gamedig-given-port.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD gamedig_given_port_only BOOLEAN default 1 not null; COMMIT; ================================================ FILE: db/old_migrations/patch-add-gamedig-monitor.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD game VARCHAR(255); COMMIT; ================================================ FILE: db/old_migrations/patch-add-google-analytics-status-page-tag.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE status_page ADD google_analytics_tag_id VARCHAR; COMMIT; ================================================ FILE: db/old_migrations/patch-add-invert-keyword.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; ALTER TABLE monitor ADD invert_keyword BOOLEAN default 0 not null; COMMIT; ================================================ FILE: db/old_migrations/patch-add-other-auth.sql ================================================ BEGIN TRANSACTION; ALTER TABLE monitor ADD auth_method VARCHAR(250); ALTER TABLE monitor ADD auth_domain TEXT; ALTER TABLE monitor ADD auth_workstation TEXT; COMMIT; BEGIN TRANSACTION; UPDATE monitor SET auth_method = 'basic' WHERE basic_auth_user is not null; COMMIT; ================================================ FILE: db/old_migrations/patch-add-parent-monitor.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD parent INTEGER REFERENCES [monitor] ([id]) ON DELETE SET NULL ON UPDATE CASCADE; COMMIT; ================================================ FILE: db/old_migrations/patch-add-radius-monitor.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD radius_username VARCHAR(255); ALTER TABLE monitor ADD radius_password VARCHAR(255); ALTER TABLE monitor ADD radius_calling_station_id VARCHAR(50); ALTER TABLE monitor ADD radius_called_station_id VARCHAR(50); ALTER TABLE monitor ADD radius_secret VARCHAR(255); COMMIT; ================================================ FILE: db/old_migrations/patch-add-retry-interval-monitor.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; ALTER TABLE monitor ADD retry_interval INTEGER default 0 not null; COMMIT; ================================================ FILE: db/old_migrations/patch-add-sqlserver-monitor.sql ================================================ BEGIN TRANSACTION; ALTER TABLE monitor ADD database_connection_string VARCHAR(2000); ALTER TABLE monitor ADD database_query TEXT; COMMIT ================================================ FILE: db/old_migrations/patch-add-timeout-monitor.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD timeout DOUBLE default 0 not null; COMMIT; ================================================ FILE: db/old_migrations/patch-added-json-query.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD json_path TEXT; ALTER TABLE monitor ADD expected_value VARCHAR(255); COMMIT; ================================================ FILE: db/old_migrations/patch-added-kafka-producer.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD kafka_producer_topic VARCHAR(255); ALTER TABLE monitor ADD kafka_producer_brokers TEXT; ALTER TABLE monitor ADD kafka_producer_ssl INTEGER; ALTER TABLE monitor ADD kafka_producer_allow_auto_topic_creation VARCHAR(255); ALTER TABLE monitor ADD kafka_producer_sasl_options TEXT; ALTER TABLE monitor ADD kafka_producer_message TEXT; COMMIT; ================================================ FILE: db/old_migrations/patch-added-mqtt-monitor.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; ALTER TABLE monitor ADD mqtt_topic TEXT; ALTER TABLE monitor ADD mqtt_success_message VARCHAR(255); ALTER TABLE monitor ADD mqtt_username VARCHAR(255); ALTER TABLE monitor ADD mqtt_password VARCHAR(255); COMMIT; ================================================ FILE: db/old_migrations/patch-api-key-table.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; CREATE TABLE [api_key] ( [id] INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, [key] VARCHAR(255) NOT NULL, [name] VARCHAR(255) NOT NULL, [user_id] INTEGER NOT NULL, [created_date] DATETIME DEFAULT (DATETIME('now')) NOT NULL, [active] BOOLEAN DEFAULT 1 NOT NULL, [expires] DATETIME DEFAULT NULL, CONSTRAINT FK_user FOREIGN KEY ([user_id]) REFERENCES [user]([id]) ON DELETE CASCADE ON UPDATE CASCADE ); COMMIT; ================================================ FILE: db/old_migrations/patch-fix-kafka-producer-booleans.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; -- Rename COLUMNs to another one (suffixed by `_old`) ALTER TABLE monitor RENAME COLUMN kafka_producer_ssl TO kafka_producer_ssl_old; ALTER TABLE monitor RENAME COLUMN kafka_producer_allow_auto_topic_creation TO kafka_producer_allow_auto_topic_creation_old; -- Add correct COLUMNs ALTER TABLE monitor ADD COLUMN kafka_producer_ssl BOOLEAN default 0 NOT NULL; ALTER TABLE monitor ADD COLUMN kafka_producer_allow_auto_topic_creation BOOLEAN default 0 NOT NULL; -- These SQL is still not fully safe. See https://github.com/louislam/uptime-kuma/issues/4039. 
-- Set bring old values from `_old` COLUMNs to correct ones -- UPDATE monitor SET kafka_producer_allow_auto_topic_creation = monitor.kafka_producer_allow_auto_topic_creation_old -- WHERE monitor.kafka_producer_allow_auto_topic_creation_old IS NOT NULL; -- UPDATE monitor SET kafka_producer_ssl = monitor.kafka_producer_ssl_old -- WHERE monitor.kafka_producer_ssl_old IS NOT NULL; -- Remove old COLUMNs ALTER TABLE monitor DROP COLUMN kafka_producer_allow_auto_topic_creation_old; ALTER TABLE monitor DROP COLUMN kafka_producer_ssl_old; COMMIT; ================================================ FILE: db/old_migrations/patch-group-table.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; create table `group` ( id INTEGER not null constraint group_pk primary key autoincrement, name VARCHAR(255) not null, created_date DATETIME default (DATETIME('now')) not null, public BOOLEAN default 0 not null, active BOOLEAN default 1 not null, weight BOOLEAN NOT NULL DEFAULT 1000 ); CREATE TABLE [monitor_group] ( [id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [monitor_id] INTEGER NOT NULL REFERENCES [monitor] ([id]) ON DELETE CASCADE ON UPDATE CASCADE, [group_id] INTEGER NOT NULL REFERENCES [group] ([id]) ON DELETE CASCADE ON UPDATE CASCADE, weight BOOLEAN NOT NULL DEFAULT 1000 ); CREATE INDEX [fk] ON [monitor_group] ( [monitor_id], [group_id]); COMMIT; ================================================ FILE: db/old_migrations/patch-grpc-monitor.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; ALTER TABLE monitor ADD grpc_url VARCHAR(255) default null; ALTER TABLE monitor ADD grpc_protobuf TEXT default null; ALTER TABLE monitor ADD grpc_body TEXT default null; ALTER TABLE monitor ADD grpc_metadata TEXT default null; ALTER TABLE monitor ADD grpc_method VARCHAR(255) default null; ALTER TABLE monitor ADD grpc_service_name VARCHAR(255) default null; ALTER TABLE monitor ADD grpc_enable_tls BOOLEAN default 0 not null; COMMIT; ================================================ FILE: db/old_migrations/patch-http-body-encoding.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD http_body_encoding VARCHAR(25); COMMIT; BEGIN TRANSACTION; UPDATE monitor SET http_body_encoding = 'json' WHERE (type = 'http' or type = 'keyword') AND http_body_encoding IS NULL; COMMIT; ================================================ FILE: db/old_migrations/patch-http-monitor-method-body-and-headers.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD method TEXT default 'GET' not null; ALTER TABLE monitor ADD body TEXT default null; ALTER TABLE monitor ADD headers TEXT default null; COMMIT; ================================================ FILE: db/old_migrations/patch-improve-performance.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; -- For sendHeartbeatList CREATE INDEX monitor_time_index ON heartbeat (monitor_id, time); -- For sendImportantHeartbeatList CREATE INDEX monitor_important_time_index ON heartbeat (monitor_id, important,time); COMMIT; ================================================ FILE: db/old_migrations/patch-incident-table.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; create table incident ( id INTEGER not null constraint incident_pk primary key autoincrement, title VARCHAR(255) not null, content TEXT not null, style VARCHAR(30) default 'warning' not null, created_date DATETIME default (DATETIME('now')) not null, last_updated_date DATETIME, pin BOOLEAN default 1 not null, active BOOLEAN default 1 not null ); COMMIT; ================================================ FILE: db/old_migrations/patch-maintenance-cron.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; DROP TABLE maintenance_timeslot; -- 999 characters. https://stackoverflow.com/questions/46134830/maximum-length-for-cron-job ALTER TABLE maintenance ADD cron TEXT; ALTER TABLE maintenance ADD timezone VARCHAR(255); ALTER TABLE maintenance ADD duration INTEGER; COMMIT; ================================================ FILE: db/old_migrations/patch-maintenance-table2.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; -- Just for someone who tested maintenance before (patch-maintenance-table.sql) DROP TABLE IF EXISTS maintenance_status_page; DROP TABLE IF EXISTS monitor_maintenance; DROP TABLE IF EXISTS maintenance; DROP TABLE IF EXISTS maintenance_timeslot; -- maintenance CREATE TABLE [maintenance] ( [id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [title] VARCHAR(150) NOT NULL, [description] TEXT NOT NULL, [user_id] INTEGER REFERENCES [user]([id]) ON DELETE SET NULL ON UPDATE CASCADE, [active] BOOLEAN NOT NULL DEFAULT 1, [strategy] VARCHAR(50) NOT NULL DEFAULT 'single', [start_date] DATETIME, [end_date] DATETIME, [start_time] TIME, [end_time] TIME, [weekdays] VARCHAR2(250) DEFAULT '[]', [days_of_month] TEXT DEFAULT '[]', [interval_day] INTEGER ); CREATE INDEX [manual_active] ON [maintenance] ( [strategy], [active] ); CREATE INDEX [active] ON [maintenance] ([active]); CREATE INDEX [maintenance_user_id] ON [maintenance] ([user_id]); -- maintenance_status_page CREATE TABLE maintenance_status_page ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, status_page_id INTEGER NOT NULL, maintenance_id INTEGER NOT NULL, CONSTRAINT FK_maintenance FOREIGN KEY (maintenance_id) REFERENCES maintenance (id) ON DELETE CASCADE ON UPDATE CASCADE, CONSTRAINT FK_status_page FOREIGN KEY (status_page_id) REFERENCES status_page (id) ON DELETE CASCADE ON UPDATE CASCADE ); CREATE INDEX [status_page_id_index] ON [maintenance_status_page]([status_page_id]); CREATE INDEX [maintenance_id_index] ON [maintenance_status_page]([maintenance_id]); -- maintenance_timeslot CREATE TABLE [maintenance_timeslot] ( [id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [maintenance_id] INTEGER NOT NULL CONSTRAINT [FK_maintenance] REFERENCES [maintenance]([id]) ON DELETE CASCADE ON UPDATE CASCADE, [start_date] DATETIME NOT NULL, [end_date] DATETIME, [generated_next] BOOLEAN DEFAULT 0 ); CREATE INDEX [maintenance_id] ON [maintenance_timeslot] ([maintenance_id] DESC); CREATE INDEX [active_timeslot_index] ON 
[maintenance_timeslot] ( [maintenance_id] DESC, [start_date] DESC, [end_date] DESC ); CREATE INDEX [generated_next_index] ON [maintenance_timeslot] ([generated_next]); -- monitor_maintenance CREATE TABLE monitor_maintenance ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, monitor_id INTEGER NOT NULL, maintenance_id INTEGER NOT NULL, CONSTRAINT FK_maintenance FOREIGN KEY (maintenance_id) REFERENCES maintenance (id) ON DELETE CASCADE ON UPDATE CASCADE, CONSTRAINT FK_monitor FOREIGN KEY (monitor_id) REFERENCES monitor (id) ON DELETE CASCADE ON UPDATE CASCADE ); CREATE INDEX [maintenance_id_index2] ON [monitor_maintenance]([maintenance_id]); CREATE INDEX [monitor_id_index] ON [monitor_maintenance]([monitor_id]); COMMIT; ================================================ FILE: db/old_migrations/patch-monitor-add-resend-interval.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD resend_interval INTEGER default 0 not null; ALTER TABLE heartbeat ADD down_count INTEGER default 0 not null; COMMIT; ================================================ FILE: db/old_migrations/patch-monitor-basic-auth.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD basic_auth_user TEXT default null; ALTER TABLE monitor ADD basic_auth_pass TEXT default null; COMMIT; ================================================ FILE: db/old_migrations/patch-monitor-expiry-notification.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; ALTER TABLE monitor ADD expiry_notification BOOLEAN default 1; COMMIT; ================================================ FILE: db/old_migrations/patch-monitor-oauth-cc.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD oauth_client_id TEXT default null; ALTER TABLE monitor ADD oauth_client_secret TEXT default null; ALTER TABLE monitor ADD oauth_token_url TEXT default null; ALTER TABLE monitor ADD oauth_scopes TEXT default null; ALTER TABLE monitor ADD oauth_auth_method TEXT default null; COMMIT; ================================================ FILE: db/old_migrations/patch-monitor-push_token.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD push_token VARCHAR(20) DEFAULT NULL; COMMIT; ================================================ FILE: db/old_migrations/patch-monitor-tls-info-add-fk.sql ================================================ BEGIN TRANSACTION; PRAGMA writable_schema = TRUE; UPDATE SQLITE_MASTER SET sql = replace(sql, 'monitor_id INTEGER NOT NULL', 'monitor_id INTEGER NOT NULL REFERENCES [monitor] ([id]) ON DELETE CASCADE ON UPDATE CASCADE' ) WHERE name = 'monitor_tls_info' AND type = 'table'; PRAGMA writable_schema = RESET; COMMIT; ================================================ FILE: db/old_migrations/patch-monitor-tls.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; ALTER TABLE monitor ADD tls_ca TEXT default null; ALTER TABLE monitor ADD tls_cert TEXT default null; ALTER TABLE monitor ADD tls_key TEXT default null; COMMIT; ================================================ FILE: db/old_migrations/patch-notification-config.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; -- SQLite: Change the data type of the column "config" from VARCHAR to TEXT ALTER TABLE notification RENAME COLUMN config TO config_old; ALTER TABLE notification ADD COLUMN config TEXT; UPDATE notification SET config = config_old; ALTER TABLE notification DROP COLUMN config_old; COMMIT; ================================================ FILE: db/old_migrations/patch-notification_sent_history.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; CREATE TABLE [notification_sent_history] ( [id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [type] VARCHAR(50) NOT NULL, [monitor_id] INTEGER NOT NULL, [days] INTEGER NOT NULL, UNIQUE([type], [monitor_id], [days]) ); CREATE INDEX [good_index] ON [notification_sent_history] ( [type], [monitor_id], [days] ); COMMIT; ================================================ FILE: db/old_migrations/patch-ping-packet-size.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD packet_size INTEGER DEFAULT 56 NOT NULL; COMMIT; ================================================ FILE: db/old_migrations/patch-proxy.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; CREATE TABLE proxy ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, user_id INT NOT NULL, protocol VARCHAR(10) NOT NULL, host VARCHAR(255) NOT NULL, port SMALLINT NOT NULL, auth BOOLEAN NOT NULL, username VARCHAR(255) NULL, password VARCHAR(255) NULL, active BOOLEAN NOT NULL DEFAULT 1, 'default' BOOLEAN NOT NULL DEFAULT 0, created_date DATETIME DEFAULT (DATETIME('now')) NOT NULL ); ALTER TABLE monitor ADD COLUMN proxy_id INTEGER REFERENCES proxy(id); CREATE INDEX proxy_id ON monitor (proxy_id); CREATE INDEX proxy_user_id ON proxy (user_id); COMMIT; ================================================ FILE: db/old_migrations/patch-setting-value-type.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; -- Generated by Intellij IDEA create table setting_dg_tmp ( id INTEGER primary key autoincrement, key VARCHAR(200) not null unique, value TEXT, type VARCHAR(20) ); insert into setting_dg_tmp(id, key, value, type) select id, key, value, type from setting; drop table setting; alter table setting_dg_tmp rename to setting; COMMIT; ================================================ FILE: db/old_migrations/patch-status-page-footer-css.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE status_page ADD footer_text TEXT; ALTER TABLE status_page ADD custom_css TEXT; ALTER TABLE status_page ADD show_powered_by BOOLEAN NOT NULL DEFAULT 1; COMMIT; ================================================ FILE: db/old_migrations/patch-status-page.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; CREATE TABLE [status_page]( [id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [slug] VARCHAR(255) NOT NULL UNIQUE, [title] VARCHAR(255) NOT NULL, [description] TEXT, [icon] VARCHAR(255) NOT NULL, [theme] VARCHAR(30) NOT NULL, [published] BOOLEAN NOT NULL DEFAULT 1, [search_engine_index] BOOLEAN NOT NULL DEFAULT 1, [show_tags] BOOLEAN NOT NULL DEFAULT 0, [password] VARCHAR, [created_date] DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, [modified_date] DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); CREATE UNIQUE INDEX [slug] ON [status_page]([slug]); CREATE TABLE [status_page_cname]( [id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, [status_page_id] INTEGER NOT NULL REFERENCES [status_page]([id]) ON DELETE CASCADE ON UPDATE CASCADE, [domain] VARCHAR NOT NULL UNIQUE ); ALTER TABLE incident ADD status_page_id INTEGER; ALTER TABLE [group] ADD status_page_id INTEGER; COMMIT; ================================================ FILE: db/old_migrations/patch-timeout.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; UPDATE monitor SET timeout = (interval * 0.8) WHERE timeout IS NULL OR timeout <= 0; COMMIT; ================================================ FILE: db/old_migrations/patch1.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
-- Change Monitor.created_date from "TIMESTAMP" to "DATETIME" -- SQL Generated by Intellij Idea PRAGMA foreign_keys=off; BEGIN TRANSACTION; create table monitor_dg_tmp ( id INTEGER not null primary key autoincrement, name VARCHAR(150), active BOOLEAN default 1 not null, user_id INTEGER references user on update cascade on delete set null, interval INTEGER default 20 not null, url TEXT, type VARCHAR(20), weight INTEGER default 2000, hostname VARCHAR(255), port INTEGER, created_date DATETIME, keyword VARCHAR(255) ); insert into monitor_dg_tmp(id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword) select id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword from monitor; drop table monitor; alter table monitor_dg_tmp rename to monitor; create index user_id on monitor (user_id); COMMIT; PRAGMA foreign_keys=on; ================================================ FILE: db/old_migrations/patch10.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
CREATE TABLE tag ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, name VARCHAR(255) NOT NULL, color VARCHAR(255) NOT NULL, created_date DATETIME DEFAULT (DATETIME('now')) NOT NULL ); CREATE TABLE monitor_tag ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, monitor_id INTEGER NOT NULL, tag_id INTEGER NOT NULL, value TEXT, CONSTRAINT FK_tag FOREIGN KEY (tag_id) REFERENCES tag(id) ON DELETE CASCADE ON UPDATE CASCADE, CONSTRAINT FK_monitor FOREIGN KEY (monitor_id) REFERENCES monitor(id) ON DELETE CASCADE ON UPDATE CASCADE ); CREATE INDEX monitor_tag_monitor_id_index ON monitor_tag (monitor_id); CREATE INDEX monitor_tag_tag_id_index ON monitor_tag (tag_id); ================================================ FILE: db/old_migrations/patch2.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; CREATE TABLE monitor_tls_info ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, monitor_id INTEGER NOT NULL, info_json TEXT ); COMMIT; ================================================ FILE: db/old_migrations/patch3.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
-- Add maxretries column to monitor PRAGMA foreign_keys=off; BEGIN TRANSACTION; create table monitor_dg_tmp ( id INTEGER not null primary key autoincrement, name VARCHAR(150), active BOOLEAN default 1 not null, user_id INTEGER references user on update cascade on delete set null, interval INTEGER default 20 not null, url TEXT, type VARCHAR(20), weight INTEGER default 2000, hostname VARCHAR(255), port INTEGER, created_date DATETIME, keyword VARCHAR(255), maxretries INTEGER NOT NULL DEFAULT 0 ); insert into monitor_dg_tmp(id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword) select id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword from monitor; drop table monitor; alter table monitor_dg_tmp rename to monitor; create index user_id on monitor (user_id); COMMIT; PRAGMA foreign_keys=on; ================================================ FILE: db/old_migrations/patch4.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. -- OK.... 
serious wrong, missing maxretries column -- Developers should patch it manually if you have missing the maxretries column PRAGMA foreign_keys=off; BEGIN TRANSACTION; create table monitor_dg_tmp ( id INTEGER not null primary key autoincrement, name VARCHAR(150), active BOOLEAN default 1 not null, user_id INTEGER references user on update cascade on delete set null, interval INTEGER default 20 not null, url TEXT, type VARCHAR(20), weight INTEGER default 2000, hostname VARCHAR(255), port INTEGER, created_date DATETIME, keyword VARCHAR(255), maxretries INTEGER NOT NULL DEFAULT 0, ignore_tls BOOLEAN default 0 not null, upside_down BOOLEAN default 0 not null ); insert into monitor_dg_tmp(id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword, maxretries) select id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword, maxretries from monitor; drop table monitor; alter table monitor_dg_tmp rename to monitor; create index user_id on monitor (user_id); COMMIT; PRAGMA foreign_keys=on; ================================================ FILE: db/old_migrations/patch5.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
PRAGMA foreign_keys = off; BEGIN TRANSACTION; create table monitor_dg_tmp ( id INTEGER not null primary key autoincrement, name VARCHAR(150), active BOOLEAN default 1 not null, user_id INTEGER references user on update cascade on delete set null, interval INTEGER default 20 not null, url TEXT, type VARCHAR(20), weight INTEGER default 2000, hostname VARCHAR(255), port INTEGER, created_date DATETIME default (DATETIME('now')) not null, keyword VARCHAR(255), maxretries INTEGER NOT NULL DEFAULT 0, ignore_tls BOOLEAN default 0 not null, upside_down BOOLEAN default 0 not null ); insert into monitor_dg_tmp( id, name, active, user_id, interval, url, type, weight, hostname, port, keyword, maxretries, ignore_tls, upside_down ) select id, name, active, user_id, interval, url, type, weight, hostname, port, keyword, maxretries, ignore_tls, upside_down from monitor; drop table monitor; alter table monitor_dg_tmp rename to monitor; create index user_id on monitor (user_id); COMMIT; PRAGMA foreign_keys = on; ================================================ FILE: db/old_migrations/patch6.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
PRAGMA foreign_keys = off; BEGIN TRANSACTION; create table monitor_dg_tmp ( id INTEGER not null primary key autoincrement, name VARCHAR(150), active BOOLEAN default 1 not null, user_id INTEGER references user on update cascade on delete set null, interval INTEGER default 20 not null, url TEXT, type VARCHAR(20), weight INTEGER default 2000, hostname VARCHAR(255), port INTEGER, created_date DATETIME default (DATETIME('now')) not null, keyword VARCHAR(255), maxretries INTEGER NOT NULL DEFAULT 0, ignore_tls BOOLEAN default 0 not null, upside_down BOOLEAN default 0 not null, maxredirects INTEGER default 10 not null, accepted_statuscodes_json TEXT default '["200-299"]' not null ); insert into monitor_dg_tmp( id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword, maxretries, ignore_tls, upside_down ) select id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword, maxretries, ignore_tls, upside_down from monitor; drop table monitor; alter table monitor_dg_tmp rename to monitor; create index user_id on monitor (user_id); COMMIT; PRAGMA foreign_keys = on; ================================================ FILE: db/old_migrations/patch7.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE monitor ADD dns_resolve_type VARCHAR(5); ALTER TABLE monitor ADD dns_resolve_server VARCHAR(255); COMMIT; ================================================ FILE: db/old_migrations/patch8.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. 
BEGIN TRANSACTION; ALTER TABLE monitor ADD dns_last_result VARCHAR(255); COMMIT; ================================================ FILE: db/old_migrations/patch9.sql ================================================ -- You should not modify if this have pushed to Github, unless it does serious wrong with the db. BEGIN TRANSACTION; ALTER TABLE notification ADD is_default BOOLEAN default 0 NOT NULL; COMMIT; ================================================ FILE: db/patch-monitor-tls-info-add-fk.sql ================================================ BEGIN TRANSACTION; PRAGMA writable_schema = TRUE; UPDATE SQLITE_MASTER SET sql = replace(sql, 'monitor_id INTEGER NOT NULL', 'monitor_id INTEGER NOT NULL REFERENCES [monitor] ([id]) ON DELETE CASCADE ON UPDATE CASCADE' ) WHERE name = 'monitor_tls_info' AND type = 'table'; PRAGMA writable_schema = RESET; COMMIT; ================================================ FILE: docker/builder-go.dockerfile ================================================ ############################################ # Build in Golang # Run npm run build-healthcheck-armv7 in the host first, another it will be super slow where it is building the armv7 healthcheck ############################################ FROM golang:1-buster WORKDIR /app ARG TARGETPLATFORM COPY ./extra/ ./extra/ ## Switch to archive.debian.org RUN sed -i '/^deb/s/^/#/' /etc/apt/sources.list \ && echo "deb http://archive.debian.org/debian buster main contrib non-free" | tee -a /etc/apt/sources.list \ && echo "deb http://archive.debian.org/debian-security buster/updates main contrib non-free" | tee -a /etc/apt/sources.list \ && echo "deb http://archive.debian.org/debian buster-updates main contrib non-free" | tee -a /etc/apt/sources.list # Compile healthcheck.go RUN apt update && \ apt --yes --no-install-recommends install curl && \ curl -sL https://deb.nodesource.com/setup_18.x | bash && \ apt --yes --no-install-recommends install nodejs && \ node ./extra/build-healthcheck.js $TARGETPLATFORM 
&& \ apt --yes remove nodejs ================================================ FILE: docker/debian-base.dockerfile ================================================ # Download Apprise deb package FROM node:22-bookworm-slim AS download-apprise WORKDIR /app COPY ./extra/download-apprise.mjs ./download-apprise.mjs RUN apt update && \ apt --yes --no-install-recommends install curl && \ npm install cheerio semver && \ node ./download-apprise.mjs # Base Image (Slim) # If the image changed, the second stage image should be changed too FROM node:22-bookworm-slim AS base2-slim ARG TARGETPLATFORM # Specify --no-install-recommends to skip unused dependencies, make the base much smaller! # sqlite3 = for debugging # iputils-ping = for ping # util-linux = for setpriv (Should be dropped in 2.0.0?) # dumb-init = avoid zombie processes (#480) # curl = for debugging # ca-certificates = keep the cert up-to-date # sudo = for start service nscd with non-root user # nscd = for better DNS caching RUN apt update && \ apt --yes --no-install-recommends install \ sqlite3 \ ca-certificates \ iputils-ping \ util-linux \ dumb-init \ curl \ sudo \ nscd && \ rm -rf /var/lib/apt/lists/* && \ apt --yes autoremove # apprise = for notifications (Install from the deb package, as the stable one is too old) (workaround for #4867) # Switching to testing repo is no longer working, as the testing repo is not bookworm anymore. 
# python3-paho-mqtt (#4859) # TODO: no idea how to delete the deb file after installation as it becomes a layer already COPY --from=download-apprise /app/apprise.deb ./apprise.deb RUN apt update && \ apt --yes --no-install-recommends install ./apprise.deb python3-paho-mqtt && \ rm -rf /var/lib/apt/lists/* && \ rm -f apprise.deb && \ apt --yes autoremove # Install cloudflared RUN curl https://pkg.cloudflare.com/cloudflare-main.gpg --output /usr/share/keyrings/cloudflare-main.gpg && \ echo 'deb [signed-by=/usr/share/keyrings/cloudflare-main.gpg] https://pkg.cloudflare.com/cloudflared bookworm main' | tee /etc/apt/sources.list.d/cloudflared.list && \ apt update && \ apt install --yes --no-install-recommends cloudflared && \ cloudflared version && \ rm -rf /var/lib/apt/lists/* && \ apt --yes autoremove # For nscd COPY ./docker/etc/nscd.conf /etc/nscd.conf COPY ./docker/etc/sudoers /etc/sudoers # Full Base Image # MariaDB, Chromium and fonts # Make sure to reuse the slim image here. Uncomment the above line if you want to build it from scratch. 
# FROM base2-slim AS base2 FROM louislam/uptime-kuma:base2-slim AS base2 ENV UPTIME_KUMA_ENABLE_EMBEDDED_MARIADB=1 RUN apt update && \ apt --yes --no-install-recommends install chromium fonts-indic fonts-noto fonts-noto-cjk mariadb-server && \ rm -rf /var/lib/apt/lists/* && \ apt --yes autoremove && \ chown -R node:node /var/lib/mysql ================================================ FILE: docker/docker-compose-dev.yml ================================================ version: "3.8" services: uptime-kuma: container_name: uptime-kuma-dev image: louislam/uptime-kuma:nightly2 volumes: #- ./data:/app/data - ../server:/app/server - ../db:/app/db ports: - "3001:3001" # : - "3307:3306" ================================================ FILE: docker/dockerfile ================================================ ARG BASE_IMAGE=louislam/uptime-kuma:base2 ############################################ # Build in Golang # Run npm run build-healthcheck-armv7 in the host first, otherwise it will be super slow where it is building the armv7 healthcheck # Check file: builder-go.dockerfile ############################################ FROM louislam/uptime-kuma:builder-go AS build_healthcheck ############################################ # Build in Node.js ############################################ FROM louislam/uptime-kuma:base2 AS build USER node WORKDIR /app ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1 COPY --chown=node:node .npmrc .npmrc COPY --chown=node:node package.json package.json COPY --chown=node:node package-lock.json package-lock.json RUN npm ci --omit=dev COPY . . 
COPY --chown=node:node --from=build_healthcheck /app/extra/healthcheck /app/extra/healthcheck RUN mkdir ./data ############################################ # ⭐ Main Image ############################################ FROM $BASE_IMAGE AS release WORKDIR /app LABEL org.opencontainers.image.source="https://github.com/louislam/uptime-kuma" ENV UPTIME_KUMA_IS_CONTAINER=1 # Copy app files from build layer COPY --chown=node:node --from=build /app /app EXPOSE 3001 HEALTHCHECK --interval=60s --timeout=30s --start-period=180s --retries=5 CMD extra/healthcheck ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["node", "server/server.js"] ############################################ # Rootless Image ############################################ FROM release AS rootless USER node ############################################ # Mark as Nightly ############################################ FROM release AS nightly RUN npm run mark-as-nightly FROM nightly AS nightly-rootless USER node ############################################ # Build an image for testing pr ############################################ FROM louislam/uptime-kuma:base2 AS pr-test2 WORKDIR /app ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1 ## Install Git RUN apt update \ && apt --yes --no-install-recommends install curl \ && curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \ && chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ && apt update \ && apt --yes --no-install-recommends install git ## Empty the directory, because we have to clone the Git repo. 
RUN rm -rf ./* && chown node /app USER node RUN git config --global user.email "no-reply@no-reply.com" RUN git config --global user.name "PR Tester" RUN git clone https://github.com/louislam/uptime-kuma.git . # Hide the warning when running in detached head state RUN git config --global advice.detachedHead false RUN npm ci EXPOSE 3000 3001 HEALTHCHECK --interval=60s --timeout=30s --start-period=180s --retries=5 CMD extra/healthcheck CMD ["npm", "run", "start-pr-test"] ############################################ # Upload the artifact to Github ############################################ FROM louislam/uptime-kuma:base2 AS upload-artifact WORKDIR / RUN apt update && \ apt --yes install curl file COPY --from=build /app /app ARG VERSION ARG GITHUB_TOKEN ARG TARGETARCH ARG PLATFORM=debian ARG FILE=$PLATFORM-$TARGETARCH-$VERSION.tar.gz ARG DIST=dist.tar.gz RUN chmod +x /app/extra/upload-github-release-asset.sh # Full Build # RUN tar -zcvf $FILE app # RUN /app/extra/upload-github-release-asset.sh github_api_token=$GITHUB_TOKEN owner=louislam repo=uptime-kuma tag=$VERSION filename=$FILE # Dist only RUN cd /app && tar -zcvf $DIST dist RUN /app/extra/upload-github-release-asset.sh github_api_token=$GITHUB_TOKEN owner=louislam repo=uptime-kuma tag=$VERSION filename=/app/$DIST ================================================ FILE: docker/etc/nscd.conf ================================================ # # /etc/nscd.conf # # An example Name Service Cache config file. This file is needed by nscd. # # Legal entries are: # # logfile # debug-level # threads # max-threads # server-user # server-user is ignored if nscd is started with -S parameters # stat-user # reload-count unlimited| # paranoia # restart-interval