Repository: Ozark-Connect/NetworkOptimizer
Branch: main
Commit: acc3dc0a2689
Files: 900
Total size: 45.7 MB
Directory structure:
gitextract_r0774cwm/
├── .dockerignore
├── .editorconfig
├── .github/
│ ├── FUNDING.yml
│ └── workflows/
│ ├── ci.yml
│ └── release.yml
├── .gitignore
├── Directory.Build.props
├── LICENSE
├── NetworkOptimizer.sln
├── README.md
├── TODO.md
├── docker/
│ ├── .dockerignore
│ ├── .env.example
│ ├── DEPLOYMENT.md
│ ├── Dockerfile
│ ├── NATIVE-DEPLOYMENT.md
│ ├── QUICK-REFERENCE.md
│ ├── README.md
│ ├── docker-compose.local.yml
│ ├── docker-compose.macos.yml
│ ├── docker-compose.prod.yml
│ ├── docker-compose.yml
│ ├── entrypoint.sh
│ ├── grafana/
│ │ ├── dashboards/
│ │ │ ├── network-overview.json
│ │ │ ├── security-posture.json
│ │ │ ├── sqm-performance.json
│ │ │ └── switch-deep-dive.json
│ │ └── provisioning/
│ │ ├── dashboards/
│ │ │ └── dashboards.yml
│ │ └── datasources/
│ │ └── influxdb.yml
│ └── openspeedtest/
│ ├── Dockerfile
│ ├── entrypoint.sh
│ └── nginx.conf
├── docs/
│ ├── MACOS-INSTALLATION.md
│ ├── PLAN-unifi-api-abstraction.md
│ └── features/
│ └── speed-test-roadmap.md
├── nuget.config
├── packages/
│ ├── Blazor-ApexCharts.6.1.1-ozarkconnect.1.nupkg
│ └── Blazor-ApexCharts.6.1.1-ozarkconnect.2.nupkg
├── renovate.json
├── scripts/
│ ├── README.md
│ ├── build-installer.ps1
│ ├── build.sh
│ ├── clean.sh
│ ├── coverage.runsettings
│ ├── coverage.sh
│ ├── deploy-external-speedtest.sh
│ ├── docker-build.sh
│ ├── docker-run.sh
│ ├── docker-stop.sh
│ ├── extract-elevation0-from-images.py
│ ├── install-macos-native.sh
│ ├── parse-antenna-patterns.ps1
│ ├── proxmox/
│ │ ├── README.md
│ │ └── install.sh
│ ├── publish.sh
│ ├── reset-password.ps1
│ ├── reset-password.sh
│ ├── sync-perf-tweaks.ps1
│ ├── test.sh
│ └── watch.sh
├── src/
│ ├── NetworkOptimizer.Agents/
│ │ ├── .gitignore
│ │ ├── AgentDeployer.cs
│ │ ├── AgentHealthMonitor.cs
│ │ ├── Models/
│ │ │ ├── AgentConfiguration.cs
│ │ │ ├── DeploymentResult.cs
│ │ │ └── SshCredentials.cs
│ │ ├── NetworkOptimizer.Agents.csproj
│ │ ├── README.md
│ │ ├── ScriptRenderer.cs
│ │ └── Templates/
│ │ ├── install-linux.sh.template
│ │ ├── linux-agent.service.template
│ │ └── linux-agent.sh.template
│ ├── NetworkOptimizer.Alerts/
│ │ ├── AlertCooldownTracker.cs
│ │ ├── AlertCorrelationService.cs
│ │ ├── AlertProcessingService.cs
│ │ ├── AlertRuleEvaluator.cs
│ │ ├── DefaultAlertRules.cs
│ │ ├── Delivery/
│ │ │ ├── DiscordChannelConfig.cs
│ │ │ ├── DiscordDeliveryChannel.cs
│ │ │ ├── EmailChannelConfig.cs
│ │ │ ├── EmailDeliveryChannel.cs
│ │ │ ├── IAlertDeliveryChannel.cs
│ │ │ ├── ISecretDecryptor.cs
│ │ │ ├── NtfyChannelConfig.cs
│ │ │ ├── NtfyDeliveryChannel.cs
│ │ │ ├── SlackChannelConfig.cs
│ │ │ ├── SlackDeliveryChannel.cs
│ │ │ ├── TeamsChannelConfig.cs
│ │ │ ├── TeamsDeliveryChannel.cs
│ │ │ ├── TimestampFormatter.cs
│ │ │ ├── WebhookChannelConfig.cs
│ │ │ └── WebhookDeliveryChannel.cs
│ │ ├── DigestService.cs
│ │ ├── Events/
│ │ │ ├── AlertEvent.cs
│ │ │ ├── AlertEventBus.cs
│ │ │ └── IAlertEventBus.cs
│ │ ├── Interfaces/
│ │ │ ├── IAlertRepository.cs
│ │ │ ├── IDigestStateStore.cs
│ │ │ └── IScheduleRepository.cs
│ │ ├── Models/
│ │ │ ├── AlertHistoryEntry.cs
│ │ │ ├── AlertIncident.cs
│ │ │ ├── AlertRule.cs
│ │ │ ├── DeliveryChannel.cs
│ │ │ └── ScheduledTask.cs
│ │ ├── NetworkOptimizer.Alerts.csproj
│ │ ├── ScheduleService.cs
│ │ └── Templates/
│ │ ├── alert-email.html
│ │ └── digest-email.html
│ ├── NetworkOptimizer.Audit/
│ │ ├── Analyzers/
│ │ │ ├── AuditScorer.cs
│ │ │ ├── FirewallGroupHelper.cs
│ │ │ ├── FirewallRuleAnalyzer.cs
│ │ │ ├── FirewallRuleEvaluator.cs
│ │ │ ├── FirewallRuleOverlapDetector.cs
│ │ │ ├── FirewallRuleParser.cs
│ │ │ ├── HttpAppIds.cs
│ │ │ ├── PortSecurityAnalyzer.cs
│ │ │ ├── UpnpSecurityAnalyzer.cs
│ │ │ └── VlanAnalyzer.cs
│ │ ├── CHANGELOG.md
│ │ ├── ConfigAuditEngine.cs
│ │ ├── Constants/
│ │ │ └── DetectionConstants.cs
│ │ ├── DeviceNameHints.cs
│ │ ├── Dns/
│ │ │ ├── DnatDnsAnalyzer.cs
│ │ │ ├── DnsAppIds.cs
│ │ │ ├── DnsSecurityAnalyzer.cs
│ │ │ ├── DnsStampDecoder.cs
│ │ │ ├── DohProviderRegistry.cs
│ │ │ └── ThirdPartyDnsDetector.cs
│ │ ├── IssueTypes.cs
│ │ ├── Models/
│ │ │ ├── AuditIssue.cs
│ │ │ ├── AuditRequest.cs
│ │ │ ├── AuditResult.cs
│ │ │ ├── AuditSeverity.cs
│ │ │ ├── DeviceAllowanceSettings.cs
│ │ │ ├── DeviceDetectionResult.cs
│ │ │ ├── FirewallAction.cs
│ │ │ ├── FirewallRule.cs
│ │ │ ├── NetworkInfo.cs
│ │ │ ├── OfflineClientInfo.cs
│ │ │ ├── PortInfo.cs
│ │ │ ├── SwitchInfo.cs
│ │ │ └── WirelessClientInfo.cs
│ │ ├── NetworkOptimizer.Audit.csproj
│ │ ├── README.md
│ │ ├── Rules/
│ │ │ ├── AccessPortVlanRule.cs
│ │ │ ├── CameraVlanRule.cs
│ │ │ ├── FirewallAnyAnyRule.cs
│ │ │ ├── IAuditRule.cs
│ │ │ ├── IWirelessAuditRule.cs
│ │ │ ├── IotVlanRule.cs
│ │ │ ├── MacRestrictionRule.cs
│ │ │ ├── PortIsolationRule.cs
│ │ │ ├── PortNameHelper.cs
│ │ │ ├── UnusedPortRule.cs
│ │ │ ├── VlanPlacementChecker.cs
│ │ │ ├── VlanSubnetMismatchRule.cs
│ │ │ ├── WiredSubnetMismatchRule.cs
│ │ │ ├── WirelessCameraVlanRule.cs
│ │ │ └── WirelessIotVlanRule.cs
│ │ ├── Scoring/
│ │ │ └── ScoreConstants.cs
│ │ └── Services/
│ │ ├── Detectors/
│ │ │ ├── FingerprintDetector.cs
│ │ │ ├── MacOuiDetector.cs
│ │ │ └── NamePatternDetector.cs
│ │ ├── DeviceTypeDetectionService.cs
│ │ ├── FirewallZoneLookup.cs
│ │ ├── IIeeeOuiDatabase.cs
│ │ └── IeeeOuiDatabase.cs
│ ├── NetworkOptimizer.Core/
│ │ ├── Caching/
│ │ │ └── AsyncCachedValue.cs
│ │ ├── Enums/
│ │ │ ├── AgentType.cs
│ │ │ ├── AlertSeverity.cs
│ │ │ ├── AlertStatus.cs
│ │ │ ├── AuditSeverity.cs
│ │ │ ├── ClientDeviceCategory.cs
│ │ │ ├── DeviceType.cs
│ │ │ └── MeasurementType.cs
│ │ ├── Extensions/
│ │ │ └── ServiceProviderExtensions.cs
│ │ ├── FeatureFlags.cs
│ │ ├── Helpers/
│ │ │ ├── CloudflareIpRanges.cs
│ │ │ ├── DisplayFormatters.cs
│ │ │ ├── JsonExtensions.cs
│ │ │ ├── NetworkFormatHelpers.cs
│ │ │ ├── NetworkUtilities.cs
│ │ │ └── ProcessUtilities.cs
│ │ ├── Interfaces/
│ │ │ ├── IAgentDeployer.cs
│ │ │ ├── IAuditEngine.cs
│ │ │ ├── IMetricsStorage.cs
│ │ │ ├── IReportGenerator.cs
│ │ │ ├── ISqmManager.cs
│ │ │ └── IUniFiApiClient.cs
│ │ ├── Models/
│ │ │ ├── AgentStatus.cs
│ │ │ ├── AuditResult.cs
│ │ │ ├── NetworkConfiguration.cs
│ │ │ ├── ProtectCamera.cs
│ │ │ ├── SqmConfiguration.cs
│ │ │ └── UniFiDevice.cs
│ │ ├── NetworkOptimizer.Core.csproj
│ │ └── VendorSpecificAttribute.cs
│ ├── NetworkOptimizer.Diagnostics/
│ │ ├── Analyzers/
│ │ │ ├── ApLockAnalyzer.cs
│ │ │ ├── PerformanceAnalyzer.cs
│ │ │ ├── PortProfile8021xAnalyzer.cs
│ │ │ ├── PortProfileSuggestionAnalyzer.cs
│ │ │ ├── StreamingAppIds.cs
│ │ │ └── TrunkConsistencyAnalyzer.cs
│ │ ├── DiagnosticsEngine.cs
│ │ ├── Models/
│ │ │ ├── AccessPortVlanIssue.cs
│ │ │ ├── ApLockIssue.cs
│ │ │ ├── DiagnosticSeverity.cs
│ │ │ ├── DiagnosticsResult.cs
│ │ │ ├── PerformanceIssue.cs
│ │ │ ├── PortProfile8021xIssue.cs
│ │ │ ├── PortProfileSuggestion.cs
│ │ │ └── TrunkConsistencyIssue.cs
│ │ └── NetworkOptimizer.Diagnostics.csproj
│ ├── NetworkOptimizer.Installer/
│ │ ├── CustomStrings.wxl
│ │ ├── Iperf3/
│ │ │ ├── .gitignore
│ │ │ └── Download-Iperf3.ps1
│ │ ├── Iperf3Component.wxs
│ │ ├── LICENSE.txt
│ │ ├── License.rtf
│ │ ├── NetworkOptimizer.Installer.wixproj
│ │ ├── Package.wxs
│ │ ├── ServiceComponent.wxs
│ │ ├── SpeedTest/
│ │ │ ├── .gitignore
│ │ │ ├── Download-Nginx.ps1
│ │ │ ├── Start-SpeedTest.ps1
│ │ │ ├── config.js.template
│ │ │ └── nginx.conf
│ │ ├── SpeedTestComponent.wxs
│ │ ├── Traefik/
│ │ │ ├── .gitignore
│ │ │ └── Download-Traefik.ps1
│ │ └── TraefikComponent.wxs
│ ├── NetworkOptimizer.Monitoring/
│ │ ├── AlertEngine.cs
│ │ ├── MetricsAggregator.cs
│ │ ├── Models/
│ │ │ ├── Alert.cs
│ │ │ ├── AlertThreshold.cs
│ │ │ ├── CellularModemStats.cs
│ │ │ ├── DeviceMetrics.cs
│ │ │ └── InterfaceMetrics.cs
│ │ ├── NetworkOptimizer.Monitoring.csproj
│ │ ├── QmicliParser.cs
│ │ ├── README.md
│ │ ├── SnmpConfiguration.cs
│ │ ├── SnmpPoller.cs
│ │ └── UniFiOids.cs
│ ├── NetworkOptimizer.Reports/
│ │ ├── BrandingOptions.cs
│ │ ├── INDEX.md
│ │ ├── MarkdownReportGenerator.cs
│ │ ├── NetworkOptimizer.Reports.csproj
│ │ ├── PdfReportGenerator.cs
│ │ ├── README.md
│ │ ├── ReportData.cs
│ │ └── Templates/
│ │ └── .gitkeep
│ ├── NetworkOptimizer.Sqm/
│ │ ├── ARCHITECTURE.md
│ │ ├── BaselineCalculator.cs
│ │ ├── InputSanitizer.cs
│ │ ├── LatencyMonitor.cs
│ │ ├── Models/
│ │ │ ├── BaselineData.cs
│ │ │ ├── ConnectionProfile.cs
│ │ │ ├── SpeedtestResult.cs
│ │ │ ├── SqmConfiguration.cs
│ │ │ └── SqmStatus.cs
│ │ ├── NetworkOptimizer.Sqm.csproj
│ │ ├── README.md
│ │ ├── ScriptGenerator.cs
│ │ ├── SpeedtestIntegration.cs
│ │ └── SqmManager.cs
│ ├── NetworkOptimizer.Storage/
│ │ ├── .gitignore
│ │ ├── Helpers/
│ │ │ └── SpeedTestFilterHelper.cs
│ │ ├── InfluxDbStorage.cs
│ │ ├── Interfaces/
│ │ │ ├── IAgentRepository.cs
│ │ │ ├── IAuditRepository.cs
│ │ │ ├── IMetricsStorage.cs
│ │ │ ├── IModemRepository.cs
│ │ │ ├── ISettingsRepository.cs
│ │ │ ├── ISpeedTestRepository.cs
│ │ │ ├── ISqmRepository.cs
│ │ │ └── IUniFiRepository.cs
│ │ ├── Migrations/
│ │ │ ├── 20251208000000_InitialCreate.Designer.cs
│ │ │ ├── 20251208000000_InitialCreate.cs
│ │ │ ├── 20251210000000_AddModemAndSpeedTables.Designer.cs
│ │ │ ├── 20251210000000_AddModemAndSpeedTables.cs
│ │ │ ├── 20251216000000_AddUniFiSshSettings.Designer.cs
│ │ │ ├── 20251216000000_AddUniFiSshSettings.cs
│ │ │ ├── 20251217000000_AddDismissedIssues.Designer.cs
│ │ │ ├── 20251217000000_AddDismissedIssues.cs
│ │ │ ├── 20251217100000_AddGatewaySshSettings.Designer.cs
│ │ │ ├── 20251217100000_AddGatewaySshSettings.cs
│ │ │ ├── 20251217200000_AddStartIperf3ServerToDeviceConfig.Designer.cs
│ │ │ ├── 20251217200000_AddStartIperf3ServerToDeviceConfig.cs
│ │ │ ├── 20251217300000_AddSystemSettings.Designer.cs
│ │ │ ├── 20251217300000_AddSystemSettings.cs
│ │ │ ├── 20251218000000_AddSshCredentialOverridesToDeviceConfig.Designer.cs
│ │ │ ├── 20251218000000_AddSshCredentialOverridesToDeviceConfig.cs
│ │ │ ├── 20251219000000_AddUniFiConnectionSettings.Designer.cs
│ │ │ ├── 20251219000000_AddUniFiConnectionSettings.cs
│ │ │ ├── 20251224000000_AddPathAnalysisJson.Designer.cs
│ │ │ ├── 20251224000000_AddPathAnalysisJson.cs
│ │ │ ├── 20251227000000_AddTcMonitorPort.Designer.cs
│ │ │ ├── 20251227000000_AddTcMonitorPort.cs
│ │ │ ├── 20251227100000_AddSqmWanConfiguration.Designer.cs
│ │ │ ├── 20251227100000_AddSqmWanConfiguration.cs
│ │ │ ├── 20251228000000_AddAdminSettings.Designer.cs
│ │ │ ├── 20251228000000_AddAdminSettings.cs
│ │ │ ├── 20251228100000_AddSqmSpeedtestSchedule.Designer.cs
│ │ │ ├── 20251228100000_AddSqmSpeedtestSchedule.cs
│ │ │ ├── 20251229000000_AddReportDataJson.Designer.cs
│ │ │ ├── 20251229000000_AddReportDataJson.cs
│ │ │ ├── 20260102000000_AddLocalIpToIperf3Result.Designer.cs
│ │ │ ├── 20260102000000_AddLocalIpToIperf3Result.cs
│ │ │ ├── 20260103000000_AddIgnoreControllerSSLErrors.Designer.cs
│ │ │ ├── 20260103000000_AddIgnoreControllerSSLErrors.cs
│ │ │ ├── 20260104100000_AddClientSpeedTestFieldsToIperf3Result.Designer.cs
│ │ │ ├── 20260104100000_AddClientSpeedTestFieldsToIperf3Result.cs
│ │ │ ├── 20260106000000_AddLocationAndWifiSignal.Designer.cs
│ │ │ ├── 20260106000000_AddLocationAndWifiSignal.cs
│ │ │ ├── 20260107000000_AddWifiRadio.Designer.cs
│ │ │ ├── 20260107000000_AddWifiRadio.cs
│ │ │ ├── 20260107100000_AddWifiMlo.Designer.cs
│ │ │ ├── 20260107100000_AddWifiMlo.cs
│ │ │ ├── 20260107200000_AddWifiTxRxRates.Designer.cs
│ │ │ ├── 20260107200000_AddWifiTxRxRates.cs
│ │ │ ├── 20260110000000_AddIperf3BinaryPathToDeviceConfig.Designer.cs
│ │ │ ├── 20260110000000_AddIperf3BinaryPathToDeviceConfig.cs
│ │ │ ├── 20260113000000_AddUpnpNotes.Designer.cs
│ │ │ ├── 20260113000000_AddUpnpNotes.cs
│ │ │ ├── 20260124000000_AddNotesToIperf3Result.Designer.cs
│ │ │ ├── 20260124000000_AddNotesToIperf3Result.cs
│ │ │ ├── 20260209200000_AddApLocations.Designer.cs
│ │ │ ├── 20260209200000_AddApLocations.cs
│ │ │ ├── 20260210100000_AddLoadedLatencyColumns.Designer.cs
│ │ │ ├── 20260210100000_AddLoadedLatencyColumns.cs
│ │ │ ├── 20260211000000_AddWanIdentityColumns.Designer.cs
│ │ │ ├── 20260211000000_AddWanIdentityColumns.cs
│ │ │ ├── 20260211200000_AddBuildingsAndFloorPlans.Designer.cs
│ │ │ ├── 20260211200000_AddBuildingsAndFloorPlans.cs
│ │ │ ├── 20260211300000_AddApOrientationDeg.Designer.cs
│ │ │ ├── 20260211300000_AddApOrientationDeg.cs
│ │ │ ├── 20260211400000_AddApMountType.Designer.cs
│ │ │ ├── 20260211400000_AddApMountType.cs
│ │ │ ├── 20260212000000_AddFloorMaterial.Designer.cs
│ │ │ ├── 20260212000000_AddFloorMaterial.cs
│ │ │ ├── 20260213000000_AddClientSignalLog.Designer.cs
│ │ │ ├── 20260213000000_AddClientSignalLog.cs
│ │ │ ├── 20260213000000_AddPlannedAps.Designer.cs
│ │ │ ├── 20260213000000_AddPlannedAps.cs
│ │ │ ├── 20260214100000_AddPerBandTxPower.Designer.cs
│ │ │ ├── 20260214100000_AddPerBandTxPower.cs
│ │ │ ├── 20260220000000_AddFloorPlanImages.Designer.cs
│ │ │ ├── 20260220000000_AddFloorPlanImages.cs
│ │ │ ├── 20260221000000_AddAlertTables.Designer.cs
│ │ │ ├── 20260221000000_AddAlertTables.cs
│ │ │ ├── 20260221100000_AddThreatTables.Designer.cs
│ │ │ ├── 20260221100000_AddThreatTables.cs
│ │ │ ├── 20260222100000_AddTrafficFlowFields.Designer.cs
│ │ │ ├── 20260222100000_AddTrafficFlowFields.cs
│ │ │ ├── 20260222200000_AddThreatNoiseFilters.Designer.cs
│ │ │ ├── 20260222200000_AddThreatNoiseFilters.cs
│ │ │ ├── 20260223000000_AddScheduledTasks.Designer.cs
│ │ │ ├── 20260223000000_AddScheduledTasks.cs
│ │ │ ├── 20260223100000_AddAlertRuleThreshold.Designer.cs
│ │ │ ├── 20260223100000_AddAlertRuleThreshold.cs
│ │ │ ├── 20260225200000_AddPatternLastAlertedAt.Designer.cs
│ │ │ ├── 20260225200000_AddPatternLastAlertedAt.cs
│ │ │ ├── 20260226010000_AddPatternDedupKey.Designer.cs
│ │ │ ├── 20260226010000_AddPatternDedupKey.cs
│ │ │ ├── 20260226100000_AddAuditIsScheduled.Designer.cs
│ │ │ ├── 20260226100000_AddAuditIsScheduled.cs
│ │ │ ├── 20260226120000_AddSqmBaselineLatency.Designer.cs
│ │ │ ├── 20260226120000_AddSqmBaselineLatency.cs
│ │ │ ├── 20260228000000_AddWanDataUsageTables.Designer.cs
│ │ │ ├── 20260228000000_AddWanDataUsageTables.cs
│ │ │ ├── 20260301000000_AddSignalLogChannelWidth.Designer.cs
│ │ │ ├── 20260301000000_AddSignalLogChannelWidth.cs
│ │ │ ├── 20260301200000_AddSnapshotGatewayBootTime.Designer.cs
│ │ │ ├── 20260301200000_AddSnapshotGatewayBootTime.cs
│ │ │ ├── 20260306000000_AddAlertSourceUrl.Designer.cs
│ │ │ ├── 20260306000000_AddAlertSourceUrl.cs
│ │ │ ├── 20260311000000_AddDeviceIperf3Overrides.Designer.cs
│ │ │ ├── 20260311000000_AddDeviceIperf3Overrides.cs
│ │ │ ├── 20260312000000_PurgeStaleCrowdSecNegativeCache.Designer.cs
│ │ │ ├── 20260312000000_PurgeStaleCrowdSecNegativeCache.cs
│ │ │ ├── 20260318000000_AddExternalServerName.Designer.cs
│ │ │ ├── 20260318000000_AddExternalServerName.cs
│ │ │ ├── 20260320000000_AddWanSteerTrafficClasses.Designer.cs
│ │ │ ├── 20260320000000_AddWanSteerTrafficClasses.cs
│ │ │ ├── 20260402100000_AddCongestionSeverity.Designer.cs
│ │ │ ├── 20260402100000_AddCongestionSeverity.cs
│ │ │ ├── 20260404000000_AddApiKey.Designer.cs
│ │ │ ├── 20260404000000_AddApiKey.cs
│ │ │ ├── 20260405000000_AddExternalSpeedTestServers.Designer.cs
│ │ │ ├── 20260405000000_AddExternalSpeedTestServers.cs
│ │ │ ├── 20260428000000_AddSqmLinkSpeedOverride.Designer.cs
│ │ │ ├── 20260428000000_AddSqmLinkSpeedOverride.cs
│ │ │ ├── 20260505000000_AddSqmBootDelay.Designer.cs
│ │ │ ├── 20260505000000_AddSqmBootDelay.cs
│ │ │ ├── 20260507000000_AddPerfTweakSettings.Designer.cs
│ │ │ ├── 20260507000000_AddPerfTweakSettings.cs
│ │ │ └── NetworkOptimizerDbContextModelSnapshot.cs
│ │ ├── Models/
│ │ │ ├── AdminSettings.cs
│ │ │ ├── AgentConfiguration.cs
│ │ │ ├── ApLocation.cs
│ │ │ ├── AuditResult.cs
│ │ │ ├── Building.cs
│ │ │ ├── ClientSignalLog.cs
│ │ │ ├── DeviceSshConfiguration.cs
│ │ │ ├── DismissedIssue.cs
│ │ │ ├── ExternalSpeedTestServer.cs
│ │ │ ├── FloorPlan.cs
│ │ │ ├── FloorPlanImage.cs
│ │ │ ├── GatewaySshSettings.cs
│ │ │ ├── Iperf3Result.cs
│ │ │ ├── LicenseInfo.cs
│ │ │ ├── ModemConfiguration.cs
│ │ │ ├── NetworkOptimizerDbContext.cs
│ │ │ ├── PerfTweakSetting.cs
│ │ │ ├── PlannedAp.cs
│ │ │ ├── SqmBaseline.cs
│ │ │ ├── SqmWanConfiguration.cs
│ │ │ ├── SystemSetting.cs
│ │ │ ├── UniFiConnectionSettings.cs
│ │ │ ├── UniFiSshSettings.cs
│ │ │ ├── UpnpNote.cs
│ │ │ ├── WanDataUsageConfig.cs
│ │ │ ├── WanDataUsageSnapshot.cs
│ │ │ └── WanSteerTrafficClass.cs
│ │ ├── NetworkOptimizer.Storage.csproj
│ │ ├── README.md
│ │ ├── Repositories/
│ │ │ ├── AgentRepository.cs
│ │ │ ├── AlertRepository.cs
│ │ │ ├── AuditRepository.cs
│ │ │ ├── ModemRepository.cs
│ │ │ ├── ScheduleRepository.cs
│ │ │ ├── SettingsRepository.cs
│ │ │ ├── SpeedTestRepository.cs
│ │ │ ├── SqmRepository.cs
│ │ │ ├── ThreatRepository.cs
│ │ │ └── UniFiRepository.cs
│ │ ├── RepositoryBase.cs
│ │ ├── Services/
│ │ │ ├── CredentialProtectionService.cs
│ │ │ └── ICredentialProtectionService.cs
│ │ ├── StorageConfiguration.cs
│ │ └── StorageServiceExtensions.cs
│ ├── NetworkOptimizer.Threats/
│ │ ├── Analysis/
│ │ │ ├── BruteForceDetector.cs
│ │ │ ├── DDoSDetector.cs
│ │ │ ├── ExploitCampaignDetector.cs
│ │ │ ├── ExposureValidator.cs
│ │ │ ├── FlowInterestFilter.cs
│ │ │ ├── KillChainClassifier.cs
│ │ │ ├── ScanSweepDetector.cs
│ │ │ └── ThreatPatternAnalyzer.cs
│ │ ├── CrowdSec/
│ │ │ ├── CrowdSecClient.cs
│ │ │ ├── CrowdSecEnrichmentService.cs
│ │ │ └── CrowdSecModels.cs
│ │ ├── Enrichment/
│ │ │ └── GeoEnrichmentService.cs
│ │ ├── Interfaces/
│ │ │ ├── IThreatRepository.cs
│ │ │ ├── IThreatSettingsAccessor.cs
│ │ │ └── IUniFiClientAccessor.cs
│ │ ├── Models/
│ │ │ ├── CrowdSecReputation.cs
│ │ │ ├── EventSource.cs
│ │ │ ├── ExposureReport.cs
│ │ │ ├── GeoInfo.cs
│ │ │ ├── KillChainStage.cs
│ │ │ ├── PatternType.cs
│ │ │ ├── ThreatAction.cs
│ │ │ ├── ThreatEvent.cs
│ │ │ ├── ThreatNoiseFilter.cs
│ │ │ └── ThreatPattern.cs
│ │ ├── NetworkOptimizer.Threats.csproj
│ │ ├── ThreatCollectionService.cs
│ │ └── ThreatEventNormalizer.cs
│ ├── NetworkOptimizer.UniFi/
│ │ ├── ClientIpEnricher.cs
│ │ ├── Helpers/
│ │ │ ├── GlobalSwitchSettings.cs
│ │ │ └── VlanAnalysisHelper.cs
│ │ ├── Models/
│ │ │ ├── NetworkHop.cs
│ │ │ ├── NetworkPath.cs
│ │ │ ├── PathAnalysisResult.cs
│ │ │ ├── UniFiApiResponse.cs
│ │ │ ├── UniFiClientDetailResponse.cs
│ │ │ ├── UniFiClientResponse.cs
│ │ │ ├── UniFiDeviceResponse.cs
│ │ │ ├── UniFiFingerprintDatabase.cs
│ │ │ ├── UniFiFirewallGroup.cs
│ │ │ ├── UniFiFirewallRule.cs
│ │ │ ├── UniFiFirewallZone.cs
│ │ │ ├── UniFiIpsEvent.cs
│ │ │ ├── UniFiNetworkConfig.cs
│ │ │ ├── UniFiPortForwardRule.cs
│ │ │ ├── UniFiPortProfile.cs
│ │ │ ├── UniFiProtectDeviceResponse.cs
│ │ │ ├── UniFiSysInfo.cs
│ │ │ ├── UniFiThreatLogEntry.cs
│ │ │ ├── UniFiWlanConfig.cs
│ │ │ ├── WiFiManClientResponse.cs
│ │ │ └── WirelessRateSnapshot.cs
│ │ ├── NetworkOptimizer.UniFi.csproj
│ │ ├── NetworkPathAnalyzer.cs
│ │ ├── README.md
│ │ ├── RadioFormatHelper.cs
│ │ ├── UniFiApiClient.cs
│ │ ├── UniFiDiscovery.cs
│ │ └── UniFiProductDatabase.cs
│ ├── NetworkOptimizer.Web/
│ │ ├── App.razor
│ │ ├── Components/
│ │ │ ├── Layout/
│ │ │ │ ├── AuthLayout.razor
│ │ │ │ ├── MainLayout.razor
│ │ │ │ └── NavMenu.razor
│ │ │ ├── Pages/
│ │ │ │ ├── Agents.razor
│ │ │ │ ├── Alerts.razor
│ │ │ │ ├── Audit.razor
│ │ │ │ ├── ClientDashboard.razor
│ │ │ │ ├── ClientSpeedTest.razor
│ │ │ │ ├── ClientWanSpeedTest.razor
│ │ │ │ ├── Dashboard.razor
│ │ │ │ ├── Login.razor
│ │ │ │ ├── Optimize.razor
│ │ │ │ ├── PerformanceTweaks.razor
│ │ │ │ ├── PwaInstall.razor
│ │ │ │ ├── Settings.razor
│ │ │ │ ├── SpeedTest.razor
│ │ │ │ ├── Sqm.razor
│ │ │ │ ├── ThreatDashboard.razor
│ │ │ │ ├── UpnpInspector.razor
│ │ │ │ ├── WanSpeedTest.razor
│ │ │ │ ├── WanSteering.razor
│ │ │ │ └── WiFiOptimizer.razor
│ │ │ ├── Routes.razor
│ │ │ ├── ScrollRestoration.razor
│ │ │ ├── Shared/
│ │ │ │ ├── AgentStatusTable.razor
│ │ │ │ ├── AlertsList.razor
│ │ │ │ ├── CellularStatsPanel.razor
│ │ │ │ ├── DeviceCard.razor
│ │ │ │ ├── DeviceIcon.razor
│ │ │ │ ├── IssuesList.razor
│ │ │ │ ├── PwaBanner.razor
│ │ │ │ ├── SecurityScoreGauge.razor
│ │ │ │ ├── SpeedTestDetails.razor
│ │ │ │ ├── SpeedTestMap.razor
│ │ │ │ ├── SpeedTestSearchFilter.razor
│ │ │ │ ├── SponsorshipBanner.razor
│ │ │ │ ├── SqmStatusPanel.razor
│ │ │ │ ├── SshTroubleshootingTooltip.razor
│ │ │ │ ├── UpdateChecker.razor
│ │ │ │ ├── WanOption.cs
│ │ │ │ └── WiFi/
│ │ │ │ ├── AirtimeFairness.razor
│ │ │ │ ├── ApLoadBalance.razor
│ │ │ │ ├── BandSteeringAnalysis.razor
│ │ │ │ ├── ChannelAnalysis.razor
│ │ │ │ ├── ClientTimeline.razor
│ │ │ │ ├── ConnectivityFlow.razor
│ │ │ │ ├── EnvironmentalCorrelation.razor
│ │ │ │ ├── FloorPlanEditor.razor
│ │ │ │ ├── HealthScoreGauge.razor
│ │ │ │ ├── Metrics.razor
│ │ │ │ ├── PowerCoverageAnalysis.razor
│ │ │ │ ├── RoamingAnalytics.razor
│ │ │ │ ├── SpectrumAnalysis.razor
│ │ │ │ └── WiFiDashboardPanel.razor
│ │ │ └── _Imports.razor
│ │ ├── Endpoints/
│ │ │ ├── AlertEndpoints.cs
│ │ │ ├── EndpointHelpers.cs
│ │ │ └── SpeedTestEndpoints.cs
│ │ ├── Models/
│ │ │ ├── ApMapMarker.cs
│ │ │ └── ClientDashboardModels.cs
│ │ ├── NetworkOptimizer.Web.csproj
│ │ ├── Program.cs
│ │ ├── Properties/
│ │ │ └── launchSettings.json
│ │ ├── README.md
│ │ ├── Resources/
│ │ │ └── PerfTweaks/
│ │ │ ├── 06-mongodb-ssd-offload.sh
│ │ │ ├── 07-mongodb-ssd-backup.sh
│ │ │ ├── 10-journald-volatile.sh
│ │ │ ├── 15-fan-control-tuning.sh
│ │ │ ├── 20-sfp-sgmiiplus.sh
│ │ │ └── force_uniphy1_sgmiiplus.ko
│ │ ├── Services/
│ │ │ ├── AdminAuthService.cs
│ │ │ ├── AgentService.cs
│ │ │ ├── ApMapService.cs
│ │ │ ├── AuditService.cs
│ │ │ ├── CellularModemService.cs
│ │ │ ├── ClientDashboardService.cs
│ │ │ ├── ClientSpeedTestService.cs
│ │ │ ├── CloudflareSpeedTestService.cs
│ │ │ ├── ConfigTransferService.cs
│ │ │ ├── DashboardLayoutService.cs
│ │ │ ├── DashboardService.cs
│ │ │ ├── DiagnosticsService.cs
│ │ │ ├── FileVersionProvider.cs
│ │ │ ├── FingerprintDatabaseService.cs
│ │ │ ├── FloorPlanService.cs
│ │ │ ├── GatewaySpeedTestService.cs
│ │ │ ├── GatewayWanSpeedTestService.cs
│ │ │ ├── HeatmapDataCache.cs
│ │ │ ├── IAgentService.cs
│ │ │ ├── ICellularModemService.cs
│ │ │ ├── IDashboardService.cs
│ │ │ ├── IFingerprintDatabaseService.cs
│ │ │ ├── IGatewaySpeedTestService.cs
│ │ │ ├── IIperf3SpeedTestService.cs
│ │ │ ├── ISponsorshipService.cs
│ │ │ ├── ISqmDeploymentService.cs
│ │ │ ├── ISqmService.cs
│ │ │ ├── ISystemSettingsService.cs
│ │ │ ├── ITcMonitorClient.cs
│ │ │ ├── IUniFiSshService.cs
│ │ │ ├── Iperf3JsonParser.cs
│ │ │ ├── Iperf3ServerService.cs
│ │ │ ├── Iperf3SpeedTestService.cs
│ │ │ ├── JwtService.cs
│ │ │ ├── NginxHostedService.cs
│ │ │ ├── PasswordHasher.cs
│ │ │ ├── PdfStorageService.cs
│ │ │ ├── PerfTweaksDeploymentService.cs
│ │ │ ├── PlannedApService.cs
│ │ │ ├── PullToRefreshState.cs
│ │ │ ├── ScheduleExecutorRegistration.cs
│ │ │ ├── SponsorshipService.cs
│ │ │ ├── SqmDeploymentService.cs
│ │ │ ├── SqmService.cs
│ │ │ ├── Ssh/
│ │ │ │ ├── GatewaySshService.cs
│ │ │ │ ├── IGatewaySshService.cs
│ │ │ │ ├── SshClientService.cs
│ │ │ │ ├── SshCommandResult.cs
│ │ │ │ └── SshConnectionInfo.cs
│ │ │ ├── SystemSettingsService.cs
│ │ │ ├── TcMonitorClient.cs
│ │ │ ├── ThreatDashboardService.cs
│ │ │ ├── ThreatSettingsAccessor.cs
│ │ │ ├── TimeFormatHelper.cs
│ │ │ ├── TopologySnapshotService.cs
│ │ │ ├── TraefikHostedService.cs
│ │ │ ├── UniFiClientAccessor.cs
│ │ │ ├── UniFiConnectionService.cs
│ │ │ ├── UniFiSshService.cs
│ │ │ ├── UwnSpeedTestService.cs
│ │ │ ├── WanDataUsageService.cs
│ │ │ ├── WanSpeedTestServiceBase.cs
│ │ │ ├── WanSteerDeploymentService.cs
│ │ │ ├── WanSteerValidation.cs
│ │ │ └── WiFiOptimizerService.cs
│ │ ├── appsettings.Development.json
│ │ ├── appsettings.json
│ │ └── wwwroot/
│ │ ├── css/
│ │ │ └── app.css
│ │ ├── data/
│ │ │ ├── antenna-patterns.json
│ │ │ └── cloudflare-colos.json
│ │ ├── downloads/
│ │ │ ├── iperf3_3.18-1_mips-3.4.ipk
│ │ │ └── libiperf3_3.18-1_mips-3.4.ipk
│ │ ├── js/
│ │ │ ├── demo-mask.js
│ │ │ ├── floorPlanEditor.js
│ │ │ ├── scrollRestoration.js
│ │ │ ├── steppedScaleBar.js
│ │ │ └── updateCheck.js
│ │ ├── lib/
│ │ │ ├── pdf.min.mjs
│ │ │ └── pdf.worker.min.mjs
│ │ └── manifest.webmanifest
│ ├── NetworkOptimizer.WiFi/
│ │ ├── Analyzers/
│ │ │ └── SiteHealthScorer.cs
│ │ ├── BssidIdentifier.cs
│ │ ├── Data/
│ │ │ ├── AntennaPatternLoader.cs
│ │ │ ├── ApModelCatalog.cs
│ │ │ ├── MaterialAttenuation.cs
│ │ │ └── MountTypeHelper.cs
│ │ ├── Helpers/
│ │ │ ├── ChannelSpanHelper.cs
│ │ │ └── SignalClassification.cs
│ │ ├── IWiFiDataProvider.cs
│ │ ├── Models/
│ │ │ ├── AccessPointSnapshot.cs
│ │ │ ├── ChannelRecommendation.cs
│ │ │ ├── ChannelScanResult.cs
│ │ │ ├── ClientConnectionEvent.cs
│ │ │ ├── PropagationModels.cs
│ │ │ ├── RegulatoryChannelData.cs
│ │ │ ├── RoamingEvent.cs
│ │ │ ├── RoamingTopology.cs
│ │ │ ├── WiFiMetrics.cs
│ │ │ ├── WirelessClientSnapshot.cs
│ │ │ └── WlanConfiguration.cs
│ │ ├── NetworkOptimizer.WiFi.csproj
│ │ ├── Providers/
│ │ │ └── UniFiLiveDataProvider.cs
│ │ ├── Rules/
│ │ │ ├── BandSteeringRule.cs
│ │ │ ├── CoChannelInterferenceRule.cs
│ │ │ ├── CoverageGapRule.cs
│ │ │ ├── DhcpIssuesRule.cs
│ │ │ ├── High2GHzConcentrationRule.cs
│ │ │ ├── HighApLoadRule.cs
│ │ │ ├── HighPowerOverlapRule.cs
│ │ │ ├── HighPowerRule.cs
│ │ │ ├── HighRadioUtilizationRule.cs
│ │ │ ├── HighTxRetryRule.cs
│ │ │ ├── IWiFiOptimizerRule.cs
│ │ │ ├── IoTSsidSeparationRule.cs
│ │ │ ├── LegacyClientAirtimeRule.cs
│ │ │ ├── LoadImbalanceRule.cs
│ │ │ ├── MinRssiEnabledRule.cs
│ │ │ ├── MinRssiRule.cs
│ │ │ ├── MinimumDataRatesRule.cs
│ │ │ ├── NonStandardChannelRule.cs
│ │ │ ├── RoamingAssistantRule.cs
│ │ │ ├── TxPowerVariationRule.cs
│ │ │ ├── WeakSignalPopulationRule.cs
│ │ │ ├── WiFiOptimizerContext.cs
│ │ │ ├── WiFiOptimizerEngine.cs
│ │ │ └── WideChannelWidthRule.cs
│ │ ├── Services/
│ │ │ ├── ChannelRecommendationService.cs
│ │ │ └── PropagationService.cs
│ │ ├── SiteHealthScore.cs
│ │ └── WiFiAnalysisHelpers.cs
│ ├── OpenSpeedTest/
│ │ ├── .gitignore
│ │ ├── ATTRIBUTION.md
│ │ ├── License.md
│ │ ├── README.md
│ │ ├── assets/
│ │ │ ├── css/
│ │ │ │ ├── app.css
│ │ │ │ ├── darkmode.css
│ │ │ │ └── ozark-overrides.css
│ │ │ ├── images/
│ │ │ │ └── icons/
│ │ │ │ ├── browserconfig.xml
│ │ │ │ └── site.webmanifest
│ │ │ └── js/
│ │ │ ├── app-2.5.4.js
│ │ │ ├── config.js
│ │ │ ├── darkmode.js
│ │ │ └── geolocation.js
│ │ ├── downloading
│ │ ├── hosted.html
│ │ ├── index.html
│ │ └── upload
│ ├── cfspeedtest/
│ │ ├── .gitignore
│ │ ├── Makefile
│ │ ├── go.mod
│ │ ├── main.go
│ │ └── speedtest/
│ │ ├── latency.go
│ │ ├── metadata.go
│ │ ├── servertiming.go
│ │ ├── sockopt_unix.go
│ │ ├── sockopt_windows.go
│ │ ├── throughput.go
│ │ ├── transport.go
│ │ └── types.go
│ ├── uwnspeedtest/
│ │ ├── Makefile
│ │ ├── go.mod
│ │ ├── main.go
│ │ └── uwn/
│ │ ├── discovery.go
│ │ ├── latency.go
│ │ ├── throughput.go
│ │ └── types.go
│ └── wansteer/
│ ├── Makefile
│ ├── config.go
│ ├── config.sample.json
│ ├── go.mod
│ ├── health.go
│ ├── main.go
│ ├── rules.go
│ ├── status.go
│ └── wansteer_test.go
└── tests/
├── Directory.Build.props
├── FluentAssertionsLicense.cs
├── NetworkOptimizer.Agents.Tests/
│ ├── DeploymentResultTests.cs
│ ├── NetworkOptimizer.Agents.Tests.csproj
│ └── ScriptRendererTests.cs
├── NetworkOptimizer.Alerts.Tests/
│ ├── AlertCooldownTrackerTests.cs
│ ├── AlertCorrelationServiceTests.cs
│ ├── AlertEventBusTests.cs
│ ├── AlertRuleEvaluatorTests.cs
│ ├── Delivery/
│ │ ├── NtfyDeliveryChannelTests.cs
│ │ └── WebhookDeliveryChannelTests.cs
│ ├── NetworkOptimizer.Alerts.Tests.csproj
│ └── ScheduleCalculationTests.cs
├── NetworkOptimizer.Audit.Tests/
│ ├── Analyzers/
│ │ ├── FirewallGroupHelperTests.cs
│ │ ├── FirewallRuleAnalyzerTests.cs
│ │ ├── FirewallRuleEvaluatorTests.cs
│ │ ├── FirewallRuleOverlapDetectorTests.cs
│ │ ├── FirewallRuleParserTests.cs
│ │ ├── HttpAppIdsTests.cs
│ │ ├── PortProfileResolutionTests.cs
│ │ ├── PortSecurityAnalyzerTests.cs
│ │ ├── ProtectCameraFallbackTests.cs
│ │ ├── UpnpSecurityAnalyzerTests.cs
│ │ └── VlanAnalyzerTests.cs
│ ├── AuditScorerTests.cs
│ ├── ConfigAuditEngineTests.cs
│ ├── Constants/
│ │ └── DetectionConstantsTests.cs
│ ├── DeviceNameHintsTests.cs
│ ├── Dns/
│ │ ├── DnatDnsAnalyzerTests.cs
│ │ ├── DnsAppIdsTests.cs
│ │ ├── DnsSecurityAnalyzerTests.cs
│ │ ├── DnsStampDecoderTests.cs
│ │ ├── DohProviderRegistryTests.cs
│ │ └── ThirdPartyDnsDetectorTests.cs
│ ├── Models/
│ │ ├── AuditRequestTests.cs
│ │ ├── ClientInfoDisplayNameTests.cs
│ │ ├── DeviceAllowanceSettingsTests.cs
│ │ ├── FirewallActionTests.cs
│ │ ├── FirewallRuleTests.cs
│ │ └── NetworkPurposeExtensionsTests.cs
│ ├── NetworkOptimizer.Audit.Tests.csproj
│ ├── Rules/
│ │ ├── AccessPortVlanRuleTests.cs
│ │ ├── AuditRuleBaseTests.cs
│ │ ├── CameraVlanRuleTests.cs
│ │ ├── FirewallAnyAnyRuleTests.cs
│ │ ├── IotVlanRuleTests.cs
│ │ ├── MacRestrictionRuleTests.cs
│ │ ├── PortIsolationRuleTests.cs
│ │ ├── PortNameHelperTests.cs
│ │ ├── UnusedPortRuleTests.cs
│ │ ├── VlanPlacementCheckerTests.cs
│ │ ├── VlanSubnetMismatchRuleTests.cs
│ │ ├── WiredSubnetMismatchRuleTests.cs
│ │ ├── WirelessCameraVlanRuleTests.cs
│ │ └── WirelessIotVlanRuleTests.cs
│ ├── Services/
│ │ ├── DeviceTypeDetectionServiceTests.cs
│ │ ├── FingerprintDetectorTests.cs
│ │ ├── FirewallZoneLookupTests.cs
│ │ └── MacOuiDetectorTests.cs
│ └── xunit.runner.json
├── NetworkOptimizer.Core.Tests/
│ ├── Caching/
│ │ └── AsyncCachedValueTests.cs
│ ├── Extensions/
│ │ └── ServiceProviderExtensionsTests.cs
│ ├── Helpers/
│ │ ├── CloudflareIpRangesTests.cs
│ │ ├── DisplayFormattersTests.cs
│ │ └── NetworkUtilitiesTests.cs
│ └── NetworkOptimizer.Core.Tests.csproj
├── NetworkOptimizer.Diagnostics.Tests/
│ ├── Analyzers/
│ │ ├── ApLockAnalyzerTests.cs
│ │ ├── PerformanceAnalyzerTests.cs
│ │ ├── PortProfile8021xAnalyzerTests.cs
│ │ ├── PortProfileSuggestionAnalyzerTests.cs
│ │ └── TrunkConsistencyAnalyzerTests.cs
│ ├── DiagnosticsEngineTests.cs
│ ├── NetworkOptimizer.Diagnostics.Tests.csproj
│ └── xunit.runner.json
├── NetworkOptimizer.Monitoring.Tests/
│ ├── AlertEngineTests.cs
│ ├── AlertThresholdTests.cs
│ ├── CellularModemStatsTests.cs
│ ├── DeviceMetricsTests.cs
│ ├── InterfaceMetricsTests.cs
│ ├── MetricsAggregatorTests.cs
│ ├── NetworkOptimizer.Monitoring.Tests.csproj
│ ├── QmicliParserTests.cs
│ └── SnmpConfigurationTests.cs
├── NetworkOptimizer.Reports.Tests/
│ ├── BrandingOptionsTests.cs
│ ├── NetworkOptimizer.Reports.Tests.csproj
│ └── ReportDataTests.cs
├── NetworkOptimizer.Sqm.Tests/
│ ├── BaselineCalculatorTests.cs
│ ├── InputSanitizerTests.cs
│ ├── LatencyMonitorTests.cs
│ ├── NetworkOptimizer.Sqm.Tests.csproj
│ ├── ScriptGeneratorTests.cs
│ ├── SpeedtestIntegrationTests.cs
│ ├── SqmManagerTests.cs
│ └── WanInterfaceExtractionTests.cs
├── NetworkOptimizer.Storage.Tests/
│ ├── AgentRepositoryTests.cs
│ ├── AuditRepositoryTests.cs
│ ├── CredentialProtectionServiceTests.cs
│ ├── ModemRepositoryTests.cs
│ ├── NetworkOptimizer.Storage.Tests.csproj
│ ├── SettingsRepositoryTests.cs
│ ├── SpeedTestRepositoryTests.cs
│ ├── SqmRepositoryTests.cs
│ ├── UniFiRepositoryTests.cs
│ └── WanDataUsageServiceTests.cs
├── NetworkOptimizer.Threats.Tests/
│ ├── BruteForceDetectorTests.cs
│ ├── CrowdSecClientTests.cs
│ ├── DDoSDetectorTests.cs
│ ├── ExploitCampaignDetectorTests.cs
│ ├── ExposureValidatorTests.cs
│ ├── FlowInterestFilterTests.cs
│ ├── KillChainClassifierTests.cs
│ ├── NetworkOptimizer.Threats.Tests.csproj
│ ├── ScanSweepDetectorTests.cs
│ ├── ThreatEventNormalizerTests.cs
│ └── ThreatPatternAnalyzerTests.cs
├── NetworkOptimizer.UniFi.Tests/
│ ├── CgnatIpDetectionTests.cs
│ ├── ClientIpEnricherTests.cs
│ ├── DaisyChainPathTests.cs
│ ├── DeviceTypeClassificationTests.cs
│ ├── DiscoveredClientTests.cs
│ ├── Fixtures/
│ │ ├── NetworkTestData.cs
│ │ └── TopologyBuilder.cs
│ ├── GatewayApExclusionTests.cs
│ ├── LagSpeedTests.cs
│ ├── NetworkOptimizer.UniFi.Tests.csproj
│ ├── NetworkPathAnalyzerIntegrationTests.cs
│ ├── NetworkPathAnalyzerTests.cs
│ ├── NetworkPathTests.cs
│ ├── PathAnalysisResultTests.cs
│ ├── PathTrace/
│ │ └── BuildHopListTests.cs
│ ├── RadioFormatHelperTests.cs
│ ├── SnapshotIntegrationTests.cs
│ ├── UniFiClientResponseTests.cs
│ ├── UniFiFingerprintDatabaseTests.cs
│ ├── UniFiFirewallZoneTests.cs
│ ├── UniFiNetworkConfigTests.cs
│ ├── UniFiProductDatabaseTests.cs
│ ├── UniFiWlanConfigTests.cs
│ └── WirelessRateSnapshotTests.cs
├── NetworkOptimizer.Web.Tests/
│ ├── NetworkOptimizer.Web.Tests.csproj
│ ├── WanSteerDeploymentServiceTests.cs
│ └── WanSteerValidationTests.cs
└── NetworkOptimizer.WiFi.Tests/
├── BssidIdentifierTests.cs
├── ChannelRecommendationServiceTests.cs
├── ChannelSpanHelperTests.cs
├── CoChannelInterferenceRuleTests.cs
├── CoverageGapRuleTests.cs
├── LoadImbalanceRuleTests.cs
├── NetworkOptimizer.WiFi.Tests.csproj
├── PropagationInterferenceTests.cs
├── RegulatoryChannelDataTests.cs
├── SignalClassificationTests.cs
└── WiFiAnalysisHelpersTests.cs
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
# Git
.git
.gitignore
.gitattributes
# IDE
.vs
.vscode
.idea
*.user
*.suo
# Build outputs
**/bin
**/obj
**/out
# Test results
**/TestResults
**/coverage
# Temporary files
**/tmp
**/temp
*.tmp
*.log
# OS files
.DS_Store
Thumbs.db
# Node (if any frontend build tools)
**/node_modules
# Docker
docker/data
docker/logs
docker/ssh-keys
.env
*.env.local
# Documentation build
docs/_site
# Secrets (never include)
*.pfx
*.key
*.pem
credentials.json
secrets.json
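Note: a quick way to sanity-check what actually survives these exclusions is to build a throwaway image that copies the whole context and lists it. This is a generic sketch, not part of the repo; the temp path and listing depth are arbitrary:

  # Hypothetical spot-check of the Docker build context (run from the repo root).
  # .dockerignore in the context root is honored even with -f pointing elsewhere.
  cat > /tmp/ctx-check.Dockerfile <<'EOF'
  FROM busybox
  COPY . /ctx
  RUN find /ctx -maxdepth 2 | sort
  EOF
  docker build --no-cache --progress=plain -f /tmp/ctx-check.Dockerfile .

If a secret or build output shows up in the listing, the corresponding ignore pattern above needs tightening.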
================================================
FILE: .editorconfig
================================================
# EditorConfig for NetworkOptimizer
# https://editorconfig.org
root = true
[*]
indent_style = space
indent_size = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.{cs,csx}]
# Organize usings
dotnet_sort_system_directives_first = true
dotnet_separate_import_directive_groups = false
# Remove unused usings
dotnet_diagnostic.IDE0005.severity = warning
# Namespace preferences
csharp_style_namespace_declarations = file_scoped:suggestion
# var preferences
csharp_style_var_for_built_in_types = false:suggestion
csharp_style_var_when_type_is_apparent = true:suggestion
csharp_style_var_elsewhere = true:suggestion
# Expression-bodied members
csharp_style_expression_bodied_methods = when_on_single_line:suggestion
csharp_style_expression_bodied_constructors = false:suggestion
csharp_style_expression_bodied_properties = true:suggestion
csharp_style_expression_bodied_accessors = true:suggestion
csharp_style_expression_bodied_lambdas = true:suggestion
# Null checking
csharp_style_throw_expression = true:suggestion
csharp_style_conditional_delegate_call = true:suggestion
# Braces
csharp_prefer_braces = true:suggestion
# New line preferences
csharp_new_line_before_open_brace = all
csharp_new_line_before_else = true
csharp_new_line_before_catch = true
csharp_new_line_before_finally = true
[*.{json,yml,yaml}]
indent_size = 2
[*.md]
trim_trailing_whitespace = false
[Makefile]
indent_style = tab
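Note: these settings are machine-enforceable, not just editor hints. A minimal sketch, assuming the .NET SDK is installed (dotnet format reads .editorconfig, including analyzer severities such as the IDE0005 unused-usings rule configured above):

  # Fail (non-zero exit) if any file violates the configured rules; useful in CI
  dotnet format NetworkOptimizer.sln --verify-no-changes
  # Rewrite files in place to match .editorconfig
  dotnet format NetworkOptimizer.sln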
================================================
FILE: .github/FUNDING.yml
================================================
github: tvancott42
ko_fi: tjtuna42
================================================
FILE: .github/workflows/ci.yml
================================================
name: CI
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  build-and-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup .NET
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: '10.0.200'
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.22'
          cache: false
      - name: Build cfspeedtest
        working-directory: src/cfspeedtest
        run: go build -trimpath ./...
      - name: Build cfspeedtest (arm64 cross-compile)
        working-directory: src/cfspeedtest
        run: GOOS=linux GOARCH=arm64 go build -trimpath ./...
      - name: Build uwnspeedtest
        working-directory: src/uwnspeedtest
        run: go build -trimpath ./...
      - name: Build uwnspeedtest (arm64 cross-compile)
        working-directory: src/uwnspeedtest
        run: GOOS=linux GOARCH=arm64 go build -trimpath ./...
      - name: Build wansteer
        working-directory: src/wansteer
        run: go build -trimpath ./...
      - name: Build wansteer (arm64 cross-compile)
        working-directory: src/wansteer
        run: GOOS=linux GOARCH=arm64 go build -trimpath ./...
      - name: Test wansteer
        working-directory: src/wansteer
        run: go test ./...
      - name: Restore dependencies
        run: dotnet restore
      - name: Build
        run: dotnet build --no-restore --configuration Release
      - name: Test
        run: dotnet test --no-build --configuration Release --verbosity normal
        env:
          FLUENT_ASSERTIONS_LICENSED: ${{ secrets.FLUENT_ASSERTIONS_LICENSED }}
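Note: the workflow above is straightforward to reproduce locally. A rough equivalent of the Go and .NET steps, assuming Go 1.22+ and the .NET 10 SDK are on PATH (paths as in the repo; wansteer shown as the representative Go helper):

  # Native build, arm64 cross-compile, and tests for one Go helper
  cd src/wansteer
  go build -trimpath ./...
  GOOS=linux GOARCH=arm64 go build -trimpath ./...
  go test ./...
  cd ../..
  # .NET restore, build, and tests
  dotnet restore
  dotnet build --no-restore --configuration Release
  dotnet test --no-build --configuration Release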
================================================
FILE: .github/workflows/release.yml
================================================
name: Release
on:
  push:
    tags:
      - 'v*'
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ozark-connect/network-optimizer
  SPEEDTEST_IMAGE_NAME: ozark-connect/speedtest
jobs:
  build:
    strategy:
      fail-fast: true
      matrix:
        include:
          - platform: linux/amd64
            runner: ubuntu-latest
          - platform: linux/arm64
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    permissions:
      contents: read
      packages: write
    steps:
      - uses: actions/checkout@v4
      - name: Set platform pair
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      - name: Extract version from tag
        id: version
        run: echo "version=${GITHUB_REF_NAME#v}" >> $GITHUB_OUTPUT
      - name: Build and push by digest
        id: build
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/Dockerfile
          platforms: ${{ matrix.platform }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            VERSION=${{ steps.version.outputs.version }}
          outputs: type=image,name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: type=gha,scope=build-${{ env.PLATFORM_PAIR }}
          cache-to: type=gha,scope=build-${{ env.PLATFORM_PAIR }},mode=max
      - name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"
      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM_PAIR }}
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1
  merge:
    needs: build
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - uses: actions/checkout@v4
      - name: Download digests
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value=latest,enable={{is_default_branch}}
      - name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
            $(printf '${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@sha256:%s ' *)
      - name: Inspect image
        run: |
          docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.meta.outputs.version }}
      # Speedtest image - trivial build (copy files into nginx:alpine), QEMU is fine
      - name: Extract metadata for speedtest image
        id: speedtest-meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.SPEEDTEST_IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value=latest,enable={{is_default_branch}}
      - name: Build and push speedtest image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/openspeedtest/Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.speedtest-meta.outputs.tags }}
          labels: ${{ steps.speedtest-meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
  publish-release:
    needs: merge
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Publish draft release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Find draft release matching this tag and publish it
          gh release edit ${{ github.ref_name }} --draft=false --repo ${{ github.repository }} || echo "No draft release found for ${{ github.ref_name }}"
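Note: the build job pushes one image per platform by digest (no tag), and the merge job stitches those digests into a single multi-arch manifest list. A hand-run sketch of the same pattern; the tag and digests below are illustrative placeholders, not real release values:

  docker buildx imagetools create \
    -t ghcr.io/ozark-connect/network-optimizer:1.2.3 \
    ghcr.io/ozark-connect/network-optimizer@sha256:<amd64-digest> \
    ghcr.io/ozark-connect/network-optimizer@sha256:<arm64-digest>
  # Verify both platforms appear in the resulting manifest list
  docker buildx imagetools inspect ghcr.io/ozark-connect/network-optimizer:1.2.3

Building per-platform on native runners (ubuntu-latest and ubuntu-24.04-arm) avoids QEMU emulation for the main image; only the trivial speedtest image is built under emulation.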
================================================
FILE: .gitignore
================================================
# Build outputs
bin/
obj/
out/
publish/
# Go build artifacts
src/uwnspeedtest/uwnspeedtest*
src/cfspeedtest/cfspeedtest*
# IDE
.vs/
.vscode/
*.user
*.suo
# .NET
*.dll
*.pdb
*.cache
# Docker data
docker/data/
docker/logs/
# Per-host compose overrides (local customizations not checked in)
docker/docker-compose.override.yml
# Secrets
*.env
!*.env.example
docker/.env
ssh-keys/
# OS files
.DS_Store
Thumbs.db
nul
# Temp files
*.tmp
*.log
# Internal development notes
CLAUDE.md
.claude/
tmpclaude-*
plans/
# Local research work / scratch
research/
code-review/
# Local development scripts
scripts/local-dev/
# Archived code
archive/
# Coverage reports
coverage/
TestResults/
# Backup
backup/
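Note: when a file is unexpectedly excluded from the repo, git can identify the exact rule responsible. A small sketch; the paths are illustrative examples of entries ignored above:

  git check-ignore -v docker/.env src/cfspeedtest/cfspeedtest CLAUDE.md
  # each matching path is printed with the ignore file, line number, and pattern that matched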
================================================
FILE: Directory.Build.props
================================================
v
================================================
FILE: LICENSE
================================================
License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved.
"Business Source License" is a trademark of MariaDB Corporation Ab.
Parameters
Licensor: Ozark Connect
Licensed Work: Network Optimizer for UniFi. The Licensed Work is (c) 2026
Ozark Connect.
Additional Use Grant: You may make production use of the Licensed Work for personal,
non-commercial purposes on up to three sites.
Commercial use, including but not limited to use by managed
service providers (MSPs), network installers, or any entity
using the Licensed Work in the delivery of paid services to
clients, requires a separate commercial license from the
Licensor.
For commercial licensing inquiries, please contact
tj@ozarkconnect.net.
Change Date: 2028-01-01
Change License: Apache License 2.0
For information about alternative licensing arrangements for the Licensed Work,
please contact tj@ozarkconnect.net.
Notice
The Business Source License (this document, or the "License") is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
================================================
FILE: NetworkOptimizer.sln
================================================
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.0.31903.59
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Core", "src\NetworkOptimizer.Core\NetworkOptimizer.Core.csproj", "{A1B2C3D4-0001-0001-0001-000000000001}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Storage", "src\NetworkOptimizer.Storage\NetworkOptimizer.Storage.csproj", "{A1B2C3D4-0002-0002-0002-000000000002}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.UniFi", "src\NetworkOptimizer.UniFi\NetworkOptimizer.UniFi.csproj", "{A1B2C3D4-0003-0003-0003-000000000003}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Audit", "src\NetworkOptimizer.Audit\NetworkOptimizer.Audit.csproj", "{A1B2C3D4-0004-0004-0004-000000000004}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Sqm", "src\NetworkOptimizer.Sqm\NetworkOptimizer.Sqm.csproj", "{A1B2C3D4-0005-0005-0005-000000000005}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Monitoring", "src\NetworkOptimizer.Monitoring\NetworkOptimizer.Monitoring.csproj", "{A1B2C3D4-0006-0006-0006-000000000006}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Agents", "src\NetworkOptimizer.Agents\NetworkOptimizer.Agents.csproj", "{A1B2C3D4-0007-0007-0007-000000000007}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Reports", "src\NetworkOptimizer.Reports\NetworkOptimizer.Reports.csproj", "{A1B2C3D4-0008-0008-0008-000000000008}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Web", "src\NetworkOptimizer.Web\NetworkOptimizer.Web.csproj", "{A1B2C3D4-0009-0009-0009-000000000009}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tests", "tests", "{5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Audit.Tests", "tests\NetworkOptimizer.Audit.Tests\NetworkOptimizer.Audit.Tests.csproj", "{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Storage.Tests", "tests\NetworkOptimizer.Storage.Tests\NetworkOptimizer.Storage.Tests.csproj", "{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Monitoring.Tests", "tests\NetworkOptimizer.Monitoring.Tests\NetworkOptimizer.Monitoring.Tests.csproj", "{6E45D264-3A7D-40EB-9B5E-C1685212B561}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.UniFi.Tests", "tests\NetworkOptimizer.UniFi.Tests\NetworkOptimizer.UniFi.Tests.csproj", "{6BCA4A03-EC08-48D5-9789-0F23C416B062}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{827E0CD3-B72D-47B6-A68D-7590B98EB39B}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Core.Tests", "tests\NetworkOptimizer.Core.Tests\NetworkOptimizer.Core.Tests.csproj", "{D24105B5-B804-4E55-9064-98179F6DFBF2}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Sqm.Tests", "tests\NetworkOptimizer.Sqm.Tests\NetworkOptimizer.Sqm.Tests.csproj", "{E8182317-73B2-4196-B628-4747C11A238D}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Agents.Tests", "tests\NetworkOptimizer.Agents.Tests\NetworkOptimizer.Agents.Tests.csproj", "{E4902895-D017-4B52-B024-53F9FC237CF5}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Reports.Tests", "tests\NetworkOptimizer.Reports.Tests\NetworkOptimizer.Reports.Tests.csproj", "{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Diagnostics", "src\NetworkOptimizer.Diagnostics\NetworkOptimizer.Diagnostics.csproj", "{58377D73-D053-4EF0-99B2-14F6E9547ED4}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Diagnostics.Tests", "tests\NetworkOptimizer.Diagnostics.Tests\NetworkOptimizer.Diagnostics.Tests.csproj", "{9F192F42-4B9A-49F3-99E9-273298D5AC93}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.WiFi", "src\NetworkOptimizer.WiFi\NetworkOptimizer.WiFi.csproj", "{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.WiFi.Tests", "tests\NetworkOptimizer.WiFi.Tests\NetworkOptimizer.WiFi.Tests.csproj", "{EEF0B083-6131-4C4E-96AD-FC9EA571E941}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Alerts", "src\NetworkOptimizer.Alerts\NetworkOptimizer.Alerts.csproj", "{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Alerts.Tests", "tests\NetworkOptimizer.Alerts.Tests\NetworkOptimizer.Alerts.Tests.csproj", "{45AED52D-E4D4-40FE-B310-433B93853F1C}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Threats", "src\NetworkOptimizer.Threats\NetworkOptimizer.Threats.csproj", "{D23999B0-B2F7-4DD9-AA35-09F385E36726}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Threats.Tests", "tests\NetworkOptimizer.Threats.Tests\NetworkOptimizer.Threats.Tests.csproj", "{AC78B418-5216-49F6-9084-BB4A0241A2DA}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NetworkOptimizer.Web.Tests", "tests\NetworkOptimizer.Web.Tests\NetworkOptimizer.Web.Tests.csproj", "{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Release|Any CPU = Release|Any CPU
Release|x64 = Release|x64
Release|x86 = Release|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{A1B2C3D4-0001-0001-0001-000000000001}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0001-0001-0001-000000000001}.Release|x86.Build.0 = Release|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0002-0002-0002-000000000002}.Release|x86.Build.0 = Release|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0003-0003-0003-000000000003}.Release|x86.Build.0 = Release|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0004-0004-0004-000000000004}.Release|x86.Build.0 = Release|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0005-0005-0005-000000000005}.Release|x86.Build.0 = Release|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0006-0006-0006-000000000006}.Release|x86.Build.0 = Release|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0007-0007-0007-000000000007}.Release|x86.Build.0 = Release|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0008-0008-0008-000000000008}.Release|x86.Build.0 = Release|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Debug|x64.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Debug|x64.Build.0 = Debug|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Debug|x86.ActiveCfg = Debug|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Debug|x86.Build.0 = Debug|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Release|Any CPU.Build.0 = Release|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Release|x64.ActiveCfg = Release|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Release|x64.Build.0 = Release|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Release|x86.ActiveCfg = Release|Any CPU
{A1B2C3D4-0009-0009-0009-000000000009}.Release|x86.Build.0 = Release|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Debug|Any CPU.Build.0 = Debug|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Debug|x64.ActiveCfg = Debug|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Debug|x64.Build.0 = Debug|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Debug|x86.ActiveCfg = Debug|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Debug|x86.Build.0 = Debug|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Release|Any CPU.ActiveCfg = Release|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Release|Any CPU.Build.0 = Release|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Release|x64.ActiveCfg = Release|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Release|x64.Build.0 = Release|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Release|x86.ActiveCfg = Release|Any CPU
{0F787DF2-4792-43F8-89F8-1DA862AD9FE6}.Release|x86.Build.0 = Release|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Debug|Any CPU.Build.0 = Debug|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Debug|x64.ActiveCfg = Debug|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Debug|x64.Build.0 = Debug|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Debug|x86.ActiveCfg = Debug|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Debug|x86.Build.0 = Debug|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Release|Any CPU.ActiveCfg = Release|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Release|Any CPU.Build.0 = Release|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Release|x64.ActiveCfg = Release|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Release|x64.Build.0 = Release|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Release|x86.ActiveCfg = Release|Any CPU
{5315FA3C-19CC-41FE-BF3E-3E20351AB9BF}.Release|x86.Build.0 = Release|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Debug|Any CPU.Build.0 = Debug|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Debug|x64.ActiveCfg = Debug|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Debug|x64.Build.0 = Debug|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Debug|x86.ActiveCfg = Debug|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Debug|x86.Build.0 = Debug|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Release|Any CPU.ActiveCfg = Release|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Release|Any CPU.Build.0 = Release|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Release|x64.ActiveCfg = Release|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Release|x64.Build.0 = Release|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Release|x86.ActiveCfg = Release|Any CPU
{6E45D264-3A7D-40EB-9B5E-C1685212B561}.Release|x86.Build.0 = Release|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Debug|Any CPU.Build.0 = Debug|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Debug|x64.ActiveCfg = Debug|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Debug|x64.Build.0 = Debug|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Debug|x86.ActiveCfg = Debug|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Debug|x86.Build.0 = Debug|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Release|Any CPU.ActiveCfg = Release|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Release|Any CPU.Build.0 = Release|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Release|x64.ActiveCfg = Release|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Release|x64.Build.0 = Release|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Release|x86.ActiveCfg = Release|Any CPU
{6BCA4A03-EC08-48D5-9789-0F23C416B062}.Release|x86.Build.0 = Release|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Debug|Any CPU.Build.0 = Debug|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Debug|x64.ActiveCfg = Debug|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Debug|x64.Build.0 = Debug|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Debug|x86.ActiveCfg = Debug|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Debug|x86.Build.0 = Debug|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Release|Any CPU.ActiveCfg = Release|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Release|Any CPU.Build.0 = Release|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Release|x64.ActiveCfg = Release|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Release|x64.Build.0 = Release|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Release|x86.ActiveCfg = Release|Any CPU
{D24105B5-B804-4E55-9064-98179F6DFBF2}.Release|x86.Build.0 = Release|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Debug|x64.ActiveCfg = Debug|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Debug|x64.Build.0 = Debug|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Debug|x86.ActiveCfg = Debug|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Debug|x86.Build.0 = Debug|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Release|Any CPU.Build.0 = Release|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Release|x64.ActiveCfg = Release|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Release|x64.Build.0 = Release|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Release|x86.ActiveCfg = Release|Any CPU
{E8182317-73B2-4196-B628-4747C11A238D}.Release|x86.Build.0 = Release|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Debug|x64.ActiveCfg = Debug|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Debug|x64.Build.0 = Debug|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Debug|x86.ActiveCfg = Debug|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Debug|x86.Build.0 = Debug|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Release|Any CPU.Build.0 = Release|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Release|x64.ActiveCfg = Release|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Release|x64.Build.0 = Release|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Release|x86.ActiveCfg = Release|Any CPU
{E4902895-D017-4B52-B024-53F9FC237CF5}.Release|x86.Build.0 = Release|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Debug|x64.ActiveCfg = Debug|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Debug|x64.Build.0 = Debug|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Debug|x86.ActiveCfg = Debug|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Debug|x86.Build.0 = Debug|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Release|Any CPU.Build.0 = Release|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Release|x64.ActiveCfg = Release|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Release|x64.Build.0 = Release|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Release|x86.ActiveCfg = Release|Any CPU
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B}.Release|x86.Build.0 = Release|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Debug|Any CPU.Build.0 = Debug|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Debug|x64.ActiveCfg = Debug|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Debug|x64.Build.0 = Debug|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Debug|x86.ActiveCfg = Debug|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Debug|x86.Build.0 = Debug|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Release|Any CPU.ActiveCfg = Release|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Release|Any CPU.Build.0 = Release|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Release|x64.ActiveCfg = Release|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Release|x64.Build.0 = Release|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Release|x86.ActiveCfg = Release|Any CPU
{58377D73-D053-4EF0-99B2-14F6E9547ED4}.Release|x86.Build.0 = Release|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Debug|Any CPU.Build.0 = Debug|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Debug|x64.ActiveCfg = Debug|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Debug|x64.Build.0 = Debug|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Debug|x86.ActiveCfg = Debug|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Debug|x86.Build.0 = Debug|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Release|Any CPU.ActiveCfg = Release|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Release|Any CPU.Build.0 = Release|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Release|x64.ActiveCfg = Release|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Release|x64.Build.0 = Release|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Release|x86.ActiveCfg = Release|Any CPU
{9F192F42-4B9A-49F3-99E9-273298D5AC93}.Release|x86.Build.0 = Release|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Debug|x64.ActiveCfg = Debug|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Debug|x64.Build.0 = Debug|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Debug|x86.ActiveCfg = Debug|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Debug|x86.Build.0 = Debug|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Release|Any CPU.ActiveCfg = Release|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Release|Any CPU.Build.0 = Release|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Release|x64.ActiveCfg = Release|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Release|x64.Build.0 = Release|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Release|x86.ActiveCfg = Release|Any CPU
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E}.Release|x86.Build.0 = Release|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Debug|Any CPU.Build.0 = Debug|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Debug|x64.ActiveCfg = Debug|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Debug|x64.Build.0 = Debug|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Debug|x86.ActiveCfg = Debug|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Debug|x86.Build.0 = Debug|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Release|Any CPU.ActiveCfg = Release|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Release|Any CPU.Build.0 = Release|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Release|x64.ActiveCfg = Release|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Release|x64.Build.0 = Release|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Release|x86.ActiveCfg = Release|Any CPU
{EEF0B083-6131-4C4E-96AD-FC9EA571E941}.Release|x86.Build.0 = Release|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Debug|Any CPU.Build.0 = Debug|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Debug|x64.ActiveCfg = Debug|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Debug|x64.Build.0 = Debug|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Debug|x86.ActiveCfg = Debug|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Debug|x86.Build.0 = Debug|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Release|Any CPU.ActiveCfg = Release|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Release|Any CPU.Build.0 = Release|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Release|x64.ActiveCfg = Release|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Release|x64.Build.0 = Release|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Release|x86.ActiveCfg = Release|Any CPU
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28}.Release|x86.Build.0 = Release|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Debug|Any CPU.Build.0 = Debug|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Debug|x64.ActiveCfg = Debug|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Debug|x64.Build.0 = Debug|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Debug|x86.ActiveCfg = Debug|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Debug|x86.Build.0 = Debug|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Release|Any CPU.ActiveCfg = Release|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Release|Any CPU.Build.0 = Release|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Release|x64.ActiveCfg = Release|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Release|x64.Build.0 = Release|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Release|x86.ActiveCfg = Release|Any CPU
{45AED52D-E4D4-40FE-B310-433B93853F1C}.Release|x86.Build.0 = Release|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Debug|Any CPU.Build.0 = Debug|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Debug|x64.ActiveCfg = Debug|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Debug|x64.Build.0 = Debug|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Debug|x86.ActiveCfg = Debug|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Debug|x86.Build.0 = Debug|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Release|Any CPU.ActiveCfg = Release|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Release|Any CPU.Build.0 = Release|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Release|x64.ActiveCfg = Release|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Release|x64.Build.0 = Release|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Release|x86.ActiveCfg = Release|Any CPU
{D23999B0-B2F7-4DD9-AA35-09F385E36726}.Release|x86.Build.0 = Release|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Debug|Any CPU.Build.0 = Debug|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Debug|x64.ActiveCfg = Debug|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Debug|x64.Build.0 = Debug|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Debug|x86.ActiveCfg = Debug|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Debug|x86.Build.0 = Debug|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Release|Any CPU.ActiveCfg = Release|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Release|Any CPU.Build.0 = Release|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Release|x64.ActiveCfg = Release|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Release|x64.Build.0 = Release|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Release|x86.ActiveCfg = Release|Any CPU
{AC78B418-5216-49F6-9084-BB4A0241A2DA}.Release|x86.Build.0 = Release|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Debug|x64.ActiveCfg = Debug|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Debug|x64.Build.0 = Debug|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Debug|x86.ActiveCfg = Debug|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Debug|x86.Build.0 = Debug|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Release|Any CPU.ActiveCfg = Release|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Release|Any CPU.Build.0 = Release|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Release|x64.ActiveCfg = Release|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Release|x64.Build.0 = Release|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Release|x86.ActiveCfg = Release|Any CPU
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{6BCA4A03-EC08-48D5-9789-0F23C416B062} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{D24105B5-B804-4E55-9064-98179F6DFBF2} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{E8182317-73B2-4196-B628-4747C11A238D} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{E4902895-D017-4B52-B024-53F9FC237CF5} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{BF01305D-EC29-40DA-B9E4-B4E29FDB601B} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{58377D73-D053-4EF0-99B2-14F6E9547ED4} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B}
{9F192F42-4B9A-49F3-99E9-273298D5AC93} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{7E555A86-2585-4D7A-BBB5-E4F71D14FD0E} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B}
{EEF0B083-6131-4C4E-96AD-FC9EA571E941} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{EDDEBF6E-19A7-46F4-8BA4-FDFF5F4D5F28} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B}
{45AED52D-E4D4-40FE-B310-433B93853F1C} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{D23999B0-B2F7-4DD9-AA35-09F385E36726} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B}
{AC78B418-5216-49F6-9084-BB4A0241A2DA} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
{EC72C9AD-625C-4AA8-A7CC-744515E06F1E} = {5691A6DD-53B9-4CE0-A3C9-3D4F815E2120}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {F7E6D5C4-B3A2-9180-7F6E-5D4C3B2A1908}
EndGlobalSection
EndGlobal
================================================
FILE: README.md
================================================
# Network Optimizer for UniFi
[Releases](https://github.com/Ozark-Connect/NetworkOptimizer/releases) · [Packages](https://github.com/orgs/Ozark-Connect/packages?repo_name=NetworkOptimizer) · [Commits](https://github.com/Ozark-Connect/NetworkOptimizer/commits) · [Stargazers](https://github.com/Ozark-Connect/NetworkOptimizer/stargazers) · [License](https://github.com/Ozark-Connect/NetworkOptimizer/blob/main/LICENSE)
## THANK YOU to all of my Sponsors
Genuinely, thank you so much to everybody for taking the time to use Network Optimizer and give it a place on your network(s). It really means a lot to receive all of the bug reports, feature requests, feedback, support, and donations. It's a whole new experience compared to writing code at a day job, and it greatly motivates me to keep going!
## New: API Key auth to console
Connect to your UniFi Console using an API key instead of a username and password. Generate one in UniFi Network under Integrations -> Create New API Key. The key is encrypted at rest and never exposed in logs or the UI. Useful for sites where you don't want to create a Local Admin, or when you're using UniFi Fabrics, which no longer lets you create Local Admin users.
## New: WAN Steering
UniFi makes you choose between WAN Failover and Load Balancing, and its Policy-Based Routes can only match by destination IP or domain - not port or protocol. WAN Steering removes both limitations. Keep your primary WAN for responsive, latency-sensitive traffic by default, and selectively load balance bulk traffic - Steam downloads, OS updates, Xbox downloads - across your secondary connections so they're not just sitting idle waiting for a failover event.
Route by source, destination, port, or protocol with full load balancing support. Pin gaming traffic to your fastest link while HTTP/HTTPS flows get split 50/50 across all your WANs. Health-check failover, automatic rule recovery after gateway reprovisioning, and zero impact to gateway performance.
## New: HTTPS Reverse Proxy
Enable HTTPS with automatic Let's Encrypt certificates using the included [Traefik reverse proxy](https://github.com/Ozark-Connect/NetworkOptimizer-Proxy). It forces HTTP/1.1 for speed tests (HTTP/2 multiplexing skews results) while keeping HTTP/2 for the main app. Windows MSI users can enable Traefik as an optional feature during install. HTTPS also unlocks GPS-based tagging on your self-hosted Speed Test and Signal walk test data, since browsers require a secure context for location access.
## New: Threat Intelligence
Your UniFi gateway's IPS is blocking threats all day long, but the UniFi Console buries this data in a flat event log with no context. Threat Intelligence pulls those IPS events and actually analyzes them: who's attacking you, where they're coming from, what they're after, and whether it's random noise or a coordinated effort.
The exposure analysis is where it gets useful. It cross-references your port forwards with actual threat data, so you can see which of your exposed services are getting hammered and from where. Attack sequence detection watches for the same source IP progressing through kill chain stages (reconnaissance to exploitation to post-exploitation) and flags the ones that look like real campaigns rather than drive-by scanning. Geographic and ASN breakdowns show you which countries and networks are generating the most traffic against your infrastructure.
CrowdSec CTI integration adds reputation scoring and MITRE ATT&CK classification to each source IP, so you're not just looking at raw events - you know whether that IP has a history of malicious activity across the broader internet.
## New: Alerts & Scheduling
Set up automated speed tests and security audits on a schedule, and get notified when something goes wrong. The scheduling engine handles recurring WAN and LAN speed tests with configurable frequency and time windows, plus periodic security audits that track your score over time.
Alert rules watch for the things that matter: audit score drops, WAN speed degradation, LAN speed regression against recent baselines, IPS attack chains reaching active exploitation, and scheduled task failures. Each rule has configurable severity thresholds and cooldown periods so you're not drowning in noise. Threshold-based rules (like "alert me when WAN speed drops 40% below the recent average") let you tune sensitivity to your environment.
Delivery channels support email (SMTP with STARTTLS), Discord, Slack, Microsoft Teams, and generic webhooks. Low-priority alerts can be set to digest-only mode so they get bundled into a daily summary instead of pinging you every time your neighbor microwaves lunch and your 2.4 GHz channel gets congested.
## New: Client Performance
A per-device analytics dashboard for any client on your network. Pick a device and get live signal monitoring, speed test history with download/upload trends, latency and jitter charts, network path visualization showing every hop and bottleneck link, and a connection timeline tracking AP roams and disconnects. Walk around with the page open on your phone (over HTTPS) and it builds a GPS-based signal heatmap of your actual coverage. Three tabs - Speed, Signal, and Connection - give you everything you need to troubleshoot why a device is slow or unstable.
---
You've set up VLANs, configured firewall rules, maybe even deployed a Pi-hole for DNS filtering. The UniFi controller gives you all this power, but it never actually tells you whether your configuration is any good. Are your firewall rules doing what you think they're doing? Is that IoT VLAN actually isolated, or did you miss something? When a device bypasses your DNS settings and phones home directly, would you even know?
Network Optimizer answers those questions. It connects to your UniFi controller, analyzes your configuration, and tells you what's working, what's broken, and what you should fix. No more guessing.
## Main Features
### Wi-Fi Optimizer & Signal Map
Site health scoring, RF environment analysis, client stats, roaming tracking, band steering, and airtime fairness across twelve analysis tabs. The Channel Recommendation engine models pairwise AP interference using signal propagation, live RF scan data, and triangulated neighbor networks, then factors in historical channel stress (utilization, interference, TX retries) to find the lowest-interference channel assignment across your entire network. It respects mesh uplink constraints, DFS preferences, and regulatory channel availability, and validates every recommended move against improvement thresholds so it won’t suggest changes that aren’t worth the disruption.
On the client side, you get a sortable, searchable table view with online/offline filtering, per-client signal and roaming history, and band-segmented Wi-Fi generation breakdowns showing exactly where your airtime is going. Environmental correlation heatmaps surface interference patterns by time of day and day of week, and every recommendation includes the specific UniFi Network UI navigation path to apply the change.
Signal Map lets you draw your building layout, place APs, and see a real-time RF propagation heatmap. Supports wall materials (drywall, concrete, glass, etc.), multi-floor buildings with cross-floor signal propagation, and per-AP antenna patterns pulled from your controller. Simulate TX power and antenna mode changes to see how they’d affect coverage before touching your actual config. Add planned APs to simulate coverage before buying or mounting hardware.
### Security Auditing
The audit engine runs 83 security checks across five categories and scores your network 0-100. This isn't a checkbox audit that just confirms you have a firewall; it actually analyzes what your rules do and whether they're doing it correctly.
Firewall analysis catches the subtle stuff: rules that shadow each other, allow rules that subvert your deny rules, allow rules that punch holes through your network isolation. VLAN security checks whether your IoT devices and cameras are actually on the networks you intended (using UniFi fingerprints, MAC OUI lookup, and port naming patterns). DNS security validates your DoH configuration, checks for bypass routes (including DoT, DoQ, and HTTP/3 DoH bypass), and verifies that your WAN interface DNS settings match what you configured. Port security looks at MAC restrictions, port isolation, and whether you've left unused ports enabled. UPnP analysis flags enabled UPnP, exposed privileged ports, and static port forwards you may have forgotten about.
You get a score, a breakdown by severity (critical, recommended, informational), and specific recommendations for each issue. Dismiss false positives if your setup is intentional, export PDF reports for documentation, track your score over time.
### WAN Steering
UniFi's WAN Failover keeps secondary connections idle until your primary goes down. Load Balancing splits everything across all WANs but gives you no control over what goes where. WAN Steering lets you have both: keep your primary WAN as the default for latency-sensitive traffic, and selectively load balance bulk traffic across your secondary connections with full port and protocol matching.
Define traffic classes by source, destination, port, or protocol, assign them to specific WANs or load balance across multiple WANs with configurable weight. A lightweight Go binary on your gateway inserts rules above UniFi's routing table, watches for WAN state changes and reprovisioning events, and recovers automatically. Health-check failover still works as expected - if a WAN goes down, traffic redistributes to healthy links.
### Adaptive SQM
If you're on cable, DSL, or cellular, you know bufferbloat. That lag spike when someone starts a download or joins a video call. SQM fixes it, but setting the bandwidth limits correctly is a guessing game; too high and SQM can't shape traffic effectively, too low and you're leaving speed on the table.
Network Optimizer handles this automatically. It supports dual-WAN with independent configuration per interface, connection profiles tuned for DOCSIS, fiber, wireless, Starlink, and cellular (each has different characteristics that matter). Scheduled speedtests adjust your rates based on actual measured performance. Latency monitoring backs off when congestion appears. One-click deployment pushes the configuration to your UDM or UCG gateway with persistence through reboots.
### WAN Speed Testing
Test your internet connection speed directly from the server. Measures download, upload, latency, loaded latency (bufferbloat detection), and jitter with full history and per-WAN connection tracking. Results are plotted in time-series charts filterable by connection, so you can compare providers and track performance over time across multi-WAN setups.
Also includes a standalone OpenSpeedTest server you can host on a VPS or remote machine, so you can run WAN speed tests against your own private infrastructure instead of relying on third-party speed test services. Configure it in Settings and get a ready-to-copy deploy command - see [External WAN Speed Test Server](docker/DEPLOYMENT.md#external-wan-speed-test-server-optional) in the deployment guide. If you're that kind of nerd.
### LAN Speed Testing
Ever wonder if that new switch is actually delivering 10 gigabit speeds? Or whether the cable run to the shop is the bottleneck?
Network Optimizer runs iperf3 tests between your gateway and network devices. It auto-discovers UniFi equipment from your controller, supports custom devices with per-device SSH credentials, auto-indexes iperf3 results from tests that other devices initiate against the built-in server (if enabled), and correlates results with hop count and infrastructure path, recording detailed Wi-Fi stats, link speeds, and UniFi firmware versions along the way.
Test history lets you track performance over time with all of that context, so you can identify and characterize any change in performance.

### Client Speed Testing
Test LAN speeds from any device without SSH access. Open a browser on your phone, tablet, or laptop and run a speed test; results are automatically recorded with device identification. For CLI users, the bundled iperf3 server accepts client connections and logs results. See [Client Speed Testing](docker/DEPLOYMENT.md#client-speed-testing-optional) in the deployment guide.

With HTTPS enabled, browser tests can collect location data (with permission) to build a Speed / Coverage Map showing real-world performance across your property or campus.

### Cellular Modem Monitoring
If you're running a U-LTE or U5G-Max for backup (or primary) connectivity, you can monitor signal quality from the dashboard: RSRP, RSRQ, SNR, cell tower info, and connection status. Supports multiple modems with easy navigation between them.

### UPnP Inspector
Ever wonder what ports your network is actually exposing to the internet? Your Xbox, Plex server, and smart home devices are all punching holes through your firewall via UPnP, and UniFi doesn't make it easy to see what's going on.
The UPnP Inspector puts it all in one place: every dynamic UPnP mapping and static port forward, grouped by device, with color-coded status so you can see at a glance what's active, what's idle, and what's about to expire. Add notes to remember what each mapping is for (because you will forget). Search and filter when you're hunting for that one port that's causing problems.
### Coming Soon
Cable modem stats (signal levels, uncorrectables, T3/T4 timeouts) for those of you fighting with your ISP about line quality.
## Requirements
- UniFi Console (aka Controller) - UDM, UCG, UDR, CloudKey, or self-hosted UniFi Network Server
- Network access to your UniFi Console API (HTTPS)
Most features work with just API access. SSH is only needed for speed testing and Adaptive SQM:
| Feature | SSH needed? |
|---------|------------|
| Security Audit | No |
| Config Optimizer | No, but Gateway SSH required for upcoming features |
| Wi-Fi Optimizer | No |
| Threat Intelligence | No |
| Alerts & Scheduling | No (schedules speed tests that may require SSH) |
| Client Speed Test | No |
| WAN Speed Test | No, but gateway-based requires Gateway SSH |
| LAN Speed Test | Yes - Gateway SSH and/or Device SSH |
| WAN Steering | Yes - Gateway SSH |
| Adaptive SQM | Yes - Gateway SSH |
To enable SSH, see [SSH Configuration](docker/DEPLOYMENT.md#unifi-ssh-configuration) in the Deployment Guide. SSH must be configured via the UniFi web interface (not the mobile app).
## Installation
| Platform | Method | Guide |
|----------|--------|-------|
| Linux Server | Docker (recommended) | [Deployment Guide](docker/DEPLOYMENT.md#1-linux--docker-recommended) |
| Proxmox VE | LXC one-liner | [Proxmox Guide](scripts/proxmox/README.md) |
| Synology/QNAP/Unraid | Docker | [NAS Deployment](docker/DEPLOYMENT.md#3-nas-deployment-docker) |
| Home Assistant | Add-ons | [Home Assistant](docker/DEPLOYMENT.md#5-home-assistant) |
| Windows | Installer (recommended) | [Download from Releases](https://github.com/Ozark-Connect/NetworkOptimizer/releases) |
| macOS | Native (best performance) | [macOS Installation](docs/MACOS-INSTALLATION.md) |
| Linux | Native (no Docker) | [Linux Native](docker/NATIVE-DEPLOYMENT.md#linux-deployment) |
Docker Desktop on macOS and Windows limits network throughput for speed testing. For accurate multi-gigabit measurements, use native deployment.
### HTTPS Reverse Proxy
For HTTPS with automatic Let's Encrypt certificates, use [NetworkOptimizer-Proxy](https://github.com/Ozark-Connect/NetworkOptimizer-Proxy) - a Traefik setup that forces HTTP/1.1 for speed tests (HTTP/2 multiplexing skews results) while keeping HTTP/2 for the main app. Proxmox LXC and Windows MSI users can enable Traefik as an optional feature during install. This also allows for simpler enablement of GPS-based tagging on your self-hosted Speed Test and Signal walk test data as browsers require HTTPS for location data to flow.
### Quick Start (Linux Docker)
**Option A: Pull Docker Image (Recommended)**
```bash
mkdir network-optimizer && cd network-optimizer
curl -o docker-compose.yml https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.prod.yml
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/.env.example
cp .env.example .env
docker compose up -d
# Check logs for the auto-generated admin password
docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
**Option B: Build from Source**
```bash
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer/docker
cp .env.example .env
docker compose build
docker compose up -d
# Check logs for the auto-generated admin password
docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
Open http://localhost:8042
### Quick Start (Proxmox)
```bash
bash -c "$(wget -qLO - https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/proxmox/install.sh)"
```
### First Run
1. Go to Settings and enter your UniFi controller URL
2. Create a **Local Access Only** account on your controller (Ubiquiti SSO won't work):
- Quick: Super Admin role
- Restricted: Network View Only, Protect View Only, User Management None
- See the in-app setup guide or [detailed instructions](docker/DEPLOYMENT.md#unifi-account)
3. Click Connect to authenticate
4. Navigate to Audit to run your first security scan
## Project Structure
```
src/
├── NetworkOptimizer.Web # Blazor web UI
├── NetworkOptimizer.Alerts # Alerts & Scheduling engine
├── NetworkOptimizer.Audit # Security Audit
├── NetworkOptimizer.Core # Shared helpers and utilities
├── NetworkOptimizer.Diagnostics # Config Optimizer
├── NetworkOptimizer.Monitoring # SNMP/SSH polling
├── NetworkOptimizer.Reports # PDF/Markdown report generation
├── NetworkOptimizer.Sqm # Adaptive SQM
├── NetworkOptimizer.Storage # SQLite database
├── NetworkOptimizer.Threats # Threat Intelligence
├── NetworkOptimizer.UniFi # UniFi API client
├── NetworkOptimizer.WiFi # Wi-Fi Optimizer
├── cfspeedtest/ # WAN Speed Test (binary for gateway)
└── OpenSpeedTest/ # Client Speed Test
```
## Tech Stack
.NET 10, Blazor Server, SQLite, iperf3, SSH.NET, QuestPDF, OpenSpeedTest™, Go (WAN speed test binary)
## Password Reset
If you forget the admin password, use the reset script for your platform:
**Docker / macOS / Linux:**
```bash
curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.sh | bash
```
**Windows (PowerShell as Administrator):**
```powershell
irm https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.ps1 -OutFile reset-password.ps1
.\reset-password.ps1
```
The script stops the service, clears the password, restarts, and shows you the new temporary password.
## Contributing
If you find issues, report them via GitHub Issues. Include your UniFi device models and controller version. Sanitize credentials and IPs before attaching logs.
## License
Business Source License 1.1
**Licensor:** Ozark Connect
**Licensed Work:** Network Optimizer for UniFi
**Personal Use:** You may use the Licensed Work for personal, non-commercial purposes on up to three sites.
**Commercial Use:** Use by managed service providers (MSPs), network installers, IT consultants, or any entity using this software in the delivery of paid services requires a commercial license.
**Change Date:** January 1, 2028
**Change License:** Apache License 2.0
For commercial licensing inquiries, contact tj@ozarkconnect.net.
© 2026 Ozark Connect
## Support
- Issues: [GitHub Issues](https://github.com/Ozark-Connect/NetworkOptimizer/issues)
- Documentation: See component READMEs in `src/` and `docker/`
## Other Projects
- [UniFi Lightshow](https://github.com/Ozark-Connect/unifi-lightshow) - Custom RGB light show controller for UniFi Etherlighting LEDs. Turn your switch rack into a spatial light canvas with SignalRGB integration, seasonal effects, and multi-switch support.
- [UNVR NAS Backup](https://github.com/Ozark-Connect/unvr-nas-backup) - Automated Protect camera backup from UniFi NVR to NAS storage.
---
Network Optimizer for UniFi is an independent project by Ozark Connect and is not affiliated with, endorsed by, or sponsored by Ubiquiti, Inc. Ubiquiti, UniFi, UDM, and Cloud Key are trademarks or registered trademarks of Ubiquiti, Inc. All other trademarks are the property of their respective owners.
================================================
FILE: TODO.md
================================================
# Network Optimizer - TODO / Future Enhancements
## LAN Speed Test
### Path Analysis Enhancements
- ✅ ~~Direction-aware bottleneck calculation~~ (done - `GetDirectionalEfficiency()` in PathAnalysisResult, separate TX/RX bottleneck in NetworkPathAnalyzer)
- More gateway models in routing limits table as we gather data
- Threshold tuning based on real-world data collection
- **Consistent wireless bottleneck attribution across test types:** LAN client speed tests show the bottleneck relative to the AP (e.g., "[AP] Back Yard (wireless)") while WAN client speed tests show it relative to the client (e.g., "[Phone] TJ iPhone (wireless)"). This is because WAN client paths reverse hops and swap ingress/egress, which flips the perspective. The wireless link is the same physical connection - both descriptions are technically correct but inconsistent. Investigate unifying to always name the AP side, since that's what users can control. Relevant code: `CalculateWanClientPathAsync` hop reversal/swap and `CalculateBottleneck` wireless link attribution.
### ✅ ~~Scheduled LAN Speed Test~~ (done - Alerts & Scheduling feature)
### ✅ ~~Scheduled WAN Speed Test~~ (done - Alerts & Scheduling feature)
## Alerts & Scheduling
### ✅ ~~LAN Speed Test Schedule: UniFi Device Targets~~ (done)
### DST-Aware Schedule Time Display
- Schedule start times are stored as UTC hour/minute and converted to local for display using `DateTime.UtcNow.Date.ToLocalTime()`
- This uses the current day's DST offset, so a schedule created at 6:00 AM CDT (UTC-5) displays as 5:00 AM during CST (UTC-6)
- The read-only view (`FormatStartTime`) and edit form (`UtcToLocalTimeOnly`) are consistent with each other, but both shift by an hour across DST transitions
- Actual execution time is correct (UTC-based) - only the displayed local time drifts
- **Affected code:** `Alerts.razor: FormatStartTime()`, `UtcToLocalTimeOnly()`, `ParseTimeInput()`
- **Options:** Store IANA timezone per schedule, use `TimeZoneInfo.ConvertTimeFromUtc`, or store local time + timezone
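A minimal sketch of the `TimeZoneInfo.ConvertTimeFromUtc` option, assuming a per-schedule timezone id gets added to the schema (the `timeZoneId` and `runDateUtc` parameters here are hypothetical):
```csharp
using System;

static class ScheduleDisplay
{
    // Convert the stored UTC hour/minute to local wall-clock time anchored to the
    // actual run date, so the DST offset in effect on that day is used rather than today's.
    public static string FormatStartTime(int utcHour, int utcMinute, string timeZoneId, DateTime runDateUtc)
    {
        var tz = TimeZoneInfo.FindSystemTimeZoneById(timeZoneId);
        var utc = new DateTime(runDateUtc.Year, runDateUtc.Month, runDateUtc.Day,
                               utcHour, utcMinute, 0, DateTimeKind.Utc);
        return TimeZoneInfo.ConvertTimeFromUtc(utc, tz).ToString("h:mm tt");
    }
}
```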
### Threat Alert Dedup Tuning (if users report noise)
Current state (as of v1.5.x): Dedup is working - event-level dedup via InnerAlertId, pattern-level dedup via DedupKey with 6h merge window, rule-level cooldown at 1h. No spam reported yet, but here are levers to pull if it gets noisy:
**ScanSweep re-alerting for persistent scanners**
- Currently: Same IP re-alerts every ~2h if it keeps scanning (new events push LastSeen past LastAlertedAt, then 1h rule cooldown expires)
- Option A: Bump `attack_pattern` rule cooldown from 1h to 6h (matches the pattern merge window - one alert per scan window)
- Option B: Change `GetUnalertedPatternsAsync` to require event count increase (e.g., `EventCount > previousEventCount * 1.5`) instead of just `LastSeen > LastAlertedAt` - see the sketch below
- Option C: Leave as-is - ongoing scanning is arguably worth periodic notification
- Trade-off: Less noise vs missing escalation of an ongoing scan that adds new ports
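A sketch of the Option B predicate; `eventCount`/`lastAlertedEventCount` are assumed stand-ins for fields on the stored pattern:
```csharp
// Re-alert only when the pattern has grown materially since the last alert,
// not merely because LastSeen advanced past LastAlertedAt.
static bool ShouldReAlert(int eventCount, int lastAlertedEventCount) =>
    lastAlertedEventCount == 0 || eventCount > lastAlertedEventCount * 1.5;
```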
**DDoS alert cooldown key uses wrong IP**
- Currently: `DeviceIp = firstSourceIp` means the cooldown key is `{ruleId}:{randomSourceIp}`. For multi-source attacks (DDoS), the first source IP in the sorted list can shift between cycles, defeating cooldown.
- Fix: Use the target IP (from DedupKey `ddos:{targetIp}:{port}`) as DeviceIp for DDoS patterns, so cooldown groups by what's being attacked, not who's attacking (sketched below)
- Low priority since DDoS pattern dedup (DedupKey) now merges patterns correctly - this only matters if the pattern is re-detected after the 6h window
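A sketch of that fix, assuming the DedupKey format shown above:
```csharp
// Derive the cooldown DeviceIp from the stable DedupKey ("ddos:{targetIp}:{port}")
// so multi-source attacks group by target rather than by a shifting source IP.
static string CooldownDeviceIp(string dedupKey, string firstSourceIp)
{
    var parts = dedupKey.Split(':');
    return parts.Length >= 2 && parts[0] == "ddos" ? parts[1] : firstSourceIp;
}
```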
**Early-stage chain alert granularity**
- Currently: Re-alerts on more stages OR (6h elapsed AND 2x events). The `attack_chain_attempt` rule has 1h cooldown.
- If noisy: Increase cooldown to 6h, or only re-alert on stage progression (not event count growth)
- If too quiet: Reduce the 2x event multiplier to 1.5x
- These are Info severity - users who find them noisy can disable rule 13 in alert settings
## Security Audit / PDF Report
### Manual Network Purpose Override
- Allow users to manually set the purpose/classification of their Networks in Security Audit Settings
- Currently: Network purpose (IoT, Security, Guest, Management, etc.) is auto-detected from network name patterns
- Problem: Users with non-standard naming conventions get incorrect VLAN placement recommendations
- Implementation:
- Add "Network Classifications" section to Security Audit Settings page
- List all detected networks with current auto-detected purpose
- Allow override via dropdown: Corporate, Home, IoT, Security, Guest, Management, Printer, Unknown
- Store overrides in database (new table or extend existing settings)
- VlanAnalyzer should check for user overrides before applying name-based detection (see the sketch after this list)
- Benefits:
- Users with custom naming schemes can get accurate audits
- Explicit classification removes ambiguity
- Auto-detection still works as default for users who don't configure
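A minimal sketch of the override-aware lookup, with a hypothetical overrides dictionary keyed by network id (the `DetectFromName` body is just a stand-in for the existing name-pattern detection):
```csharp
using System;
using System.Collections.Generic;

enum NetworkPurpose { Corporate, Home, IoT, Security, Guest, Management, Printer, Unknown }

static class PurposeResolver
{
    // A user override wins; name-based auto-detection stays the default.
    public static NetworkPurpose Resolve(string networkId, string networkName,
        IReadOnlyDictionary<string, NetworkPurpose> overrides) =>
        overrides.TryGetValue(networkId, out var purpose) ? purpose : DetectFromName(networkName);

    // Stand-in for the existing name-pattern auto-detection.
    static NetworkPurpose DetectFromName(string name) =>
        name.Contains("iot", StringComparison.OrdinalIgnoreCase) ? NetworkPurpose.IoT : NetworkPurpose.Unknown;
}
```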
### Home → IoT Return Traffic Rule Suggestion
- When Home network has isolation blocking IoT, suggest adding a return traffic rule or explicit allow
- **Problem:** If Home blocks all traffic to IoT (good for security), return traffic from IoT devices won't work
- Example: Smart TV on IoT can't respond to casting from phone on Home
- Example: IoT device can't respond to control commands from Home devices
- **Detection:** Check for block rule Home → IoT without a corresponding:
- Allow rule Home → IoT (with specific IPs/devices/ports), OR
- Return traffic allow rule IoT → Home (RESPOND_ONLY / ESTABLISHED,RELATED)
- **Recommendation options:**
1. Add specific allow rules from Home to IoT devices that need control (e.g., smart TVs, speakers)
2. Add a RESPOND_ONLY allow rule from IoT → Home to permit return traffic
- **Severity:** Informational (user may have intentionally blocked bidirectional)
- **Context:** This is a usability issue, not a security issue - blocking return traffic is actually more secure
### Third-Party DNS Firewall Rule Check
- When third-party DNS (Pi-hole, AdGuard, etc.) is detected on a network, check for a firewall rule blocking UDP 53 to the gateway
- Without this rule, clients could bypass third-party DNS by using the gateway directly
- Implementation: Look for firewall rules that DROP/REJECT UDP 53 from the affected VLANs to the gateway IP (sketched below)
- Severity: Recommended (not Critical, since some users intentionally allow fallback)
- **Status:** Awaiting user feedback on current third-party DNS feature before implementing
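A detection sketch under stated assumptions - `FwRule` here is a hypothetical shape, not the real firewall rule model:
```csharp
using System.Collections.Generic;
using System.Linq;

record FwRule(string Action, string Protocol, int? DstPort, string Source, string Destination);

static class DnsBypassCheck
{
    // True if some rule drops or rejects UDP 53 from the VLAN to the gateway IP.
    public static bool HasBypassBlock(IEnumerable<FwRule> rules, string vlanCidr, string gatewayIp) =>
        rules.Any(r => (r.Action is "drop" or "reject")
                    && r.Protocol == "udp" && r.DstPort == 53
                    && r.Source == vlanCidr && r.Destination == gatewayIp);
}
```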
### ✅ ~~Printer/Scanner Audit Logic Consolidation~~ (done)
- Consolidated in `VlanPlacementChecker.CheckPrinterPlacement()`, called from `ConfigAuditEngine`
## Performance Audit
New audit section focused on network performance issues (distinct from security audit).
### Port Link Speed Analysis
- Crawl the entire network topology and identify port link speeds that don't make sense
- Reuse the logic from Speed Test network path tracing
- Examples of issues to detect:
- 1 Gbps uplink on a switch with 2.5/10 Gbps devices behind it
- Mismatched duplex settings
- Ports negotiated below their capability (e.g., 100 Mbps on a Gbps port)
- Bottleneck chains where downstream capacity exceeds upstream link
- Display as performance findings with recommendations
### Jumbo Frames Suggestion
- Suggest enabling Jumbo Frames as a global switching setting when high-speed devices are present
- Trigger: 2+ devices connected at 5 GbE or 10 GbE on access ports (not infrastructure uplinks)
- Rationale: Jumbo frames (9000 MTU) reduce CPU overhead and improve throughput for high-speed transfers
- Implementation (sketched after this list):
- Scan port_table for ports with speed >= 5000 Mbps
- Exclude infrastructure ports (uplinks, trunks between switches)
- If count >= 2, check if Jumbo Frames is already enabled globally
- If not enabled, suggest enabling with explanation of benefits
- Caveats to mention in recommendation:
- All devices in the path must support jumbo frames
- Some IoT devices may not support non-standard MTU
- WAN traffic still uses standard 1500 MTU
- Severity: Informational (performance optimization, not a problem)
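The trigger condition as a sketch; `AccessPort` is a hypothetical stand-in for a port_table entry:
```csharp
using System.Collections.Generic;
using System.Linq;

record AccessPort(int SpeedMbps, bool IsUplink);

static class JumboFrameCheck
{
    // Suggest jumbo frames when 2+ non-uplink ports are linked at 5 GbE or faster
    // and the global setting isn't already on.
    public static bool ShouldSuggest(IEnumerable<AccessPort> ports, bool jumboAlreadyEnabled) =>
        !jumboAlreadyEnabled && ports.Count(p => !p.IsUplink && p.SpeedMbps >= 5000) >= 2;
}
```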
### MTU Mismatch Detection
- Detect MTU mismatches along network paths that cause fragmentation or packet drops
- Implementation (see the sketch after this list):
- During path tracing, SSH into each hop (gateway, switches) to query interface MTU
- Gateway: `ip link show <iface>` or parse `/sys/class/net/<iface>/mtu`
- Switches: Check port MTU via SSH (UniFi switches support shell access)
- Compare MTU values across the path - all devices should match
- Issues to detect:
- Standard MTU (1500) mixed with Jumbo Frames (9000) in same path
- Intermediate device with lower MTU than endpoints (causes fragmentation)
- Jumbo Frames enabled on LAN but not on inter-switch uplinks
- VPN/tunnel overhead not accounted for (e.g., WireGuard needs ~1420 MTU)
- Display: Show MTU at each hop in path analysis, flag mismatches
- Severity: Warning (mismatches cause performance degradation or silent drops)
- Prerequisite: Reuse SSH infrastructure from SQM/gateway speed tests
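A comparison sketch, assuming per-hop (device, MTU) pairs have already been gathered over SSH:
```csharp
using System.Collections.Generic;

static class MtuCheck
{
    // Flag any hop whose MTU differs from the first hop on the path.
    public static IEnumerable<string> FindMismatches(IReadOnlyList<(string Device, int Mtu)> hops)
    {
        if (hops.Count == 0) yield break;
        var expected = hops[0].Mtu;
        for (var i = 1; i < hops.Count; i++)
            if (hops[i].Mtu != expected)
                yield return $"{hops[i].Device}: MTU {hops[i].Mtu} differs from {expected} at path start";
    }
}
```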
### WiFi Optimizer Enhancements
- **Power & Coverage: per-band signal classification** - `GetSignalClass` and `GetSignalBucketClass` in PowerCoverageAnalysis.razor hardcode `RadioBand.Band5GHz` because they operate on aggregate values (avg signal, dBm bucket ranges) without per-client band context. Could classify each client by their actual band first, then aggregate the results. The signal distribution bar chart would need to either split by band or color each client's contribution by their band. Current behavior matches pre-band-aware thresholds so no regression, just a missed opportunity.
- **MLO per-AP detection:** Check MLO status per-AP based on which SSIDs each AP broadcasts (via vap_table), not just global WLAN config. An AP only has MLO impact if it broadcasts an MLO-enabled SSID.
### AP Catalog: Enforce 5 GHz EIRP Cap (US Regulatory)
- FCC caps EIRP at 36 dBm for 5 GHz non-DFS (UNII-3, ch 149-165) and 30 dBm for UNII-1 (ch 36-48)
- The TX Power by Access Point section currently shows uncapped EIRP (TX + gain), which can exceed 36 dBm for high-gain models, implying there's TX power headroom when there isn't
- Already handled for some models on 6 GHz (E7-Campus, E7-Audience have EIRP-aware TX caps in catalog)
- **Affected 5 GHz models (TX + gain > 36):**
- U7-Outdoor directional: 26 + 13 = 39 (cap TX to 23)
- U7-Pro-Outdoor directional: 26 + 11 = 37 (cap TX to 25)
- E7-Campus: 30 + 12 = 42 (cap TX to 24)
- E7-Audience narrow: 30 + 15 = 45 (cap TX to 21)
- E7-Audience wide: 30 + 11 = 41 (cap TX to 25)
- UWB-XG narrow: 25 + 15 = 40 (cap TX to 21)
- **Options:**
1. Cap MaxTxPowerDbm in the catalog so TX + gain <= 36 for all 5 GHz entries (like we do for 6 GHz on E7 models)
2. Add regulatory-domain-aware EIRP capping in the display/calculation layer (more complex, handles UNII-1 vs UNII-3 differently)
3. Show "regulatory max EIRP" alongside "hardware max EIRP" in the UI
- Option 1 is simplest and matches the existing 6 GHz pattern (sketched after this list). Option 2 is more accurate but needs channel-to-sub-band mapping.
- **Note:** DFS channels (UNII-2/2C) have lower limits but are dynamic - firmware handles those
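Option 1 amounts to a one-liner per catalog entry:
```csharp
using System;

static class EirpCap
{
    // Cap catalog TX power so TX + antenna gain never exceeds the 36 dBm
    // FCC EIRP limit for 5 GHz UNII-3.
    // E7-Audience narrow beam: Min(30, 36 - 15) = 21 dBm, matching the table above.
    public static int CappedTxPowerDbm(int hardwareMaxTxDbm, int antennaGainDbi, int eirpLimitDbm = 36)
        => Math.Min(hardwareMaxTxDbm, eirpLimitDbm - antennaGainDbi);
}
```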
### Floor Plan Heatmap - Per-Channel Frequency
- Current heatmap uses a single center frequency per band (2437, 5500, 6500 MHz)
- 5 GHz spans 5150-5850 MHz (channels 36-165), ~1 dB FSPL difference at the extremes
- Material attenuation also varies across the band range
- Implementation (sketched after this list):
- Add `Channel` (or `FrequencyMhz`) to `PropagationAp` from UniFi radio config
- Map channel number to center frequency (e.g., ch 36 = 5180, ch 149 = 5745)
- Pass actual frequency to `ComputeSignalAtPoint` instead of band center
- Update `MaterialAttenuation` to interpolate between band values if needed
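The channel-to-frequency mapping follows the standard 802.11 formula (band base + 5 MHz per channel number); `Band` here is a stand-in for the existing `RadioBand` enum:
```csharp
using System;

enum Band { TwoGhz, FiveGhz, SixGhz }

static class ChannelMap
{
    public static int CenterFrequencyMhz(int channel, Band band) => band switch
    {
        Band.TwoGhz  => channel == 14 ? 2484 : 2407 + 5 * channel, // ch 6 -> 2437
        Band.FiveGhz => 5000 + 5 * channel,                        // ch 36 -> 5180, ch 149 -> 5745
        Band.SixGhz  => 5950 + 5 * channel,                        // ch 1 -> 5955
        _ => throw new ArgumentOutOfRangeException(nameof(band)),
    };
}
```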
### Floor Plan Heatmap - Channel Bandwidth & Per-Client Signal Modeling
- Current heatmap shows raw RSSI (dBm) with no awareness of channel bandwidth
- Wider channels raise the thermal noise floor, reducing effective SNR and usable range:
- 20 MHz: -96 dBm noise floor, 40 MHz: -93, 80 MHz: -90, 160 MHz: -87, 320 MHz: -84
- (assumes ~5 dB receiver noise figure)
- A -80 dBm signal gives 16 dB SNR on 20 MHz (decent) but only 7 dB on 160 MHz (unusable)
- Noise floor formula: -174 + 10*log10(BW_Hz) + NF_dB
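The formula and the numbers above, as code:
```csharp
using System;

static class NoiseModel
{
    // Thermal noise floor: -174 dBm/Hz + 10*log10(bandwidth in Hz) + receiver noise figure.
    public static double NoiseFloorDbm(double bandwidthMhz, double noiseFigureDb = 5.0)
        => -174.0 + 10.0 * Math.Log10(bandwidthMhz * 1e6) + noiseFigureDb;

    // -80 dBm RSSI: ~16 dB SNR at 20 MHz (-96 floor) but only ~7 dB at 160 MHz (-87 floor).
    public static double SnrDb(double rssiDbm, double bandwidthMhz)
        => rssiDbm - NoiseFloorDbm(bandwidthMhz);
}
```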
#### Per-Client Channel Width Negotiation (critical nuance)
- 802.11 negotiates channel width per-client based on capabilities. The AP does NOT force a
single channel width on all clients. A 160 MHz AP transmits to an 80 MHz client using 80 MHz.
- From the client's perspective, the noise floor matches ITS supported width, not the AP's config:
- Client supports 80 MHz on a 160 MHz AP -> client sees -90 dBm noise floor, not -87 dBm
- Client supports 40 MHz -> sees -93 dBm noise floor regardless of AP config
- The client's receiver only processes its supported bandwidth. The extra spectrum the AP has
configured is simply unused for that client's transmissions.
- This means UniFi Design Center's heatmap (and our current one) shows worst-case coverage for
clients negotiating the FULL configured width - which are typically the newest devices sitting
close to the AP where it doesn't matter anyway. The heatmap makes it look like coverage is
bricked when most clients actually have much better coverage than shown.
- Real-world: most clients are 80 MHz capable. Configuring 160 MHz gives 80 MHz coverage
footprint for those devices plus throughput bonus for 160 MHz clients when close enough.
- Downsides of wider AP config: consumes more spectrum (matters for multi-AP channel planning),
and DFS events on the secondary 80 MHz segment can force the whole channel to shift,
briefly disrupting all clients including 80 MHz ones.
#### Implementation
- Add `ChannelWidthMhz` to `PropagationAp` (pull from UniFi radio config)
- **Default view**: show coverage based on the AP's configured channel width (current behavior
plus bandwidth-aware color thresholds) - this is the conservative/worst-case view
- **Per-capability tier view**: let users toggle between client capability tiers to see what
coverage actually looks like for their devices:
- "160 MHz clients" (worst case, smallest coverage)
- "80 MHz clients" (most common, realistic coverage)
- "40 MHz clients" (older devices, best coverage)
- "20 MHz clients" (legacy, maximum coverage)
The selected tier overrides the AP's configured width for noise floor and color threshold
calculations. Signal strength (RSSI) stays the same - only SNR interpretation changes.
- Alternatively/additionally, offer an SNR view mode that shows signal quality (dB above noise
floor) rather than raw power (dBm), making bandwidth impact visually obvious
- Consider showing a summary callout: "Most of your clients support 80 MHz - here's what they
actually experience" to educate users about the per-client negotiation reality
#### Implemented Features (v1.x)
The following were implemented in the WiFi Optimizer feature:
- ✅ Channel utilization analysis per AP (Airtime Fairness tab)
- ✅ Client distribution balance across APs (AP Load Balance tab)
- ✅ Signal strength / SNR reporting per client (multiple components)
- ✅ Interference detection - co-channel, adjacent channel (Spectrum Analysis tab)
- ✅ Band steering effectiveness analysis (Band Steering tab)
- ✅ Roaming topology visualization (Connectivity Flow tab)
- ✅ Airtime fairness issues - legacy client impact (Airtime Fairness tab)
- ✅ Site health score with dimensional breakdown
- ✅ Power/coverage analysis with TX power recommendations
## SQM (Smart Queue Management)
### Retrofit Custom Cloudflare Speed Test Binary into Adaptive SQM
- Replace current WAN speed test approach in Adaptive SQM with the custom Cloudflare speed test binary
- The Cloudflare speed test provides more accurate and consistent WAN throughput measurements
- Integration points: SQM calibration, periodic re-calibration, manual speed test triggers
- Should use the same binary/approach as the standalone Cloudflare speed test projects
### Multi-WAN Support
- Support for 3rd, 4th, and N number of WAN connections
- Currently limited to two WAN connections
- Should dynamically detect and configure all available WAN interfaces
### GRE Tunnel Support (Cellular WAN)
- Support GRE tunnel connections from cellular modems (U5G-Max, U-LTE)
- These create GRE tunnels that should be treated as valid WAN interfaces for SQM
- ✅ ~~PPPoE support~~ (done - uses physical interface for lookup, tunnel interface for SQM)
## Multi-Tenant / Multi-Site Support
### Multi-Tenant Architecture
- Add multi-tenant support for single deployment serving multiple sites
- Current architecture: Local console access with local UniFi API
- Target architecture: Support tunneled access to multiple UniFi sites from one deployment
- Deployment models:
- **Local (default):** Deploy instance at each site for direct LAN API access
- **Centralized (optional):** Single deployment with VPN/tunnel access to multiple client networks
- Requires unique IP structure per client (no overlapping subnets)
- Relies on same local API access, just over tunnel instead of local LAN
- Use cases: MSPs managing multiple customer sites, enterprises with distributed locations
- Considerations:
- Site/tenant isolation for data and configuration
- Per-site authentication and API credentials
- Tenant-aware database schema or separate databases per tenant
- Site selector/switcher in UI
- Aggregate dashboard views across sites (optional)
### Federated Authentication & Identity
- External IdP integration for enterprise/MSP deployments
- Protocol support:
- **SAML 2.0:** Enterprise SSO (Okta, Azure AD, ADFS, etc.)
- **OIDC/OAuth 2.0:** Modern identity providers (Auth0, Keycloak, Google Workspace)
- Architectural preparation for RBAC (Role-Based Access Control):
- Abstract authentication layer to support pluggable identity sources
- Claims/roles mapping from IdP to local permissions
- Future: Granular permissions per site/tenant (view-only, operator, admin)
- **Token model upgrade** (prerequisite for multi-user):
- Move from current single JWT to proper access_token + refresh_token OIDC model
- Short-lived access tokens (1 hour) with long-lived refresh tokens
- Applies to local auth as well, not just external IdP
- Token rotation and revocation support
- Secure refresh token storage (DB-backed with family tracking; see the sketch after this section)
- Considerations:
- SP-initiated vs IdP-initiated login flows
- Just-in-time (JIT) user provisioning from IdP claims
- Session management and token refresh across federated sessions
- Fallback local auth for break-glass scenarios
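A minimal sketch of a DB-backed refresh token row with family tracking (every name here is an assumption, not an existing schema):

```csharp
public record RefreshToken
{
    public Guid Id { get; init; }
    public Guid FamilyId { get; init; }          // all rotations of one login share a family
    public string TokenHash { get; init; } = ""; // store a hash, never the raw token
    public DateTimeOffset ExpiresAt { get; init; }
    public DateTimeOffset? RevokedAt { get; init; }
    public Guid? ReplacedById { get; init; }     // set on rotation; presenting a replaced
                                                 // token again revokes the whole family
}
```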
## Distribution
### ISO/OVA Image for MSP Deployment
- Create distributable ISO and/or OVA image for MSP users
- Pre-configured Linux appliance with Network Optimizer installed
- Easy deployment to customer sites without Docker expertise
- Consider: Ubuntu Server base, auto-updates, web-based initial setup
## General
### Refactor Program.cs - Extract Business Logic and Break Up API Sets
- **Issue:** `Program.cs` has grown into a monolith with schedule executor implementations, API endpoint registrations, and business logic all inline
- **Goal:** Clean separation of concerns:
- Extract schedule executor registrations into a dedicated class (e.g., `ScheduleExecutorSetup.cs`)
- Break API endpoints into logical groups using minimal API route groups or extension methods (e.g., `SpeedTestEndpoints.cs`, `AuditEndpoints.cs`, `ThreatEndpoints.cs`) - see the sketch below
- Move inline business logic out of endpoint handlers into services
- **Priority:** Medium - not blocking but makes maintenance harder as the app grows
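A minimal sketch of the route-group extraction (the endpoint paths and `ISpeedTestService` are stand-ins, not the actual API surface):

```csharp
public static class SpeedTestEndpoints
{
    public static IEndpointRouteBuilder MapSpeedTestEndpoints(this IEndpointRouteBuilder app)
    {
        var group = app.MapGroup("/api/speedtest");

        // Handlers stay thin; the logic lives in an injected service.
        group.MapGet("/results", (ISpeedTestService svc) => svc.GetResultsAsync());
        group.MapPost("/run", (ISpeedTestService svc) => svc.RunAsync());

        return app;
    }
}

// Program.cs then shrinks to:
// app.MapSpeedTestEndpoints();
// app.MapAuditEndpoints();
// app.MapThreatEndpoints();
```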
### Refactor DnsSecurityAnalyzer.AnalyzeAsync() Parameter Hell
- **Issue:** `DnsSecurityAnalyzer.AnalyzeAsync()` now takes 12 parameters (was 7, grew during DNAT/firewall groups/URL work):
```csharp
public async Task AnalyzeAsync(
JsonElement? settingsData, List? firewallRules,
List? switches, List? networks,
JsonElement? deviceData, int? customDnsManagementPort,
JsonElement? natRulesData, List? dnatExcludedVlanIds,
string? externalZoneId, FirewallZoneLookup? zoneLookup,
Dictionary? firewallGroups,
string? customDnsManagementUrl)
```
Plus 5 convenience overloads that chain to it.
- **Problems:**
- Easy to pass arguments in wrong order (all are nullable)
- Tests are verbose with many `null` placeholders
- Adding new parameters requires updating all call sites and overloads
- The overload chain (lines 47-77) is getting unwieldy
- **Proposed fix:** Create `DnsAnalysisRequest` record/class:
```csharp
public record DnsAnalysisRequest
{
public JsonElement? SettingsData { get; init; }
public List? FirewallRules { get; init; }
public List? Switches { get; init; }
public List? Networks { get; init; }
public JsonElement? DeviceData { get; init; }
public int? CustomDnsManagementPort { get; init; }
public string? CustomDnsManagementUrl { get; init; }
public JsonElement? NatRulesData { get; init; }
public List? DnatExcludedVlanIds { get; init; }
public string? ExternalZoneId { get; init; }
public FirewallZoneLookup? ZoneLookup { get; init; }
public Dictionary? FirewallGroups { get; init; }
}
```
- **Benefits:**
- Named parameters make call sites self-documenting (see the example below)
- Adding new fields doesn't break existing callers
- Eliminates the 5 overloads - just one method with a request object
- Test setup becomes clearer
- **Also applies to:** Other analyzers with similar parameter patterns
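For illustration, a call site under the proposed shape (variable names hypothetical):

```csharp
var result = await analyzer.AnalyzeAsync(new DnsAnalysisRequest
{
    SettingsData = settingsData,
    FirewallRules = firewallRules,
    ExternalZoneId = externalZoneId,
    // everything else defaults to null - no wall of positional placeholders
});
```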
### Consolidate DNAT Rule Coverage Type Strings
- **Issue:** `DnatRuleInfo.CoverageType` uses magic strings: `"network"`, `"subnet"`, `"single_ip"`, `"inverted_address"`, `"interface"`
- **Current usage:** Set in `ParseSourceFilter()`, consumed in `Analyze()` switch statement
- **Fix:** Replace with a `DnatCoverageType` enum for type safety and discoverability (sketch below)
- **Scope:** `DnatDnsAnalyzer.cs` only - fully self-contained
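A minimal sketch of the enum (members mirror the current strings):

```csharp
public enum DnatCoverageType
{
    Network,
    Subnet,
    SingleIp,
    InvertedAddress,
    Interface,
}
```

`ParseSourceFilter()` would return the enum, and the `Analyze()` switch gains compile-time checking over the string comparisons.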
### ThirdPartyDnsDetector Probe Method Duplication
- **Issue:** Two overloads of `TryProbePiholeEndpointAsync` and `TryProbeAdGuardHomeEndpointAsync` - one takes a full URL, one takes IP+port+scheme. The logic is nearly identical.
- **Fix:** Unify into a single method that takes a URL string (sketch below). The IP+port caller can construct the URL before calling.
- **Scope:** `ThirdPartyDnsDetector.cs` only
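A minimal sketch of the unified shape (names illustrate the idea; the detector's actual signatures may differ):

```csharp
private async Task<bool> TryProbeEndpointAsync(string url, CancellationToken ct)
{
    try
    {
        using var response = await _httpClient.GetAsync(url, ct);
        return response.IsSuccessStatusCode;
    }
    catch (HttpRequestException)
    {
        return false; // unreachable host is a normal negative result
    }
}

// IP+port callers just build the URL first:
// await TryProbeEndpointAsync($"{scheme}://{ip}:{port}{path}", ct);
```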
### Rename ISpeedTestRepository to IGatewayRepository
- **Issue:** `ISpeedTestRepository` is a misleading name - it handles Gateway SSH settings, iperf3 results, AND SQM WAN configuration
- **Current location:** `src/NetworkOptimizer.Storage/Interfaces/ISpeedTestRepository.cs`
- **Proposed name:** `IGatewayRepository` (all methods are gateway-related)
- **Refactor scope:**
- Rename interface and implementation (`SpeedTestRepository.cs`)
- Update all DI registrations in `Program.cs`
- Update all injection sites across the codebase
- Consider if gateway SSH settings should be a separate repository
### Database Normalization Review
- Review SQLite schema for proper normal form (1NF, 2NF, 3NF)
- Ensure proper use of primary keys, foreign keys, and indices
- Audit table relationships and consider splitting denormalized data
- JSON columns are intentional for flexible nested data (e.g., PathAnalysisJson, RawJson)
- Consider: Separate Clients table with FK references instead of storing ClientMac/ClientName inline
### Normalize Environment Variable Handling
- Current: Mixed patterns for reading configuration
- Direct env var reads: `HOST_IP`, `APP_PASSWORD`, `HOST_NAME` (via `Environment.GetEnvironmentVariable()`)
- .NET configuration: `Iperf3Server:Enabled` (via `IConfiguration`, requires `Iperf3Server__Enabled` env var format)
- Problem: Inconsistent for native deployments (Docker translates `IPERF3_SERVER_ENABLED` → `Iperf3Server__Enabled`)
- Options:
1. Route everything through .NET configuration (use `__` notation everywhere)
2. Route everything through direct env var reads (simpler for native)
3. Support both patterns in app (check env var first, fall back to config - see the sketch below)
- Low priority but would improve consistency
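A minimal sketch of option 3 (the key names are the examples from the list above):

```csharp
// Check the raw env var first (native deployments), then fall back to
// IConfiguration (Docker's __ notation binds there automatically).
static string? GetSetting(IConfiguration config, string envVar, string configKey) =>
    Environment.GetEnvironmentVariable(envVar) ?? config[configKey];

// var enabled = GetSetting(config, "IPERF3_SERVER_ENABLED", "Iperf3Server:Enabled");
```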
### Debounce UI-Triggered Modem Polls
- **Issue:** Multiple rapid modem polls can occur when navigating between pages
- **Cause:** `CellularStatsPanel` triggers `PollModemAsync` on render when no cached stats exist; multiple component instances can poll simultaneously before any completes
- **Observed:** 4-5 polls within 4 seconds when navigating dashboard → settings
- **Fix:** Add a debounce or lock around UI-triggered polls in `CellularModemService` (see the sketch below)
- **Severity:** Low (causes extra SSH traffic but no errors)
- **Partial:** Basic `_isPolling` lock prevents concurrent polls, but no time-based debounce yet
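A minimal sketch of a time-based debounce layered on the existing lock (`ModemStats`, `_cachedStats`, and `PollOverSshAsync` are stand-ins, not the service's real members):

```csharp
public sealed class DebouncedModemPoller
{
    private readonly SemaphoreSlim _pollLock = new(1, 1);
    private static readonly TimeSpan MinPollInterval = TimeSpan.FromSeconds(10);
    private DateTimeOffset _lastPoll = DateTimeOffset.MinValue;
    private ModemStats? _cachedStats;

    public async Task<ModemStats?> PollModemAsync(CancellationToken ct)
    {
        // WaitAsync(0) = try-lock: if a poll is already in flight, serve the cache.
        if (!await _pollLock.WaitAsync(0, ct)) return _cachedStats;
        try
        {
            if (DateTimeOffset.UtcNow - _lastPoll < MinPollInterval)
                return _cachedStats; // debounce: recent result is fresh enough
            _cachedStats = await PollOverSshAsync(ct);
            _lastPoll = DateTimeOffset.UtcNow;
            return _cachedStats;
        }
        finally { _pollLock.Release(); }
    }

    private Task<ModemStats> PollOverSshAsync(CancellationToken ct) =>
        throw new NotImplementedException(); // the real SSH poll goes here
}

public sealed record ModemStats;
```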
### Shared IP-to-Client-Name Resolver
- Threat Dashboard resolves local IPs to UniFi client names inline (fetches clients, builds IP→name dict)
- Currently cached for 30 seconds (static across Blazor circuits) to avoid hammering the API
- **Note:** Real-time features (e.g., live threat feed, active monitoring) will need to invalidate/refresh the cache before using it, since device IPs can change via DHCP
- Other pages that display IPs could benefit from the same lookup:
- Security Audit (firewall rules referencing IPs)
- Config Optimizer (device references)
- Refactor into a shared service (e.g., `IClientNameResolver` in `NetworkOptimizer.Web/Services/`)
- Shared service should expose `InvalidateCache()` for real-time consumers
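A minimal sketch of that surface (method shapes are assumptions):

```csharp
public interface IClientNameResolver
{
    // Resolve a local IP to its UniFi client name, or null if unknown.
    Task<string?> ResolveAsync(string ipAddress, CancellationToken ct = default);

    // Real-time consumers call this before reads, since DHCP can move IPs.
    void InvalidateCache();
}
```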
### Uniform Date/Time Formatting in UI
- Audit all date/time displays across the UI for consistency
- Standardize format (e.g., "Jan 4, 2026 3:45 PM" vs "2026-01-04 15:45:00")
- Consider user timezone preferences
- Affected areas: Speed test results, audit history, device last seen, logs
## UniFi Device Classification (v2 API)
The UniFi v2 device API (`/proxy/network/v2/api/site/{site}/device`) returns multiple device arrays for improved device classification and VLAN security auditing.
### Device Arrays from v2 API
| Array | Description | VLAN Recommendation | Status |
|-------|-------------|---------------------|--------|
| `network_devices` | APs, Switches, Gateways | Management VLAN | Existing |
| `protect_devices` | Cameras, Doorbells, NVRs, Sensors | Security VLAN | Done |
| `access_devices` | Door locks, readers | Security VLAN | TODO |
| `connect_devices` | EV chargers, other Connect devices | IoT VLAN | TODO |
| `talk_devices` | Intercoms, phones | IoT/VoIP VLAN | TODO |
| `led_devices` | LED controllers, lighting | IoT VLAN | TODO |
### Protect Infrastructure Devices (SuperLink, Sensors, Chimes)
- Currently excluded from VLAN placement checks: SuperLink Hub, Sensors, Chimes, Bridges
- These are wired (SuperLink) or wireless Protect devices that aren't cameras/doorbells/NVRs
- VLAN placement is ambiguous - depends on user's network design:
- If Protect Console is on Security VLAN, these should follow
- If Protect Console is on Management VLAN, SuperLink could go either way
- Sensors and chimes carry security-sensitive data (motion, door open/close) - some users consider this Security VLAN worthy, others treat them as IoT
- Current `RequiresSecurityVlan` only covers the unambiguous set: cameras, doorbells, NVRs, AI Key
- Options:
1. Add these to `RequiresSecurityVlan` and always recommend Security VLAN
2. Tie recommendation to where the Protect Console itself lives (if Console is on Security, recommend Security for all Protect devices)
3. Leave it to the Manual Network Purpose Override feature (let users decide)
- Likely best approach: option 2 (follow the Console) with option 3 as fallback
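A minimal sketch of option 2's decision, with option 3 as the fallback (all types and members hypothetical):

```csharp
enum NetworkPurpose { Management, Security, Iot }
enum VlanRecommendation { SecurityVlan, ManualOverride }

static VlanRecommendation Recommend(NetworkPurpose consolePurpose) =>
    consolePurpose == NetworkPurpose.Security
        ? VlanRecommendation.SecurityVlan    // Console on Security: Protect devices follow it
        : VlanRecommendation.ManualOverride; // otherwise defer to the user's override
```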
### Phase 2: Access Devices (Door Access)
- [ ] Parse `access_devices` array
- [ ] Identify door locks, card readers, intercoms
- [ ] Map to `ClientDeviceCategory.SmartLock` or new `AccessControl` category
- [ ] Recommend Security VLAN placement
### Phase 3: Connect Devices (EV Chargers, etc.)
- [ ] Parse `connect_devices` array
- [ ] Identify EV chargers, power devices
- [ ] Map to `ClientDeviceCategory.SmartPlug` or new `EVCharger` category
- [ ] Recommend IoT VLAN placement
### Phase 4: Talk Devices (Intercoms/Phones)
- [ ] Parse `talk_devices` array
- [ ] Identify intercoms, VoIP phones
- [ ] Map to `ClientDeviceCategory.VoIP` or `SmartSpeaker`
- [ ] Consider VoIP VLAN vs IoT VLAN recommendation
### Phase 5: LED Devices
- [ ] Parse `led_devices` array
- [ ] Identify LED controllers, smart lighting
- [ ] Map to `ClientDeviceCategory.SmartLighting`
- [ ] Recommend IoT VLAN placement
**Note:** The v2 API is only available on UniFi OS controllers (UDM, UCG, etc.). Device classification from the controller API carries 100% confidence, since the controller knows its own devices.
## Standalone Controller Support
### API Path Differences
Currently only tested with UniFi OS controllers (UDM, Cloud Gateway). Standalone controllers use different API paths:
| Controller Type | API Path Pattern |
|-----------------|------------------|
| UniFi OS (UDM/UCG) | `https://<controller>/proxy/network/api/s/{site}/stat/sta` |
| Standalone Controller | `https://<controller>/api/s/{site}/stat/sta` |
The app auto-detects controller type via login response, but needs testing with standalone controllers to verify:
- Path detection logic in `UniFiApiClient`
- All API endpoints work correctly
- Authentication flow differences (if any)
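A minimal sketch of the prefix selection (property names assumed; the detection flag comes from the login response):

```csharp
public string ApiPrefix => IsUniFiOs
    ? "/proxy/network/api" // UniFi OS (UDM/UCG)
    : "/api";              // standalone controller

// Full URL: $"https://{host}{ApiPrefix}/s/{site}/stat/sta"
```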
================================================
FILE: docker/.dockerignore
================================================
# Git
.git/
.gitignore
.gitattributes
# Docker
docker/
Dockerfile
docker-compose.yml
.dockerignore
# Documentation
*.md
docs/
*.pdf
# IDE
.vs/
.vscode/
.idea/
*.suo
*.user
*.userosscache
*.sln.docstates
# Build artifacts
**/bin/
**/obj/
**/out/
# Test coverage
**/TestResults/
**/*.coverage
**/*.coveragexml
# NuGet (exclude default caches, but keep our local packages source)
!packages/
!packages/*.nupkg
# Node modules (if any)
node_modules/
npm-debug.log
# OS files
.DS_Store
Thumbs.db
*.swp
*.swo
*~
# Logs
*.log
logs/
# Data directories
data/
ssh-keys/
# Environment files
.env
.env.local
.env.production
# Temporary files
tmp/
temp/
*.tmp
================================================
FILE: docker/.env.example
================================================
# Network Optimizer Environment Configuration
# Copy this file to .env and update with your values
# ===== Network Binding =====
# BIND_LOCALHOST_ONLY: Controls which network interfaces the app listens on
# - false (default): Binds to 0.0.0.0:8042 (accessible from network)
# - true: Binds to 127.0.0.1:8042 (localhost only, use with reverse proxy on same host)
# BIND_LOCALHOST_ONLY=false
# ===== Timezone Configuration =====
# Common US timezones:
# America/New_York (Eastern), America/Chicago (Central),
# America/Denver (Mountain), America/Los_Angeles (Pacific)
# Other examples:
# Europe/London, Europe/Paris, Asia/Tokyo, Australia/Sydney
TZ=America/New_York
# ===== Application Password =====
# Password precedence: Database (Settings UI) > APP_PASSWORD env var > Auto-generated
#
# On first run, an auto-generated password is shown in the logs.
# You can then set a permanent password in Settings > Admin Password (recommended).
# APP_PASSWORD is a fallback - useful for Docker deployments where you want
# to set the password before the first login.
# APP_PASSWORD=your_secure_password
# ===== Host Identity & Canonical URL Enforcement =====
# These settings identify the server for speed testing and optionally enforce a canonical URL.
# Redirects (302) only occur when HOST_NAME or REVERSE_PROXIED_HOST_NAME is set.
# HOST_IP alone does NOT trigger redirects (allows access via any hostname).
# HOST_IP: Server's IP address
# - Used for: Speed test path analysis, CORS, OpenSpeedTest URL in UI
# - Required for: Path analysis when server IP can't be auto-detected (bridge networking)
# - Note: Does NOT enforce redirects (users can still access via hostname)
# HOST_IP=192.168.1.100
# HOST_NAME: Server's hostname (recommended for better UX)
# - Used for: Canonical URL enforcement, user-facing URLs, OpenSpeedTest link in UI
# - Requires: DNS resolution by clients (can be local DNS via router/Pi-hole)
# - Examples: nas, server.local, optimizer.home.arpa
# HOST_NAME=nas
# REVERSE_PROXIED_HOST_NAME: Hostname when behind a reverse proxy
# - Used for: Canonical URL (https, no port), API URL for OpenSpeedTest result reporting
# - Set to: The hostname your reverse proxy serves (internal or public)
# - Note: OpenSpeedTest container is still accessed via HOST_NAME/HOST_IP:3005
# REVERSE_PROXIED_HOST_NAME=optimizer.example.com
# ===== Client Speed Testing =====
# Browser-based: Speed Test runs on port 3005 (configurable), results auto-reported if HOST_IP/HOST_NAME set
# To disable: comment out the network-optimizer-speedtest service in docker-compose.yml
# CLI-based: Enable iperf3 server mode for testing from devices with iperf3 installed
# Enable iperf3 server (listens on port 5201)
# IPERF3_SERVER_ENABLED=true
# OpenSpeedTest port (default 3005)
# - Used for direct access without a reverse proxy (e.g., http://server:3005)
# - When behind a reverse proxy, clients use HTTP_PORT/HTTPS_PORT below instead
# - Change if: Port 3005 conflicts with another service
# OPENSPEEDTEST_PORT=3005
# OpenSpeedTest hostname (defaults to HOST_NAME)
# - Set if speedtest is accessed via a different hostname than the main app
# - Example: speedtest.example.com when main app is at optimizer.example.com
# OPENSPEEDTEST_HOST=speedtest.example.com
# OpenSpeedTest HTTPS mode (default false)
# Set to "true" when the speed test is behind a TLS-terminating reverse proxy.
# UI links will use https:// and CORS will include the HTTPS origin.
# OPENSPEEDTEST_HTTPS=true
#
# HTTPS proxy port (default 443)
# Change if your TLS proxy listens on a non-standard HTTPS port
# OPENSPEEDTEST_HTTPS_PORT=443
#
# IMPORTANT: Speedtest reverse proxies MUST force HTTP/1.1 for accurate results.
# HTTP/2+ multiplexing inflates speeds. However, HTTP/1.1 will BREAK the main
# Network Optimizer app (Blazor requires HTTP/2+ for WebSockets).
#
# If you use a TLS proxy, you need TWO separate hostnames:
# - speedtest.example.com → HTTP/1.1 → localhost:3005 (speedtest)
# - optimizer.example.com → HTTP/2+ → localhost:8042 (main app)
#
# See NetworkOptimizer-Proxy for ready-made Traefik configs that handle this.
# ===== Advanced Settings =====
# Log levels: Trace, Debug, Information, Warning, Error, Critical
# LOG_LEVEL=Information # General (framework, EF Core, etc.)
# APP_LOG_LEVEL=Debug # Network Optimizer application
================================================
FILE: docker/DEPLOYMENT.md
================================================
# Deployment Guide
Production deployment guide for Network Optimizer.
## Deployment Options
| Option | Best For | Guide |
|--------|----------|-------|
| Linux + Docker | Self-built servers, VMs, cloud (recommended) | [Below](#1-linux--docker-recommended) |
| Proxmox LXC | Homelab virtualization, one-liner install | [Proxmox Guide](#2-proxmox-lxc) |
| NAS + Docker | Synology, QNAP, Unraid | [NAS Deployment](#3-nas-deployment-docker) |
| Home Assistant | Home Assistant OS (add-ons) | [Home Assistant](#5-home-assistant) |
| Windows Installer | Windows desktops/servers | [Download from Releases](https://github.com/Ozark-Connect/NetworkOptimizer/releases) |
| macOS Native | Mac servers, multi-gigabit speed testing | [macOS Installation](../docs/MACOS-INSTALLATION.md) |
| Linux Native | Maximum performance, no Docker | [Native Guide](NATIVE-DEPLOYMENT.md#linux-deployment) |
---
### 1. Linux + Docker (Recommended)
Deploy on any Linux server using Docker Compose. This is the recommended approach for self-built NAS, home servers, VMs, and cloud instances.
**Requirements:**
- Docker 20.10+ and Docker Compose 2.0+
- 2GB RAM minimum (4GB recommended)
- 10GB disk space
- Ubuntu 20.04+, Debian 11+, RHEL/CentOS 8+, or compatible
#### Quick Start
```bash
# Install Docker (if not already installed)
curl -fsSL https://get.docker.com | sh
sudo usermod -aG docker $USER
# Log out and back in for group changes
```
> **Choose a stable location:** Deploy to a permanent directory like `/opt/network-optimizer`. Avoid home directories or `/tmp` which may cause issues with permissions, cleanup, or migrations.
**Option A: Pull Docker Image (Recommended)**
```bash
# Create directory in /opt (recommended)
sudo mkdir -p /opt/network-optimizer && sudo chown $USER: /opt/network-optimizer
cd /opt/network-optimizer
curl -o docker-compose.yml https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.prod.yml
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/.env.example
cp .env.example .env
nano .env # Set timezone and other options (optional)
docker compose up -d
```
**Option B: Build from Source**
```bash
cd /opt # or your preferred stable location
sudo git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
sudo chown -R $USER: NetworkOptimizer
cd NetworkOptimizer/docker
cp .env.example .env
nano .env # Set timezone and other options (optional)
docker compose build
docker compose up -d
```
**Verify Installation:**
```bash
# Check logs for the auto-generated admin password
docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
# Verify health
docker compose ps
curl http://localhost:8042/api/health
```
Access at: **http://your-server:8042**
#### Network Mode Options
**Host Networking (Recommended for Linux):**
```yaml
# docker-compose.yml uses network_mode: host by default
# This provides best performance and accurate IP detection
```
**Bridge Networking (if host mode unavailable):**
```bash
# Use docker-compose.macos.yml which uses port mapping
# IMPORTANT: Set HOST_IP in .env to your server's IP for accurate path analysis
docker compose -f docker-compose.macos.yml up -d
```
#### Service Management
```bash
# View logs
docker compose logs -f
# Restart
docker compose restart
# Stop
docker compose down
# Update to latest
docker compose pull
docker compose up -d
# Full rebuild (after Dockerfile changes)
docker compose build --no-cache
docker compose up -d
```
#### Systemd Integration (Auto-Start on Boot)
```bash
# Enable Docker to start on boot
sudo systemctl enable docker
# Docker Compose containers with restart: unless-stopped will auto-start
```
Or create a dedicated systemd service:
```bash
sudo tee /etc/systemd/system/network-optimizer.service > /dev/null << 'EOF'
[Unit]
Description=Network Optimizer
Requires=docker.service
After=docker.service
[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/network-optimizer/docker
ExecStart=/usr/bin/docker compose up -d
ExecStop=/usr/bin/docker compose down
TimeoutStartSec=0
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable network-optimizer
```
---
### 2. Proxmox LXC
The easiest way to deploy on Proxmox. Run this one-liner on your **Proxmox VE host**:
```bash
bash -c "$(wget -qLO - https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/proxmox/install.sh)"
```
The interactive script will:
1. Create a privileged Debian LXC container
2. Install Docker and Docker Compose
3. Deploy Network Optimizer with Docker Compose
4. Optionally deploy a [Traefik HTTPS proxy](https://github.com/Ozark-Connect/NetworkOptimizer-Proxy) with automatic Let's Encrypt certificates (requires Cloudflare DNS)
5. Configure auto-start on boot
**Requirements:**
- Proxmox VE 7.0 or later
- 10GB disk space, 2GB RAM minimum
- Internet access for downloading images
**After Installation:**
```bash
# Get the auto-generated admin password
pct exec <ctid> -- docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
# Access the web UI
http://<container-ip>:8042
```
For advanced configuration, troubleshooting, and manual installation see the [full Proxmox guide](../scripts/proxmox/README.md).
---
### 3. NAS Deployment (Docker)
For commercial NAS devices with container support.
#### Synology NAS
1. Install Container Manager from Package Center
2. Clone or upload the repository to `/docker/network-optimizer`
3. Copy `.env.example` to `.env` and configure
4. Create project in Container Manager pointing to docker-compose.yml
5. Start containers
**Note:** If using bridge networking, set `HOST_IP` in `.env` to your NAS IP address.
#### QNAP NAS
1. Install Container Station
2. Create shared folders
3. Import `docker-compose.yml`
4. Configure environment variables
5. Deploy stack
#### Unraid
1. Install Community Applications plugin
2. Search for "Network Optimizer"
3. Deploy both network-optimizer and network-optimizer-speedtest containers
Community templates maintained by [@stefan-matic](https://github.com/stefan-matic/unraid-templates).
Or use manual Docker Compose deployment (note: it cannot be managed by the Unraid GUI if deployed via compose).
---
### 4. Native Deployment (No Docker)
For maximum network performance or systems without Docker, run natively on the host.
**Best for:**
- macOS systems (avoids Docker Desktop's ~1.8 Gbps network throughput limitation)
- Systems where Docker overhead is undesirable
- Dedicated appliances
**Supported Platforms:**
- macOS 11+ (Intel or Apple Silicon)
- Linux (Ubuntu 20.04+, Debian 11+, RHEL 8+)
- Windows: Use the [Windows Installer](https://github.com/Ozark-Connect/NetworkOptimizer/releases) instead
See [Native Deployment Guide](NATIVE-DEPLOYMENT.md) for macOS and Linux instructions.
---
### 5. Home Assistant
Network Optimizer can be installed as two Home Assistant add-ons. See [issue #201](https://github.com/Ozark-Connect/NetworkOptimizer/issues/201) for setup instructions and discussion.
For the initial admin password, check the add-on's **Log** tab instead of using the `docker logs` command.
## Pre-Deployment Checklist
- [ ] Docker and Docker Compose installed
- [ ] Sufficient disk space (10GB minimum)
- [ ] Network access to UniFi Controller
- [ ] Firewall rules configured (if applicable)
- [ ] `.env` file configured with secure passwords
- [ ] SSL certificates ready (if using HTTPS)
- [ ] SSH enabled on UniFi devices (required for SQM and LAN speed testing, see below)
## Installation Steps (NAS)
These detailed steps are for NAS deployment. For other deployment options, see the guides above.
> **Note:** If `docker compose` doesn't work on older NAS firmware, try `docker-compose` (hyphenated).
> **Choose a stable location:** Deploy to a permanent directory like `/volume1/docker/network-optimizer` (Synology) or equivalent. Avoid temporary locations that may be cleaned up or have permission issues.
### 1. Download Files
**Option A: Pull Docker Image (Recommended)**
```bash
mkdir network-optimizer && cd network-optimizer
curl -o docker-compose.yml https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.prod.yml
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/.env.example
```
**Option B: Build from Source**
```bash
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer/docker
```
### 2. Configure Environment
```bash
# Copy template
cp .env.example .env
# Edit with your settings
nano .env
```
**Recommended changes:**
```env
# Set your timezone
TZ=America/Chicago
```
**Admin Password:**
On first run, an auto-generated password is displayed in the logs. After logging in,
go to **Settings > Admin Password** to set your own password (recommended).
Password precedence: Database (Settings UI) > `APP_PASSWORD` env var > Auto-generated
Optionally, set `APP_PASSWORD` in `.env` if you want to configure a password before first login.
### 3. Deploy Stack
```bash
docker compose up -d
```
### 4. Verify Deployment
```bash
# Check service health
docker compose ps
# View logs
docker compose logs -f
# Test health endpoint
curl http://localhost:8042/api/health
```
Expected output:
```
NAME STATUS
network-optimizer Up (healthy)
```
### 5. Access Web UI
- Web UI: http://your-server:8042
## Production Configuration
### HTTPS with Reverse Proxy
Use nginx, Caddy, or Traefik for SSL termination.
**If the reverse proxy is on the same host**, add to your `.env`:
```env
BIND_LOCALHOST_ONLY=true
```
This binds the app to `127.0.0.1:8042` instead of all interfaces, so only the local proxy can access it.
#### Traefik (Recommended for Speed Testing)
If you use the browser-based speed test (OpenSpeedTest), Traefik is the recommended reverse proxy. Most proxies negotiate HTTP/2 at the TLS level, and HTTP/2 multiplexing interferes with speed test throughput measurements. Traefik's per-router TLS options let you force HTTP/1.1 for the speed test hostname while keeping HTTP/2 for the main app - all on one port 443.
See [NetworkOptimizer-Proxy](https://github.com/Ozark-Connect/NetworkOptimizer-Proxy) for a ready-to-use Docker Compose setup with automatic Let's Encrypt certificates via Cloudflare DNS-01.
**Proxmox users:** The [Proxmox LXC installer](../scripts/proxmox/README.md) can set up Traefik automatically during installation.
**Windows users:** Traefik is available as an optional feature in the MSI installer.
#### Nginx Example
```nginx
# /etc/nginx/sites-available/network-optimizer
server {
listen 80;
server_name network-optimizer.example.com;
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl http2;
server_name network-optimizer.example.com;
ssl_certificate /etc/letsencrypt/live/network-optimizer.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/network-optimizer.example.com/privkey.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
# Blazor Web UI
location / {
proxy_pass http://localhost:8042;
proxy_http_version 1.1;
# WebSocket support for Blazor
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Timeouts for long-running operations
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
}
```
Enable and restart:
```bash
sudo ln -s /etc/nginx/sites-available/network-optimizer /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
```
#### Caddy Example (Automatic HTTPS)
```caddy
# /etc/caddy/Caddyfile
network-optimizer.example.com {
reverse_proxy localhost:8042
}
```
Restart Caddy:
```bash
sudo systemctl reload caddy
```
### Firewall Configuration
#### UFW (Ubuntu/Debian)
```bash
# Allow SSH
sudo ufw allow 22/tcp
# Allow HTTP/HTTPS (if using reverse proxy)
sudo ufw allow 80/tcp
sudo ufw allow 443/tcp
# Or allow direct access to the web UI
sudo ufw allow 8042/tcp # Web UI
sudo ufw enable
```
#### firewalld (RHEL/CentOS)
```bash
sudo firewall-cmd --permanent --add-service=http
sudo firewall-cmd --permanent --add-service=https
sudo firewall-cmd --permanent --add-port=8042/tcp
sudo firewall-cmd --reload
```
### Backup Strategy
#### Automated Backups
Create backup script:
```bash
#!/bin/bash
# /usr/local/bin/backup-network-optimizer.sh
BACKUP_DIR=/backups/network-optimizer
DATE=$(date +%Y%m%d-%H%M%S)
# Create backup directory
mkdir -p $BACKUP_DIR
# Backup SQLite data and configuration
tar czf $BACKUP_DIR/data-$DATE.tar.gz -C /path/to/docker data/
# Cleanup old backups (keep last 7 days)
find $BACKUP_DIR -type f -mtime +7 -delete
echo "Backup completed: $DATE"
```
Add to crontab:
```bash
# Daily backup at 2 AM
0 2 * * * /usr/local/bin/backup-network-optimizer.sh >> /var/log/network-optimizer-backup.log 2>&1
```
#### Restore from Backup
```bash
# Stop services
docker compose down
# Restore data
tar xzf /backups/network-optimizer/data-20240101-020000.tar.gz -C /path/to/docker/
# Start services
docker compose up -d
```
### Monitoring and Alerting
#### System Monitoring
Use Docker healthchecks:
```bash
# Check all services
watch docker compose ps
# Monitor resource usage
docker stats
```
#### Log Monitoring
Centralized logging with rsyslog or similar:
```yaml
# docker-compose.yml addition
logging:
driver: syslog
options:
syslog-address: "udp://your-syslog-server:514"
tag: "network-optimizer"
```
#### Uptime Monitoring
Use external monitoring:
- UptimeRobot
- Healthchecks.io
- Self-hosted Uptime Kuma
Configure health check endpoint:
```bash
# Monitor this endpoint
http://your-server:8042/api/health
```
### Resource Limits
Add resource constraints for production:
```yaml
# docker-compose.override.yml
services:
network-optimizer:
deploy:
resources:
limits:
cpus: '2.0'
memory: 2G
reservations:
cpus: '1.0'
memory: 1G
restart: always
```
Apply with:
```bash
docker compose up -d
```
### Logging Configuration
Control log verbosity via environment variables in `.env`:
```env
# General framework logging (Microsoft, EF Core, ASP.NET, etc.)
LOG_LEVEL=Information
# Network Optimizer application logging
APP_LOG_LEVEL=Debug
```
**Log Levels (least to most verbose):** Critical, Error, Warning, Information, Debug, Trace
**Common configurations:**
| Scenario | LOG_LEVEL | APP_LOG_LEVEL |
|----------|-----------|---------------|
| Production (default) | Information | Information |
| Debugging app issues | Information | Debug |
| Full diagnostics | Debug | Debug |
After changing `.env`, recreate the container to apply:
```bash
docker compose down && docker compose up -d
```
**Note:** `docker compose restart` does NOT reload environment variables. You must recreate the container.
View logs:
```bash
# Follow logs
docker compose logs -f network-optimizer
# Last 100 lines
docker compose logs --tail=100 network-optimizer
```
#### Windows Service
On Windows, logs are written to `<install-dir>\logs\networkoptimizer-YYYY-MM-DD.log` (rolling daily, 7-day retention).
To change log levels, set environment variables on the Windows service via the registry. This avoids modifying any config files.
**Enable debug logging for Network Optimizer:**
```powershell
$regPath = "HKLM:\SYSTEM\CurrentControlSet\Services\NetworkOptimizer"
$existing = (Get-ItemProperty $regPath -Name Environment -ErrorAction SilentlyContinue).Environment
$env = [string[]](@($existing | Where-Object { $_ }) + "Logging__LogLevel__NetworkOptimizer=Debug")
Set-ItemProperty $regPath -Name Environment -Value $env
Restart-Service NetworkOptimizer
```
**Enable debug logging for Traefik (HTTPS certificate issues):**
If HTTPS isn't working after a couple of minutes (certificate errors in the browser), enable Traefik debug logging to see why certificate issuance is failing. Traefik runs as a child process and its output is captured into the app log, so you need both settings: Traefik's own log level (controls what Traefik emits) and the app log level (controls what gets written to the log file):
```powershell
$regPath = "HKLM:\SYSTEM\CurrentControlSet\Services\NetworkOptimizer"
$existing = (Get-ItemProperty $regPath -Name Environment -ErrorAction SilentlyContinue).Environment
$env = [string[]](@($existing | Where-Object { $_ }) + "Logging__LogLevel__NetworkOptimizer=Debug")
Set-ItemProperty $regPath -Name Environment -Value $env
# Also set Traefik's own log level to DEBUG (this is separate from the app log level)
Set-ItemProperty -Path "HKLM:\SOFTWARE\Ozark Connect\Network Optimizer" -Name "TRAEFIK_LOG_LEVEL" -Value "DEBUG"
Restart-Service NetworkOptimizer
```
**Remove debug logging when done:**
```powershell
# Remove service environment variables
$regPath = "HKLM:\SYSTEM\CurrentControlSet\Services\NetworkOptimizer"
$env = [string[]]((Get-ItemProperty $regPath -Name Environment).Environment | Where-Object { $_ -notlike "Logging__*" })
if ($env.Count -gt 0) {
Set-ItemProperty $regPath -Name Environment -Value $env
} else {
Remove-ItemProperty $regPath -Name Environment -ErrorAction SilentlyContinue
}
# Reset Traefik log level
Set-ItemProperty -Path "HKLM:\SOFTWARE\Ozark Connect\Network Optimizer" -Name "TRAEFIK_LOG_LEVEL" -Value "INFO"
Restart-Service NetworkOptimizer
```
**View logs:**
```powershell
# Follow the current log file
Get-Content "\logs\networkoptimizer-*.log" -Tail 50 -Wait
```
## Upgrade Procedure
### Option A: Using Docker Image (Recommended)
If you deployed using the pre-built Docker image:
```bash
cd /path/to/network-optimizer
docker compose pull
docker compose up -d
```
### Option B: Building from Source
If you cloned the repository and build locally:
```bash
cd /path/to/NetworkOptimizer
git fetch origin
git checkout main
git pull
cd docker && docker compose build && docker compose up -d
```
For significant updates (major version changes or Dockerfile modifications), use `--no-cache`:
```bash
docker compose build --no-cache
docker compose up -d
```
### Windows Installer
Download the latest MSI from [GitHub Releases](https://github.com/Ozark-Connect/NetworkOptimizer/releases) and run it. The installer upgrades in-place, preserving your database, settings, and encryption keys. The Network Optimizer service restarts automatically after the upgrade.
### macOS Native
```bash
cd NetworkOptimizer
git pull
./scripts/install-macos-native.sh
```
The install script preserves your database, encryption keys, and `start.sh` configuration by backing them up before reinstalling. See the [macOS Installation Guide](../docs/MACOS-INSTALLATION.md) for details.
### Verify Update
```bash
docker compose ps
docker compose logs -f
```
## Migrating from Build-from-Source to Pre-Built Images
If you've been building from source and want to switch to the pre-built Docker images:
**Why migrate?** Pre-built images are faster to update (no build step), tested before release, and don't require the full git repository.
**Important:** When you build locally, Docker tags your image as `ghcr.io/ozark-connect/network-optimizer:latest`. Simply running `docker compose pull` won't overwrite this because the compose file has a `build:` directive. You need to force the pull and switch to the production compose file.
```bash
cd /opt/network-optimizer # or wherever you deployed
# Stop running containers
docker compose down
# Force pull registry images (overwrites locally-built images)
docker pull ghcr.io/ozark-connect/network-optimizer:latest
docker pull ghcr.io/ozark-connect/speedtest:latest
# Back up your current compose file (optional)
mv docker-compose.yml docker-compose.yml.build-backup
# Download the production compose file (no build directives)
curl -o docker-compose.yml https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.prod.yml
# Start with pre-built images
docker compose up -d
# Optional: clean up old build cache to free disk space
docker builder prune -f
```
Your `data/`, `logs/`, and `.env` files are preserved. Future updates are now just:
```bash
docker compose pull && docker compose up -d
```
## Troubleshooting
### Reset Admin Password
If you've forgotten your password or need to reset it, use the reset script:
```bash
curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.sh | bash
```
Or download and run it (useful inside Proxmox LXC or restricted environments):
```bash
curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.sh -o reset-password.sh
bash reset-password.sh
```
The script auto-detects your Docker container, clears the password, restarts, and displays the new temporary password.
**Manual fallback** (if you prefer not to use the script):
```bash
# Clear the password from the database
docker exec network-optimizer sqlite3 /app/data/network_optimizer.db \
"UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
# Restart to trigger auto-generated password
docker restart network-optimizer
# View the new auto-generated password
docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
### Container Won't Start
```bash
# Check logs for errors
docker compose logs network-optimizer
# Common issues:
# - Port 8042 already in use: stop conflicting service or change port
# - Permission denied on data directory: check ownership of mounted volumes
# - Out of disk space: df -h
```
### Can't Connect to UniFi Controller
1. Verify the controller URL is correct (include https:// and port if non-standard)
2. Ensure you're using a **Local Access Only** account, not Ubiquiti SSO (see [UniFi Account setup](#unifi-account))
3. Check network connectivity: `curl -k https://your-controller:443`
4. For self-signed certificates, enable "Ignore SSL errors" in Settings
### SSH Connection Failures
```bash
# Test SSH manually from the container
docker exec -it network-optimizer ssh username@gateway-ip
# Common issues:
# - SSH not enabled on device (see UniFi SSH Configuration section)
# - Wrong credentials
# - Firewall blocking port 22
# - Host key verification (container may need to accept new host keys)
```
### Blazor UI Not Loading / Disconnects
Blazor Server uses WebSocket connections. If the UI shows "Reconnecting..." or won't load:
1. Check that your reverse proxy supports WebSockets (see nginx/Caddy examples above)
2. Ensure proxy timeouts are sufficient (60s+)
3. Check browser console for connection errors
### Database Issues
The SQLite database is stored in the `data/` volume. If you encounter database errors:
```bash
# Check database file exists and has correct permissions
docker exec network-optimizer ls -la /app/data/
# View recent application logs
docker compose logs --tail=100 network-optimizer
```
## Security Considerations
### Protect Your Credentials
The `.env` file and SQLite database contain sensitive information:
```bash
# Restrict .env file permissions
chmod 600 .env
# Data directory contains the database with stored credentials
chmod 700 data/
```
### Network Access
Network Optimizer stores UniFi controller credentials and SSH passwords. Limit access to the web UI:
- Use a reverse proxy with authentication if exposing beyond your local network
- Consider firewall rules to restrict access to trusted IPs
- Use HTTPS via reverse proxy (see examples above)
### UniFi Account
Network Optimizer supports UniFi OS devices (UDM, UCG, UDR, Cloud Key) and self-hosted UniFi Network Server installations.
Create a dedicated **Local Access Only** account on your UniFi controller for Network Optimizer. Ubiquiti SSO accounts will not work.
**Quick Setup:** Create a Local Access Only account with **Super Admin** role.
**Restricted Setup (recommended):**
1. Open UniFi Network: `https://<controller-ip>` or `https://unifi.ui.com`
2. Click **Admin & Users** at the bottom of the side menu
3. Click **Create New** → **Create New User**
4. Enter a name and email for this service account
5. Check **Admin** and **Restrict to Local Access Only**
6. Uncheck **Use a Predefined Role** and set:
- **Network:** View Only
- **Protect:** View Only
- **User & Account Management:** None
7. Set a secure password and save
Use this username and password in Network Optimizer Settings.
## Support
- GitHub Issues: https://github.com/Ozark-Connect/NetworkOptimizer/issues
- Email: tj@ozarkconnect.net
## UniFi SSH Configuration
SSH access is required for some features but not others. Here's what needs what:
| Feature | Gateway SSH | Device SSH |
|---------|:-----------:|:----------:|
| Adaptive SQM | Required | - |
| WAN Speed Test (gateway-based) | Required | - |
| WAN Speed Test (server-based) | - | - |
| LAN Speed Test (gateway) | Required | - |
| LAN Speed Test (devices) | - | Required |
| Client Speed Test | - | - |
| Security Audit | - | - |
| Config Optimizer | - | - |
| Wi-Fi Optimizer | - | - |
### Enabling SSH in UniFi
**Important:** Both SSH settings must be configured via the UniFi Network web interface. These options are not available in the iOS or Android UniFi apps.
#### Gateway SSH (Console SSH)
Enables SSH access to Cloud Gateways (UCG, UDM, UDM Pro, etc.):
1. Open **UniFi Network**: `https://<controller-ip>` or `https://unifi.ui.com`
2. Sign in to your Console
3. Click **Settings** on the bottom portion of the side menu
4. Navigate to **Control Plane** → **Console**
5. Enable **SSH** and set a secure password
Use `root` as the username and the password you set above.
**For UXG (non-Cloud Gateway):** Enable SSH using the Device SSH steps below, but enter those credentials in Network Optimizer's Gateway SSH settings.
#### Device SSH (UniFi Network 9.5+)
Enables SSH access to adopted devices (switches, access points, modems):
1. Open **UniFi Network**: `https://<controller-ip>` or `https://unifi.ui.com`
2. Sign in to your Console
3. Click **UniFi Devices** on the side menu
4. In the left-hand filter menu, select **Device Updates and Settings** at the bottom
5. Expand **Device SSH Settings** at the bottom
6. Check **Device SSH Authentication**
7. Set a username and secure password (optionally add SSH public keys)
8. Save
**Note:** This is a separate credential from Gateway SSH.
### Configuring SSH in Network Optimizer
Once SSH is enabled in UniFi, enter the same credentials in Network Optimizer's **Settings** page.
#### Gateway SSH
1. Go to **Settings** → **Gateway SSH**
2. Enter your gateway's IP address, username (`root`), and the SSH password you set in UniFi
3. Click **Test SSH Connection** to verify connectivity
4. Click **Check iperf3 Status** to confirm iperf3 is available for speed tests
As an alternative to password authentication, you can provide a **Private Key Path** (e.g., `/app/ssh-keys/gateway_key`). Leave the password blank when using key-based authentication.
#### Device SSH
1. Go to **Settings** → **Device SSH**
2. Enter the username and password you configured in UniFi's Device SSH Settings
3. Click **Test SSH Connection** - it will automatically find a device on your network to test against
Private key authentication is also supported. Enter the key path (e.g., `/app/ssh-keys/id_rsa`) and leave the password blank.
### Per-Device SSH Overrides
In **LAN Speed Test**, when you add a custom speed test device and check **Start iperf3 server before test**, you can override the global Device SSH credentials for that specific device. Override fields include username, password, and private key path. Leave any field blank to fall back to the global Device SSH settings.
This is useful for non-UniFi equipment or devices with different credentials.
### Troubleshooting SSH Connections
If SSH connections are failing:
1. **Check credentials** - Use the **Test SSH Connection** button in Settings to verify your credentials are correct
2. **Check UniFi firewall rules** - Ensure SSH traffic is allowed between the Network Optimizer server and your gateway/devices
3. **Check CyberSecure IDS/IPS** - If your CyberSecure Detection Mode is set to **Notify and Block**, SSH connections may be blocked by the rule **"ET SCAN Potential SSH Scan OUTBOUND"**. You can fix this three ways:
- **Recommended:** Look for blocked connections in **Insights → Flows**, then create a **Suppression** for this specific signature in the Logs section
- **Alternative:** Add the Network Optimizer server's IP as a source in **Detection Exclusions**
- **Alternative:** In CyberSecure settings, uncheck **Scanning Activity** under the Attacks and Reconnaissance category (this disables the entire category, so the suppression approach is preferred)
## Client Speed Testing (Optional)
Enable speed testing from any device on your LAN (phones, tablets, laptops, IoT devices) without requiring SSH access.
### Overview
Two methods are available:
| Method | Best For | Port |
|--------|----------|------|
| **OpenSpeedTest™** | Browser-based testing from any device | 3005 (configurable) |
| **iperf3 Server** | CLI testing with iperf3 clients | 5201 |
Results from both methods are stored in Network Optimizer and visible in the Client Speed Test page.
**Why separate containers?** OpenSpeedTest runs as its own container (not proxied through Network Optimizer) for performance reasons. Speed tests can push massive bandwidth (multi-gigabit to 100 Gbps on high-end networks), and routing that traffic through a reverse proxy or the .NET application would add overhead and reduce accuracy. The only data sent to Network Optimizer is the small JSON result payload after the test completes.
### OpenSpeedTest™ (Browser-Based)
Bundled as part of the Docker Compose stack. Access at `http://your-server:3005`.
**Configuration (in `.env`):**
```env
# Main app identity (feel free to omit N/A settings)
HOST_IP=192.168.1.100 # Optional - for path analysis if auto-detection fails
HOST_NAME=nas # Optional - friendly hostname (requires DNS)
REVERSE_PROXIED_HOST_NAME=... # Optional - if main app is behind HTTPS proxy
# SpeedTest-specific (feel free to omit N/A settings)
OPENSPEEDTEST_PORT=3005 # Optional - change if port 3005 conflicts
OPENSPEEDTEST_HOST=speedtest.local # Optional - if speedtest uses different hostname than main app
OPENSPEEDTEST_HTTPS=true # Optional - if speedtest is behind TLS proxy (for geolocated speed test result map)
OPENSPEEDTEST_HTTPS_PORT=443 # Optional - HTTPS port if not 443
```
See `.env.example` for full documentation on each setting.
**Usage:**
1. Open `http://your-server:3005` from any device on your network
2. Run the speed test
3. Results automatically appear in Network Optimizer's Client Speed Test page
### HTTPS Configuration Requirements
When serving OpenSpeedTest over HTTPS (`OPENSPEEDTEST_HTTPS=true`), the main Network Optimizer app **must also be accessible via HTTPS**. This is a browser security requirement - HTTPS pages cannot make requests to HTTP endpoints (mixed active content).
**Valid Configurations:**
| Speedtest Protocol | Main App Protocol | Configuration Required |
|-------------------|-------------------|------------------------|
| HTTP | HTTP | `HOST_NAME` or `HOST_IP` |
| HTTP | HTTPS | `REVERSE_PROXIED_HOST_NAME` |
| HTTPS | HTTPS | `OPENSPEEDTEST_HTTPS=true` + `REVERSE_PROXIED_HOST_NAME` |
| HTTPS | HTTP | ❌ **Not supported** (browser blocks mixed content) |
**Example - Both behind HTTPS reverse proxy:**
```env
HOST_NAME=nas
REVERSE_PROXIED_HOST_NAME=optimizer.example.com
OPENSPEEDTEST_HOST=speedtest.example.com
OPENSPEEDTEST_HTTPS=true
```
**If you see this error in browser console:**
```
Blocked loading mixed active content "http://..."
```
It means your speedtest is HTTPS but trying to POST results to an HTTP endpoint. Set `REVERSE_PROXIED_HOST_NAME` to fix.
### iperf3 Server Mode
Run iperf3 as a server inside the Network Optimizer container for CLI-based testing.
**Enable in `.env`:**
```env
IPERF3_SERVER_ENABLED=true
```
**Usage from client devices:**
```bash
# Upload test (client to server, 4 streams)
iperf3 -c your-server -P 4
# Download test (server to client, 4 streams)
iperf3 -c your-server -P 4 -R
# Bidirectional test (runs both directions simultaneously)
iperf3 -c your-server -P 4 --bidir
```
Results are captured automatically and stored with client IP identification.
### Port Conflicts
**Before enabling these features, check for existing services using the same ports:**
```bash
# Check for iperf3 server already running
sudo netstat -tlnp | grep 5201
# or
sudo ss -tlnp | grep 5201
# Check for existing services on port 3005
sudo netstat -tlnp | grep 3005
docker ps | grep -E "3000|3005"
```
**Common conflicts:**
| Port | Service | Resolution |
|------|---------|------------|
| 5201 | Existing iperf3 server | Stop: `sudo systemctl stop iperf3` |
| 3005 | OpenSpeedTest port conflict | Set `OPENSPEEDTEST_PORT=3006` (or another free port) in `.env` |
**Container name conflicts:**
The bundled OpenSpeedTest uses container name `openspeedtest`. If you have an existing container with this name:
```bash
# Remove existing container
docker stop openspeedtest && docker rm openspeedtest
# Then start the Network Optimizer stack
docker compose up -d
```
### External WAN Speed Test Server (Optional)
Deploy an OpenSpeedTest instance to a remote server (VPS, cloud VM, etc.) to let clients test their **internet (WAN) speed** from any device on your network. Results are automatically posted back to your Network Optimizer instance.
**How it works:** The client's browser connects to the remote speed test server. Traffic flows: client → your WAN → internet → remote server → internet → your WAN → client. The result is posted back to Network Optimizer with a server identifier, and stored as a WAN speed test result.
**Requirements:**
- A remote server with Docker (any cloud VPS works)
- Port 3005 (or your chosen port) open on the remote server
- **HTTPS on the external server** (strongly recommended - see note below)
**Why HTTPS?** Chrome and Edge enforce [Private Network Access](https://developer.chrome.com/blog/private-network-access-update) rules. The speed test page is served from a public IP, and the browser posts results back to Network Optimizer on your LAN (a private IP). These browsers block this unless the page origin is HTTPS (a secure context). Firefox and Safari do not currently enforce this restriction, but HTTPS is still strongly recommended.
**Setup:**
1. In Network Optimizer, go to **Settings → External Speed Test Server**
2. Enter the server name, hostname/IP, port, and scheme (HTTPS)
3. Save - a **deploy command** will appear with everything pre-filled
4. SSH to your remote server and run the deploy command
The deploy command handles downloading files, building the container, and starting the server. The Server ID is automatically generated from the name you entered and links results back to this server.
**Interactive deploy** (if you haven't configured Settings yet, the script will walk you through it):
```bash
curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/deploy-external-speedtest.sh | bash
```
**Updating** an existing installation (re-downloads files and rebuilds the container):
```bash
curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/deploy-external-speedtest.sh | bash -s -- --update
```
**Setting up HTTPS:** If you use [NetworkOptimizer-Proxy](https://github.com/Ozark-Connect/NetworkOptimizer-Proxy) (Traefik), the WAN speed test route is already included in `config.example.yml` - just uncomment the `speedtest-wan` router and service, update the hostname and VPS address, and you're done. The config enforces HTTP/1.1 and strips compression headers automatically.
If you use a different reverse proxy, add a route for the external speed test hostname pointing to your remote server on port 3005. The reverse proxy must force HTTP/1.1 for accurate speed test results (HTTP/2 multiplexing interferes with throughput measurement).
Then update the external server settings in Network Optimizer to use `https` scheme and port `443`.
### Disabling Optional Services
To disable client speed testing components:
```env
# Disable iperf3 server (default)
IPERF3_SERVER_ENABLED=false
# To completely disable OpenSpeedTest, comment it out in docker-compose.yml
# or use a custom override file
```
## Next Steps
After deployment:
1. Access web UI and complete initial setup
2. Connect to UniFi Controller
3. Configure SSH access for gateway and devices (see above)
4. Run security audit
5. Configure SQM settings (if applicable)
6. Set up client speed testing (optional, see above)
See main documentation for feature guides.
================================================
FILE: docker/Dockerfile
================================================
# Multi-stage build for Network Optimizer
# Stage 1: Build stage with full .NET SDK
FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build
# Version passed from CI/CD (MinVer can't access .git in Docker context)
ARG VERSION=0.0.0-alpha.0
WORKDIR /src
# Copy solution file, build props, NuGet config, and all project files first for layer caching
COPY ["NetworkOptimizer.sln", "./"]
COPY ["Directory.Build.props", "./"]
COPY ["nuget.config", "./"]
COPY ["packages/", "packages/"]
COPY ["src/NetworkOptimizer.Core/NetworkOptimizer.Core.csproj", "src/NetworkOptimizer.Core/"]
COPY ["src/NetworkOptimizer.Web/NetworkOptimizer.Web.csproj", "src/NetworkOptimizer.Web/"]
COPY ["src/NetworkOptimizer.UniFi/NetworkOptimizer.UniFi.csproj", "src/NetworkOptimizer.UniFi/"]
COPY ["src/NetworkOptimizer.Audit/NetworkOptimizer.Audit.csproj", "src/NetworkOptimizer.Audit/"]
COPY ["src/NetworkOptimizer.Sqm/NetworkOptimizer.Sqm.csproj", "src/NetworkOptimizer.Sqm/"]
COPY ["src/NetworkOptimizer.Monitoring/NetworkOptimizer.Monitoring.csproj", "src/NetworkOptimizer.Monitoring/"]
COPY ["src/NetworkOptimizer.Storage/NetworkOptimizer.Storage.csproj", "src/NetworkOptimizer.Storage/"]
COPY ["src/NetworkOptimizer.Agents/NetworkOptimizer.Agents.csproj", "src/NetworkOptimizer.Agents/"]
COPY ["src/NetworkOptimizer.Reports/NetworkOptimizer.Reports.csproj", "src/NetworkOptimizer.Reports/"]
COPY ["src/NetworkOptimizer.Diagnostics/NetworkOptimizer.Diagnostics.csproj", "src/NetworkOptimizer.Diagnostics/"]
COPY ["src/NetworkOptimizer.WiFi/NetworkOptimizer.WiFi.csproj", "src/NetworkOptimizer.WiFi/"]
COPY ["src/NetworkOptimizer.Alerts/NetworkOptimizer.Alerts.csproj", "src/NetworkOptimizer.Alerts/"]
COPY ["src/NetworkOptimizer.Threats/NetworkOptimizer.Threats.csproj", "src/NetworkOptimizer.Threats/"]
# Restore dependencies
RUN dotnet restore "src/NetworkOptimizer.Web/NetworkOptimizer.Web.csproj"
# Copy all source files
COPY src/ src/
# Build and publish Web UI (includes all dependent projects)
# MinVerVersionOverride tells MinVer to use this version instead of reading git tags
RUN dotnet publish "src/NetworkOptimizer.Web/NetworkOptimizer.Web.csproj" \
-c Release \
-o /app/publish \
--no-restore \
-p:MinVerVersionOverride=${VERSION}
# Stage 2: Build uwnspeedtest Go binaries
# - Container's native arch: for server-side WAN speed tests (local execution)
# - linux/arm64: for gateway-direct WAN speed tests (deployed via SSH to UniFi gateways)
FROM golang:1.22-alpine AS uwnspeedtest-build
ARG VERSION=""
ARG TARGETARCH=amd64
WORKDIR /src
# Copy both modules (uwnspeedtest imports cfspeedtest/speedtest)
COPY src/cfspeedtest/ cfspeedtest/
COPY src/uwnspeedtest/ uwnspeedtest/
WORKDIR /src/uwnspeedtest
# Build for container's target architecture (local server-side tests)
# VERSION override only applies when explicitly passed (GHA releases); otherwise uses main.go static version
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} go build -trimpath \
-ldflags "-s -w ${VERSION:+-X main.version=${VERSION}}" \
-o /uwnspeedtest-local .
# Build for gateway (always linux/arm64, deployed to UniFi gateways via SSH)
RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -trimpath \
-ldflags "-s -w ${VERSION:+-X main.version=${VERSION}}" \
-o /uwnspeedtest-gateway .
# Stage 2.5: Build wansteer Go binary (gateway-only, always linux/arm64)
# WAN Steering daemon - manages iptables rules to load-balance traffic across multiple WANs
FROM golang:1.22-alpine AS wansteer-build
ARG VERSION=""
WORKDIR /src
COPY src/wansteer/ wansteer/
WORKDIR /src/wansteer
RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -trimpath \
-ldflags "-s -w ${VERSION:+-X main.version=${VERSION}}" \
-o /wansteer-gateway .
# Stage 3: Build iperf3 from source for latest version
FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS iperf-build
# iperf3 version - check https://github.com/esnet/iperf/releases for updates
ARG IPERF3_VERSION=3.20
RUN apt-get update && apt-get install -y \
build-essential \
curl \
libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# Build iperf3 from source
WORKDIR /tmp
RUN curl -fLO --retry 3 --retry-delay 5 https://github.com/esnet/iperf/releases/download/${IPERF3_VERSION}/iperf-${IPERF3_VERSION}.tar.gz \
&& tar xzf iperf-${IPERF3_VERSION}.tar.gz \
&& cd iperf-${IPERF3_VERSION} \
&& ./configure --prefix=/usr/local \
&& make \
&& make install
# Stage 4: Runtime stage with ASP.NET runtime only
FROM mcr.microsoft.com/dotnet/aspnet:10.0 AS runtime
ARG TARGETARCH=amd64
WORKDIR /app
# Install necessary utilities (without iperf3 - we copy it from build)
# Note: openssh-client and sshpass removed - SSH.NET handles SSH natively
RUN apt-get update && apt-get install -y \
curl \
iputils-ping \
libssl3 \
gosu \
sqlite3 \
&& rm -rf /var/lib/apt/lists/*
# Copy iperf3 from build stage
COPY --from=iperf-build /usr/local/bin/iperf3 /usr/local/bin/
COPY --from=iperf-build /usr/local/lib/libiperf* /usr/local/lib/
RUN ldconfig
# Copy published application
COPY --from=build /app/publish .
# Copy uwnspeedtest binaries:
# - Local binary for server-side WAN speed tests (matches container arch)
# - Gateway binary for deployment via SSH to UniFi gateways (always linux/arm64)
RUN mkdir -p /app/tools
COPY --from=uwnspeedtest-build /uwnspeedtest-local /app/tools/uwnspeedtest-linux-${TARGETARCH:-amd64}
COPY --from=uwnspeedtest-build /uwnspeedtest-gateway /app/tools/uwnspeedtest-linux-arm64
# Copy wansteer binary (gateway-only, deployed via SSH to UniFi gateways)
COPY --from=wansteer-build /wansteer-gateway /app/tools/wansteer-linux-arm64
# Create directories for volumes
RUN mkdir -p /app/data /app/ssh-keys /app/logs
# Set environment variables
ENV ASPNETCORE_ENVIRONMENT=Production \
ASPNETCORE_HTTP_PORTS=8042 \
DOTNET_RUNNING_IN_CONTAINER=true \
DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=false
# Expose ports
EXPOSE 8042
EXPOSE 5201
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD curl -f http://localhost:8042/api/health || exit 1
# Set ownership for app directories
RUN chown -R app:app /app
# Copy entrypoint script
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Run as root initially; entrypoint will fix volume permissions then drop to app user
ENTRYPOINT ["/entrypoint.sh"]
================================================
FILE: docker/NATIVE-DEPLOYMENT.md
================================================
# Native Deployment Guide
Run Network Optimizer directly on the host without Docker for maximum network performance.
## When to Use Native Deployment
**Recommended for:**
- **macOS/Windows users** - Docker Desktop adds virtualization overhead that can limit network throughput
- **Speed test accuracy** - Native deployment provides accurate multi-gigabit measurements
- **Low-overhead systems** - Minimal resource usage without container overhead
- **Dedicated appliances** - Purpose-built network monitoring devices
**Use Docker instead if:**
- You prefer containerized deployments
- You need easy updates via image pulls
- Your network speeds are under 2 Gbps (except macOS - see below)
**macOS note:** Docker Desktop limits network throughput for speed testing. For accurate multi-gigabit measurements on macOS, use native deployment. The native install script includes OpenSpeedTest setup, so you get both maximum performance and browser-based speed testing.
## Platform-Specific Instructions
- [macOS Deployment](#macos-deployment)
- [Linux Deployment](#linux-deployment)
- [Windows Deployment](#windows-deployment) - Use the Windows Installer instead
---
## macOS Deployment
For the quickest macOS installation, see [macOS Installation Guide](../docs/MACOS-INSTALLATION.md).
For manual installation or customization, continue with the steps below.
---
### Manual Installation
### Prerequisites
**System Requirements:**
- macOS 11 (Big Sur) or later
- Intel or Apple Silicon (M1/M2/M3)
- 2GB RAM minimum
- 1GB disk space
**Required Software:**
```bash
# Install Homebrew if not present
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
# Install required tools
brew install sshpass iperf3
```
### Build from Source
```bash
# Install .NET SDK (if not present)
brew install dotnet
# Clone repository
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
# or via SSH: git clone git@github.com:Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer
# Build for your architecture
# Apple Silicon (M1/M2/M3):
dotnet publish src/NetworkOptimizer.Web -c Release -r osx-arm64 --self-contained -o ~/network-optimizer
# Intel Macs:
# dotnet publish src/NetworkOptimizer.Web -c Release -r osx-x64 --self-contained -o ~/network-optimizer
cd ~/network-optimizer
```
### Code Signing
macOS refuses to run unsigned native binaries (you'll see `Killed: 9` on launch). Sign everything with an ad-hoc signature:
```bash
cd ~/network-optimizer
# Sign all dynamic libraries
find . -name '*.dylib' -exec codesign --force --sign - {} \;
# Sign main executable
codesign --force --sign - NetworkOptimizer.Web
# Verify signature
codesign -v NetworkOptimizer.Web
```
### Create Startup Script
```bash
cat > ~/network-optimizer/start.sh << 'EOF'
#!/bin/bash
cd "$(dirname "$0")"
# Add Homebrew to PATH
export PATH="/opt/homebrew/bin:/usr/local/bin:$PATH"
# Environment configuration
export TZ="America/Chicago" # Change to your timezone
export ASPNETCORE_URLS="http://0.0.0.0:8042"
# Host IP - required for iperf3 client result tracking
export HOST_IP="192.168.1.100" # Change to this Mac's IP address
# Enable iperf3 server for client speed testing (port 5201)
export Iperf3Server__Enabled=true
# Optional: Set admin password (otherwise auto-generated on first run)
# export APP_PASSWORD="your-secure-password"
# Start the application
./NetworkOptimizer.Web
EOF
chmod +x ~/network-optimizer/start.sh
```
### Create Log Directory
```bash
mkdir -p ~/network-optimizer/logs
```
### Install as System Service (launchd)
Create the service definition:
```bash
cat > ~/Library/LaunchAgents/com.networkoptimizer.app.plist << 'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>com.networkoptimizer.app</string>
    <key>ProgramArguments</key>
    <array>
        <string>/Users/YOUR_USERNAME/network-optimizer/start.sh</string>
    </array>
    <key>WorkingDirectory</key>
    <string>/Users/YOUR_USERNAME/network-optimizer</string>
    <key>KeepAlive</key>
    <true/>
    <key>RunAtLoad</key>
    <true/>
    <key>StandardOutPath</key>
    <string>/Users/YOUR_USERNAME/network-optimizer/logs/stdout.log</string>
    <key>StandardErrorPath</key>
    <string>/Users/YOUR_USERNAME/network-optimizer/logs/stderr.log</string>
</dict>
</plist>
EOF
```
**Important:** Replace `YOUR_USERNAME` with your actual username:
```bash
sed -i '' "s/YOUR_USERNAME/$(whoami)/g" ~/Library/LaunchAgents/com.networkoptimizer.app.plist
```
### Start the Service
```bash
# Load and start the service
launchctl load ~/Library/LaunchAgents/com.networkoptimizer.app.plist
# Verify it's running
launchctl list | grep networkoptimizer
# Check health
curl -s http://localhost:8042/api/health
```
### Access the Application
Open your browser to: **http://localhost:8042**
On first run, check the logs for the auto-generated admin password:
```bash
grep -A5 "AUTO-GENERATED" ~/network-optimizer/logs/stdout.log
```
### Service Management
```bash
# Stop service
launchctl unload ~/Library/LaunchAgents/com.networkoptimizer.app.plist
# Start service
launchctl load ~/Library/LaunchAgents/com.networkoptimizer.app.plist
# Restart service
launchctl unload ~/Library/LaunchAgents/com.networkoptimizer.app.plist && \
launchctl load ~/Library/LaunchAgents/com.networkoptimizer.app.plist
# View logs
tail -f ~/network-optimizer/logs/stdout.log
# Check status
launchctl list | grep networkoptimizer && curl -s http://localhost:8042/api/health
```
### Data Location
Network Optimizer stores data in:
- **Database:** `~/Library/Application Support/NetworkOptimizer/network_optimizer.db`
- **Credentials:** `~/Library/Application Support/NetworkOptimizer/.credential_key`
- **Logs:** `~/network-optimizer/logs/`
### Updating
```bash
# Stop service
launchctl unload ~/Library/LaunchAgents/com.networkoptimizer.app.plist
# Backup database (optional)
cp ~/Library/Application\ Support/NetworkOptimizer/network_optimizer.db ~/network_optimizer.db.backup
# Pull latest from main and rebuild
cd ~/NetworkOptimizer
git fetch origin && git checkout main && git pull
dotnet publish src/NetworkOptimizer.Web -c Release -r osx-arm64 --self-contained -o ~/network-optimizer
# Re-sign binaries
cd ~/network-optimizer
find . -name '*.dylib' -exec codesign --force --sign - {} \;
codesign --force --sign - NetworkOptimizer.Web
# Start service
launchctl load ~/Library/LaunchAgents/com.networkoptimizer.app.plist
```
### Uninstall
```bash
# Stop and remove service
launchctl unload ~/Library/LaunchAgents/com.networkoptimizer.app.plist
rm ~/Library/LaunchAgents/com.networkoptimizer.app.plist
# Remove application
rm -rf ~/network-optimizer
# Remove data (optional - keeps your settings if you reinstall)
rm -rf ~/Library/Application\ Support/NetworkOptimizer
```
---
## Linux Deployment
### Prerequisites
**System Requirements:**
- Ubuntu 20.04+, Debian 11+, RHEL 8+, or compatible
- x64 or ARM64 architecture
- 2GB RAM minimum
- 1GB disk space
**Required Software:**
```bash
# Debian/Ubuntu
sudo apt update
sudo apt install -y sshpass iperf3
# RHEL/CentOS/Fedora
sudo dnf install -y epel-release
sudo dnf install -y sshpass iperf3
```
### Build from Source
```bash
# Install .NET SDK
# Debian/Ubuntu:
wget https://dot.net/v1/dotnet-install.sh -O dotnet-install.sh
chmod +x dotnet-install.sh
./dotnet-install.sh --channel 10.0
export PATH="$HOME/.dotnet:$PATH"
# Clone and build
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
# or via SSH: git clone git@github.com:Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer
# Create installation directory
sudo mkdir -p /opt/network-optimizer
sudo chown $USER:$USER /opt/network-optimizer
# Build for your architecture (x64)
dotnet publish src/NetworkOptimizer.Web -c Release -r linux-x64 --self-contained -o /opt/network-optimizer
# For ARM64, use:
# dotnet publish src/NetworkOptimizer.Web -c Release -r linux-arm64 --self-contained -o /opt/network-optimizer
# Make executable
chmod +x /opt/network-optimizer/NetworkOptimizer.Web
```
### Create Startup Script
```bash
cat > /opt/network-optimizer/start.sh << 'EOF'
#!/bin/bash
cd "$(dirname "$0")"
# Environment configuration
export TZ="America/Chicago" # Change to your timezone
export ASPNETCORE_URLS="http://0.0.0.0:8042"
# Host IP - required for iperf3 client result tracking
export HOST_IP="192.168.1.100" # Change to this server's IP address
# Enable iperf3 server for client speed testing (port 5201)
export Iperf3Server__Enabled=true
# Optional: Set admin password
# export APP_PASSWORD="your-secure-password"
# Start the application
./NetworkOptimizer.Web
EOF
chmod +x /opt/network-optimizer/start.sh
```
### Install as System Service (systemd)
```bash
sudo tee /etc/systemd/system/network-optimizer.service > /dev/null << 'EOF'
[Unit]
Description=Network Optimizer
After=network.target
[Service]
Type=simple
User=YOUR_USERNAME
WorkingDirectory=/opt/network-optimizer
ExecStart=/opt/network-optimizer/start.sh
Restart=always
RestartSec=10
StandardOutput=append:/opt/network-optimizer/logs/stdout.log
StandardError=append:/opt/network-optimizer/logs/stderr.log
[Install]
WantedBy=multi-user.target
EOF
# Replace YOUR_USERNAME
sudo sed -i "s/YOUR_USERNAME/$USER/g" /etc/systemd/system/network-optimizer.service
# Create log directory
mkdir -p /opt/network-optimizer/logs
# Enable and start
sudo systemctl daemon-reload
sudo systemctl enable network-optimizer
sudo systemctl start network-optimizer
```
### Service Management
```bash
# Check status
sudo systemctl status network-optimizer
# Stop
sudo systemctl stop network-optimizer
# Start
sudo systemctl start network-optimizer
# Restart
sudo systemctl restart network-optimizer
# View logs
tail -f /opt/network-optimizer/logs/stdout.log
journalctl -u network-optimizer -f
```
### Data Location
- **Database:** `~/.local/share/NetworkOptimizer/network_optimizer.db`
- **Credentials:** `~/.local/share/NetworkOptimizer/.credential_key`
- **Logs:** `/opt/network-optimizer/logs/`
---
## Windows Deployment
**Use the Windows Installer instead of manual deployment.**
Download the MSI installer from [GitHub Releases](https://github.com/Ozark-Connect/NetworkOptimizer/releases). The installer provides:
- One-click installation
- Automatic Windows Service setup (starts at boot)
- Bundled iperf3 for speed testing
- Proper uninstall via Windows Settings
After installation, access the web UI at **http://localhost:8042** (or use the machine's IP/hostname from other devices).
---
## Client Speed Testing
Native deployments support both browser-based and CLI-based client speed testing.
### OpenSpeedTest™ (Browser-Based)
The macOS install script (`scripts/install-macos-native.sh`) automatically sets up OpenSpeedTest with nginx, providing browser-based speed testing from any device - no client software required.
After installation, access SpeedTest at: **http://your-mac-ip:3005**
For manual setup or Linux, see [Manual OpenSpeedTest Setup](#manual-openspeedtest-setup) below.
### iperf3 Server Mode
For CLI-based testing with iperf3 clients.
### Enable iperf3 Server Mode
Add to your startup script:
```bash
export Iperf3Server__Enabled=true
```
### Port Conflicts
If you already have an iperf3 server running:
```bash
# Linux - stop existing service
sudo systemctl stop iperf3
# Check if port 5201 is in use
sudo ss -tlnp | grep 5201
```
### Testing from Clients
From any device with iperf3 installed:
```bash
# Upload test (client sends to server; iperf3 default)
iperf3 -c your-server-ip
# Download test (server sends to client)
iperf3 -c your-server-ip -R
```
Results appear in Network Optimizer's Client Speed Test page.
### Manual OpenSpeedTest Setup
If you installed manually (without the install script), you can set up OpenSpeedTest:
**macOS:**
```bash
# Install nginx
brew install nginx
# Create SpeedTest directory
mkdir -p ~/network-optimizer/SpeedTest/{conf,logs,temp,html/assets/{css,js,fonts,images/icons}}
cd ~/network-optimizer/SpeedTest
# Copy files from repo (adjust path as needed)
REPO=~/NetworkOptimizer
cp $REPO/src/NetworkOptimizer.Installer/SpeedTest/nginx.conf conf/
cp $REPO/src/NetworkOptimizer.Installer/SpeedTest/nginx/conf/mime.types conf/
cp $REPO/src/OpenSpeedTest/{index.html,hosted.html,downloading,upload} html/
cp -r $REPO/src/OpenSpeedTest/assets/* html/assets/
# Create config.js with your server's IP
cat > html/assets/js/config.js << 'EOF'
window.NETWORK_OPTIMIZER_CONFIG = {
resultsApiUrl: "http://YOUR_IP:8042/api/public/speedtest/results"
};
EOF
# Start nginx
nginx -c ~/network-optimizer/SpeedTest/conf/nginx.conf -p ~/network-optimizer/SpeedTest
```
**Linux:**
```bash
# Install nginx
sudo apt install nginx # Debian/Ubuntu
# or
sudo dnf install nginx # RHEL/Fedora
# Create SpeedTest directory
sudo mkdir -p /opt/network-optimizer/SpeedTest/{conf,logs,temp,html/assets/{css,js,fonts,images/icons}}
sudo chown -R $USER: /opt/network-optimizer/SpeedTest
# Copy files from repo and create config.js (same as macOS, adjust paths)
# Start nginx with the SpeedTest config
sudo nginx -c /opt/network-optimizer/SpeedTest/conf/nginx.conf -p /opt/network-optimizer/SpeedTest
```
Access SpeedTest at `http://your-server:3005`. Results automatically appear in Network Optimizer.
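A quick way to verify the page is being served (expect `200 OK`, or a `302` if `HOST_NAME` canonical-URL enforcement is active):
```bash
curl -I http://your-server:3005
```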
## Firewall Configuration
Ensure port 8042 (or your configured port) is accessible:
**macOS:**
```bash
# Usually not needed for local access
# For remote access, allow in System Preferences > Security & Privacy > Firewall
```
**Linux (UFW):**
```bash
sudo ufw allow 8042/tcp
```
**Linux (firewalld):**
```bash
sudo firewall-cmd --permanent --add-port=8042/tcp
sudo firewall-cmd --reload
```
**Windows:**
```powershell
netsh advfirewall firewall add rule name="Network Optimizer" dir=in action=allow protocol=tcp localport=8042
```
---
## Reverse Proxy (Optional)
For HTTPS access, place behind a reverse proxy like Caddy, nginx, or Traefik.
### Caddy Example
```caddy
network-optimizer.example.com {
reverse_proxy localhost:8042
}
```
### nginx Example
```nginx
server {
listen 443 ssl http2;
server_name network-optimizer.example.com;
ssl_certificate /path/to/cert.pem;
ssl_certificate_key /path/to/key.pem;
location / {
proxy_pass http://localhost:8042;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
```
---
## Troubleshooting
### macOS: "Killed: 9" Error
The binary needs code signing:
```bash
find ~/network-optimizer -name '*.dylib' -exec codesign --force --sign - {} \;
codesign --force --sign - ~/network-optimizer/NetworkOptimizer.Web
```
### macOS: sshpass/iperf3 Not Found
Add Homebrew to PATH in `start.sh`:
```bash
export PATH="/opt/homebrew/bin:/usr/local/bin:$PATH"
```
### Linux: Permission Denied
```bash
chmod +x /opt/network-optimizer/NetworkOptimizer.Web
chmod +x /opt/network-optimizer/start.sh
```
### All Platforms: Port Already in Use
Change the port in your startup script:
```bash
export ASPNETCORE_URLS="http://0.0.0.0:8080" # Use different port
```
### Check Application Logs
```bash
# macOS
tail -f ~/network-optimizer/logs/stdout.log
# Linux
tail -f /opt/network-optimizer/logs/stdout.log
journalctl -u network-optimizer -f
# Windows
type C:\NetworkOptimizer\logs\stdout.log
```
### Reset Admin Password
If you forget the admin password, use the reset script:
```bash
curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.sh | bash
```
The script auto-detects macOS or Linux native installations, clears the password, restarts the service, and displays the new temporary password. Use `--macos` or `--linux` to force a specific mode.
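For example, to force Linux mode when piping the script (arguments after `bash -s --` are forwarded to the script):
```bash
curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.sh | bash -s -- --linux
```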
**Manual fallback:**
```bash
# macOS
launchctl unload ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
sqlite3 ~/Library/Application\ Support/NetworkOptimizer/network_optimizer.db \
"UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
launchctl load ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
grep "Password:" ~/network-optimizer/logs/stdout.log | tail -1
# Linux
sudo systemctl stop network-optimizer
sqlite3 /opt/network-optimizer/data/network_optimizer.db \
"UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
sudo systemctl start network-optimizer
journalctl -u network-optimizer --since "2 minutes ago" | grep "Password:"
```
---
## Support
- Documentation: See `docs/` folder in repository
- GitHub Issues: https://github.com/Ozark-Connect/NetworkOptimizer/issues
- Email: tj@ozarkconnect.net
================================================
FILE: docker/QUICK-REFERENCE.md
================================================
# Network Optimizer - Quick Reference Card
## Quick Start
### Option A: Pull Docker Image (Recommended)
**Linux / Windows:**
```bash
mkdir network-optimizer && cd network-optimizer
curl -o docker-compose.yml https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.prod.yml
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/.env.example
cp .env.example .env
docker compose up -d
```
**macOS:**
```bash
mkdir network-optimizer && cd network-optimizer
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.macos.yml
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/.env.example
cp .env.example .env
docker compose -f docker-compose.macos.yml up -d
```
### Option B: Build from Source
**Linux / Windows:**
```bash
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer/docker
docker compose build && docker compose up -d
```
**macOS:**
```bash
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer/docker
docker compose -f docker-compose.macos.yml build
docker compose -f docker-compose.macos.yml up -d
```
### First Run - Get Admin Password
```bash
docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
Access at: **http://localhost:8042**
## Common Commands
### Service Management
```bash
docker compose up -d        # Start
docker compose down         # Stop
docker compose restart      # Restart
docker compose ps           # Check status
```
### Logs
```bash
docker compose logs -f network-optimizer
```
### Updates
**Docker Image:**
```bash
docker compose pull && docker compose up -d
```
**From Source:**
```bash
git pull && docker compose build && docker compose up -d
```
## Configuration
**Environment File:** `.env` (optional)
```bash
cp .env.example .env
nano .env
docker compose up -d
```
**Key Settings:**
```env
WEB_PORT=8042 # Web UI port
TZ=America/Chicago # Timezone
APP_PASSWORD= # Optional preset password
HOST_IP= # Required for bridge networking
```
## Admin Password
**Priority order:**
1. Database password (Settings → Admin Password) - recommended
2. `APP_PASSWORD` environment variable
3. Auto-generated on first run (check logs)
**Set permanent password:**
1. Log in with auto-generated password from logs
2. Go to Settings → Admin Password
3. Enter and save new password
## Troubleshooting
### Service Won't Start
```bash
docker compose down
docker compose up -d
docker compose logs -f network-optimizer
```
### Port Already in Use
```bash
# Edit .env
WEB_PORT=8090
docker compose up -d
```
### Reset Everything
```bash
docker compose down -v
rm -rf data/
docker compose up -d
```
## Client Speed Testing
### Browser-Based (OpenSpeedTest™)
Access at: **http://localhost:3005** (port configurable via `OPENSPEEDTEST_PORT`)
Configure in `.env` (setting `HOST_NAME` also enforces the canonical URL via a 302 redirect):
```env
HOST_IP=192.168.1.100 # For path analysis (if auto-detect fails)
HOST_NAME=nas # Canonical URL + friendlier URLs (needs DNS)
REVERSE_PROXIED_HOST_NAME=optimizer.example.com # If behind proxy (https)
```
To disable: comment out the `network-optimizer-speedtest` service in `docker-compose.yml`
### CLI-Based (iperf3)
Enable in `.env`:
```env
IPERF3_SERVER_ENABLED=true
```
Test from clients:
```bash
iperf3 -c your-server        # Upload (default: client sends)
iperf3 -c your-server -R     # Download (-R: server sends)
```
## Important Files
| File | Purpose |
|------|---------|
| `.env` | Configuration (optional) |
| `data/` | SQLite database, credentials |
| `logs/` | Application logs |
| `ssh-keys/` | SSH keys for device access |
## Health Check
```bash
docker compose ps
curl http://localhost:8042/api/health
```
## Backup & Restore
### Backup
```bash
tar czf backup-$(date +%Y%m%d).tar.gz data/
```
### Restore
```bash
docker compose down
tar xzf backup-YYYYMMDD.tar.gz
docker compose up -d
```
## Security Checklist
- [ ] Set permanent password in Settings
- [ ] Firewall configured (allow 8042/tcp)
- [ ] HTTPS via reverse proxy (production)
- [ ] Regular backups of `data/` directory
## Docker Commands
```bash
docker compose up -d                          # Start in background
docker compose down                           # Stop and remove
docker compose restart                        # Restart
docker compose exec network-optimizer bash    # Shell into container
docker stats # Resource usage
docker system prune # Clean up unused objects
```
## Getting Help
- **Logs**: `docker compose logs -f network-optimizer`
- **Health**: `curl http://localhost:8042/api/health`
- **GitHub**: https://github.com/Ozark-Connect/NetworkOptimizer
## System Requirements
- Docker 20.10+
- Docker Compose 2.0+
- 1GB RAM minimum
- 500MB disk minimum
================================================
FILE: docker/README.md
================================================
# Network Optimizer Docker Deployment
Complete Docker infrastructure for the Ozark Connect Network Optimizer for UniFi.
## Quick Start
### Option A: Pull Docker Image (Recommended)
The fastest way to get started. No build required.
**Linux / Windows:**
```bash
mkdir network-optimizer && cd network-optimizer
curl -o docker-compose.yml https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.prod.yml
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/.env.example
cp .env.example .env
docker compose up -d
```
**macOS:**
```bash
mkdir network-optimizer && cd network-optimizer
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.macos.yml
curl -O https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/.env.example
cp .env.example .env
docker compose -f docker-compose.macos.yml up -d
```
### Option B: Build from Source
Clone the repository and build locally.
**Linux / Windows:**
```bash
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer/docker
cp .env.example .env
docker compose build
docker compose up -d
```
**macOS:**
macOS doesn't support `network_mode: host`, so use the macOS-specific compose file:
```bash
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer/docker
cp .env.example .env
docker compose -f docker-compose.macos.yml build
docker compose -f docker-compose.macos.yml up -d
```
### First Run
1. **Get the auto-generated admin password:**
```bash
docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
On first run, a secure password is generated and displayed in the logs.
2. **Access the Web UI:**
- Network Optimizer: http://localhost:8042 (use password from logs)
- Wait ~60 seconds on first startup
3. **Set a permanent password:**
After logging in, go to Settings → Admin Password to set your own password (recommended).
**No `.env` file required** - defaults work out of the box. Optionally edit `.env` to set `APP_PASSWORD` or timezone.
## Architecture
```
┌─────────────────────────────────────────────────────────────────┐
│ Docker Compose Stack │
├─────────────────────────────────────────────────────────────────┤
│ │
│ ┌──────────────────────────────────────────────────────────┐ │
│ │ Network Optimizer │ │
│ │ - Blazor Web UI :8042 │ │
│ │ - iperf3 Server :5201 (optional) │ │
│ │ - SQLite Database (persistent in ./data) │ │
│ │ - Security Auditing, SQM, Speed Tests │ │
│ └──────────────────────────────────────────────────────────┘ │
│ │
│ ┌──────────────────────────────────────────────────────────┐ │
│ │ OpenSpeedTest │ │
│ │ - Browser-based speed test :3005 (configurable) │ │
│ │ - Results sent to Network Optimizer API │ │
│ └──────────────────────────────────────────────────────────┘ │
│ │
└─────────────────────────────────────────────────────────────────┘
```
## Services
### Network Optimizer (Port 8042)
The main application providing:
- **Web UI**: Blazor Server web interface
- Dashboard and monitoring
- SQM configuration and management
- Security audit results
- Speed testing with path analysis
- Report generation
**Volumes:**
- `./data` → `/app/data` - SQLite database, configurations
- `./ssh-keys` → `/app/ssh-keys` - SSH keys for agent deployment (optional)
- `./logs` → `/app/logs` - Application logs
### OpenSpeedTest (Port 3005, configurable)
Browser-based speed testing from any device. Results are automatically sent to Network Optimizer.
**Configuration:** Set `HOST_NAME` in `.env` for canonical URL enforcement and friendlier URLs. Set `HOST_IP` if path analysis can't auto-detect the server IP (bridge networking). See [Client Speed Testing](DEPLOYMENT.md#client-speed-testing-optional) for details.
**HTTPS Note:** If serving OpenSpeedTest over HTTPS (`OPENSPEEDTEST_HTTPS=true`), you must also set `REVERSE_PROXIED_HOST_NAME` so the main app is accessible via HTTPS. Browsers block mixed content (HTTPS pages cannot POST to HTTP endpoints). See [HTTPS Configuration Requirements](DEPLOYMENT.md#https-configuration-requirements).
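A sketch of the `.env` pairing this requires (hostname is a placeholder):
```env
OPENSPEEDTEST_HTTPS=true
OPENSPEEDTEST_HTTPS_PORT=443
REVERSE_PROXIED_HOST_NAME=optimizer.example.com
```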
**To disable:** Comment out the `network-optimizer-speedtest` service in `docker-compose.yml`.
See [Client Speed Testing](DEPLOYMENT.md#client-speed-testing-optional) for full setup details.
## Admin Authentication
The web UI requires authentication. Password sources (in priority order):
1. **Database password** - Set via Settings → Admin Password (recommended)
2. **Environment variable** - Set `APP_PASSWORD` in `.env`
3. **Auto-generated** - On first run, a secure password is generated and shown in logs
### First Run
```bash
# View the auto-generated password (shown only once on first startup)
docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
### Setting a Permanent Password
1. Log in with the auto-generated password
2. Go to Settings → Admin Password
3. Enter and confirm your new password
4. Click Save
### Using Environment Variable
Alternatively, set `APP_PASSWORD` in `.env`:
```env
APP_PASSWORD=your_secure_password
```
**Note:** Database passwords override the environment variable. Clear the database password in Settings to use `APP_PASSWORD`.
### Reset Admin Password
If you've forgotten your password or need to reset it:
```bash
# Clear the password from the database
docker exec network-optimizer sqlite3 /app/data/network_optimizer.db "UPDATE AdminSettings SET Password = NULL;"
# Restart to trigger auto-generated password
cd /path/to/network-optimizer/docker
docker compose restart network-optimizer
# View the new auto-generated password
docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
## Configuration
### Environment Variables
See `.env.example` for all available options. Key variables:
```env
WEB_PORT=8042 # Blazor web UI (default)
TZ=America/Chicago # Your timezone
APP_PASSWORD= # Optional: preset admin password (otherwise auto-generated)
HOST_IP= # Required for bridge networking (path analysis)
```
### Volume Mounts
#### Persistent Data
The `./data` directory contains:
- SQLite database (configs, audit results)
- Encrypted credentials
- Application state
**Backup:** Regular backups of `./data` directory recommended.
#### SSH Keys (Optional)
Place SSH keys in `./ssh-keys/` for automated agent deployment:
```bash
./ssh-keys/
├── id_rsa # Private key
└── id_rsa.pub # Public key
```
Set permissions:
```bash
chmod 600 ./ssh-keys/id_rsa
chmod 644 ./ssh-keys/id_rsa.pub
```
## Management
### Starting the Stack
```bash
docker compose up -d
```
### Stopping the Stack
```bash
docker compose down
```
### View Logs
```bash
docker compose logs -f network-optimizer
```
### Restart a Service
```bash
docker compose restart network-optimizer
```
### Update Images
```bash
docker compose pull
docker compose up -d
```
### Health Checks
```bash
docker compose ps
curl http://localhost:8042/api/health
```
Healthy output:
```
NAME STATUS
network-optimizer Up (healthy)
```
## Troubleshooting
### Service Won't Start
**Check logs:**
```bash
docker compose logs
```
**Common issues:**
1. **Port conflicts:** Another service using 8042
- Solution: Change `WEB_PORT` in `.env`
2. **Permission errors:** Cannot write to volumes
- Solution: `chmod` the directories or check Docker volume permissions
### Reset Everything
**Complete reset (deletes all data):**
```bash
docker compose down -v
rm -rf data/
docker compose up -d
```
## Security Considerations
### Production Deployment
1. **Set a strong admin password** via Settings → Admin Password after first login
2. **Restrict network access:**
- Use firewall rules to limit who can access port 8042
- Consider reverse proxy with SSL (nginx, Caddy, Traefik)
3. **Enable HTTPS** with reverse proxy:
```nginx
server {
listen 443 ssl;
server_name network-optimizer.example.com;
ssl_certificate /path/to/cert.pem;
ssl_certificate_key /path/to/key.pem;
location / {
proxy_pass http://localhost:8042;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
```
4. **Backup regularly:** Back up the `./data` directory which contains the SQLite database and credentials.
## Upgrading
### Standard Updates
```bash
docker compose down
docker compose pull
docker compose up -d
```
Data persists in the `./data` volume.
### Migrating from Build-from-Source
If you've been building locally with `docker compose build` and want to switch to pre-built images, see the [Migration Guide](DEPLOYMENT.md#migrating-from-build-from-source-to-pre-built-images). A simple `docker compose pull` won't work because your locally-built image already has the registry tag.
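One possible path, sketched here under the assumption you keep the same `data/` directory (the Migration Guide has the authoritative steps), is to swap your compose file for the prod variant, which has no `build:` section:
```bash
curl -o docker-compose.yml https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/docker/docker-compose.prod.yml
docker compose pull && docker compose up -d
```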
### Before Major Updates
```bash
# Backup data first
tar czf backup-$(date +%Y%m%d).tar.gz data/
```
## Support
For issues, feature requests, or questions:
- GitHub: https://github.com/Ozark-Connect/NetworkOptimizer
- Documentation: See `docs/` folder in repository
## License
Business Source License 1.1. See [LICENSE](../LICENSE) in the repository root.
© 2026 Ozark Connect
================================================
FILE: docker/docker-compose.local.yml
================================================
# Local Development Configuration
#
# Usage:
# cd docker
# docker compose -f docker-compose.local.yml build
# docker compose -f docker-compose.local.yml up -d
#
# Access at: http://localhost:8042
#
# Note: Uses bridge networking with port mapping.
# Set HOST_IP in .env to your machine's IP for accurate path analysis.
services:
network-optimizer:
build:
context: ..
dockerfile: docker/Dockerfile
image: ozark-connect/network-optimizer:latest
container_name: network-optimizer
restart: unless-stopped
ports:
- "${WEB_PORT:-8042}:8042" # Blazor web UI
volumes:
- ./data:/app/data # SQLite, configs, license
- ./ssh-keys:/app/ssh-keys # Optional: SSH keys for agent deployment
- ./logs:/app/logs # Application logs
environment:
- TZ=${TZ:-UTC}
- APP_PASSWORD=${APP_PASSWORD:-}
- DEMO_MODE_MAPPINGS=${DEMO_MODE_MAPPINGS:-}
# Host IP/name for path analysis and CORS (required for client speed tests)
- HOST_IP=${HOST_IP:-}
- HOST_NAME=${HOST_NAME:-}
# Reverse proxy hostname (e.g., optimizer.example.com) - overrides HOST_NAME for API URLs
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
# OpenSpeedTest configuration (for UI display and CORS)
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
# iperf3 server mode - enable to accept client-initiated speed tests on port 5201
- Iperf3Server__Enabled=${IPERF3_SERVER_ENABLED:-false}
# Logging (Trace, Debug, Information, Warning, Error, Critical)
- Logging__LogLevel__Default=${LOG_LEVEL:-Information}
- Logging__LogLevel__NetworkOptimizer=${APP_LOG_LEVEL:-Information}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8042/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# Network Optimizer Speed Test - customized OpenSpeedTest that sends results to Network Optimizer
# Requires HOST_IP or HOST_NAME to be set in .env for result reporting
network-optimizer-speedtest:
build:
context: ..
dockerfile: docker/openspeedtest/Dockerfile
image: ozark-connect/speedtest:latest
container_name: network-optimizer-speedtest
restart: unless-stopped
ports:
- "${OPENSPEEDTEST_PORT:-3005}:3000"
environment:
- TZ=${TZ:-UTC}
# For URL construction and host enforcement redirect
- HOST_NAME=${HOST_NAME:-}
- HOST_IP=${HOST_IP:-}
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
# For result reporting URL
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
# InfluxDB and Grafana available if needed for future monitoring features
# Uncomment below when time-series metrics are implemented
# influxdb:
# image: influxdb:2.8
# container_name: network-optimizer-influxdb
# restart: unless-stopped
# ports:
# - "${INFLUXDB_PORT:-8086}:8086"
# volumes:
# - influxdb-data:/var/lib/influxdb2
# - influxdb-config:/etc/influxdb2
# environment:
# - DOCKER_INFLUXDB_INIT_MODE=setup
# - DOCKER_INFLUXDB_INIT_USERNAME=${INFLUXDB_USERNAME:-admin}
# - DOCKER_INFLUXDB_INIT_PASSWORD=${INFLUXDB_PASSWORD}
# - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=${INFLUXDB_TOKEN}
# - DOCKER_INFLUXDB_INIT_ORG=${INFLUXDB_ORG:-network-optimizer}
# - DOCKER_INFLUXDB_INIT_BUCKET=${INFLUXDB_BUCKET:-network_optimizer}
# - DOCKER_INFLUXDB_INIT_RETENTION=${INFLUXDB_RETENTION:-30d}
# - TZ=${TZ:-UTC}
# networks:
# - network-optimizer
# healthcheck:
# test: ["CMD", "influx", "ping"]
# interval: 30s
# timeout: 10s
# retries: 5
# start_period: 60s
# grafana:
# image: grafana/grafana:latest
# container_name: network-optimizer-grafana
# restart: unless-stopped
# ports:
# - "${GRAFANA_PORT:-3000}:3000"
# volumes:
# - grafana-data:/var/lib/grafana
# - ./grafana/provisioning:/etc/grafana/provisioning:ro
# - ./grafana/dashboards:/var/lib/grafana/dashboards:ro
# environment:
# - GF_SECURITY_ADMIN_USER=${GRAFANA_USERNAME:-admin}
# - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD}
# - GF_USERS_ALLOW_SIGN_UP=false
# - GF_AUTH_ANONYMOUS_ENABLED=true
# - GF_AUTH_ANONYMOUS_ORG_ROLE=Viewer
# - GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/var/lib/grafana/dashboards/network-overview.json
# - TZ=${TZ:-UTC}
# networks:
# - network-optimizer
# depends_on:
# influxdb:
# condition: service_healthy
# healthcheck:
# test: ["CMD-SHELL", "curl -f http://localhost:3000/api/health || exit 1"]
# interval: 30s
# timeout: 10s
# retries: 3
# start_period: 60s
# networks:
# network-optimizer:
# driver: bridge
# volumes:
# influxdb-data:
# driver: local
# influxdb-config:
# driver: local
# grafana-data:
# driver: local
================================================
FILE: docker/docker-compose.macos.yml
================================================
# macOS Development/Testing Configuration
#
# Usage:
# cd docker
# docker compose -f docker-compose.macos.yml build
# docker compose -f docker-compose.macos.yml up -d
#
# Access at: http://localhost:8042
#
# Note: macOS doesn't support network_mode: host, so we use port mapping instead.
services:
network-optimizer:
build:
context: ..
dockerfile: docker/Dockerfile
image: ghcr.io/ozark-connect/network-optimizer:latest
container_name: network-optimizer
restart: unless-stopped
ports:
- "8042:8042" # Blazor web UI
volumes:
- ./data:/app/data # SQLite, configs, license
- ./ssh-keys:/app/ssh-keys # Optional: SSH keys for agent deployment
- ./logs:/app/logs # Application logs
environment:
- TZ=${TZ:-America/Chicago}
- APP_PASSWORD=${APP_PASSWORD:-}
- DEMO_MODE_MAPPINGS=${DEMO_MODE_MAPPINGS:-}
# Host IP/name for path analysis and CORS (required for client speed tests)
- HOST_IP=${HOST_IP:-}
- HOST_NAME=${HOST_NAME:-}
# Reverse proxy hostname (e.g., optimizer.example.com) - overrides HOST_NAME for API URLs
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
# OpenSpeedTest configuration (for UI display and CORS)
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
# iperf3 server mode - enable to accept client-initiated speed tests on port 5201
- Iperf3Server__Enabled=${IPERF3_SERVER_ENABLED:-false}
# Logging (Trace, Debug, Information, Warning, Error, Critical)
- Logging__LogLevel__Default=${LOG_LEVEL:-Information}
- Logging__LogLevel__NetworkOptimizer=${APP_LOG_LEVEL:-Information}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8042/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# Network Optimizer Speed Test - customized OpenSpeedTest that sends results to Network Optimizer
# Requires HOST_IP or HOST_NAME to be set in .env for result reporting
network-optimizer-speedtest:
build:
context: ..
dockerfile: docker/openspeedtest/Dockerfile
image: ghcr.io/ozark-connect/speedtest:latest
container_name: network-optimizer-speedtest
restart: unless-stopped
ports:
- "${OPENSPEEDTEST_PORT:-3005}:3000"
environment:
- TZ=${TZ:-America/Chicago}
# For URL construction and host enforcement redirect
- HOST_NAME=${HOST_NAME:-}
- HOST_IP=${HOST_IP:-}
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
# For result reporting URL
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
# InfluxDB and Grafana available if needed for monitoring
# Uncomment below and run: docker compose -f docker-compose.macos.yml up -d
# influxdb:
# image: influxdb:2.7
# container_name: network-optimizer-influxdb
# restart: unless-stopped
# ports:
# - "8086:8086"
# volumes:
# - influxdb-data:/var/lib/influxdb2
# - influxdb-config:/etc/influxdb2
# environment:
# - DOCKER_INFLUXDB_INIT_MODE=setup
# - DOCKER_INFLUXDB_INIT_USERNAME=${INFLUXDB_USERNAME:-admin}
# - DOCKER_INFLUXDB_INIT_PASSWORD=${INFLUXDB_PASSWORD}
# - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=${INFLUXDB_TOKEN}
# - DOCKER_INFLUXDB_INIT_ORG=${INFLUXDB_ORG:-network-optimizer}
# - DOCKER_INFLUXDB_INIT_BUCKET=${INFLUXDB_BUCKET:-network_optimizer}
# - DOCKER_INFLUXDB_INIT_RETENTION=${INFLUXDB_RETENTION:-30d}
# - TZ=${TZ:-UTC}
# healthcheck:
# test: ["CMD", "influx", "ping"]
# interval: 30s
# timeout: 10s
# retries: 5
# start_period: 60s
# grafana:
# image: grafana/grafana:latest
# container_name: network-optimizer-grafana
# restart: unless-stopped
# ports:
# - "3000:3000"
# volumes:
# - grafana-data:/var/lib/grafana
# - ./grafana/provisioning:/etc/grafana/provisioning:ro
# - ./grafana/dashboards:/var/lib/grafana/dashboards:ro
# environment:
# - GF_SECURITY_ADMIN_USER=${GRAFANA_USERNAME:-admin}
# - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD}
# - GF_USERS_ALLOW_SIGN_UP=false
# - GF_AUTH_ANONYMOUS_ENABLED=true
# - GF_AUTH_ANONYMOUS_ORG_ROLE=Viewer
# - TZ=${TZ:-UTC}
# depends_on:
# influxdb:
# condition: service_healthy
# healthcheck:
# test: ["CMD-SHELL", "curl -f http://localhost:3000/api/health || exit 1"]
# interval: 30s
# timeout: 10s
# retries: 3
# start_period: 60s
# volumes:
# influxdb-data:
# driver: local
# influxdb-config:
# driver: local
# grafana-data:
# driver: local
================================================
FILE: docker/docker-compose.prod.yml
================================================
services:
network-optimizer:
image: ghcr.io/ozark-connect/network-optimizer:latest
container_name: network-optimizer
restart: unless-stopped
network_mode: host
volumes:
- ./data:/app/data # SQLite, configs, license
- ./ssh-keys:/app/ssh-keys # Optional: SSH keys for agent deployment
- ./logs:/app/logs # Application logs
environment:
- TZ=${TZ:-America/Chicago}
# Bind to localhost only (for reverse proxy) or all interfaces (direct access)
- BIND_LOCALHOST_ONLY=${BIND_LOCALHOST_ONLY:-false}
- APP_PASSWORD=${APP_PASSWORD:-}
- DEMO_MODE_MAPPINGS=${DEMO_MODE_MAPPINGS:-}
# Host IP/name for path analysis and CORS (required for client speed tests)
- HOST_IP=${HOST_IP:-}
- HOST_NAME=${HOST_NAME:-}
# Reverse proxy hostname (e.g., optimizer.example.com) - overrides HOST_NAME for API URLs
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
# OpenSpeedTest configuration (for UI display and CORS)
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
# iperf3 server mode - enable to accept client-initiated speed tests on port 5201
- Iperf3Server__Enabled=${IPERF3_SERVER_ENABLED:-false}
# Logging (Trace, Debug, Information, Warning, Error, Critical)
- Logging__LogLevel__Default=${LOG_LEVEL:-Information}
- Logging__LogLevel__NetworkOptimizer=${APP_LOG_LEVEL:-Information}
# Point to existing InfluxDB if desired (optional)
# - INFLUXDB_URL=http://localhost:8086
# - INFLUXDB_TOKEN=${INFLUXDB_TOKEN}
# - INFLUXDB_ORG=${INFLUXDB_ORG:-network-optimizer}
# - INFLUXDB_BUCKET=${INFLUXDB_BUCKET:-network_optimizer}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8042/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# Network Optimizer Speed Test - customized OpenSpeedTest that sends results to Network Optimizer
# Requires HOST_IP or HOST_NAME to be set in .env for result reporting
network-optimizer-speedtest:
image: ghcr.io/ozark-connect/speedtest:latest
container_name: network-optimizer-speedtest
restart: unless-stopped
ports:
- "${OPENSPEEDTEST_PORT:-3005}:3000"
sysctls:
# Raise TCP autotuning ceiling so long-RTT single-stream speedtests are not rwnd-bound.
# Container has its own netns; without this the ceiling is the 6MB kernel default,
# which caps single-stream throughput at ~rwnd/RTT (e.g. ~225 Mbps at 100ms RTT).
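# Rough arithmetic (assuming the default tcp_adv_win_scale=1, which advertises about half
# the buffer as rwnd): 6 MB ceiling -> ~3 MB rwnd; 3 MB / 0.1 s RTT ≈ 240 Mbps.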
net.ipv4.tcp_rmem: "4096 131072 33554432"
net.ipv4.tcp_wmem: "4096 65536 33554432"
net.ipv4.tcp_mtu_probing: "1"
# tcp_congestion_control is intentionally not set here — it hard-fails
# container start on kernels without the bbr module loaded (Synology, QNAP,
# some Proxmox/LXC setups) and compose sysctls are all-or-nothing. The
# entrypoint reports CC state and tells the operator how to enable bbr on
# the host if desired.
environment:
- TZ=${TZ:-America/Chicago}
# For URL construction and host enforcement redirect
- HOST_NAME=${HOST_NAME:-}
- HOST_IP=${HOST_IP:-}
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
# For result reporting URL
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
# Production config - pre-built images from GHCR, host network mode
# For local dev with build from source, use docker-compose.yml
================================================
FILE: docker/docker-compose.yml
================================================
services:
network-optimizer:
build:
context: ..
dockerfile: docker/Dockerfile
image: ghcr.io/ozark-connect/network-optimizer:latest
container_name: network-optimizer
restart: unless-stopped
network_mode: host
volumes:
- ./data:/app/data # SQLite, configs, license
- ./ssh-keys:/app/ssh-keys # Optional: SSH keys for agent deployment
- ./logs:/app/logs # Application logs
environment:
- TZ=${TZ:-America/Chicago}
# Bind to localhost only (for reverse proxy) or all interfaces (direct access)
- BIND_LOCALHOST_ONLY=${BIND_LOCALHOST_ONLY:-false}
- APP_PASSWORD=${APP_PASSWORD:-}
- DEMO_MODE_MAPPINGS=${DEMO_MODE_MAPPINGS:-}
# Host IP/name for path analysis and CORS (required for client speed tests)
- HOST_IP=${HOST_IP:-}
- HOST_NAME=${HOST_NAME:-}
# Reverse proxy hostname (e.g., optimizer.example.com) - overrides HOST_NAME for API URLs
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
# OpenSpeedTest configuration (for UI display and CORS)
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
# iperf3 server mode - enable to accept client-initiated speed tests on port 5201
- Iperf3Server__Enabled=${IPERF3_SERVER_ENABLED:-false}
# Logging (Trace, Debug, Information, Warning, Error, Critical)
- Logging__LogLevel__Default=${LOG_LEVEL:-Information}
- Logging__LogLevel__NetworkOptimizer=${APP_LOG_LEVEL:-Information}
# Point to existing InfluxDB if desired (optional)
# - INFLUXDB_URL=http://localhost:8086
# - INFLUXDB_TOKEN=${INFLUXDB_TOKEN}
# - INFLUXDB_ORG=${INFLUXDB_ORG:-network-optimizer}
# - INFLUXDB_BUCKET=${INFLUXDB_BUCKET:-network_optimizer}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8042/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# Network Optimizer Speed Test - customized OpenSpeedTest that sends results to Network Optimizer
# Requires HOST_IP or HOST_NAME to be set in .env for result reporting
network-optimizer-speedtest:
build:
context: ..
dockerfile: docker/openspeedtest/Dockerfile
image: ghcr.io/ozark-connect/speedtest:latest
container_name: network-optimizer-speedtest
restart: unless-stopped
ports:
- "${OPENSPEEDTEST_PORT:-3005}:3000"
sysctls:
# Raise TCP autotuning ceiling so long-RTT single-stream speedtests are not rwnd-bound.
# Container has its own netns; without this the ceiling is the 6MB kernel default,
# which caps single-stream throughput at ~rwnd/RTT (e.g. ~225 Mbps at 100ms RTT).
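# Rough arithmetic (assuming the default tcp_adv_win_scale=1, which advertises about half
# the buffer as rwnd): 6 MB ceiling -> ~3 MB rwnd; 3 MB / 0.1 s RTT ≈ 240 Mbps.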
net.ipv4.tcp_rmem: "4096 131072 33554432"
net.ipv4.tcp_wmem: "4096 65536 33554432"
net.ipv4.tcp_mtu_probing: "1"
# tcp_congestion_control is intentionally not set here — it hard-fails
# container start on kernels without the bbr module loaded (Synology, QNAP,
# some Proxmox/LXC setups) and compose sysctls are all-or-nothing. The
# entrypoint reports CC state and tells the operator how to enable bbr on
# the host if desired.
environment:
- TZ=${TZ:-America/Chicago}
# For URL construction and host enforcement redirect
- HOST_NAME=${HOST_NAME:-}
- HOST_IP=${HOST_IP:-}
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
# For result reporting URL
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
# Build-from-source config - host network mode (set BIND_LOCALHOST_ONLY=true when behind a reverse proxy such as Caddy)
# For local dev with InfluxDB/Grafana, use docker-compose.local.yml
================================================
FILE: docker/entrypoint.sh
================================================
#!/bin/bash
set -e
# Set container timezone from TZ env var so .NET TimeZoneInfo.Local is correct
if [ -n "$TZ" ] && [ -f "/usr/share/zoneinfo/$TZ" ]; then
ln -sf "/usr/share/zoneinfo/$TZ" /etc/localtime
echo "$TZ" > /etc/timezone
fi
# Fix ownership of mounted volumes (they may be created as root by Docker)
# This runs as root before dropping to the app user
chown -R app:app /app/data /app/logs /app/ssh-keys 2>/dev/null || true
# Set bind address based on BIND_LOCALHOST_ONLY
# Default: false (bind to all interfaces for direct network access)
# Set to true when behind a reverse proxy on the same host
if [ "${BIND_LOCALHOST_ONLY,,}" = "true" ]; then
export ASPNETCORE_URLS="http://127.0.0.1:8042"
echo "Binding to localhost only (127.0.0.1:8042)"
else
export ASPNETCORE_URLS="http://0.0.0.0:8042"
echo "Binding to all interfaces (0.0.0.0:8042)"
fi
# Drop to app user and run the application
exec gosu app dotnet NetworkOptimizer.Web.dll "$@"
================================================
FILE: docker/grafana/dashboards/network-overview.json
================================================
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"gridPos": {
"h": 3,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
"content": "\n Network Optimizer - Overview Dashboard
\n Real-time network health and performance monitoring\n
",
"mode": "html"
},
"pluginVersion": "10.0.0",
"title": "Dashboard Header",
"type": "text"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 6,
"w": 4,
"x": 0,
"y": 3
},
"id": 2,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"device_metrics\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> group()\n |> count()\n |> yield(name: \"device_count\")",
"refId": "A"
}
],
"title": "Total Devices",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [
{
"options": {
"0": {
"color": "red",
"index": 1,
"text": "Offline"
},
"1": {
"color": "green",
"index": 0,
"text": "Online"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 0
},
{
"color": "green",
"value": 1
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 6,
"w": 4,
"x": 4,
"y": 3
},
"id": 3,
"options": {
"colorMode": "background",
"graphMode": "none",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "value_and_name"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"rate\")\n |> last()\n |> map(fn: (r) => ({ r with _value: if exists r._value then 1 else 0 }))\n |> yield(name: \"sqm_status\")",
"refId": "A"
}
],
"title": "SQM Status",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "orange",
"value": 50
},
{
"color": "yellow",
"value": 70
},
{
"color": "green",
"value": 85
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 6,
"w": 4,
"x": 8,
"y": 3
},
"id": 4,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_score\")\n |> filter(fn: (r) => r[\"_field\"] == \"score\")\n |> last()\n |> yield(name: \"security_score\")",
"refId": "A"
}
],
"title": "Security Score",
"type": "gauge"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 1
},
{
"color": "orange",
"value": 3
},
{
"color": "red",
"value": 5
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 6,
"w": 4,
"x": 12,
"y": 3
},
"id": 5,
"options": {
"colorMode": "background",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_issues\")\n |> filter(fn: (r) => r[\"_field\"] == \"count\")\n |> filter(fn: (r) => r[\"severity\"] == \"critical\")\n |> last()\n |> yield(name: \"critical_issues\")",
"refId": "A"
}
],
"title": "Critical Issues",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "Mbits"
},
"overrides": []
},
"gridPos": {
"h": 6,
"w": 8,
"x": 16,
"y": 3
},
"id": 6,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"rate\" or r[\"_field\"] == \"baseline\")\n |> aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |> yield(name: \"bandwidth\")",
"refId": "A"
}
],
"title": "WAN Bandwidth (Current vs Baseline)",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Status"
},
"properties": [
{
"id": "mappings",
"value": [
{
"options": {
"1": {
"color": "green",
"index": 0,
"text": "Online"
},
"0": {
"color": "red",
"index": 1,
"text": "Offline"
}
},
"type": "value"
}
]
},
{
"id": "custom.cellOptions",
"value": {
"mode": "basic",
"type": "color-background"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 9
},
"id": 7,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true,
"sortBy": []
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"device_metrics\")\n |> filter(fn: (r) => r[\"_field\"] == \"cpu\" or r[\"_field\"] == \"memory_used\" or r[\"_field\"] == \"uptime\")\n |> last()\n |> pivot(rowKey:[\"device\", \"type\"], columnKey: [\"_field\"], valueColumn: \"_value\")\n |> map(fn: (r) => ({ r with status: if exists r.uptime and r.uptime > 0 then 1 else 0 }))\n |> yield(name: \"device_status\")",
"refId": "A"
}
],
"title": "Device Status",
"type": "table"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 9
},
"id": 8,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"latency\")\n |> aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |> yield(name: \"latency\")",
"refId": "A"
}
],
"title": "WAN Latency",
"type": "timeseries"
}
],
"refresh": "30s",
"schemaVersion": 38,
"style": "dark",
"tags": [
"network-optimizer",
"overview"
],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "InfluxDB-NetworkOptimizer",
"value": "InfluxDB-NetworkOptimizer"
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "DS_INFLUXDB",
"options": [],
"query": "influxdb",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h"
]
},
"timezone": "",
"title": "Network Overview",
"uid": "network-overview",
"version": 1,
"weekStart": ""
}
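
A note on the data contract: the network-overview panels read the network_optimizer bucket and expect device_metrics points tagged with device and type that carry cpu, memory_used, and uptime fields, plus sqm_stats points for the bandwidth and latency panels. Below is a minimal sketch for seeding one such point with the influxdb-client Python package; the URL, token, and "gateway" device name are placeholders, not values from this repository.

from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS

client = InfluxDBClient(
    url="http://localhost:8086",     # placeholder: InfluxDB from the compose stack
    token="YOUR_INFLUXDB_TOKEN",     # placeholder: token from docker/.env
    org="network-optimizer",
)
write_api = client.write_api(write_options=SYNCHRONOUS)
point = (
    Point("device_metrics")
    .tag("device", "gateway")        # hypothetical device name
    .tag("type", "gateway")
    .field("cpu", 12.5)
    .field("memory_used", 41.0)
    .field("uptime", 86400)
)
write_api.write(bucket="network_optimizer", record=point)
client.close()
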
================================================
FILE: docker/grafana/dashboards/security-posture.json
================================================
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"enable": true,
"iconColor": "red",
"name": "Audit Events",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"gridPos": {
"h": 2,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
"content": "\n Security Posture Dashboard
\n Configuration audit scores, security issues, and compliance trends\n
",
"mode": "html"
},
"pluginVersion": "10.0.0",
"title": "Dashboard Header",
"type": "text"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [
{
"options": {
"from": 0,
"result": {
"color": "red",
"index": 3,
"text": "NEEDS WORK"
},
"to": 49
},
"type": "range"
},
{
"options": {
"from": 50,
"result": {
"color": "orange",
"index": 2,
"text": "FAIR"
},
"to": 69
},
"type": "range"
},
{
"options": {
"from": 70,
"result": {
"color": "yellow",
"index": 1,
"text": "GOOD"
},
"to": 84
},
"type": "range"
},
{
"options": {
"from": 85,
"result": {
"color": "green",
"index": 0,
"text": "EXCELLENT"
},
"to": 100
},
"type": "range"
}
],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "orange",
"value": 50
},
{
"color": "yellow",
"value": 70
},
{
"color": "green",
"value": 85
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 8,
"x": 0,
"y": 2
},
"id": 2,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_score\")\n |> filter(fn: (r) => r[\"_field\"] == \"score\")\n |> last()\n |> yield(name: \"security_score\")",
"refId": "A"
}
],
"title": "Overall Security Score",
"type": "gauge"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 1
},
{
"color": "orange",
"value": 3
},
{
"color": "red",
"value": 5
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 8,
"y": 2
},
"id": 3,
"options": {
"colorMode": "background",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "value_and_name"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_issues\")\n |> filter(fn: (r) => r[\"_field\"] == \"count\")\n |> filter(fn: (r) => r[\"severity\"] == \"critical\")\n |> last()\n |> yield(name: \"critical_issues\")",
"refId": "A"
}
],
"title": "Critical Issues",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 5
},
{
"color": "orange",
"value": 10
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 12,
"y": 2
},
"id": 4,
"options": {
"colorMode": "background",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "value_and_name"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_issues\")\n |> filter(fn: (r) => r[\"_field\"] == \"count\")\n |> filter(fn: (r) => r[\"severity\"] == \"warning\")\n |> last()\n |> yield(name: \"warnings\")",
"refId": "A"
}
],
"title": "Warnings",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 16,
"y": 2
},
"id": 5,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "value_and_name"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_issues\")\n |> filter(fn: (r) => r[\"_field\"] == \"count\")\n |> filter(fn: (r) => r[\"severity\"] == \"info\")\n |> last()\n |> yield(name: \"info\")",
"refId": "A"
}
],
"title": "Info Items",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "dateTimeAsIso"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 4,
"x": 20,
"y": 2
},
"id": 6,
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "value_and_name"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_score\")\n |> filter(fn: (r) => r[\"_field\"] == \"score\")\n |> last()\n |> map(fn: (r) => ({ r with _value: uint(v: r._time) }))\n |> yield(name: \"last_audit\")",
"refId": "A"
}
],
"title": "Last Audit",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "opacity",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 3,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line"
}
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "transparent",
"value": null
},
{
"color": "red",
"value": 50
},
{
"color": "yellow",
"value": 70
},
{
"color": "green",
"value": 85
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 16,
"x": 8,
"y": 6
},
"id": 7,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max",
"min"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_score\")\n |> filter(fn: (r) => r[\"_field\"] == \"score\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"score_trend\")",
"refId": "A"
}
],
"title": "Security Score Trend",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "stepAfter",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "critical"
},
"properties": [
{
"id": "displayName",
"value": "Critical"
},
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "warning"
},
"properties": [
{
"id": "displayName",
"value": "Warnings"
},
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "info"
},
"properties": [
{
"id": "displayName",
"value": "Info"
},
{
"id": "color",
"value": {
"fixedColor": "blue",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 10
},
"id": 8,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_issues\")\n |> filter(fn: (r) => r[\"_field\"] == \"count\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"issues_by_severity\")",
"refId": "A"
}
],
"title": "Issues by Severity Over Time",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 5
},
{
"color": "red",
"value": 10
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 14
},
"id": 9,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showUnfilled": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_issues\")\n |> filter(fn: (r) => r[\"_field\"] == \"count\")\n |> group(columns: [\"category\"])\n |> sum()\n |> yield(name: \"issues_by_category\")",
"refId": "A"
}
],
"title": "Issues by Category",
"type": "bargauge"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "severity"
},
"properties": [
{
"id": "displayName",
"value": "Severity"
},
{
"id": "mappings",
"value": [
{
"options": {
"critical": {
"color": "red",
"index": 0,
"text": "CRITICAL"
},
"warning": {
"color": "orange",
"index": 1,
"text": "WARNING"
},
"info": {
"color": "blue",
"index": 2,
"text": "INFO"
}
},
"type": "value"
}
]
},
{
"id": "custom.cellOptions",
"value": {
"mode": "basic",
"type": "color-background"
}
},
{
"id": "custom.width",
"value": 100
}
]
},
{
"matcher": {
"id": "byName",
"options": "category"
},
"properties": [
{
"id": "displayName",
"value": "Category"
},
{
"id": "custom.width",
"value": 150
}
]
},
{
"matcher": {
"id": "byName",
"options": "issue_type"
},
"properties": [
{
"id": "displayName",
"value": "Issue Type"
},
{
"id": "custom.width",
"value": 200
}
]
},
{
"matcher": {
"id": "byName",
"options": "description"
},
"properties": [
{
"id": "displayName",
"value": "Description"
}
]
}
]
},
"gridPos": {
"h": 10,
"w": 24,
"x": 0,
"y": 18
},
"id": 10,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": false,
"displayName": "severity"
}
]
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_issues\")\n |> filter(fn: (r) => r[\"_field\"] == \"description\")\n |> last()\n |> keep(columns: [\"severity\", \"category\", \"issue_type\", \"_value\", \"device\", \"_time\"])\n |> rename(columns: {_value: \"description\"})\n |> sort(columns: [\"severity\", \"category\"])\n |> yield(name: \"issue_details\")",
"refId": "A"
}
],
"title": "Current Security Issues",
"type": "table"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"mappings": []
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "firewall"
},
"properties": [
{
"id": "displayName",
"value": "Firewall Rules"
},
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "vlan"
},
"properties": [
{
"id": "displayName",
"value": "VLAN Security"
},
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "port"
},
"properties": [
{
"id": "displayName",
"value": "Port Security"
},
{
"id": "color",
"value": {
"fixedColor": "yellow",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "dns"
},
"properties": [
{
"id": "displayName",
"value": "DNS Leak"
},
{
"id": "color",
"value": {
"fixedColor": "blue",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 28
},
"id": 11,
"options": {
"legend": {
"displayMode": "table",
"placement": "right",
"showLegend": true,
"values": [
"value"
]
},
"pieType": "pie",
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_issues\")\n |> filter(fn: (r) => r[\"_field\"] == \"count\")\n |> group(columns: [\"category\"])\n |> sum()\n |> yield(name: \"category_distribution\")",
"refId": "A"
}
],
"title": "Issue Distribution by Category",
"type": "piechart"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "score_change"
},
"properties": [
{
"id": "displayName",
"value": "Change"
},
{
"id": "custom.cellOptions",
"value": {
"mode": "gradient",
"type": "color-background"
}
},
{
"id": "thresholds",
"value": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "green",
"value": 0
}
]
}
},
{
"id": "custom.width",
"value": 100
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 28
},
"id": 12,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": true,
"displayName": "_time"
}
]
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"audit_score\")\n |> filter(fn: (r) => r[\"_field\"] == \"score\")\n |> sort(columns: [\"_time\"], desc: true)\n |> limit(n: 20)\n |> difference()\n |> rename(columns: {_value: \"score_change\"})\n |> yield(name: \"audit_history\")",
"refId": "A"
}
],
"title": "Audit History",
"type": "table"
}
],
"refresh": "1m",
"schemaVersion": 38,
"style": "dark",
"tags": [
"network-optimizer",
"security",
"audit"
],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "InfluxDB-NetworkOptimizer",
"value": "InfluxDB-NetworkOptimizer"
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "DS_INFLUXDB",
"options": [],
"query": "influxdb",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
}
]
},
"time": {
"from": "now-7d",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h"
]
},
"timezone": "",
"title": "Security Posture",
"uid": "security-posture",
"version": 1,
"weekStart": ""
}
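
The same Flux that drives the Overall Security Score gauge can be run outside Grafana; the only change is substituting a concrete range for the dashboard variables v.timeRangeStart / v.timeRangeStop. A sketch, assuming the bucket and org from the provisioned datasource and a placeholder URL and token:

from influxdb_client import InfluxDBClient

flux = '''
from(bucket: "network_optimizer")
  |> range(start: -7d)
  |> filter(fn: (r) => r["_measurement"] == "audit_score")
  |> filter(fn: (r) => r["_field"] == "score")
  |> last()
'''

with InfluxDBClient(url="http://localhost:8086",
                    token="YOUR_INFLUXDB_TOKEN",
                    org="network-optimizer") as client:
    for table in client.query_api().query(flux):
        for record in table.records:
            print(record.get_time(), record.get_value())
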
================================================
FILE: docker/grafana/dashboards/sqm-performance.json
================================================
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"enable": true,
"iconColor": "red",
"name": "Speedtest Events",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"gridPos": {
"h": 2,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
"content": "\n SQM Performance Dashboard
\n Adaptive bandwidth management and bufferbloat prevention\n
",
"mode": "html"
},
"pluginVersion": "10.0.0",
"title": "Dashboard Header",
"type": "text"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "Mbits"
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 6,
"x": 0,
"y": 2
},
"id": 2,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"rate\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> last()\n |> yield(name: \"current_rate\")",
"refId": "A"
}
],
"title": "Current Rate",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "blue",
"value": null
}
]
},
"unit": "Mbits"
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 6,
"x": 6,
"y": 2
},
"id": 3,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"baseline\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> last()\n |> yield(name: \"baseline\")",
"refId": "A"
}
],
"title": "Baseline Rate",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 20
},
{
"color": "orange",
"value": 50
},
{
"color": "red",
"value": 100
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 6,
"x": 12,
"y": 2
},
"id": 4,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"latency\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> last()\n |> yield(name: \"latency\")",
"refId": "A"
}
],
"title": "Current Latency",
"type": "gauge"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [
{
"options": {
"none": {
"color": "green",
"index": 0,
"text": "Stable"
},
"increase": {
"color": "blue",
"index": 1,
"text": "Increased"
},
"decrease": {
"color": "orange",
"index": 2,
"text": "Decreased"
}
},
"type": "value"
}
],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 6,
"x": 18,
"y": 2
},
"id": 5,
"options": {
"colorMode": "background",
"graphMode": "none",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "value_and_name"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"adjustment\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> last()\n |> yield(name: \"adjustment\")",
"refId": "A"
}
],
"title": "Last Adjustment",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "opacity",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "Mbits"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "rate"
},
"properties": [
{
"id": "displayName",
"value": "Current Rate"
},
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "baseline"
},
"properties": [
{
"id": "displayName",
"value": "Baseline"
},
{
"id": "color",
"value": {
"fixedColor": "blue",
"mode": "fixed"
}
},
{
"id": "custom.lineStyle",
"value": {
"dash": [10, 10],
"fill": "dash"
}
}
]
}
]
},
"gridPos": {
"h": 10,
"w": 24,
"x": 0,
"y": 7
},
"id": 6,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max",
"min"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"rate\" or r[\"_field\"] == \"baseline\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"bandwidth_over_time\")",
"refId": "A"
}
],
"title": "Rate vs Baseline Over Time",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 20
},
{
"color": "red",
"value": 50
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 10,
"w": 12,
"x": 0,
"y": 17
},
"id": 7,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max",
"min"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"latency\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"latency\")",
"refId": "A"
}
],
"title": "Latency Over Time",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 80,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 10,
"w": 12,
"x": 12,
"y": 17
},
"id": 8,
"options": {
"legend": {
"calcs": [
"count"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"adjustment\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> filter(fn: (r) => r[\"_value\"] != \"none\")\n |> aggregateWindow(every: 1h, fn: count, createEmpty: false)\n |> yield(name: \"adjustments\")",
"refId": "A"
}
],
"title": "SQM Adjustments (Hourly Count)",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "points",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 8,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "always",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "Mbits"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "download"
},
"properties": [
{
"id": "displayName",
"value": "Download"
},
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "upload"
},
"properties": [
{
"id": "displayName",
"value": "Upload"
},
{
"id": "color",
"value": {
"fixedColor": "blue",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 10,
"w": 24,
"x": 0,
"y": 27
},
"id": 9,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max",
"min"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"speedtest\")\n |> filter(fn: (r) => r[\"_field\"] == \"download\" or r[\"_field\"] == \"upload\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> yield(name: \"speedtest_history\")",
"refId": "A"
}
],
"title": "Speedtest History",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "latency"
},
"properties": [
{
"id": "unit",
"value": "ms"
},
{
"id": "thresholds",
"value": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 20
},
{
"color": "red",
"value": 50
}
]
}
},
{
"id": "custom.cellOptions",
"value": {
"mode": "gradient",
"type": "color-background"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "download"
},
"properties": [
{
"id": "unit",
"value": "Mbits"
},
{
"id": "displayName",
"value": "Download"
}
]
},
{
"matcher": {
"id": "byName",
"options": "upload"
},
"properties": [
{
"id": "unit",
"value": "Mbits"
},
{
"id": "displayName",
"value": "Upload"
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 37
},
"id": 10,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": true,
"displayName": "_time"
}
]
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"speedtest\")\n |> filter(fn: (r) => r[\"_field\"] == \"download\" or r[\"_field\"] == \"upload\" or r[\"_field\"] == \"latency\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> filter(fn: (r) => r[\"interface\"] == \"${interface}\")\n |> pivot(rowKey:[\"_time\"], columnKey: [\"_field\"], valueColumn: \"_value\")\n |> sort(columns: [\"_time\"], desc: true)\n |> limit(n: 50)\n |> yield(name: \"recent_speedtests\")",
"refId": "A"
}
],
"title": "Recent Speedtest Results",
"type": "table"
}
],
"refresh": "30s",
"schemaVersion": 38,
"style": "dark",
"tags": [
"network-optimizer",
"sqm"
],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "InfluxDB-NetworkOptimizer",
"value": "InfluxDB-NetworkOptimizer"
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "DS_INFLUXDB",
"options": [],
"query": "influxdb",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"definition": "from(bucket: \"network_optimizer\")\n |> range(start: -7d)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> keep(columns: [\"device\"])\n |> distinct(column: \"device\")",
"hide": 0,
"includeAll": false,
"label": "Device",
"multi": false,
"name": "device",
"options": [],
"query": {
"query": "from(bucket: \"network_optimizer\")\n |> range(start: -7d)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> keep(columns: [\"device\"])\n |> distinct(column: \"device\")",
"refId": "InfluxVariableQueryEditor-VariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"definition": "from(bucket: \"network_optimizer\")\n |> range(start: -7d)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> keep(columns: [\"interface\"])\n |> distinct(column: \"interface\")",
"hide": 0,
"includeAll": false,
"label": "Interface",
"multi": false,
"name": "interface",
"options": [],
"query": {
"query": "from(bucket: \"network_optimizer\")\n |> range(start: -7d)\n |> filter(fn: (r) => r[\"_measurement\"] == \"sqm_stats\")\n |> filter(fn: (r) => r[\"device\"] == \"${device}\")\n |> keep(columns: [\"interface\"])\n |> distinct(column: \"interface\")",
"refId": "InfluxVariableQueryEditor-VariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
}
]
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h"
]
},
"timezone": "",
"title": "SQM Performance",
"uid": "sqm-performance",
"version": 1,
"weekStart": ""
}
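
This dashboard chains two template variables: interface resolves only after device, because its variable query filters on ${device}, and every panel filters on both tags. Note also that adjustment is a string field ("none", "increase", or "decrease"), which is what the Last Adjustment value mappings and the != "none" filter in the hourly-count panel rely on. A sketch of one sample shaped for these panels, with placeholder connection details and made-up values:

from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS

with InfluxDBClient(url="http://localhost:8086",
                    token="YOUR_INFLUXDB_TOKEN",
                    org="network-optimizer") as client:
    sample = (
        Point("sqm_stats")
        .tag("device", "gateway")     # hypothetical tag values
        .tag("interface", "eth8")
        .field("rate", 850.0)         # Mbit/s (panels use the Mbits unit)
        .field("baseline", 940.0)
        .field("latency", 12.3)       # ms
        .field("adjustment", "none")  # "none" | "increase" | "decrease"
    )
    client.write_api(write_options=SYNCHRONOUS).write(
        bucket="network_optimizer", record=sample
    )
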
================================================
FILE: docker/grafana/dashboards/switch-deep-dive.json
================================================
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"gridPos": {
"h": 2,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"code": {
"language": "plaintext",
"showLineNumbers": false,
"showMiniMap": false
},
"content": "\n Switch Deep-Dive Dashboard
\n Per-port utilization, PoE, errors, and traffic analysis\n
",
"mode": "html"
},
"pluginVersion": "10.0.0",
"title": "Dashboard Header",
"type": "text"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 2
},
"id": 2,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"_field\"] == \"port_enabled\")\n |> filter(fn: (r) => r[\"_value\"] == 1)\n |> group()\n |> count()\n |> yield(name: \"active_ports\")",
"refId": "A"
}
],
"title": "Active Ports",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 50
},
{
"color": "orange",
"value": 75
},
{
"color": "red",
"value": 90
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 6,
"y": 2
},
"id": 3,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"device_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"_field\"] == \"cpu\")\n |> last()\n |> yield(name: \"cpu_usage\")",
"refId": "A"
}
],
"title": "Switch CPU Usage",
"type": "gauge"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 50
},
{
"color": "orange",
"value": 75
},
{
"color": "red",
"value": 90
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 12,
"y": 2
},
"id": 4,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"device_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"_field\"] == \"memory_used\")\n |> last()\n |> yield(name: \"memory_usage\")",
"refId": "A"
}
],
"title": "Switch Memory Usage",
"type": "gauge"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "watt"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 18,
"y": 2
},
"id": 5,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"_field\"] == \"poe_power\")\n |> group()\n |> sum()\n |> yield(name: \"total_poe\")",
"refId": "A"
}
],
"title": "Total PoE Power",
"type": "stat"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "rx_rate"
},
"properties": [
{
"id": "displayName",
"value": "RX Rate"
},
{
"id": "unit",
"value": "bps"
},
{
"id": "custom.cellOptions",
"value": {
"mode": "gradient",
"type": "color-background"
}
},
{
"id": "thresholds",
"value": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 500000000
},
{
"color": "orange",
"value": 750000000
},
{
"color": "red",
"value": 900000000
}
]
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "tx_rate"
},
"properties": [
{
"id": "displayName",
"value": "TX Rate"
},
{
"id": "unit",
"value": "bps"
},
{
"id": "custom.cellOptions",
"value": {
"mode": "gradient",
"type": "color-background"
}
},
{
"id": "thresholds",
"value": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "yellow",
"value": 500000000
},
{
"color": "orange",
"value": 750000000
},
{
"color": "red",
"value": 900000000
}
]
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "poe_power"
},
"properties": [
{
"id": "displayName",
"value": "PoE (W)"
},
{
"id": "unit",
"value": "watt"
}
]
},
{
"matcher": {
"id": "byName",
"options": "port_enabled"
},
"properties": [
{
"id": "displayName",
"value": "Status"
},
{
"id": "mappings",
"value": [
{
"options": {
"0": {
"color": "red",
"index": 1,
"text": "Down"
},
"1": {
"color": "green",
"index": 0,
"text": "Up"
}
},
"type": "value"
}
]
},
{
"id": "custom.cellOptions",
"value": {
"mode": "basic",
"type": "color-background"
}
}
]
}
]
},
"gridPos": {
"h": 10,
"w": 24,
"x": 0,
"y": 6
},
"id": 6,
"options": {
"cellHeight": "sm",
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": false,
"displayName": "port"
}
]
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"_field\"] == \"in_octets\" or r[\"_field\"] == \"out_octets\" or r[\"_field\"] == \"poe_power\" or r[\"_field\"] == \"port_enabled\")\n |> last()\n |> pivot(rowKey:[\"port\", \"port_name\"], columnKey: [\"_field\"], valueColumn: \"_value\")\n |> map(fn: (r) => ({ r with \n rx_rate: if exists r.in_octets then r.in_octets * 8 else 0,\n tx_rate: if exists r.out_octets then r.out_octets * 8 else 0\n }))\n |> sort(columns: [\"port\"])\n |> yield(name: \"port_status\")",
"refId": "A"
}
],
"title": "Port Status and Utilization",
"type": "table"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "bps"
},
"overrides": []
},
"gridPos": {
"h": 10,
"w": 12,
"x": 0,
"y": 16
},
"id": 7,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"port\"] == \"${port}\")\n |> filter(fn: (r) => r[\"_field\"] == \"in_octets\")\n |> derivative(unit: 1s, nonNegative: true)\n |> map(fn: (r) => ({ r with _value: r._value * 8.0 }))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rx_rate\")",
"refId": "A"
}
],
"title": "Port ${port} - RX Rate",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "bps"
},
"overrides": []
},
"gridPos": {
"h": 10,
"w": 12,
"x": 12,
"y": 16
},
"id": 8,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"port\"] == \"${port}\")\n |> filter(fn: (r) => r[\"_field\"] == \"out_octets\")\n |> derivative(unit: 1s, nonNegative: true)\n |> map(fn: (r) => ({ r with _value: r._value * 8.0 }))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"tx_rate\")",
"refId": "A"
}
],
"title": "Port ${port} - TX Rate",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "cps"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "in_errors"
},
"properties": [
{
"id": "displayName",
"value": "RX Errors"
},
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "out_errors"
},
"properties": [
{
"id": "displayName",
"value": "TX Errors"
},
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 26
},
"id": 9,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"port\"] == \"${port}\")\n |> filter(fn: (r) => r[\"_field\"] == \"in_errors\" or r[\"_field\"] == \"out_errors\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"errors\")",
"refId": "A"
}
],
"title": "Port ${port} - Errors",
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": {
"tooltip": false,
"viz": false,
"legend": false
},
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "watt"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 26
},
"id": 10,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"query": "from(bucket: \"network_optimizer\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> filter(fn: (r) => r[\"_field\"] == \"poe_power\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"poe_power\")",
"refId": "A"
}
],
"title": "PoE Power Consumption Over Time",
"type": "timeseries"
}
],
"refresh": "30s",
"schemaVersion": 38,
"style": "dark",
"tags": [
"network-optimizer",
"switch"
],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "InfluxDB-NetworkOptimizer",
"value": "InfluxDB-NetworkOptimizer"
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "DS_INFLUXDB",
"options": [],
"query": "influxdb",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"definition": "from(bucket: \"network_optimizer\")\n |> range(start: -7d)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"type\"] == \"switch\")\n |> keep(columns: [\"device\"])\n |> distinct(column: \"device\")",
"hide": 0,
"includeAll": false,
"label": "Switch",
"multi": false,
"name": "switch",
"options": [],
"query": {
"query": "from(bucket: \"network_optimizer\")\n |> range(start: -7d)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"type\"] == \"switch\")\n |> keep(columns: [\"device\"])\n |> distinct(column: \"device\")",
"refId": "InfluxVariableQueryEditor-VariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
},
"definition": "from(bucket: \"network_optimizer\")\n |> range(start: -7d)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> keep(columns: [\"port\"])\n |> distinct(column: \"port\")",
"hide": 0,
"includeAll": false,
"label": "Port",
"multi": false,
"name": "port",
"options": [],
"query": {
"query": "from(bucket: \"network_optimizer\")\n |> range(start: -7d)\n |> filter(fn: (r) => r[\"_measurement\"] == \"interface_metrics\")\n |> filter(fn: (r) => r[\"device\"] == \"${switch}\")\n |> keep(columns: [\"port\"])\n |> distinct(column: \"port\")",
"refId": "InfluxVariableQueryEditor-VariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h"
]
},
"timezone": "",
"title": "Switch Deep-Dive",
"uid": "switch-deep-dive",
"version": 1,
"weekStart": ""
}
================================================
FILE: docker/grafana/provisioning/dashboards/dashboards.yml
================================================
apiVersion: 1
providers:
- name: 'Network Optimizer Dashboards'
orgId: 1
folder: 'Network Optimizer'
type: file
disableDeletion: false
updateIntervalSeconds: 30
allowUiUpdates: true
options:
path: /var/lib/grafana/dashboards
foldersFromFilesStructure: false
================================================
FILE: docker/grafana/provisioning/datasources/influxdb.yml
================================================
apiVersion: 1
datasources:
- name: InfluxDB-NetworkOptimizer
type: influxdb
access: proxy
url: http://influxdb:8086
jsonData:
version: Flux
organization: network-optimizer
defaultBucket: network_optimizer
tlsSkipVerify: true
secureJsonData:
token: ${INFLUXDB_TOKEN}
isDefault: true
editable: true
================================================
FILE: docker/openspeedtest/Dockerfile
================================================
# Ozark Connect Speed Test - based on OpenSpeedTest
# Built from local customized source instead of upstream image
# Pin to nginx 1.26 to match OpenSpeedTest (1.29+ has caching behavior changes)
FROM nginx:1.26-alpine
# Copy our customized OpenSpeedTest files
COPY src/OpenSpeedTest/ /usr/share/nginx/html/
# Copy nginx configuration
COPY docker/openspeedtest/nginx.conf /etc/nginx/conf.d/default.conf
# Copy entrypoint script
COPY docker/openspeedtest/entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint.sh
EXPOSE 3000
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["nginx", "-g", "daemon off;"]
================================================
FILE: docker/openspeedtest/entrypoint.sh
================================================
#!/bin/sh
# Ozark Connect Speed Test - Entrypoint
# Injects runtime configuration into config.js
# Report TCP congestion control state. We can't set it from inside the container
# (/proc/sys is mounted read-only except for sysctls explicitly declared in the
# compose sysctls: block, and we can't put tcp_congestion_control there because it
# hard-fails container start on kernels without bbr — Synology, QNAP, some Proxmox
# setups). The container inherits whatever the host's default CC is, so we just
# surface the state and point users at the fix if bbr is available but not active.
CC_FILE="/proc/sys/net/ipv4/tcp_congestion_control"
AVAIL_FILE="/proc/sys/net/ipv4/tcp_available_congestion_control"
if [ -r "$CC_FILE" ]; then
CURRENT_CC=$(cat "$CC_FILE")
AVAIL_CC=$(cat "$AVAIL_FILE" 2>/dev/null || echo "unknown")
echo "TCP congestion control: $CURRENT_CC (available: $AVAIL_CC)"
case " $AVAIL_CC " in
*" bbr "*)
if [ "$CURRENT_CC" != "bbr" ]; then
echo "NOTE: bbr is loaded on the host but not the default. For best speedtest accuracy on shallow-policer WAN paths, set it as default on the host: sysctl -w net.ipv4.tcp_congestion_control=bbr"
fi
;;
*)
echo "NOTE: bbr kernel module is not loaded on the host. For best speedtest accuracy on shallow-policer WAN paths, load it on the host: modprobe tcp_bbr (and persist via /etc/modules-load.d/bbr.conf)"
;;
esac
fi
# API endpoint path (single source of truth)
API_PATH="/api/public/speedtest/results"
# Construct the save URL from environment variables
# Priority: REVERSE_PROXIED_HOST_NAME > HOST_NAME > HOST_IP
# IMPORTANT: Keep this logic in sync with NginxHostedService.cs:ConstructSaveDataUrl() (Windows installer)
if [ -n "$REVERSE_PROXIED_HOST_NAME" ]; then
# Behind reverse proxy - use https and no port (proxy handles it)
SAVE_DATA_URL="https://${REVERSE_PROXIED_HOST_NAME}${API_PATH}"
elif [ -n "$HOST_NAME" ]; then
SAVE_DATA_URL="http://${HOST_NAME}:8042${API_PATH}"
elif [ -n "$HOST_IP" ]; then
SAVE_DATA_URL="http://${HOST_IP}:8042${API_PATH}"
else
# No explicit host configured - use dynamic URL (constructed client-side from browser location)
SAVE_DATA_URL="__DYNAMIC__"
fi
# Inject configuration into config.js
CONFIG_FILE="/usr/share/nginx/html/assets/js/config.js"
if [ -f "$CONFIG_FILE" ]; then
echo "Configuring speed test..."
# saveData is always enabled - URL is either explicit or dynamic
SAVE_DATA_VALUE="true"
if [ "$SAVE_DATA_URL" = "__DYNAMIC__" ]; then
echo "Results will be sent to: (dynamic - based on browser location):8042"
else
echo "Results will be sent to: $SAVE_DATA_URL"
fi
# External server ID (set for WAN speed test servers, empty for LAN)
EXTERNAL_ID="${EXTERNAL_SERVER_ID:-}"
# Replace placeholders with actual values
sed -i "s|__SAVE_DATA__|$SAVE_DATA_VALUE|g" "$CONFIG_FILE"
sed -i "s|__SAVE_DATA_URL__|$SAVE_DATA_URL|g" "$CONFIG_FILE"
sed -i "s|__API_PATH__|$API_PATH|g" "$CONFIG_FILE"
sed -i "s|__EXTERNAL_SERVER_ID__|$EXTERNAL_ID|g" "$CONFIG_FILE"
if [ -n "$EXTERNAL_ID" ]; then
echo "External server ID: $EXTERNAL_ID (WAN speed test mode)"
fi
echo "Configuration complete"
else
echo "Warning: config.js not found at $CONFIG_FILE"
fi
# Enforce canonical URL via 302 redirect (matches UI logic exactly)
# Prevents browser caching issues on mobile
NGINX_CONF="/etc/nginx/conf.d/default.conf"
OST_PORT="${OPENSPEEDTEST_PORT:-3005}"
OST_HTTPS_PORT="${OPENSPEEDTEST_HTTPS_PORT:-443}"
# Match UI: OPENSPEEDTEST_HOST defaults to HOST_NAME
OST_HOST="${OPENSPEEDTEST_HOST:-$HOST_NAME}"
# Build canonical URL (same logic as ClientSpeedTest.razor)
# "true" = HTTPS via proxy, "false"/unset = HTTP direct
CANONICAL_URL=""
CANONICAL_HOST=""
if [ -n "$OST_HOST" ]; then
CANONICAL_HOST="$OST_HOST"
if [ "$OPENSPEEDTEST_HTTPS" = "true" ]; then
if [ "$OST_HTTPS_PORT" = "443" ]; then
CANONICAL_URL="https://$OST_HOST"
else
CANONICAL_URL="https://$OST_HOST:$OST_HTTPS_PORT"
fi
else
CANONICAL_URL="http://$OST_HOST:$OST_PORT"
fi
elif [ -n "$HOST_IP" ]; then
CANONICAL_HOST="$HOST_IP"
CANONICAL_URL="http://$HOST_IP:$OST_PORT"
fi
if [ -n "$CANONICAL_HOST" ] && [ -f "$NGINX_CONF" ]; then
echo "Enforcing canonical URL: $CANONICAL_URL"
# Redirect HTTP to HTTPS when behind a TLS proxy
if [ "$OPENSPEEDTEST_HTTPS" = "true" ]; then
sed -i "/server_name/a\\
# Redirect HTTP to HTTPS\\
if (\$http_x_forwarded_proto != \"https\") {\\
return 302 $CANONICAL_URL\$request_uri;\\
}" "$NGINX_CONF"
echo "Added HTTP->HTTPS redirect rule"
else
# Host enforcement only (no scheme redirect)
sed -i "/server_name/a\\
# Enforce canonical host - prevents browser caching issues on mobile\\
if (\$host != \"$CANONICAL_HOST\") {\\
return 302 $CANONICAL_URL\$request_uri;\\
}" "$NGINX_CONF"
echo "Added host redirect rule"
fi
fi
# Start nginx
exec "$@"
================================================
FILE: docker/openspeedtest/nginx.conf
================================================
server {
server_name _;
listen 3000 reuseport;
listen [::]:3000 reuseport;
root /usr/share/nginx/html;
index index.html;
client_max_body_size 50m;
error_page 405 =200 $uri;
access_log off;
gzip off;
fastcgi_read_timeout 999;
log_not_found off;
server_tokens off;
error_log /dev/null;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 30s;
open_file_cache_min_uses 2;
open_file_cache_errors off;
# Upload endpoint - reads entire POST body before responding.
# Without this, the error_page 405 hack responds before the body is
# fully received, causing ERR_CONNECTION_RESET behind reverse proxies.
location = /upload {
add_header 'Access-Control-Allow-Origin' "*" always;
add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
add_header Cache-Control 'no-store, no-cache, max-age=0, no-transform';
client_body_buffer_size 35m;
client_max_body_size 50m;
proxy_pass http://127.0.0.1:3000/upload-sink;
proxy_set_header Host $host;
}
location = /upload-sink {
add_header 'Access-Control-Allow-Origin' "*" always;
return 200;
}
location / {
add_header 'Access-Control-Allow-Origin' "*" always;
add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
add_header Cache-Control 'no-store, no-cache, max-age=0, no-transform';
add_header Last-Modified $date_gmt;
if_modified_since off;
expires off;
etag off;
if ($request_method = OPTIONS) {
add_header 'Access-Control-Allow-Credentials' "true";
add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
add_header 'Access-Control-Allow-Origin' "$http_origin" always;
add_header 'Access-Control-Allow-Methods' "GET, POST, OPTIONS" always;
return 200;
}
}
# Static assets - match OpenSpeedTest config exactly
location ~* ^.+\.(?:css|cur|js|jpe?g|gif|htc|ico|png|html|xml|otf|ttf|eot|woff|woff2|svg)$ {
access_log off;
expires -1;
add_header Cache-Control "no-cache, no-store, must-revalidate";
add_header Vary Accept-Encoding;
tcp_nodelay off;
open_file_cache max=3000 inactive=120s;
open_file_cache_valid 45s;
open_file_cache_min_uses 2;
open_file_cache_errors off;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript image/svg+xml;
}
}
================================================
FILE: docs/MACOS-INSTALLATION.md
================================================
# macOS Native Installation
Install Network Optimizer natively on macOS for maximum performance. Native installation is recommended over Docker Desktop, which limits network throughput to ~1.8 Gbps.
## Quick Start
```bash
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer
./scripts/install-macos-native.sh
```
The script will:
1. Install prerequisites via Homebrew (iperf3, nginx, .NET SDK)
2. Build the application from source
3. Sign binaries for macOS
4. Set up OpenSpeedTest with nginx for browser-based speed testing
5. Create a launchd service for auto-start
## Configuration
After installation, edit `~/network-optimizer/start.sh` to configure environment variables:
```bash
# Timezone
export TZ="America/Chicago"
# Optional: Set admin password (auto-generated on first run if not set)
# export APP_PASSWORD="your-secure-password"
```
Additional environment variables can be added to `start.sh` - see [docker/.env.example](../docker/.env.example) for all available options including:
- `HOST_NAME` - Hostname for canonical URL enforcement
- `REVERSE_PROXIED_HOST_NAME` - Hostname when behind a reverse proxy (enables HTTPS)
- `OPENSPEEDTEST_HTTPS` - Enable HTTPS for speed tests (required for geolocation)
- `Logging__LogLevel__NetworkOptimizer` / `Logging__LogLevel__Default` - Logging verbosity (see [Enable Debug Logging](#enable-debug-logging))
Note: The app auto-detects its IP address, so `HOST_IP` is not required for native installations.
After editing, restart the service:
```bash
launchctl unload ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
launchctl load ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
```
## Access
- **Web UI**: http://localhost:8042 or http://\<your-mac-ip\>:8042
- **SpeedTest**: http://localhost:3005 or http://\<your-mac-ip\>:3005
On first run, check the logs for the auto-generated admin password:
```bash
grep -A5 'AUTO-GENERATED' ~/network-optimizer/logs/stdout.log
```
## Service Management
```bash
# Stop
launchctl unload ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
# Start
launchctl load ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
# View logs
tail -f ~/network-optimizer/logs/stdout.log
```
## Upgrading
To upgrade to a newer version:
```bash
cd NetworkOptimizer
git pull
./scripts/install-macos-native.sh
```
The install script preserves your database, encryption keys, and `start.sh` configuration by backing them up before reinstalling.
## Logs and Debugging
Application logs are in `~/network-optimizer/logs/`:
```bash
# Follow live logs
tail -f ~/network-optimizer/logs/stdout.log
# View errors
tail -f ~/network-optimizer/logs/stderr.log
# Search for specific events
grep "UniFi" ~/network-optimizer/logs/stdout.log | tail -20
```
### Enable Debug Logging
For more detailed logs, edit `~/network-optimizer/start.sh` and add:
```bash
# Debug logging for Network Optimizer application code only (recommended):
export Logging__LogLevel__NetworkOptimizer=Debug
# Or debug everything (verbose - includes framework/EF Core noise):
export Logging__LogLevel__Default=Debug
```
Then restart the service:
```bash
launchctl unload ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
launchctl load ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
```
Remember to set it back to `Information` when done - debug logging is verbose.
### Log Rotation
Logs are not rotated automatically. To clear them:
```bash
# Truncate without restarting
: > ~/network-optimizer/logs/stdout.log
: > ~/network-optimizer/logs/stderr.log
```
## Troubleshooting
### Previous sudo Installation
If you previously ran the install script with `sudo`, files and processes end up owned by root, which breaks future installs and upgrades. The install script detects this automatically and offers to fix it. Just run the script normally (without sudo):
```bash
./scripts/install-macos-native.sh
```
It will prompt for your password once to clean up root-owned files and kill root-owned processes, then continue the installation as your regular user.
**Never run the install script with sudo.** Everything installs to your home directory and does not need root access.
### Reset Admin Password
If you forget the admin password, use the reset script:
```bash
curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.sh | bash
```
The script auto-detects the macOS native installation, clears the password, restarts the service, and displays the new temporary password.
**Manual fallback:**
```bash
# Stop the service
launchctl unload ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
# Clear the password
sqlite3 ~/Library/Application\ Support/NetworkOptimizer/network_optimizer.db \
"UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
# Restart
launchctl load ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
# View the new password
grep "Password:" ~/network-optimizer/logs/stdout.log | tail -1
```
## Uninstalling
```bash
# Stop and remove the service
launchctl unload ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
rm ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
# Remove application files
rm -rf ~/network-optimizer
# Remove data (database, keys) - optional
rm -rf ~/Library/Application\ Support/NetworkOptimizer
```
================================================
FILE: docs/PLAN-unifi-api-abstraction.md
================================================
# Plan: UniFi API Abstraction Layer
## Background
Currently, the Network Optimizer "hijacks" the UniFi Controller's internal web UI API endpoints. This works but:
- API endpoints can change between UniFi versions without notice
- No official documentation or stability guarantees
- Rate limiting and session management mimic browser behavior
- Device data structure may change unexpectedly
When/if Ubiquiti provides a proper documented API, we need to easily swap implementations.
## Objective
Create an abstraction layer that:
1. Decouples business logic from UniFi API implementation details
2. Enables easy swap to official API when available
3. Maintains backward compatibility with current "web UI" approach
4. Provides a clean interface for device/client/network data
## Current Architecture
```
┌─────────────────────┐ ┌──────────────────────┐
│ ConfigAuditEngine │────>│ SecurityAuditEngine │
└─────────────────────┘ └──────────────────────┘
│ │
v v
┌─────────────────────────────────────────────────┐
│ UniFiApiClient │
│ (Direct HTTP calls to controller endpoints) │
└─────────────────────────────────────────────────┘
│
v
┌─────────────────────────────────────────────────┐
│ UniFi Controller (Web UI API) │
└─────────────────────────────────────────────────┘
```
## Proposed Architecture
```
┌─────────────────────┐ ┌──────────────────────┐
│ ConfigAuditEngine │────>│ SecurityAuditEngine │
└─────────────────────┘ └──────────────────────┘
│ │
v v
┌─────────────────────────────────────────────────┐
│ IUniFiDataProvider │
│ (Abstract interface for UniFi data) │
└─────────────────────────────────────────────────┘
│
├──────────────────┐
│ │
v v
┌───────────────────┐ ┌─────────────────────┐
│ WebUIDataProvider │ │ OfficialAPIProvider │
│ (Current impl) │ │ (Future impl) │
└───────────────────┘ └─────────────────────┘
│ │
v v
┌─────────────────┐ ┌────────────────────────┐
│ Controller │ │ UniFi Official API │
│ (Web UI API) │ │ (When available) │
└─────────────────┘ └────────────────────────┘
```
## Implementation Steps
### Phase 1: Define Core Interfaces
**File: `src/NetworkOptimizer.UniFi/Abstractions/IUniFiDataProvider.cs`**
```csharp
public interface IUniFiDataProvider
{
// Connection
Task ConnectAsync(string host, string username, string password);
Task DisconnectAsync();
bool IsConnected { get; }
// Site discovery
Task<List<UniFiSite>> GetSitesAsync();
// Device data
Task<List<UniFiDevice>> GetDevicesAsync(string siteId);
Task<List<UniFiClient>> GetClientsAsync(string siteId);
Task<List<UniFiNetwork>> GetNetworksAsync(string siteId);
// Fingerprint database (return type lost in extraction; FingerprintDatabase assumed)
Task<FingerprintDatabase> GetFingerprintDatabaseAsync();
}
```
### Phase 2: Define Domain Models
Create clean domain models separate from JSON response DTOs:
**File: `src/NetworkOptimizer.UniFi/Domain/`**
- `UniFiDevice.cs` - Normalized device info (switches, APs, gateways)
- `UniFiClient.cs` - Normalized client info (wired/wireless)
- `UniFiNetwork.cs` - Normalized network/VLAN info
- `UniFiSite.cs` - Site info
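The two smaller models would be thin; a sketch of what they might hold (property names are assumptions at this stage, not settled):
```csharp
public class UniFiSite
{
    public string Id { get; set; }
    public string Name { get; set; }
}

public class UniFiNetwork
{
    public string Id { get; set; }
    public string Name { get; set; }
    public int? VlanId { get; set; }    // null for the untagged/default network
    public string? Subnet { get; set; } // e.g. "192.168.1.0/24"
}
```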
### Phase 3: Refactor Current Implementation
1. Move current `UniFiApiClient` logic into `WebUIDataProvider`
2. Have it implement `IUniFiDataProvider`
3. Map JSON responses to domain models internally
### Phase 4: Update Consumers
1. Update `ConfigAuditEngine` to use `IUniFiDataProvider` (sketched below)
2. Update `SecurityAuditEngine` to work with domain models
3. Remove direct JSON parsing from audit logic
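Concretely, consumers would take the interface via constructor injection and work only against domain models. A sketch of the target shape (method body illustrative):
```csharp
public class ConfigAuditEngine
{
    private readonly IUniFiDataProvider _provider;

    public ConfigAuditEngine(IUniFiDataProvider provider) => _provider = provider;

    public async Task AuditSiteAsync(string siteId)
    {
        // Only domain models cross this boundary - no controller JSON here.
        var devices = await _provider.GetDevicesAsync(siteId);
        var networks = await _provider.GetNetworksAsync(siteId);
        foreach (var device in devices)
        {
            // ... audit rules evaluate UniFiDevice / UniFiNetwork ...
        }
    }
}
```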
### Phase 5: Dependency Injection
```csharp
// Program.cs
services.AddScoped();
// Future:
// services.AddScoped();
```
## Key Domain Models
### UniFiDevice
```csharp
public class UniFiDevice
{
public string Id { get; set; }
public string Mac { get; set; }
public string Name { get; set; }
public string Model { get; set; }
public DeviceType Type { get; set; } // Gateway, Switch, AP
public string IpAddress { get; set; }
public bool IsOnline { get; set; }
// For switches/gateways
public List<UniFiPort>? Ports { get; set; } // element type lost in extraction; UniFiPort assumed
// For APs
public bool IsAccessPoint { get; set; }
}
```
### UniFiClient
```csharp
public class UniFiClient
{
public string Mac { get; set; }
public string? Name { get; set; }
public string? Hostname { get; set; }
public bool IsWired { get; set; }
// Wired client info
public string? SwitchMac { get; set; }
public int? SwitchPort { get; set; }
// Wireless client info
public string? AccessPointMac { get; set; }
// Network assignment
public string? NetworkId { get; set; }
// Fingerprint data
public int? DeviceCategory { get; set; }
public int? DeviceFamily { get; set; }
public int? DeviceIdOverride { get; set; }
public string? Oui { get; set; }
}
```
## Migration Strategy
1. **Phase 1-2**: Define interfaces and domain models (non-breaking)
2. **Phase 3**: Create `WebUIDataProvider` alongside existing code
3. **Phase 4**: Gradually migrate consumers one at a time
4. **Phase 5**: Remove old direct API code when migration complete
## Benefits
- **Testability**: Mock `IUniFiDataProvider` for unit tests (see the sketch after this list)
- **Flexibility**: Swap implementations without code changes
- **Maintainability**: API-specific quirks isolated in providers
- **Future-proof**: Ready for official API when available
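For the testability point above, a hand-rolled fake is enough; no mocking framework required. A minimal sketch (the canned values are illustrative):
```csharp
public class FakeUniFiDataProvider : IUniFiDataProvider
{
    public bool IsConnected { get; private set; }

    public Task ConnectAsync(string host, string username, string password)
    {
        IsConnected = true;
        return Task.CompletedTask;
    }

    public Task DisconnectAsync()
    {
        IsConnected = false;
        return Task.CompletedTask;
    }

    public Task<List<UniFiSite>> GetSitesAsync() =>
        Task.FromResult(new List<UniFiSite> { new() { Id = "default", Name = "Default" } });

    public Task<List<UniFiDevice>> GetDevicesAsync(string siteId) =>
        Task.FromResult(new List<UniFiDevice>
        {
            new() { Mac = "aa:bb:cc:dd:ee:ff", Name = "Test Switch", IsOnline = true }
        });

    public Task<List<UniFiClient>> GetClientsAsync(string siteId) =>
        Task.FromResult(new List<UniFiClient>());

    public Task<List<UniFiNetwork>> GetNetworksAsync(string siteId) =>
        Task.FromResult(new List<UniFiNetwork>());

    public Task<FingerprintDatabase> GetFingerprintDatabaseAsync() =>
        Task.FromResult(new FingerprintDatabase());
}
```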
## Open Questions
1. Should we support multiple providers simultaneously? (e.g., different controllers)
2. How to handle provider-specific features not in interface?
3. Caching strategy - at provider or consumer level?
## Timeline Estimate
| Phase | Description | Effort |
|-------|-------------|--------|
| 1 | Define interfaces | 2-4 hours |
| 2 | Create domain models | 4-6 hours |
| 3 | Implement WebUIDataProvider | 8-12 hours |
| 4 | Migrate consumers | 8-12 hours |
| 5 | DI setup and testing | 4-6 hours |
**Total: 26-40 hours**
================================================
FILE: docs/features/speed-test-roadmap.md
================================================
# Speed Test & Network Trace - Future Enhancements
## Current State (Jan 2026)
- Network path visualization with device icons
- Wireless link indicators on mesh hops
- Bottleneck detection and highlighting
- Efficiency grades (% of theoretical max)
- Inter-VLAN routing detection
- LocalIp parsing from iperf3 for accurate server positioning
- Path analysis persistence as snapshot
- Test history with expandable details
## Proposed Enhancements
### Low Effort / High Impact
**1. Latency & Jitter Display**
- iperf3 already outputs latency/jitter data - just need to parse and display
- Critical for VoIP, video conferencing, gaming users
- Show alongside throughput in results
**2. Bottleneck Recommendations**
- We already detect bottlenecks - add actionable suggestions
- Examples:
- "Upgrade 1G link to 2.5G for estimated 2.5x improvement"
- "Wireless mesh is limiting - consider wired backhaul"
- "This switch port supports 2.5G but is negotiating at 1G"
**3. Wireless Signal Quality on Path**
- Show RSSI/SNR for wireless hops (data available from UniFi API)
- Correlate signal quality with throughput
- Help identify weak wireless links
### Medium Effort / High Impact
**4. Scheduled Tests + Trend Graphs**
- Configure recurring tests (e.g., every 6 hours)
- Track performance over time per device/path
- Answer: "Is my AP degrading over time?"
- Detect intermittent issues
**5. Performance Alerts**
- Threshold-based notifications
- "Alert me if throughput drops below 500 Mbps"
- "Alert if efficiency drops below 70%"
- Integration with notification system
**6. PDF Export of Path Analysis**
- Export current path analysis to PDF
- Useful for documentation, client reports, troubleshooting
- Leverage existing PDF infrastructure
### Higher Effort / Differentiating
**7. Live Path Monitoring**
- Real-time visualization during test execution
- Show per-second throughput on each hop
- Animated data flow through path
**8. Multi-Device Comparison**
- Side-by-side comparison of multiple devices/paths
- "Which AP has the best backhaul?"
- Identify network-wide patterns
**9. Historical Path Change Detection**
- Detect when network topology changes
- "Your path now goes through an additional switch"
- Track infrastructure changes over time
## Notes
- Architecture: Network Optimizer acts as iperf3 client, SSHs to target to start iperf3 server
- Path analysis uses LocalIp from iperf3 output for accurate server detection (see the parsing sketch below)
- Device icons resolved via UniFi product database shortname lookup
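The LocalIp referenced above comes from iperf3's JSON output (`iperf3 -J`), where the client-side address appears as `start.connected[0].local_host`. A minimal extraction sketch (the helper name is illustrative, not the actual parser):
```csharp
using System.Text.Json;

static class Iperf3Json
{
    // Returns the client-side IP from `iperf3 -J` output, or null if missing.
    public static string? ParseLocalIp(string json)
    {
        using var doc = JsonDocument.Parse(json);
        if (doc.RootElement.TryGetProperty("start", out var start) &&
            start.TryGetProperty("connected", out var connected) &&
            connected.ValueKind == JsonValueKind.Array &&
            connected.GetArrayLength() > 0 &&
            connected[0].TryGetProperty("local_host", out var localHost))
        {
            return localHost.GetString();
        }
        return null;
    }
}
```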
================================================
FILE: nuget.config
================================================
================================================
FILE: renovate.json
================================================
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:base",
":semanticCommits"
],
"nuget": {
"enabled": true
},
"dockerfile": {
"enabled": true
},
"customManagers": [
{
"customType": "regex",
"fileMatch": ["^docker/Dockerfile$"],
"matchStrings": ["ARG IPERF3_VERSION=(?\\d+\\.\\d+)"],
"datasourceTemplate": "github-releases",
"depNameTemplate": "esnet/iperf",
"extractVersionTemplate": "^(?\\d+\\.\\d+)$"
}
],
"packageRules": [
{
"matchPackageNames": ["esnet/iperf"],
"description": "iperf3 updates",
"groupName": "iperf3"
},
{
"matchDatasources": ["docker"],
"matchPackagePatterns": ["mcr.microsoft.com/dotnet/*"],
"description": ".NET Docker images",
"groupName": "dotnet-docker"
},
{
"matchDatasources": ["nuget"],
"matchPackagePatterns": ["Microsoft.*", "System.*"],
"description": "Microsoft NuGet packages",
"groupName": "microsoft-nuget"
},
{
"matchDatasources": ["nuget"],
"excludePackagePatterns": ["Microsoft.*", "System.*"],
"description": "Third-party NuGet packages",
"groupName": "nuget-dependencies"
},
{
"matchUpdateTypes": ["major"],
"labels": ["major-update"],
"automerge": false
},
{
"matchUpdateTypes": ["minor", "patch"],
"matchDatasources": ["nuget"],
"automerge": false
}
],
"labels": ["dependencies"],
"prHourlyLimit": 5,
"prConcurrentLimit": 10
}
================================================
FILE: scripts/README.md
================================================
# Development Scripts
Bash scripts for common development tasks. Works with Git Bash on Windows or native bash on macOS/Linux.
## Usage
```bash
# Make scripts executable (first time only)
chmod +x scripts/*.sh
# Run a script
./scripts/test.sh
```
## Available Scripts
| Script | Description |
|--------|-------------|
| `build.sh [Debug\|Release]` | Build the project |
| `test.sh` | Run all tests |
| `coverage.sh` | Run tests with code coverage report |
| `watch.sh` | Run web app with hot reload |
| `clean.sh` | Clean build artifacts and coverage |
| `publish.sh [output-dir]` | Publish for production |
| `docker-build.sh` | Build Docker image |
| `docker-run.sh` | Run container locally (port 8042) |
| `docker-stop.sh` | Stop container |
| `build-installer.ps1` | Build Windows MSI installer (PowerShell) |
| `install-macos-native.sh` | Install natively on macOS |
## Windows Installer
Build the MSI installer for Windows:
```powershell
powershell -ExecutionPolicy Bypass -File scripts/build-installer.ps1
```
Output: `publish/NetworkOptimizer-{version}-win-x64.msi`
The installer creates a single-file executable (~67 MB) with all dependencies embedded.
## macOS Native Installation
Install Network Optimizer natively on macOS (no Docker required):
```bash
# Clone the repository
git clone https://github.com/Ozark-Connect/NetworkOptimizer.git
cd NetworkOptimizer
# Run the installer
./scripts/install-macos-native.sh
```
The script:
1. Installs prerequisites via Homebrew (iperf3, nginx, .NET SDK)
2. Builds a single-file executable (~58 MB)
3. Sets up OpenSpeedTest with nginx
4. Creates a launchd service for auto-start
Install location: `~/network-optimizer/`
Service management:
```bash
# Stop
launchctl unload ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
# Start
launchctl load ~/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist
# Logs
tail -f ~/network-optimizer/logs/stdout.log
```
## Code Coverage
The `coverage.sh` script generates a coverage report:
```bash
./scripts/coverage.sh
```
Coverage results are saved to `./coverage/`. To get an HTML report, install ReportGenerator:
```bash
dotnet tool install -g dotnet-reportgenerator-globaltool
```
Then run `coverage.sh` again - it will automatically generate the HTML report.
================================================
FILE: scripts/build-installer.ps1
================================================
# Build Network Optimizer Windows Installer
# Creates a self-contained MSI package
param(
[string]$Configuration = "Release",
[string]$OutputDir = "$PSScriptRoot\..\publish"
)
$ErrorActionPreference = "Stop"
$RepoRoot = Split-Path -Parent $PSScriptRoot
$WebProject = Join-Path $RepoRoot "src\NetworkOptimizer.Web\NetworkOptimizer.Web.csproj"
$InstallerProject = Join-Path $RepoRoot "src\NetworkOptimizer.Installer\NetworkOptimizer.Installer.wixproj"
$PublishDir = Join-Path $RepoRoot "src\NetworkOptimizer.Web\bin\Release\net10.0\win-x64\publish"
# Get version from git tags (MinVer style)
Push-Location $RepoRoot
try {
$gitDescribe = git describe --tags --abbrev=0 2>$null
if ($gitDescribe) {
$Version = $gitDescribe -replace '^v', ''
} else {
# Fallback: count commits for version
$commitCount = git rev-list --count HEAD 2>$null
$Version = "0.0.$commitCount"
}
} catch {
$Version = "0.0.0"
}
Pop-Location
Write-Host "=== Building Network Optimizer Windows Installer ===" -ForegroundColor Cyan
Write-Host ""
Write-Host "Version: $Version"
Write-Host "Configuration: $Configuration"
Write-Host ""
# Step 1: Publish self-contained single-file application
Write-Host "[1/5] Publishing self-contained single-file application for win-x64..." -ForegroundColor Yellow
dotnet publish $WebProject `
-c $Configuration `
-r win-x64 `
--self-contained `
-p:PublishSingleFile=true `
-p:IncludeNativeLibrariesForSelfExtract=true `
-p:EnableCompressionInSingleFile=true `
-p:DebugType=None `
-p:MinVerVersionOverride=$Version `
-p:Version=$Version `
-p:FileVersion=$Version `
-p:AssemblyVersion=$Version `
-p:IncludeSourceRevisionInInformationalVersion=false `
/nodeReuse:false
if ($LASTEXITCODE -ne 0) {
Write-Error "Publish failed!"
exit 1
}
Write-Host "Published to: $PublishDir" -ForegroundColor Green
Write-Host ""
# Step 2: Build uwnspeedtest binaries
Write-Host "[2/5] Building uwnspeedtest binaries..." -ForegroundColor Yellow
$UwnSpeedTestSrc = Join-Path $RepoRoot "src\uwnspeedtest"
$ToolsDir = Join-Path $PublishDir "tools"
if (-not (Test-Path $ToolsDir)) { New-Item -ItemType Directory -Path $ToolsDir | Out-Null }
$GoCmd = Get-Command go -ErrorAction SilentlyContinue
if ($GoCmd) {
Push-Location $UwnSpeedTestSrc
# Build targets: local Windows binary + gateway linux/arm64 binary
$targets = @(
@{ GOOS = "windows"; GOARCH = "amd64"; Output = "uwnspeedtest-windows-amd64.exe"; Label = "windows/amd64" },
@{ GOOS = "windows"; GOARCH = "arm64"; Output = "uwnspeedtest-windows-arm64.exe"; Label = "windows/arm64" },
@{ GOOS = "windows"; GOARCH = "386"; Output = "uwnspeedtest-windows-386.exe"; Label = "windows/386" },
@{ GOOS = "linux"; GOARCH = "arm64"; Output = "uwnspeedtest-linux-arm64"; Label = "linux/arm64 (gateway)" }
)
foreach ($target in $targets) {
$env:CGO_ENABLED = "0"
$env:GOOS = $target.GOOS
$env:GOARCH = $target.GOARCH
go build -trimpath -ldflags "-s -w -X main.version=$Version" -o "$ToolsDir\$($target.Output)" .
if ($LASTEXITCODE -ne 0) {
Write-Warning "uwnspeedtest build failed for $($target.Label)"
} else {
Write-Host "Built uwnspeedtest for $($target.Label)" -ForegroundColor Green
}
}
$env:CGO_ENABLED = $null
$env:GOOS = $null
$env:GOARCH = $null
Pop-Location
} else {
Write-Warning "Go not installed - uwnspeedtest binaries will not be available in this installer"
}
Write-Host ""
# Step 3: Build wansteer binary (gateway-only, deployed via SSH to UniFi gateways)
Write-Host "[3/5] Building wansteer binary..." -ForegroundColor Yellow
$WanSteerSrc = Join-Path $RepoRoot "src\wansteer"
if (-not (Test-Path $ToolsDir)) { New-Item -ItemType Directory -Path $ToolsDir | Out-Null }
if ($GoCmd) {
Push-Location $WanSteerSrc
$env:CGO_ENABLED = "0"
$env:GOOS = "linux"
$env:GOARCH = "arm64"
go build -trimpath -ldflags "-s -w -X main.version=$Version" -o "$ToolsDir\wansteer-linux-arm64" .
if ($LASTEXITCODE -ne 0) {
Write-Warning "wansteer build failed for linux/arm64"
} else {
Write-Host "Built wansteer for linux/arm64 (gateway)" -ForegroundColor Green
}
$env:CGO_ENABLED = $null
$env:GOOS = $null
$env:GOARCH = $null
Pop-Location
} else {
Write-Warning "Go not installed - wansteer binary will not be available in this installer"
}
Write-Host ""
# Step 4: Build WiX installer
Write-Host "[4/5] Building MSI installer with WiX..." -ForegroundColor Yellow
dotnet build $InstallerProject -c $Configuration /nodeReuse:false
if ($LASTEXITCODE -ne 0) {
Write-Error "WiX build failed!"
exit 1
}
Write-Host ""
# Step 5: Copy to output
Write-Host "[5/5] Copying installer to publish folder..." -ForegroundColor Yellow
if (-not (Test-Path $OutputDir)) {
New-Item -ItemType Directory -Path $OutputDir | Out-Null
}
$InstallerBin = Join-Path $RepoRoot "src\NetworkOptimizer.Installer\bin\$Configuration"
$MsiFile = Get-ChildItem -Path $InstallerBin -Filter "*.msi" -Recurse | Select-Object -First 1
if ($MsiFile) {
$OutputName = "NetworkOptimizer-$Version-win-x64.msi"
$OutputPath = Join-Path $OutputDir $OutputName
Copy-Item $MsiFile.FullName $OutputPath -Force
$SizeMB = [math]::Round((Get-Item $OutputPath).Length / 1MB, 2)
Write-Host ""
Write-Host "=== Build Complete ===" -ForegroundColor Green
Write-Host "Installer: $OutputPath"
Write-Host "Size: $SizeMB MB"
}
else {
Write-Error "MSI file not found in $InstallerBin"
exit 1
}
================================================
FILE: scripts/build.sh
================================================
#!/bin/bash
# Build the project
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"
CONFIG="${1:-Debug}"
echo "Building ($CONFIG)..."
dotnet build -c "$CONFIG"
echo ""
echo "Build complete!"
================================================
FILE: scripts/clean.sh
================================================
#!/bin/bash
# Clean build artifacts and coverage
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"
echo "Cleaning build artifacts..."
dotnet clean -v q
echo "Removing bin/obj directories..."
find . -type d \( -name "bin" -o -name "obj" \) -not -path "./node_modules/*" -exec rm -rf {} + 2>/dev/null || true
echo "Removing coverage directory..."
rm -rf "$PROJECT_ROOT/coverage"
echo ""
echo "Clean complete!"
================================================
FILE: scripts/coverage.runsettings
================================================
<?xml version="1.0" encoding="utf-8"?>
<!-- Coverlet (XPlat Code Coverage) settings. Element structure reconstructed
     from the surviving values; the two boolean settings are assumed to be
     SingleHit and SkipAutoProps. -->
<RunSettings>
  <DataCollectionRunSettings>
    <DataCollectors>
      <DataCollector friendlyName="XPlat code coverage">
        <Configuration>
          <Format>cobertura</Format>
          <Exclude>[*Tests*]*,[*]*.Migrations.*</Exclude>
          <ExcludeByAttribute>Obsolete,GeneratedCodeAttribute,CompilerGeneratedAttribute</ExcludeByAttribute>
          <SingleHit>false</SingleHit>
          <SkipAutoProps>true</SkipAutoProps>
        </Configuration>
      </DataCollector>
    </DataCollectors>
  </DataCollectionRunSettings>
</RunSettings>
================================================
FILE: scripts/coverage.sh
================================================
#!/bin/bash
# Run tests with code coverage and generate HTML report
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
COVERAGE_DIR="$PROJECT_ROOT/coverage"
cd "$PROJECT_ROOT"
# Clean previous coverage
rm -rf "$COVERAGE_DIR"
mkdir -p "$COVERAGE_DIR"
echo "Running tests with coverage..."
dotnet test \
--collect:"XPlat Code Coverage" \
--results-directory "$COVERAGE_DIR" \
--settings "$SCRIPT_DIR/coverage.runsettings"
# Find all coverage files
COVERAGE_FILES=$(find "$COVERAGE_DIR" -name "coverage.cobertura.xml" | tr '\n' ';' | sed 's/;$//')
if [ -z "$COVERAGE_FILES" ]; then
echo "Error: No coverage files generated"
exit 1
fi
echo ""
echo "Coverage files found:"
find "$COVERAGE_DIR" -name "coverage.cobertura.xml"
# Try to generate HTML report if reportgenerator is available
if command -v reportgenerator &> /dev/null; then
echo ""
echo "Generating HTML report..."
reportgenerator \
-reports:"$COVERAGE_FILES" \
-targetdir:"$COVERAGE_DIR/report" \
-reporttypes:"Html;TextSummary"
echo ""
echo "=== Coverage Summary ==="
cat "$COVERAGE_DIR/report/Summary.txt" 2>/dev/null || true
echo ""
echo "HTML report: $COVERAGE_DIR/report/index.html"
else
echo ""
echo "=== Coverage Summary (first file) ==="
FIRST_FILE=$(find "$COVERAGE_DIR" -name "coverage.cobertura.xml" | head -1)
grep -E "line-rate|branch-rate" "$FIRST_FILE" | head -1 | \
sed 's/.*line-rate="\([^"]*\)".*branch-rate="\([^"]*\)".*/Line: \1, Branch: \2/' | \
awk -F',' '{
split($1, l, ": ");
split($2, b, ": ");
printf "Line Coverage: %.1f%%\n", l[2] * 100;
printf "Branch Coverage: %.1f%%\n", b[2] * 100;
}'
echo ""
echo "To get a merged report, install ReportGenerator:"
echo " dotnet tool install -g dotnet-reportgenerator-globaltool"
fi
================================================
FILE: scripts/deploy-external-speedtest.sh
================================================
#!/bin/bash
# Deploy or update an external OpenSpeedTest server for WAN speed testing
# This fetches only the speedtest files needed - not the full repo
#
# Usage:
# Fresh install (interactive):
# ./deploy-external-speedtest.sh
#
# Fresh install (from Settings-generated command):
# ./deploy-external-speedtest.sh <optimizer-url> [server-id] [port]
#
# Update existing installation:
# ./deploy-external-speedtest.sh --update
#
# Prerequisites: Docker and Docker Compose on the target machine
set -e
INSTALL_DIR="/opt/netopt-speed-test"
BRANCH="${BRANCH:-main}"
GITHUB_REPO="Ozark-Connect/NetworkOptimizer"
# --- Slug generation (must match C# ExternalSpeedTestServer.GenerateServerId) ---
generate_server_id() {
echo "$1" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/--*/-/g' | sed 's/^-//;s/-$//'
}
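# e.g. generate_server_id "VPS Chicago" -> "vps-chicago"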
# --- Download all required files from GitHub via tarball ---
# Uses the GitHub API to download a tarball of the repo, then extracts only
# the files needed for the speed test container. No file list to maintain.
download_files() {
local TARBALL_URL="https://github.com/$GITHUB_REPO/archive/refs/heads/$BRANCH.tar.gz"
local TEMP_TAR=$(mktemp)
local TEMP_DIR=$(mktemp -d)
curl -sL "$TARBALL_URL" -o "$TEMP_TAR"
# Tarball root is NetworkOptimizer-<branch>/
local STRIP=1 # strip the root directory
# Extract only the directories we need
# --wildcards is needed on GNU tar (Linux), but errors on BSD tar (macOS)
if tar --version 2>&1 | grep -q GNU; then
tar -xzf "$TEMP_TAR" -C "$TEMP_DIR" --strip-components=$STRIP --wildcards \
"*/src/OpenSpeedTest/" \
"*/docker/openspeedtest/"
else
tar -xzf "$TEMP_TAR" -C "$TEMP_DIR" --strip-components=$STRIP \
"*/src/OpenSpeedTest/" \
"*/docker/openspeedtest/"
fi
# Copy into install directory
mkdir -p docker/openspeedtest src/OpenSpeedTest
cp -r "$TEMP_DIR/docker/openspeedtest/"* docker/openspeedtest/
cp -r "$TEMP_DIR/src/OpenSpeedTest/"* src/OpenSpeedTest/
rm -rf "$TEMP_TAR" "$TEMP_DIR"
}
# --- Update mode ---
if [ "${1}" = "--update" ]; then
if [ ! -f "$INSTALL_DIR/docker-compose.yml" ]; then
echo "Error: No existing installation found at $INSTALL_DIR"
echo "Run without --update to do a fresh install."
exit 1
fi
cd "$INSTALL_DIR"
echo "=== Updating External Speed Test Server ==="
echo ""
echo "Downloading latest files..."
download_files
echo "Rebuilding container..."
docker compose build
docker compose up -d
echo ""
echo "=== Update Complete ==="
exit 0
fi
# --- Fresh install ---
# Check Docker first
if ! command -v docker &> /dev/null; then
echo "Error: Docker is not installed"
exit 1
fi
if ! docker compose version &> /dev/null; then
echo "Error: Docker Compose is not installed"
exit 1
fi
# If args provided, use them (non-interactive / Settings-generated command)
if [ -n "$1" ]; then
OPTIMIZER_URL="$1"
SERVER_ID="${2:-external}"
PORT="${3:-3005}"
else
# Interactive mode
echo "=== Network Optimizer - External Speed Test Server Setup ==="
echo ""
echo "This sets up a remote speed test server that your network clients can use"
echo "to measure their real internet (WAN) speed. Results are posted back to your"
echo "Network Optimizer instance automatically."
echo ""
# Optimizer URL
echo "What is the URL of your Network Optimizer instance?"
echo " This is the address your browser uses to access Network Optimizer."
echo " Examples: https://optimizer.example.com, http://192.168.1.100:8042"
echo ""
read -rp "Optimizer URL: " OPTIMIZER_URL < /dev/tty
if [ -z "$OPTIMIZER_URL" ]; then
echo "Error: Optimizer URL is required."
exit 1
fi
echo ""
# Server ID
echo "If you've already configured this server in Network Optimizer Settings,"
echo "enter the Server ID shown there. Otherwise, enter a friendly name and"
echo "we'll generate the ID for you."
echo ""
read -rp "Server ID or name (e.g. vps-chicago or VPS Chicago): " SERVER_INPUT < /dev/tty
if [ -z "$SERVER_INPUT" ]; then
echo "Error: Server ID or name is required."
exit 1
fi
# Check if it looks like a slug already (lowercase, hyphens, numbers only) or a display name
if echo "$SERVER_INPUT" | grep -qE '^[a-z0-9][a-z0-9-]*[a-z0-9]$'; then
SERVER_ID="$SERVER_INPUT"
else
SERVER_ID=$(generate_server_id "$SERVER_INPUT")
echo ""
echo " Generated Server ID: $SERVER_ID"
echo ""
echo " Important: When you configure this server in Network Optimizer Settings,"
echo " use the name \"$SERVER_INPUT\" so the Server IDs match."
fi
echo ""
# Port
read -rp "Port [3005]: " PORT < /dev/tty
PORT="${PORT:-3005}"
echo ""
fi
echo "=== Network Optimizer - External Speed Test Server ==="
echo "Optimizer URL: $OPTIMIZER_URL"
echo "Server ID: $SERVER_ID"
echo "Port: $PORT"
echo "Install Dir: $INSTALL_DIR"
echo ""
# Create install directory
mkdir -p "$INSTALL_DIR"
cd "$INSTALL_DIR"
echo "Downloading speed test files..."
download_files
# Create .dockerignore
cat > .dockerignore << 'EOF'
.git
*.md
tests/
scripts/
research/
plans/
EOF
# Create docker-compose.yml
cat > docker-compose.yml << COMPOSE_EOF
services:
speedtest:
build:
context: .
dockerfile: docker/openspeedtest/Dockerfile
container_name: netopt-wan-speedtest
restart: unless-stopped
ports:
- "${PORT}:3000"
environment:
- TZ=\${TZ:-UTC}
- REVERSE_PROXIED_HOST_NAME=$(echo "$OPTIMIZER_URL" | sed 's|https\?://||' | sed 's|/.*||')
- EXTERNAL_SERVER_ID=${SERVER_ID}
COMPOSE_EOF
echo "Building and starting speed test server..."
docker compose build
docker compose up -d
echo ""
echo "=== Deployment Complete ==="
echo "Speed test URL: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo 'localhost'):$PORT"
echo ""
echo "IMPORTANT: HTTPS is strongly recommended. Chrome and Edge block speed test"
echo "results from posting back when the page is served over HTTP (Private Network"
echo "Access). Firefox and Safari don't currently enforce this, but HTTPS is still"
echo "recommended. Set up a reverse proxy with TLS and HTTP/1.1."
echo "See DEPLOYMENT.md for setup instructions."
echo ""
echo "Then configure Network Optimizer Settings -> External Speed Test Server:"
echo " - Name: (use the same name you entered here so the Server IDs match)"
echo " - Host: speedtest.yourdomain.com"
echo " - Port: 443"
echo " - Scheme: HTTPS"
echo ""
echo "To update in the future, run:"
echo " curl -fsSL https://raw.githubusercontent.com/$GITHUB_REPO/main/scripts/deploy-external-speedtest.sh | bash -s -- --update"
================================================
FILE: scripts/docker-build.sh
================================================
#!/bin/bash
# Build Docker image locally
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT/docker"
IMAGE_NAME="${1:-network-optimizer}"
TAG="${2:-latest}"
echo "Building Docker image: $IMAGE_NAME:$TAG"
docker compose build network-optimizer
echo ""
echo "Image built: $IMAGE_NAME:$TAG"
================================================
FILE: scripts/docker-run.sh
================================================
#!/bin/bash
# Run Docker container locally (macOS compatible)
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT/docker"
echo "Starting container..."
echo "Access at http://localhost:8042"
echo ""
docker compose -f docker-compose.macos.yml up -d
echo ""
echo "Container started. View logs with: docker logs -f network-optimizer"
================================================
FILE: scripts/docker-stop.sh
================================================
#!/bin/bash
# Stop Docker container
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT/docker"
echo "Stopping container..."
docker compose -f docker-compose.macos.yml down
echo "Container stopped."
================================================
FILE: scripts/extract-elevation0-from-images.py
================================================
"""
Extract Elevation 0 deg antenna pattern data from Ubiquiti reference images.
Uses Elevation 90 deg plots (column 2) as ground truth to calibrate extraction.
The .ant files have el90 data, so we extract el90 from the image, compare to .ant,
and use the match quality to validate center detection, radius, and angular mapping.
Usage:
python scripts/extract-elevation0-from-images.py --db-max 15 --db-min -20 [--debug]
For images with multiple antenna variants (e.g., E7-Audience narrow + wide):
python scripts/extract-elevation0-from-images.py --db-max 10 --variants narrow,wide
To correct center detection offset (positive = move center down on page):
python scripts/extract-elevation0-from-images.py --db-max 10 --cy-shift 15
For images where row detection merges adjacent rows (e.g., E7 Summary with 7 bands):
python scripts/extract-elevation0-from-images.py --db-max 10 --n-bands 7
"""
import sys
import json
import math
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont
import numpy as np
# ── Color detection ──────────────────────────────────────────────────────────
def is_pattern_line(r, g, b):
"""Blue pattern line: R~11, G~11, B~253."""
return int(r) < 50 and int(g) < 50 and int(b) > 200
def is_grid_pixel(r, g, b):
"""Grid lines have a distinct blue tint (B significantly > R and G).
Excludes pure gray pixels from AP product images and text."""
ri, gi, bi = int(r), int(g), int(b)
return (130 < bi < 255 and
bi > ri + 15 and bi > gi + 15 and
not is_pattern_line(r, g, b))
# ── Plot detection ───────────────────────────────────────────────────────────
def find_row_spans(arr, col_start, col_end, n_expected=None):
"""Find vertical spans containing blue pattern pixels in a column range.
If n_expected is given, the intent is to adaptively merge the closest spans
until exactly n_expected remain; the current implementation ignores it and
always uses a 20px gap threshold.
"""
h = arr.shape[0]
# Build mask of blue pattern pixels
mask = np.zeros((h, col_end - col_start), dtype=bool)
for y in range(h):
for x in range(col_start, col_end):
r, g, b = arr[y, x, :3]
if is_pattern_line(r, g, b):
mask[y, x - col_start] = True
# Find row spans with blue pixels
row_has_blue = mask.any(axis=1)
spans = []
in_span = False
start = 0
for y in range(h):
if row_has_blue[y] and not in_span:
start = y
in_span = True
elif not row_has_blue[y] and in_span:
if y - start > 30:
spans.append((start, y))
in_span = False
if in_span and h - start > 30:
spans.append((start, h))
# Merge spans with < 20px gap (within-plot pixel noise)
merged = [spans[0]] if spans else []
for s, e in spans[1:]:
if s - merged[-1][1] < 20:
merged[-1] = (merged[-1][0], e)
else:
merged.append((s, e))
return merged, mask
def find_grid_center(arr, y_start, y_end, col_start, col_end, rough_cx, rough_cy, pat_r):
"""Find true plot center using grid pixel centroid.
Grid circles are symmetric around the center, unlike the pattern which
is asymmetric for directional antennas. The centroid of grid pixels
gives us the actual polar plot origin.
"""
h = arr.shape[0]
margin = int(pat_r * 0.3)
scan_y0 = max(0, rough_cy - int(pat_r) - margin)
scan_y1 = min(h, rough_cy + int(pat_r) + margin)
scan_x0 = max(col_start, rough_cx - int(pat_r) - margin)
scan_x1 = min(col_end, rough_cx + int(pat_r) + margin)
grid_xs, grid_ys = [], []
for y in range(scan_y0, scan_y1):
for x in range(scan_x0, scan_x1):
r, g, b = arr[y, x, :3]
if is_grid_pixel(r, g, b):
grid_xs.append(x)
grid_ys.append(y)
if len(grid_xs) > 100:
return int(np.mean(grid_xs)), int(np.mean(grid_ys)), len(grid_xs)
return rough_cx, rough_cy, len(grid_xs)
def is_grid_tinted(r, g, b):
"""Relaxed grid pixel test for faint crosshair/ring lines.
The standard is_grid_pixel requires B > R+15 and B > G+15 and B > 130,
which misses faint grid lines (e.g., U7-Outdoor where B≈121, R≈105).
This uses a gentler threshold: B > R+8 and B > G+8 with brightness check.
"""
ri, gi, bi = int(r), int(g), int(b)
brightness = (ri + gi + bi) / 3
if brightness > 215 or brightness < 30:
return False
if ri < 50 and gi < 50 and bi > 200: # pattern line
return False
return bi > ri + 8 and bi > gi + 8
def trace_crosshair_extent(arr, cx, cy, col_start, col_end):
"""Find the outer radius by tracing both horizontal and vertical crosshairs.
Each crosshair line (horizontal 90-270 and vertical 0-180) extends from
center to the edge of the grid. We trace each line outward from center
and find where it definitively ends (last non-white pixel before sustained
white background). The maximum extent across all 4 directions gives the
radius.
This works regardless of whether the plot has a blue-tinted background.
"""
icx = int(round(cx))
icy = int(round(cy))
h, w = arr.shape[:2]
def trace_line(start, step, get_pixel):
"""Trace a crosshair line outward from center.
Only tracks blue-tinted pixels (the grid/crosshair always has blue tint).
Gray text labels (R≈G≈B) are ignored so they don't inflate the extent.
Returns distance from center to the last blue-tinted pixel."""
pos = start
last_tinted = 0
white_run = 0
while True:
pixel = get_pixel(pos)
if pixel is None:
break # out of bounds
r, g, b = pixel
ri, gi, bi = int(r), int(g), int(b)
brightness = (ri + gi + bi) / 3
dist = abs(pos - start) + 20 # +20 because we start 20px from center
# Only count pixels with blue tint (crosshair/grid always has it)
# This skips gray text labels where R≈G≈B
has_tint = (bi > ri + 3 and bi > gi + 3) and brightness < 240
if has_tint:
last_tinted = dist
white_run = 0
elif brightness >= 245:
white_run += 1
if white_run > 30:
break # line has ended
pos += step
return last_tinted
def get_h_pixel(x):
if col_start <= x < col_end:
return arr[icy, x, :3]
return None
def get_v_pixel(y):
if 0 <= y < h:
return arr[y, icx, :3]
return None
# Trace all 4 directions, starting 20px from center to skip AP image
left_r = trace_line(icx - 20, -1, get_h_pixel)
right_r = trace_line(icx + 20, 1, get_h_pixel)
down_r = trace_line(icy + 20, 1, get_v_pixel)
up_r = trace_line(icy - 20, -1, get_v_pixel)
best = max(left_r, right_r, down_r, up_r)
print(f" crosshair_trace: L={left_r} R={right_r} "
f"U={up_r} D={down_r} -> best={best}")
return best if best > 40 else None
def find_horizontal_crosshairs(arr, col_start, col_end):
"""Find plot centers and approximate radii by scanning for horizontal crosshairs.
Each polar plot has a horizontal crosshair (the 90-270 degree line) that
spans the full grid diameter. For each row in the column, we measure the
extent (leftmost to rightmost) of blue-tinted pixels. The crosshair row
has the widest extent for each plot.
Returns list of (cy, cx, half_diameter, min_x, max_x) sorted by cy.
The min_x/max_x are the horizontal extent endpoints for later refinement.
"""
h = arr.shape[0]
# For each row, find the extent of blue-tinted pixels
row_data = [] # (y, min_x, max_x, extent)
for y in range(h):
min_x = None
max_x = None
for x in range(col_start, col_end):
r, g, b = arr[y, x, :3]
if is_grid_tinted(r, g, b) or is_grid_pixel(r, g, b):
if min_x is None:
min_x = x
max_x = x
if min_x is not None:
extent = max_x - min_x
if extent > 40: # minimum plausible plot diameter
row_data.append((y, min_x, max_x, extent))
if not row_data:
return []
# Group into vertical clusters separated by gaps > 30px
clusters = [[row_data[0]]]
for i in range(1, len(row_data)):
if row_data[i][0] - row_data[i - 1][0] > 30:
clusters.append([row_data[i]])
else:
clusters[-1].append(row_data[i])
# For each cluster, find the row with the widest extent = crosshair row
results = []
for cluster in clusters:
if len(cluster) < 10:
continue # too small to be a plot
best = max(cluster, key=lambda r: r[3])
y, min_x, max_x, extent = best
cx = (min_x + max_x) / 2
half_r = extent / 2
results.append((y, cx, half_r, min_x, max_x))
return results
def compute_outer_ring_radius(blue_half_span, n_rings):
"""Compute the outer ring radius from the crosshair blue-tinted extent.
The blue-tinted crosshair gradient extends to the second-to-outermost
grid ring. The outer ring (at db_max) is one ring-spacing further out:
outer_r = blue_half_span * (n_rings - 1) / (n_rings - 2)
For 7 rings (30 dB range, 5 dB spacing): outer_r = half_span * 6/5
"""
n_intervals = n_rings - 1
return blue_half_span * n_intervals / (n_intervals - 1)
def _trace_vertical_crosshair(arr, cx, cy):
"""Trace the vertical crosshair (0-180 degree line) up and down from center.
Uses the same blue-tint-only logic as horizontal crosshair tracing.
Returns max(up_extent, down_extent), or None if no tinted pixels found.
"""
h = arr.shape[0]
icx = int(round(cx))
icy = int(round(cy))
def trace_v(start_y, step):
pos = start_y
last_tinted = 0
white_run = 0
while True:
if pos < 0 or pos >= h:
break
r, g, b = arr[pos, icx, :3]
ri, gi, bi = int(r), int(g), int(b)
brightness = (ri + gi + bi) / 3
dist = abs(pos - icy)
has_tint = (bi > ri + 3 and bi > gi + 3) and brightness < 240
if has_tint:
last_tinted = dist
white_run = 0
elif brightness >= 245:
white_run += 1
if white_run > 30:
break
pos += step
return last_tinted
up_r = trace_v(icy - 20, -1)
down_r = trace_v(icy + 20, 1)
best = max(up_r, down_r)
print(f" vertical_trace: U={up_r} D={down_r} -> best={best}")
return best if best > 40 else None
def _find_ring_spacing_from_crosshair(arr, cx, cy, current_radius):
"""Refine center and outer ring radius by detecting ring crossings on the crosshair row.
Scans LEFT from center along the horizontal crosshair, detecting brightness
dips where concentric grid rings cross the line. If 3+ consecutive dips have
consistent spacing:
1. Snaps current_radius to the nearest ring boundary
2. Computes a refined center from the ring positions (each dip should be
at an integer multiple of spacing from the true center)
Returns (outer_radius, refined_cx) or (None, None) if not detected.
"""
icx = int(round(cx))
icy = int(round(cy))
# Scan LEFT from center, collecting brightness dips
start_dist = 80 # skip center area
max_dist = int(current_radius * 1.3)
dips = []
in_dip = False
dip_min_bright = 999
dip_min_dist = 0
for dist in range(start_dist, max_dist):
x = icx - dist
if x < 0:
break
r, g, b = arr[icy, x, :3]
bright = (int(r) + int(g) + int(b)) / 3
if bright < 95:
if not in_dip:
in_dip = True
dip_min_bright = bright
dip_min_dist = dist
elif bright < dip_min_bright:
dip_min_bright = bright
dip_min_dist = dist
else:
if in_dip:
dips.append(dip_min_dist)
in_dip = False
dip_min_bright = 999
# Cluster dips within 4px
if not dips:
return None, None
clustered = [dips[0]]
for d in dips[1:]:
if d - clustered[-1] < 4:
clustered[-1] = (clustered[-1] + d) // 2
else:
clustered.append(d)
print(f" ring_dips LEFT: {clustered}")
# Find 3+ consecutive dips with consistent spacing
spacing = _find_consistent_spacing(clustered, tol=5)
if spacing is None or spacing < 15:
print(f" no consistent ring spacing found"
f"{f' (spacing={spacing:.1f} too small)' if spacing else ''}")
return None, None
# Snap current_radius to nearest ring boundary
n_intervals = round(current_radius / spacing)
outer_r = n_intervals * spacing
# Refine center: each dip at distance d from icx should be at
# ring_n * spacing from true center. Compute true_cx from each dip
# using the consistent-spacing dips only.
# Dip at distance d (going left) means ring is at x = icx - d.
# Ring number n (from center) satisfies: true_cx - (icx - d) = n * spacing
# So true_cx = icx - d + n * spacing, where n = round(d / spacing).
cx_estimates = []
for d in clustered:
n = round(d / spacing)
if n >= 1:
est_cx = icx - d + n * spacing
cx_estimates.append(est_cx)
if cx_estimates:
import statistics
refined_cx = statistics.median(cx_estimates)
else:
refined_cx = cx
print(f" ring_spacing={spacing:.1f}, snap {current_radius:.0f} "
f"-> {n_intervals} intervals -> outer_r={outer_r:.0f}, "
f"cx {cx:.0f}->{refined_cx:.1f}")
return outer_r, refined_cx
def _find_consistent_spacing(dips, tol=5):
"""Find consistent spacing among 3+ consecutive dips.
Returns the median spacing if found, None otherwise.
"""
if len(dips) < 3:
return None
spacings = [dips[j + 1] - dips[j] for j in range(len(dips) - 1)]
# Look for 2+ consecutive spacings within tolerance (2 spacings = 3 dips).
# Scanning every start position already covers all suffixes, so no outer
# loop over dip subsets is needed.
for start in range(len(spacings) - 1):
consistent = [spacings[start]]
for k in range(start + 1, len(spacings)):
if abs(spacings[k] - spacings[start]) <= tol:
consistent.append(spacings[k])
else:
break
if len(consistent) >= 2:  # 2 spacings = 3 dips
import statistics
return statistics.median(consistent)
return None
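# Example: _find_consistent_spacing([100, 150, 200, 250]) -> 50
# (three consecutive 50 px spacings; the median spacing is returned)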
def find_plots_in_column(arr, col_start, col_end, n_expected=None, n_rings=7):
"""Find polar plot centers in a column range.
Primary method: scan for horizontal crosshair lines. Each plot's crosshair
(the 90-270 degree line) spans the full grid diameter. The widest row of
grid-tinted pixels per plot gives both center and radius.
Blue-pixel span detection supplies each plot's pattern radius; when no blue
span matches a crosshair, the crosshair's own center and a scaled radius are
used as a rough fallback.
"""
h = arr.shape[0]
# Primary: find plots via horizontal crosshair lines
crosshairs = find_horizontal_crosshairs(arr, col_start, col_end)
# Also find blue pattern spans (for pattern_radius and fallback)
merged, mask = find_row_spans(arr, col_start, col_end, n_expected=n_expected)
# Match each crosshair to its nearest blue pattern span
plots = []
for ch_cy, ch_cx, ch_half_r, ch_min_x, ch_max_x in crosshairs:
cx = int(round(ch_cx))
cy = ch_cy
radius = ch_half_r
print(f" horiz_line: cy={cy} cx={cx} half_diameter={ch_half_r:.0f}"
f" min_x={ch_min_x} max_x={ch_max_x}")
# Find the blue pattern span containing this crosshair
blue_xs, blue_ys = [], []
best_span = None
for y_start, y_end in merged:
if y_start - 30 <= cy <= y_end + 30:
best_span = (y_start, y_end)
for y in range(y_start, y_end):
for x in range(col_start, col_end):
if mask[y, x - col_start]:
blue_xs.append(x)
blue_ys.append(y)
break
if blue_xs:
rough_cx = (min(blue_xs) + max(blue_xs)) // 2
rough_cy = (min(blue_ys) + max(blue_ys)) // 2
dists = [math.sqrt((bx - cx) ** 2 + (by - cy) ** 2)
for bx, by in zip(blue_xs, blue_ys)]
dists.sort()
pat_r = dists[int(len(dists) * 0.95)]
else:
rough_cx, rough_cy = cx, cy
pat_r = radius * 0.8
plots.append({
"cx": cx, "cy": cy,
"blue_cx": rough_cx, "blue_cy": rough_cy,
"pattern_radius": pat_r,
"radius": radius,
"h_min_x": ch_min_x, "h_max_x": ch_max_x,
"y_range": best_span or (cy - int(radius), cy + int(radius)),
"n_grid_pixels": 0,
})
# Deduplicate: if two plots are within 150px vertically, keep wider one
plots.sort(key=lambda p: p["cy"])
deduped = []
i = 0
while i < len(plots):
if i + 1 < len(plots) and abs(plots[i + 1]["cy"] - plots[i]["cy"]) < 150:
deduped.append(plots[i] if plots[i]["radius"] >= plots[i + 1]["radius"]
else plots[i + 1])
i += 2
else:
deduped.append(plots[i])
i += 1
plots = deduped
# Enforce consistent cx across plots with similar centers.
# Plots with very different raw centers (e.g., 2.4 GHz vs 5 GHz rows with
# different plot sizes) keep their own center to avoid offset errors.
if len(plots) > 1:
median_cx = int(sorted(p["cx"] for p in plots)[len(plots) // 2])
for p in plots:
old_cx = p["cx"]
if abs(old_cx - median_cx) <= 8:
p["cx"] = median_cx
print(f" enforce cx {old_cx}->{median_cx}")
else:
print(f" keep raw cx {old_cx} (median={median_cx}, "
f"diff={abs(old_cx - median_cx)})")
# Recompute radius from (possibly enforced) center using endpoints
left_r = p["cx"] - p["h_min_x"]
right_r = p["h_max_x"] - p["cx"]
h_radius = max(left_r, right_r)
print(f" H left_r={left_r} right_r={right_r} -> "
f"h_radius={h_radius}")
p["radius"] = h_radius
# Compute scan y_range for each plot: midpoint to adjacent plots (or image edge)
h = arr.shape[0]
for i, p in enumerate(plots):
top = plots[i - 1]["cy"] if i > 0 else 0
bot = plots[i + 1]["cy"] if i < len(plots) - 1 else h
mid_top = (top + p["cy"]) // 2 if i > 0 else 0
mid_bot = (p["cy"] + bot) // 2 if i < len(plots) - 1 else h
p["scan_y_range"] = (mid_top, mid_bot)
# Refine radius using vertical crosshair trace and ring spacing snap
for p in plots:
# Vertical crosshair trace (blue-tint-only)
v_radius = _trace_vertical_crosshair(arr, p["cx"], p["cy"])
if v_radius is not None:
p["radius"] = max(p["radius"], v_radius)
# Ring spacing snap: detect ring crossings on crosshair row, then
# snap the current radius to the nearest ring boundary and refine cx.
ring_r, refined_cx = _find_ring_spacing_from_crosshair(
arr, p["cx"], p["cy"], p["radius"])
if ring_r is not None:
old_r = p["radius"]
old_cx = p["cx"]
if ring_r != old_r:
p["radius"] = int(round(ring_r))
if abs(refined_cx - old_cx) > 1:
p["cx"] = int(round(refined_cx))
if p["radius"] != old_r or p["cx"] != old_cx:
print(f" ring_snap refined cy={p['cy']}: "
f"cx {old_cx}->{p['cx']} r {old_r:.0f}->{p['radius']}")
return plots
def detect_outer_from_crosshair_extent(arr, cx, cy, col_start, col_end):
"""Find outer ring radius by scanning along crosshair lines.
The horizontal and vertical crosshairs are continuous lines of grid-colored
pixels that extend from center to the outer ring. The last grid pixel in
each direction is on the outer ring.
Scans 4 directions (right, left, up, down), takes the median to handle
edges or occlusion, and verifies there's a clear gap beyond (white background,
not stray grid-colored pixels).
Returns the computed outer ring radius, or None if detection fails.
"""
h, w = arr.shape[:2]
extents = []
# Scan right along horizontal crosshair
last_grid = None
gap_after = 0
for x in range(cx + 5, min(col_end, cx + 400)):
r, g, b = arr[cy, x, :3]
if is_grid_pixel(r, g, b):
last_grid = x
gap_after = 0
else:
gap_after += 1
if gap_after > 15 and last_grid is not None:
break # clear gap after last grid pixel = beyond outer ring
if last_grid is not None:
extents.append(("right", last_grid - cx))
# Scan left
last_grid = None
gap_after = 0
for x in range(cx - 5, max(col_start, cx - 400), -1):
r, g, b = arr[cy, x, :3]
if is_grid_pixel(r, g, b):
last_grid = x
gap_after = 0
else:
gap_after += 1
if gap_after > 15 and last_grid is not None:
break
if last_grid is not None:
extents.append(("left", cx - last_grid))
# Scan up along vertical crosshair
last_grid = None
gap_after = 0
for y in range(cy - 5, max(0, cy - 400), -1):
r, g, b = arr[y, cx, :3]
if is_grid_pixel(r, g, b):
last_grid = y
gap_after = 0
else:
gap_after += 1
if gap_after > 15 and last_grid is not None:
break
if last_grid is not None:
extents.append(("up", cy - last_grid))
# Scan down
last_grid = None
gap_after = 0
for y in range(cy + 5, min(h, cy + 400)):
r, g, b = arr[y, cx, :3]
if is_grid_pixel(r, g, b):
last_grid = y
gap_after = 0
else:
gap_after += 1
if gap_after > 15 and last_grid is not None:
break
if last_grid is not None:
extents.append(("down", last_grid - cy))
if not extents:
return None
# Take median of all valid extents
values = sorted(v for _, v in extents)
if len(values) >= 3:
outer_r = values[len(values) // 2]
else:
outer_r = int(sum(values) / len(values))
labels = ", ".join(f"{d}={v}" for d, v in extents)
print(f" crosshair_extent: {labels} -> outer_r={outer_r}")
return outer_r
def detect_grid_boundary(arr, cx, cy, pattern_radius, col_start, col_end):
"""Find outermost grid ring using distance histogram of grid pixels.
Collects distances from center to all grid pixels, builds a histogram,
and finds peaks. Grid rings create peaks; scattered label pixels don't.
The outermost clear peak is the outer grid ring.
"""
h = arr.shape[0]
margin = int(pattern_radius * 0.5)
scan_r = int(pattern_radius) + margin
# Scan region around center for grid pixels and record their distance
scan_y0 = max(0, cy - scan_r)
scan_y1 = min(h, cy + scan_r)
scan_x0 = max(col_start, cx - scan_r)
scan_x1 = min(col_end, cx + scan_r)
dist_hist = [0] * (scan_r + 1)
for y in range(scan_y0, scan_y1):
for x in range(scan_x0, scan_x1):
rv, gv, bv = arr[y, x, :3]
if is_grid_pixel(rv, gv, bv):
d = int(math.sqrt((x - cx) ** 2 + (y - cy) ** 2))
if d <= scan_r:
dist_hist[d] += 1
# Smooth histogram (5px window) to find ring peaks
smoothed = [0] * len(dist_hist)
for i in range(2, len(dist_hist) - 2):
smoothed[i] = sum(dist_hist[i - 2:i + 3]) / 5
# Find peaks: local maxima above a threshold.
# Grid rings should have more pixels than inter-ring areas.
threshold = max(smoothed[10:]) * 0.3 if max(smoothed[10:]) > 0 else 1
peaks = []
for r in range(10, len(smoothed) - 3):
if (smoothed[r] >= threshold and
smoothed[r] >= smoothed[r - 3] and
smoothed[r] >= smoothed[r + 3]):
peaks.append((r, smoothed[r]))
# Deduplicate peaks within 5px of each other (keep highest)
deduped = []
for r, v in peaks:
if deduped and r - deduped[-1][0] < 5:
if v > deduped[-1][1]:
deduped[-1] = (r, v)
else:
deduped.append((r, v))
if deduped:
# The outermost peak is the outer grid ring.
# But check: if the outermost peak is much weaker than inner peaks,
# it might be a label artifact. Use the outermost "strong" peak.
max_val = max(v for _, v in deduped)
strong_peaks = [(r, v) for r, v in deduped if v > max_val * 0.2]
outer_r = max(r for r, v in strong_peaks)
return outer_r
return int(pattern_radius)
def _kasa_circle_fit(points):
"""Algebraic circle fit (Kasa method) for a list of (x, y) points.
Returns (cx, cy, radius).
"""
xs = np.array([p[0] for p in points], dtype=float)
ys = np.array([p[1] for p in points], dtype=float)
A = np.column_stack([xs, ys, np.ones(len(xs))])
b_vec = xs ** 2 + ys ** 2
result, _, _, _ = np.linalg.lstsq(A, b_vec, rcond=None)
fit_cx = result[0] / 2
fit_cy = result[1] / 2
fit_r = math.sqrt(abs(result[2] + fit_cx ** 2 + fit_cy ** 2))
return fit_cx, fit_cy, fit_r
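# Example (synthetic data): points sampled from a circle centered at
# (10, 20) with radius 5 are recovered almost exactly:
#   pts = [(10 + 5 * math.cos(t), 20 + 5 * math.sin(t))
#          for t in (i * 0.1 for i in range(63))]
#   _kasa_circle_fit(pts)  # -> (~10.0, ~20.0, ~5.0)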
def fit_outer_ring(arr, rough_cx, rough_cy, pattern_radius, col_start, col_end):
"""Find the outer grid ring by scanning from outside inward at many angles.
The AP product image can occlude inner rings, but the outer ring is always
visible. Scans from well outside the plot inward, looking for the first
non-white pixel (brightness-based, since grid rings may be gray without
blue tint). Uses iterative outlier removal to reject text label hits.
Returns (fit_cx, fit_cy, fit_radius) or None if insufficient data.
"""
h, w = arr.shape[:2]
scan_start = int(pattern_radius) + 30 # well outside plot
def is_pattern_line_local(r, g, b):
return int(r) < 50 and int(g) < 50 and int(b) > 200
# Scan from outside inward at many angles.
# To distinguish ring pixels (thin line, 1-3px) from text (multi-pixel blocks),
# we require a "thin line" pattern: the hit pixel should have white within
# a few pixels on the inner side (ring is thin, text is thick).
ring_points = []
for angle_deg in range(0, 360, 2): # every 2 degrees
angle_rad = math.radians(angle_deg)
cos_a = math.cos(angle_rad)
sin_a = math.sin(angle_rad)
for r in range(scan_start, 40, -1):
x = int(rough_cx + r * cos_a)
y = int(rough_cy + r * sin_a)
if not (0 <= x < w and 0 <= y < h):
continue
rv, gv, bv = arr[y, x, :3]
brightness = (int(rv) + int(gv) + int(bv)) / 3
if brightness >= 230 or is_pattern_line_local(rv, gv, bv):
continue
# Check that this is a thin line: within 5px inward there should be
# a white pixel (ring lines are 1-3px wide, text is wider)
is_thin = False
for dr in range(1, 6):
ix = int(rough_cx + (r - dr) * cos_a)
iy = int(rough_cy + (r - dr) * sin_a)
if 0 <= ix < w and 0 <= iy < h:
irv, igv, ibv = arr[iy, ix, :3]
ib = (int(irv) + int(igv) + int(ibv)) / 3
if ib > 240:
is_thin = True
break
if is_thin:
ring_points.append((x, y))
break
if len(ring_points) < 30:
return None
# First pass: use median radius from rough center to reject gross outliers
radii = [math.sqrt((x - rough_cx) ** 2 + (y - rough_cy) ** 2)
for x, y in ring_points]
median_r = sorted(radii)[len(radii) // 2]
points = [p for p, r in zip(ring_points, radii) if abs(r - median_r) < 10]
if len(points) < 20:
return None
# Iterative circle fit with outlier removal
for _ in range(3):
if len(points) < 15:
break
fit_cx, fit_cy, fit_r = _kasa_circle_fit(points)
residuals = []
for px, py in points:
d = math.sqrt((px - fit_cx) ** 2 + (py - fit_cy) ** 2)
residuals.append(abs(d - fit_r))
median_res = sorted(residuals)[len(residuals) // 2]
threshold = max(median_res * 2.5, 3.0)
points = [p for p, res in zip(points, residuals) if res < threshold]
if len(points) < 15:
return None
return _kasa_circle_fit(points)
# ── Pattern extraction ───────────────────────────────────────────────────────
def extract_polar_pattern(arr, cx, cy, outer_radius, db_max, db_range,
search_start=None):
"""Extract gain values by ray casting from center outward.
outer_radius: radius that maps to db_max (the outer grid ring) for dB calculation.
search_start: outermost radius to scan from (default: outer_radius + 5).
Set to pattern_radius + margin to avoid hitting legend markers
or other artifacts outside the actual pattern.
Returns 359 values (0-358 degrees) in absolute dBi.
Convention: 0 deg at the top of the polar plot; successive angles advance
counterclockwise on screen (per the angle math below).
"""
h, w = arr.shape[:2]
db_min = db_max - db_range
if search_start is None:
search_start = int(outer_radius) + 5
gains = []
for angle_deg in range(359):
# 0 deg at top (north), clockwise
angle_rad = math.radians(-angle_deg - 90) # CCW from 12 o'clock
cos_a = math.cos(angle_rad)
sin_a = math.sin(angle_rad)
# Cast ray from search_start inward, find outermost blue pixel.
# Use wider perpendicular band near horizontal crosshair angles
# (85-95° and 265-275°) where the crosshair gradient occludes pattern.
near_horizontal = ((85 <= angle_deg <= 95) or (265 <= angle_deg <= 275))
offsets = [-3, -2, -1, 0, 1, 2, 3] if near_horizontal else [-1, 0, 1]
found_r = 0
for r in range(search_start, 2, -1):
hit = False
for offset in offsets:
# Perpendicular offset
x = int(cx + r * cos_a + offset * sin_a)
y = int(cy + r * sin_a - offset * cos_a)
if 0 <= x < w and 0 <= y < h:
rv, gv, bv = arr[y, x, :3]
if is_pattern_line(rv, gv, bv):
hit = True
break
if hit:
found_r = r
break
# Map radius to dB (linear: center=db_min, outer=db_max)
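# e.g. db_min=-20, db_range=30: found_r == outer_radius/2 -> -20 + 0.5*30 = -5.0 dBi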
if found_r > 0:
gain = db_min + (found_r / outer_radius) * db_range
else:
gain = db_min
gains.append(round(gain, 1))
# Despike: replace single/double-degree nulls caused by ray-casting misses.
# The pattern line is 1-2px wide; at certain angles the 3px-wide ray can
# slip through, producing a sudden deep null surrounded by normal values.
gains = despike(gains)
return gains
def despike(gains, threshold=4.0):
"""Remove spike artifacts from extracted pattern data.
Detects points that dip sharply below their wider neighborhood
(grid lines, text labels, crosshair artifacts). Uses median of
multiple neighbor distances to avoid paired artifacts shielding
each other. Runs 4 passes to progressively clean.
"""
n = len(gains)
for _ in range(4):
smoothed = list(gains)
for i in range(n):
# Median of neighbors at distances 3, 5, 7 to resist paired artifacts
neighbors = []
for d in (3, 5, 7):
neighbors.append(gains[(i - d) % n])
neighbors.append(gains[(i + d) % n])
neighbors.sort()
median = (neighbors[2] + neighbors[3]) / 2 # median of 6
if gains[i] - median < -threshold:
smoothed[i] = round(median, 1)
gains = smoothed
return gains
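# Example (synthetic data): a single-degree null is pulled back to the
# neighborhood median while normal points are left alone:
#   gains = [0.0] * 359
#   gains[5] = -9.0
#   despike(gains)[5]  # -> 0.0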
# ── Calibration ──────────────────────────────────────────────────────────────
def cross_correlate(extracted, reference):
"""Find angular rotation that best aligns extracted with reference.
Uses Pearson correlation coefficient instead of RMSE to handle cases where
the extracted dynamic range is compressed (pattern doesn't reach grid edge).
Returns (best_offset, best_corr, rmse_at_best).
"""
n = len(extracted)
best_offset = 0
best_corr = -2.0
# Pre-compute reference stats
ref_mean = sum(reference[:n]) / n
ref_dev = [b - ref_mean for b in reference[:n]]
den_b = math.sqrt(sum(d * d for d in ref_dev))
if den_b == 0:
return 0, 0.0, 99.0
for offset in range(n):
rotated = extracted[offset:] + extracted[:offset]
a_mean = sum(rotated) / n
a_dev = [a - a_mean for a in rotated]
den_a = math.sqrt(sum(d * d for d in a_dev))
if den_a > 0:
corr = sum(a * b for a, b in zip(a_dev, ref_dev)) / (den_a * den_b)
if corr > best_corr:
best_corr = corr
best_offset = offset
# Compute RMSE at best offset for reporting
rotated = extracted[best_offset:] + extracted[:best_offset]
rmse = math.sqrt(sum((a - b) ** 2 for a, b in zip(rotated, reference[:n])) / n)
return best_offset, best_corr, rmse
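# Example (synthetic data): a cyclic shift of the reference is recovered
# exactly, with correlation ~1.0 and RMSE ~0.0:
#   ref = [math.sin(math.radians(d)) for d in range(359)]
#   cross_correlate(ref[-10:] + ref[:-10], ref)  # -> (10, ~1.0, ~0.0)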
def validate_with_el90(arr, el0_plots, ant_data, model, bands, db_max, db_range,
debug=False):
"""Validate extraction parameters using el90 from column 2 vs .ant reference.
Extracts el90 from the image using the detected grid_r, compares with .ant
reference data, and reports match quality. This is purely validation - the
grid_r and angles come from physical detection in the image, not from fitting.
Returns (detected_grid_r, el90_plots).
"""
h, w = arr.shape[:2]
col2_start = w // 4
col2_end = w // 2
print(f"\n === Validating with Elevation 90 (column 2) ===")
n_rings = int(db_range / 5) + 1
el90_plots = find_plots_in_column(arr, col2_start, col2_end, n_rings=n_rings)
print(f" Found {len(el90_plots)} el90 plots in column 2")
if not el90_plots:
print(" WARNING: No el90 plots found!")
return None, []
if len(el90_plots) != len(el0_plots):
print(f" WARNING: el90 count ({len(el90_plots)}) != el0 count ({len(el0_plots)})")
# Map sub-bands to .ant band keys
band_map = {
"2.4": "2.4", "2.45": "2.4",
"5.15": "5", "5.5": "5", "5.85": "5",
"6.0": "6", "6.5": "6", "6.5b": "6", "7.0": "6",
}
# Extract el90 for each band and compare with .ant reference
for i, band in enumerate(bands):
if i >= len(el90_plots):
break
plot = el90_plots[i]
cx, cy = plot["cx"], plot["cy"]
detected_r = plot.get("radius", plot["pattern_radius"])
ant_band = band_map.get(band, band)
if model not in ant_data or ant_band not in ant_data[model]:
print(f" {band}: no .ant ref for '{ant_band}', skip")
continue
ref_el90 = ant_data[model][ant_band].get("elevation", [])
if not ref_el90:
continue
pat_r = plot["pattern_radius"]
search_r = int(pat_r) + 10
extracted = extract_polar_pattern(arr, cx, cy, detected_r, db_max, db_range,
search_start=search_r)
ext_peak_val = max(extracted)
ext_peak = extracted.index(ext_peak_val)
ref_peak_val = max(ref_el90) if ref_el90 else 0
ref_peak = ref_el90.index(ref_peak_val) if ref_el90 else "?"
# Compute RMSE
n = min(len(extracted), len(ref_el90))
rmse = math.sqrt(sum((a - b) ** 2 for a, b in zip(extracted[:n], ref_el90[:n])) / n)
shift = math.sqrt((cx - plot["blue_cx"]) ** 2 +
(cy - plot["blue_cy"]) ** 2)
print(f" {band}: center=({cx},{cy}) shift={shift:.0f}px "
f"det_r={detected_r:.0f}")
print(f" ext_peak@{ext_peak}deg ref_peak@{ref_peak}deg "
f"RMSE={rmse:.1f}dB")
grid_r = int(el90_plots[0].get("radius", el90_plots[0]["pattern_radius"]))
print(f"\n Grid radius (detected): {grid_r}px")
return grid_r, el90_plots
# ── Debug image ──────────────────────────────────────────────────────────────
def extract_pattern_with_radii(arr, cx, cy, outer_radius, db_max, db_range,
search_start=None):
"""Extract pattern AND return the raw found_r for each angle (for visualization)."""
h, w = arr.shape[:2]
db_min = db_max - db_range
if search_start is None:
search_start = int(outer_radius) + 5
gains = []
radii = []
for angle_deg in range(359):
angle_rad = math.radians(-angle_deg - 90) # CCW from 12 o'clock
cos_a = math.cos(angle_rad)
sin_a = math.sin(angle_rad)
near_horizontal = ((85 <= angle_deg <= 95) or (265 <= angle_deg <= 275))
offsets = [-3, -2, -1, 0, 1, 2, 3] if near_horizontal else [-1, 0, 1]
found_r = 0
for r in range(search_start, 2, -1):
hit = False
for offset in offsets:
x = int(cx + r * cos_a + offset * sin_a)
y = int(cy + r * sin_a - offset * cos_a)
if 0 <= x < w and 0 <= y < h:
rv, gv, bv = arr[y, x, :3]
if is_pattern_line(rv, gv, bv):
hit = True
break
if hit:
found_r = r
break
if found_r > 0:
gain = db_min + (found_r / outer_radius) * db_range
else:
gain = db_min
gains.append(round(gain, 1))
radii.append(found_r)
gains = despike(gains)
return gains, radii
def save_debug_image(arr, img, el0_plots, el90_plots, calibrated_radius, bands,
db_max, db_range, out_path):
"""Save annotated image with detected centers, radii, and extracted patterns."""
debug_img = img.copy()
draw = ImageDraw.Draw(debug_img)
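# Note: only el0 plots are annotated below; el90_plots is accepted by the
# signature but not currently drawn.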
for plots, col_label, color, dot_color in [
(el0_plots, "EL0", "red", "yellow"),
]:
for i, p in enumerate(plots):
cx, cy = p["cx"], p["cy"]
if isinstance(calibrated_radius, list):
r = calibrated_radius[i] if i < len(calibrated_radius) else calibrated_radius[-1]
elif calibrated_radius:
r = calibrated_radius
else:
r = p.get("radius", p["pattern_radius"])
band = bands[i] if i < len(bands) else f"#{i}"
# Crosshair at grid center (large)
draw.line([(cx - 12, cy), (cx + 12, cy)], fill=color, width=2)
draw.line([(cx, cy - 12), (cx, cy + 12)], fill=color, width=2)
# Circle at grid_r
draw.ellipse([(cx - r, cy - r), (cx + r, cy + r)],
outline=color, width=1)
# Blue center marker (bounding box center)
bcx, bcy = p["blue_cx"], p["blue_cy"]
draw.line([(bcx - 5, bcy), (bcx + 5, bcy)], fill="cyan", width=1)
draw.line([(bcx, bcy - 5), (bcx, bcy + 5)], fill="cyan", width=1)
# Extract pattern and draw detected points
pat_r = p["pattern_radius"]
sr = max(int(pat_r) + 10, int(r) + 5)
_, radii = extract_pattern_with_radii(arr, cx, cy, r, db_max, db_range,
search_start=sr)
for angle_deg in range(359):
found_r = radii[angle_deg]
if found_r > 0:
angle_rad = math.radians(-angle_deg - 90) # CCW from 12 o'clock
px = int(cx + found_r * math.cos(angle_rad))
py = int(cy + found_r * math.sin(angle_rad))
# Draw small dot
draw.rectangle([(px, py), (px + 1, py + 1)], fill=dot_color)
# Label
label = f"{col_label} {band}"
draw.text((cx + 15, cy - 15), label, fill=color)
debug_img.save(out_path)
print(f" Debug image: {out_path}")
# ── Band assignment ──────────────────────────────────────────────────────────
def assign_bands(n_plots, filename=""):
"""Assign band labels to plots based on count and filename hints."""
fname = filename.lower()
if n_plots == 3:
# 3 plots: detect from filename
if "6ghz" in fname or "6 ghz" in fname:
return ["6.0", "6.5", "7.0"]
elif "5ghz" in fname or "5 ghz" in fname:
return ["5.15", "5.5", "5.85"]
elif "2.4" in fname or "2_4" in fname:
return ["2.4", "2.4b", "2.4c"]
return ["2.4", "5", "6"]
elif n_plots == 8:
return ["2.4", "5.15", "5.5", "5.85", "6.0", "6.5", "6.5b", "7.0"]
elif n_plots == 7:
return ["2.4", "5.15", "5.5", "5.85", "6.0", "6.5", "7.0"]
elif n_plots == 4:
return ["2.4", "5.15", "5.5", "5.85"]
elif n_plots == 2:
return ["2.4", "5"]
return [f"band{i}" for i in range(n_plots)]
def extract_model_name(filename):
"""Extract model name from image filename, stripping suffixes."""
name = filename.replace(" Total", "").replace(" ", "-")
# Strip -Summary-XGHz suffixes
import re
name = re.sub(r'-Summary-\d+(\.\d+)?GHz$', '', name, flags=re.IGNORECASE)
return name
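# Examples (hypothetical stems): "U6-Mesh Total" -> "U6-Mesh";
# "U7-Pro-Max-Summary-2.4GHz" -> "U7-Pro-Max"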
# ── Per-variant processing ──────────────────────────────────────────────────
def process_variant(arr, plots, bands, model_key, ant_data, db_max, db_range,
debug=False, grid_radius_override=None):
"""Extract el0 patterns for a set of plots (one variant).
Returns dict of {band: gains}.
"""
if not plots:
return {}
print(f"\n === Extracting Elevation 0 for {model_key} ===")
print(f" {len(plots)} plots")
results = {}
for i, (plot, band) in enumerate(zip(plots, bands)):
cx, cy = plot["cx"], plot["cy"]
pat_r = plot["pattern_radius"]
# Use per-band radius (from crosshair/histogram detection)
# or override if specified (can be single value or list)
if grid_radius_override:
if isinstance(grid_radius_override, list):
use_radius = grid_radius_override[i] if i < len(grid_radius_override) else grid_radius_override[-1]
else:
use_radius = grid_radius_override
else:
use_radius = plot.get("radius", pat_r)
search_r = max(int(pat_r) + 10, int(use_radius) + 5)
gains = extract_polar_pattern(arr, cx, cy, use_radius, db_max, db_range,
search_start=search_r)
# Mirror for "from above" convention
gains = [gains[0]] + gains[1:][::-1]
peak_val = max(gains)
peak_idx = gains.index(peak_val)
min_val = min(gains)
print(f" {band}: center=({cx},{cy}) r={use_radius:.0f} "
f"peak={peak_val:.1f}dBi@{peak_idx}deg min={min_val:.1f}dBi")
if debug:
for d in range(0, 359, 15):
print(f" [{d:3d}] = {gains[d]:6.1f} dB")
results[band] = gains
# Validation against .ant data
band_map = {
"2.4": "2.4", "2.45": "2.4",
"5.15": "5", "5.5": "5", "5.85": "5",
"6.0": "6", "6.5": "6", "6.5b": "6", "7.0": "6",
}
if model_key in ant_data:
print(f"\n === Validation: el0 vs el90 for {model_key} ===")
for band in results:
ant_band = band_map.get(band, band)
if ant_band in ant_data.get(model_key, {}):
ref_el90 = ant_data[model_key][ant_band].get("elevation", [])
if ref_el90:
el0 = results[band]
total = 0
close = 0
for a, b in zip(el0, ref_el90):
if a > -25 and b > -25:
total += 1
if abs(a - b) < db_range * 0.25:
close += 1
pct = (close / total * 100) if total > 0 else 0
print(f" {band} -> {ant_band}: {close}/{total} points "
f"within 25% ({pct:.0f}%)")
for d in [0, 90, 180, 270]:
diff = el0[d] - ref_el90[d]
print(f" [{d:3d}] el0={el0[d]:6.1f} el90={ref_el90[d]:6.1f} "
f"diff={diff:+.1f}")
return results
# ── Main ─────────────────────────────────────────────────────────────────────
def main():
debug = "--debug" in sys.argv
args = [a for a in sys.argv[1:] if not a.startswith("--")]
# Parse named arguments
db_max = None
db_min = -20.0
variants = None
cx_shift = 0
cy_shift = None
n_bands = None
grid_radius_override = None
cx_override = None
cy_override = None
for i, a in enumerate(sys.argv):
if a == "--db-max" and i + 1 < len(sys.argv):
db_max = float(sys.argv[i + 1])
if a == "--db-min" and i + 1 < len(sys.argv):
db_min = float(sys.argv[i + 1])
if a == "--variants" and i + 1 < len(sys.argv):
variants = [v.strip() for v in sys.argv[i + 1].split(",")]
if a == "--cx-shift" and i + 1 < len(sys.argv):
cx_shift = int(sys.argv[i + 1])
if a == "--cy-shift" and i + 1 < len(sys.argv):
val = sys.argv[i + 1]
if "," in val:
cy_shift = [int(v) for v in val.split(",")]
else:
cy_shift = int(val)
if a == "--n-bands" and i + 1 < len(sys.argv):
n_bands = int(sys.argv[i + 1])
if a == "--grid-radius" and i + 1 < len(sys.argv):
val = sys.argv[i + 1]
if "," in val:
grid_radius_override = [int(v) for v in val.split(",")]
else:
grid_radius_override = int(val)
if a == "--cx" and i + 1 < len(sys.argv):
cx_override = int(sys.argv[i + 1])
if a == "--cy" and i + 1 < len(sys.argv):
cy_override = int(sys.argv[i + 1])
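# Example invocation (hypothetical image name):
#   python extract-elevation0-from-images.py U7-Pro-Summary.png --db-max 10 --debug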
if not args:
print("Usage: python extract-elevation0-from-images.py "
"--db-max [--variants narrow,wide] [--cy-shift N] [--n-bands N] [--db-min -20] [--debug]")
print("\n --db-max is REQUIRED. Read it from the outer ring label on the polar plot.")
print(" Common values: 10 (indoor APs), 15 (outdoor APs)")
print("\n --variants: Split rows into named variants (e.g., narrow,wide).")
print(" Top rows = first variant, bottom rows = second variant.")
sys.exit(1)
if db_max is None:
print("ERROR: --db-max is required. Read the outer ring dBi label from the polar plot image.")
print(" Common values: 10 (indoor APs like U7-Pro-XGS), 15 (outdoor APs like U7-Outdoor)")
sys.exit(1)
db_range = db_max - db_min
image_path = Path(args[0])
if not image_path.exists():
print(f"Error: {image_path} not found")
sys.exit(1)
print(f"Processing: {image_path.name}")
print(f" dB scale: center={db_min} dBi, outer ring={db_max} dBi, range={db_range} dB")
if cx_shift or cy_shift is not None:
print(f" Center shift: dx={cx_shift}, dy={cy_shift} (positive = right/down on page)")
if variants:
print(f" Variants: {variants}")
img = Image.open(image_path)
arr = np.array(img)
h, w = arr.shape[:2]
print(f" Image: {w}x{h}")
# Number of grid rings (5 dB spacing)
n_rings = int(db_range / 5) + 1
# ── Phase 1: Find el0 plots in column 1 ──
col1_end = w // 4
el0_plots = find_plots_in_column(arr, 0, col1_end, n_expected=n_bands,
n_rings=n_rings)
print(f" Found {len(el0_plots)} el0 plots in column 1")
if not el0_plots:
print(" ERROR: No plots found!")
sys.exit(1)
# Apply center overrides (cx_override sets all plots to same cx)
if cx_override is not None or cy_override is not None:
for p in el0_plots:
if cx_override is not None:
p["cx"] = cx_override
if cy_override is not None:
p["cy"] = cy_override
print(f" Center override: cx={cx_override}, cy={cy_override}")
# Apply cx_shift (immediately, independent of cy)
if cx_shift:
for p in el0_plots:
p["cx"] += cx_shift
for i, p in enumerate(el0_plots):
shift_dist = math.sqrt((p["cx"] - p["blue_cx"]) ** 2 +
(p["cy"] - p["blue_cy"]) ** 2)
print(f" #{i}: grid_center=({p['cx']},{p['cy']}) "
f"blue_center=({p['blue_cx']},{p['blue_cy']}) "
f"shift={shift_dist:.0f}px grid_r={p.get('radius', 0):.0f} "
f"pat_r={p['pattern_radius']:.0f} "
f"n_grid={p['n_grid_pixels']}")
# ── Model name ──
model = extract_model_name(image_path.stem)
# ── Load .ant reference data ──
ant_json = None
search = image_path.parent
for _ in range(8):
candidate = search / "src" / "NetworkOptimizer.Web" / "wwwroot" / "data" / "antenna-patterns.json"
if candidate.exists():
ant_json = candidate
break
search = search.parent
ant_data = {}
if ant_json:
with open(ant_json) as f:
ant_data = json.load(f)
print(f" Loaded .ant data from: {ant_json}")
else:
print(f"\n No antenna-patterns.json found for validation")
# ── Phase 2: Validate with el90 from column 2 ──
# Also use el90 cy values for el0 plots when cx is overridden
# (el90 often detects vertical centers more reliably)
el90_plots = []
n_total = len(el0_plots)
# Pre-detect el90 plots to borrow cy values when el0 centering is overridden
# Only borrow if el90 spacing is more consistent than el0 spacing
if cx_override is not None and len(el0_plots) >= 3:
col2_start = w // 4
col2_end = w // 2
n_rings = int(db_range / 5) + 1
el90_pre = find_plots_in_column(arr, col2_start, col2_end, n_rings=n_rings)
if len(el90_pre) == len(el0_plots):
def _spacing_variance(plots):
cys = [p["cy"] for p in plots]
spacings = [cys[i+1] - cys[i] for i in range(len(cys)-1)]
if not spacings:
return float('inf')
mean = sum(spacings) / len(spacings)
return sum((s - mean) ** 2 for s in spacings) / len(spacings)
el0_var = _spacing_variance(el0_plots)
el90_var = _spacing_variance(el90_pre)
print(f" cy spacing variance: el0={el0_var:.1f} el90={el90_var:.1f}")
if el90_var < el0_var:
for p0, p90 in zip(el0_plots, el90_pre):
old_cy = p0["cy"]
p0["cy"] = p90["cy"]
if old_cy != p90["cy"]:
print(f" cy from el90: {old_cy}->{p90['cy']}")
else:
print(f" keeping el0 cy (more consistent than el90)")
# Apply cy_shift after el90 borrowing
if cy_shift is not None:
if isinstance(cy_shift, list):
for i, p in enumerate(el0_plots):
shift = cy_shift[i] if i < len(cy_shift) else cy_shift[-1]
p["cy"] += shift
print(f" cy_shift (per-plot): {cy_shift}")
else:
for p in el0_plots:
p["cy"] += cy_shift
print(f" cy_shift: {cy_shift:+d}")
if variants:
# Split plots evenly across variants
n_per = n_total // len(variants)
if n_total % len(variants) != 0:
print(f" WARNING: {n_total} plots doesn't divide evenly by "
f"{len(variants)} variants ({n_per} each, {n_total % len(variants)} extra)")
bands_per_variant = assign_bands(n_per, image_path.name)
# Flat band list for el90 validation (repeated for each variant)
all_bands = bands_per_variant * len(variants)
if ant_json:
# Validate using first variant's model key for el90
first_key = f"{model}:{variants[0]}"
_, el90_plots = validate_with_el90(
arr, el0_plots, ant_data, first_key, all_bands, db_max, db_range, debug)
# Process each variant
output = {}
for vi, variant_name in enumerate(variants):
start = vi * n_per
end = start + n_per
variant_plots = el0_plots[start:end]
model_key = f"{model}:{variant_name}"
results = process_variant(
arr, variant_plots, bands_per_variant, model_key, ant_data,
db_max, db_range, debug, grid_radius_override)
output[model_key] = {"elevation_0": results}
# Save output
out_path = image_path.with_suffix(".elevation0.json")
with open(out_path, "w") as f:
json.dump(output, f, indent=2)
print(f"\n Model: {model} (variants: {', '.join(variants)})")
print(f" Saved: {out_path}")
else:
# Original single-variant path
bands = assign_bands(n_total, image_path.name)
if ant_json:
_, el90_plots = validate_with_el90(
arr, el0_plots, ant_data, model, bands, db_max, db_range, debug)
results = process_variant(
arr, el0_plots, bands, model, ant_data, db_max, db_range, debug,
grid_radius_override)
output = {model: {"elevation_0": results}}
out_path = image_path.with_suffix(".elevation0.json")
with open(out_path, "w") as f:
json.dump(output, f, indent=2)
print(f"\n Model: {model}")
print(f" Saved: {out_path}")
# ── Save debug image ──
if debug:
all_bands = assign_bands(n_total, image_path.name) if not variants else (
assign_bands(n_total // len(variants), image_path.name) * len(variants))
debug_path = image_path.with_suffix(".debug.png")
save_debug_image(arr, img, el0_plots, el90_plots, grid_radius_override,
all_bands, db_max, db_range, debug_path)
if __name__ == "__main__":
main()
================================================
FILE: scripts/install-macos-native.sh
================================================
#!/bin/bash
# Install Network Optimizer natively on macOS
# Usage: ./scripts/install-macos-native.sh
#
# This script:
# 1. Installs prerequisites via Homebrew
# 2. Builds the application (or uses pre-built if available)
# 3. Signs binaries for macOS
# 4. Sets up OpenSpeedTest with nginx for browser-based speed testing
# 5. Creates launchd service for auto-start
set -e
# Refuse to run as root - everything installs to $HOME, root is never needed
if [ "$(id -u)" = "0" ]; then
echo "Error: Do not run this script with sudo or as root."
echo ""
echo "This installer puts everything in your home directory and does not need"
echo "root access. Running with sudo causes file ownership problems that break"
echo "future upgrades."
echo ""
echo "If you previously installed with sudo, just run the script normally:"
echo " ./scripts/install-macos-native.sh"
echo ""
echo "The script will detect and clean up any root-owned files automatically."
exit 1
fi
# Configuration
INSTALL_DIR="$HOME/network-optimizer"
DATA_DIR="$HOME/Library/Application Support/NetworkOptimizer"
LAUNCH_AGENT_DIR="$HOME/Library/LaunchAgents"
LAUNCH_AGENT_FILE="net.ozarkconnect.networkoptimizer.plist"
OLD_LAUNCH_AGENT_FILE="com.networkoptimizer.app.plist" # For migration from older installs
# Detect architecture
ARCH=$(uname -m)
if [ "$ARCH" = "arm64" ]; then
RUNTIME="osx-arm64"
BREW_PREFIX="/opt/homebrew"
else
RUNTIME="osx-x64"
BREW_PREFIX="/usr/local"
fi
echo "=== Network Optimizer macOS Native Installation ==="
echo ""
echo "Architecture: $ARCH ($RUNTIME)"
echo "Install directory: $INSTALL_DIR"
echo ""
# Check if running from repo root
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
if [ ! -f "$REPO_ROOT/src/NetworkOptimizer.Web/NetworkOptimizer.Web.csproj" ]; then
echo "Error: This script must be run from the NetworkOptimizer repository."
echo "Clone the repo first: git clone https://github.com/Ozark-Connect/NetworkOptimizer.git"
exit 1
fi
# Check for root-owned remnants from a previous sudo installation.
# If someone ran this script with sudo, all files and processes end up owned by root.
# A normal user can't overwrite those files or kill those processes, so the next
# install fails. This function detects the problem and fixes it with one sudo prompt.
check_root_remnants() {
local root_files=false
# Check for root-owned install directories and files (visible without sudo)
for dir in "$INSTALL_DIR" "$DATA_DIR"; do
if [ -d "$dir" ] && [ "$(stat -f '%Su' "$dir" 2>/dev/null)" = "root" ]; then
root_files=true
fi
done
if [ -f "$LAUNCH_AGENT_DIR/$LAUNCH_AGENT_FILE" ] && \
[ "$(stat -f '%Su' "$LAUNCH_AGENT_DIR/$LAUNCH_AGENT_FILE" 2>/dev/null)" = "root" ]; then
root_files=true
fi
# Check for root-owned .NET directories (blocks dotnet publish)
for dotdir in "$HOME/.nuget" "$HOME/.dotnet"; do
if [ -d "$dotdir" ] && [ "$(stat -f '%Su' "$dotdir" 2>/dev/null)" = "root" ]; then
root_files=true
fi
done
if [ "$root_files" = false ]; then
return 0
fi
echo "Detected root-owned files from a previous sudo installation."
echo "This needs sudo to fix. You'll be prompted for your password once."
echo ""
read -rp "Press Enter to clean up, or Ctrl+C to cancel... "
# Validate sudo credentials upfront so a failed password doesn't leave
# things half-cleaned (some processes killed but files still root-owned)
if ! sudo -v; then
echo "Error: sudo authentication failed. Please re-run the script and try again."
exit 1
fi
local current_user
current_user=$(whoami)
# Now that we have sudo, check for root-owned processes on our ports.
# Regular users can't see root-owned sockets with lsof on macOS.
local root_pids=""
for port in 8042 3005 5201; do
local pids
pids=$(sudo lsof -i ":$port" -sTCP:LISTEN -t 2>/dev/null) || true
for pid in $pids; do
local owner
owner=$(ps -o user= -p "$pid" 2>/dev/null | tr -d ' ') || true
if [ "$owner" = "root" ]; then
root_pids="$root_pids $pid"
fi
done
done
if [ -n "$root_pids" ]; then
echo "Stopping root-owned processes (PIDs:$root_pids)..."
for pid in $root_pids; do
sudo kill "$pid" 2>/dev/null || true
done
sleep 2
fi
# Unload any root-loaded launchd services
sudo launchctl unload "$LAUNCH_AGENT_DIR/$LAUNCH_AGENT_FILE" 2>/dev/null || true
sudo launchctl unload "$LAUNCH_AGENT_DIR/$OLD_LAUNCH_AGENT_FILE" 2>/dev/null || true
# Fix ownership on install directories
if [ -d "$INSTALL_DIR" ]; then
echo "Fixing ownership: $INSTALL_DIR"
sudo chown -R "$current_user:staff" "$INSTALL_DIR"
fi
if [ -d "$DATA_DIR" ]; then
echo "Fixing ownership: $DATA_DIR"
sudo chown -R "$current_user:staff" "$DATA_DIR"
fi
for plist in "$LAUNCH_AGENT_FILE" "$OLD_LAUNCH_AGENT_FILE"; do
if [ -f "$LAUNCH_AGENT_DIR/$plist" ]; then
sudo chown "$current_user:staff" "$LAUNCH_AGENT_DIR/$plist"
fi
done
# Fix .NET directories
for dotdir in "$HOME/.nuget" "$HOME/.dotnet"; do
if [ -d "$dotdir" ] && [ "$(stat -f '%Su' "$dotdir" 2>/dev/null)" = "root" ]; then
echo "Fixing ownership: ${dotdir/#$HOME/~}"
sudo chown -R "$current_user:staff" "$dotdir"
fi
done
echo ""
echo "Cleanup complete. Continuing with installation..."
echo ""
}
check_root_remnants
# Backup existing installation if present
if [ -d "$DATA_DIR" ] || [ -d "$INSTALL_DIR" ]; then
BACKUP_DIR="$HOME/network-optimizer-backup-$(date +%Y%m%d-%H%M%S)"
echo "Backing up existing installation to $BACKUP_DIR..."
mkdir -p "$BACKUP_DIR"
# Backup data directory contents (DB, keys, etc.)
if [ -f "$DATA_DIR/network_optimizer.db" ]; then
cp "$DATA_DIR/network_optimizer.db" "$BACKUP_DIR/"
echo " ✓ Database backed up"
fi
if [ -f "$DATA_DIR/.credential_key" ]; then
cp "$DATA_DIR/.credential_key" "$BACKUP_DIR/"
echo " ✓ Credential key backed up"
fi
if [ -d "$DATA_DIR/keys" ]; then
cp -r "$DATA_DIR/keys" "$BACKUP_DIR/"
echo " ✓ Encryption keys backed up"
fi
# Backup start.sh (has custom env config)
if [ -f "$INSTALL_DIR/start.sh" ]; then
cp "$INSTALL_DIR/start.sh" "$BACKUP_DIR/"
echo " ✓ Startup script backed up"
fi
echo "Backup complete: $BACKUP_DIR"
echo ""
fi
# Step 1: Install prerequisites
echo "[1/9] Installing prerequisites..."
if ! command -v brew &> /dev/null; then
echo "Installing Homebrew..."
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
eval "$($BREW_PREFIX/bin/brew shellenv)"
fi
# Ensure brew is in PATH
eval "$($BREW_PREFIX/bin/brew shellenv)"
echo "Installing required packages..."
brew install sshpass iperf3 nginx go 2>/dev/null || true
# Check for .NET SDK
if ! command -v dotnet &> /dev/null; then
echo "Installing .NET SDK..."
brew install dotnet
fi
# Verify .NET version
DOTNET_VERSION=$(dotnet --version 2>/dev/null | cut -d. -f1)
if [ "$DOTNET_VERSION" -lt 8 ]; then
echo "Warning: .NET $DOTNET_VERSION detected. Network Optimizer requires .NET 8 or later."
echo "Updating .NET SDK..."
brew upgrade dotnet || brew install dotnet
fi
# Step 2: Clean up old installation files (preserving user config and logs)
echo ""
echo "[2/9] Cleaning up old installation files..."
if [ -d "$INSTALL_DIR" ]; then
cd "$INSTALL_DIR"
# Remove old non-single-file artifacts (DLLs, pdb, runtimes folder, etc.)
rm -rf *.dll *.pdb *.json runtimes/ BuildHost-*/ LatoFont/ 2>/dev/null || true
# Note: start.sh, logs/, SpeedTest/, wwwroot/, Templates/ are preserved or rebuilt
fi
# Step 3: Build the application
echo ""
echo "[3/9] Building Network Optimizer for $RUNTIME..."
cd "$REPO_ROOT"
# Ensure NuGet cache is writable (stale cache from brew or failed restores can block builds)
if [ -d "$HOME/.nuget/packages" ] && ! touch "$HOME/.nuget/packages/.write-test" 2>/dev/null; then
echo "NuGet package cache has permission issues, clearing..."
chmod -R u+w "$HOME/.nuget/packages" 2>/dev/null || true
rm -rf "$HOME/.nuget/packages"
if [ -d "$HOME/.nuget/packages" ]; then
echo "Error: Could not clear NuGet cache. Try running: sudo rm -rf ~/.nuget/packages"
exit 1
fi
fi
rm -f "$HOME/.nuget/packages/.write-test" 2>/dev/null
dotnet publish src/NetworkOptimizer.Web/NetworkOptimizer.Web.csproj \
-c Release \
-r "$RUNTIME" \
--self-contained \
-p:PublishSingleFile=true \
-p:IncludeNativeLibrariesForSelfExtract=true \
-p:EnableCompressionInSingleFile=true \
-p:DebugType=None \
-o "$INSTALL_DIR"
# Step 3b: Build Go binaries
echo ""
echo "[3b/9] Building Go binaries..."
if command -v go &> /dev/null; then
mkdir -p "$INSTALL_DIR/tools"
# Get version from git tags for Go binary version stamps
GO_VERSION=$(cd "$REPO_ROOT" && git describe --tags --always 2>/dev/null || echo "dev")
GO_VERSION="${GO_VERSION#v}" # strip leading v
echo "Go binary version: $GO_VERSION"
# Detect Go architecture for local binary
GO_ARCH="amd64"
if [ "$ARCH" = "arm64" ]; then
GO_ARCH="arm64"
fi
CFSPEEDTEST_SRC="$REPO_ROOT/src/cfspeedtest"
if [ -d "$CFSPEEDTEST_SRC" ]; then
cd "$CFSPEEDTEST_SRC"
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -trimpath \
-ldflags "-s -w -X main.version=$GO_VERSION" \
-o "$INSTALL_DIR/tools/cfspeedtest-linux-arm64" .
echo "Built cfspeedtest for linux/arm64"
else
echo "Warning: cfspeedtest source not found at $CFSPEEDTEST_SRC"
fi
UWNSPEEDTEST_SRC="$REPO_ROOT/src/uwnspeedtest"
if [ -d "$UWNSPEEDTEST_SRC" ]; then
cd "$UWNSPEEDTEST_SRC"
# Build local binary for server-side WAN speed tests
CGO_ENABLED=0 GOOS=darwin GOARCH=$GO_ARCH go build -a -trimpath \
-ldflags "-s -w -X main.version=$GO_VERSION" \
-o "$INSTALL_DIR/tools/uwnspeedtest-darwin-$GO_ARCH" .
echo "Built uwnspeedtest for darwin/$GO_ARCH (local)"
# Build gateway binary for deployment via SSH to UniFi gateways
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -trimpath \
-ldflags "-s -w -X main.version=$GO_VERSION" \
-o "$INSTALL_DIR/tools/uwnspeedtest-linux-arm64" .
echo "Built uwnspeedtest for linux/arm64 (gateway)"
else
echo "Warning: uwnspeedtest source not found at $UWNSPEEDTEST_SRC"
fi
WANSTEER_SRC="$REPO_ROOT/src/wansteer"
if [ -d "$WANSTEER_SRC" ]; then
cd "$WANSTEER_SRC"
# Build gateway binary for WAN steering (deployed via SSH to UniFi gateways)
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -trimpath \
-ldflags "-s -w -X main.version=$GO_VERSION" \
-o "$INSTALL_DIR/tools/wansteer-linux-arm64" .
echo "Built wansteer for linux/arm64 (gateway)"
else
echo "Warning: wansteer source not found at $WANSTEER_SRC"
fi
else
echo "Warning: Go not installed - speed test binaries not available"
echo " Install with: brew install go"
fi
# Step 4: Sign binary (single-file executable has native libs embedded)
echo ""
echo "[4/9] Signing binary..."
cd "$INSTALL_DIR"
codesign --force --sign - NetworkOptimizer.Web
echo "Verifying signature..."
codesign -v NetworkOptimizer.Web
# Step 5: Create startup script
echo ""
echo "[5/9] Creating startup script..."
# Get local IP address for display purposes (app auto-detects its own IP)
LOCAL_IP=$(ipconfig getifaddr en0 2>/dev/null || ipconfig getifaddr en1 2>/dev/null || echo "your-mac-ip")
cat > "$INSTALL_DIR/start.sh" << EOF
#!/bin/bash
cd "\$(dirname "\$0")"
# Add Homebrew to PATH
export PATH="$BREW_PREFIX/bin:/usr/local/bin:\$PATH"
# Environment configuration
export TZ="${TZ:-America/Chicago}"
export ASPNETCORE_URLS="http://0.0.0.0:8042"
# Enable iperf3 server for CLI-based client speed testing (port 5201)
export Iperf3Server__Enabled=true
# OpenSpeedTest configuration (browser-based speed tests on port 3005)
export OPENSPEEDTEST_PORT=3005
# Optional: Set admin password (otherwise auto-generated on first run)
# export APP_PASSWORD="your-secure-password"
# Start the application
./NetworkOptimizer.Web
EOF
chmod +x "$INSTALL_DIR/start.sh"
# Restore backed up start.sh if it exists (preserves user's env config on upgrade)
if [ -n "${BACKUP_DIR:-}" ] && [ -f "$BACKUP_DIR/start.sh" ]; then
cp "$BACKUP_DIR/start.sh" "$INSTALL_DIR/start.sh"
echo " ✓ Restored custom startup configuration from backup"
fi
# Step 6: Create log directory
echo ""
echo "[6/9] Creating directories..."
mkdir -p "$INSTALL_DIR/logs"
mkdir -p "$DATA_DIR"
mkdir -p "$LAUNCH_AGENT_DIR"
# Step 7: Set up OpenSpeedTest with nginx
echo ""
echo "[7/9] Setting up OpenSpeedTest..."
SPEEDTEST_DIR="$INSTALL_DIR/SpeedTest"
mkdir -p "$SPEEDTEST_DIR"/{conf,logs,temp,html/assets/{css,js,fonts,images/icons}}
# Copy nginx configuration
if [ -f "$REPO_ROOT/src/OpenSpeedTest/index.html" ]; then
# Copy mime.types from Homebrew's nginx
if [ -f "$BREW_PREFIX/etc/nginx/mime.types" ]; then
cp "$BREW_PREFIX/etc/nginx/mime.types" "$SPEEDTEST_DIR/conf/"
else
echo "Warning: mime.types not found at $BREW_PREFIX/etc/nginx/mime.types"
fi
# Create nginx.conf optimized for SpeedTest (based on Docker config)
cat > "$SPEEDTEST_DIR/conf/nginx.conf" << 'NGINXCONF'
# Run in foreground so the app can track the process
daemon off;
worker_processes 1;
error_log logs/error.log;
pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
tcp_nodelay on;
tcp_nopush on;
keepalive_timeout 65;
access_log off;
gzip off;
server {
listen 3005;
server_name _;
root html;
index index.html;
client_max_body_size 50m;
error_page 405 =200 $uri;
log_not_found off;
server_tokens off;
error_log /dev/null;
# Performance tuning
open_file_cache max=200000 inactive=20s;
open_file_cache_valid 30s;
open_file_cache_min_uses 2;
open_file_cache_errors off;
# Upload endpoint - reads entire POST body before responding.
# Without this, the error_page 405 hack responds before the body is
# fully received, causing ERR_CONNECTION_RESET behind reverse proxies.
location = /upload {
add_header 'Access-Control-Allow-Origin' "*" always;
add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
add_header Cache-Control 'no-store, no-cache, max-age=0, no-transform';
client_body_buffer_size 35m;
client_max_body_size 50m;
proxy_pass http://127.0.0.1:3005/upload-sink;
proxy_set_header Host $host;
}
location = /upload-sink {
add_header 'Access-Control-Allow-Origin' "*" always;
return 200;
}
location / {
add_header 'Access-Control-Allow-Origin' "*" always;
add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
add_header Cache-Control 'no-store, no-cache, max-age=0, no-transform';
if_modified_since off;
expires off;
etag off;
if ($request_method = OPTIONS) {
add_header 'Access-Control-Allow-Credentials' "true";
add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With' always;
add_header 'Access-Control-Allow-Origin' "$http_origin" always;
add_header 'Access-Control-Allow-Methods' "GET, POST, OPTIONS" always;
return 200;
}
}
location ~* ^.+\.(?:css|cur|js|jpe?g|gif|htc|ico|png|html|xml|otf|ttf|eot|woff|woff2|svg)$ {
access_log off;
expires -1;
add_header Cache-Control "no-cache, no-store, must-revalidate";
add_header Vary Accept-Encoding;
tcp_nodelay off;
open_file_cache max=3000 inactive=120s;
open_file_cache_valid 45s;
open_file_cache_min_uses 2;
open_file_cache_errors off;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript image/svg+xml;
}
}
}
NGINXCONF
# Copy OpenSpeedTest HTML files
cp "$REPO_ROOT/src/OpenSpeedTest/index.html" "$SPEEDTEST_DIR/html/"
cp "$REPO_ROOT/src/OpenSpeedTest/hosted.html" "$SPEEDTEST_DIR/html/"
cp "$REPO_ROOT/src/OpenSpeedTest/downloading" "$SPEEDTEST_DIR/html/"
cp "$REPO_ROOT/src/OpenSpeedTest/upload" "$SPEEDTEST_DIR/html/"
# Copy assets
cp "$REPO_ROOT/src/OpenSpeedTest/assets/css/"* "$SPEEDTEST_DIR/html/assets/css/" 2>/dev/null || true
cp "$REPO_ROOT/src/OpenSpeedTest/assets/js/"* "$SPEEDTEST_DIR/html/assets/js/" 2>/dev/null || true
cp "$REPO_ROOT/src/OpenSpeedTest/assets/fonts/"* "$SPEEDTEST_DIR/html/assets/fonts/" 2>/dev/null || true
cp "$REPO_ROOT/src/OpenSpeedTest/assets/images/"*.svg "$SPEEDTEST_DIR/html/assets/images/" 2>/dev/null || true
cp "$REPO_ROOT/src/OpenSpeedTest/assets/images/icons/"* "$SPEEDTEST_DIR/html/assets/images/icons/" 2>/dev/null || true
# Copy config.js template and inject runtime values (same approach as Docker entrypoint)
cp "$REPO_ROOT/src/OpenSpeedTest/assets/js/config.js" "$SPEEDTEST_DIR/html/assets/js/config.js"
# Replace placeholders - use __DYNAMIC__ so URL is constructed client-side from browser location
sed -i '' "s|__SAVE_DATA__|true|g" "$SPEEDTEST_DIR/html/assets/js/config.js"
sed -i '' "s|__SAVE_DATA_URL__|__DYNAMIC__|g" "$SPEEDTEST_DIR/html/assets/js/config.js"
sed -i '' "s|__API_PATH__|/api/public/speedtest/results|g" "$SPEEDTEST_DIR/html/assets/js/config.js"
SPEEDTEST_AVAILABLE=true
echo "OpenSpeedTest files installed"
else
echo "Warning: OpenSpeedTest source files not found. Skipping SpeedTest setup."
echo "Browser-based speed testing will not be available."
SPEEDTEST_AVAILABLE=false
fi
# Step 8: Create launchd plist for main app
echo ""
echo "[8/9] Creating launchd service..."
cat > "$LAUNCH_AGENT_DIR/$LAUNCH_AGENT_FILE" << EOF
Label
net.ozarkconnect.networkoptimizer
ProgramArguments
$INSTALL_DIR/start.sh
WorkingDirectory
$INSTALL_DIR
KeepAlive
RunAtLoad
StandardOutPath
$INSTALL_DIR/logs/stdout.log
StandardErrorPath
$INSTALL_DIR/logs/stderr.log
EOF
# Step 9: Start services
# Note: The app manages nginx and iperf3 internally - no separate launchd services needed
echo ""
echo "[9/9] Starting services..."
# Migrate from old plist name if present
if [ -f "$LAUNCH_AGENT_DIR/$OLD_LAUNCH_AGENT_FILE" ]; then
echo "Migrating from old service name..."
launchctl unload "$LAUNCH_AGENT_DIR/$OLD_LAUNCH_AGENT_FILE" 2>/dev/null || true
rm -f "$LAUNCH_AGENT_DIR/$OLD_LAUNCH_AGENT_FILE"
# Also remove the old speedtest plist if it exists
launchctl unload "$LAUNCH_AGENT_DIR/com.networkoptimizer.speedtest.plist" 2>/dev/null || true
rm -f "$LAUNCH_AGENT_DIR/com.networkoptimizer.speedtest.plist"
fi
# Gracefully stop any orphaned processes from previous installs
pkill -f "NetworkOptimizer.Web" 2>/dev/null || true
pkill iperf3 2>/dev/null || true
pkill nginx 2>/dev/null || true
sleep 2 # Give processes time to shut down gracefully
# Unload if already loaded (ignore errors)
launchctl unload "$LAUNCH_AGENT_DIR/$LAUNCH_AGENT_FILE" 2>/dev/null || true
launchctl load "$LAUNCH_AGENT_DIR/$LAUNCH_AGENT_FILE"
# Wait for startup and verify
echo ""
echo "Waiting for service to start..."
# Check launchd service status
if launchctl list | grep -q "net.ozarkconnect.networkoptimizer"; then
echo "✓ Network Optimizer service is running"
else
echo "✗ Network Optimizer service failed to start"
echo " Check logs: tail -f $INSTALL_DIR/logs/stderr.log"
fi
# Wait for health endpoint with retries
echo "Waiting for application to be ready..."
HEALTH_OK=false
for i in {1..12}; do
if curl -sL http://localhost:8042/api/health | grep -qi "healthy"; then
HEALTH_OK=true
break
fi
sleep 5
done
echo ""
echo "=== Installation Complete ==="
echo ""
if [ "$HEALTH_OK" = true ]; then
echo "✓ Health check passed"
else
echo "✗ Health check failed after 60 seconds"
echo " The app may still be starting. Check logs: tail -f $INSTALL_DIR/logs/stdout.log"
fi
echo ""
echo "=== Access Information ==="
echo ""
echo "Web UI: http://localhost:8042"
echo " http://$LOCAL_IP:8042 (from other devices)"
if [ "$SPEEDTEST_AVAILABLE" = true ]; then
echo ""
echo "SpeedTest: http://localhost:3005"
echo " http://$LOCAL_IP:3005 (from other devices)"
fi
echo ""
echo "On first run, check logs for the auto-generated admin password:"
echo " grep -A5 'AUTO-GENERATED' $INSTALL_DIR/logs/stdout.log"
echo ""
echo "Service management:"
echo " Stop: launchctl unload ~/Library/LaunchAgents/$LAUNCH_AGENT_FILE"
echo " Start: launchctl load ~/Library/LaunchAgents/$LAUNCH_AGENT_FILE"
echo " Logs: tail -f $INSTALL_DIR/logs/stdout.log"
echo ""
================================================
FILE: scripts/parse-antenna-patterns.ps1
================================================
<#
.SYNOPSIS
Parses Ubiquiti .ant antenna pattern files from zip archives into a single JSON file.
.DESCRIPTION
Extracts .ant files from all zip archives in the antenna-patterns directory,
parses the gain values, and outputs a consolidated JSON file for the web app.
Each .ant file contains 719+ gain values:
- 360 azimuth values (0-359 degrees)
- 359 elevation values (0-358 degrees)
Files may be UTF-16LE or UTF-8 encoded (both are tried automatically).
macOS resource fork files (._prefix) are skipped.
Antenna variant files (e.g., U7-Outdoor-Omni-Antenna.zip) are stored under
variant keys like "U7-Outdoor:omni". The base model uses the standard key.
.PARAMETER InputDir
Directory containing .zip files with .ant patterns.
Default: research/wifi-optimizer/antenna-patterns/
.PARAMETER OutputFile
Output JSON file path.
Default: src/NetworkOptimizer.Web/wwwroot/data/antenna-patterns.json
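.EXAMPLE
./scripts/parse-antenna-patterns.ps1
Parses every zip in the default input directory and writes the consolidated JSON.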
#>
param(
[string]$InputDir = (Join-Path $PSScriptRoot ".." "research" "wifi-optimizer" "antenna-patterns"),
[string]$OutputFile = (Join-Path $PSScriptRoot ".." "src" "NetworkOptimizer.Web" "wwwroot" "data" "antenna-patterns.json")
)
$ErrorActionPreference = "Stop"
Add-Type -AssemblyName System.IO.Compression.FileSystem
# Band name extraction from filename
function Get-BandFromFilename($filename) {
if ($filename -match "2\.4GHz|2\.4 GHz|2_4GHz|2\.45GHz") { return "2.4" }
if ($filename -match "(? ("U7-Outdoor", "omni")
# "UACC-UK-Ultra-Panel-Antenna" -> ("UK-Ultra", "panel")
# "U7-Pro" -> ("U7-Pro", $null)
function Get-ModelAndVariant($zipName) {
# Variant patterns and their normalized names
$variantPatterns = @(
@{ Pattern = "-Omni-Antenna$"; Variant = "omni"; StripPrefix = "UACC-" },
@{ Pattern = "-Panel-Antenna$"; Variant = "panel"; StripPrefix = "UACC-" },
@{ Pattern = "-Narrow-Angle-High-Gain$"; Variant = "narrow" },
@{ Pattern = "-Narrow-Angle$"; Variant = "narrow" },
@{ Pattern = "-Wide-Angle-Low-Gain$"; Variant = "wide" },
@{ Pattern = "-Wide-Angle$"; Variant = "wide" }
)
foreach ($vp in $variantPatterns) {
if ($zipName -match $vp.Pattern) {
$baseName = $zipName -replace $vp.Pattern, ""
# Strip accessory prefix (UACC-) if present
if ($vp.StripPrefix -and $baseName.StartsWith($vp.StripPrefix)) {
$baseName = $baseName.Substring($vp.StripPrefix.Length)
}
return @{ Model = $baseName; Variant = $vp.Variant }
}
}
return @{ Model = $zipName; Variant = $null }
}
# Try to parse gain values from raw content string
function Parse-GainValues($content) {
$values = @()
foreach ($line in $content.Split("`n")) {
$trimmed = $line.Trim()
if ($trimmed -ne "" -and $trimmed -match "^-?\d") {
$values += [float]$trimmed
}
}
return $values
}
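# Example: lines "-3.5", "10", "peak" parse to @(-3.5, 10); lines that do
# not start with a digit or minus sign are skipped.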
# Parse a single .ant file, trying UTF-16LE first then UTF-8
function Parse-AntFile($entry) {
# Try UTF-16LE first (newer files)
$stream = $entry.Open()
$reader = [System.IO.StreamReader]::new($stream, [System.Text.Encoding]::Unicode)
$content = $reader.ReadToEnd()
$reader.Dispose()
$stream.Dispose()
$values = Parse-GainValues $content
# Fall back to UTF-8 (older/macOS-created files)
if ($values.Count -lt 719) {
$stream = $entry.Open()
$reader = [System.IO.StreamReader]::new($stream, [System.Text.Encoding]::UTF8)
$content = $reader.ReadToEnd()
$reader.Dispose()
$stream.Dispose()
$values = Parse-GainValues $content
}
if ($values.Count -lt 719) {
Write-Warning " Expected 719+ values, got $($values.Count) in $($entry.Name)"
return $null
}
return @{
azimuth = $values[0..359]
elevation = $values[360..718]
}
}
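# Output shape (illustrative): top-level keys are "Model" or "Model:variant",
# each mapping band names to gain arrays:
# { "U7-Pro": { "2.4": { "azimuth": [360 values], "elevation": [359 values] }, ... } }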
Write-Host "Parsing antenna patterns from: $InputDir"
Write-Host "Output: $OutputFile"
Write-Host ""
$patterns = @{}
$zipFiles = Get-ChildItem -Path $InputDir -Filter "*.zip" -ErrorAction SilentlyContinue | Sort-Object Name
Write-Host "Found $($zipFiles.Count) zip files"
Write-Host ""
foreach ($zip in $zipFiles) {
$rawName = [System.IO.Path]::GetFileNameWithoutExtension($zip.Name)
$parsed = Get-ModelAndVariant $rawName
# Build the key: "ModelName" for base, "ModelName:variant" for variants
if ($parsed.Variant) {
$patternKey = "$($parsed.Model):$($parsed.Variant)"
} else {
$patternKey = $parsed.Model
}
Write-Host "Processing: $rawName -> $patternKey"
try {
$archive = [System.IO.Compression.ZipFile]::OpenRead($zip.FullName)
# Filter to .ant files, skip macOS resource forks (._prefix)
$antFiles = $archive.Entries | Where-Object {
$_.Name -match "\.(ant|amt)$" -and $_.Name -notmatch "^\._"
}
if ($antFiles.Count -eq 0) {
Write-Warning " No .ant files found in $($zip.Name)"
$archive.Dispose()
continue
}
if (-not $patterns.ContainsKey($patternKey)) {
$patterns[$patternKey] = @{}
}
foreach ($entry in $antFiles) {
$band = Get-BandFromFilename $entry.Name
if (-not $band) {
Write-Warning " Could not determine band for: $($entry.Name)"
continue
}
Write-Host " Band $band : $($entry.Name)"
$result = Parse-AntFile $entry
if ($result) {
$patterns[$patternKey][$band] = $result
}
}
$archive.Dispose()
}
catch {
Write-Warning " Error processing $($zip.Name): $_"
}
}
# Remove models with no band data (failed to parse anything)
$emptyModels = $patterns.Keys | Where-Object { $patterns[$_].Count -eq 0 }
foreach ($key in $emptyModels) {
Write-Warning "Removing $key (no band data parsed)"
$patterns.Remove($key)
}
# Ensure output directory exists
$outputDir = [System.IO.Path]::GetDirectoryName($OutputFile)
if (-not (Test-Path $outputDir)) {
New-Item -ItemType Directory -Path $outputDir -Force | Out-Null
}
# Write JSON
$json = $patterns | ConvertTo-Json -Depth 5 -Compress
[System.IO.File]::WriteAllText($OutputFile, $json)
$fileSize = (Get-Item $OutputFile).Length
$totalBands = ($patterns.Values | ForEach-Object { $_.Count } | Measure-Object -Sum).Sum
$variantCount = ($patterns.Keys | Where-Object { $_ -match ":" }).Count
Write-Host ""
Write-Host "Done! Wrote $($patterns.Count) models ($variantCount with variants, $totalBands total band patterns) to $OutputFile ($([math]::Round($fileSize / 1024))KB)"
================================================
FILE: scripts/proxmox/README.md
================================================
# Proxmox LXC Installation
Install Network Optimizer for UniFi in a Proxmox LXC container with a single command.
## Quick Start
Run this command on your **Proxmox VE host** (not inside a VM or container):
```bash
bash -c "$(wget -qLO - https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/proxmox/install.sh)"
```
Or with curl:
```bash
bash -c "$(curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/proxmox/install.sh)"
```
The script will guide you through:
1. Container configuration (ID, hostname, resources, network)
2. Application settings (timezone, ports, optional HTTPS via Traefik, optional password)
3. Automatic installation of Docker and Network Optimizer
4. Optional Traefik HTTPS proxy with automatic Let's Encrypt certificates
## Requirements
- **Proxmox VE 7.0** or later
- **10GB** disk space minimum (20GB recommended)
- **2GB** RAM minimum (4GB recommended)
- Internet access for downloading container template and Docker images
## What Gets Installed
The script creates a privileged Debian LXC container (Debian 13 Trixie by default) with:
- Docker CE and Docker Compose (privileged container for reliable Docker operation)
- Network Optimizer (Blazor web UI on port 8042)
- OpenSpeedTest (browser-based speed testing on port 3005)
- Persistent storage in `/opt/network-optimizer/data`
- Auto-start on boot enabled
- Swap space for memory stability
## Default Configuration
| Setting | Default | Description |
|---------|---------|-------------|
| Container ID | Next available | Starting from 100, checks VMs too |
| Hostname | `network-optimizer` | Container hostname |
| Debian Version | 13 (Trixie) | Debian 12 (Bookworm) also supported |
| RAM | 2048 MB | Container memory |
| Swap | 512 MB | Swap space |
| CPU | 2 cores | Container CPU cores |
| Disk | 10 GB | Root filesystem size |
| Storage | `local-lvm` | Proxmox storage for container |
| VLAN Tag | None | Tag network interface for VLAN-aware bridges |
| Network | DHCP | Static IP also supported (with DNS) |
| SSH Access | Disabled | Enable for direct SSH root login |
| Web Port | 8042 | Network Optimizer web UI (fixed) |
| Speedtest Port | 3005 | OpenSpeedTest web UI (configurable) |
| iperf3 Server | Disabled | CLI-based speed testing (port 5201) |
| Host Redirect | Disabled | Redirect IP access to hostname (requires local DNS) |
| HTTPS (Traefik) | Disabled | Automatic HTTPS with Let's Encrypt via Cloudflare DNS |
| Reverse Proxy | None | Optional hostname for reverse proxy setup (skipped if Traefik enabled) |
| Geo Location | Disabled | GPS tagging for speed tests and signal levels (auto-enabled with Traefik) |
| Timezone | America/New_York | Container timezone |
## Post-Installation
### Get Admin Password
If you didn't set a password during installation, an auto-generated one is shown in the logs:
```bash
pct exec <CTID> -- docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
### Access the Web UI
1. Open `http://<container-ip>:8042` in your browser
2. Log in with the admin password
3. Go to **Settings** and connect to your UniFi controller
4. Navigate to **Audit** to run your first security scan
### Container Management
```bash
# Enter container shell
pct enter <CTID>
# Start/stop container
pct start <CTID>
pct stop <CTID>
# View application logs
pct exec <CTID> -- docker logs -f network-optimizer
# Check container status
pct status <CTID>
```
### SSH Access (Optional)
If you enabled SSH during installation, set a root password:
```bash
pct exec <CTID> -- passwd
```
Then connect directly:
```bash
ssh root@<container-ip>
```
### Application Management
All commands run from the Proxmox host:
```bash
# View logs
pct exec <CTID> -- docker logs -f network-optimizer
# Restart services
pct exec <CTID> -- bash -c "cd /opt/network-optimizer && docker compose restart"
# Update to latest version
pct exec <CTID> -- bash -c "cd /opt/network-optimizer && docker compose pull && docker compose up -d"
# Check health
pct exec <CTID> -- curl -s http://localhost:8042/api/health
```
Or enter the container first:
```bash
pct enter <CTID>
cd /opt/network-optimizer
docker compose logs -f
docker compose pull && docker compose up -d
```
## HTTPS with Traefik
During installation, you can optionally enable HTTPS via a built-in [Traefik](https://github.com/Ozark-Connect/NetworkOptimizer-Proxy) reverse proxy. This provides:
- Automatic Let's Encrypt certificates via Cloudflare DNS-01 challenge
- HTTP/1.1 for speed tests (HTTP/2 multiplexing skews results), HTTP/2 for the main app
- Geo location tagging for speed tests and signal walk tests (browsers require HTTPS for location access)
**Requirements:**
- A domain managed by Cloudflare (for DNS-01 certificate validation)
- A Cloudflare API token with Zone > DNS > Edit permission ([create one here](https://dash.cloudflare.com/profile/api-tokens))
- Two DNS records pointing to your container's IP (e.g., `optimizer.example.com` and `speedtest.example.com`)
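Before enabling Traefik, it helps to confirm both names already resolve to the container's IP (hostnames below are the example names above):
```bash
# Both should print the container's IP
dig +short optimizer.example.com
dig +short speedtest.example.com
```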
**What gets deployed:**
- Traefik container at `/opt/network-optimizer-proxy/`
- Dynamic configuration in `/opt/network-optimizer-proxy/dynamic/config.yml`
- Certificates stored in `/opt/network-optimizer-proxy/acme/acme.json`
**Management commands:**
```bash
# View Traefik logs
pct exec <CTID> -- docker logs -f traefik-proxy
# Update Traefik
pct exec <CTID> -- bash -c "cd /opt/network-optimizer-proxy && docker compose pull && docker compose up -d"
# Edit proxy configuration
pct exec <CTID> -- nano /opt/network-optimizer-proxy/dynamic/config.yml
# Edit proxy environment (ACME email, API token)
pct exec <CTID> -- nano /opt/network-optimizer-proxy/.env
```
**Note:** Certificates may take about a minute to issue on first start. If you see certificate errors immediately after installation, wait a moment and refresh.
If you don't enable Traefik during installation, you can still set up a reverse proxy manually later. See the [Deployment Guide](../../docker/DEPLOYMENT.md#https-with-reverse-proxy) for nginx, Caddy, and Traefik examples.
## Advanced Configuration
### Static IP Address
During installation, when prompted for IP address, enter a CIDR notation:
```
IP address [dhcp]: 192.168.1.100/24
Gateway IP: 192.168.1.1
```
### Custom Ports
Modify ports during installation or edit `/opt/network-optimizer/.env` afterward:
```bash
pct exec <CTID> -- nano /opt/network-optimizer/.env
```
Then restart:
```bash
pct exec -- bash -c "cd /opt/network-optimizer && docker compose down && docker compose up -d"
```
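For example, to move OpenSpeedTest to port 3100, set `OPENSPEEDTEST_PORT=3100` in the `.env` file before restarting.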
### Resource Adjustments
Adjust container resources via Proxmox:
```bash
# Increase RAM to 4GB
pct set <CTID> --memory 4096
# Increase CPU to 4 cores
pct set <CTID> --cores 4
# Resize disk to 20GB
pct resize <CTID> rootfs 20G
```
### Enable iperf3 Server
For CLI-based speed testing from network devices:
```bash
pct exec -- bash -c "echo 'IPERF3_SERVER_ENABLED=true' >> /opt/network-optimizer/.env"
pct exec -- bash -c "cd /opt/network-optimizer && docker compose down && docker compose up -d"
```
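Then test from any client on the network with `iperf3 -c <container-ip>`.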
## Network Configuration
### VLAN-Aware Bridges
If your Proxmox bridge is VLAN-aware (`bridge-vlan-aware yes` in `/etc/network/interfaces`) and the default untagged VLAN doesn't have internet access, the installer will prompt for a VLAN tag. This tags the container's network interface so it can reach the internet for package downloads and Docker image pulls.
Example: If your management/setup VLAN is 10, enter `10` when prompted. Leave empty if your default VLAN already has internet access.
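A quick sketch for checking the bridge and retagging an existing container (VLAN 10 here is an example):
```bash
# Is the bridge VLAN-aware?
grep -A6 "^iface vmbr0" /etc/network/interfaces | grep bridge-vlan-aware
# Re-apply the container's network interface with a VLAN tag
pct set <CTID> --net0 name=eth0,bridge=vmbr0,ip=dhcp,tag=10
```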
### Host Networking
The container uses Docker's host networking mode by default, which provides:
- Best performance for speed testing
- Accurate client IP detection
- No port mapping overhead
### Firewall Rules
If you have Proxmox firewall enabled, allow these ports:
```bash
# Web UI
pct set <CTID> --firewall 1
pvesh create /nodes/<node>/lxc/<CTID>/firewall/rules --type in --action ACCEPT --dport 8042 --proto tcp
# OpenSpeedTest
pvesh create /nodes/<node>/lxc/<CTID>/firewall/rules --type in --action ACCEPT --dport 3005 --proto tcp
# iperf3 (if enabled)
pvesh create /nodes/<node>/lxc/<CTID>/firewall/rules --type in --action ACCEPT --dport 5201 --proto tcp
```
Or disable container firewall:
```bash
pct set <CTID> --firewall 0
```
## Backup and Restore
### Backup Container
```bash
# Full container backup
vzdump <CTID> --storage local --compress zstd --mode snapshot
# Or just the data directory
pct exec <CTID> -- tar czf /tmp/data-backup.tar.gz -C /opt/network-optimizer data
pct pull <CTID> /tmp/data-backup.tar.gz ./data-backup.tar.gz
```
### Restore Data
```bash
# Stop services
pct exec <CTID> -- bash -c "cd /opt/network-optimizer && docker compose down"
# Restore data
pct push <CTID> ./data-backup.tar.gz /tmp/data-backup.tar.gz
pct exec <CTID> -- tar xzf /tmp/data-backup.tar.gz -C /opt/network-optimizer
# Start services
pct exec <CTID> -- bash -c "cd /opt/network-optimizer && docker compose up -d"
```
## Troubleshooting
### Container Won't Start
Check for Docker-related issues:
```bash
# View container config
cat /etc/pve/lxc/<CTID>.conf
# Ensure nesting is enabled
pct set <CTID> --features nesting=1
```
### Docker Fails Inside Container
The script creates a privileged container with nesting enabled, which is the most reliable configuration for Docker. If Docker still fails:
```bash
# Check Docker service status
pct exec <CTID> -- systemctl status docker
# Try restarting Docker
pct exec <CTID> -- systemctl restart docker
# Check for errors in Docker logs
pct exec <CTID> -- journalctl -u docker --no-pager -n 50
```
### Application Not Responding
```bash
# Check Docker status
pct exec <CTID> -- systemctl status docker
# Check container logs
pct exec <CTID> -- docker logs network-optimizer
# Restart everything
pct exec <CTID> -- bash -c "cd /opt/network-optimizer && docker compose down && docker compose up -d"
```
### Permission Errors
If you see permission errors with volumes:
```bash
# Check ownership
pct exec <CTID> -- ls -la /opt/network-optimizer/
# Fix permissions
pct exec <CTID> -- chown -R 1000:1000 /opt/network-optimizer/data
```
### Reset Admin Password
If you forget the admin password:
```bash
pct exec -- bash -c "curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.sh | bash -s -- --force"
```
**Manual fallback:**
```bash
# Clear the password
pct exec <CTID> -- docker exec network-optimizer sqlite3 /app/data/network_optimizer.db \
"UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
# Restart and view the new password
pct exec <CTID> -- docker restart network-optimizer
sleep 10
pct exec <CTID> -- docker logs network-optimizer 2>&1 | grep -A5 "AUTO-GENERATED"
```
## Uninstall
To completely remove Network Optimizer:
```bash
# Stop and destroy container
pct stop <CTID>
pct destroy <CTID>
```
## Manual Installation
If you prefer manual installation or the script doesn't work in your environment:
1. Create an LXC container (Debian 12, **privileged**, nesting enabled)
```bash
pct create <CTID> <template-storage>:vztmpl/debian-12-standard_*.tar.zst \
--hostname network-optimizer \
--memory 2048 --swap 512 --cores 2 \
--rootfs <storage>:10 \
--net0 name=eth0,bridge=vmbr0,ip=dhcp \
--unprivileged 0 --features nesting=1 --onboot 1
```
If using a VLAN-aware bridge, add `,tag=<VLAN-ID>` to the `--net0` value (e.g., `...,ip=dhcp,tag=10`).
2. Start container and install Docker: https://docs.docker.com/engine/install/debian/
3. Follow the [Docker deployment guide](../../docker/DEPLOYMENT.md)
## Support
- **Issues:** [GitHub Issues](https://github.com/Ozark-Connect/NetworkOptimizer/issues)
- **Documentation:** [Deployment Guide](../../docker/DEPLOYMENT.md)
When reporting issues, include:
- Proxmox VE version (`pveversion`)
- Container logs (`pct exec <CTID> -- docker logs network-optimizer`)
- Any error messages from the installation script
================================================
FILE: scripts/proxmox/install.sh
================================================
#!/usr/bin/env bash
# Network Optimizer for UniFi - Proxmox LXC Installation Script
# https://github.com/Ozark-Connect/NetworkOptimizer
#
# This script creates a Proxmox LXC container and installs Network Optimizer
# using Docker Compose. Designed for the homelab community.
#
# Usage:
# bash -c "$(wget -qLO - https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/proxmox/install.sh)"
#
# Requirements:
# - Proxmox VE 7.0 or later
# - Internet access for downloading container template and Docker images
# - Sufficient storage (10GB minimum recommended)
set -Eeuo pipefail
# =============================================================================
# Configuration Defaults
# =============================================================================
APP_NAME="Network Optimizer"
GITHUB_REPO="Ozark-Connect/NetworkOptimizer"
GITHUB_BRANCH="main"
# Container defaults
DEFAULT_HOSTNAME="network-optimizer"
DEFAULT_DISK_SIZE="10"
DEFAULT_RAM="2048"
DEFAULT_SWAP="512"
DEFAULT_CPU="2"
DEFAULT_BRIDGE="vmbr0"
DEFAULT_STORAGE="local-lvm"
DEFAULT_TEMPLATE_STORAGE="local"
# Application defaults
DEFAULT_TZ="America/New_York"
DEFAULT_SPEEDTEST_PORT="3005"
# =============================================================================
# Colors and Formatting
# =============================================================================
readonly RD='\033[0;31m' # Red
readonly GN='\033[0;32m' # Green
readonly YW='\033[0;33m' # Yellow
readonly BL='\033[0;34m' # Blue
readonly MG='\033[0;35m' # Magenta
readonly CY='\033[0;36m' # Cyan
readonly WH='\033[0;37m' # White
readonly BLD='\033[1m' # Bold
readonly DIM='\033[2m' # Dim
readonly CL='\033[0m' # Clear/Reset
# =============================================================================
# Helper Functions
# =============================================================================
msg_info() {
echo -e "${BL}[INFO]${CL} $1"
}
msg_ok() {
echo -e "${GN}[OK]${CL} $1"
}
msg_warn() {
echo -e "${YW}[WARN]${CL} $1"
}
msg_error() {
echo -e "${RD}[ERROR]${CL} $1"
}
header() {
echo -e "\n${BLD}${CY}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}"
echo -e "${BLD}${CY} $1${CL}"
echo -e "${BLD}${CY}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${CL}\n"
}
cleanup() {
local exit_code=$?
if [[ $exit_code -ne 0 ]]; then
echo ""
msg_error "Installation failed. Check the output above for errors."
if [[ -n "${CT_ID:-}" ]] && pct status "$CT_ID" &>/dev/null; then
echo -e "${DIM}To clean up the failed container:${CL}"
echo -e "${DIM} pct stop $CT_ID 2>/dev/null; pct destroy $CT_ID${CL}"
fi
fi
}
trap cleanup EXIT
# =============================================================================
# Validation Functions
# =============================================================================
check_root() {
if [[ $EUID -ne 0 ]]; then
msg_error "This script must be run as root on Proxmox VE."
echo -e "${DIM}Try: sudo bash install.sh${CL}"
exit 1
fi
}
check_proxmox() {
if ! command -v pveversion &> /dev/null; then
msg_error "This script must be run on Proxmox VE."
echo -e "${DIM}Proxmox VE not detected. Please run this script on your Proxmox host.${CL}"
exit 1
fi
local pve_version
pve_version=$(pveversion --verbose | grep "pve-manager" | awk '{print $2}' | cut -d'/' -f1)
msg_ok "Proxmox VE $pve_version detected"
}
get_next_ct_id() {
local id=100
while pct status "$id" &>/dev/null || qm status "$id" &>/dev/null; do
((id++))
done
echo "$id"
}
validate_ct_id() {
local id=$1
if ! [[ "$id" =~ ^[0-9]+$ ]]; then
msg_error "Container ID must be a number."
return 1
fi
if [[ "$id" -lt 100 ]]; then
msg_error "Container ID must be 100 or greater."
return 1
fi
if pct status "$id" &>/dev/null || qm status "$id" &>/dev/null; then
msg_error "ID $id already exists (VM or container)."
return 1
fi
return 0
}
validate_hostname() {
local hostname=$1
if ! [[ "$hostname" =~ ^[a-zA-Z0-9]([a-zA-Z0-9.-]*[a-zA-Z0-9])?$ ]]; then
msg_error "Invalid hostname: $hostname"
msg_info "Hostnames may only contain letters, numbers, dots, and hyphens."
return 1
fi
return 0
}
get_storage_list() {
pvesm status -content rootdir 2>/dev/null | awk 'NR>1 {print $1}' | tr '\n' ' '
}
get_template_storage_list() {
pvesm status -content vztmpl 2>/dev/null | awk 'NR>1 {print $1}' | tr '\n' ' '
}
get_bridge_list() {
ip -o link show type bridge 2>/dev/null | awk -F': ' '{print $2}' | tr '\n' ' '
}
validate_storage() {
local storage=$1
local content_type=$2
if ! pvesm status -content "$content_type" 2>/dev/null | awk 'NR>1 {print $1}' | grep -qw "$storage"; then
return 1
fi
return 0
}
# Find the Debian template based on version selection
find_debian_template() {
local storage=$1
local version=${2:-12}
# Update template list
pveam update &>/dev/null || true
# Find the latest debian template for the selected version
local template
template=$(pveam available -section system 2>/dev/null | grep "debian-${version}-standard" | tail -1 | awk '{print $2}')
if [[ -z "$template" ]]; then
msg_error "Could not find Debian ${version} template in repository."
msg_info "Available templates:"
pveam available -section system 2>/dev/null | grep -i debian | head -5
exit 1
fi
echo "$template"
}
# =============================================================================
# Interactive Configuration
# =============================================================================
show_banner() {
clear
echo -e "${BLD}${MG}"
cat << "EOF"
_ __ __ __ ____ __ _ _
/ | / /__ / /__ ______ _____/ /__ / __ \____ / /_(_)___ ___ (_)___ ___ _____
/ |/ / _ \/ __/ | /| / / __ \/ ___/ //_/ / / / / __ \/ __/ / __ `__ \/ /_ / / _ \/ ___/
/ /| / __/ /_ | |/ |/ / /_/ / / / ,< / /_/ / /_/ / /_/ / / / / / / / / /_/ __/ /
/_/ |_/\___/\__/ |__/|__/\____/_/ /_/|_| \____/ .___/\__/_/_/ /_/ /_/_/ /___/\___/_/
/_/
EOF
echo -e "${CL}"
echo -e "${DIM}Proxmox LXC Installation Script${CL}"
echo -e "${DIM}https://github.com/${GITHUB_REPO}${CL}\n"
}
configure_container() {
header "Container Configuration"
# Container ID
local default_id
default_id=$(get_next_ct_id)
echo -e "${WH}Container ID${CL} ${DIM}(next available: $default_id)${CL}"
read -rp "Enter CT ID [$default_id]: " CT_ID
CT_ID=${CT_ID:-$default_id}
if ! validate_ct_id "$CT_ID"; then
exit 1
fi
# Hostname
echo -e "\n${WH}Hostname${CL}"
read -rp "Enter hostname [$DEFAULT_HOSTNAME]: " CT_HOSTNAME
CT_HOSTNAME=${CT_HOSTNAME:-$DEFAULT_HOSTNAME}
# Debian version
echo -e "\n${WH}Debian Version${CL}"
echo -e "${DIM}Debian 13 (Trixie) is the current stable release.${CL}"
echo -e "${DIM}Debian 12 (Bookworm) also supported if preferred.${CL}"
read -rp "Debian version [13]: " DEBIAN_VERSION
DEBIAN_VERSION=${DEBIAN_VERSION:-13}
# Resources
echo -e "\n${WH}Resources${CL}"
read -rp "RAM in MB [$DEFAULT_RAM]: " CT_RAM
CT_RAM=${CT_RAM:-$DEFAULT_RAM}
read -rp "Swap in MB [$DEFAULT_SWAP]: " CT_SWAP
CT_SWAP=${CT_SWAP:-$DEFAULT_SWAP}
read -rp "CPU cores [$DEFAULT_CPU]: " CT_CPU
CT_CPU=${CT_CPU:-$DEFAULT_CPU}
read -rp "Disk size in GB [$DEFAULT_DISK_SIZE]: " CT_DISK
CT_DISK=${CT_DISK:-$DEFAULT_DISK_SIZE}
# Storage
local available_storage
available_storage=$(get_storage_list)
echo -e "\n${WH}Storage${CL} ${DIM}(available: $available_storage)${CL}"
read -rp "Storage for container [$DEFAULT_STORAGE]: " CT_STORAGE
CT_STORAGE=${CT_STORAGE:-$DEFAULT_STORAGE}
if ! validate_storage "$CT_STORAGE" "rootdir"; then
msg_error "Storage '$CT_STORAGE' not found or doesn't support rootdir content."
msg_info "Available storage: $available_storage"
exit 1
fi
local available_template_storage
available_template_storage=$(get_template_storage_list)
echo -e "\n${WH}Template Storage${CL} ${DIM}(available: $available_template_storage)${CL}"
read -rp "Storage for templates [$DEFAULT_TEMPLATE_STORAGE]: " TEMPLATE_STORAGE
TEMPLATE_STORAGE=${TEMPLATE_STORAGE:-$DEFAULT_TEMPLATE_STORAGE}
if ! validate_storage "$TEMPLATE_STORAGE" "vztmpl"; then
msg_error "Storage '$TEMPLATE_STORAGE' not found or doesn't support vztmpl content."
msg_info "Available storage: $available_template_storage"
exit 1
fi
# Network
local available_bridges
available_bridges=$(get_bridge_list)
echo -e "\n${WH}Network Bridge${CL} ${DIM}(available: $available_bridges)${CL}"
read -rp "Network bridge [$DEFAULT_BRIDGE]: " CT_BRIDGE
CT_BRIDGE=${CT_BRIDGE:-$DEFAULT_BRIDGE}
# VLAN tag
echo -e "\n${WH}VLAN Tag${CL}"
echo -e "${DIM}If your bridge is VLAN-aware and the default (untagged) VLAN doesn't have${CL}"
echo -e "${DIM}internet access, specify the VLAN ID to tag the container's network interface.${CL}"
echo -e "${DIM}Leave empty for untagged (default VLAN).${CL}"
read -rp "VLAN tag [none]: " CT_VLAN_TAG
CT_VLAN_TAG=${CT_VLAN_TAG:-}
if [[ -n "$CT_VLAN_TAG" ]]; then
if ! [[ "$CT_VLAN_TAG" =~ ^[0-9]+$ ]] || [[ "$CT_VLAN_TAG" -lt 1 ]] || [[ "$CT_VLAN_TAG" -gt 4094 ]]; then
msg_error "VLAN tag must be a number between 1 and 4094."
exit 1
fi
fi
echo -e "\n${WH}IP Configuration${CL}"
echo -e "${DIM}Enter 'dhcp' for DHCP or static IP in CIDR format (e.g., 192.168.1.100/24)${CL}"
read -rp "IP address [dhcp]: " CT_IP
CT_IP=${CT_IP:-dhcp}
# Initialize gateway and DNS to empty
CT_GW=""
CT_DNS=""
if [[ "$CT_IP" != "dhcp" ]]; then
read -rp "Gateway IP: " CT_GW
if [[ -z "$CT_GW" ]]; then
msg_error "Gateway is required for static IP configuration."
exit 1
fi
echo -e "${DIM}DNS server (press Enter to use gateway as DNS)${CL}"
read -rp "DNS server [$CT_GW]: " CT_DNS
CT_DNS=${CT_DNS:-$CT_GW}
fi
}
configure_application() {
header "Application Configuration"
# Timezone
echo -e "${WH}Timezone${CL}"
echo -e "${DIM}Examples: America/New_York, America/Chicago, America/Los_Angeles, Europe/London${CL}"
read -rp "Timezone [$DEFAULT_TZ]: " APP_TZ
APP_TZ=${APP_TZ:-$DEFAULT_TZ}
# OpenSpeedTest port
echo -e "\n${WH}OpenSpeedTest Port${CL}"
echo -e "${DIM}Browser-based speed testing (main web UI is always on port 8042)${CL}"
read -rp "OpenSpeedTest port [$DEFAULT_SPEEDTEST_PORT]: " APP_SPEEDTEST_PORT
APP_SPEEDTEST_PORT=${APP_SPEEDTEST_PORT:-$DEFAULT_SPEEDTEST_PORT}
# iperf3 server
echo -e "\n${WH}iperf3 Server${CL}"
echo -e "${DIM}Enable CLI-based speed testing from network devices (port 5201)${CL}"
read -rp "Enable iperf3 server? [y/N]: " iperf3_response
if [[ "${iperf3_response,,}" =~ ^(y|yes)$ ]]; then
APP_IPERF3_ENABLED="true"
else
APP_IPERF3_ENABLED="false"
fi
# Hostname-based access (for local DNS users)
echo -e "\n${WH}Hostname-Based Access${CL}"
echo -e "${DIM}Enable if you have local DNS (e.g., Pi-hole) resolving the container hostname.${CL}"
echo -e "${DIM}Uses hostname for redirects, speed test links, and CORS. Requires working DNS.${CL}"
echo -e "${DIM}If disabled, IP address is used instead (works without DNS setup).${CL}"
read -rp "Enable hostname-based access? [y/N]: " hostname_redirect_response
if [[ "${hostname_redirect_response,,}" =~ ^(y|yes)$ ]]; then
APP_HOSTNAME_REDIRECT="true"
else
APP_HOSTNAME_REDIRECT="false"
fi
# Initialize Traefik and proxy variables
APP_TRAEFIK_ENABLED="false"
TRAEFIK_ACME_EMAIL=""
TRAEFIK_CF_DNS_API_TOKEN=""
TRAEFIK_OPTIMIZER_HOSTNAME=""
TRAEFIK_SPEEDTEST_HOSTNAME=""
APP_REVERSE_PROXY_HOST=""
APP_GEOLOCATION="false"
APP_OPENSPEEDTEST_HOST=""
# HTTPS via Traefik
echo -e "\n${WH}HTTPS via Traefik${CL}"
echo -e "${DIM}Automatic HTTPS with Let's Encrypt certificates via Cloudflare DNS.${CL}"
echo -e "${DIM}Enables geo location tagging and solves the HTTP/1.1 speed test requirement.${CL}"
echo -e "${DIM}Requires a domain managed by Cloudflare.${CL}"
read -rp "Set up HTTPS via Traefik? [y/N]: " traefik_response
if [[ "${traefik_response,,}" =~ ^(y|yes)$ ]]; then
APP_TRAEFIK_ENABLED="true"
echo ""
read -rp "ACME email (for Let's Encrypt): " TRAEFIK_ACME_EMAIL
if [[ -z "$TRAEFIK_ACME_EMAIL" ]]; then
msg_error "ACME email is required for Let's Encrypt."
exit 1
fi
echo -e "${DIM}Create a token at: https://dash.cloudflare.com/profile/api-tokens${CL}"
echo -e "${DIM}Required permission: Zone > DNS > Edit${CL}"
read -rsp "Cloudflare DNS API token (hidden): " TRAEFIK_CF_DNS_API_TOKEN
echo ""
if [[ -z "$TRAEFIK_CF_DNS_API_TOKEN" ]]; then
msg_error "Cloudflare API token is required."
exit 1
fi
read -rp "Optimizer hostname (e.g., optimizer.example.com): " TRAEFIK_OPTIMIZER_HOSTNAME
if [[ -z "$TRAEFIK_OPTIMIZER_HOSTNAME" ]]; then
msg_error "Optimizer hostname is required."
exit 1
fi
if ! validate_hostname "$TRAEFIK_OPTIMIZER_HOSTNAME"; then
exit 1
fi
read -rp "SpeedTest hostname (e.g., speedtest.example.com): " TRAEFIK_SPEEDTEST_HOSTNAME
if [[ -z "$TRAEFIK_SPEEDTEST_HOSTNAME" ]]; then
msg_error "SpeedTest hostname is required."
exit 1
fi
if ! validate_hostname "$TRAEFIK_SPEEDTEST_HOSTNAME"; then
exit 1
fi
# Auto-configure reverse proxy and geo location
APP_REVERSE_PROXY_HOST="$TRAEFIK_OPTIMIZER_HOSTNAME"
APP_OPENSPEEDTEST_HOST="$TRAEFIK_SPEEDTEST_HOSTNAME"
APP_GEOLOCATION="true"
else
# Reverse proxy
echo -e "\n${WH}Reverse Proxy${CL}"
echo -e "${DIM}If using a reverse proxy (Caddy, nginx, Traefik), enter the public hostname${CL}"
echo -e "${DIM}Leave empty if accessing directly via IP${CL}"
read -rp "Reverse proxy hostname (e.g., optimizer.example.com): " APP_REVERSE_PROXY_HOST
APP_REVERSE_PROXY_HOST=${APP_REVERSE_PROXY_HOST:-}
# Geo location tagging
echo -e "\n${WH}Geo Location Tagging${CL}"
echo -e "${DIM}Tag speed tests and Wi-Fi signal levels with GPS coordinates to map${CL}"
echo -e "${DIM}coverage and identify dead zones across your property.${CL}"
read -rp "Set up geo location tagging? [y/N]: " geolocation_response
if [[ "${geolocation_response,,}" =~ ^(y|yes)$ ]]; then
echo -e "\n${DIM}Geo location requires HTTPS (browser security requirement), and OpenSpeedTest${CL}"
echo -e "${DIM}needs HTTP/1.1 for accurate speed results. Set up an HTTP/1.1 reverse proxy${CL}"
echo -e "${DIM}(Caddy, nginx, etc.) pointing at the speed test server (port ${APP_SPEEDTEST_PORT}).${CL}"
echo -e "${DIM}See .env.example in /opt/network-optimizer for a sample Caddy config.${CL}"
echo ""
read -rp "Speed test HTTPS hostname (e.g., speedtest.example.com): " APP_OPENSPEEDTEST_HOST
if [[ -n "$APP_OPENSPEEDTEST_HOST" ]]; then
APP_GEOLOCATION="true"
# Mixed content check - main app also needs HTTPS
if [[ -z "$APP_REVERSE_PROXY_HOST" ]]; then
echo -e "\n${YW}The main app also needs HTTPS to avoid mixed content blocking.${CL}"
echo -e "${DIM}Speed test results won't save unless the main app is behind HTTPS too.${CL}"
read -rp "Main app HTTPS hostname (e.g., optimizer.example.com): " APP_REVERSE_PROXY_HOST
APP_REVERSE_PROXY_HOST=${APP_REVERSE_PROXY_HOST:-}
if [[ -z "$APP_REVERSE_PROXY_HOST" ]]; then
msg_warn "No main app hostname set. Speed test results may not save from HTTPS."
fi
fi
else
msg_warn "Hostname required for geo location. Skipping geo location setup."
fi
fi
fi
# SSH access
echo -e "\n${WH}SSH Access${CL}"
echo -e "${DIM}Enable SSH root login for direct container access (alternative to pct enter).${CL}"
read -rp "Enable SSH root access? [y/N]: " ssh_response
if [[ "${ssh_response,,}" =~ ^(y|yes)$ ]]; then
APP_SSH_ENABLED="true"
else
APP_SSH_ENABLED="false"
fi
# Optional password
echo -e "\n${WH}Admin Password${CL}"
echo -e "${DIM}If you skip this, a secure password will be auto-generated and displayed in the container logs on first startup.${CL}"
echo -e "${DIM}You can change it anytime in Settings > Admin Password.${CL}"
read -rsp "Admin password (hidden, press Enter to auto-generate): " APP_PASSWORD
echo ""
APP_PASSWORD=${APP_PASSWORD:-}
}
confirm_settings() {
header "Confirm Settings"
echo -e "${BLD}Container Settings:${CL}"
echo -e " ID: ${GN}$CT_ID${CL}"
echo -e " Hostname: ${GN}$CT_HOSTNAME${CL}"
echo -e " Debian: ${GN}$DEBIAN_VERSION${CL}"
echo -e " RAM: ${GN}${CT_RAM}MB${CL}"
echo -e " Swap: ${GN}${CT_SWAP}MB${CL}"
echo -e " CPU: ${GN}${CT_CPU} cores${CL}"
echo -e " Disk: ${GN}${CT_DISK}GB${CL}"
echo -e " Storage: ${GN}$CT_STORAGE${CL}"
echo -e " Bridge: ${GN}$CT_BRIDGE${CL}"
if [[ -n "${CT_VLAN_TAG:-}" ]]; then
echo -e " VLAN Tag: ${GN}$CT_VLAN_TAG${CL}"
else
echo -e " VLAN Tag: ${DIM}none (untagged)${CL}"
fi
echo -e " IP: ${GN}$CT_IP${CL}"
if [[ "$CT_IP" != "dhcp" ]]; then
echo -e " Gateway: ${GN}$CT_GW${CL}"
echo -e " DNS: ${GN}$CT_DNS${CL}"
fi
if [[ "$APP_SSH_ENABLED" == "true" ]]; then
echo -e " SSH: ${GN}enabled${CL}"
else
echo -e " SSH: ${DIM}disabled${CL}"
fi
echo -e "\n${BLD}Application Settings:${CL}"
echo -e " Timezone: ${GN}$APP_TZ${CL}"
echo -e " Web UI Port: ${GN}8042${CL} ${DIM}(fixed)${CL}"
echo -e " Speedtest Port: ${GN}$APP_SPEEDTEST_PORT${CL}"
if [[ "$APP_IPERF3_ENABLED" == "true" ]]; then
echo -e " iperf3 Server: ${GN}enabled${CL} ${DIM}(port 5201)${CL}"
else
echo -e " iperf3 Server: ${DIM}disabled${CL}"
fi
if [[ "$APP_HOSTNAME_REDIRECT" == "true" ]]; then
echo -e " Host Redirect: ${GN}$CT_HOSTNAME${CL}"
else
echo -e " Host Redirect: ${DIM}disabled${CL}"
fi
if [[ "$APP_TRAEFIK_ENABLED" == "true" ]]; then
echo -e " Traefik HTTPS: ${GN}enabled${CL}"
echo -e " ACME Email: ${GN}$TRAEFIK_ACME_EMAIL${CL}"
echo -e " Optimizer: ${GN}https://$TRAEFIK_OPTIMIZER_HOSTNAME${CL}"
echo -e " SpeedTest: ${GN}https://$TRAEFIK_SPEEDTEST_HOSTNAME${CL}"
else
if [[ -n "$APP_REVERSE_PROXY_HOST" ]]; then
echo -e " Reverse Proxy: ${GN}$APP_REVERSE_PROXY_HOST${CL}"
else
echo -e " Reverse Proxy: ${DIM}none${CL}"
fi
if [[ "$APP_GEOLOCATION" == "true" ]]; then
echo -e " Geo Location: ${GN}${APP_OPENSPEEDTEST_HOST}${CL} ${DIM}(HTTPS)${CL}"
else
echo -e " Geo Location: ${DIM}disabled${CL}"
fi
fi
if [[ -n "$APP_PASSWORD" ]]; then
echo -e " Password: ${GN}(set)${CL}"
else
echo -e " Password: ${YW}(auto-generate)${CL}"
fi
echo ""
read -rp "Proceed with installation? [Y/n]: " confirm
confirm=${confirm:-Y}
if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
msg_warn "Installation cancelled."
exit 0
fi
}
# =============================================================================
# Installation Functions
# =============================================================================
download_template() {
header "Downloading Container Template"
msg_info "Finding Debian ${DEBIAN_VERSION} template..."
CT_TEMPLATE_FILE=$(find_debian_template "$TEMPLATE_STORAGE" "$DEBIAN_VERSION")
msg_ok "Found template: $CT_TEMPLATE_FILE"
local template_path
template_path=$(pvesm path "$TEMPLATE_STORAGE:vztmpl/$CT_TEMPLATE_FILE" 2>/dev/null || echo "")
if [[ -f "$template_path" ]]; then
msg_ok "Template already downloaded"
return 0
fi
msg_info "Downloading template..."
if ! pveam download "$TEMPLATE_STORAGE" "$CT_TEMPLATE_FILE"; then
msg_error "Failed to download container template."
echo -e "${DIM}Try manually: pveam download $TEMPLATE_STORAGE $CT_TEMPLATE_FILE${CL}"
exit 1
fi
msg_ok "Template downloaded successfully"
}
create_container() {
header "Creating LXC Container"
msg_info "Creating container $CT_ID ($CT_HOSTNAME)..."
local net_config
if [[ "$CT_IP" == "dhcp" ]]; then
net_config="name=eth0,bridge=$CT_BRIDGE,ip=dhcp"
else
net_config="name=eth0,bridge=$CT_BRIDGE,ip=$CT_IP,gw=$CT_GW"
fi
# Add VLAN tag if specified
if [[ -n "${CT_VLAN_TAG:-}" ]]; then
net_config="${net_config},tag=${CT_VLAN_TAG}"
fi
# Create privileged container with nesting enabled (required for Docker)
# Note: Privileged is more reliable for Docker; unprivileged requires extra config
# that varies by Proxmox version and kernel
pct create "$CT_ID" "$TEMPLATE_STORAGE:vztmpl/$CT_TEMPLATE_FILE" \
--hostname "$CT_HOSTNAME" \
--memory "$CT_RAM" \
--swap "$CT_SWAP" \
--cores "$CT_CPU" \
--rootfs "$CT_STORAGE:$CT_DISK" \
--net0 "$net_config" \
--ostype debian \
--unprivileged 0 \
--features nesting=1 \
--onboot 1 \
--start 0
# Set DNS for static IP
if [[ "$CT_IP" != "dhcp" ]] && [[ -n "$CT_DNS" ]]; then
pct set "$CT_ID" --nameserver "$CT_DNS"
fi
# Fix Docker-in-LXC compatibility: runc's CVE-2025-52881 security patch uses
# detached procfs mounts that AppArmor blocks (even in privileged containers).
# Disabling AppArmor confinement and ensuring writable proc/sys fixes this.
# See: https://forum.proxmox.com/threads/175437/
{
echo "lxc.apparmor.profile: unconfined"
echo "lxc.mount.auto: proc:rw sys:rw"
} >> "/etc/pve/lxc/${CT_ID}.conf"
msg_ok "Container created"
}
start_container() {
msg_info "Starting container..."
pct start "$CT_ID"
# Wait for container to be fully up
local max_wait=60
local waited=0
while ! pct exec "$CT_ID" -- test -f /etc/os-release 2>/dev/null; do
sleep 1
waited=$((waited + 1)) # avoid ((waited++)): it returns status 1 when waited is 0, tripping set -e
if [[ $waited -ge $max_wait ]]; then
msg_error "Container failed to start within ${max_wait}s"
exit 1
fi
done
# Additional wait for networking
sleep 3
msg_ok "Container started"
}
configure_ssh() {
if [[ "$APP_SSH_ENABLED" != "true" ]]; then
return
fi
msg_info "Configuring SSH root access..."
# Enable root login via SSH
pct exec "$CT_ID" -- bash -c '
# Ensure SSH is installed
apt-get update -qq && apt-get install -y -qq openssh-server
# Enable root login with password
sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
sed -i "s/PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
# Ensure SSH starts on boot and restart it
systemctl enable ssh
systemctl restart ssh
'
msg_ok "SSH root access enabled"
msg_info "Set root password with: pct exec $CT_ID -- passwd"
}
install_dependencies() {
header "Installing Dependencies"
msg_info "Updating package lists..."
pct exec "$CT_ID" -- bash -c "apt-get update -qq"
msg_ok "Package lists updated"
msg_info "Installing prerequisites..."
pct exec "$CT_ID" -- bash -c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq \
ca-certificates \
curl \
gnupg \
lsb-release \
sudo \
wget"
msg_ok "Prerequisites installed"
msg_info "Installing Docker (this may take a minute)..."
pct exec "$CT_ID" -- bash -c '
set -e
# Add Docker official GPG key
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc
chmod a+r /etc/apt/keyrings/docker.asc
# Add Docker repository
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" > /etc/apt/sources.list.d/docker.list
# Install Docker
apt-get update -qq
DEBIAN_FRONTEND=noninteractive apt-get install -y -qq docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Enable and start Docker
systemctl enable docker
systemctl start docker
'
msg_ok "Docker installed"
# Verify Docker is running
if ! pct exec "$CT_ID" -- docker info &>/dev/null; then
msg_error "Docker failed to start properly"
msg_info "Checking Docker status..."
pct exec "$CT_ID" -- systemctl status docker --no-pager || true
exit 1
fi
msg_ok "Docker is running"
}
deploy_application() {
header "Deploying $APP_NAME"
local app_dir="/opt/network-optimizer"
msg_info "Creating application directory..."
pct exec "$CT_ID" -- mkdir -p "$app_dir"
msg_ok "Directory created"
msg_info "Creating docker-compose.yml..."
# Generate compose file that pulls from GHCR (no build context)
# This is simpler and faster than building from source
local compose_content='services:
network-optimizer:
image: ghcr.io/ozark-connect/network-optimizer:latest
container_name: network-optimizer
restart: unless-stopped
network_mode: host
volumes:
- ./data:/app/data
- ./ssh-keys:/app/ssh-keys:ro
- ./logs:/app/logs
environment:
- TZ=${TZ:-America/Chicago}
- BIND_LOCALHOST_ONLY=${BIND_LOCALHOST_ONLY:-false}
- APP_PASSWORD=${APP_PASSWORD:-}
- HOST_IP=${HOST_IP:-}
- HOST_NAME=${HOST_NAME:-}
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
- Iperf3Server__Enabled=${IPERF3_SERVER_ENABLED:-false}
- Logging__LogLevel__Default=${LOG_LEVEL:-Information}
- Logging__LogLevel__NetworkOptimizer=${APP_LOG_LEVEL:-Information}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8042/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
network-optimizer-speedtest:
image: ghcr.io/ozark-connect/speedtest:latest
container_name: network-optimizer-speedtest
restart: unless-stopped
ports:
- "${OPENSPEEDTEST_PORT:-3005}:3000"
environment:
- TZ=${TZ:-America/Chicago}
- HOST_IP=${HOST_IP:-}
- HOST_NAME=${HOST_NAME:-}
- OPENSPEEDTEST_PORT=${OPENSPEEDTEST_PORT:-3005}
- OPENSPEEDTEST_HOST=${OPENSPEEDTEST_HOST:-}
- OPENSPEEDTEST_HTTPS=${OPENSPEEDTEST_HTTPS:-false}
- OPENSPEEDTEST_HTTPS_PORT=${OPENSPEEDTEST_HTTPS_PORT:-443}
- REVERSE_PROXIED_HOST_NAME=${REVERSE_PROXIED_HOST_NAME:-}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
'
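# Base64-encode so the multi-line YAML survives quoting across the pct exec boundary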
local encoded_compose
encoded_compose=$(echo "$compose_content" | base64 -w 0)
pct exec "$CT_ID" -- bash -c "echo '$encoded_compose' | base64 -d > $app_dir/docker-compose.yml"
msg_ok "docker-compose.yml created"
msg_info "Downloading .env.example (reference)..."
pct exec "$CT_ID" -- curl -fsSL \
"https://raw.githubusercontent.com/${GITHUB_REPO}/${GITHUB_BRANCH}/docker/.env.example" \
-o "$app_dir/.env.example"
msg_ok ".env.example downloaded"
msg_info "Creating environment configuration..."
# Get container IP for HOST_IP setting
local container_ip
container_ip=$(pct exec "$CT_ID" -- hostname -I 2>/dev/null | awk '{print $1}')
# Build .env content
local env_content="# Network Optimizer Configuration
# Generated by Proxmox installation script
# See .env.example for all available options
TZ=${APP_TZ}
# Host identity for speed testing and CORS
HOST_IP=${container_ip}"
# Only set HOST_NAME if user explicitly enabled hostname redirects
if [[ "$APP_HOSTNAME_REDIRECT" == "true" ]]; then
env_content="${env_content}
HOST_NAME=${CT_HOSTNAME}"
fi
env_content="${env_content}
# Speed testing
OPENSPEEDTEST_PORT=${APP_SPEEDTEST_PORT}
IPERF3_SERVER_ENABLED=${APP_IPERF3_ENABLED}"
if [[ -n "$APP_REVERSE_PROXY_HOST" ]]; then
env_content="${env_content}
# Reverse proxy configuration
REVERSE_PROXIED_HOST_NAME=${APP_REVERSE_PROXY_HOST}"
fi
if [[ "$APP_TRAEFIK_ENABLED" == "true" ]]; then
env_content="${env_content}
# Traefik handles public access - bind app to localhost only
BIND_LOCALHOST_ONLY=true"
fi
if [[ "$APP_GEOLOCATION" == "true" ]]; then
env_content="${env_content}
# Geo location tagging (HTTPS speed test)
OPENSPEEDTEST_HTTPS=true
OPENSPEEDTEST_HOST=${APP_OPENSPEEDTEST_HOST}"
fi
if [[ -n "$APP_PASSWORD" ]]; then
env_content="${env_content}
# Admin password
APP_PASSWORD=${APP_PASSWORD}"
fi
# Write .env file using base64 encoding to handle special characters
local encoded_content
encoded_content=$(echo "$env_content" | base64 -w 0)
pct exec "$CT_ID" -- bash -c "echo '$encoded_content' | base64 -d > $app_dir/.env"
msg_ok "Environment configured"
# Create data directories
msg_info "Creating data directories..."
pct exec "$CT_ID" -- bash -c "mkdir -p $app_dir/data $app_dir/logs $app_dir/ssh-keys"
msg_ok "Data directories created"
msg_info "Pulling Docker images (this may take a few minutes)..."
pct exec "$CT_ID" -- bash -c "cd $app_dir && docker compose pull"
msg_ok "Docker images pulled"
msg_info "Starting services..."
pct exec "$CT_ID" -- bash -c "cd $app_dir && docker compose up -d"
msg_ok "Services started"
}
deploy_traefik() {
if [[ "$APP_TRAEFIK_ENABLED" != "true" ]]; then
return
fi
header "Deploying Traefik HTTPS Proxy"
local proxy_dir="/opt/network-optimizer-proxy"
local proxy_repo="Ozark-Connect/NetworkOptimizer-Proxy"
local proxy_branch="main"
msg_info "Creating proxy directory..."
pct exec "$CT_ID" -- mkdir -p "$proxy_dir/dynamic" "$proxy_dir/acme"
msg_ok "Directory created"
msg_info "Downloading Traefik configuration files..."
pct exec "$CT_ID" -- curl -fsSL \
"https://raw.githubusercontent.com/${proxy_repo}/${proxy_branch}/docker-compose.yml" \
-o "$proxy_dir/docker-compose.yml"
pct exec "$CT_ID" -- curl -fsSL \
"https://raw.githubusercontent.com/${proxy_repo}/${proxy_branch}/config.example.yml" \
-o "$proxy_dir/config.example.yml"
pct exec "$CT_ID" -- curl -fsSL \
"https://raw.githubusercontent.com/${proxy_repo}/${proxy_branch}/.env.example" \
-o "$proxy_dir/.env.example"
msg_ok "Configuration files downloaded"
msg_info "Generating dynamic configuration..."
pct exec "$CT_ID" -- bash -c "
sed -e 's/optimizer\\.example\\.com/${TRAEFIK_OPTIMIZER_HOSTNAME}/g' \
-e 's/speedtest\\.example\\.com/${TRAEFIK_SPEEDTEST_HOSTNAME}/g' \
-e 's|http://localhost:3005|http://localhost:${APP_SPEEDTEST_PORT}|g' \
'$proxy_dir/config.example.yml' > '$proxy_dir/dynamic/config.yml'
"
msg_ok "Dynamic configuration generated"
msg_info "Creating environment file..."
local proxy_env_content="# Traefik Proxy - Generated by Proxmox installation script
ACME_EMAIL=${TRAEFIK_ACME_EMAIL}
CF_DNS_API_TOKEN=${TRAEFIK_CF_DNS_API_TOKEN}"
local encoded_proxy_env
encoded_proxy_env=$(echo "$proxy_env_content" | base64 -w 0)
pct exec "$CT_ID" -- bash -c "echo '$encoded_proxy_env' | base64 -d > $proxy_dir/.env"
msg_ok "Environment file created"
msg_info "Setting up certificate storage..."
pct exec "$CT_ID" -- bash -c "touch $proxy_dir/acme/acme.json && chmod 600 $proxy_dir/acme/acme.json"
msg_ok "Certificate storage ready"
msg_info "Pulling Traefik image..."
pct exec "$CT_ID" -- bash -c "cd $proxy_dir && docker compose pull"
msg_ok "Traefik image pulled"
msg_info "Starting Traefik..."
pct exec "$CT_ID" -- bash -c "cd $proxy_dir && docker compose up -d"
msg_ok "Traefik started"
}
wait_for_healthy() {
header "Waiting for Application"
local max_wait=120
local waited=0
echo -ne "${BL}[...]${CL} Waiting for health check..."
while ! pct exec "$CT_ID" -- curl -sf http://localhost:8042/api/health &>/dev/null; do
sleep 2
((waited+=2))
echo -ne "\r${BL}[...]${CL} Waiting for health check... ${waited}s "
if [[ $waited -ge $max_wait ]]; then
echo ""
msg_warn "Health check timed out, but services may still be starting."
msg_info "Check status with: pct exec $CT_ID -- docker logs network-optimizer"
return 1
fi
done
echo ""
msg_ok "Application is healthy"
return 0
}
get_container_ip() {
pct exec "$CT_ID" -- hostname -I 2>/dev/null | awk '{print $1}'
}
show_completion() {
header "Installation Complete!"
local container_ip
container_ip=$(get_container_ip)
echo -e "${GN}${BLD}$APP_NAME has been successfully installed!${CL}\n"
echo -e "${BLD}Access Information:${CL}"
if [[ "$APP_TRAEFIK_ENABLED" == "true" ]]; then
echo -e " Web UI: ${CY}https://${TRAEFIK_OPTIMIZER_HOSTNAME}${CL}"
echo -e " SpeedTest: ${CY}https://${TRAEFIK_SPEEDTEST_HOSTNAME}${CL} ${DIM}(geo location enabled)${CL}"
else
echo -e " Web UI: ${CY}http://${container_ip}:8042${CL}"
echo -e " OpenSpeedTest: ${CY}http://${container_ip}:${APP_SPEEDTEST_PORT}${CL}"
fi
if [[ "$APP_IPERF3_ENABLED" == "true" ]]; then
echo -e " iperf3 Server: ${CY}${container_ip}:5201${CL}"
fi
if [[ "$APP_TRAEFIK_ENABLED" != "true" ]]; then
if [[ -n "$APP_REVERSE_PROXY_HOST" ]]; then
echo -e " Reverse Proxy: ${CY}https://${APP_REVERSE_PROXY_HOST}${CL}"
fi
if [[ "$APP_GEOLOCATION" == "true" ]]; then
echo -e " Speed Test: ${CY}https://${APP_OPENSPEEDTEST_HOST}${CL} ${DIM}(geo location enabled)${CL}"
fi
fi
if [[ -z "$APP_PASSWORD" ]]; then
echo -e "\n${BLD}Admin Password:${CL}"
echo -e " ${YW}Auto-generated on first run. View with:${CL}"
echo -e " ${DIM}pct exec $CT_ID -- docker logs network-optimizer 2>&1 | grep -A5 'AUTO-GENERATED'${CL}"
echo -e " ${DIM}Set a permanent password in Settings > Admin Password after login.${CL}"
fi
echo -e "\n${BLD}Container Management:${CL}"
echo -e " Console: ${DIM}pct enter $CT_ID${CL}"
echo -e " Start: ${DIM}pct start $CT_ID${CL}"
echo -e " Stop: ${DIM}pct stop $CT_ID${CL}"
echo -e " Logs: ${DIM}pct exec $CT_ID -- docker logs -f network-optimizer${CL}"
if [[ "$APP_SSH_ENABLED" == "true" ]]; then
echo -e " SSH: ${DIM}ssh root@${container_ip}${CL}"
echo -e " ${YW}Set root password: pct exec $CT_ID -- passwd${CL}"
fi
echo -e "\n${BLD}Application Management:${CL}"
echo -e " Directory: ${DIM}/opt/network-optimizer${CL}"
echo -e " Config: ${DIM}/opt/network-optimizer/.env${CL}"
echo -e " Reference: ${DIM}/opt/network-optimizer/.env.example${CL} ${DIM}(all options)${CL}"
echo -e " Update: ${DIM}pct exec $CT_ID -- bash -c 'cd /opt/network-optimizer && docker compose pull && docker compose up -d'${CL}"
if [[ "$APP_TRAEFIK_ENABLED" == "true" ]]; then
echo -e "\n${BLD}Traefik HTTPS Proxy:${CL}"
echo -e " ${YW}Certificates may take a minute to issue on first start.${CL}"
echo -e " Directory: ${DIM}/opt/network-optimizer-proxy${CL}"
echo -e " Config: ${DIM}/opt/network-optimizer-proxy/dynamic/config.yml${CL}"
echo -e " Logs: ${DIM}pct exec $CT_ID -- docker logs -f traefik-proxy${CL}"
echo -e " Update: ${DIM}pct exec $CT_ID -- bash -c 'cd /opt/network-optimizer-proxy && docker compose pull && docker compose up -d'${CL}"
fi
echo -e "\n${BLD}First Run:${CL}"
if [[ "$APP_TRAEFIK_ENABLED" == "true" ]]; then
echo -e " 1. Open ${CY}https://${TRAEFIK_OPTIMIZER_HOSTNAME}${CL} ${DIM}(wait ~1 min for certificates)${CL}"
else
echo -e " 1. Open ${CY}http://${container_ip}:8042${CL}"
fi
echo -e " 2. Log in with the auto-generated password (or the one you set)"
echo -e " 3. Go to Settings and connect to your UniFi controller"
echo -e " 4. Run your first Security Audit!"
echo -e "\n${BLD}Documentation:${CL}"
echo -e " ${DIM}https://github.com/${GITHUB_REPO}/blob/main/docker/DEPLOYMENT.md${CL}"
echo ""
}
# =============================================================================
# Main Execution
# =============================================================================
main() {
show_banner
# Pre-flight checks
check_root
check_proxmox
# Interactive configuration
configure_container
configure_application
confirm_settings
# Installation
download_template
create_container
start_container
configure_ssh
install_dependencies
deploy_application
deploy_traefik
wait_for_healthy || true
# Done
show_completion
}
# Run main - this script is designed to be executed, not sourced
main "$@"
================================================
FILE: scripts/publish.sh
================================================
#!/bin/bash
# Publish for production
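# Usage: ./scripts/publish.sh [output-dir]  (default: <repo-root>/publish)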
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"
OUTPUT_DIR="${1:-$PROJECT_ROOT/publish}"
echo "Publishing to $OUTPUT_DIR..."
dotnet publish src/NetworkOptimizer.Web/NetworkOptimizer.Web.csproj \
-c Release \
-o "$OUTPUT_DIR"
echo ""
echo "Published to: $OUTPUT_DIR"
================================================
FILE: scripts/reset-password.ps1
================================================
<#
.SYNOPSIS
Resets the Network Optimizer admin password on Windows.
.DESCRIPTION
Stops the NetworkOptimizer service, clears the admin password from the
SQLite database, restarts the service, and extracts the auto-generated
temporary password from the log file.
.PARAMETER InstallDir
Override the install directory. By default, auto-detected from the
Windows service registration or defaults to
"C:\Program Files\Ozark Connect\Network Optimizer".
.PARAMETER Force
Skip the confirmation prompt.
.PARAMETER TimeoutSeconds
How long to wait for the service to become healthy (default: 60).
.EXAMPLE
.\reset-password.ps1
.\reset-password.ps1 -Force
.\reset-password.ps1 -InstallDir "D:\NetworkOptimizer"
#>
[CmdletBinding()]
param(
[string]$InstallDir,
[switch]$Force,
[int]$TimeoutSeconds = 60
)
$ErrorActionPreference = 'Stop'
# =============================================================================
# Require Administrator
# =============================================================================
$isAdmin = ([Security.Principal.WindowsPrincipal] `
[Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole(
[Security.Principal.WindowsBuiltInRole]::Administrator)
if (-not $isAdmin) {
Write-Host "ERROR: This script must be run as Administrator." -ForegroundColor Red
Write-Host "Right-click PowerShell and select 'Run as administrator', then try again."
exit 1
}
# =============================================================================
# Constants
# =============================================================================
$ServiceName = "NetworkOptimizer"
$DbFileName = "network_optimizer.db"
$HealthUrl = "http://localhost:8042/api/health"
# =============================================================================
# Auto-detect Install Directory
# =============================================================================
if (-not $InstallDir) {
# Try 1: Get path from Windows service
$svc = Get-CimInstance Win32_Service -Filter "Name='$ServiceName'" -ErrorAction SilentlyContinue
if ($svc -and $svc.PathName) {
$exePath = $svc.PathName -replace '"', ''
$InstallDir = Split-Path $exePath -Parent
}
# Try 2: Registry (WiX installer writes InstallFolder)
if (-not $InstallDir) {
$regPaths = @(
"HKLM:\SOFTWARE\Ozark Connect\Network Optimizer",
"HKLM:\SOFTWARE\WOW6432Node\Ozark Connect\Network Optimizer"
)
foreach ($rp in $regPaths) {
if (Test-Path $rp) {
$regVal = Get-ItemProperty $rp -Name "InstallFolder" -ErrorAction SilentlyContinue
if ($regVal) { $InstallDir = $regVal.InstallFolder; break }
}
}
}
# Try 3: Default path
if (-not $InstallDir) {
$InstallDir = "C:\Program Files\Ozark Connect\Network Optimizer"
}
}
Write-Host ""
Write-Host "Network Optimizer - Password Reset" -ForegroundColor Cyan
Write-Host "===================================" -ForegroundColor Cyan
Write-Host ""
Write-Host "Install directory: $InstallDir"
# =============================================================================
# Verify database exists
# =============================================================================
$dbPath = Join-Path $InstallDir "data\$DbFileName"
if (-not (Test-Path $dbPath)) {
Write-Host "ERROR: Database not found at $dbPath" -ForegroundColor Red
Write-Host "Use -InstallDir to specify the correct installation directory."
exit 1
}
Write-Host "Database found: $dbPath" -ForegroundColor Green
# =============================================================================
# Check for sqlite3
# =============================================================================
$sqlite3 = Get-Command sqlite3 -ErrorAction SilentlyContinue
if (-not $sqlite3) {
# Check in the install directory (bundled with WiX installer)
$bundled = Join-Path $InstallDir "sqlite3.exe"
if (Test-Path $bundled) {
$sqlite3Path = $bundled
} else {
Write-Host ""
Write-Host "ERROR: sqlite3 not found in PATH or install directory." -ForegroundColor Red
Write-Host ""
Write-Host "Install it with: winget install SQLite.SQLite" -ForegroundColor Yellow
Write-Host "Then restart this terminal and try again."
exit 1
}
} else {
$sqlite3Path = $sqlite3.Source
}
Write-Host "sqlite3: $sqlite3Path" -ForegroundColor Green
Write-Host ""
# =============================================================================
# Confirm with user
# =============================================================================
if (-not $Force) {
Write-Host "This will:" -ForegroundColor Yellow
Write-Host " 1. Stop the NetworkOptimizer service"
Write-Host " 2. Clear the admin password from the database"
Write-Host " 3. Restart the service"
Write-Host " 4. Display the new auto-generated temporary password"
Write-Host ""
$confirm = Read-Host "Continue? (y/N)"
if ($confirm -notmatch '^[Yy]') {
Write-Host "Cancelled."
exit 0
}
Write-Host ""
}
# =============================================================================
# Stop the service
# =============================================================================
$svcObj = Get-Service $ServiceName -ErrorAction SilentlyContinue
if (-not $svcObj) {
Write-Host "ERROR: Service '$ServiceName' not found." -ForegroundColor Red
Write-Host "Is Network Optimizer installed as a Windows service?"
exit 1
}
if ($svcObj.Status -eq 'Running') {
Write-Host "Stopping service..." -NoNewline
Stop-Service $ServiceName -Force
$svcObj.WaitForStatus('Stopped', [TimeSpan]::FromSeconds(30))
Write-Host " done." -ForegroundColor Green
} else {
Write-Host "Service is already stopped." -ForegroundColor Yellow
}
# =============================================================================
# Clear admin password
# =============================================================================
Write-Host "Clearing admin password..." -NoNewline
& $sqlite3Path $dbPath "UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
if ($LASTEXITCODE -ne 0) {
Write-Host " FAILED." -ForegroundColor Red
Write-Host "sqlite3 returned exit code $LASTEXITCODE"
exit 1
}
Write-Host " done." -ForegroundColor Green
# =============================================================================
# Start the service
# =============================================================================
Write-Host "Starting service..." -NoNewline
Start-Service $ServiceName
Write-Host " done." -ForegroundColor Green
# =============================================================================
# Wait for health endpoint
# =============================================================================
Write-Host "Waiting for application to start..." -NoNewline
$deadline = (Get-Date).AddSeconds($TimeoutSeconds)
$healthy = $false
while ((Get-Date) -lt $deadline) {
try {
$resp = Invoke-WebRequest -Uri $HealthUrl -UseBasicParsing -TimeoutSec 3 -ErrorAction SilentlyContinue
if ($resp.StatusCode -eq 200) {
$healthy = $true
break
}
} catch {
# Not ready yet
}
Start-Sleep -Seconds 2
Write-Host "." -NoNewline
}
if ($healthy) {
Write-Host " ready!" -ForegroundColor Green
} else {
Write-Host " timed out." -ForegroundColor Yellow
Write-Host "The service may still be starting. Check the logs manually."
}
# =============================================================================
# Extract password from log
# =============================================================================
Write-Host ""
$logDir = Join-Path $InstallDir "logs"
$today = (Get-Date).ToString("yyyyMMdd")
$logFile = Join-Path $logDir "networkoptimizer-$today.log"
$password = $null
if (Test-Path $logFile) {
# Find the last occurrence of the password line after AUTO-GENERATED banner
$logContent = Get-Content $logFile -Tail 100
for ($i = $logContent.Count - 1; $i -ge 0; $i--) {
if ($logContent[$i] -match 'Password:\s+(\S+)') {
$password = $Matches[1]
break
}
}
}
if ($password) {
Write-Host "===================================" -ForegroundColor Green
Write-Host " Password reset successful!" -ForegroundColor Green
Write-Host "===================================" -ForegroundColor Green
Write-Host ""
Write-Host " Temporary password: $password" -ForegroundColor Cyan
Write-Host ""
Write-Host " Log in to Network Optimizer with this password,"
Write-Host " then go to Settings to set a permanent one."
Write-Host ""
} else {
Write-Host "Password reset completed, but could not extract the new password from logs." -ForegroundColor Yellow
Write-Host ""
Write-Host "Check the log file manually:"
Write-Host " $logFile" -ForegroundColor Cyan
Write-Host ""
Write-Host "Or look for the password in the Windows Event Viewer under Application logs."
Write-Host "Search for 'AUTO-GENERATED' in the log output."
Write-Host ""
}
================================================
FILE: scripts/reset-password.sh
================================================
#!/usr/bin/env bash
# Network Optimizer - Password Reset Script
# https://github.com/Ozark-Connect/NetworkOptimizer
#
# Resets the admin password by clearing it from the database and restarting
# the service. Works with Docker, macOS native, and Linux native deployments.
#
# Usage:
# curl -fsSL https://raw.githubusercontent.com/Ozark-Connect/NetworkOptimizer/main/scripts/reset-password.sh | bash
# bash reset-password.sh [--docker|--macos|--linux] [--container NAME] [--data-dir PATH] [--force]
set -euo pipefail
# =============================================================================
# Colors and Formatting (matches proxmox/install.sh)
# =============================================================================
if [[ -t 1 ]]; then
readonly RD='\033[0;31m'
readonly GN='\033[0;32m'
readonly YW='\033[0;33m'
readonly BL='\033[0;34m'
readonly CY='\033[0;36m'
readonly BLD='\033[1m'
readonly CL='\033[0m'
else
readonly RD='' GN='' YW='' BL='' CY='' BLD='' CL=''
fi
msg_info() { echo -e "${BL}[INFO]${CL} $1"; }
msg_ok() { echo -e "${GN}[OK]${CL} $1"; }
msg_warn() { echo -e "${YW}[WARN]${CL} $1"; }
msg_error() { echo -e "${RD}[ERROR]${CL} $1"; }
header() {
echo ""
echo -e "${BLD}${CY}Network Optimizer - Password Reset${CL}"
echo -e "${BLD}${CY}===================================${CL}"
echo ""
}
# =============================================================================
# Defaults
# =============================================================================
MODE="" # docker, macos, linux (auto-detected if empty)
CONTAINER="network-optimizer"
DATA_DIR=""
FORCE=false
TIMEOUT=60
HEALTH_URL="http://localhost:8042/api/health"
# =============================================================================
# Parse Arguments
# =============================================================================
while [[ $# -gt 0 ]]; do
case "$1" in
--docker) MODE="docker"; shift ;;
--macos) MODE="macos"; shift ;;
--linux) MODE="linux"; shift ;;
--container) CONTAINER="$2"; shift 2 ;;
--data-dir) DATA_DIR="$2"; shift 2 ;;
--force) FORCE=true; shift ;;
--timeout) TIMEOUT="$2"; shift 2 ;;
-h|--help)
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Options:"
echo " --docker Force Docker mode"
echo " --macos Force macOS native mode"
echo " --linux Force Linux native mode"
echo " --container NAME Docker container name (default: network-optimizer)"
echo " --data-dir PATH Override database directory path"
echo " --force Skip confirmation prompt"
echo " --timeout SECS Health check timeout (default: 60)"
echo " -h, --help Show this help"
exit 0
;;
*)
msg_error "Unknown option: $1"
echo "Use --help for usage information."
exit 1
;;
esac
done
# Auto-force when stdin is not a terminal (e.g., curl | bash)
if [[ ! -t 0 ]]; then
FORCE=true
fi
# =============================================================================
# Auto-detect Mode
# =============================================================================
detect_mode() {
if [[ -n "$MODE" ]]; then
return
fi
# Check for Docker container
if command -v docker &>/dev/null; then
if docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER}$"; then
MODE="docker"
msg_info "Detected Docker container: $CONTAINER"
return
fi
fi
# Check for macOS native install
if [[ "$(uname)" == "Darwin" ]]; then
if [[ -d "$HOME/network-optimizer" ]] || \
[[ -f "$HOME/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist" ]]; then
MODE="macos"
msg_info "Detected macOS native installation"
return
fi
fi
# Check for Linux native install
if [[ "$(uname)" == "Linux" ]]; then
if systemctl list-unit-files 2>/dev/null | grep -qi "networkoptimizer\|network-optimizer"; then
MODE="linux"
msg_info "Detected Linux native installation (systemd)"
return
fi
if pgrep -f "NetworkOptimizer.Web" &>/dev/null; then
MODE="linux"
msg_info "Detected running NetworkOptimizer process"
return
fi
if [[ -d "/opt/network-optimizer" ]]; then
MODE="linux"
msg_info "Detected Linux installation at /opt/network-optimizer"
return
fi
fi
msg_error "Could not auto-detect installation type."
echo ""
echo "Please specify one of:"
echo " --docker Docker container"
echo " --macos macOS native install"
echo " --linux Linux native install"
exit 1
}
# =============================================================================
# Check for sqlite3
# =============================================================================
check_sqlite3() {
if command -v sqlite3 &>/dev/null; then
return 0
fi
msg_error "sqlite3 is not installed."
echo ""
if [[ "$(uname)" == "Darwin" ]]; then
echo "sqlite3 should be included with macOS. Try:"
echo " brew install sqlite3"
elif command -v apt-get &>/dev/null; then
echo "Install with: sudo apt-get install -y sqlite3"
elif command -v dnf &>/dev/null; then
echo "Install with: sudo dnf install -y sqlite"
elif command -v pacman &>/dev/null; then
echo "Install with: sudo pacman -S sqlite"
else
echo "Install sqlite3 using your package manager."
fi
exit 1
}
# =============================================================================
# Wait for health endpoint
# =============================================================================
wait_for_health() {
msg_info "Waiting for application to start..."
local deadline=$((SECONDS + TIMEOUT))
while [[ $SECONDS -lt $deadline ]]; do
if curl -sf "$HEALTH_URL" -o /dev/null --max-time 3 2>/dev/null; then
msg_ok "Application is ready"
return 0
fi
sleep 2
done
msg_warn "Health check timed out after ${TIMEOUT}s. The service may still be starting."
return 1
}
# =============================================================================
# Confirm with user
# =============================================================================
confirm() {
if [[ "$FORCE" == true ]]; then
return 0
fi
echo "This will:"
echo " 1. Stop the Network Optimizer service"
echo " 2. Clear the admin password from the database"
echo " 3. Restart the service"
echo " 4. Display the new auto-generated temporary password"
echo ""
read -rp "Continue? (y/N) " answer
if [[ ! "$answer" =~ ^[Yy] ]]; then
echo "Cancelled."
exit 0
fi
echo ""
}
# =============================================================================
# Docker Mode
# =============================================================================
reset_docker() {
msg_info "Mode: Docker (container: $CONTAINER)"
echo ""
# Check container exists
if ! docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER}$"; then
msg_error "Container '$CONTAINER' not found."
echo "Use --container NAME to specify a different container name."
exit 1
fi
# If container is stopped, start it temporarily for docker exec
if ! docker ps --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER}$"; then
msg_warn "Container is stopped. Starting it temporarily..."
docker start "$CONTAINER" >/dev/null
sleep 3
fi
confirm
# Clear password via docker exec
msg_info "Clearing admin password..."
docker exec "$CONTAINER" sqlite3 /app/data/network_optimizer.db \
"UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
msg_ok "Password cleared"
# Restart container
msg_info "Restarting container..."
docker restart "$CONTAINER" >/dev/null
msg_ok "Container restarted"
# Wait for health
wait_for_health || true
# Extract password from docker logs
echo ""
local password
password=$(docker logs --since 2m "$CONTAINER" 2>&1 \
| grep "Password:" | tail -1 \
| sed -E 's/.*Password:[[:space:]]+//' | tr -d '[:space:]')
show_result "$password"
}
# =============================================================================
# macOS Native Mode
# =============================================================================
reset_macos() {
msg_info "Mode: macOS native"
echo ""
local plist="$HOME/Library/LaunchAgents/net.ozarkconnect.networkoptimizer.plist"
local db_dir="${DATA_DIR:-$HOME/Library/Application Support/NetworkOptimizer}"
local db_path="$db_dir/network_optimizer.db"
# Detect install directory from plist WorkingDirectory or running process
local install_dir=""
if [[ -f "$plist" ]]; then
install_dir=$(/usr/libexec/PlistBuddy -c "Print :WorkingDirectory" "$plist" 2>/dev/null || true)
fi
if [[ -z "$install_dir" ]]; then
        # Trim only trailing whitespace; stripping all whitespace would mangle paths containing spaces
        install_dir=$(ps aux | grep 'NetworkOptimizer.Web' | grep -v grep | awk '{for(i=11;i<=NF;i++) printf "%s ",$i}' | sed 's|/NetworkOptimizer.Web.*||' | sed 's/[[:space:]]*$//')
fi
if [[ -z "$install_dir" ]]; then
install_dir="$HOME/network-optimizer"
fi
local log_file="$install_dir/logs/stdout.log"
msg_info "Install directory: $install_dir"
# Verify database
if [[ ! -f "$db_path" ]]; then
msg_error "Database not found at: $db_path"
echo "Use --data-dir to specify the correct data directory."
exit 1
fi
msg_ok "Database found: $db_path"
check_sqlite3
confirm
# Record log size before restart so we only read new output
local log_size_before=0
if [[ -f "$log_file" ]]; then
log_size_before=$(wc -c < "$log_file")
fi
# Stop service
if [[ -f "$plist" ]]; then
msg_info "Stopping service..."
launchctl unload "$plist" 2>/dev/null || true
sleep 2
msg_ok "Service stopped"
else
msg_warn "LaunchAgent plist not found at $plist"
msg_warn "You may need to stop the service manually."
fi
# Clear password
msg_info "Clearing admin password..."
sqlite3 "$db_path" "UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
msg_ok "Password cleared"
# Start service
if [[ -f "$plist" ]]; then
msg_info "Starting service..."
launchctl load "$plist"
msg_ok "Service started"
else
msg_warn "Cannot auto-start - start the service manually."
fi
# Wait for health
wait_for_health || true
# Poll for new password in log (up to 15s)
echo ""
local password=""
if [[ -f "$log_file" ]]; then
local deadline=$((SECONDS + 15))
while [[ $SECONDS -lt $deadline ]]; do
local new_bytes=$(( $(wc -c < "$log_file") - log_size_before ))
if [[ $new_bytes -gt 0 ]]; then
password=$(tail -c "$new_bytes" "$log_file" \
| grep "Password:" | tail -1 \
| sed -E 's/.*Password:[[:space:]]+//' | tr -d '[:space:]')
if [[ -n "$password" ]]; then break; fi
fi
sleep 1
done
fi
show_result "$password"
}
# =============================================================================
# Linux Native Mode
# =============================================================================
reset_linux() {
msg_info "Mode: Linux native"
echo ""
# Find the systemd service name
local service_name=""
for name in networkoptimizer NetworkOptimizer network-optimizer; do
        if systemctl list-unit-files "${name}.service" 2>/dev/null | grep -q "$name"; then
            service_name="$name"
            break
        fi
done
# Find database
local db_path=""
if [[ -n "$DATA_DIR" ]]; then
db_path="$DATA_DIR/network_optimizer.db"
else
for candidate in \
"/opt/network-optimizer/data/network_optimizer.db" \
"$HOME/.local/share/NetworkOptimizer/network_optimizer.db" \
"/var/lib/network-optimizer/network_optimizer.db"; do
if [[ -f "$candidate" ]]; then
db_path="$candidate"
break
fi
done
fi
if [[ -z "$db_path" ]] || [[ ! -f "$db_path" ]]; then
msg_error "Database not found."
echo "Searched:"
echo " /opt/network-optimizer/data/network_optimizer.db"
echo " ~/.local/share/NetworkOptimizer/network_optimizer.db"
echo " /var/lib/network-optimizer/network_optimizer.db"
echo ""
echo "Use --data-dir to specify the correct data directory."
exit 1
fi
msg_ok "Database found: $db_path"
# Detect install directory from systemd or running process
local install_dir=""
if [[ -n "$service_name" ]]; then
install_dir=$(systemctl show "$service_name" -p WorkingDirectory --value 2>/dev/null || true)
fi
if [[ -z "$install_dir" ]]; then
install_dir=$(readlink -f /proc/$(pgrep -f "NetworkOptimizer.Web" | head -1)/cwd 2>/dev/null || true)
fi
if [[ -z "$install_dir" ]]; then
install_dir="/opt/network-optimizer"
fi
msg_info "Install directory: $install_dir"
check_sqlite3
confirm
# Record log size before restart so we only read new output
local log_file="$install_dir/logs/stdout.log"
local log_size_before=0
if [[ -f "$log_file" ]]; then
log_size_before=$(wc -c < "$log_file")
fi
# Stop service
if [[ -n "$service_name" ]]; then
msg_info "Stopping service ($service_name)..."
sudo systemctl stop "$service_name"
msg_ok "Service stopped"
else
msg_warn "No systemd service found. Attempting to kill the process..."
if pkill -f "NetworkOptimizer.Web" 2>/dev/null; then
msg_ok "Process stopped"
else
msg_warn "Could not stop process. It may not be running."
fi
fi
# Clear password
msg_info "Clearing admin password..."
sqlite3 "$db_path" "UPDATE AdminSettings SET Password = NULL, Enabled = 0;"
msg_ok "Password cleared"
# Start service
if [[ -n "$service_name" ]]; then
msg_info "Starting service ($service_name)..."
sudo systemctl start "$service_name"
msg_ok "Service started"
else
msg_warn "No systemd service found. Start the application manually."
msg_warn "The new password will appear in the application logs."
echo ""
return
fi
# Wait for health
wait_for_health || true
# Poll for new password in journalctl or log file (up to 15s)
echo ""
local password=""
local deadline=$((SECONDS + 15))
while [[ $SECONDS -lt $deadline ]] && [[ -z "$password" ]]; do
if [[ -n "$service_name" ]]; then
password=$(journalctl -u "$service_name" --since "2 minutes ago" --no-pager 2>/dev/null \
| grep "Password:" | tail -1 \
| sed -E 's/.*Password:[[:space:]]+//' | tr -d '[:space:]')
fi
# Fallback: check new log output only
if [[ -z "$password" ]] && [[ -f "$log_file" ]]; then
local new_bytes=$(( $(wc -c < "$log_file") - log_size_before ))
if [[ $new_bytes -gt 0 ]]; then
password=$(tail -c "$new_bytes" "$log_file" \
| grep "Password:" | tail -1 \
| sed -E 's/.*Password:[[:space:]]+//' | tr -d '[:space:]')
fi
fi
if [[ -z "$password" ]]; then sleep 1; fi
done
show_result "$password"
}
# =============================================================================
# Display Result
# =============================================================================
show_result() {
local password="$1"
if [[ -n "$password" ]]; then
echo -e "${GN}===================================${CL}"
echo -e "${GN} Password reset successful!${CL}"
echo -e "${GN}===================================${CL}"
echo ""
echo -e " Temporary password: ${CY}${BLD}${password}${CL}"
echo ""
echo " Log in to Network Optimizer with this password,"
echo " then go to Settings to set a permanent one."
echo ""
else
msg_warn "Password reset completed, but could not extract the new password from logs."
echo ""
echo "Check the logs manually:"
if [[ "$MODE" == "docker" ]]; then
echo " docker logs $CONTAINER 2>&1 | grep -A5 'AUTO-GENERATED'"
elif [[ "$MODE" == "macos" ]]; then
echo " grep 'Password:' ~/network-optimizer/logs/stdout.log | tail -1"
else
echo " journalctl -u networkoptimizer --since '5 minutes ago' | grep 'Password:'"
fi
echo ""
echo "Look for the line containing 'AUTO-GENERATED ADMIN PASSWORD'."
echo ""
fi
}
# =============================================================================
# Main
# =============================================================================
header
detect_mode
echo ""
case "$MODE" in
docker) reset_docker ;;
macos) reset_macos ;;
linux) reset_linux ;;
*)
msg_error "Unknown mode: $MODE"
exit 1
;;
esac
================================================
FILE: scripts/sync-perf-tweaks.ps1
================================================
# Syncs performance tweak scripts from the unifi-perf-tweaks source repo
# into the NetworkOptimizer embedded resources directory.
#
# Run before building to pick up the latest scripts:
# pwsh scripts/sync-perf-tweaks.ps1
#
# Source repo: https://github.com/tvancott42/unifi-perf-tweaks (private)
param(
[string]$SourceRepo = "$env:USERPROFILE\OneDrive\PersonalProjects\OpenSource\unifi-perf-tweaks"
)
$DestDir = Join-Path $PSScriptRoot "..\src\NetworkOptimizer.Web\Resources\PerfTweaks"
if (-not (Test-Path $SourceRepo)) {
Write-Error "Source repo not found at: $SourceRepo"
exit 1
}
if (-not (Test-Path $DestDir)) {
New-Item -ItemType Directory -Path $DestDir -Force | Out-Null
}
$scripts = @(
"scripts/06-mongodb-ssd-offload.sh",
"scripts/07-mongodb-ssd-backup.sh",
"scripts/10-journald-volatile.sh",
"scripts/15-fan-control-tuning.sh",
"scripts/20-sfp-sgmiiplus.sh"
)
$binaries = @(
"modules/force-uniphy1-sgmiiplus/force_uniphy1_sgmiiplus.ko"
)
foreach ($file in $scripts + $binaries) {
$src = Join-Path $SourceRepo $file
$dest = Join-Path $DestDir (Split-Path $file -Leaf)
if (Test-Path $src) {
Copy-Item $src $dest -Force
Write-Host " Copied: $(Split-Path $file -Leaf)"
} else {
Write-Warning " Missing: $file"
}
}
Write-Host "Sync complete."
================================================
FILE: scripts/test.sh
================================================
#!/bin/bash
# Run all tests
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"
echo "Running all tests..."
dotnet test --no-restore
echo ""
echo "All tests passed!"
================================================
FILE: scripts/watch.sh
================================================
#!/bin/bash
# Run the web app with hot reload
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT/src/NetworkOptimizer.Web"
echo "Starting with hot reload..."
echo "Access at http://localhost:5000"
echo ""
dotnet watch run
================================================
FILE: src/NetworkOptimizer.Agents/.gitignore
================================================
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
# User-specific files
*.suo
*.user
*.userosscache
*.sln.docstates
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Ll]og/
# Visual Studio cache/options directory
.vs/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# Agent database files
*.db
*.db-shm
*.db-wal
# Sensitive configuration files
**/appsettings.*.json
!**/appsettings.json
credentials.json
secrets.json
# SSH keys
*.pem
*.ppk
id_rsa*
id_ed25519*
# Log files
*.log
logs/
# OS files
.DS_Store
Thumbs.db
================================================
FILE: src/NetworkOptimizer.Agents/AgentDeployer.cs
================================================
using System.Diagnostics;
using System.Text;
using Microsoft.Extensions.Logging;
using NetworkOptimizer.Agents.Models;
using Renci.SshNet;
using Renci.SshNet.Common;
namespace NetworkOptimizer.Agents;
/// <summary>
/// Deploys monitoring agents to remote systems via SSH
/// </summary>
public class AgentDeployer
{
    private readonly ILogger<AgentDeployer> _logger;
    private readonly ScriptRenderer _scriptRenderer;
    public AgentDeployer(ILogger<AgentDeployer> logger, ScriptRenderer scriptRenderer)
    {
        _logger = logger;
        _scriptRenderer = scriptRenderer;
    }
    /// <summary>
    /// Deploys an agent to a remote system
    /// </summary>
    /// <remarks>
    /// Some step lambdas are marked async for future SSH command execution but currently
    /// perform synchronous validation. Suppressing CS1998 until full async implementation.
    /// </remarks>
#pragma warning disable CS1998 // Async lambdas without await - will be async when SSH commands are added
    public async Task<DeploymentResult> DeployAgentAsync(AgentConfiguration config, CancellationToken cancellationToken = default)
{
var result = new DeploymentResult
{
AgentId = config.AgentId,
DeviceName = config.DeviceName,
AgentType = config.AgentType
};
try
{
_logger.LogInformation("Starting deployment of {AgentType} agent to {Device} ({Host})",
config.AgentType, config.DeviceName, config.SshCredentials.Host);
// Step 1: Validate credentials
await AddStepAsync(result, "Validate Credentials", async () =>
{
if (!config.SshCredentials.IsValid())
{
throw new InvalidOperationException("Invalid SSH credentials");
}
return "Credentials validated";
});
// Step 2: Test SSH connection
await AddStepAsync(result, "Test SSH Connection", async () =>
{
await TestConnectionAsync(config.SshCredentials, cancellationToken);
return $"Successfully connected to {config.SshCredentials.Host}";
});
// Step 3: Render templates
            Dictionary<string, string> renderedScripts = new();
await AddStepAsync(result, "Render Templates", async () =>
{
var templates = _scriptRenderer.GetTemplatesForAgent(config.AgentType);
foreach (var template in templates)
{
var rendered = await _scriptRenderer.RenderTemplateAsync(template, config);
var scriptName = template.Replace(".template", "");
renderedScripts[scriptName] = rendered;
}
return $"Rendered {renderedScripts.Count} templates";
});
// Step 4: Deploy scripts based on agent type
if (config.AgentType == AgentType.UDM || config.AgentType == AgentType.UCG)
{
await DeployUniFiAgentAsync(config, renderedScripts, result, cancellationToken);
}
else if (config.AgentType == AgentType.Linux)
{
await DeployLinuxAgentAsync(config, renderedScripts, result, cancellationToken);
}
// Step 5: Verify deployment
await AddStepAsync(result, "Verify Deployment", async () =>
{
result.Verification = await VerifyDeploymentAsync(config, cancellationToken);
if (!result.Verification.Passed)
{
throw new InvalidOperationException("Deployment verification failed: " +
string.Join(", ", result.Verification.Messages));
}
return "Deployment verified successfully";
});
result.Success = true;
result.Message = $"Successfully deployed {config.AgentType} agent to {config.DeviceName}";
_logger.LogInformation("Successfully deployed agent {AgentId} to {Device}",
config.AgentId, config.DeviceName);
}
catch (Exception ex)
{
result.Success = false;
result.Message = $"Deployment failed: {ex.Message}";
_logger.LogError(ex, "Failed to deploy agent {AgentId} to {Device}",
config.AgentId, config.DeviceName);
}
return result;
}
#pragma warning restore CS1998
    /// <summary>
    /// Tests SSH connection to the remote host
    /// </summary>
public async Task TestConnectionAsync(SshCredentials credentials, CancellationToken cancellationToken = default)
{
using var client = CreateSshClient(credentials);
try
{
await Task.Run(() =>
{
client.Connect();
_logger.LogDebug("SSH connection test successful to {Host}", credentials.Host);
}, cancellationToken);
}
catch (SshAuthenticationException ex)
{
_logger.LogError("SSH authentication failed for {Host}: {Error}", credentials.Host, ex.Message);
throw new InvalidOperationException($"SSH authentication failed: {ex.Message}", ex);
}
catch (SshConnectionException ex)
{
_logger.LogError("SSH connection failed for {Host}: {Error}", credentials.Host, ex.Message);
throw new InvalidOperationException($"SSH connection failed: {ex.Message}", ex);
}
finally
{
if (client.IsConnected)
{
client.Disconnect();
}
}
}
    /// <summary>
    /// Deploys agent to UniFi device (UDM/UCG)
    /// </summary>
    private async Task DeployUniFiAgentAsync(
        AgentConfiguration config,
        Dictionary<string, string> scripts,
DeploymentResult result,
CancellationToken cancellationToken)
{
using var client = CreateSshClient(config.SshCredentials);
using var sftp = CreateSftpClient(config.SshCredentials);
await Task.Run(() =>
{
client.Connect();
sftp.Connect();
try
{
// Create directories
ExecuteCommand(client, "mkdir -p /data/on_boot.d");
ExecuteCommand(client, "mkdir -p /data/network-optimizer");
// Deploy boot script
if (scripts.TryGetValue("udm-agent-boot.sh", out var bootScript))
{
var bootPath = "/data/on_boot.d/99-network-optimizer.sh";
UploadScript(sftp, bootScript, bootPath);
ExecuteCommand(client, $"chmod +x \"{bootPath}\"");
result.DeployedFiles.Add(bootPath);
_logger.LogDebug("Deployed boot script to {Path}", bootPath);
}
// Deploy metrics collector
if (scripts.TryGetValue("udm-metrics-collector.sh", out var metricsScript))
{
var metricsPath = "/data/network-optimizer/metrics-collector.sh";
UploadScript(sftp, metricsScript, metricsPath);
ExecuteCommand(client, $"chmod +x \"{metricsPath}\"");
result.DeployedFiles.Add(metricsPath);
_logger.LogDebug("Deployed metrics collector to {Path}", metricsPath);
}
// Run installation script if present
if (scripts.TryGetValue("install-udm.sh", out var installScript))
{
var installPath = "/tmp/install-network-optimizer.sh";
UploadScript(sftp, installScript, installPath);
ExecuteCommand(client, $"chmod +x \"{installPath}\"");
var installOutput = ExecuteCommand(client, $"sh \"{installPath}\"");
_logger.LogDebug("Installation output: {Output}", installOutput);
ExecuteCommand(client, $"rm \"{installPath}\"");
}
_logger.LogInformation("Successfully deployed UniFi agent scripts");
}
finally
{
client.Disconnect();
sftp.Disconnect();
}
}, cancellationToken);
}
    /// <summary>
    /// Deploys agent to Linux system
    /// </summary>
    private async Task DeployLinuxAgentAsync(
        AgentConfiguration config,
        Dictionary<string, string> scripts,
DeploymentResult result,
CancellationToken cancellationToken)
{
using var client = CreateSshClient(config.SshCredentials);
using var sftp = CreateSftpClient(config.SshCredentials);
await Task.Run(() =>
{
client.Connect();
sftp.Connect();
try
{
// Create directories
ExecuteCommand(client, "mkdir -p /opt/network-optimizer");
ExecuteCommand(client, "mkdir -p /var/log/network-optimizer");
// Deploy agent script
if (scripts.TryGetValue("linux-agent.sh", out var agentScript))
{
var agentPath = "/opt/network-optimizer/agent.sh";
UploadScript(sftp, agentScript, agentPath);
ExecuteCommand(client, $"chmod +x \"{agentPath}\"");
result.DeployedFiles.Add(agentPath);
_logger.LogDebug("Deployed agent script to {Path}", agentPath);
}
// Deploy systemd service
if (scripts.TryGetValue("linux-agent.service", out var serviceScript))
{
var servicePath = "/etc/systemd/system/network-optimizer-agent.service";
UploadScript(sftp, serviceScript, servicePath);
result.DeployedFiles.Add(servicePath);
_logger.LogDebug("Deployed systemd service to {Path}", servicePath);
// Reload systemd and enable service
ExecuteCommand(client, "systemctl daemon-reload");
ExecuteCommand(client, "systemctl enable network-optimizer-agent.service");
ExecuteCommand(client, "systemctl restart network-optimizer-agent.service");
}
// Run installation script if present
if (scripts.TryGetValue("install-linux.sh", out var installScript))
{
var installPath = "/tmp/install-network-optimizer.sh";
UploadScript(sftp, installScript, installPath);
ExecuteCommand(client, $"chmod +x \"{installPath}\"");
var installOutput = ExecuteCommand(client, $"bash \"{installPath}\"");
_logger.LogDebug("Installation output: {Output}", installOutput);
ExecuteCommand(client, $"rm \"{installPath}\"");
}
_logger.LogInformation("Successfully deployed Linux agent");
}
finally
{
client.Disconnect();
sftp.Disconnect();
}
}, cancellationToken);
}
    /// <summary>
    /// Verifies that the deployment was successful
    /// </summary>
    private async Task<VerificationResult> VerifyDeploymentAsync(
AgentConfiguration config,
CancellationToken cancellationToken)
{
var verification = new VerificationResult();
using var client = CreateSshClient(config.SshCredentials);
await Task.Run(() =>
{
client.Connect();
try
{
if (config.AgentType == AgentType.UDM || config.AgentType == AgentType.UCG)
{
// Verify UniFi agent files
var bootScriptExists = FileExists(client, "/data/on_boot.d/99-network-optimizer.sh");
var metricsScriptExists = FileExists(client, "/data/network-optimizer/metrics-collector.sh");
if (bootScriptExists)
verification.VerifiedFiles.Add("/data/on_boot.d/99-network-optimizer.sh");
if (metricsScriptExists)
verification.VerifiedFiles.Add("/data/network-optimizer/metrics-collector.sh");
// Check if metrics collector is running
var processCheck = ExecuteCommand(client, "pgrep -f metrics-collector.sh");
verification.AgentRunning = !string.IsNullOrWhiteSpace(processCheck);
verification.Passed = bootScriptExists && metricsScriptExists;
if (!verification.Passed)
{
if (!bootScriptExists)
verification.Messages.Add("Boot script not found");
if (!metricsScriptExists)
verification.Messages.Add("Metrics collector script not found");
}
}
else if (config.AgentType == AgentType.Linux)
{
// Verify Linux agent files
var agentScriptExists = FileExists(client, "/opt/network-optimizer/agent.sh");
var serviceExists = FileExists(client, "/etc/systemd/system/network-optimizer-agent.service");
if (agentScriptExists)
verification.VerifiedFiles.Add("/opt/network-optimizer/agent.sh");
if (serviceExists)
verification.VerifiedFiles.Add("/etc/systemd/system/network-optimizer-agent.service");
// Check service status
var serviceStatus = ExecuteCommand(client, "systemctl is-active network-optimizer-agent.service");
verification.ServiceStatus = serviceStatus.Trim();
verification.AgentRunning = verification.ServiceStatus == "active";
verification.Passed = agentScriptExists && serviceExists && verification.AgentRunning;
if (!verification.Passed)
{
if (!agentScriptExists)
verification.Messages.Add("Agent script not found");
if (!serviceExists)
verification.Messages.Add("Systemd service not found");
if (!verification.AgentRunning)
verification.Messages.Add($"Service not running (status: {verification.ServiceStatus})");
}
}
}
finally
{
client.Disconnect();
}
}, cancellationToken);
return verification;
}
    /// <summary>
    /// Creates an SSH client with the given credentials
    /// </summary>
    private SshClient CreateSshClient(SshCredentials credentials)
    {
        var authMethods = new List<AuthenticationMethod>();
if (credentials.GetAuthenticationType() == AuthenticationType.PrivateKey)
{
var keyFile = credentials.PrivateKeyPassphrase != null
? new PrivateKeyFile(credentials.PrivateKeyPath!, credentials.PrivateKeyPassphrase)
: new PrivateKeyFile(credentials.PrivateKeyPath!);
authMethods.Add(new PrivateKeyAuthenticationMethod(credentials.Username, keyFile));
}
else if (credentials.GetAuthenticationType() == AuthenticationType.Password)
{
authMethods.Add(new PasswordAuthenticationMethod(credentials.Username, credentials.Password!));
}
var connectionInfo = new ConnectionInfo(
credentials.Host,
credentials.Port,
credentials.Username,
authMethods.ToArray())
{
Timeout = TimeSpan.FromSeconds(credentials.TimeoutSeconds)
};
return new SshClient(connectionInfo);
}
    /// <summary>
    /// Creates an SFTP client with the given credentials
    /// </summary>
    private SftpClient CreateSftpClient(SshCredentials credentials)
    {
        var authMethods = new List<AuthenticationMethod>();
if (credentials.GetAuthenticationType() == AuthenticationType.PrivateKey)
{
var keyFile = credentials.PrivateKeyPassphrase != null
? new PrivateKeyFile(credentials.PrivateKeyPath!, credentials.PrivateKeyPassphrase)
: new PrivateKeyFile(credentials.PrivateKeyPath!);
authMethods.Add(new PrivateKeyAuthenticationMethod(credentials.Username, keyFile));
}
else if (credentials.GetAuthenticationType() == AuthenticationType.Password)
{
authMethods.Add(new PasswordAuthenticationMethod(credentials.Username, credentials.Password!));
}
var connectionInfo = new ConnectionInfo(
credentials.Host,
credentials.Port,
credentials.Username,
authMethods.ToArray())
{
Timeout = TimeSpan.FromSeconds(credentials.TimeoutSeconds)
};
return new SftpClient(connectionInfo);
}
    /// <summary>
    /// Executes a command on the remote system
    /// </summary>
private string ExecuteCommand(SshClient client, string command)
{
using var cmd = client.CreateCommand(command);
var result = cmd.Execute();
if (cmd.ExitStatus != 0 && !string.IsNullOrEmpty(cmd.Error))
{
_logger.LogWarning("Command '{Command}' returned non-zero exit code {ExitCode}: {Error}",
command, cmd.ExitStatus, cmd.Error);
}
return result;
}
    /// <summary>
    /// Uploads a script to the remote system
    /// </summary>
private void UploadScript(SftpClient sftp, string content, string remotePath)
{
using var stream = new MemoryStream(Encoding.UTF8.GetBytes(content));
sftp.UploadFile(stream, remotePath, true);
}
    /// <summary>
    /// Checks if a file exists on the remote system
    /// </summary>
private bool FileExists(SshClient client, string path)
{
var result = ExecuteCommand(client, $"test -f \"{path}\" && echo 'exists' || echo 'not found'");
return result.Trim() == "exists";
}
    /// <summary>
    /// Helper to add a deployment step with error handling
    /// </summary>
    private async Task AddStepAsync(DeploymentResult result, string stepName, Func<Task<string>> action)
{
var step = new DeploymentStep { Name = stepName };
var sw = Stopwatch.StartNew();
try
{
step.Message = await action();
step.Success = true;
_logger.LogDebug("Step '{Step}' completed: {Message}", stepName, step.Message);
}
catch (Exception ex)
{
step.Success = false;
step.Message = ex.Message;
_logger.LogError(ex, "Step '{Step}' failed", stepName);
throw;
}
finally
{
sw.Stop();
step.DurationMs = sw.ElapsedMilliseconds;
result.Steps.Add(step);
}
}
}
================================================
FILE: src/NetworkOptimizer.Agents/AgentHealthMonitor.cs
================================================
using System.Data;
using Microsoft.Data.Sqlite;
using Microsoft.Extensions.Logging;
using NetworkOptimizer.Agents.Models;
namespace NetworkOptimizer.Agents;
/// <summary>
/// Monitors agent health and tracks heartbeats
/// </summary>
public class AgentHealthMonitor : IDisposable
{
    private readonly ILogger<AgentHealthMonitor> _logger;
    private readonly string _connectionString;
    private readonly TimeSpan _offlineThreshold;
    public AgentHealthMonitor(
        ILogger<AgentHealthMonitor> logger,
string databasePath,
TimeSpan? offlineThreshold = null)
{
_logger = logger;
_connectionString = $"Data Source={databasePath}";
_offlineThreshold = offlineThreshold ?? TimeSpan.FromMinutes(5);
InitializeDatabase();
}
    /// <summary>
    /// Records a heartbeat from an agent
    /// </summary>
    public async Task RecordHeartbeatAsync(string agentId, string deviceName, AgentType agentType, Dictionary<string, string>? metadata = null)
{
try
{
using var connection = new SqliteConnection(_connectionString);
await connection.OpenAsync();
var metadataJson = metadata != null ? System.Text.Json.JsonSerializer.Serialize(metadata) : null;
var command = connection.CreateCommand();
command.CommandText = @"
INSERT OR REPLACE INTO agent_heartbeats (agent_id, device_name, agent_type, last_heartbeat, metadata)
VALUES (@agentId, @deviceName, @agentType, @lastHeartbeat, @metadata)";
command.Parameters.AddWithValue("@agentId", agentId);
command.Parameters.AddWithValue("@deviceName", deviceName);
command.Parameters.AddWithValue("@agentType", agentType.ToString());
command.Parameters.AddWithValue("@lastHeartbeat", DateTime.UtcNow);
command.Parameters.AddWithValue("@metadata", (object?)metadataJson ?? DBNull.Value);
await command.ExecuteNonQueryAsync();
_logger.LogDebug("Recorded heartbeat for agent {AgentId} ({DeviceName})", agentId, deviceName);
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to record heartbeat for agent {AgentId}", agentId);
throw;
}
}
    /// <summary>
    /// Gets the status of a specific agent
    /// </summary>
    public async Task<AgentStatus?> GetAgentStatusAsync(string agentId)
{
try
{
using var connection = new SqliteConnection(_connectionString);
await connection.OpenAsync();
var command = connection.CreateCommand();
command.CommandText = @"
SELECT agent_id, device_name, agent_type, last_heartbeat, metadata, first_seen
FROM agent_heartbeats
WHERE agent_id = @agentId";
command.Parameters.AddWithValue("@agentId", agentId);
using var reader = await command.ExecuteReaderAsync();
if (await reader.ReadAsync())
{
return ReadAgentStatus(reader);
}
return null;
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to get status for agent {AgentId}", agentId);
throw;
}
}
    /// <summary>
    /// Gets all registered agents
    /// </summary>
    public async Task<List<AgentStatus>> GetAllAgentsAsync()
{
try
{
using var connection = new SqliteConnection(_connectionString);
await connection.OpenAsync();
var command = connection.CreateCommand();
command.CommandText = @"
SELECT agent_id, device_name, agent_type, last_heartbeat, metadata, first_seen
FROM agent_heartbeats
ORDER BY last_heartbeat DESC";
            var agents = new List<AgentStatus>();
using var reader = await command.ExecuteReaderAsync();
while (await reader.ReadAsync())
{
agents.Add(ReadAgentStatus(reader));
}
return agents;
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to get all agents");
throw;
}
}
    /// <summary>
    /// Gets all offline agents
    /// </summary>
    public async Task<List<AgentStatus>> GetOfflineAgentsAsync()
{
var allAgents = await GetAllAgentsAsync();
return allAgents.Where(a => !a.IsOnline).ToList();
}
    /// <summary>
    /// Gets all online agents
    /// </summary>
    public async Task<List<AgentStatus>> GetOnlineAgentsAsync()
{
var allAgents = await GetAllAgentsAsync();
return allAgents.Where(a => a.IsOnline).ToList();
}
    /// <summary>
    /// Removes an agent from monitoring
    /// </summary>
public async Task RemoveAgentAsync(string agentId)
{
try
{
using var connection = new SqliteConnection(_connectionString);
await connection.OpenAsync();
var command = connection.CreateCommand();
command.CommandText = "DELETE FROM agent_heartbeats WHERE agent_id = @agentId";
command.Parameters.AddWithValue("@agentId", agentId);
var rowsAffected = await command.ExecuteNonQueryAsync();
if (rowsAffected > 0)
{
_logger.LogInformation("Removed agent {AgentId} from monitoring", agentId);
}
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to remove agent {AgentId}", agentId);
throw;
}
}
    /// <summary>
    /// Gets statistics about agent health
    /// </summary>
    public async Task<AgentHealthStats> GetHealthStatsAsync()
{
var allAgents = await GetAllAgentsAsync();
return new AgentHealthStats
{
TotalAgents = allAgents.Count,
OnlineAgents = allAgents.Count(a => a.IsOnline),
OfflineAgents = allAgents.Count(a => !a.IsOnline),
AgentsByType = allAgents
.GroupBy(a => a.AgentType)
.ToDictionary(g => g.Key, g => g.Count()),
OldestHeartbeat = allAgents.Any() ? allAgents.Min(a => a.LastHeartbeat) : null,
NewestHeartbeat = allAgents.Any() ? allAgents.Max(a => a.LastHeartbeat) : null
};
}
    /// <summary>
    /// Cleans up old heartbeat records
    /// </summary>
public async Task CleanupOldRecordsAsync(TimeSpan retentionPeriod)
{
try
{
using var connection = new SqliteConnection(_connectionString);
await connection.OpenAsync();
var cutoffDate = DateTime.UtcNow - retentionPeriod;
var command = connection.CreateCommand();
command.CommandText = "DELETE FROM agent_heartbeats WHERE last_heartbeat < @cutoffDate";
command.Parameters.AddWithValue("@cutoffDate", cutoffDate);
var rowsAffected = await command.ExecuteNonQueryAsync();
if (rowsAffected > 0)
{
_logger.LogInformation("Cleaned up {Count} old agent records", rowsAffected);
}
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to cleanup old records");
throw;
}
}
    /// <summary>
    /// Initializes the SQLite database
    /// </summary>
private void InitializeDatabase()
{
try
{
using var connection = new SqliteConnection(_connectionString);
connection.Open();
var command = connection.CreateCommand();
command.CommandText = @"
CREATE TABLE IF NOT EXISTS agent_heartbeats (
agent_id TEXT PRIMARY KEY,
device_name TEXT NOT NULL,
agent_type TEXT NOT NULL,
last_heartbeat TEXT NOT NULL,
first_seen TEXT NOT NULL DEFAULT (datetime('now')),
metadata TEXT
);
CREATE INDEX IF NOT EXISTS idx_last_heartbeat ON agent_heartbeats(last_heartbeat);
CREATE INDEX IF NOT EXISTS idx_agent_type ON agent_heartbeats(agent_type);
";
command.ExecuteNonQuery();
_logger.LogDebug("Initialized agent health database");
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to initialize database");
throw;
}
}
    /// <summary>
    /// Reads an AgentStatus from a data reader
    /// </summary>
private AgentStatus ReadAgentStatus(SqliteDataReader reader)
{
var agentId = reader.GetString(0);
var deviceName = reader.GetString(1);
var agentTypeStr = reader.GetString(2);
var lastHeartbeat = reader.GetDateTime(3);
var metadataJson = reader.IsDBNull(4) ? null : reader.GetString(4);
var firstSeen = reader.GetDateTime(5);
        var agentType = Enum.Parse<AgentType>(agentTypeStr);
        var metadata = metadataJson != null
            ? System.Text.Json.JsonSerializer.Deserialize<Dictionary<string, string>>(metadataJson)
            : null;
var timeSinceLastHeartbeat = DateTime.UtcNow - lastHeartbeat;
var isOnline = timeSinceLastHeartbeat <= _offlineThreshold;
return new AgentStatus
{
AgentId = agentId,
DeviceName = deviceName,
AgentType = agentType,
LastHeartbeat = lastHeartbeat,
FirstSeen = firstSeen,
IsOnline = isOnline,
SecondsSinceLastHeartbeat = (int)timeSinceLastHeartbeat.TotalSeconds,
            Metadata = metadata ?? new Dictionary<string, string>()
};
}
public void Dispose()
{
// Clean up any resources if needed
GC.SuppressFinalize(this);
}
}
/// <summary>
/// Current status of an agent
/// </summary>
public class AgentStatus
{
public required string AgentId { get; set; }
public required string DeviceName { get; set; }
public required AgentType AgentType { get; set; }
public DateTime LastHeartbeat { get; set; }
public DateTime FirstSeen { get; set; }
public bool IsOnline { get; set; }
public int SecondsSinceLastHeartbeat { get; set; }
    public Dictionary<string, string> Metadata { get; set; } = new();
}
/// <summary>
/// Overall health statistics for all agents
/// </summary>
public class AgentHealthStats
{
public int TotalAgents { get; set; }
public int OnlineAgents { get; set; }
public int OfflineAgents { get; set; }
    public Dictionary<AgentType, int> AgentsByType { get; set; } = new();
public DateTime? OldestHeartbeat { get; set; }
public DateTime? NewestHeartbeat { get; set; }
public double OnlinePercentage => TotalAgents > 0
? (double)OnlineAgents / TotalAgents * 100
: 0;
}
================================================
FILE: src/NetworkOptimizer.Agents/Models/AgentConfiguration.cs
================================================
namespace NetworkOptimizer.Agents.Models;
/// <summary>
/// Configuration for a deployed agent
/// </summary>
public class AgentConfiguration
{
    /// <summary>
    /// Unique identifier for the agent
    /// </summary>
    public required string AgentId { get; set; }
    /// <summary>
    /// Friendly name for the device
    /// </summary>
    public required string DeviceName { get; set; }
    /// <summary>
    /// Type of agent being deployed
    /// </summary>
    public required AgentType AgentType { get; set; }
    /// <summary>
    /// InfluxDB endpoint URL
    /// </summary>
    public required string InfluxDbUrl { get; set; }
    /// <summary>
    /// InfluxDB organization
    /// </summary>
    public required string InfluxDbOrg { get; set; }
    /// <summary>
    /// InfluxDB bucket name
    /// </summary>
    public required string InfluxDbBucket { get; set; }
    /// <summary>
    /// InfluxDB authentication token
    /// </summary>
    public required string InfluxDbToken { get; set; }
    /// <summary>
    /// Metric collection interval in seconds
    /// </summary>
    public int CollectionIntervalSeconds { get; set; } = 30;
    /// <summary>
    /// Speedtest interval in minutes (UDM/UCG only)
    /// </summary>
    public int SpeedtestIntervalMinutes { get; set; } = 60;
    /// <summary>
    /// Enable Docker metrics collection (Linux agent only)
    /// </summary>
    public bool EnableDockerMetrics { get; set; } = false;
    /// <summary>
    /// Additional tags to apply to all metrics
    /// </summary>
    public Dictionary<string, string> Tags { get; set; } = new();
    /// <summary>
    /// SSH credentials for deployment
    /// </summary>
    public required SshCredentials SshCredentials { get; set; }
}
public enum AgentType
{
    /// <summary>
    /// UniFi Dream Machine (UDM/UDM-Pro/UDM-SE)
    /// </summary>
    UDM,
    /// <summary>
    /// UniFi Cloud Gateway (UCG-Ultra/UCG-Max)
    /// </summary>
    UCG,
    /// <summary>
    /// Generic Linux system
    /// </summary>
    Linux
}
================================================
FILE: src/NetworkOptimizer.Agents/Models/DeploymentResult.cs
================================================
namespace NetworkOptimizer.Agents.Models;
/// <summary>
/// Result of an agent deployment operation
/// </summary>
public class DeploymentResult
{
    /// <summary>
    /// Whether the deployment was successful
    /// </summary>
    public bool Success { get; set; }
    /// <summary>
    /// Agent ID that was deployed
    /// </summary>
    public required string AgentId { get; set; }
    /// <summary>
    /// Device name
    /// </summary>
    public required string DeviceName { get; set; }
    /// <summary>
    /// Agent type deployed
    /// </summary>
    public AgentType AgentType { get; set; }
    /// <summary>
    /// Deployment timestamp
    /// </summary>
    public DateTime DeployedAt { get; set; } = DateTime.UtcNow;
    /// <summary>
    /// Success or error message
    /// </summary>
    public string Message { get; set; } = string.Empty;
    /// <summary>
    /// Detailed deployment steps and their results
    /// </summary>
    public List<DeploymentStep> Steps { get; set; } = new();
    /// <summary>
    /// Files that were deployed
    /// </summary>
    public List<string> DeployedFiles { get; set; } = new();
    /// <summary>
    /// Verification results
    /// </summary>
    public VerificationResult? Verification { get; set; }
    /// <summary>
    /// Creates a successful deployment result
    /// </summary>
public static DeploymentResult CreateSuccess(string agentId, string deviceName, AgentType agentType, string message)
{
return new DeploymentResult
{
Success = true,
AgentId = agentId,
DeviceName = deviceName,
AgentType = agentType,
Message = message
};
}
    /// <summary>
    /// Creates a failed deployment result
    /// </summary>
public static DeploymentResult CreateFailure(string agentId, string deviceName, AgentType agentType, string message)
{
return new DeploymentResult
{
Success = false,
AgentId = agentId,
DeviceName = deviceName,
AgentType = agentType,
Message = message
};
}
}
/// <summary>
/// Individual deployment step
/// </summary>
public class DeploymentStep
{
    /// <summary>
    /// Step name
    /// </summary>
    public required string Name { get; set; }
    /// <summary>
    /// Whether the step succeeded
    /// </summary>
    public bool Success { get; set; }
    /// <summary>
    /// Step message or error
    /// </summary>
    public string Message { get; set; } = string.Empty;
    /// <summary>
    /// When the step was executed
    /// </summary>
    public DateTime ExecutedAt { get; set; } = DateTime.UtcNow;
    /// <summary>
    /// Step duration in milliseconds
    /// </summary>
    public long DurationMs { get; set; }
}
/// <summary>
/// Post-deployment verification results
/// </summary>
public class VerificationResult
{
    /// <summary>
    /// Whether verification passed
    /// </summary>
    public bool Passed { get; set; }
    /// <summary>
    /// Files verified as present
    /// </summary>
    public List<string> VerifiedFiles { get; set; } = new();
    /// <summary>
    /// Service status (if applicable)
    /// </summary>
    public string? ServiceStatus { get; set; }
    /// <summary>
    /// Agent process is running
    /// </summary>
    public bool AgentRunning { get; set; }
    /// <summary>
    /// Verification messages
    /// </summary>
    public List<string> Messages { get; set; } = new();
}
================================================
FILE: src/NetworkOptimizer.Agents/Models/SshCredentials.cs
================================================
namespace NetworkOptimizer.Agents.Models;
/// <summary>
/// SSH connection credentials supporting both password and key-based authentication
/// </summary>
public class SshCredentials
{
    /// <summary>
    /// SSH hostname or IP address
    /// </summary>
    public required string Host { get; set; }
    /// <summary>
    /// SSH port (default: 22)
    /// </summary>
    public int Port { get; set; } = 22;
    /// <summary>
    /// SSH username
    /// </summary>
    public required string Username { get; set; }
    /// <summary>
    /// Password for password-based authentication
    /// </summary>
    public string? Password { get; set; }
    /// <summary>
    /// Private key path for key-based authentication
    /// </summary>
    public string? PrivateKeyPath { get; set; }
    /// <summary>
    /// Private key passphrase (if the key is encrypted)
    /// </summary>
    public string? PrivateKeyPassphrase { get; set; }
    /// <summary>
    /// Connection timeout in seconds
    /// </summary>
    public int TimeoutSeconds { get; set; } = 30;
    /// <summary>
    /// Validates that credentials are properly configured
    /// </summary>
public bool IsValid()
{
if (string.IsNullOrWhiteSpace(Host) || string.IsNullOrWhiteSpace(Username))
return false;
// Must have either password or private key
return !string.IsNullOrWhiteSpace(Password) || !string.IsNullOrWhiteSpace(PrivateKeyPath);
}
    /// <summary>
    /// Gets authentication type
    /// </summary>
public AuthenticationType GetAuthenticationType()
{
if (!string.IsNullOrWhiteSpace(PrivateKeyPath))
return AuthenticationType.PrivateKey;
if (!string.IsNullOrWhiteSpace(Password))
return AuthenticationType.Password;
return AuthenticationType.None;
}
}
public enum AuthenticationType
{
None,
Password,
PrivateKey
}
================================================
FILE: src/NetworkOptimizer.Agents/NetworkOptimizer.Agents.csproj
================================================
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>net10.0</TargetFramework>
    <Nullable>enable</Nullable>
    <ImplicitUsings>enable</ImplicitUsings>
  </PropertyGroup>
  <ItemGroup>
    <PackageReference Include="SSH.NET" Version="2025.1.0" />
    <PackageReference Include="Scriban" Version="6.5.2" />
    <PackageReference Include="Microsoft.Data.Sqlite" Version="10.0.1" />
    <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.1" />
  </ItemGroup>
  <ItemGroup>
    <None Update="Templates\**\*.template" CopyToOutputDirectory="PreserveNewest" />
  </ItemGroup>
</Project>
================================================
FILE: src/NetworkOptimizer.Agents/README.md
================================================
# NetworkOptimizer.Agents
> **Status: Future Project** - This library is planned but not yet implemented. The structure and interfaces below represent the intended design.
Generic agent deployment and monitoring system for Linux systems. This library will provide SSH-based deployment, health monitoring, and template-based configuration management.
## Features
### Agent Deployment
- **SSH-Based Deployment**: Secure deployment via SSH.NET with support for:
- Password authentication
- Private key authentication (with or without passphrase)
- Connection testing before deployment
- Deployment verification after installation
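Since the library is still a planned design, there is no published sample yet; the following is a minimal sketch of the intended `AgentDeployer` flow based on the types in this project (all hosts, tokens, and key paths below are placeholders, and `NullLogger` stands in for whatever logger the host app provides):
```csharp
using Microsoft.Extensions.Logging.Abstractions;
using NetworkOptimizer.Agents;
using NetworkOptimizer.Agents.Models;

var renderer = new ScriptRenderer(NullLogger<ScriptRenderer>.Instance);
var deployer = new AgentDeployer(NullLogger<AgentDeployer>.Instance, renderer);

var config = new AgentConfiguration
{
    AgentId = "agent-01",                  // placeholder identifiers
    DeviceName = "web01",
    AgentType = AgentType.Linux,
    InfluxDbUrl = "http://influxdb:8086",  // placeholder InfluxDB settings
    InfluxDbOrg = "home",
    InfluxDbBucket = "metrics",
    InfluxDbToken = "<token>",
    SshCredentials = new SshCredentials
    {
        Host = "192.0.2.10",               // placeholder host
        Username = "root",
        PrivateKeyPath = "/root/.ssh/id_ed25519"
    }
};

var result = await deployer.DeployAgentAsync(config);
Console.WriteLine(result.Success ? result.Message : $"Failed: {result.Message}");
```
Each stage (credential validation, SSH test, template rendering, upload, verification) lands in `result.Steps` with its duration, so a failed run shows exactly which step broke.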
### Supported Platforms
- **Linux Systems**: Generic Linux servers (Ubuntu, Debian, CentOS, RHEL, Fedora)
- Deploys as systemd service
- Collects CPU, memory, disk, and network metrics
- Optional Docker container monitoring
- Automatic service management
### Health Monitoring
- Track agent heartbeats in SQLite database
- Detect offline agents
- Monitor agent status and uptime
- View comprehensive health statistics
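A sketch of the heartbeat API under the same caveat, reusing the usings from the deployment sketch above (the database path is a placeholder):
```csharp
using var monitor = new AgentHealthMonitor(
    NullLogger<AgentHealthMonitor>.Instance,
    databasePath: "agents.db",                   // placeholder path
    offlineThreshold: TimeSpan.FromMinutes(5));  // agents silent longer than this count as offline

await monitor.RecordHeartbeatAsync("agent-01", "web01", AgentType.Linux);

var stats = await monitor.GetHealthStatsAsync();
Console.WriteLine($"{stats.OnlineAgents}/{stats.TotalAgents} agents online ({stats.OnlinePercentage:F0}%)");
```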
### Template System
- Scriban-based template rendering
- Dynamic configuration injection
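Rendering is plain Scriban substitution; for example, `linux-agent.sh.template` contains the line:
```
AGENT_ID="{{ agent_id }}"
```
which `ScriptRenderer.RenderTemplateAsync` turns into `AGENT_ID="agent-01"` for the configuration sketched above.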
## Project Structure
```
NetworkOptimizer.Agents/
├── Models/
│ ├── AgentConfiguration.cs # Agent configuration model
│ ├── DeploymentResult.cs # Deployment result tracking
│ └── SshCredentials.cs # SSH authentication credentials
├── Templates/
│ ├── linux-agent.sh.template # Linux agent script
│ ├── linux-agent.service.template # Systemd service definition
│ └── install-linux.sh.template # Linux installation
├── AgentDeployer.cs # Main deployment orchestrator
├── AgentHealthMonitor.cs # Health monitoring and heartbeat tracking
├── ScriptRenderer.cs # Template rendering engine
└── NetworkOptimizer.Agents.csproj # Project file
```
## Related Projects
- **NetworkOptimizer.Sqm** - Adaptive SQM (Smart Queue Management) for UniFi gateways
- **NetworkOptimizer.Web/Services/SqmDeploymentService.cs** - SQM script deployment via SSH
## Metrics Collected (Linux Agents)
- **CPU**: Usage percentage, load averages (1m, 5m, 15m)
- **Memory**: Total, used, free, available, swap usage
- **Disk**: Usage, I/O operations (reads/writes)
- **Network**: Per-interface traffic, packets, errors
- **Docker** (optional): Container count, per-container CPU and memory
- **Heartbeats**: Agent online status, system uptime
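Everything is written to InfluxDB as line protocol by the agent script's `send_metric` helper, which tags each point with `agent_id`, `device`, and `agent_type`. As an illustration of the shape only (the measurement and field names below are assumptions, not captured output), a CPU sample would look roughly like:
```
cpu,agent_id=agent-01,device=web01,agent_type=linux usage_percent=12.5,load_1m=0.42 1700000000000000000
```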
## Dependencies
- **SSH.NET** (2025.1.0): SSH/SFTP client library
- **Scriban** (6.5.2): Template rendering engine
- **Microsoft.Data.Sqlite** (10.0.1): SQLite database for health monitoring
- **Microsoft.Extensions.Logging.Abstractions** (10.0.1): Logging infrastructure
## .NET Version
Built for **.NET 10.0** with nullable reference types enabled and implicit usings.
================================================
FILE: src/NetworkOptimizer.Agents/ScriptRenderer.cs
================================================
using Microsoft.Extensions.Logging;
using NetworkOptimizer.Agents.Models;
using Scriban;
using Scriban.Runtime;
namespace NetworkOptimizer.Agents;
/// <summary>
/// Renders script templates with configuration values using Scriban
/// </summary>
public class ScriptRenderer
{
    private readonly ILogger<ScriptRenderer> _logger;
    private readonly string _templatesPath;
    public ScriptRenderer(ILogger<ScriptRenderer> logger, string? templatesPath = null)
{
_logger = logger;
_templatesPath = templatesPath ?? Path.Combine(AppContext.BaseDirectory, "Templates");
}
    /// <summary>
    /// Renders a template file with the given configuration
    /// </summary>
    public async Task<string> RenderTemplateAsync(string templateName, AgentConfiguration config)
{
try
{
var templatePath = Path.Combine(_templatesPath, templateName);
if (!File.Exists(templatePath))
{
throw new FileNotFoundException($"Template not found: {templatePath}");
}
var templateContent = await File.ReadAllTextAsync(templatePath);
var template = Template.Parse(templateContent);
if (template.HasErrors)
{
var errors = string.Join(", ", template.Messages.Select(m => m.Message));
throw new InvalidOperationException($"Template parsing errors: {errors}");
}
var scriptObject = BuildScriptObject(config);
var rendered = await template.RenderAsync(scriptObject);
_logger.LogDebug("Successfully rendered template {Template} for agent {AgentId}",
templateName, config.AgentId);
return rendered;
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to render template {Template}", templateName);
throw;
}
}
    /// <summary>
    /// Renders a template from a string rather than a file
    /// </summary>
    public async Task<string> RenderTemplateStringAsync(string templateContent, AgentConfiguration config)
{
try
{
var template = Template.Parse(templateContent);
if (template.HasErrors)
{
var errors = string.Join(", ", template.Messages.Select(m => m.Message));
throw new InvalidOperationException($"Template parsing errors: {errors}");
}
var scriptObject = BuildScriptObject(config);
return await template.RenderAsync(scriptObject);
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to render template string");
throw;
}
}
    /// <summary>
    /// Gets the appropriate template names for an agent type
    /// </summary>
    public List<string> GetTemplatesForAgent(AgentType agentType)
    {
        return agentType switch
        {
            AgentType.UDM or AgentType.UCG => new List<string>
            {
                "udm-agent-boot.sh.template",
                "udm-metrics-collector.sh.template",
                "install-udm.sh.template"
            },
            AgentType.Linux => new List<string>
{
"linux-agent.sh.template",
"linux-agent.service.template",
"install-linux.sh.template"
},
_ => throw new ArgumentException($"Unknown agent type: {agentType}")
};
}
    /// <summary>
    /// Builds a Scriban script object with all configuration values
    /// </summary>
private ScriptObject BuildScriptObject(AgentConfiguration config)
{
var scriptObject = new ScriptObject
{
{ "agent_id", config.AgentId },
{ "device_name", config.DeviceName },
{ "agent_type", config.AgentType.ToString().ToLower() },
{ "influxdb_url", config.InfluxDbUrl },
{ "influxdb_org", config.InfluxDbOrg },
{ "influxdb_bucket", config.InfluxDbBucket },
{ "influxdb_token", config.InfluxDbToken },
{ "collection_interval", config.CollectionIntervalSeconds },
{ "speedtest_interval", config.SpeedtestIntervalMinutes },
{ "enable_docker", config.EnableDockerMetrics },
{ "is_udm", config.AgentType == AgentType.UDM },
{ "is_ucg", config.AgentType == AgentType.UCG },
{ "is_linux", config.AgentType == AgentType.Linux },
{ "is_unifi", config.AgentType == AgentType.UDM || config.AgentType == AgentType.UCG }
};
// Add tags as a dictionary
if (config.Tags.Any())
{
var tagsObject = new ScriptObject();
foreach (var tag in config.Tags)
{
tagsObject.Add(tag.Key, tag.Value);
}
scriptObject.Add("tags", tagsObject);
}
        // The ScriptObject is handed directly to Template.RenderAsync, so no TemplateContext is needed here
        return scriptObject;
}
    /// <summary>
    /// Validates that all required templates exist
    /// </summary>
    public bool ValidateTemplates(AgentType agentType, out List<string> missingTemplates)
    {
        missingTemplates = new List<string>();
var templates = GetTemplatesForAgent(agentType);
foreach (var template in templates)
{
var path = Path.Combine(_templatesPath, template);
if (!File.Exists(path))
{
missingTemplates.Add(template);
}
}
return missingTemplates.Count == 0;
}
    /// <summary>
    /// Lists all available templates
    /// </summary>
    public List<string> ListAvailableTemplates()
    {
        if (!Directory.Exists(_templatesPath))
        {
            return new List<string>();
        }
        return Directory.GetFiles(_templatesPath, "*.template")
            .Select(Path.GetFileName)
            .Where(n => n != null)
            .Cast<string>()
            .ToList();
}
}
================================================
FILE: src/NetworkOptimizer.Agents/Templates/install-linux.sh.template
================================================
#!/bin/bash
#
# Network Optimizer Agent - Linux Installation Script
#
set -e
echo "=== Network Optimizer Agent Installation ==="
echo "Agent ID: {{ agent_id }}"
echo "Device: {{ device_name }}"
echo ""
# Check if running as root
if [ "$EUID" -ne 0 ]; then
echo "ERROR: This script must be run as root"
echo "Please run: sudo $0"
exit 1
fi
echo "✓ Running as root"
# Detect Linux distribution
if [ -f /etc/os-release ]; then
. /etc/os-release
OS=$ID
OS_VERSION=$VERSION_ID
echo "✓ Detected $PRETTY_NAME"
else
echo "WARNING: Could not detect Linux distribution"
OS="unknown"
fi
# Create necessary directories
echo "Creating directories..."
mkdir -p /opt/network-optimizer
mkdir -p /var/log/network-optimizer
chmod 755 /opt/network-optimizer
chmod 755 /var/log/network-optimizer
# Check for required commands
echo "Checking dependencies..."
if ! command -v curl &> /dev/null; then
echo "ERROR: curl is not installed"
echo "Please install curl:"
case $OS in
ubuntu|debian)
echo " sudo apt-get install curl"
;;
centos|rhel|fedora)
echo " sudo yum install curl"
;;
*)
echo " Use your package manager to install curl"
;;
esac
exit 1
fi
echo "✓ curl found"
if ! command -v bc &> /dev/null; then
echo "Installing bc (basic calculator)..."
case $OS in
ubuntu|debian)
apt-get install -y bc
;;
centos|rhel|fedora)
yum install -y bc
;;
*)
echo "WARNING: Could not install bc automatically"
;;
esac
fi
# Check for Docker if Docker metrics are enabled
ENABLE_DOCKER="{{ enable_docker }}"
if [ "$ENABLE_DOCKER" = "True" ] || [ "$ENABLE_DOCKER" = "true" ]; then
if ! command -v docker &> /dev/null; then
echo "WARNING: Docker metrics enabled but Docker not found"
echo "Docker metrics will be disabled"
else
echo "✓ Docker found"
# Check if current user can access Docker
if docker ps &> /dev/null; then
echo "✓ Docker access verified"
else
echo "WARNING: Cannot access Docker socket"
echo "You may need to run: sudo usermod -aG docker $USER"
fi
fi
fi
# Test InfluxDB connection
echo "Testing InfluxDB connection..."
INFLUXDB_URL="{{ influxdb_url }}"
INFLUXDB_ORG="{{ influxdb_org }}"
INFLUXDB_BUCKET="{{ influxdb_bucket }}"
INFLUXDB_TOKEN="{{ influxdb_token }}"
curl_output=$(curl -s -o /dev/null -w "%{http_code}" \
-X POST "${INFLUXDB_URL}/api/v2/write?org=${INFLUXDB_ORG}&bucket=${INFLUXDB_BUCKET}" \
-H "Authorization: Token ${INFLUXDB_TOKEN}" \
-H "Content-Type: text/plain" \
--data-binary "test,source=installer value=1")
if [ "$curl_output" = "204" ]; then
echo "✓ InfluxDB connection successful"
else
echo "WARNING: InfluxDB connection returned HTTP $curl_output"
echo "Please verify your InfluxDB configuration"
fi
# Make agent script executable
if [ -f "/opt/network-optimizer/agent.sh" ]; then
chmod +x /opt/network-optimizer/agent.sh
echo "✓ Agent script configured"
fi
# Check if systemd is available
if command -v systemctl &> /dev/null; then
if [ -f "/etc/systemd/system/network-optimizer-agent.service" ]; then
echo "Configuring systemd service..."
systemctl daemon-reload
systemctl enable network-optimizer-agent.service
systemctl restart network-optimizer-agent.service
# Wait a moment and check status
sleep 2
if systemctl is-active --quiet network-optimizer-agent.service; then
echo "✓ Service started successfully"
else
echo "WARNING: Service may have failed to start"
echo "Check status with: systemctl status network-optimizer-agent.service"
fi
fi
else
echo "WARNING: systemd not found, service will not auto-start"
echo "You can run the agent manually:"
echo " /opt/network-optimizer/agent.sh"
fi
echo ""
echo "=== Installation Complete ==="
echo ""
echo "Service status:"
if command -v systemctl &> /dev/null; then
systemctl status network-optimizer-agent.service --no-pager || true
fi
echo ""
echo "To view logs:"
echo " tail -f /var/log/network-optimizer/agent.log"
if command -v journalctl &> /dev/null; then
echo " journalctl -u network-optimizer-agent.service -f"
fi
echo ""
echo "To stop the agent:"
echo " systemctl stop network-optimizer-agent.service"
echo ""
echo "To restart the agent:"
echo " systemctl restart network-optimizer-agent.service"
echo ""
================================================
FILE: src/NetworkOptimizer.Agents/Templates/linux-agent.service.template
================================================
[Unit]
Description=Network Optimizer Agent
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=root
ExecStart=/opt/network-optimizer/agent.sh
Restart=always
RestartSec=10
# Logging
StandardOutput=append:/var/log/network-optimizer/agent.log
StandardError=append:/var/log/network-optimizer/agent.log
# Security hardening
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/var/log/network-optimizer
# Resource limits
MemoryLimit=256M
CPUQuota=10%
[Install]
WantedBy=multi-user.target
================================================
FILE: src/NetworkOptimizer.Agents/Templates/linux-agent.sh.template
================================================
#!/bin/bash
#
# Network Optimizer - Linux Agent
#
# Configuration
AGENT_ID="{{ agent_id }}"
DEVICE_NAME="{{ device_name }}"
AGENT_TYPE="{{ agent_type }}"
INFLUXDB_URL="{{ influxdb_url }}"
INFLUXDB_ORG="{{ influxdb_org }}"
INFLUXDB_BUCKET="{{ influxdb_bucket }}"
INFLUXDB_TOKEN="{{ influxdb_token }}"
COLLECTION_INTERVAL={{ collection_interval }}
ENABLE_DOCKER={{ enable_docker }}
# Paths
LOG_DIR="/var/log/network-optimizer"
LOG_FILE="$LOG_DIR/agent.log"
# Ensure log directory exists
mkdir -p "$LOG_DIR"
# Logging function
log() {
echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] $1" | tee -a "$LOG_FILE"
}
# Send metrics to InfluxDB
send_metric() {
local measurement=$1
local tags=$2
local fields=$3
local timestamp=$(date +%s)000000000
local line_protocol="${measurement},agent_id=${AGENT_ID},device=${DEVICE_NAME},agent_type=${AGENT_TYPE}${tags} ${fields} ${timestamp}"
curl -s -X POST "${INFLUXDB_URL}/api/v2/write?org=${INFLUXDB_ORG}&bucket=${INFLUXDB_BUCKET}" \
-H "Authorization: Token ${INFLUXDB_TOKEN}" \
-H "Content-Type: text/plain" \
--data-binary "$line_protocol" >/dev/null 2>&1
if [ $? -eq 0 ]; then
log "Sent metric: $measurement"
else
log "ERROR: Failed to send metric: $measurement"
fi
}
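# Illustrative example (not executed): a call such as
#   send_metric "cpu_stats" ",core=0" "usage_percent=12.5"
# produces a line-protocol record like
#   cpu_stats,agent_id=<id>,device=<name>,agent_type=<type>,core=0 usage_percent=12.5 1700000000000000000
# where the trailing timestamp is nanoseconds since the epoch, as the InfluxDB v2 write API expects.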
# Collect CPU metrics
collect_cpu_metrics() {
# Get CPU usage using top
local cpu_usage=$(top -bn2 -d 0.5 | grep "Cpu(s)" | tail -1 | awk '{print $2}' | sed 's/%us,//')
# Get load averages
local load_avg_1=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $1}' | sed 's/,//')
local load_avg_5=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $2}' | sed 's/,//')
local load_avg_15=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $3}' | sed 's/,//')
# Get CPU count
local cpu_count=$(nproc)
send_metric "cpu_stats" "" "usage_percent=${cpu_usage},load_1m=${load_avg_1},load_5m=${load_avg_5},load_15m=${load_avg_15},cpu_count=${cpu_count}i"
}
# Collect memory metrics
collect_memory_metrics() {
local mem_total=$(free -b | grep Mem | awk '{print $2}')
local mem_used=$(free -b | grep Mem | awk '{print $3}')
local mem_free=$(free -b | grep Mem | awk '{print $4}')
local mem_available=$(free -b | grep Mem | awk '{print $7}')
local mem_percent=$(awk "BEGIN {printf \"%.2f\", ($mem_used/$mem_total)*100}")
local swap_total=$(free -b | grep Swap | awk '{print $2}')
local swap_used=$(free -b | grep Swap | awk '{print $3}')
send_metric "memory_stats" "" "total=${mem_total}i,used=${mem_used}i,free=${mem_free}i,available=${mem_available}i,percent=${mem_percent},swap_total=${swap_total}i,swap_used=${swap_used}i"
}
# Collect disk metrics
collect_disk_metrics() {
# Get disk usage for root filesystem
local disk_info=$(df -B1 / | tail -1)
local disk_total=$(echo $disk_info | awk '{print $2}')
local disk_used=$(echo $disk_info | awk '{print $3}')
local disk_available=$(echo $disk_info | awk '{print $4}')
local disk_percent=$(echo $disk_info | awk '{print $5}' | sed 's/%//')
send_metric "disk_stats" ",mount=/" "total=${disk_total}i,used=${disk_used}i,available=${disk_available}i,percent=${disk_percent}"
# Get disk I/O stats if available
if [ -f /proc/diskstats ]; then
local disk_device=$(df / | tail -1 | awk '{print $1}' | sed 's/\/dev\///' | sed 's/[0-9]*$//')
local disk_stats=$(grep " ${disk_device} " /proc/diskstats | head -1)
if [ -n "$disk_stats" ]; then
local reads_completed=$(echo $disk_stats | awk '{print $4}')
local writes_completed=$(echo $disk_stats | awk '{print $8}')
send_metric "disk_io" ",device=${disk_device}" "reads=${reads_completed}i,writes=${writes_completed}i"
fi
fi
}
# Collect network metrics
collect_network_metrics() {
# Get all network interfaces except loopback
for iface in $(ls /sys/class/net/ | grep -v lo); do
if [ -f "/sys/class/net/${iface}/statistics/rx_bytes" ]; then
local rx_bytes=$(cat /sys/class/net/${iface}/statistics/rx_bytes)
local tx_bytes=$(cat /sys/class/net/${iface}/statistics/tx_bytes)
local rx_packets=$(cat /sys/class/net/${iface}/statistics/rx_packets)
local tx_packets=$(cat /sys/class/net/${iface}/statistics/tx_packets)
local rx_errors=$(cat /sys/class/net/${iface}/statistics/rx_errors)
local tx_errors=$(cat /sys/class/net/${iface}/statistics/tx_errors)
send_metric "network_stats" ",interface=${iface}" "rx_bytes=${rx_bytes}i,tx_bytes=${tx_bytes}i,rx_packets=${rx_packets}i,tx_packets=${tx_packets}i,rx_errors=${rx_errors}i,tx_errors=${tx_errors}i"
fi
done
}
# Collect Docker metrics
collect_docker_metrics() {
if [ "$ENABLE_DOCKER" != "True" ] && [ "$ENABLE_DOCKER" != "true" ]; then
return
fi
if ! command -v docker &> /dev/null; then
return
fi
# Get running container count
local container_count=$(docker ps -q 2>/dev/null | wc -l)
send_metric "docker_stats" "" "running_containers=${container_count}i"
# Get stats for each running container
docker ps --format '{{.Names}}' 2>/dev/null | while read container; do
local stats=$(docker stats --no-stream --format "{{.CPUPerc}},{{.MemUsage}},{{.NetIO}},{{.BlockIO}}" "$container" 2>/dev/null)
if [ -n "$stats" ]; then
local cpu=$(echo $stats | cut -d',' -f1 | sed 's/%//')
local mem=$(echo $stats | cut -d',' -f2 | awk '{print $1}' | numfmt --from=iec)
send_metric "docker_container" ",container=${container}" "cpu_percent=${cpu},memory_bytes=${mem}i"
fi
done
}
# Send heartbeat
send_heartbeat() {
local uptime_seconds=$(cat /proc/uptime | awk '{print int($1)}')
send_metric "agent_heartbeat" "" "status=1i,uptime=${uptime_seconds}i"
}
# Signal handlers for graceful shutdown
shutdown() {
log "Received shutdown signal, stopping agent..."
exit 0
}
trap shutdown SIGTERM SIGINT
# Main collection loop
log "Starting Network Optimizer Linux agent"
log "Agent ID: $AGENT_ID"
log "Device: $DEVICE_NAME"
log "Collection interval: ${COLLECTION_INTERVAL}s"
log "Docker metrics: $ENABLE_DOCKER"
# Send initial heartbeat
send_heartbeat
while true; do
# Collect all metrics
collect_cpu_metrics
collect_memory_metrics
collect_disk_metrics
collect_network_metrics
collect_docker_metrics
send_heartbeat
# Wait for next collection
sleep $COLLECTION_INTERVAL
done
================================================
FILE: src/NetworkOptimizer.Alerts/AlertCooldownTracker.cs
================================================
using System.Collections.Concurrent;
namespace NetworkOptimizer.Alerts;
/// <summary>
/// In-memory cooldown tracker. Keyed by "{ruleId}:{deviceId}" to avoid DB round-trips.
/// </summary>
public class AlertCooldownTracker
{
private readonly ConcurrentDictionary<string, DateTime> _lastFired = new();
/// <summary>
/// Check if the given key is currently in cooldown.
/// </summary>
public bool IsInCooldown(string key, int cooldownSeconds)
{
if (cooldownSeconds <= 0)
return false;
if (!_lastFired.TryGetValue(key, out var lastFired))
return false;
return (DateTime.UtcNow - lastFired).TotalSeconds < cooldownSeconds;
}
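// Illustrative timeline: RecordFired("42:192.168.1.5") at 12:00:00 UTC with a 300s cooldown
// means IsInCooldown returns true until 12:05:00, then false again.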
/// <summary>
/// Record that an alert was fired for the given key.
/// </summary>
public void RecordFired(string key)
{
_lastFired[key] = DateTime.UtcNow;
}
/// <summary>
/// Clear expired entries to prevent unbounded growth.
/// </summary>
public void Cleanup(TimeSpan maxAge)
{
var cutoff = DateTime.UtcNow - maxAge;
foreach (var kvp in _lastFired)
{
if (kvp.Value < cutoff)
_lastFired.TryRemove(kvp.Key, out _);
}
}
}
================================================
FILE: src/NetworkOptimizer.Alerts/AlertCorrelationService.cs
================================================
using Microsoft.Extensions.Logging;
using NetworkOptimizer.Alerts.Events;
using NetworkOptimizer.Alerts.Interfaces;
using NetworkOptimizer.Alerts.Models;
using NetworkOptimizer.Core.Enums;
namespace NetworkOptimizer.Alerts;
/// <summary>
/// Groups related alerts into incidents using correlation keys.
/// </summary>
public class AlertCorrelationService
{
private readonly ILogger<AlertCorrelationService> _logger;
private static readonly TimeSpan CorrelationWindow = TimeSpan.FromMinutes(30);
public AlertCorrelationService(ILogger<AlertCorrelationService> logger)
{
_logger = logger;
}
/// <summary>
/// Derive incident status from the statuses of its constituent alerts.
/// </summary>
public static (AlertStatus Status, DateTime? ResolvedAt) DeriveIncidentStatus(List<AlertHistoryEntry> alerts)
{
if (alerts.Count == 0)
return (AlertStatus.Active, null);
if (alerts.All(a => a.Status == AlertStatus.Resolved))
return (AlertStatus.Resolved, DateTime.UtcNow);
if (alerts.All(a => a.Status is AlertStatus.Acknowledged or AlertStatus.Resolved))
return (AlertStatus.Acknowledged, null);
return (AlertStatus.Active, null);
}
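// Illustrative outcomes: [Resolved, Resolved] -> Resolved (stamped now); [Acknowledged, Resolved] -> Acknowledged;
// [Active, Acknowledged] -> Active; an empty list defaults to Active with no resolution time.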
/// <summary>
/// Derive a correlation key from an alert event.
/// Events with the same key within the correlation window will be grouped.
/// </summary>
public string? GetCorrelationKey(AlertEvent alertEvent)
{
// Device-level correlation: group by device IP
if (!string.IsNullOrEmpty(alertEvent.DeviceIp))
return $"device:{alertEvent.DeviceIp}";
// Source-level correlation: group by event source + type prefix
var dotIndex = alertEvent.EventType.IndexOf('.');
if (dotIndex > 0)
{
var prefix = alertEvent.EventType[..dotIndex];
return $"source:{prefix}";
}
return null;
}
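// Illustrative keys: DeviceIp "192.168.1.5" -> "device:192.168.1.5"; no device IP but
// EventType "audit.score_dropped" -> "source:audit"; EventType "heartbeat" -> null (no correlation).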
/// <summary>
/// Find or create an incident for the given alert event.
/// Returns the incident if correlated, null if no correlation applies.
/// </summary>
public async Task<AlertIncident?> CorrelateAsync(
AlertEvent alertEvent,
AlertHistoryEntry historyEntry,
IAlertRepository repository,
CancellationToken cancellationToken = default)
{
var correlationKey = GetCorrelationKey(alertEvent);
if (correlationKey == null)
return null;
try
{
// Look for existing active incident with the same key within the window
var existingIncident = await repository.GetActiveIncidentByKeyAsync(correlationKey, cancellationToken);
if (existingIncident != null &&
(DateTime.UtcNow - existingIncident.LastTriggeredAt) < CorrelationWindow)
{
// Add to existing incident
existingIncident.AlertCount++;
existingIncident.LastTriggeredAt = DateTime.UtcNow;
if (alertEvent.Severity > existingIncident.Severity)
existingIncident.Severity = alertEvent.Severity;
await repository.UpdateIncidentAsync(existingIncident, cancellationToken);
historyEntry.IncidentId = existingIncident.Id;
_logger.LogDebug("Correlated alert to incident {IncidentId} ({Key})", existingIncident.Id, correlationKey);
return existingIncident;
}
// Create new incident
var incident = new AlertIncident
{
Title = alertEvent.Title,
Severity = alertEvent.Severity,
AlertCount = 1,
CorrelationKey = correlationKey,
FirstTriggeredAt = DateTime.UtcNow,
LastTriggeredAt = DateTime.UtcNow
};
await repository.SaveIncidentAsync(incident, cancellationToken);
historyEntry.IncidentId = incident.Id;
_logger.LogDebug("Created new incident {IncidentId} ({Key})", incident.Id, correlationKey);
return incident;
}
catch (Exception ex)
{
_logger.LogWarning(ex, "Failed to correlate alert");
return null;
}
}
}
================================================
FILE: src/NetworkOptimizer.Alerts/AlertProcessingService.cs
================================================
using System.Text.Json;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using NetworkOptimizer.Alerts.Delivery;
using NetworkOptimizer.Alerts.Events;
using NetworkOptimizer.Alerts.Interfaces;
using NetworkOptimizer.Alerts.Models;
using NetworkOptimizer.Core.Enums;
using NetworkOptimizer.Core.Helpers;
namespace NetworkOptimizer.Alerts;
/// <summary>
/// Background service that consumes alert events from the event bus,
/// evaluates them against configured rules, persists history,
/// correlates into incidents, and dispatches to delivery channels.
/// </summary>
public class AlertProcessingService : BackgroundService
{
private readonly ILogger<AlertProcessingService> _logger;
private readonly IAlertEventBus _eventBus;
private readonly IServiceScopeFactory _scopeFactory;
private readonly AlertRuleEvaluator _ruleEvaluator;
private readonly AlertCorrelationService _correlationService;
private readonly IEnumerable<IAlertDeliveryChannel> _deliveryChannels;
private readonly AlertCooldownTracker _cooldownTracker;
private readonly string? _appBaseUrl;
// In-memory rule cache (refreshed periodically)
private List<AlertRule> _cachedRules = [];
private DateTime _rulesCachedAt = DateTime.MinValue;
private static readonly TimeSpan RuleCacheDuration = TimeSpan.FromSeconds(60);
private DateTime _lastCooldownCleanup = DateTime.UtcNow;
private static readonly TimeSpan CooldownCleanupInterval = TimeSpan.FromMinutes(30);
public AlertProcessingService(
ILogger<AlertProcessingService> logger,
IAlertEventBus eventBus,
IServiceScopeFactory scopeFactory,
AlertRuleEvaluator ruleEvaluator,
AlertCorrelationService correlationService,
IEnumerable<IAlertDeliveryChannel> deliveryChannels,
AlertCooldownTracker cooldownTracker,
IConfiguration configuration)
{
_logger = logger;
_eventBus = eventBus;
_scopeFactory = scopeFactory;
_ruleEvaluator = ruleEvaluator;
_correlationService = correlationService;
_deliveryChannels = deliveryChannels;
_cooldownTracker = cooldownTracker;
// Build base URL using same priority as canonical host redirect in Program.cs:
// REVERSE_PROXIED_HOST_NAME (https) > HOST_NAME (http:8042) > HOST_IP (http:8042)
var reverseProxy = configuration["REVERSE_PROXIED_HOST_NAME"];
var hostName = configuration["HOST_NAME"];
var hostIp = configuration["HOST_IP"];
if (!string.IsNullOrEmpty(reverseProxy))
_appBaseUrl = $"https://{reverseProxy}";
else if (!string.IsNullOrEmpty(hostName))
_appBaseUrl = $"http://{hostName}:8042";
else if (!string.IsNullOrEmpty(hostIp))
_appBaseUrl = $"http://{hostIp}:8042";
else
{
var detectedIp = NetworkUtilities.DetectLocalIpFromInterfaces();
if (!string.IsNullOrEmpty(detectedIp))
_appBaseUrl = $"http://{detectedIp}:8042";
}
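// Illustrative results (hypothetical values): REVERSE_PROXIED_HOST_NAME=netopt.example.com
// yields "https://netopt.example.com"; HOST_IP=192.168.1.10 yields "http://192.168.1.10:8042".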
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
{
_logger.LogInformation("Alert processing service started");
try
{
await foreach (var alertEvent in _eventBus.ConsumeAsync(stoppingToken))
{
try
{
await ProcessEventAsync(alertEvent, stoppingToken);
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to process alert event {EventType}", alertEvent.EventType);
}
}
}
catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
{
// Normal shutdown
}
_logger.LogInformation("Alert processing service stopped");
}
private async Task ProcessEventAsync(AlertEvent alertEvent, CancellationToken cancellationToken)
{
using var scope = _scopeFactory.CreateScope();
var repository = scope.ServiceProvider.GetRequiredService<IAlertRepository>();
// Refresh rule cache if stale
await RefreshRuleCacheAsync(repository, cancellationToken);
// Periodic cooldown cleanup to prevent unbounded growth
if ((DateTime.UtcNow - _lastCooldownCleanup) > CooldownCleanupInterval)
{
CleanupCooldowns();
_lastCooldownCleanup = DateTime.UtcNow;
}
// Evaluate event against rules
var matchingRules = _ruleEvaluator.Evaluate(alertEvent, _cachedRules);
if (matchingRules.Count == 0)
{
_logger.LogWarning("No matching rules for event {EventType}", alertEvent.EventType);
return;
}
foreach (var rule in matchingRules)
{
try
{
_ruleEvaluator.RecordFired(rule, alertEvent);
await ProcessRuleMatchAsync(alertEvent, rule, repository, cancellationToken);
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to process rule {RuleId} for event {EventType}", rule.Id, alertEvent.EventType);
}
}
}
private async Task ProcessRuleMatchAsync(
AlertEvent alertEvent,
AlertRule rule,
IAlertRepository repository,
CancellationToken cancellationToken)
{
// Create history entry
var historyEntry = new AlertHistoryEntry
{
EventType = alertEvent.EventType,
Severity = alertEvent.Severity,
Source = alertEvent.Source,
Title = alertEvent.Title,
Message = alertEvent.Message,
DeviceId = alertEvent.DeviceId,
DeviceName = alertEvent.DeviceName,
DeviceIp = alertEvent.DeviceIp,
SourceUrl = ResolveSourceUrl(alertEvent.SourceUrl),
RuleId = rule.Id,
TriggeredAt = DateTime.UtcNow,
ContextJson = alertEvent.Context.Count > 0
? JsonSerializer.Serialize(alertEvent.Context)
: null
};
await repository.SaveAlertAsync(historyEntry, cancellationToken);
// Correlate into incidents
await _correlationService.CorrelateAsync(alertEvent, historyEntry, repository, cancellationToken);
// Persist incident correlation even for digest-only rules
if (historyEntry.IncidentId.HasValue)
{
await repository.UpdateAlertAsync(historyEntry, cancellationToken);
}
// Skip delivery for digest-only rules
if (rule.DigestOnly)
{
_logger.LogDebug("Rule {RuleId} is digest-only, skipping immediate delivery", rule.Id);
return;
}
// Deliver to matching channels (use resolved absolute URL for delivery)
var deliveryEvent = alertEvent with { SourceUrl = historyEntry.SourceUrl };
await DeliverAsync(deliveryEvent, historyEntry, repository, cancellationToken);
}
private async Task DeliverAsync(
AlertEvent alertEvent,
AlertHistoryEntry historyEntry,
IAlertRepository repository,
CancellationToken cancellationToken)
{
var channels = await repository.GetEnabledChannelsAsync(cancellationToken);
var deliveredTo = new List<int>();
var errors = new List<string>();
foreach (var channel in channels)
{
// Skip channels with higher minimum severity than this alert
if (alertEvent.Severity < channel.MinSeverity)
continue;
// Channels with digest enabled still get immediate alerts too
// (digest is an additional summary, not a replacement for immediate delivery)
var handler = _deliveryChannels.FirstOrDefault(d => d.ChannelType == channel.ChannelType);
if (handler == null)
{
_logger.LogWarning("No delivery handler for channel type {Type}", channel.ChannelType);
continue;
}
try
{
var success = await handler.SendAsync(alertEvent, historyEntry, channel, cancellationToken);
if (success)
{
deliveredTo.Add(channel.Id);
}
else
{
errors.Add($"Channel {channel.Id} ({channel.Name}): delivery returned false");
}
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to deliver alert to channel {ChannelId} ({ChannelName})",
channel.Id, channel.Name);
errors.Add($"Channel {channel.Id} ({channel.Name}): {ex.Message}");
}
}
// Update history entry with delivery results
historyEntry.DeliveredToChannels = deliveredTo.Count > 0
? string.Join(",", deliveredTo)
: null;
historyEntry.DeliverySucceeded = deliveredTo.Count > 0 && errors.Count == 0;
historyEntry.DeliveryError = errors.Count > 0
? string.Join("; ", errors)
: null;
await repository.UpdateAlertAsync(historyEntry, cancellationToken);
}
private async Task RefreshRuleCacheAsync(IAlertRepository repository, CancellationToken cancellationToken)
{
if ((DateTime.UtcNow - _rulesCachedAt) < RuleCacheDuration)
return;
try
{
_cachedRules = await repository.GetEnabledRulesAsync(cancellationToken);
_rulesCachedAt = DateTime.UtcNow;
_logger.LogDebug("Refreshed alert rule cache ({Count} enabled rules)", _cachedRules.Count);
}
catch (Exception ex)
{
_logger.LogError(ex, "Failed to refresh alert rule cache");
// Keep using stale cache rather than failing
}
}
/// <summary>
/// Resolves a relative SourceUrl (e.g., "/audit") to an absolute URL using the app's
/// configured hostname. Falls back to the relative path if no hostname is configured.
/// </summary>
private string? ResolveSourceUrl(string? relativeUrl)
{
if (string.IsNullOrEmpty(relativeUrl))
return null;
if (_appBaseUrl != null)
return $"{_appBaseUrl}{relativeUrl}";
return relativeUrl;
}
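// Example: with _appBaseUrl = "http://192.168.1.10:8042" (hypothetical), "/audit"
// resolves to "http://192.168.1.10:8042/audit"; with no base URL it is returned unchanged.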
private void CleanupCooldowns()
{
_cooldownTracker.Cleanup(TimeSpan.FromHours(2));
}
}
================================================
FILE: src/NetworkOptimizer.Alerts/AlertRuleEvaluator.cs
================================================
using Microsoft.Extensions.Logging;
using NetworkOptimizer.Alerts.Events;
using NetworkOptimizer.Alerts.Models;
namespace NetworkOptimizer.Alerts;
/// <summary>
/// Evaluates alert events against configured rules to determine which rules match.
/// </summary>
public class AlertRuleEvaluator
{
private readonly AlertCooldownTracker _cooldownTracker;
private readonly ILogger<AlertRuleEvaluator> _logger;
public AlertRuleEvaluator(AlertCooldownTracker cooldownTracker, ILogger<AlertRuleEvaluator> logger)
{
_cooldownTracker = cooldownTracker;
_logger = logger;
}
/// <summary>
/// Find all rules that match the given event and are not in cooldown.
/// </summary>
public List<AlertRule> Evaluate(AlertEvent alertEvent, IReadOnlyList<AlertRule> rules)
{
var matches = new List<AlertRule>();
foreach (var rule in rules)
{
if (!rule.IsEnabled)
continue;
if (alertEvent.Severity < rule.MinSeverity)
continue;
if (!MatchesEventType(alertEvent.EventType, rule.EventTypePattern))
continue;
if (!string.IsNullOrEmpty(rule.Source) &&
!string.Equals(rule.Source, alertEvent.Source, StringComparison.OrdinalIgnoreCase))
continue;
if (!MatchesTargetDevice(alertEvent.DeviceId, alertEvent.DeviceIp, rule.TargetDevices))
continue;
if (!MeetsThreshold(alertEvent, rule))
{
_logger.LogDebug("Rule '{RuleName}' matched event {EventType} but below threshold ({ThresholdPercent}%)",
rule.Name, alertEvent.EventType, rule.ThresholdPercent);
continue;
}
var cooldownKey = $"{rule.Id}:{alertEvent.DeviceId ?? alertEvent.DeviceIp ?? "global"}";
if (_cooldownTracker.IsInCooldown(cooldownKey, rule.CooldownSeconds))
{
_logger.LogDebug("Rule '{RuleName}' matched event {EventType} but in cooldown",
rule.Name, alertEvent.EventType);
continue;
}
matches.Add(rule);
}
return matches;
}
/// <summary>
/// Record that a rule was fired (for cooldown tracking).
/// </summary>
public void RecordFired(AlertRule rule, AlertEvent alertEvent)
{
var cooldownKey = $"{rule.Id}:{alertEvent.DeviceId ?? alertEvent.DeviceIp ?? "global"}";
_cooldownTracker.RecordFired(cooldownKey);
}
/// <summary>
/// Match event type against pattern. Supports exact match and trailing wildcard (e.g., "audit.*").
/// </summary>
internal static bool MatchesEventType(string eventType, string pattern)
{
if (string.IsNullOrEmpty(pattern) || pattern == "*")
return true;
if (pattern.EndsWith(".*"))
{
var prefix = pattern[..^2];
return eventType.StartsWith(prefix, StringComparison.OrdinalIgnoreCase) &&
eventType.Length > prefix.Length && eventType[prefix.Length] == '.';
}
return string.Equals(eventType, pattern, StringComparison.OrdinalIgnoreCase);
}
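// Illustrative cases: ("audit.score_dropped", "audit.*") -> true; ("audit", "audit.*") -> false
// (nothing after the prefix); ("auditx.foo", "audit.*") -> false (no '.' at the boundary);
// (anything, "*") -> true.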
/// <summary>
/// Check if the event meets the rule's degradation threshold.
/// If the rule has a ThresholdPercent, the event must have a "drop_percent" context value >= threshold.
/// </summary>
private static bool MeetsThreshold(AlertEvent alertEvent, AlertRule rule)
{
if (rule.ThresholdPercent == null)
return true;
if (alertEvent.Context.TryGetValue("drop_percent", out var dropStr) ||
alertEvent.Context.TryGetValue("drop", out dropStr))
{
if (double.TryParse(dropStr, out var dropValue))
return dropValue >= rule.ThresholdPercent.Value;
}
// No drop context = not a threshold event, let it through
return true;
}
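// Example: a rule with ThresholdPercent = 15 passes for Context["drop_percent"] = "20"
// but not for "10"; an event with no drop context always passes.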
/// <summary>
/// Check if event matches the rule's target device filter.
/// </summary>
private static bool MatchesTargetDevice(string? deviceId, string? deviceIp, string? targetDevices)
{
if (string.IsNullOrEmpty(targetDevices))
return true;
var targets = targetDevices.Split(',', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries);
if (targets.Length == 0)
return true;
if (!string.IsNullOrEmpty(deviceId) && targets.Contains(deviceId, StringComparer.OrdinalIgnoreCase))
return true;
if (!string.IsNullOrEmpty(deviceIp) && targets.Contains(deviceIp, StringComparer.OrdinalIgnoreCase))
return true;
return false;
}
}
================================================
FILE: src/NetworkOptimizer.Alerts/DefaultAlertRules.cs
================================================
using NetworkOptimizer.Alerts.Models;
using NetworkOptimizer.Core.Enums;
namespace NetworkOptimizer.Alerts;
/// <summary>
/// Default alert rules seeded when the AlertRules table is empty on startup.
/// Rule names use "Nav Title: Description" format to match the app's menu structure.
/// Rules that need infrastructure configured (speed tests, etc.) are disabled by default
/// as helpful starting points for users to enable after setup.
/// </summary>
public static class DefaultAlertRules
{
public static List<AlertRule> GetDefaults() =>
[
// --- Security Audit rules (enabled - only needs UniFi connection) ---
new AlertRule
{
Name = "Security Audit: Score Drop",
IsEnabled = true,
EventTypePattern = "audit.score_dropped",
Source = "audit",
MinSeverity = AlertSeverity.Warning,
ThresholdPercent = 15,
CooldownSeconds = 3600 // 1 hour
},
new AlertRule
{
Name = "Security Audit: Completed",
IsEnabled = false,
EventTypePattern = "audit.completed",
Source = "audit",
MinSeverity = AlertSeverity.Info,
CooldownSeconds = 3600 // 1 hour
},
new AlertRule
{
Name = "Security Audit: Critical Finding",
IsEnabled = true,
EventTypePattern = "audit.critical_findings",
Source = "audit",
MinSeverity = AlertSeverity.Critical,
CooldownSeconds = 0
},
// --- Device monitoring (disabled - can be noisy until user configures which devices matter) ---
new AlertRule
{
Name = "Device Offline",
IsEnabled = false,
EventTypePattern = "device.offline",
Source = "device",
MinSeverity = AlertSeverity.Error,
CooldownSeconds = 300 // 5 minutes
},
// --- Wi-Fi Optimizer (enabled, digest only - works automatically) ---
new AlertRule
{
Name = "Wi-Fi Optimizer: Channel Congestion",
IsEnabled = true,
EventTypePattern = "wifi.congestion",
Source = "wifi",
MinSeverity = AlertSeverity.Warning,
CooldownSeconds = 3600, // 1 hour
DigestOnly = true // High frequency, low urgency
},
// --- Threat Intelligence (enabled - works with IPS data) ---
new AlertRule
{
Name = "Threat Intelligence: Critical Event",
IsEnabled = true,
EventTypePattern = "threats.ips_event",
Source = "threats",
MinSeverity = AlertSeverity.Critical,
CooldownSeconds = 60 // 1 minute
},
// --- Threat Intelligence: Attack Chain (enabled - multi-stage attacks are high signal) ---
new AlertRule
{
Name = "Threat Intelligence: Attack Chain",
IsEnabled = true,
EventTypePattern = "threats.attack_chain",
Source = "threats",
MinSeverity = AlertSeverity.Warning,
CooldownSeconds = 3600 // 1 hour
},
new AlertRule
{
Name = "Threat Intelligence: Early-Stage Attack Chain",
IsEnabled = false,
EventTypePattern = "threats.attack_chain_attempt",
Source = "threats",
MinSeverity = AlertSeverity.Info,
CooldownSeconds = 3600 // 1 hour
},
new AlertRule
{
Name = "Threat Intelligence: Attack Pattern",
IsEnabled = false,
EventTypePattern = "threats.attack_pattern",
Source = "threats",
MinSeverity = AlertSeverity.Warning,
CooldownSeconds = 3600 // 1 hour
},
// --- WAN Speed Test (disabled - needs gateway SSH configured) ---
new AlertRule
{
Name = "WAN Speed Test: Degradation",
IsEnabled = false,
EventTypePattern = "wan.speed_degradation",
Source = "wan",
MinSeverity = AlertSeverity.Warning,
ThresholdPercent = 40,
CooldownSeconds = 1800 // 30 minutes
},
// --- LAN Speed Test (disabled - needs device SSH configured) ---
new AlertRule
{
Name = "LAN Speed Test: Regression",
IsEnabled = false,
EventTypePattern = "speedtest.regression",
Source = "speedtest",
MinSeverity = AlertSeverity.Warning,
ThresholdPercent = 25,
CooldownSeconds = 3600 // 1 hour
},
// --- Schedule (enabled - monitors scheduled task failures) ---
new AlertRule
{
Name = "Scheduled Task Failed",
IsEnabled = true,
EventTypePattern = "schedule.task_failed",
Source = "schedule",
MinSeverity = AlertSeverity.Error,
CooldownSeconds = 3600 // 1 hour
},
// --- WAN Data Usage (disabled - needs data usage tracking configured) ---
new AlertRule
{
Name = "WAN Data Usage: Warning",
IsEnabled = false,
EventTypePattern = "wan.data_usage_warning",
Source = "wan",
MinSeverity = AlertSeverity.Warning,
CooldownSeconds = 86400 // 24 hours
},
new AlertRule
{
Name = "WAN Data Usage: Cap Exceeded",
IsEnabled = false,
EventTypePattern = "wan.data_usage_exceeded",
Source = "wan",
MinSeverity = AlertSeverity.Error,
CooldownSeconds = 86400 // 24 hours
}
];
}
================================================
FILE: src/NetworkOptimizer.Alerts/Delivery/DiscordChannelConfig.cs
================================================
namespace NetworkOptimizer.Alerts.Delivery;
public class DiscordChannelConfig
{
public string WebhookUrl { get; set; } = string.Empty;
}
================================================
FILE: src/NetworkOptimizer.Alerts/Delivery/DiscordDeliveryChannel.cs
================================================
using System.Text;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using NetworkOptimizer.Alerts.Events;
using NetworkOptimizer.Alerts.Models;
using NetworkOptimizer.Core.Enums;
namespace NetworkOptimizer.Alerts.Delivery;
/// <summary>
/// Discord delivery via webhook with embed formatting and severity colors.
/// </summary>
public class DiscordDeliveryChannel : IAlertDeliveryChannel
{
private readonly ILogger<DiscordDeliveryChannel> _logger;
private readonly HttpClient _httpClient;
public DeliveryChannelType ChannelType => DeliveryChannelType.Discord;
public DiscordDeliveryChannel(ILogger<DiscordDeliveryChannel> logger, HttpClient httpClient)
{
_logger = logger;
_httpClient = httpClient;
}
public async Task<bool> SendAsync(AlertEvent alertEvent, AlertHistoryEntry historyEntry, DeliveryChannel channel, CancellationToken cancellationToken = default)
{
var config = JsonSerializer.Deserialize<DiscordChannelConfig>(channel.ConfigJson);
if (config == null || string.IsNullOrEmpty(config.WebhookUrl)) return false;
var fields = new List